text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
# Import the tutorial script via the test-suite wrapper: configure_and_import
# executes the script once and returns its module namespace, plus a decorator
# that skips the test when required ESPResSo features are missing.
# "@TUTORIALS_DIR@" is substituted by the build system (CMake configure step)
# — TODO confirm against the build configuration.
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
    "@TUTORIALS_DIR@/04-lattice_boltzmann/04-lattice_boltzmann_part3.py",
    gpu=True)
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
    # Smoke test: the tutorial already ran during import above; exposing the
    # simulation system here ties any import-time failure to this test case.
    system = tutorial.system
if __name__ == "__main__":
    ut.main()
|
KaiSzuttor/espresso
|
testsuite/scripts/tutorials/test_04-lattice_boltzmann_part3.py
|
Python
|
gpl-3.0
| 1,042
|
[
"ESPResSo"
] |
cc7a4e7c15b6290ee2c05f42663f74f46f3baad4547f19d353a188bcbcd08e79
|
"""
Test the about xblock
"""
import datetime
import pytz
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from mock import patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from course_modes.models import CourseMode
from track.tests import EventTrackingTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_CLOSED_MODULESTORE
from student.models import CourseEnrollment
from student.tests.factories import UserFactory, CourseEnrollmentAllowedFactory
from shoppingcart.models import Order, PaidCourseRegistration
from xmodule.course_module import CATALOG_VISIBILITY_ABOUT, CATALOG_VISIBILITY_NONE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from util.milestones_helpers import (
set_prerequisite_courses,
seed_milestone_relationship_types,
get_prerequisite_courses_display,
)
from .helpers import LoginEnrollmentTestCase
# HTML for registration button
REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">"
SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course."
@attr('shard_1')
class AboutTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase, EventTrackingTestCase):
    """
    Tests about xblock: anonymous/logged-in access, enrollment messaging,
    catalog-visibility permissions, marketing-site redirects, and
    prerequisite-course display on the course about page.
    """
    def setUp(self):
        super(AboutTestCase, self).setUp()
        # Ordinary course whose about page carries known marker text.
        self.course = CourseFactory.create()
        self.about = ItemFactory.create(
            category="about", parent_location=self.course.location,
            data="OOGIE BLOOGIE", display_name="overview"
        )
        # Course hidden from both the catalog and the about page.
        # NOTE(review): self.about is rebound for each course below; only the
        # last assignment survives, but every created about xblock persists
        # in the modulestore, which is what the tests rely on.
        self.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE)
        self.about = ItemFactory.create(
            category="about", parent_location=self.course_without_about.location,
            data="WITHOUT ABOUT", display_name="overview"
        )
        # Course hidden from the catalog but whose about page stays visible.
        self.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT)
        self.about = ItemFactory.create(
            category="about", parent_location=self.course_with_about.location,
            data="WITH ABOUT", display_name="overview"
        )
        # Paywalled course: an "honor" CourseMode with a $10 minimum price.
        self.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
        self.course_mode = CourseMode(course_id=self.purchase_course.id,
                                      mode_slug="honor",
                                      mode_display_name="honor cert",
                                      min_price=10)
        self.course_mode.save()
    def test_anonymous_user(self):
        """
        This test asserts that a non-logged in user can visit the course about page
        """
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)
        # Check that registration button is present
        self.assertIn(REG_STR, resp.content)
    def test_logged_in(self):
        """
        This test asserts that a logged-in user can visit the course about page
        """
        self.setup_user()
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)
    def test_already_enrolled(self):
        """
        Asserts that the end user sees the appropriate messaging
        when he/she visits the course about page, but is already enrolled
        """
        self.setup_user()
        self.enroll(self.course, True)
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("You are registered for this course", resp.content)
        self.assertIn("View Courseware", resp.content)
    @override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page")
    def test_visible_about_page_settings(self):
        """
        Verify that the About Page honors the permission settings in the course module
        """
        # Visible about page: content is served.
        url = reverse('about_course', args=[self.course_with_about.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("WITH ABOUT", resp.content)
        # Hidden about page: a 404 is returned instead of the content.
        url = reverse('about_course', args=[self.course_without_about.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 404)
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_logged_in_marketing(self):
        # With the marketing site enabled, the LMS about page redirects
        # enrolled-capable users to the course info page.
        self.setup_user()
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        # should be redirected
        self.assertEqual(resp.status_code, 302)
        # follow this time, and check we're redirected to the course info page
        resp = self.client.get(url, follow=True)
        target_url = resp.redirect_chain[-1][0]
        info_url = reverse('info', args=[self.course.id.to_deprecated_string()])
        self.assertTrue(target_url.endswith(info_url))
    @patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
    def test_pre_requisite_course(self):
        # The about page of a course with a prerequisite must link to the
        # prerequisite course's own about page.
        seed_milestone_relationship_types()
        pre_requisite_course = CourseFactory.create(org='edX', course='900', display_name='pre requisite course')
        course = CourseFactory.create(pre_requisite_courses=[unicode(pre_requisite_course.id)])
        self.setup_user()
        url = reverse('about_course', args=[unicode(course.id)])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        pre_requisite_courses = get_prerequisite_courses_display(course)
        pre_requisite_course_about_url = reverse('about_course', args=[unicode(pre_requisite_courses[0]['key'])])
        self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
                      .format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
                      resp.content.strip('\n'))
    @patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
    def test_about_page_unfulfilled_prereqs(self):
        # Even with a closed enrollment window and unfulfilled prerequisites,
        # both about pages must still render with the prerequisite link.
        seed_milestone_relationship_types()
        pre_requisite_course = CourseFactory.create(
            org='edX',
            course='900',
            display_name='pre requisite course',
        )
        pre_requisite_courses = [unicode(pre_requisite_course.id)]
        # for this failure to occur, the enrollment window needs to be in the past
        course = CourseFactory.create(
            org='edX',
            course='1000',
            # closed enrollment
            enrollment_start=datetime.datetime(2013, 1, 1),
            enrollment_end=datetime.datetime(2014, 1, 1),
            start=datetime.datetime(2013, 1, 1),
            end=datetime.datetime(2030, 1, 1),
            pre_requisite_courses=pre_requisite_courses,
        )
        set_prerequisite_courses(course.id, pre_requisite_courses)
        self.setup_user()
        self.enroll(self.course, True)
        self.enroll(pre_requisite_course, True)
        url = reverse('about_course', args=[unicode(course.id)])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        pre_requisite_courses = get_prerequisite_courses_display(course)
        pre_requisite_course_about_url = reverse('about_course', args=[unicode(pre_requisite_courses[0]['key'])])
        self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
                      .format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
                      resp.content.strip('\n'))
        url = reverse('about_course', args=[unicode(pre_requisite_course.id)])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
@attr('shard_1')
class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    Tests for the course about page when the course is served from the
    XML (non-Mongo) modulestore.
    """
    MODULESTORE = TEST_DATA_MIXED_CLOSED_MODULESTORE
    # The following XML test course (which lives at common/test/data/2014)
    # is closed; we're testing that an about page still appears when
    # the course is already closed
    xml_course_id = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')
    # this text appears in that course's about page
    # common/test/data/2014/about/overview.html
    xml_data = "about page 463139"
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_logged_in_xml(self):
        # A logged-in user can still view the about page of a closed course.
        self.setup_user()
        url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.xml_data, resp.content)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_anonymous_user_xml(self):
        # An anonymous user can also view the about page of a closed course.
        url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.xml_data, resp.content)
@attr('shard_1')
class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    This test case will check the About page when a course has a capped enrollment
    """
    def setUp(self):
        """
        Set up the tests
        """
        super(AboutWithCappedEnrollmentsTestCase, self).setUp()
        # Course capped at a single enrollment so the second user hits the cap.
        self.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1})
        self.about = ItemFactory.create(
            category="about", parent_location=self.course.location,
            data="OOGIE BLOOGIE", display_name="overview"
        )
    def test_enrollment_cap(self):
        """
        This test will make sure that enrollment caps are enforced
        """
        self.setup_user()
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('<a href="#" class="register">', resp.content)
        # First user takes the only available seat.
        self.enroll(self.course, verify=True)
        # create a new account since the first account is already registered for the course
        self.email = 'foo_second@test.com'
        self.password = 'bar'
        self.username = 'test_second'
        self.create_account(self.username,
                            self.email, self.password)
        self.activate_user(self.email)
        self.login(self.email, self.password)
        # Get the about page again and make sure that the page says that the course is full
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Course is full", resp.content)
        # Try to enroll as well
        result = self.enroll(self.course)
        self.assertFalse(result)
        # Check that registration button is not present
        self.assertNotIn(REG_STR, resp.content)
@attr('shard_1')
class AboutWithInvitationOnly(ModuleStoreTestCase):
    """
    This test case will check the About page when a course is invitation only.
    """
    def setUp(self):
        super(AboutWithInvitationOnly, self).setUp()
        self.course = CourseFactory.create(metadata={"invitation_only": True})
        self.about = ItemFactory.create(
            category="about", parent_location=self.course.location,
            display_name="overview"
        )
    def test_invitation_only(self):
        """
        Test for user not logged in, invitation only course.
        """
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Enrollment in this course is by invitation only", resp.content)
        # Check that registration button is not present
        self.assertNotIn(REG_STR, resp.content)
    def test_invitation_only_but_allowed(self):
        """
        Test for user logged in and allowed to enroll in invitation only course.
        """
        # Course is invitation only, student is allowed to enroll and logged in
        user = UserFactory.create(username='allowed_student', password='test', email='allowed_student@test.com')
        CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id)
        self.client.login(username=user.username, password='test')
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(u"Register for {}".format(self.course.id.course), resp.content.decode('utf-8'))
        # Check that registration button is present
        self.assertIn(REG_STR, resp.content)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'RESTRICT_ENROLL_BY_REG_METHOD': True})
class AboutTestCaseShibCourse(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    Test cases covering about page behavior for courses that use shib enrollment domain ("shib courses")
    """
    def setUp(self):
        super(AboutTestCaseShibCourse, self).setUp()
        self.course = CourseFactory.create(enrollment_domain="shib:https://idp.stanford.edu/")
        self.about = ItemFactory.create(
            category="about", parent_location=self.course.location,
            data="OOGIE BLOOGIE", display_name="overview"
        )
    def test_logged_in_shib_course(self):
        """
        For shib courses, logged in users will see the register button, but get rejected once they click there
        """
        self.setup_user()
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)
        self.assertIn(u"Register for {}".format(self.course.id.course), resp.content.decode('utf-8'))
        # The page shows both the shib permission error and the register form.
        self.assertIn(SHIB_ERROR_STR, resp.content)
        self.assertIn(REG_STR, resp.content)
    def test_anonymous_user_shib_course(self):
        """
        For shib courses, anonymous users will also see the register button
        """
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)
        self.assertIn(u"Register for {}".format(self.course.id.course), resp.content.decode('utf-8'))
        self.assertIn(SHIB_ERROR_STR, resp.content)
        self.assertIn(REG_STR, resp.content)
@attr('shard_1')
class AboutWithClosedEnrollment(ModuleStoreTestCase):
    """
    This test case will check the About page for a course that has enrollment start/end
    set but it is currently outside of that period.
    """
    def setUp(self):
        super(AboutWithClosedEnrollment, self).setUp()
        self.course = CourseFactory.create(metadata={"invitation_only": False})
        # Setup enrollment period to be in future
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=1)
        nextday = tomorrow + datetime.timedelta(days=1)
        self.course.enrollment_start = tomorrow
        self.course.enrollment_end = nextday
        self.course = self.update_course(self.course, self.user.id)
        self.about = ItemFactory.create(
            category="about", parent_location=self.course.location,
            display_name="overview"
        )
    def test_closed_enrollmement(self):
        # NOTE(review): method name typo ("enrollmement") kept — renaming a
        # test method changes the suite's reported test identity.
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Enrollment is Closed", resp.content)
        # Check that registration button is not present
        self.assertNotIn(REG_STR, resp.content)
    def test_course_price_is_not_visble_in_sidebar(self):
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        # course price is not visible in the course_about page when the course
        # mode is not set to honor
        self.assertNotIn('<span class="important-dates-item-text">$10</span>', resp.content)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True})
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
class AboutPurchaseCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    This test class runs through a suite of verifications regarding
    purchasable (paywalled) courses: cart buttons, enrollment windows,
    invitation-only restrictions, enrollment caps, and free courses.
    """
    def setUp(self):
        super(AboutPurchaseCourseTestCase, self).setUp()
        self.course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
        self._set_ecomm(self.course)
    def _set_ecomm(self, course):
        """
        Helper method to turn on ecommerce on the course
        """
        # An "honor" mode with min_price > 0 makes the course paywalled.
        course_mode = CourseMode(
            course_id=course.id,
            mode_slug="honor",
            mode_display_name="honor cert",
            min_price=10,
        )
        course_mode.save()
    def test_anonymous_user(self):
        """
        Make sure an anonymous user sees the purchase button
        """
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Add buyme to Cart ($10)", resp.content)
    def test_logged_in(self):
        """
        Make sure a logged in user sees the purchase button
        """
        self.setup_user()
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Add buyme to Cart ($10)", resp.content)
    def test_already_in_cart(self):
        """
        This makes sure if a user has this course in the cart, that the expected message
        appears
        """
        self.setup_user()
        cart = Order.get_cart_for_user(self.user)
        PaidCourseRegistration.add_to_order(cart, self.course.id)
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("This course is in your", resp.content)
        self.assertNotIn("Add buyme to Cart ($10)", resp.content)
    def test_already_enrolled(self):
        """
        This makes sure that the already enrolled message appears for paywalled courses
        """
        self.setup_user()
        # note that we can't call self.enroll here since that goes through
        # the Django student views, which doesn't allow for enrollments
        # for paywalled courses
        CourseEnrollment.enroll(self.user, self.course.id)
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("You are registered for this course", resp.content)
        self.assertIn("View Courseware", resp.content)
        self.assertNotIn("Add buyme to Cart ($10)", resp.content)
    def test_closed_enrollment(self):
        """
        This makes sure that paywalled courses also honor the registration
        window
        """
        self.setup_user()
        # Move the enrollment window entirely into the future.
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=1)
        nextday = tomorrow + datetime.timedelta(days=1)
        self.course.enrollment_start = tomorrow
        self.course.enrollment_end = nextday
        self.course = self.update_course(self.course, self.user.id)
        url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Enrollment is Closed", resp.content)
        self.assertNotIn("Add buyme to Cart ($10)", resp.content)
        # course price is visible in the course_about page when the course
        # mode is set to honor and its price is set
        self.assertIn('<span class="important-dates-item-text">$10</span>', resp.content)
    def test_invitation_only(self):
        """
        This makes sure that the invitation only restriction takes precedence over
        any purchase enablements
        """
        course = CourseFactory.create(metadata={"invitation_only": True})
        self._set_ecomm(course)
        self.setup_user()
        url = reverse('about_course', args=[course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Enrollment in this course is by invitation only", resp.content)
    def test_enrollment_cap(self):
        """
        Make sure that capped enrollments work even with
        paywalled courses
        """
        course = CourseFactory.create(
            metadata={
                "max_student_enrollments_allowed": 1,
                "display_coursenumber": "buyme",
            }
        )
        self._set_ecomm(course)
        self.setup_user()
        url = reverse('about_course', args=[course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Add buyme to Cart ($10)", resp.content)
        # note that we can't call self.enroll here since that goes through
        # the Django student views, which doesn't allow for enrollments
        # for paywalled courses
        CourseEnrollment.enroll(self.user, course.id)
        # create a new account since the first account is already registered for the course
        email = 'foo_second@test.com'
        password = 'bar'
        username = 'test_second'
        self.create_account(username,
                            email, password)
        self.activate_user(email)
        self.login(email, password)
        # Get the about page again and make sure that the page says that the course is full
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("Course is full", resp.content)
        self.assertNotIn("Add buyme to Cart ($10)", resp.content)
    def test_free_course_display(self):
        """
        Make sure other courses that don't have shopping cart enabled don't display the add-to-cart button
        and don't display the course_price field if Cosmetic Price is disabled.
        """
        course = CourseFactory.create(org='MITx', number='free', display_name='Course For Free')
        self.setup_user()
        url = reverse('about_course', args=[course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn("Add free to Cart (Free)", resp.content)
        self.assertNotIn('<p class="important-dates-item-title">Price</p>', resp.content)
|
htzy/bigfour
|
lms/djangoapps/courseware/tests/test_about.py
|
Python
|
agpl-3.0
| 23,595
|
[
"VisIt"
] |
43221702578b27945be7f42c9453681745e848d9389cd1f824288cac33f29af7
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtWidgets, QtCore, QtGui
from peacock.utils import WidgetUtils
class MediaControlWidgetBase(object):
    """
    Base for media controls.

    NOTE(review): this class calls QWidget API (setEnabled, setLayout,
    setStyleSheet) that it does not define, and pyqtSignal attributes only
    function on QObject subclasses — it appears intended as a mixin combined
    with a QWidget-derived class; confirm against subclasses.
    """
    # Fixed icon size for all media-control buttons.
    _icon_size = QtCore.QSize(32, 32)
    #: Emitted when the play timer is started/stopped
    playStart = QtCore.pyqtSignal()
    playStop = QtCore.pyqtSignal()
    def __init__(self):
        super(MediaControlWidgetBase, self).__init__()
        # Initiate time member variables
        self._times = None  # sequence of simulation times; set externally
        self._current_step = -1  # -1 means "last step"
        self._num_steps = None
        self._playing = False
        # Widget settings
        self.setEnabled(False)
        self.setStyleSheet("QGroupBox {border:0}")
        # Add the main layout for this widget
        self.MainLayout = QtWidgets.QVBoxLayout()
        self.setLayout(self.MainLayout)
        # Define a widget to contain the button objects
        self.ButtonLayout = QtWidgets.QHBoxLayout()
        self.MainLayout.addLayout(self.ButtonLayout)
        # Media control buttons; each is wired to self._callback<Name>.
        self.__addButton('BeginButton', "Set the simulation to the beginning.", 'begin.ico')
        self.__addButton('BackwardButton', "Move simulation back one timestep.", 'backward.ico')
        self.__addButton('PlayButton', "Play through the simulation with time.", 'play.ico')
        self.__addButton('PauseButton', "Stop playing through the simulation.", 'pause.ico')
        self.__addButton('ForwardButton', "Move simulation forward one timestep.", 'forward.ico')
        self.__addButton('EndButton', "Set the simulation to the end.", 'end.ico')
        # Move the timestep/time edit boxes to the right side
        self.ButtonLayout.addStretch(1)
        # TimeStep display/edit
        self.__addEditBox('TimeStepDisplay', 'Timestep:', "Set the simulation timestep.", True)
        self.__addEditBox('TimeDisplay', 'Time:', "Set the simulation time.")
        self.__addEditBox('FrameDelayDisplay', 'Frame delay:', "Set the delay of playback, in milliseconds.", True, "100")
        # Slider
        self.TimeSlider = QtWidgets.QSlider()
        self.MainLayout.addWidget(self.TimeSlider)
        # Playback timer drives timerUpdate() every interval (default 100 ms).
        self.Timer = QtCore.QTimer()
        self.Timer.timeout.connect(self.timerUpdate)
        self.Timer.setInterval(100)
        # Call MooseWidget::setup()
        self.setup()
    def updateControls(self, **kwargs):
        """
        General callback used by all of the widgets contained within this widget.
        """
        # Subclasses extend this; the base merely re-enables the widget.
        self.setEnabled(True)
    def updateTimeDisplay(self):
        """
        Update the time display widgets.
        """
        # Nothing to show without any timesteps.
        if len(self._times) == 0:
            self.setEnabled(False)
            return
        else:
            self.setEnabled(True)
        # Resolve the "-1 == last step" sentinel to a concrete index.
        step = self._current_step
        if step == -1:
            step = self._num_steps - 1
        # While playing, button state is managed by the play/pause callbacks;
        # otherwise enable/disable navigation based on position.
        if not self._playing:
            if step == 0:
                self.BackwardButton.setEnabled(False)
                self.BeginButton.setEnabled(False)
                self.ForwardButton.setEnabled(True)
                self.EndButton.setEnabled(True)
            elif step == self._num_steps - 1:
                self.BackwardButton.setEnabled(True)
                self.BeginButton.setEnabled(True)
                self.ForwardButton.setEnabled(False)
                self.EndButton.setEnabled(False)
            else:
                self.BackwardButton.setEnabled(True)
                self.BeginButton.setEnabled(True)
                self.ForwardButton.setEnabled(True)
                self.EndButton.setEnabled(True)
        self.TimeSlider.setRange(0, self._num_steps - 1)
        self.TimeSlider.setValue(step)
        # Don't clobber text the user is currently editing; block signals so
        # setText does not re-trigger the textChanged callbacks.
        if not self.TimeStepDisplay.hasFocus():
            self.TimeStepDisplay.blockSignals(True)
            self.TimeStepDisplay.setText(str(step))
            self.TimeStepDisplay.blockSignals(False)
        if not self.TimeDisplay.hasFocus():
            self.TimeDisplay.blockSignals(True)
            self.TimeDisplay.setText(str(self._times[step]))
            self.TimeDisplay.blockSignals(False)
    def start(self):
        """
        Start the play timer.
        """
        self.playStart.emit()
        self.Timer.start()
    def stop(self):
        """
        Stop the play timer.
        """
        self.playStop.emit()
        self.Timer.stop()
    def _setupPauseButton(self, qObject):
        # Pause is hidden until playback starts (it swaps with Play).
        qObject.setEnabled(False)
        qObject.setVisible(False)
    def _callbackBeginButton(self):
        # Jump to the first timestep, stopping playback first.
        self._callbackPauseButton()
        self.updateControls(timestep=0, time=None)
    def _callbackBackwardButton(self):
        # Step one timestep back, resolving the -1 "last step" sentinel first.
        self._callbackPauseButton()
        if self._current_step == -1:
            self._current_step = self._num_steps - 1
        self.updateControls(timestep=self._current_step - 1, time=None)
    def timerUpdate(self):
        # Advance one step per timer tick; pause when the end is reached.
        timestep = self._current_step + 1
        if timestep > len(self._times) - 1:
            self._callbackPauseButton()
            return
        self.updateControls(timestep=timestep, time=None)
    def _callbackPlayButton(self):
        # Restart from the beginning if playback previously reached the end.
        if self._current_step == len(self._times) - 1:
            self.BeginButton.clicked.emit(True)
        # Swap Play for Pause and lock out manual navigation while playing.
        self.PauseButton.setEnabled(True)
        self.PauseButton.setVisible(True)
        self.PlayButton.setEnabled(False)
        self.PlayButton.setVisible(False)
        self.BeginButton.setEnabled(False)
        self.BackwardButton.setEnabled(False)
        self.ForwardButton.setEnabled(False)
        self.EndButton.setEnabled(False)
        self.TimeDisplay.setEnabled(False)
        self.TimeStepDisplay.setEnabled(False)
        self.TimeSlider.setEnabled(False)
        self._playing = True
        # Apply the current frame-delay before starting the timer.
        self._callbackFrameDelayDisplay()
        self.start()
    def _callbackPauseButton(self):
        # Stop playback and restore the navigation controls; Begin/Backward
        # and Forward/End are enabled based on the current position.
        self._playing = False
        self.stop()
        self.PauseButton.setEnabled(False)
        self.PauseButton.setVisible(False)
        self.PlayButton.setEnabled(True)
        self.PlayButton.setVisible(True)
        status = self._current_step > 0
        self.BeginButton.setEnabled(status)
        self.BackwardButton.setEnabled(status)
        status = self._current_step != len(self._times) - 1
        self.ForwardButton.setEnabled(status)
        self.EndButton.setEnabled(status)
        self.TimeDisplay.setEnabled(True)
        self.TimeStepDisplay.setEnabled(True)
        self.TimeSlider.setEnabled(True)
    def _callbackForwardButton(self):
        # Step one timestep forward, stopping playback first.
        self._callbackPauseButton()
        self.updateControls(timestep=self._current_step + 1, time=None)
    def _callbackEndButton(self):
        # Jump to the final timestep (sentinel -1), stopping playback first.
        self._callbackPauseButton()
        self.updateControls(timestep=-1, time=None)
    def _callbackTimeStepDisplay(self, text):
        # User typed a timestep; int(float(...)) tolerates e.g. "3.0".
        self._callbackPauseButton()
        if text:
            self.updateControls(timestep=int(float(text)), time=None)
    def _callbackTimeDisplay(self, text):
        # User typed a simulation time; let updateControls resolve the step.
        self._callbackPauseButton()
        if text:
            self.updateControls(time=float(text), timestep=None)
    def _callbackFrameDelayDisplay(self, text=""):
        # The text argument is ignored on purpose: this is also called
        # directly (without arguments) from _callbackPlayButton, so the
        # authoritative value is re-read from the edit box.
        text = self.FrameDelayDisplay.text()
        if text:
            self.Timer.setInterval(int(text))
    def _setupTimeSlider(self, qobject):
        # Horizontal slider; updates only on release to avoid flooding
        # updateControls while dragging.
        qobject.setOrientation(QtCore.Qt.Horizontal)
        qobject.sliderReleased.connect(self._callbackTimeSlider)
    def _callbackTimeSlider(self):
        self.updateControls(timestep=self.TimeSlider.value(), time=None)
    def __addButton(self, name, tooltip, icon):
        # Create a borderless icon button, connect it to _callback<name>,
        # and expose it as an attribute (e.g. self.PlayButton).
        qobject = QtWidgets.QPushButton(self)
        qobject.setToolTip(tooltip)
        qobject.clicked.connect(getattr(self, '_callback' + name))
        qobject.setIcon(WidgetUtils.createIcon(icon))
        qobject.setIconSize(self._icon_size)
        qobject.setFixedSize(qobject.iconSize())
        qobject.setStyleSheet("QPushButton {border:none}")
        self.ButtonLayout.addWidget(qobject)
        setattr(self, name, qobject)
    def __addEditBox(self, name, label, tooltip, int_validate=False, default=""):
        # Create a validated line edit plus label, connect textChanged to
        # _callback<name>, and expose both as attributes (self.<name> and
        # self.<name>Label). Validator bottom is 0: negative input rejected.
        edit = QtWidgets.QLineEdit()
        if int_validate:
            validate = QtGui.QIntValidator()
        else:
            validate = QtGui.QDoubleValidator()
        validate.setBottom(0)
        edit.setValidator(validate)
        edit.setToolTip(tooltip)
        edit.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
        edit.setText(default)
        edit.textChanged.connect(getattr(self, '_callback' + name))
        label = QtWidgets.QLabel(label)
        label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        label.setBuddy(edit)
        self.ButtonLayout.addWidget(label)
        self.ButtonLayout.addWidget(edit)
        setattr(self, name, edit)
        setattr(self, name + 'Label', label)
|
harterj/moose
|
python/peacock/base/MediaControlWidgetBase.py
|
Python
|
lgpl-2.1
| 9,084
|
[
"MOOSE"
] |
33246a3b29831f46b4ae3bb4230b3603ff25eb7932d9d2da3cd074164f4cd637
|
import random
import numpy as np
from nose import with_setup
from numpy.testing import assert_almost_equal
from pybbn.gaussian.inference import GaussianInference
def setup():
    """
    Nose setup hook: seed both RNGs for reproducibility and configure
    numpy's print formatting to ten decimal places.
    :return: None.
    """
    seed_value = 37
    random.seed(seed_value)
    np.random.seed(seed_value)
    np.set_printoptions(
        precision=10,
        formatter={'float': lambda v: f'{v:.10f}'}
    )
def teardown():
    """
    Nose teardown hook; no cleanup is required.
    :return: None.
    """
def get_cowell_data():
    """
    Builds a synthetic sample for the Cowell chain example:
    Y ~ N(0, 1), X ~ N(Y, 1), Z ~ N(X, 1).
    :return: (10000 x 3) data matrix and headers ['Y', 'X', 'Z'].
    """
    size = 10000
    # Draw in chain order so the RNG stream matches the model Y -> X -> Z.
    y = np.random.normal(0, 1, size)
    x = np.random.normal(y, 1, size)
    z = np.random.normal(x, 1, size)
    data = np.column_stack([y, x, z])
    return data, ['Y', 'X', 'Z']
def get_castillo_data():
    """
    Builds a synthetic sample for the Castillo example:
    A ~ N(0, 1), B ~ N(0, 1), C ~ N(A, 1), D ~ N(0.2A + 0.8B, 1).
    :return: (10000 x 4) data matrix and headers ['A', 'B', 'C', 'D'].
    """
    size = 10000
    # Draw in dependency order so the RNG stream matches the model.
    a = np.random.normal(0, 1, size)
    b = np.random.normal(0, 1, size)
    c = np.random.normal(a, 1, size)
    d = np.random.normal(0.2 * a + 0.8 * b, 1, size)
    data = np.column_stack([a, b, c, d])
    return data, ['A', 'B', 'C', 'D']
@with_setup(setup, teardown)
def test_cowell_x():
    """
    Tests inference with Cowell example (X=1.5).
    """
    X, H = get_cowell_data()
    # Parameterize the joint Gaussian from sample mean and covariance.
    M = X.mean(axis=0)
    E = np.cov(X.T)
    g = GaussianInference(H, M, E)
    # Debug dumps of headers, indices, mean, covariance, and parameters.
    print(g.H)
    print(g.I)
    print(g.M)
    print(g.E)
    print(g.P)
    print('-' * 15)
    # Condition on the evidence X = 1.5; the posterior covers (Y, Z).
    g = g.do_inference('X', 1.5)
    print(g.H)
    print(g.I)
    print(g.M)
    print(g.E)
    print(g.meta)
    print(g.P)
    # Regression values recorded for the fixed seed (37) in setup().
    assert_almost_equal(g.M, [-0.7447794831, -1.5222039705])
    assert_almost_equal(g.E, [[0.4962114580, 0.0020891582],
                              [0.0020891582, 0.9843995081]])
@with_setup(setup, teardown)
def test_cowell_z():
    """
    Tests inference with Cowell example (Z=1.5).
    """
    X, H = get_cowell_data()
    # Parameterize the joint Gaussian from sample mean and covariance.
    M = X.mean(axis=0)
    E = np.cov(X.T)
    g = GaussianInference(H, M, E)
    print(g.H)
    print(g.I)
    print(g.M)
    print(g.E)
    print('-' * 15)
    # Condition on the evidence Z = 1.5; the posterior covers (Y, X).
    g = g.do_inference('Z', 1.5)
    print(g.H)
    print(g.I)
    print(g.M)
    print(g.E)
    print(g.meta)
    print(g.P)
    # Regression values recorded for the fixed seed (37) in setup().
    assert_almost_equal(g.M, [-0.4978580082, -1.0141860551])
    assert_almost_equal(g.E, [[0.6552719951, 0.3226010216],
                              [0.3226010216, 0.6542698781]])
@with_setup(setup, teardown)
def test_cowell_y():
    """
    Tests inference with Cowell example (Y=1.5).
    """
    X, H = get_cowell_data()
    # Parameterize the joint Gaussian from sample mean and covariance.
    M = X.mean(axis=0)
    E = np.cov(X.T)
    g = GaussianInference(H, M, E)
    print(g.H)
    print(g.I)
    print(g.M)
    print(g.E)
    print('-' * 15)
    # Condition on the evidence Y = 1.5; the posterior covers (X, Z).
    g = g.do_inference('Y', 1.5)
    print(g.H)
    print(g.I)
    print(g.M)
    print(g.E)
    print(g.meta)
    print(g.P)
    # Regression values recorded for the fixed seed (37) in setup().
    assert_almost_equal(g.M, [-1.5175865285, -1.5280767750])
    assert_almost_equal(g.E, [[1.0099559400, 1.0160891744],
                              [1.0160891744, 2.0066503668]])
@with_setup(setup, teardown)
def test_do_inferences():
    """
    Tests multiple inferences with Castillo example (A=1, B=2, C=3).
    """
    X, H = get_castillo_data()
    # Parameterize the joint Gaussian from sample mean and covariance.
    M = X.mean(axis=0)
    E = np.cov(X.T)
    g = GaussianInference(H, M, E)
    print(g.H)
    print(g.I)
    print(g.M)
    print(g.E)
    print(g.P)
    print('-' * 15)
    # Apply the three pieces of evidence in sequence; only D remains.
    g1 = g.do_inferences([('A', 1), ('B', 2), ('C', 3)])
    print(g1.M)
    e = np.array([-1.8320539239])
    # FIX: assert_almost_equal's ``decimal`` is an int count of decimal
    # places, not an absolute tolerance. The original ``decimal=0.001``
    # yielded a tolerance of 1.5 * 10**-0.001 ~= 1.5, making the assertion
    # nearly vacuous; ``decimal=3`` gives the intended ~0.0015 tolerance.
    assert_almost_equal(g1.M, e, decimal=3)
@with_setup(setup, teardown)
def test_repr():
    """
    Tests GaussianInference repr function.
    """
    X, H = get_castillo_data()
    # Parameterize the joint Gaussian from sample mean and covariance.
    M = X.mean(axis=0)
    E = np.cov(X.T)
    g = GaussianInference(H, M, E)
    print(g)
    # Exact string comparison: depends on the fixed seed (37) from setup()
    # and on numpy's default float formatting inside GaussianInference.
    o = str(g)
    e = 'GaussianInference[H=[A,B,C,D], M=[0.002,-0.009,0.007,-0.018], E=[[0.991,0.008,1.001,0.204]|[0.008,1.010,' \
        '0.014,0.799]|[1.001,0.014,1.996,0.225]|[0.204,0.799,0.225,1.685]], meta={}]'
    assert o == e
    print(g.marginals)
    # Marginal means/variances recorded for the same fixed seed.
    o = g.marginals
    e = [{'name': 'A', 'mean': -0.0017234068142374496, 'var': 0.9907002440358944},
         {'name': 'B', 'mean': 0.009171006220968045, 'var': 1.0100180410420976},
         {'name': 'C', 'mean': -0.006711963688230272, 'var': 1.9957039315017837},
         {'name': 'D', 'mean': 0.018085596717747506, 'var': 1.6851371822157823}]
    assert len(e) == len(o)
    for act, obs in zip(e, o):
        assert act['name'] == obs['name']
        assert_almost_equal(act['mean'], obs['mean'])
        assert_almost_equal(act['var'], obs['var'])
@with_setup(setup, teardown)
def test_sample_marginals():
    """
    Tests sampling marginals.

    Draws 10,000 samples from each marginal of the Castillo model and
    checks the sample means and variances against the analytic marginals,
    then exercises sampling again after conditioning on A = 0.
    """
    X, H = get_castillo_data()
    M = X.mean(axis=0)
    E = np.cov(X.T)
    g = GaussianInference(H, M, E)
    print(g)
    # Analytic marginal means/variances the sample statistics should approach.
    e = [{'name': 'A', 'mean': -0.0017234068142374496, 'var': 0.9907002440358944},
         {'name': 'B', 'mean': 0.009171006220968045, 'var': 1.0100180410420976},
         {'name': 'C', 'mean': -0.006711963688230272, 'var': 1.9957039315017837},
         {'name': 'D', 'mean': 0.018085596717747506, 'var': 1.6851371822157823}]
    marginals = g.sample_marginals(size=10000)
    a = marginals['A']
    b = marginals['B']
    c = marginals['C']
    d = marginals['D']
    print(a.mean())
    print(b.mean())
    print(c.mean())
    print(d.mean())
    print('-' * 15)
    # BUG FIX: numpy's assert_almost_equal expects ``decimal`` to be an int
    # (number of decimal places). The previous decimal=0.001 produced a
    # tolerance of 1.5 * 10**-0.001 ~= 1.5, which accepted almost any value.
    # decimal=1 (tolerance 0.15) is tight enough to be meaningful while still
    # comfortably above the Monte Carlo noise at 10,000 samples.
    assert_almost_equal(a.mean(), e[0]['mean'], decimal=1)
    assert_almost_equal(b.mean(), e[1]['mean'], decimal=1)
    assert_almost_equal(c.mean(), e[2]['mean'], decimal=1)
    assert_almost_equal(d.mean(), e[3]['mean'], decimal=1)
    print(a.var())
    print(b.var())
    print(c.var())
    print(d.var())
    print('-' * 15)
    assert_almost_equal(a.var(), e[0]['var'], decimal=1)
    assert_almost_equal(b.var(), e[1]['var'], decimal=1)
    assert_almost_equal(c.var(), e[2]['var'], decimal=1)
    assert_almost_equal(d.var(), e[3]['var'], decimal=1)
    # Smoke-test sampling after conditioning on A = 0 (no numeric checks;
    # just verify the conditioned model still produces marginal samples).
    gg = g.do_inference('A', 0.0)
    print(gg.marginals)
    print('-' * 15)
    m = gg.sample_marginals()
    print(m['A'].mean(), m['A'].var())
    print(m['B'].mean(), m['B'].var())
    print(m['C'].mean(), m['C'].var())
    print(m['D'].mean(), m['D'].var())
|
vangj/py-bbn
|
tests/gaussian/test_inference.py
|
Python
|
apache-2.0
| 6,123
|
[
"Gaussian"
] |
70c930f5f52dcc00cbd885de2a40234fd31a40e89a8d3dc17e8c91b00338f67d
|
import unittest
import numpy as np
import warnings
warnings.simplefilter('error')
from nose.plugins.attrib import attr
from moltools import read_dal
HF_FILE = """
************************************************************************
*************** Dalton - An Electronic Structure Program ***************
************************************************************************
This is output from DALTON 2016.alpha
----------------------------------------------------------------------------
NOTE:
Dalton is an experimental code for the evaluation of molecular
properties using (MC)SCF, DFT, CI, and CC wave functions.
The authors accept no responsibility for the performance of
the code or for the correctness of the results.
The code (in whole or part) is provided under a licence and
is not to be reproduced for further distribution without
the written permission of the authors or their representatives.
See the home page "http://daltonprogram.org" for further information.
If results obtained with this code are published,
the appropriate citations would be both of:
K. Aidas, C. Angeli, K. L. Bak, V. Bakken, R. Bast,
L. Boman, O. Christiansen, R. Cimiraglia, S. Coriani,
J. Cukras, P. Dahle, E. K. Dalskov, U. Ekstroem,
T. Enevoldsen, J. J. Eriksen, P. Ettenhuber, B. Fernandez,
L. Ferrighi, H. Fliegl, L. Frediani, K. Hald, A. Halkier,
C. Haettig, H. Heiberg, T. Helgaker, A. C. Hennum,
H. Hettema, E. Hjertenaes, S. Hoest, I.-M. Hoeyvik,
M. F. Iozzi, B. Jansik, H. J. Aa. Jensen, D. Jonsson,
P. Joergensen, M. Kaminski, J. Kauczor, S. Kirpekar,
T. Kjaergaard, W. Klopper, S. Knecht, R. Kobayashi, H. Koch,
J. Kongsted, A. Krapp, K. Kristensen, A. Ligabue,
O. B. Lutnaes, J. I. Melo, K. V. Mikkelsen, R. H. Myhre,
C. Neiss, C. B. Nielsen, P. Norman, J. Olsen,
J. M. H. Olsen, A. Osted, M. J. Packer, F. Pawlowski,
T. B. Pedersen, P. F. Provasi, S. Reine, Z. Rinkevicius,
T. A. Ruden, K. Ruud, V. Rybkin, P. Salek, C. C. M. Samson,
A. Sanchez de Meras, T. Saue, S. P. A. Sauer,
B. Schimmelpfennig, K. Sneskov, A. H. Steindal,
K. O. Sylvester-Hvid, P. R. Taylor, A. M. Teale,
E. I. Tellgren, D. P. Tew, A. J. Thorvaldsen, L. Thoegersen,
O. Vahtras, M. A. Watson, D. J. D. Wilson, M. Ziolkowski
and H. Agren,
"The Dalton quantum chemistry program system",
WIREs Comput. Mol. Sci. 2013. (doi: 10.1002/wcms.1172)
and
Dalton, a Molecular Electronic Structure Program,
Release Dalton2016.alpha (2015), see http://daltonprogram.org
----------------------------------------------------------------------------
Authors in alphabetical order (major contribution(s) in parenthesis):
Kestutis Aidas, Vilnius University, Lithuania (QM/MM)
Celestino Angeli, University of Ferrara, Italy (NEVPT2)
Keld L. Bak, UNI-C, Denmark (AOSOPPA, non-adiabatic coupling, magnetic properties)
Vebjoern Bakken, University of Oslo, Norway (DALTON; geometry optimizer, symmetry detection)
Radovan Bast, KTH Stockholm, Sweden (DALTON installation and execution frameworks)
Pablo Baudin, University of Valencia, Spain (Cholesky excitation energies)
Linus Boman, NTNU, Norway (Cholesky decomposition and subsystems)
Ove Christiansen, Aarhus University, Denmark (CC module)
Renzo Cimiraglia, University of Ferrara, Italy (NEVPT2)
Sonia Coriani, University of Trieste, Italy (CC module, MCD in RESPONS)
Janusz Cukras, University of Trieste, Italy (MChD in RESPONS)
Paal Dahle, University of Oslo, Norway (Parallelization)
Erik K. Dalskov, UNI-C, Denmark (SOPPA)
Thomas Enevoldsen, Univ. of Southern Denmark, Denmark (SOPPA)
Janus J. Eriksen, Aarhus University, Denmark (Polarizable embedding model, TDA)
Berta Fernandez, U. of Santiago de Compostela, Spain (doublet spin, ESR in RESPONS)
Lara Ferrighi, Aarhus University, Denmark (PCM Cubic response)
Heike Fliegl, University of Oslo, Norway (CCSD(R12))
Luca Frediani, UiT The Arctic U. of Norway, Norway (PCM)
Bin Gao, UiT The Arctic U. of Norway, Norway (Gen1Int library)
Christof Haettig, Ruhr-University Bochum, Germany (CC module)
Kasper Hald, Aarhus University, Denmark (CC module)
Asger Halkier, Aarhus University, Denmark (CC module)
Erik D. Hedegaard, Univ. of Southern Denmark, Denmark (Polarizable embedding model, QM/MM)
Hanne Heiberg, University of Oslo, Norway (geometry analysis, selected one-electron integrals)
Trygve Helgaker, University of Oslo, Norway (DALTON; ABACUS, ERI, DFT modules, London, and much more)
Alf Christian Hennum, University of Oslo, Norway (Parity violation)
Hinne Hettema, University of Auckland, New Zealand (quadratic response in RESPONS; SIRIUS supersymmetry)
Eirik Hjertenaes, NTNU, Norway (Cholesky decomposition)
Maria Francesca Iozzi, University of Oslo, Norway (RPA)
Brano Jansik Technical Univ. of Ostrava Czech Rep. (DFT cubic response)
Hans Joergen Aa. Jensen, Univ. of Southern Denmark, Denmark (DALTON; SIRIUS, RESPONS, ABACUS modules, London, and much more)
Dan Jonsson, UiT The Arctic U. of Norway, Norway (cubic response in RESPONS module)
Poul Joergensen, Aarhus University, Denmark (RESPONS, ABACUS, and CC modules)
Maciej Kaminski, University of Warsaw, Poland (CPPh in RESPONS)
Joanna Kauczor, Linkoeping University, Sweden (Complex polarization propagator (CPP) module)
Sheela Kirpekar, Univ. of Southern Denmark, Denmark (Mass-velocity & Darwin integrals)
Wim Klopper, KIT Karlsruhe, Germany (R12 code in CC, SIRIUS, and ABACUS modules)
Stefan Knecht, ETH Zurich, Switzerland (Parallel CI and MCSCF)
Rika Kobayashi, Australian National Univ., Australia (DIIS in CC, London in MCSCF)
Henrik Koch, NTNU, Norway (CC module, Cholesky decomposition)
Jacob Kongsted, Univ. of Southern Denmark, Denmark (Polarizable embedding model, QM/MM)
Andrea Ligabue, University of Modena, Italy (CTOCD, AOSOPPA)
Nanna H. List Univ. of Southern Denmark, Denmark (Polarizable embedding model)
Ola B. Lutnaes, University of Oslo, Norway (DFT Hessian)
Juan I. Melo, University of Buenos Aires, Argentina (LRESC, Relativistic Effects on NMR Shieldings)
Kurt V. Mikkelsen, University of Copenhagen, Denmark (MC-SCRF and QM/MM)
Rolf H. Myhre, NTNU, Norway (Cholesky, subsystems and ECC2)
Christian Neiss, Univ. Erlangen-Nuernberg, Germany (CCSD(R12))
Christian B. Nielsen, University of Copenhagen, Denmark (QM/MM)
Patrick Norman, Linkoeping University, Sweden (Cubic response and complex response in RESPONS)
Jeppe Olsen, Aarhus University, Denmark (SIRIUS CI/density modules)
Jogvan Magnus H. Olsen, Univ. of Southern Denmark, Denmark (Polarizable embedding model, QM/MM)
Anders Osted, Copenhagen University, Denmark (QM/MM)
Martin J. Packer, University of Sheffield, UK (SOPPA)
Filip Pawlowski, Kazimierz Wielki University, Poland (CC3)
Morten N. Pedersen, Univ. of Southern Denmark, Denmark (Polarizable embedding model)
Thomas B. Pedersen, University of Oslo, Norway (Cholesky decomposition)
Patricio F. Provasi, University of Northeastern, Argentina (Analysis of coupling constants in localized orbitals)
Zilvinas Rinkevicius, KTH Stockholm, Sweden (open-shell DFT, ESR)
Elias Rudberg, KTH Stockholm, Sweden (DFT grid and basis info)
Torgeir A. Ruden, University of Oslo, Norway (Numerical derivatives in ABACUS)
Kenneth Ruud, UiT The Arctic U. of Norway, Norway (DALTON; ABACUS magnetic properties and much more)
Pawel Salek, KTH Stockholm, Sweden (DALTON; DFT code)
Claire C. M. Samson University of Karlsruhe Germany (Boys localization, r12 integrals in ERI)
Alfredo Sanchez de Meras, University of Valencia, Spain (CC module, Cholesky decomposition)
Trond Saue, Paul Sabatier University, France (direct Fock matrix construction)
Stephan P. A. Sauer, University of Copenhagen, Denmark (SOPPA(CCSD), SOPPA prop., AOSOPPA, vibrational g-factors)
Bernd Schimmelpfennig, Forschungszentrum Karlsruhe, Germany (AMFI module)
Kristian Sneskov, Aarhus University, Denmark (Polarizable embedding model, QM/MM)
Arnfinn H. Steindal, UiT The Arctic U. of Norway, Norway (parallel QM/MM, Polarizable embedding model)
Casper Steinmann, Univ. of Southern Denmark, Denmark (QFIT, Polarizable embedding model)
K. O. Sylvester-Hvid, University of Copenhagen, Denmark (MC-SCRF)
Peter R. Taylor, VLSCI/Univ. of Melbourne, Australia (Symmetry handling ABACUS, integral transformation)
Andrew M. Teale, University of Nottingham, England (DFT-AC, DFT-D)
David P. Tew, University of Bristol, England (CCSD(R12))
Olav Vahtras, KTH Stockholm, Sweden (triplet response, spin-orbit, ESR, TDDFT, open-shell DFT)
David J. Wilson, La Trobe University, Australia (DFT Hessian and DFT magnetizabilities)
Hans Agren, KTH Stockholm, Sweden (SIRIUS module, RESPONS, MC-SCRF solvation model)
--------------------------------------------------------------------------------
Date and time (Linux) : Tue Jun 23 22:14:19 2015
Host name : archer
* Work memory size : 64000000 = 488.28 megabytes.
+ memory for in-core integrals : 100000000
* Directories for basis set searches:
1) /home/ignat/test/water
2) /home/ignat/repos/dalton/build_gnu/basis
Compilation information
-----------------------
Who compiled | ignat
Host | archer
System | Linux-4.0.5-1-ARCH
CMake generator | Unix Makefiles
Processor | x86_64
64-bit integers | OFF
MPI | On
Fortran compiler | /usr/bin/mpif90
Fortran compiler version | GNU Fortran (GCC) 5.1.0
Fortran flags | -DVAR_GFORTRAN -DGFORTRAN=445 -ffloat-store -fcray
| -pointer -m64 -O0 -g -fbacktrace -fcray-pointer -
| Wuninitialized
C compiler | /usr/bin/mpicc
C compiler version | gcc (GCC) 5.1.0
C flags | -std=c99 -DRESTRICT=restrict -DFUNDERSCORE=1 -DHAV
| E_NO_LSEEK64 -ffloat-store -Wall -m64 -O0 -g3
C++ compiler | /usr/bin/mpicxx
C++ compiler version | unknown
C++ flags | -g -Wall -fno-rtti -fno-exceptions -m64 -march=nat
| ive -O0 -g3
BLAS | /usr/lib/libblas.so
LAPACK | /usr/lib/liblapack.so
Static linking | OFF
Last Git revision | 9e6893dfe1675186f4e5a0c5ca97afe725e7638b
Git branch | master
Configuration time | 2015-06-13 18:52:22.411053
* MPI run using 4 processes.
Content of the .dal input file
----------------------------------
**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*QUADRATIC
.QLOP
.DIPLEN
**END OF DALTON INPUT
Content of the .mol file
----------------------------
ATOMBASIS
Atomtypes=2 Charge=0 Nosymm
Charge=8.0 Atoms=1 Basis=ano-1 4 3 1
O 0.00000 0.00000 0.00000
Charge=1.0 Atoms=2 Basis=ano-1 2
H 1.43043 0.00000 1.10716
H -1.43043 0.00000 1.10716
*******************************************************************
*********** Output from DALTON general input processing ***********
*******************************************************************
--------------------------------------------------------------------------------
Overall default print level: 0
Print level for DALTON.STAT: 1
Parallel calculation using MPI
AO-direct calculation (in sections where implemented)
HERMIT 1- and 2-electron integral sections will be executed
"Old" integral transformation used (limited to max 255 basis functions)
Wave function sections will be executed (SIRIUS module)
Dynamic molecular response properties section will be executed (RESPONSE module)
--------------------------------------------------------------------------------
****************************************************************************
*************** Output of molecule and basis set information ***************
****************************************************************************
The two title cards from your ".mol" input:
------------------------------------------------------------------------
1:
2:
------------------------------------------------------------------------
Atomic type no. 1
--------------------
Nuclear charge: 8.00000
Number of symmetry independent centers: 1
Number of basis sets to read; 2
The basis set is "ano-1 4 3 1" from the basis set library.
Basis set file used for this atomic type with Z = 8 :
"/home/ignat/repos/dalton/build_gnu/basis/ano-1"
Atomic type no. 2
--------------------
Nuclear charge: 1.00000
Number of symmetry independent centers: 2
Number of basis sets to read; 2
The basis set is "ano-1 2" from the basis set library.
Basis set file used for this atomic type with Z = 1 :
"/home/ignat/repos/dalton/build_gnu/basis/ano-1"
SYMGRP: Point group information
-------------------------------
@ Point group: C1
Isotopic Masses
---------------
O 15.994915
H 1.007825
H 1.007825
Total mass: 18.010565 amu
Natural abundance: 99.730 %
Center-of-mass coordinates (a.u.): 0.000000 0.000000 0.123908
Atoms and basis sets
--------------------
Number of atom types : 2
Total number of atoms: 3
label atoms charge prim cont basis
----------------------------------------------------------------------
O 1 8.0000 61 18 [14s9p4d|4s3p1d]
H 2 1.0000 8 2 [8s|2s]
----------------------------------------------------------------------
total: 3 10.0000 77 22
----------------------------------------------------------------------
Spherical harmonic basis used.
Threshold for neglecting AO integrals: 1.00D-12
Cartesian Coordinates (a.u.)
----------------------------
Total number of coordinates: 9
O : 1 x 0.0000000000 2 y 0.0000000000 3 z 0.0000000000
H : 4 x 1.4304300000 5 y 0.0000000000 6 z 1.1071600000
H : 7 x -1.4304300000 8 y 0.0000000000 9 z 1.1071600000
Interatomic separations (in Angstrom):
--------------------------------------
O H H
------ ------ ------
O : 0.000000
H : 0.957201 0.000000
H : 0.957201 1.513902 0.000000
Max interatomic separation is 1.5139 Angstrom ( 2.8609 Bohr)
between atoms 3 and 2, "H " and "H ".
Min HX interatomic separation is 0.9572 Angstrom ( 1.8088 Bohr)
Bond distances (Angstrom):
--------------------------
atom 1 atom 2 distance
------ ------ --------
bond distance: H O 0.957201
bond distance: H O 0.957201
Bond angles (degrees):
----------------------
atom 1 atom 2 atom 3 angle
------ ------ ------ -----
bond angle: H O H 104.520
Principal moments of inertia (u*A**2) and principal axes
--------------------------------------------------------
IA 0.614459 1.000000 0.000000 0.000000
IB 1.154917 0.000000 0.000000 1.000000
IC 1.769375 0.000000 1.000000 0.000000
Rotational constants
--------------------
@ The molecule is planar.
A B C
822478.2742 437589.1937 285625.6621 MHz
27.434922 14.596404 9.527447 cm-1
@ Nuclear repulsion energy : 9.194951107924 Hartree
.---------------------------------------.
| Starting in Integral Section (HERMIT) |
`---------------------------------------'
***************************************************************************************
****************** Output from **INTEGRALS input processing (HERMIT) ******************
***************************************************************************************
*************************************************************************
****************** Output from HERMIT input processing ******************
*************************************************************************
Default print level: 1
* Nuclear model: Point charge
Calculation of one-electron Hamiltonian integrals.
The following one-electron property integrals are calculated as requested:
- overlap integrals
- dipole length integrals
- second moment integrals
Center of mass (bohr): 0.000000000000 0.000000000000 0.123907664973
Operator center (bohr): 0.000000000000 0.000000000000 0.000000000000
Gauge origin (bohr): 0.000000000000 0.000000000000 0.000000000000
Dipole origin (bohr): 0.000000000000 0.000000000000 0.000000000000
************************************************************************
************************** Output from HERINT **************************
************************************************************************
>>> Time used in ONEDRV is 0.15 seconds
>>> Time used in QUADRUP is 0.27 seconds
>>> Time used in KINENE is 0.28 seconds
>>> Time used in SECMOM is 0.27 seconds
>>> Time used in GABGEN is 0.28 seconds
>>>> Total CPU time used in HERMIT: 1.49 seconds
>>>> Total wall time used in HERMIT: 1.49 seconds
.----------------------------------.
| End of Integral Section (HERMIT) |
`----------------------------------'
.--------------------------------------------.
| Starting in Wave Function Section (SIRIUS) |
`--------------------------------------------'
*** Output from Huckel module :
Using EWMO model: T
Using EHT model: F
Number of Huckel orbitals each symmetry: 7
EWMO - Energy Weighted Maximum Overlap - is a Huckel type method,
which normally is better than Extended Huckel Theory.
Reference: Linderberg and Ohrn, Propagators in Quantum Chemistry (Wiley, 1973)
Huckel EWMO eigenvalues for symmetry : 1
-20.684968 -1.611697 -0.778263 -0.688371 -0.616200
-0.232270 -0.168131
**********************************************************************
*SIRIUS* a direct, restricted step, second order MCSCF program *
**********************************************************************
Date and time (Linux) : Tue Jun 23 22:14:21 2015
Host name : archer
Title lines from ".mol" input file:
Print level on unit LUPRI = 2 is 0
Print level on unit LUW4 = 2 is 5
@ Restricted, closed shell Hartree-Fock calculation.
@ Time-dependent Hartree-Fock calculation (random phase approximation).
Fock matrices are calculated directly and in parallel without use of integrals on disk.
Initial molecular orbitals are obtained according to
".MOSTART EWMO " input option
Wave function specification
============================
@ Wave function type >>> HF <<<
@ Number of closed shell electrons 10
@ Number of electrons in active shells 0
@ Total charge of the molecule 0
@ Spin multiplicity and 2 M_S 1 0
@ Total number of symmetries 1 (point group: C1 )
@ Reference state symmetry 1 (irrep name : A )
Orbital specifications
======================
@ Abelian symmetry species All | 1
@ | A
--- | ---
@ Occupied SCF orbitals 5 | 5
@ Secondary orbitals 17 | 17
@ Total number of orbitals 22 | 22
@ Number of basis functions 22 | 22
Optimization information
========================
@ Number of configurations 1
@ Number of orbital rotations 85
------------------------------------------
@ Total number of variables 86
Maximum number of Fock iterations 0
Maximum number of DIIS iterations 60
Maximum number of QC-SCF iterations 60
Threshold for SCF convergence 1.00D-05
***********************************************
***** DIIS acceleration of SCF iterations *****
***********************************************
C1-DIIS algorithm; max error vectors = 8
Iter Total energy Error norm Delta(E) DIIS dim.
-----------------------------------------------------------------------------
@ 1 -75.8864462763 1.48374D+00 -7.59D+01 1
Virial theorem: -V/T = 1.997526
@ MULPOP O -0.74; H 0.37; H 0.37;
-----------------------------------------------------------------------------
@ 2 -76.0298087217 3.23206D-01 -1.43D-01 2
Virial theorem: -V/T = 2.002410
@ MULPOP O -0.68; H 0.34; H 0.34;
-----------------------------------------------------------------------------
@ 3 -76.0358019237 8.47837D-02 -5.99D-03 3
Virial theorem: -V/T = 1.999778
@ MULPOP O -0.68; H 0.34; H 0.34;
-----------------------------------------------------------------------------
@ 4 -76.0364968396 4.21539D-02 -6.95D-04 4
Virial theorem: -V/T = 2.001194
@ MULPOP O -0.66; H 0.33; H 0.33;
-----------------------------------------------------------------------------
@ 5 -76.0365782749 6.52162D-03 -8.14D-05 5
Virial theorem: -V/T = 2.000434
@ MULPOP O -0.67; H 0.33; H 0.33;
-----------------------------------------------------------------------------
@ 6 -76.0365858837 1.62052D-03 -7.61D-06 6
Virial theorem: -V/T = 2.000420
@ MULPOP O -0.67; H 0.34; H 0.34;
-----------------------------------------------------------------------------
@ 7 -76.0365862987 2.28849D-04 -4.15D-07 7
Virial theorem: -V/T = 2.000445
@ MULPOP O -0.67; H 0.34; H 0.34;
-----------------------------------------------------------------------------
@ 8 -76.0365863067 3.19997D-05 -8.07D-09 8
Virial theorem: -V/T = 2.000444
@ MULPOP O -0.67; H 0.34; H 0.34;
-----------------------------------------------------------------------------
@ 9 -76.0365863069 4.79125D-06 -1.43D-10 8
@ *** DIIS converged in 9 iterations !
@ Converged SCF energy, gradient: -76.036586306879 4.79D-06
- total time used in SIRFCK : 0.00 seconds
*** SCF orbital energy analysis ***
Number of electrons : 10
Orbital occupations : 5
Sym Hartree-Fock orbital energies
1 A -20.57082761 -1.35552717 -0.72515978 -0.58965453 -0.51438127
0.06726777 0.19936705 0.28276809 0.30520709 0.30854233
0.41052344 0.75137306 0.93835825 1.97004726 1.97082070
2.03471323 2.04464288 2.07836885 2.10854643 2.27493764
3.08575297 3.64296060
E(LUMO) : 0.06726777 au (symmetry 1)
- E(HOMO) : -0.51438127 au (symmetry 1)
------------------------------------------
gap : 0.58164903 au
>>> Writing SIRIFC interface file
>>>> CPU and wall time for SCF : 0.793 0.792
.-----------------------------------.
| >>> Final results from SIRIUS <<< |
`-----------------------------------'
@ Spin multiplicity: 1
@ Spatial symmetry: 1 ( irrep A in C1 )
@ Total charge of molecule: 0
@ Final HF energy: -76.036586306879
@ Nuclear repulsion: 9.194951107924
@ Electronic energy: -85.231537414803
@ Final gradient norm: 0.000004791249
Date and time (Linux) : Tue Jun 23 22:14:22 2015
Host name : archer
File label for MO orbitals: 23Jun15 FOCKDIIS
(Only coefficients >0.0100 are printed.)
Molecular orbitals for symmetry species 1 (A )
------------------------------------------------
Orbital 1 2 3 4 5 6 7
1 O :1s -1.0000 0.0232 0.0000 0.0289 -0.0000 -0.1739 -0.0000
2 O :1s -0.0004 -0.7853 0.0000 0.4549 -0.0000 -1.4539 -0.0000
3 O :1s 0.0001 0.0782 0.0000 0.0961 0.0000 -1.3926 -0.0000
4 O :1s -0.0003 0.0313 0.0000 0.0130 0.0000 -0.4518 -0.0000
5 O :2px 0.0000 -0.0000 -0.7141 0.0000 -0.0000 -0.0000 0.7781
6 O :2py 0.0000 -0.0000 -0.0000 0.0000 0.9980 -0.0000 -0.0000
7 O :2pz -0.0007 -0.0956 0.0000 -0.8242 0.0000 -0.4563 0.0000
8 O :2px 0.0000 -0.0000 0.0365 -0.0000 -0.0000 -0.0000 0.3204
9 O :2py 0.0000 -0.0000 -0.0000 0.0000 0.0487 -0.0000 -0.0000
10 O :2pz 0.0007 0.0546 0.0000 0.0115 0.0000 -0.3844 -0.0000
11 O :2px 0.0000 -0.0000 0.0160 -0.0000 -0.0000 -0.0000 -0.0634
12 O :2py 0.0000 -0.0000 -0.0000 0.0000 -0.0220 -0.0000 -0.0000
13 O :2pz -0.0008 0.0205 0.0000 0.0254 0.0000 -0.0635 -0.0000
15 O :3d1- -0.0000 0.0000 0.0000 -0.0000 0.0335 0.0000 -0.0000
16 O :3d0 0.0000 -0.0058 -0.0000 -0.0347 0.0000 -0.0068 0.0000
17 O :3d1+ 0.0000 0.0000 -0.0529 -0.0000 0.0000 -0.0000 0.0258
18 O :3d2+ 0.0002 -0.0153 0.0000 -0.0079 0.0000 -0.0128 -0.0000
19 H :1s -0.0003 -0.1834 -0.2759 -0.2306 -0.0000 1.1625 -1.5677
20 H :1s 0.0002 0.0112 0.1117 0.0144 -0.0000 0.9376 -1.2823
21 H :1s -0.0003 -0.1834 0.2759 -0.2306 0.0000 1.1625 1.5677
22 H :1s 0.0002 0.0112 -0.1117 0.0144 -0.0000 0.9376 1.2823
Orbital 8 9 10 11 12 13 14
1 O :1s -0.2139 0.1097 0.0000 0.0000 0.1569 -0.0000 0.0000
2 O :1s -1.7441 0.9149 0.0000 0.0000 1.3858 -0.0000 0.0000
3 O :1s -3.0951 1.1760 -0.0000 0.0000 0.8992 -0.0000 -0.0000
4 O :1s -1.3323 0.4375 -0.0000 0.0000 0.3096 -0.0000 -0.0000
5 O :2px -0.0000 0.0000 0.0000 1.3472 -0.0000 -0.0278 0.0000
6 O :2py 0.0000 -0.0000 -0.0343 0.0000 -0.0000 -0.0000 -0.0470
7 O :2pz -0.4287 0.2238 -0.0000 0.0000 0.9047 -0.0000 -0.0000
8 O :2px -0.0000 -0.0000 0.0000 1.9871 -0.0000 1.3420 0.0000
9 O :2py 0.0000 -0.0000 0.9016 0.0000 -0.0000 -0.0000 0.4206
10 O :2pz -0.5821 1.2365 -0.0000 0.0000 0.4394 -0.0000 -0.0000
11 O :2px -0.0000 -0.0000 0.0000 0.5497 -0.0000 0.3521 0.0000
12 O :2py 0.0000 -0.0000 0.4311 0.0000 -0.0000 -0.0000 -0.8809
13 O :2pz -0.1081 0.4990 -0.0000 0.0000 0.0307 -0.0000 -0.0000
15 O :3d1- -0.0000 0.0000 -0.0052 0.0000 0.0000 0.0000 0.2121
16 O :3d0 -0.0032 -0.0033 -0.0000 -0.0000 0.0195 -0.0000 -0.0000
17 O :3d1+ -0.0000 0.0000 -0.0000 -0.0172 -0.0000 0.1293 -0.0000
18 O :3d2+ -0.0125 0.0014 -0.0000 0.0000 -0.1222 0.0000 -0.0000
19 H :1s 1.4254 -0.7456 0.0000 -2.3719 -1.2763 -1.2532 0.0000
20 H :1s 1.1629 -0.5317 0.0000 -1.4834 0.0461 -1.9624 0.0000
21 H :1s 1.4254 -0.7456 -0.0000 2.3719 -1.2763 1.2532 -0.0000
22 H :1s 1.1629 -0.5317 0.0000 1.4834 0.0461 1.9624 0.0000
Orbital 15
2 O :1s -0.0704
3 O :1s -0.2560
4 O :1s 0.1429
7 O :2pz -0.0684
10 O :2pz 0.3265
13 O :2pz -0.7630
16 O :3d0 0.4462
18 O :3d2+ -0.2304
19 H :1s 0.0476
20 H :1s 0.0728
21 H :1s 0.0476
22 H :1s 0.0728
>>>> Total CPU time used in SIRIUS : 0.81 seconds
>>>> Total wall time used in SIRIUS : 0.81 seconds
Date and time (Linux) : Tue Jun 23 22:14:22 2015
Host name : archer
.---------------------------------------.
| End of Wave Function Section (SIRIUS) |
`---------------------------------------'
.------------------------------------------------.
| Starting in Dynamic Property Section (RESPONS) |
`------------------------------------------------'
------------------------------------------------------------------------------
RESPONSE - an MCSCF, MC-srDFT, DFT, and SOPPA response property program
------------------------------------------------------------------------------
<<<<<<<<<< OUTPUT FROM RESPONSE INPUT PROCESSING >>>>>>>>>>
CHANGES OF DEFAULTS FOR RSPINP:
-------------------------------
AO-direct Fock matrix calculations.
Default : Using Fock type decoupling of the two-electron density matrix :
Add DV*(FC+FV) instead of DV*FC to E[2] approximate orbital diagonal
Quadratic Response calculation
------------------------------
First hyperpolarizability calculation : HYPCAL= T
Spin of operator A , ISPINA= 0
Spin of operator B , ISPINB= 0
Spin of operator C , ISPINC= 0
1 B-frequencies 0.000000D+00
1 C-frequencies 0.000000D+00
Print level : IPRHYP = 2
Maximum number of iterations in lin.rsp. solver: MAXITL = 60
Threshold for convergence of linear resp. eq.s : THCLR = 1.000D-03
Maximum iterations in optimal orbital algorithm: MAXITO = 5
Direct one-index transformation : DIROIT = T
3 A OPERATORS OF SYMMETRY NO: 1 AND LABELS:
XDIPLEN
YDIPLEN
ZDIPLEN
3 B OPERATORS OF SYMMETRY NO: 1 AND LABELS:
XDIPLEN
YDIPLEN
ZDIPLEN
3 C OPERATORS OF SYMMETRY NO: 1 AND LABELS:
XDIPLEN
YDIPLEN
ZDIPLEN
SCF energy : -76.036586306878860
-- inactive part : -85.231537414802574
-- nuclear repulsion : 9.194951107923721
***************************************
*** RHF response calculation (TDHF) ***
***************************************
Calculation of electronic one-electron expectation values
----------------------------------------------------------
(Note that to get e.g. a dipole moment you must multiply the
electronic number by -1 and add the nuclear contribution.)
*** Individual non-zero orbital contributions
*** to the expectation value for property XDIPLEN :
Inactive 1 1 in sym 1 : -0.00000000
Inactive 2 2 in sym 1 : 0.00000000
Inactive 3 3 in sym 1 : 0.00000000
Inactive 4 4 in sym 1 : 0.00000000
Inactive 5 5 in sym 1 : -0.00000000
XDIPLEN inactive part: 7.44252169D-15
XDIPLEN active part : 0.00000000D+00
XDIPLEN total : 7.44252169D-15
*** Individual non-zero orbital contributions
*** to the expectation value for property YDIPLEN :
Inactive 1 1 in sym 1 : -0.00000000
Inactive 2 2 in sym 1 : 0.00000000
Inactive 3 3 in sym 1 : -0.00000000
Inactive 4 4 in sym 1 : 0.00000000
Inactive 5 5 in sym 1 : -0.00000000
YDIPLEN inactive part: 7.53885075D-16
YDIPLEN active part : 0.00000000D+00
YDIPLEN total : 7.53885075D-16
*** Individual non-zero orbital contributions
*** to the expectation value for property ZDIPLEN :
Inactive 1 1 in sym 1 : 0.00049027
Inactive 2 2 in sym 1 : 0.64776114
Inactive 3 3 in sym 1 : 0.83917250
Inactive 4 4 in sym 1 : -0.20936129
Inactive 5 5 in sym 1 : 0.08212857
ZDIPLEN inactive part: 1.36019118
ZDIPLEN active part : 0.00000000
ZDIPLEN total : 1.36019118
Linear response calculations for quadratic response
- singlet property operator of symmetry 1 ( A )
Perturbation symmetry. KSYMOP: 1
Perturbation spin symmetry.TRPLET: F
Orbital variables. KZWOPT: 85
Configuration variables. KZCONF: 0
Total number of variables. KZVAR : 85
QRLRVE -- linear response calculation for symmetry 1 ( A )
QRLRVE -- operator label : XDIPLEN
QRLRVE -- operator spin : 0
QRLRVE -- frequencies : 0.000000
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 10)
RSP solution vector no. 1; norm of residual 5.09D-04
*** RSPCTL MICROITERATIONS CONVERGED
@ QRLRVE: SINGLET SOLUTION FOR SYMMETRY 1 ( A ) LABEL XDIPLEN FREQUENCY 0.000000D+00
@ QRLRVE: << XDIPLEN ; XDIPLEN >> ( 0.00000): 6.88797139440
@ QRLRVE: << YDIPLEN ; XDIPLEN >> ( 0.00000): -2.457085259705E-15
@ QRLRVE: << ZDIPLEN ; XDIPLEN >> ( 0.00000): 4.993539086389E-14
QRLRVE -- linear response calculation for symmetry 1 ( A )
QRLRVE -- operator label : YDIPLEN
QRLRVE -- operator spin : 0
QRLRVE -- frequencies : 0.000000
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 12)
RSP solution vector no. 1; norm of residual 5.04D-05
*** RSPCTL MICROITERATIONS CONVERGED
@ QRLRVE: SINGLET SOLUTION FOR SYMMETRY 1 ( A ) LABEL YDIPLEN FREQUENCY 0.000000D+00
@ QRLRVE: << XDIPLEN ; YDIPLEN >> ( 0.00000): -2.463163106656E-15
@ QRLRVE: << YDIPLEN ; YDIPLEN >> ( 0.00000): 5.18329883914
@ QRLRVE: << ZDIPLEN ; YDIPLEN >> ( 0.00000): 1.063418866696E-15
QRLRVE -- linear response calculation for symmetry 1 ( A )
QRLRVE -- operator label : ZDIPLEN
QRLRVE -- operator spin : 0
QRLRVE -- frequencies : 0.000000
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 12)
RSP solution vector no. 1; norm of residual 2.92D-04
*** RSPCTL MICROITERATIONS CONVERGED
@ QRLRVE: SINGLET SOLUTION FOR SYMMETRY 1 ( A ) LABEL ZDIPLEN FREQUENCY 0.000000D+00
@ QRLRVE: << XDIPLEN ; ZDIPLEN >> ( 0.00000): 4.355582821759E-14
@ QRLRVE: << YDIPLEN ; ZDIPLEN >> ( 0.00000): 1.521124169556E-15
@ QRLRVE: << ZDIPLEN ; ZDIPLEN >> ( 0.00000): 5.94876530901
======================================================================
>>>>>>>> L I N E A R R E S P O N S E F U N C T I O N S <<<<<<<<
======================================================================
The -<<A;B>>(omega_b) functions from vectors generated
in a *QUADRA calculation of <<A;B,C>>(omega_b,omega_c)
Note: the accuracy of off-diagonal elements will be linear
in the convergence threshold THCLR = 1.00D-03
Perturbation symmetry. KSYMOP: 1
Perturbation spin symmetry.TRPLET: F
Orbital variables. KZWOPT: 85
Configuration variables. KZCONF: 0
Total number of variables. KZVAR : 85
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: XDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: XDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): 6.887971394397
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: YDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: XDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): -0.000000000000
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: ZDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: XDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): 0.000000000000
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: XDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: YDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): -0.000000000000
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: YDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: YDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): 5.183298839138
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: ZDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: YDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): 0.000000000000
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: XDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: ZDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): 0.000000000000
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: YDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: ZDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): 0.000000000000
@ Singlet linear response function in a.u.
@ A operator, symmetry, frequency: ZDIPLEN 1 -0.000000
@ B operator, symmetry, frequency: ZDIPLEN 1 0.000000
@ Value of linear response -<<A;B>>(omega): 5.948765309008
Results from quadratic response calculation
--------------------------------------------
CRLRV3 -- linear response calc for sym: 1
CRLRV3 -- operator label1: XDIPLEN
CRLRV3 -- operator label2: XDIPLEN
CRLRV3 -- freqr1 : 0.000000D+00
CRLRV3 -- freqr2 : 0.000000D+00
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 10)
RSP solution vector no. 1; norm of residual 9.37D-04
*** RSPCTL MICROITERATIONS CONVERGED
XDIPLEN XDIPLEN freq1 freq2 Norm
---------------------------------------------------------------
0.000000 0.000000 9.56741326
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;X,X) = -0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;X,X) = -0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;X,X) = -12.10518652
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Y,X) = beta(Y,X,X)
CRLRV3 -- linear response calc for sym: 1
CRLRV3 -- operator label1: YDIPLEN
CRLRV3 -- operator label2: XDIPLEN
CRLRV3 -- freqr1 : 0.000000D+00
CRLRV3 -- freqr2 : 0.000000D+00
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 8)
RSP solution vector no. 1; norm of residual 3.95D-04
*** RSPCTL MICROITERATIONS CONVERGED
YDIPLEN XDIPLEN freq1 freq2 Norm
---------------------------------------------------------------
0.000000 0.000000 7.26114949
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Y,X) = -0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Y,X) = 0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;Y,X) = 0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Z,X) = beta(Z,X,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Z,X) = beta(Z,Y,X)
CRLRV3 -- linear response calc for sym: 1
CRLRV3 -- operator label1: ZDIPLEN
CRLRV3 -- operator label2: XDIPLEN
CRLRV3 -- freqr1 : 0.000000D+00
CRLRV3 -- freqr2 : 0.000000D+00
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 8)
RSP solution vector no. 1; norm of residual 5.90D-04
*** RSPCTL MICROITERATIONS CONVERGED
ZDIPLEN XDIPLEN freq1 freq2 Norm
---------------------------------------------------------------
0.000000 0.000000 10.50779485
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Z,X) = -12.09975413
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Z,X) = 0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;Z,X) = 0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;X,Y) = beta(Y,X,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;X,Y) = beta(Y,Y,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;X,Y) = beta(Z,Y,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Y,Y) = beta(Y,Y,X)
CRLRV3 -- linear response calc for sym: 1
CRLRV3 -- operator label1: YDIPLEN
CRLRV3 -- operator label2: YDIPLEN
CRLRV3 -- freqr1 : 0.000000D+00
CRLRV3 -- freqr2 : 0.000000D+00
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 12)
RSP solution vector no. 1; norm of residual 2.33D-04
*** RSPCTL MICROITERATIONS CONVERGED
YDIPLEN YDIPLEN freq1 freq2 Norm
---------------------------------------------------------------
0.000000 0.000000 13.91330727
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Y,Y) = 0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Y,Y) = -0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;Y,Y) = 2.18839797
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Z,Y) = beta(Z,Y,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Z,Y) = beta(Z,Y,Y)
CRLRV3 -- linear response calc for sym: 1
CRLRV3 -- operator label1: ZDIPLEN
CRLRV3 -- operator label2: YDIPLEN
CRLRV3 -- freqr1 : 0.000000D+00
CRLRV3 -- freqr2 : 0.000000D+00
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 10)
RSP solution vector no. 1; norm of residual 2.49D-04
*** RSPCTL MICROITERATIONS CONVERGED
ZDIPLEN YDIPLEN freq1 freq2 Norm
---------------------------------------------------------------
0.000000 0.000000 9.08114434
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Z,Y) = 0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Z,Y) = 2.18809724
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;Z,Y) = -0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;X,Z) = beta(Z,X,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;X,Z) = beta(Z,Y,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;X,Z) = beta(Z,Z,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Y,Z) = beta(Z,Y,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Y,Z) = beta(Z,Y,Y)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;Y,Z) = beta(Z,Z,Y)
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Z,Z) = beta(Z,Z,X)
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Z,Z) = beta(Z,Z,Y)
CRLRV3 -- linear response calc for sym: 1
CRLRV3 -- operator label1: ZDIPLEN
CRLRV3 -- operator label2: ZDIPLEN
CRLRV3 -- freqr1 : 0.000000D+00
CRLRV3 -- freqr2 : 0.000000D+00
<<< SOLVING SETS OF LINEAR EQUATIONS FOR LINEAR RESPONSE PROPERTIES >>>
Operator symmetry = 1 ( A ); triplet = F
*** THE REQUESTED 1 SOLUTION VECTORS CONVERGED
Convergence of RSP solution vectors, threshold = 1.00D-03
---------------------------------------------------------------
(dimension of paired reduced space: 10)
RSP solution vector no. 1; norm of residual 9.42D-04
*** RSPCTL MICROITERATIONS CONVERGED
ZDIPLEN ZDIPLEN freq1 freq2 Norm
---------------------------------------------------------------
0.000000 0.000000 10.30701610
@ B-freq = 0.000000 C-freq = 0.000000 beta(X;Z,Z) = -0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Y;Z,Z) = -0.00000000
@ B-freq = 0.000000 C-freq = 0.000000 beta(Z;Z,Z) = -2.64089412
>>>> Total CPU time used in RESPONSE: 1.02 seconds
>>>> Total wall time used in RESPONSE: 1.02 seconds
.-------------------------------------------.
| End of Dynamic Property Section (RESPONS) |
`-------------------------------------------'
>>>> Total CPU time used in DALTON: 3.34 seconds
>>>> Total wall time used in DALTON: 3.35 seconds
Date and time (Linux) : Tue Jun 23 22:14:23 2015
Host name : archer
"""
@attr(speed='fast')
class ReadDalTestCase(unittest.TestCase):

    def test_read_beta_hf(self):
        """Parse the captured DALTON HF output and check atoms, dipole and alpha."""
        atoms, dipole, alpha, beta = read_dal.read_beta_hf_string(
            HF_FILE, in_AA=False, out_AA=False)
        # Water: one oxygen plus two hydrogens.
        assert len(atoms) == 3
        # Permanent dipole lies along z by symmetry.
        np.testing.assert_allclose(dipole, np.array([0, 0, 0.85413]), atol=1e-4)
        # Static polarizability is diagonal for this geometry.
        expected_alpha = np.diag([6.8879, 5.1833, 5.94877])
        np.testing.assert_allclose(alpha, expected_alpha, atol=1e-4)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
fishstamp82/moltools
|
moltools/test/test_read_dal.py
|
Python
|
mit
| 51,427
|
[
"Dalton"
] |
e4f406d527f56a25e6856a9d096cd12298eed58cd80985b76f6c3ce81f727d80
|
import os
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities import List
from DIRAC.Core.DISET.AuthManager import AuthManager
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Utilities.Extensions import extensionsByPriority
# from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from WebAppDIRAC.Lib import Conf
# Fallback web-portal menu used when no Schema section exists in the CS.
# Each entry is (section name, list of [kind, display name, app location]).
# Fix: removed a duplicated "Job Monitor" entry that appeared twice in the
# Applications section and therefore showed up twice in the rendered menu.
DEFAULT_SCHEMA = [
    [
        "Tools",
        [
            ["app", "Application Wizard", "DIRAC.ApplicationWizard"],
            ["app", "Job Launchpad", "DIRAC.JobLaunchpad"],
            ["app", "Notepad", "DIRAC.Notepad"],
            ["app", "Proxy Upload", "DIRAC.ProxyUpload"],
        ],
    ],
    [
        "Applications",
        [
            ["app", "Accounting", "DIRAC.Accounting"],
            ["app", "Activity Monitor", "DIRAC.ActivityMonitor"],
            ["app", "Configuration Manager", "DIRAC.ConfigurationManager"],
            ["app", "Job Monitor", "DIRAC.JobMonitor"],
            ["app", "Downtimes", "DIRAC.Downtimes"],
            ["app", "File Catalog", "DIRAC.FileCatalog"],
            ["app", "Job Summary", "DIRAC.JobSummary"],
            ["app", "Pilot Monitor", "DIRAC.PilotMonitor"],
            ["app", "Pilot Summary", "DIRAC.PilotSummary"],
            ["app", "Proxy Manager", "DIRAC.ProxyManager"],
            ["app", "Public State Manager", "DIRAC.PublicStateManager"],
            ["app", "Registry Manager", "DIRAC.RegistryManager"],
            ["app", "Request Monitor", "DIRAC.RequestMonitor"],
            ["app", "Resource Summary", "DIRAC.ResourceSummary"],
            ["app", "Site Summary", "DIRAC.SiteSummary"],
            ["app", "Space Occupancy", "DIRAC.SpaceOccupancy"],
            ["app", "System Administration", "DIRAC.SystemAdministration"],
            ["app", "Transformation Monitor", "DIRAC.TransformationMonitor"],
        ],
    ],
]
class SessionData:
    """Assemble the per-request session payload (menu schema, user credentials,
    extension info) that the WebApp client receives."""

    # Registered request handlers, keyed by URL location (filled by setHandlers).
    __handlers = {}
    # Cached menu schema per user group (filled lazily by __getGroupMenu).
    __groupMenu = {}
    # Installed DIRAC extensions, with "DIRAC"/"WebAppDIRAC" moved to the end.
    __extensions = []
    # ExtJS version bundled with the web app.
    __extVersion = "ext-6.2.0"
    # Cached /WebApp configuration subtree (filled lazily by getWebConfiguration).
    __configuration = {}

    @classmethod
    def setHandlers(cls, handlers):
        """Register the available request handlers and recompute the extension order.

        :param dict handlers: handler classes keyed by route; each exposes LOCATION
        """
        cls.__handlers = {}
        for k in handlers:
            handler = handlers[k]
            cls.__handlers[handler.LOCATION.strip("/")] = handler
        # Calculate extensions
        cls.__extensions = extensionsByPriority()
        # Push the two core extensions to the back so custom ones take priority.
        for ext in ["DIRAC", "WebAppDIRAC"]:
            if ext in cls.__extensions:
                cls.__extensions.append(cls.__extensions.pop(cls.__extensions.index(ext)))

    def __init__(self, credDict, setup):
        # credDict: credentials of the requesting user; setup: DIRAC setup name.
        self.__credDict = credDict
        self.__setup = setup

    def __isGroupAuthApp(self, appLoc):
        """The method checks if the application is authorized for a certain user group

        :param str appLoc It is the application name for example: DIRAC.JobMonitor

        :return bool -- if the handler is authorized to the user returns True otherwise False
        """
        # "DIRAC.JobMonitor" -> handler location "JobMonitor".
        handlerLoc = "/".join(List.fromChar(appLoc, ".")[1:])
        if not handlerLoc:
            gLogger.error("Application handler does not exists:", appLoc)
            return False
        if handlerLoc not in self.__handlers:
            gLogger.error("Handler %s required by %s does not exist!" % (handlerLoc, appLoc))
            return False
        handler = self.__handlers[handlerLoc]
        auth = AuthManager(Conf.getAuthSectionForHandler(handlerLoc))
        gLogger.info("Authorization: %s -> %s" % (dict(self.__credDict), handler.AUTH_PROPS))
        return auth.authQuery("", dict(self.__credDict), handler.AUTH_PROPS)

    def __generateSchema(self, base, path):
        """Recursively build a menu schema from the CS section at base/path,
        keeping only the applications the requesting group is authorized for.

        :param str base: root CS section of the schema
        :param str path: subsection path relative to base

        :return: list
        """
        # Calculate schema
        schema = []
        fullName = "%s/%s" % (base, path)
        result = gConfig.getSections(fullName)
        if not result["OK"]:
            return schema
        sectionsList = result["Value"]
        for sName in sectionsList:
            subSchema = self.__generateSchema(base, "%s/%s" % (path, sName))
            if subSchema:
                schema.append((sName, subSchema))
        result = gConfig.getOptions(fullName)
        if not result["OK"]:
            return schema
        optionsList = result["Value"]
        for opName in optionsList:
            opVal = gConfig.getValue("%s/%s" % (fullName, opName))
            # Options of the form "link|<url>" become external links.
            if opVal.startswith("link|"):
                schema.append(("link", opName, opVal[5:]))  # pylint: disable=unsubscriptable-object
                continue
            # Everything else is an application; include it only if authorized.
            if self.__isGroupAuthApp(opVal):
                schema.append(("app", opName, opVal))
        return schema

    def __generateDefaultSchema(self):
        """Build the fallback menu: DEFAULT_SCHEMA filtered by authorization.

        :return: list
        """
        schema = []
        for section, apps in DEFAULT_SCHEMA:
            appList = []
            for app in apps:
                # app[-1] is the application location, e.g. "DIRAC.JobMonitor".
                if self.__isGroupAuthApp(app[-1]):
                    appList.append(app)
            if appList:
                schema.append((section, appList))
        return schema

    def __getGroupMenu(self):
        """Load the menu schema from the CS (or the default) filtered for the
        user's group; the result is cached per group.

        :return: list
        """
        menuSection = "%s/Schema" % (Conf.BASECS)
        # Somebody coming from HTTPS and not with a valid group
        group = self.__credDict.get("group", "")
        # Cache time!
        if group not in self.__groupMenu:
            result = gConfig.getSections(menuSection)
            if not result["OK"] or not result["Value"]:
                self.__groupMenu[group] = self.__generateDefaultSchema()
            else:
                self.__groupMenu[group] = self.__generateSchema(menuSection, "")
        return self.__groupMenu[group]

    @classmethod
    def getWebAppPath(cls):
        """Return the absolute path of the bundled WebApp directory.

        :return: str
        """
        return os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "WebApp")

    @classmethod
    def getExtJSVersion(cls):
        """Get ExtJS version

        :return: str
        """
        return cls.__extVersion

    @classmethod
    def getWebConfiguration(cls):
        """Return (and cache on first success) the recursive /WebApp options.

        :return: dict
        """
        result = gConfig.getOptionsDictRecursively("/WebApp")
        if not cls.__configuration and result["OK"]:
            cls.__configuration = result["Value"]
        return cls.__configuration

    def getData(self):
        """Return session data

        :return: dict
        """
        data = {
            "configuration": self.getWebConfiguration(),
            "menu": self.__getGroupMenu(),
            "user": self.__credDict,
            "validGroups": [],
            # 'groupsStatuses': '',
            "setup": self.__setup,
            "validSetups": gConfig.getSections("/DIRAC/Setups")["Value"],
            "extensions": self.__extensions,
            "extVersion": self.getExtJSVersion(),
        }
        # Add valid groups if known
        username = self.__credDict.get("username", "anonymous")
        if username != "anonymous":
            result = Registry.getGroupsForUser(username)
            # NOTE(review): on failure this returns the error dict itself rather
            # than session data — confirm callers handle that shape.
            if not result["OK"]:
                return result
            data["validGroups"] = result["Value"]
        # result = gProxyManager.getGroupsStatusByUsername(username)  # pylint: disable=no-member
        # if result['OK']:
        #     data['groupsStatuses'] = result['Value']
        # Calculate baseURL
        baseURL = [Conf.rootURL().strip("/"), "s:%s" % data["setup"], "g:%s" % self.__credDict.get("group", "")]
        data["baseURL"] = "/%s" % "/".join(baseURL)
        return data
|
DIRACGrid/WebAppDIRAC
|
src/WebAppDIRAC/Lib/SessionData.py
|
Python
|
gpl-3.0
| 8,005
|
[
"DIRAC"
] |
f3e6c0140c307bb17375d04dc99164df14a4c14ce816e89ca0c155e545433e14
|
import calendar
import csv
import datetime
import gzip
import itertools
import math
import operator
import os
import random
import re
import pickle
import numpy
import scipy
import scipy.stats
import scipy.signal
import scipy.spatial
import pandas
from . import romannumerals
from functools import reduce
# ToDo: Split up the statistical and scientific functions from more general utility ones into separate modules. statstools maybe?

# Standard RNA codon -> one-letter amino-acid translation table.
CODON_TABLE = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'GUU': 'V', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W',
               'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'ACU': 'T', 'CAC': 'H', 'ACG': 'T',
               'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C',
               'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S',
               'UCA': 'S', 'GAG': 'E', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'GAA': 'E', 'AUA': 'I', 'GCA': 'A',
               'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A',
               'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGA': 'R', 'GCU': 'A', 'UGU': 'C',
               'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'CGC': 'R', 'UUC': 'F'}

# Matches one or more whitespace characters; used e.g. to split FASTA headers.
WHITESPACE = re.compile(r'\s+')

# The characters 0-9, A-Z, a-z, built from their ASCII code points.
ALPHANUMERIC = [chr(i) for i in range(48, 58)] + [chr(i) for i in range(65, 91)] + [chr(i) for i in range(97, 123)]

# TMP_DIR = '/data/nrnb01_nobackup/dskola'
# Per-user scratch directory. NOTE(review): os.environ['USER'] raises KeyError
# when USER is unset (some cron/container environments) — confirm this is OK.
TMP_DIR = '/tmp/{}'.format(os.environ['USER'])
def log_print(message, tabs=1):
    """Print *message* preceded by a timestamp and *tabs* tab characters."""
    separator = '\t' * tabs
    print('{}{}{}'.format(pretty_now(), separator, message))
def replace_multi(string, char_list, replacement_char=''):
    """
    Convenience function to replace multiple characters in a string in a single call.

    :param:`char_list` can either be a list of strings or a string.
    """
    result = string
    for substring in char_list:
        result = result.replace(substring, replacement_char)
    return result
def clean_string(string, illegal_chars=(' ', '\t', ',', ';', '|'), replacement_char='_'):
    """
    Return a copy of *string* with all non-allowed characters replaced by
    *replacement_char* (default: underscore).

    Really just a wrapper around replace_multi but with different defaults
    oriented toward filenames.

    Fix: the default *illegal_chars* is now a tuple instead of a mutable list
    (mutable default arguments are shared across calls).
    """
    return replace_multi(string, illegal_chars, replacement_char)
def pretty_now():
    """
    Return the current date/time as a nicely formatted string
    (without fractional seconds).
    """
    current = datetime.datetime.now()
    return current.strftime('%Y-%b-%d %H:%M:%S')
def wrap_indent_para(text, line_width=80, indent=0, hanging_indent=0):
    """
    Wrap *text* (a single unbroken string) to :param:`line_width` columns
    (no hyphenation), indenting the first line by :param:`indent` spaces and
    all subsequent lines by :param:`hanging_indent` spaces.

    :raises AssertionError: if an indent, or any single word, cannot fit.
    """
    assert indent < line_width, 'Specified indent of {} spaces is too big for line width of {}'.format(indent, line_width)
    assert hanging_indent < line_width, 'Specified handing indent of {} spaces is too big for line width of {}'.format(hanging_indent, line_width)
    lines = []
    # Reverse the word list so pop() consumes words front-to-back.
    word_list = text.split(' ')[::-1]
    this_line = []
    # Bug fix: the failure message previously referenced `this_word` before it
    # was ever assigned, so a too-long first word raised NameError instead of
    # AssertionError. word_list[-1] is the first word of the text.
    assert len(word_list[-1]) + indent <= line_width, 'First word {} is too long for line width of {} and indent of {}'.format(word_list[-1], line_width, indent)
    if indent > 0:
        this_line.append(' ' * indent)
    line_pos = indent
    while word_list:
        this_word = word_list.pop()
        L = len(this_word)
        assert L + hanging_indent <= line_width, 'Word {} is too long for line width of {} and hanging indent of {}'.format(this_word, line_width, hanging_indent)
        if line_pos + L + hanging_indent >= line_width:
            # start new line if we would go over the right edge
            lines.append(' '.join(this_line))
            this_line = []
            if hanging_indent > 0:
                this_line.append(' ' * hanging_indent)
            this_line.append(this_word)
            line_pos = L + hanging_indent
        else:
            this_line.append(this_word)
            line_pos += L
    lines.append(' '.join(this_line))
    return '\n'.join(lines)
def generate_log_func(log_filename):
    """
    Return a logging function that prints messages to screen and, when
    *log_filename* is truthy, appends them to that file.
    """
    def log_func(message, verbosity=0):
        """
        Print *message* (timestamped) and append it to the log file chosen
        at creation time. :param:`verbosity` is currently ignored.
        """
        entry = '{}\t{}'.format(pretty_now(), message)
        print(entry)
        if log_filename:
            with open(log_filename, 'at') as handle:
                handle.write(entry + '\n')
    return log_func
class ClassProperty(property):
    """
    Subclass of property that allows class methods to be properties. Does not allow setting.

    Can be used as a decorator in conjunction with @classmethod
    """

    def __get__(self, cls, owner):
        # Bind the wrapped classmethod to the owner class and call it, so that
        # `Owner.attr` yields the computed value instead of the descriptor.
        return self.fget.__get__(None, owner)()
def halves(number):
    """
    Return a pair of integers that split <number> as evenly as possible
    (the smaller half first).
    """
    lower = number // 2
    return lower, number - lower
def first_upper(text):
    """
    Return *text* with its first character upper-cased; the rest is untouched
    (unlike str.capitalize, which lower-cases the remainder).

    Fix: no longer raises IndexError on the empty string.
    """
    if not text:
        return text
    return text[0].upper() + text[1:]
def first_lower(text):
    """
    Return *text* with its first character lower-cased; the rest is untouched.

    Fix: no longer raises IndexError on the empty string.
    """
    if not text:
        return text
    return text[0].lower() + text[1:]
def rev_complement(seq):
    """Return the reverse complement of a DNA string (N maps to N)."""
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N', '': ''}
    return ''.join(pairs[base] for base in reversed(seq))
def dna_to_rna(seq):
    """Transcribe a DNA string into RNA (every T becomes U)."""
    return 'U'.join(seq.split('T'))
def rna_to_dna(seq):
    """Back-transcribe an RNA string into DNA (every U becomes T)."""
    return 'T'.join(seq.split('U'))
def translate_dna(dna_sequence, reading_frame_offset=0, reading_frame_direction=1):
    """
    Translate a DNA string into a list of 1-letter amino acid codes, reading
    at the given frame offset (0, 1, 2) and direction (-1 or 1).
    """
    assert reading_frame_offset in (0, 1, 2)
    assert reading_frame_direction in (-1, 1)
    if reading_frame_direction == -1:
        # Reverse strand: translate the reverse complement.
        dna_sequence = rev_complement(dna_sequence)
    rna = dna_to_rna(dna_sequence)
    codons = split_codons(sequence=rna, reading_frame_offset=reading_frame_offset,
                          reading_frame_direction=reading_frame_direction)
    return [CODON_TABLE[codon] for codon in codons]
def translate_rna(rna_sequence, reading_frame_offset=0, reading_frame_direction=1):
    """
    Translate an RNA string into a list of 1-letter amino acid codes, reading
    at the given frame offset (0, 1, 2) and direction (-1 or 1).
    """
    codons = split_codons(sequence=rna_sequence,
                          reading_frame_offset=reading_frame_offset,
                          reading_frame_direction=reading_frame_direction)
    return [CODON_TABLE[codon] for codon in codons]
def split_codons(sequence, reading_frame_offset=0, reading_frame_direction=1):
    """
    Return the list of 3-character codon strings found in *sequence* starting
    at the given frame offset (0, 1, 2); any trailing partial codon is dropped.
    The direction argument is validated but not otherwise used here.
    """
    assert reading_frame_offset in (0, 1, 2)
    assert reading_frame_direction in (-1, 1)
    usable = len(sequence) - reading_frame_offset
    last_start = reading_frame_offset + (usable // 3) * 3
    return [sequence[start:start + 3]
            for start in range(reading_frame_offset, last_start, 3)]
def parse_line_dict(line, field_names, split_char='\t', strict=True, defaults=None):
    """
    Divide a string into a dictionary of named fields and values, assuming the
    values appear in the same order as *field_names*, separated by *split_char*.

    In strict mode a missing field re-raises IndexError; otherwise the
    corresponding entry of *defaults* is used.

    Fix: the strict-mode error message was a leftover Python-2 print statement
    (`print()` followed by a discarded bare string); it now actually prints.
    """
    if not strict:
        assert len(field_names) == len(defaults)
    result = {}
    split_line = line.strip().split(split_char)
    for idx, field in enumerate(field_names):
        try:
            result[field] = split_line[idx]
        except IndexError as ie:
            if strict:
                print('Missing field {} in line: {}'.format(field, line))
                raise ie
            else:
                result[field] = defaults[idx]
    return result
def dict_apply(func, dict_1, dict_2):
    """
    Combine two dicts elementwise with *func* on shared keys; keys present in
    only one dict keep that dict's value unchanged.
    """
    combined = {}
    for key in set(dict_1.keys()).union(list(dict_2.keys())):
        if key in dict_1 and key in dict_2:
            combined[key] = func(dict_1[key], dict_2[key])
        elif key in dict_1:
            combined[key] = dict_1[key]
        else:
            combined[key] = dict_2[key]
    return combined
def dict_add(dict_1, dict_2):
    # Elementwise sum of two dicts via dict_apply; a key present in only one
    # of the dicts keeps that dict's value unchanged.
    return dict_apply(operator.add, dict_1, dict_2)
def dict_sub(dict_1, dict_2):
    # Elementwise dict_1 - dict_2 for shared keys. NOTE(review): keys present
    # only in dict_2 are copied through *unnegated* (dict_apply semantics),
    # unlike dict_diff, which negates them — confirm which is intended.
    return dict_apply(operator.sub, dict_1, dict_2)
def dict_diff(dict_a, dict_b):
    """
    Perform an elementwise subtraction of dict_b from dict_a. Keys only in
    dict_a keep their value; keys only in dict_b are negated.
    """
    keys_a = set(dict_a.keys())
    keys_b = set(dict_b.keys())
    result = {}
    for key in keys_a - keys_b:
        result[key] = dict_a[key]
    for key in keys_b - keys_a:
        result[key] = -dict_b[key]
    for key in keys_a & keys_b:
        result[key] = dict_a[key] - dict_b[key]
    return result
def split_with_defaults(line, split_char='\t', defaults=[]):
    """
    Divide a string into a list of values separated by *split_char*,
    filling any missing trailing values from *defaults*.

    Fix: the slice was `defaults[len(split_line) - len(defaults):]`, which
    appended ALL defaults whenever the line already had every field
    (index 0 when the lengths are equal). Use `defaults[len(split_line):]`.
    """
    split_line = line.strip().split(split_char)
    assert len(split_line) <= len(defaults)
    return split_line + defaults[len(split_line):]
def freq(an_iterable):
    """
    Generate a dictionary mapping each distinct object in *an_iterable*
    to the number of times it occurs.
    """
    counts = {}
    for item in an_iterable:
        counts[item] = counts.get(item, 0) + 1
    return counts
def mode(an_iterable, rank=0, exclude=[]):
    """
    Return the most common object in <an_iterable> that is not in <exclude>;
    with rank != 0, return the (rank+1)-most common item instead.
    """
    # Inline frequency count (same logic as freq()).
    counts = {}
    for item in an_iterable:
        counts[item] = counts.get(item, 0) + 1
    if exclude:
        banned = set(exclude)
        ranked = sorted((pair for pair in counts.items() if pair[0] not in banned),
                        key=lambda kv: kv[1], reverse=True)
    else:
        ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[rank][0]
def convert_chroms(chrom_string, dest='ucsc'):
    """
    Convert a chromosome name to the naming style *dest* ('ucsc', 'ensembl'
    or 'yeast'); the source style is auto-detected.

    Fix: the Ensembl mitochondrial name was misspelled
    ('dmel_mitochonrdion_genome'), so round-tripping through the UCSC branch
    (which checks the correct spelling) was impossible.

    :param chrom_string: chromosome identifier, e.g. 'chrM', '2L', 'IV'
    :param dest: target naming convention
    :return: converted chromosome name string
    :raises ValueError: for an unknown *dest*
    """
    # Normalize roman-numeral names (e.g. 'IV') to arabic digits first.
    try:
        chrom_string = str(romannumerals.roman_to_int(chrom_string))
    except ValueError:
        pass
    if dest == 'ensembl':
        if chrom_string == 'chrM':
            return 'dmel_mitochondrion_genome'
        elif chrom_string[:3].lower() == 'chr':
            return chrom_string[3:]
        else:
            return chrom_string
    elif dest == 'ucsc':
        if chrom_string == 'dmel_mitochondrion_genome':
            return 'chrM'
        elif chrom_string[:3].lower() == 'chr':
            return chrom_string
        else:
            return 'chr{}'.format(chrom_string)
    elif dest == 'yeast':
        if chrom_string[:3].lower() == 'chr':
            chrom_string = chrom_string[3:]
        try:
            return romannumerals.int_to_roman(int(chrom_string))
        except ValueError:
            return chrom_string
    else:
        raise ValueError('Unknown destination {}'.format(dest))
# def convert_chroms(chrom_string, source, dest):
# if source == dest:
# return chrom_string
# if source == 'ucsc':
# if dest == 'ensembl':
# if chrom_string == 'chrM':
# return 'dmel_mitochonrdion_genome'
# elif chrom_string[:3].lower() == 'chr':
# return chrom_string[3:]
# else:
# return chrom_string
# else:
# raise ValueError('Unknown destination {} for source {}'.format(dest, source))
# elif source == 'ensembl':
# if dest == 'ucsc':
# if chrom_string == 'dmel_mitochondrion_genome':
# return 'chrM'
# return 'chr{}'.format(chrom_string)
# else:
# raise ValueError('Unknown destination {} for source {}'.format(dest, source))
# else:
# raise ValueError('Unknown source {}'.format(source))
def convert_csv_to_tsv(filepath):
    """
    Convert the CSV file at *filepath* to a .tsv file with the same base name.

    Fixes:
    - `filepath.strip('.csv')` strips any leading/trailing characters in the
      set {'.', 'c', 's', 'v'} (e.g. 'vals.csv' -> 'al' + '.tsv'); use
      os.path.splitext to remove only the extension.
    - the 'rU' open mode was removed in Python 3.11; csv now recommends
      opening with newline='' instead.
    """
    base, _extension = os.path.splitext(filepath)
    with open(filepath, 'r', newline='') as infile:
        reader = csv.reader(infile, dialect=csv.excel)
        with open(base + '.tsv', 'w', newline='') as outfile:
            writer = csv.writer(outfile, dialect=csv.excel_tab)
            for row in reader:
                writer.writerow(row)
def home_path(subfolder):
    """
    Return a path consisting of *subfolder* joined to the current user's
    home directory (taken from the HOME environment variable).
    """
    home = os.environ['HOME']
    return os.path.join(home, subfolder)
def parse_path(fullpath):
    """
    Split *fullpath* into its components: (directory, filename without
    extension, extension). The extension is everything after the last dot.
    """
    *dir_parts, filename = fullpath.split(os.sep)
    path_prefix = os.sep.join(dir_parts)
    *name_parts, extension = filename.split('.')
    filename_prefix = '.'.join(name_parts)
    return path_prefix, filename_prefix, extension
def rev_complement(seq):
    """
    Return the reverse complement of a DNA string.

    Fix: this redefinition shadows the earlier rev_complement but returned a
    *list* of characters instead of a string, which broke callers such as
    translate_dna on the reverse strand (dna_to_rna calls .replace on the
    result). Join the characters back into a string so both definitions agree.
    """
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N', '': ''}
    return ''.join(complements[x] for x in seq[::-1])
def DNA_to_RNA(seq):
    # Transcribe DNA to RNA by swapping thymine (T) for uracil (U).
    # NOTE(review): duplicates dna_to_rna defined earlier in this module —
    # consider consolidating to one definition.
    return seq.replace('T', 'U')
def RNA_to_DNA(seq):
    # Back-transcribe RNA to DNA by swapping uracil (U) for thymine (T).
    # NOTE(review): duplicates rna_to_dna defined earlier in this module.
    return seq.replace('U', 'T')
def parse_chromosome_ID(chromosome_identifier):
    """
    Parse a chromosome identifier and return the chromosome number as a
    *string* of digits (roman numerals are converted), or the original input
    unchanged if it does not match.

    The identifier consists of two parts (first optional): one of the words
    "chr"/"chromosome" (or nothing), then a decimal number or a roman
    numeral, optionally separated by whitespace.

    Returns None when a roman numeral is present but invalid.
    """
    # Bug fix: the original executed `re.IGNORECASE = False`, silently
    # clobbering the stdlib's module-level flag constant for every other
    # user of `re` in the process. The match below is case-sensitive anyway.
    chromosome_identifier = str(chromosome_identifier).strip()
    # OK, it's not a refseq/genbank troublemaker, maybe it's some flavor of numerical identifier . . .
    m = re.match(r"(?P<prefix>chro?m?o?s?o?m?e?|\b)\s*(?P<number>\d+|\b|\B)(?P<numeral>[MDCLXVI]*\Z)",
                 chromosome_identifier)
    # The pattern must match and contain exactly one of: a number, a numeral.
    if m and bool(m.group('number')) != bool(m.group('numeral')):
        if m.group('numeral'):
            try:
                num = str(romannumerals.roman_to_int(m.group('numeral')))
            except ValueError:
                return None
            else:
                return num
        elif m.group('number'):
            return m.group('number')
    # if it doesn't fit any of these patterns, just return the original input
    return chromosome_identifier
def parse_fasta_list(fasta):
    """
    Return the contents of a FASTA string as a list of dicts, each with a
    'header' and a 'sequence' key. (Text before the first '>' yields an
    initial record with an empty header.)
    """
    records = []
    for chunk in fasta.split('>'):
        lines = chunk.split('\n')
        records.append({'header': lines[0], 'sequence': ''.join(lines[1:])})
    return records
def parse_fasta_dict(fasta_string):
    """
    Return the contents of a FASTA string as a dict of sequences keyed by the
    first whitespace-delimited token of each header line. Records with an
    empty name (e.g. text before the first '>') are skipped; on duplicate
    names the last record wins.
    """
    sequences = {}
    for chunk in fasta_string.split('>'):
        lines = chunk.split('\n')
        name = re.split(r'\s+', lines[0])[0]
        if name != '':
            sequences[name] = ''.join(lines[1:])
    return sequences
def read_fasta(fasta_filename):
    """
    Read :param:`fasta_filename` and return a dict of sequence strings keyed
    by sequence name (see parse_fasta_dict).
    """
    with open(fasta_filename, 'r') as handle:
        return parse_fasta_dict(handle.read())
def write_fasta_dict(sequence_dict, fasta_filename, COL_WIDTH=60):
    """
    Write the sequences in <sequence_dict> to <fasta_filename> in FASTA
    format, wrapping sequence lines at COL_WIDTH characters.

    :param sequence_dict: dict of sequence strings keyed by name.
    :param fasta_filename: output path.
    :param COL_WIDTH: maximum characters per sequence line.
    """
    with open(fasta_filename, 'w') as out_file:
        for name in numerical_string_sort(sequence_dict):
            out_file.write('>{}\n'.format(name))
            seq = sequence_dict[name]
            # Emit the sequence in fixed-width slices; the final slice may
            # be shorter. An empty sequence writes only the header line.
            for start in range(0, len(seq), COL_WIDTH):
                out_file.write(seq[start:start + COL_WIDTH] + '\n')
def compute_fasta_offset(sequence_location, header_size, line_size, cr_lf_size=1):
    """
    Map a position within a (single-record) FASTA sequence to a byte
    offset in the file, assuming a constant line length throughout.

    :param sequence_location: 0-based position in the sequence (non-negative).
    :param header_size: byte length of the header line (incl. newline).
    :param line_size: byte length of each sequence line (incl. newline).
    :param cr_lf_size: byte length of the line terminator.
    """
    chars_per_line = line_size - cr_lf_size
    full_lines = int(sequence_location / chars_per_line)
    within_line = sequence_location % chars_per_line
    return full_lines * line_size + within_line + header_size
def convert_nbinom_params(mu, var):
    """
    Convert a mean/variance pair into the (n, p) parameterization used
    by scipy.stats for the negative binomial distribution.

    :raises ValueError: if var <= mu (overdispersion is required).
    """
    if not var > mu:
        raise ValueError('Variance must be greater than mean for negative binomial distribution')
    success_prob = mu / float(var)
    num_successes = mu * success_prob / float(1 - success_prob)
    return num_successes, success_prob
def convert_binom_params(mu, var):
    """
    Return the (n, p) parameters of a binomial distribution with
    expected value <mu> and expected variance <var>.

    :param mu: desired mean.
    :param var: desired variance.
    :return: (n, p) with n rounded to the nearest integer.
    """
    success_prob = (var - mu) / float(-mu)
    trials = iround(mu / float(success_prob))
    return trials, success_prob
def fit_neg_binom(data):
    """
    Fit a negative binomial distribution to <data> by the method of
    moments; returns the (n, p) parameters used by scipy.stats.

    :param data: array-like with .mean() and .var() methods.
    """
    return convert_nbinom_params(data.mean(), data.var())
def convert_normal_lognormal(mu, var):
    """
    Given lognormal parameters mu and var (the mean and variance of the
    underlying normal), return the mean and variance of the lognormal
    distribution itself.
    See http://www.mathworks.com/help/stats/lognstat.html for details
    """
    mu = float(mu)
    var = float(var)
    lognormal_mean = math.exp(mu + var / 2)
    lognormal_var = math.exp(2 * mu + var) * (math.exp(var) - 1)
    return lognormal_mean, lognormal_var
def convert_lognormal_normal(mu, var):
    """
    Convert the moments (mean, variance) of a lognormal distribution to
    the (mu, sigma) parameters needed to generate such a distribution.
    See http://www.mathworks.com/help/stats/lognstat.html for details
    """
    mu = float(mu)
    var = float(var)
    underlying_mu = math.log(mu ** 2 / math.sqrt(var + mu ** 2))
    underlying_sigma = math.sqrt(math.log(var / mu ** 2 + 1))
    return underlying_mu, underlying_sigma
def logit(arr):
    """Element-wise log-odds transform: log(p / (1 - p))."""
    odds = arr / (1 - arr)
    return numpy.log(odds)
def logistic(arr, L, k, x0=0):
    """Generalized logistic function with ceiling L, steepness k and midpoint x0."""
    exponent = -k * (arr - x0)
    return L / (1 + numpy.exp(exponent))
def rank(arr):
    """
    Return an array consisting of the ranks of the elements in <arr>.
    Currently doesn't explicitly deal with ties, so behavior for tied
    values is not specified.
    """
    # numpy.int was removed in NumPy 1.24; the builtin int dtype is the
    # documented replacement.
    ranks = numpy.zeros(len(arr), dtype=int)
    order = numpy.argsort(arr)
    # Invert the sort permutation: element order[i] has rank i.
    ranks[order] = numpy.arange(len(arr))
    return ranks
def quadratic_formula(a, b, c):
    """
    Return the two real solutions (x1, x2) of a*x^2 + b*x + c = 0, or
    None (after printing a message) when the discriminant is negative.

    :param a: quadratic coefficient (must be non-zero).
    :param b: linear coefficient.
    :param c: constant term.
    """
    discriminant = b ** 2 - 4 * a * c
    if discriminant >= 0:
        sol1 = (-b + math.sqrt(discriminant)) / float(2 * a)
        sol2 = (-b - math.sqrt(discriminant)) / float(2 * a)
        return sol1, sol2
    else:
        # Fixed broken 2to3 conversion: the message used to be a bare
        # string expression after an empty print() call.
        print('No real solutions')
def dist_similarity_pcc(arr1, arr2, bin_min=None, bin_max=None, num_bins=100):
    """
    Compare the distributions of two arrays by histogramming both over a
    common set of bins and returning the Pearson correlation coefficient
    of the two histograms.

    :param bin_min: lower bin edge (defaults to the joint minimum).
    :param bin_max: upper bin edge (defaults to the joint maximum).
    :param num_bins: number of bin edges passed to numpy.linspace.
    """
    if bin_min is None:
        bin_min = min(arr1.min(), arr2.min())
    if bin_max is None:
        # Fixed: previously this compared the bound methods arr.max
        # instead of calling them, which crashed downstream.
        bin_max = max(arr1.max(), arr2.max())
    # Fixed: the bins previously always started at 0, silently ignoring
    # bin_min (both the parameter and the computed default).
    bins = numpy.linspace(bin_min, bin_max, num=num_bins)
    h1 = numpy.histogram(arr1, bins)[0]
    h2 = numpy.histogram(arr2, bins)[0]
    return scipy.stats.pearsonr(h1, h2)[0]
def equilibirum(A, B, Kd):
    """
    Return the equilibrium concentrations ([AB], [A], [B]) given the
    total concentrations of reactants A and B and the dissociation
    constant Kd. Returns None (after printing a message) when no
    physically plausible (all-positive) solution exists.

    Note: the function name preserves a historical misspelling of
    'equilibrium' so existing callers keep working.
    """
    # [AB] satisfies a quadratic derived from Kd = [A][B]/[AB] plus mass
    # conservation: x^2 - (A + B + Kd) x + A*B = 0.
    a = 1
    b = -(B + A + Kd)
    c = A * B
    sol1, sol2 = quadratic_formula(a, b, c)
    A_1 = A - sol1
    B_1 = B - sol1
    A_2 = A - sol2
    B_2 = B - sol2
    # Choose the root that best reproduces Kd while keeping all
    # concentrations positive.
    error_1 = A_1 * B_1 / sol1 - Kd
    error_2 = A_2 * B_2 / sol2 - Kd
    if error_1 < error_2 and sol1 > 0 and A_1 > 0 and B_1 > 0:
        return sol1, A_1, B_1
    elif sol2 > 0 and A_2 > 0 and B_2 > 0:
        return sol2, A_2, B_2
    else:
        # Fixed broken 2to3 conversion of the failure message.
        print("No plausible solutions found (all solutions involve negative concentrations)!")
def generate_genome_table(fasta_filename, genome_table_filename=''):
    """
    Compute the length of every non-empty sequence in <fasta_filename>
    and return a dict mapping sequence name to length. If
    <genome_table_filename> is given, also write a two-column
    tab-separated table of (name, length).
    """
    total_size = 0
    genome_table = {}
    # 'rU' mode was removed in Python 3.11; plain 'r' already provides
    # universal newlines on Python 3.
    with open(fasta_filename, 'r') as fasta_file:
        # Fixed broken 2to3 conversions throughout: progress messages were
        # bare string expressions following empty print() calls.
        print('Checking the lengths of all sequences in {} ...'.format(fasta_filename))
        fasta_dict = parse_fasta_dict(fasta_file.read())
        for chrom in sorted(fasta_dict):
            if len(fasta_dict[chrom]) > 0:
                genome_table[chrom] = len(fasta_dict[chrom])
                total_size += genome_table[chrom]
                print('{}\t{}'.format(chrom, genome_table[chrom]))
        print('Total size: {}'.format(total_size))
    if genome_table_filename:
        with open(genome_table_filename, 'w') as genome_table_file:
            print('Writing genome table to {}'.format(genome_table_filename))
            genome_table_writer = csv.writer(genome_table_file, dialect=csv.excel_tab)
            for chrom in sorted(genome_table):
                genome_table_writer.writerow([chrom, genome_table[chrom]])
    return genome_table
def count_seq_sizes(fasta_file, verbose=True):
    """
    Scan an open FASTA file handle and return a dict mapping sequence
    name (the first whitespace-delimited token of each header) to the
    total length of its sequence.

    :param fasta_file: iterable of lines, e.g. an open file object.
    :param verbose: if True, print a progress line per sequence.
    """
    start_time = datetime.datetime.now()
    seq_sizes = {}
    for line in fasta_file:
        if line.startswith('>'):
            seq_name = re.split(WHITESPACE, line[1:].strip())[0]
            if verbose:
                # Fixed broken 2to3 conversion of the progress message.
                print('Analyzing sequence {}'.format(seq_name))
            seq_sizes[seq_name] = 0
        else:
            # NOTE(review): assumes the file begins with a '>' header;
            # sequence data before the first header raises NameError.
            seq_sizes[seq_name] += len(line.strip())
    # Fixed broken 2to3 conversion of the timing message.
    print('Done in {}.'.format(datetime.datetime.now() - start_time))
    return seq_sizes
def indent(text, numtabs=1):
    """
    Prefix every line of <text> with <numtabs> tab characters
    (default 1) and return the result.
    """
    prefix = '\t' * numtabs
    return '\n'.join(prefix + line for line in text.split('\n'))
def first_leaf(nested_dict):
    """
    On the assumption that all the leaves of a nested dictionary (tree) structure are in some way equivalent,
    this is a quick method of returning the first such leaf without knowing the specific keys used
    to construct the nested dict.

    :param nested_dict: arbitrarily nested structure of dict-like and
        list-like containers.
    :return: the first non-container value reached by repeatedly taking
        the first key / first element at each level.
    """
    partial_dict = nested_dict
    while True:  # infinite loop
        try:
            # see if we are dictionary-like, and if so go down one level
            partial_dict = partial_dict[list(partial_dict.keys())[0]]
        except AttributeError:
            try:
                # if not, perhaps we are a list or other list-like object?
                # NOTE(review): a string leaf is list-like and indexing it
                # yields another string, so string leaves appear to loop
                # forever here — confirm intended input never has them.
                partial_dict = list(partial_dict)
            except TypeError:
                # we're not dictionary-like and not list-like, assume we're a leaf and return
                return partial_dict
            else:
                # if we are list-like, go down to the next level
                partial_dict = partial_dict[0]
def sterilize_dict(unclean_dict):
    """
    Recursively converts a data structure containing one or more nested levels of collections.defaultdict to plain dicts.
    It will stop the breadth-first search at the first level that is not convertible to a dict, and copy these subtrees over to the
    new structure

    :param unclean_dict: a (possibly nested) mapping, e.g. a defaultdict tree.
    :return: an equivalent tree of plain dicts; subtrees that cannot be
        converted to a dict are returned unchanged.
    """
    try:
        # unclean_dict.default_factory = None
        clean_dict = dict(unclean_dict)
        # print clean_dict
    except TypeError:
        # Not convertible to a dict (e.g. a scalar leaf): return as-is.
        return unclean_dict
    except ValueError:
        # dict() also raises ValueError for malformed key/value sequences.
        return unclean_dict
    else:
        # if type(unclean_dict) == type({}):
        # Conversion succeeded: recurse into every value.
        for k in list(unclean_dict.keys()):
            # print 'key: {}'.format(k)
            clean_dict[k] = sterilize_dict(unclean_dict[k])
        return clean_dict
def flatten(l, ltypes=(list, tuple)):
    """
    :param l: a list to flatten
    :param ltypes: valid variable types to unflatten
    :return: a flattened list
    Flattens an arbitrarily-deep nested list
    Credit: http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
    adapted from Mike C. Fletcher's BasicTypes
    """
    ltype = type(l)
    l = list(l)
    i = 0
    while i < len(l):
        # Repeatedly expand nested containers in place at position i
        # until l[i] is a non-container element.
        while isinstance(l[i], ltypes):
            if not l[i]:
                # Empty container: drop it and step back so the element
                # that shifted into position i is examined next.
                l.pop(i)
                i -= 1
                break
            else:
                # Splice the container's contents into the list at i.
                l[i:i + 1] = l[i]
        i += 1
    # Return the same container type (list or tuple) we were given.
    return ltype(l)
def threshold(vec, thresh):
    """Zero out entries of <vec> below <thresh>; entries >= thresh are kept."""
    keep_mask = numpy.greater_equal(vec, thresh)
    return keep_mask * vec
def quantize(vector, precision_factor):
    """
    Returns a copy of <vector> that is scaled by <precision_factor> and then rounded to the nearest integer.
    To re-scale, simply divide by <precision_factor>.
    Note that because of rounding, an open interval from (x,y) will give rise
    to up to (x - y) * <precision_factor> + 1 bins.
    """
    scaled = numpy.asarray(vector) * precision_factor
    return scaled.round(0)
def set_partitions(parent_set, num_partitions):
    """
    A very efficient algorithm (Algorithm U) is described by Knuth in the Art of Computer Programming, Volume 4, Fascicle 3B to find all set partitions with a given number of blocks.
    Python implementation by Adeel Zafar Soomro, retrieved from "http://codereview.stackexchange.com/questions/1526/finding-all-k-subset-partitions" on May 30, 2014. Variables renamed by me.

    :param parent_set: an indexable sequence whose elements are partitioned.
    :param num_partitions: number of non-empty blocks per partition.
    :return: a generator yielding each partition as a list of lists.
    """
    m = num_partitions
    ns = parent_set
    def visit(n, a):
        # Materialize the partition encoded by the restricted-growth
        # string a: element j goes into block a[j + 1].
        ps = [[] for i in range(m)]
        for j in range(n):
            ps[a[j + 1]].append(ns[j])
        return ps
    def f(mu, nu, sigma, n, a):
        # "Forward" recursion of Knuth's Algorithm U.
        if mu == 2:
            yield visit(n, a)
        else:
            for v in f(mu - 1, nu - 1, (mu + sigma) % 2, n, a):
                yield v
        if nu == mu + 1:
            a[mu] = mu - 1
            yield visit(n, a)
            while a[nu] > 0:
                a[nu] = a[nu] - 1
                yield visit(n, a)
        elif nu > mu + 1:
            if (mu + sigma) % 2 == 1:
                a[nu - 1] = mu - 1
            else:
                a[mu] = mu - 1
            if (a[nu] + sigma) % 2 == 1:
                for v in b(mu, nu - 1, 0, n, a):
                    yield v
            else:
                for v in f(mu, nu - 1, 0, n, a):
                    yield v
            while a[nu] > 0:
                a[nu] = a[nu] - 1
                if (a[nu] + sigma) % 2 == 1:
                    for v in b(mu, nu - 1, 0, n, a):
                        yield v
                else:
                    for v in f(mu, nu - 1, 0, n, a):
                        yield v
    def b(mu, nu, sigma, n, a):
        # "Backward" recursion of Knuth's Algorithm U.
        # NOTE(review): the bare visit(n, a) calls in the nu == mu + 1
        # branch discard their result, mirroring the published recipe —
        # confirm against the original if modifying.
        if nu == mu + 1:
            while a[nu] < mu - 1:
                visit(n, a)
                a[nu] = a[nu] + 1
            visit(n, a)
            a[mu] = 0
        elif nu > mu + 1:
            if (a[nu] + sigma) % 2 == 1:
                for v in f(mu, nu - 1, 0, n, a):
                    yield v
            else:
                for v in b(mu, nu - 1, 0, n, a):
                    yield v
            while a[nu] < mu - 1:
                a[nu] = a[nu] + 1
                if (a[nu] + sigma) % 2 == 1:
                    for v in f(mu, nu - 1, 0, n, a):
                        yield v
                else:
                    for v in b(mu, nu - 1, 0, n, a):
                        yield v
            if (mu + sigma) % 2 == 1:
                a[nu - 1] = 0
            else:
                a[mu] = 0
        if mu == 2:
            visit(n, a)
        else:
            for v in b(mu - 1, nu - 1, (mu + sigma) % 2, n, a):
                yield v
    n = len(ns)
    # Seed the restricted-growth string so the last m elements start in
    # distinct blocks.
    a = [0] * (n + 1)
    for j in range(1, m + 1):
        a[n - m + j] = j - 1
    return f(m, n, 0, n, a)
def count_lines(fname):
    """Return the number of lines in <fname> (0 for an empty file)."""
    with open(fname) as infile:
        line_count = 0
        for line_count, _ in enumerate(infile, start=1):
            pass
    return line_count
def triangular_kernel(bandwidth, normalize=False):
    """
    Return a 1-D triangular (tent) kernel of length <bandwidth> peaking
    at 1.0 in the middle. If <normalize>, divide the kernel by its
    length.
    """
    bandwidth = int(bandwidth)
    midpoint = int(bandwidth / float(2) - 0.5)
    positions = numpy.arange(bandwidth)
    kern = 1 - numpy.abs(midpoint - positions) / float(midpoint + 1)
    if normalize:
        kern = kern / float(bandwidth)
    return kern
def triangular_kernel_2d(bandwidth, normalize=False):
    """
    Return a 2-D triangular kernel formed as the outer product of a 1-D
    tent kernel with itself. If <normalize>, the 1-D kernel is first
    divided by <bandwidth>.
    """
    bandwidth = int(bandwidth)
    midpoint = int(bandwidth / float(2) - 0.5)
    kern_1d = numpy.zeros(bandwidth)
    for pos in range(bandwidth):
        kern_1d[pos] = 1 - abs(midpoint - pos) / float(midpoint + 1)
    if normalize:
        kern_1d /= float(bandwidth)
    return numpy.outer(kern_1d, kern_1d)
def gaussian_kernel(sd, sd_cutoff=3, normalize=False):
    """
    Return a 1-D Gaussian kernel with standard deviation <sd>, truncated
    at +/- <sd_cutoff> standard deviations. If <normalize>, scale so the
    peak value is 1.0.
    """
    # Cast to int, as gaussian_kernel_2d already does: a non-integer sd
    # otherwise yields a float length, which numpy.zeros/range reject.
    bw = int(sd_cutoff * sd * 2 + 1)
    midpoint = sd_cutoff * sd
    kern = numpy.zeros(bw)
    frozen_rv = scipy.stats.norm(scale=sd)
    for i in range(bw):
        kern[i] = frozen_rv.pdf(i - midpoint)
    if normalize:
        kern = kern / kern.max()
    return kern
def gaussian_kernel_2d(sd, sd_cutoff=3, normalize=False):
    """
    Return a 2-D Gaussian kernel as the outer product of a truncated 1-D
    Gaussian with itself. If <normalize>, the 1-D kernel is first scaled
    to peak at 1.0.
    """
    bw = int(sd_cutoff * sd * 2 + 1)
    midpoint = sd_cutoff * sd
    frozen_rv = scipy.stats.norm(scale=sd)
    kern_1d = numpy.array([frozen_rv.pdf(i - midpoint) for i in range(bw)])
    if normalize:
        kern_1d = kern_1d / kern_1d.max()
    return numpy.outer(kern_1d, kern_1d)
def square_kernel(width, normalize=False):
    """Return a flat (boxcar) kernel of ones; if <normalize>, it sums to 1."""
    kernel = numpy.ones(width)
    return kernel / width if normalize else kernel
def apply_kernel(vec, kern):
    """Convolve <vec> with kernel <kern> (FFT-based, same-size output)."""
    smoothed = scipy.signal.fftconvolve(vec, kern, mode='same')
    return smoothed
def bisect_root(solve_func, lower_bound, upper_bound, convergence_tolerance, max_iters=float('inf')):
    """
    Implements the bisection method of numerically finding a root of an
    equation in one variable. If multiple roots exist, only one will be
    found.

    :param solve_func: function of one variable that returns zero at a root.
    :param lower_bound: lower edge of the search interval.
    :param upper_bound: upper edge of the search interval.
    :param convergence_tolerance: |f(midpoint)| below which we stop.
    :param max_iters: maximum number of iterations (defaults to infinite).
    :return: the midpoint of the final interval.
    """
    iter_count = 0
    # Initialize midpoint up front so it is defined even when the initial
    # guess already satisfies the tolerance (previously this path raised
    # NameError on the final return).
    midpoint = (lower_bound + upper_bound) / float(2)
    f_b = solve_func(midpoint)
    while math.fabs(f_b) > convergence_tolerance and iter_count <= max_iters:
        iter_count += 1
        midpoint = (lower_bound + upper_bound) / float(2)
        f_a = solve_func(lower_bound)
        f_b = solve_func(midpoint)
        f_c = solve_func(upper_bound)
        if f_b == 0:
            return midpoint
        elif math.copysign(1, f_a) != math.copysign(1, f_b):
            # Sign change in the lower half: shrink from above.
            upper_bound = midpoint
        elif math.copysign(1, f_c) != math.copysign(1, f_b):
            # Sign change in the upper half: shrink from below.
            lower_bound = midpoint
    return midpoint
def _empirical_p_val_vectorized_left(data, values, standard_approximation=True):
    """
    Left-tail empirical p-values for a sorted vector of query <values>
    against a sorted sample <data> (both ascending). The two sorted
    vectors are walked in tandem, so the scan is O(len(data) + len(values)).

    When <standard_approximation> is True, a pseudocount of 1 is added to
    both numerator and denominator to avoid zero p-values.
    """
    i = 0
    p_vals = numpy.zeros(len(values))
    for value_idx, value in enumerate(values):
        if data[i] <= value:
            # p_vals.append()
            # Advance past every data point <= this query value.
            while i < len(data) and data[i] <= value:
                # print(value <= data[i], i < len(data))
                # print(value, i, data[i])
                i += 1
            i -= 1
            p_vals[value_idx] = ((i + 1 + (0,1)[bool(standard_approximation)]) / (len(data)+ (0,1)[bool(standard_approximation)]))
        else:
            # No data point is <= this value: only the pseudocount remains.
            p_vals[value_idx] = (0,1)[bool(standard_approximation)] / (len(data)+ (0,1)[bool(standard_approximation)])
    return p_vals
def _empirical_p_val_vectorized_right(data, values, standard_approximation=True):
    """
    Right-tail empirical p-values for a sorted vector of query <values>
    against a sorted sample <data> (both ascending). Both vectors are
    reversed so the tandem walk mirrors the left-tail version; the result
    is reversed back before returning.

    When <standard_approximation> is True, a pseudocount of 1 is added to
    both numerator and denominator to avoid zero p-values.
    """
    values = values[::-1]
    data = data[::-1]
    #print(values, data)
    i = 0
    p_vals = numpy.zeros(len(values))
    for value_idx, value in enumerate(values):
        #print(value, i, data[i])
        if data[i] >= value:
            # Advance past every data point >= this query value.
            while i < len(data) and data[i] >= value:
                # print(value <= data[i], i < len(data))
                i += 1
                # print(value, i, data[i])
            i -= 1
            p_vals[value_idx] = ((i + 1 + (0,1)[bool(standard_approximation)]) / (len(data)+ (0,1)[bool(standard_approximation)]))
        else:
            # No data point is >= this value: only the pseudocount remains.
            p_vals[value_idx] = (0,1)[bool(standard_approximation)] / (len(data)+ (0,1)[bool(standard_approximation)])
        #print(p_vals[value_idx])
    return p_vals[::-1]
def empirical_p_val(data, values, tail='both', standard_approximation=True, is_sorted=False):
    """
    Empirical p-value(s) of :param:`values` against the observed sample
    :param:`data`.

    :param data: observed sample (1-D).
    :param values: a scalar query value or a numpy vector of queries.
    :param tail: 'left', 'right' (one-sided) or 'both' (two-sided).
    :param standard_approximation: add-one pseudocount to avoid zero
        p-values on the vectorized path (the scalar path always applies
        the pseudocount).
    :param is_sorted: set True when data and values are already sorted
        ascending, to skip re-sorting.
    :return: a scalar p-value, or an array aligned with :param:`values`.
    :raises ValueError: on an invalid tail.
    """
    if tail not in ('left', 'right', 'both'):
        raise ValueError('Invalid value for parameter :tail:, {}'.format(tail))
    try:
        len(values)
    except TypeError:
        is_vector = False
        value = values
    else:
        is_vector = True
    if is_vector and not is_sorted:
        data = sorted(data)
        value_sort_idx = numpy.argsort(values)
        # Fixed: restoring the original order requires the inverse
        # permutation, i.e. argsort of the sort index. Previously this was
        # argsort(values) a second time, which scrambled the output order
        # for any non-trivially-ordered input.
        restore_values_sort_idx = numpy.argsort(value_sort_idx)
        values = values[value_sort_idx]
        del value_sort_idx
    if tail in ('left', 'both'):
        if is_vector:
            left_p_val = _empirical_p_val_vectorized_left(data, values, standard_approximation=standard_approximation)
        else:
            left_p_val = (numpy.sum(numpy.less_equal(data, value)) + 1) / (len(data) + 1)
    if tail in ('right', 'both'):
        if is_vector:
            right_p_val = _empirical_p_val_vectorized_right(data, values, standard_approximation=standard_approximation)
        else:
            right_p_val = (numpy.sum(numpy.greater_equal(data, value)) + 1) / (len(data) + 1)
    if tail == 'left':
        p_vals = left_p_val
    elif tail == 'right':
        p_vals = right_p_val
    else:
        # Two-sided: double the smaller tail, capped at 1.
        p_vals = numpy.minimum(numpy.minimum(left_p_val, right_p_val) * 2, 1)
    if is_vector and not is_sorted:
        p_vals = p_vals[restore_values_sort_idx]
    return p_vals
def quantile(data, q):
    """
    Return the value at the <q>th quantile of <data> using nearest-rank
    selection clamped to the ends, or None when <data> is empty.
    """
    if len(data) > 0:
        idx = min(len(data) - 1, max(0, int(round(len(data) * q))))
        return sorted(data)[idx]
    # Removed a leftover debug print of the (empty) input.
    return None
def quantiles(data):
    """
    Returns a pandas Series of the quantiles of data in <data>. Quantiles start at 1 / (len(data) + 1) and
    end at len(data) / (len(data) + 1) to avoid singularities at the 0 and 1 quantiles.

    :param data: a 1-D sequence, numpy array or pandas Series.
    :return: pandas Series of quantile ranks, index-aligned with <data>
        when <data> has an index.
        NOTE(review): the assignment quants[sort_indices] below indexes
        by label on an integer-labeled Series; with a non-default index
        copied from <data> this may misalign — confirm against callers.
    """
    sort_indices = numpy.argsort(data)
    quants = pandas.Series(numpy.zeros(len(data)))
    try:
        quants.index = data.index
    except AttributeError:
        # Plain sequences have no .index; keep the default RangeIndex.
        pass
    quants[sort_indices] = (numpy.arange(len(data)) + 1) / float(len(data) + 1)
    return quants
def gaussian_norm(arr):
    """
    Quantile-normalize <arr> onto a standard Gaussian distribution.

    :param arr: 1-D data to normalize.
    :return: array of standard-normal values with the same rank order.
    """
    quants = numpy.array(quantiles(arr))
    standard_normal = scipy.stats.norm(loc=0, scale=1)
    return standard_normal.ppf(quants)
def de_norm(quants, original_data):
    """
    Map quantiles back onto the empirical distribution of
    <original_data>: returns the values of <original_data> found at the
    given quantile positions of its sorted order.

    :param quants: quantile values in [0, 1).
    :param original_data: pandas Series supplying the empirical distribution.
    """
    # Series.order() was removed from pandas long ago; sort_values() is
    # the documented replacement with identical default semantics.
    positions = numpy.array(quants * len(quants)).astype(int)
    return original_data.sort_values().iloc[positions]
def degauss(normed_values, original_data):
    """
    Invert a standard-Gaussian quantile normalization: convert
    <normed_values> back into quantiles via the normal CDF, then map
    those quantiles onto the empirical distribution of <original_data>.
    """
    standard_normal = scipy.stats.norm(loc=0, scale=1)
    return de_norm(standard_normal.cdf(normed_values), original_data)
def qnorm(p, mean=0.0, sd=1.0):
    """
    Modified from the author's original perl code (original comments follow below)
    by dfield@yahoo-inc.com.  May 3, 2004.

    Lower tail quantile for standard normal distribution function.

    This function returns an approximation of the inverse cumulative
    standard normal distribution function.  I.e., given P, it returns
    an approximation to the X satisfying P = Pr{Z <= X} where Z is a
    random variable from the standard normal distribution.

    The algorithm uses a minimax approximation by rational functions
    and the result has a relative error whose absolute value is less
    than 1.15e-9.

    Author:      Peter John Acklam
    Time-stamp:  2000-07-19 18:26:14
    E-mail:      pjacklam@online.no
    WWW URL:     http://home.online.no/~pjacklam

    :param p: probability in the open interval (0, 1).
    :param mean: mean of the target normal distribution.
    :param sd: standard deviation of the target normal distribution.
    :raises ValueError: if p is outside (0, 1).
    """
    if p <= 0 or p >= 1:
        # The original perl code exits here, we'll throw an exception instead
        raise ValueError("Argument to ltqnorm %f must be in open interval (0,1)" % p)
    # Coefficients in rational approximations.
    a = (-3.969683028665376e+01, 2.209460984245205e+02, \
         - 2.759285104469687e+02, 1.383577518672690e+02, \
         - 3.066479806614716e+01, 2.506628277459239e+00)
    b = (-5.447609879822406e+01, 1.615858368580409e+02, \
         - 1.556989798598866e+02, 6.680131188771972e+01, \
         - 1.328068155288572e+01)
    c = (-7.784894002430293e-03, -3.223964580411365e-01, \
         - 2.400758277161838e+00, -2.549732539343734e+00, \
         4.374664141464968e+00, 2.938163982698783e+00)
    d = (7.784695709041462e-03, 3.224671290700398e-01, \
         2.445134137142996e+00, 3.754408661907416e+00)
    # Define break-points.
    plow = 0.02425
    phigh = 1 - plow
    # Rational approximation for lower region:
    if p < plow:
        q = math.sqrt(-2 * math.log(p))
        z = (((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) * q + c[5]) / \
            ((((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1)
    # Rational approximation for upper region:
    elif phigh < p:
        q = math.sqrt(-2 * math.log(1 - p))
        z = -(((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) * q + c[5]) / \
            ((((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1)
    # Rational approximation for central region:
    else:
        q = p - 0.5
        r = q * q
        z = (((((a[0] * r + a[1]) * r + a[2]) * r + a[3]) * r + a[4]) * r + a[5]) * q / \
            (((((b[0] * r + b[1]) * r + b[2]) * r + b[3]) * r + b[4]) * r + 1)
    # transform to non-standard:
    return mean + z * sd  # !@#$% sorry, just discovered Sep. 9, 2011
def SEP(n, p):
    """
    Standard error of a proportion <p> estimated from <n> observations.
    """
    variance = p * (1 - p) / float(n)
    return math.sqrt(variance)
def iround(x):
    """iround(number) -> integer
    Round a number to the nearest integer.
    Author: Gribouillis on daniweb.com
    """
    shifted = round(float(x)) - 0.5
    # int() truncates toward zero; adding (shifted > 0) restores the
    # positive side of the shift.
    return int(shifted) + (shifted > 0)
def round_sig(number, n):
    """
    Round <number> to <n> significant figures; 0 is returned unchanged.
    """
    if number == 0:
        return 0
    magnitude = int(math.floor(math.log10(abs(number))))
    return round(number, n - 1 - magnitude)
def datecode(delimiter='', month_type='num'):
    """
    Return today's date as year, month and day joined by <delimiter>.

    :param month_type: 'num' (zero-padded number), 'short' (abbreviated
        name) or 'long' (full name).
    :raises ValueError: on an unrecognized month_type.
    """
    today = datetime.datetime.now()
    if month_type == 'num':
        month_repr = '{:02}'.format(today.month)
    elif month_type == 'short':
        month_repr = calendar.month_abbr[today.month]
    elif month_type == 'long':
        month_repr = calendar.month_name[today.month]
    else:
        raise ValueError("Invalid value {} for parameter <month_type>".format(month_type))
    parts = ('{:02}'.format(today.year), month_repr, '{:02}'.format(today.day))
    return delimiter.join(parts)
def filter_file_list(path, file_list=None, endswith=''):
    """
    Return the members of <file_list> that:
        1. Exist in <path> as regular files,
        2. Have size > 0, and
        3. End with <endswith>, if specified.
    If no <file_list> is given, every file in <path> is considered.
    """
    # None replaces the mutable default argument ([]); any falsy value
    # still triggers a directory listing, preserving the old behavior.
    if not file_list:
        file_list = os.listdir(path)
    kept = []
    for fname in file_list:
        full = os.path.join(path, fname)
        if not os.path.isfile(full) or os.stat(full).st_size == 0:
            continue
        if endswith and not fname.endswith(endswith):
            continue
        kept.append(fname)
    return kept
def prep_curve(x_y_tuples, curve_type):
    """
    Sort the (x, y) points and pad the curve ends as <curve_type> requires:
      'ROC'   - extend the curve to the (1, 0) and (0, 1) corners.
      'PR'    - add (0, y_0) and (x_n, 0) so each end drops straight to an
                axis (y_0 = first precision, x_n = last recall).
      'plain' - no padding; only the given points are used.
    :raises ValueError: on an unrecognized curve_type.
    """
    pts = sorted(x_y_tuples, key=lambda p: (p[0], -p[1]))
    if curve_type == 'PR':
        if pts[0][0] != 0:
            pts.insert(0, (0, pts[0][1]))
        if pts[-1][1] != 0:
            pts.append((pts[-1][0], 0))
    elif curve_type == 'ROC':
        if pts[0] != (1, 0):
            pts.insert(0, (1, 0))
        if pts[-1] != (0, 1):
            pts.append((0, 1))
    elif curve_type != 'plain':
        raise ValueError('Invalid value for curve_type. Got: {}'.format(curve_type))
    return pts
def MCC(TP, TN, FP, FN):
    """
    Matthews Correlation Coefficient computed from confusion-matrix counts.
    """
    numerator = TP * TN - FP * FN
    denominator = math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
    return numerator / denominator
def AUC(x_y_tuples, curve_type='PR'):
    """
    Trapezoidal area under the curve described by the (x, y) pairs,
    after padding the curve ends according to <curve_type>
    (see prep_curve for the padding rules). Segments between points are
    modeled as straight lines.
    """
    pts = prep_curve(x_y_tuples, curve_type)
    area = 0
    for (x0, y0), (x1, y1) in zip(pts, pts[1:]):
        # Trapezoid: average height times width.
        area += (y0 + y1) / 2 * (x1 - x0)
    return area
def rep(string):
    """Generator that yields an infinite supply of the given string."""
    yield from itertools.repeat(string)
# def establish_path(path_to_check, silent=False):
# if not (os.path.isdir(path_to_check) or os.path.isfile(path_to_check) or os.path.islink(path_to_check)):
# if not silent:
# print()
# "Path {} does not exist, creating ...".format(path_to_check)
# path_dirs = []
# p, q = os.path.split(path_to_check)
# print 'p: {}, q: {}'.format(p, q)
# while p != '/':
# path_dirs.append(q)
# p, q = os.path.split(p)
# print 'p: {}, q: {}'.format(p, q)
# path_dirs.append(q)
# path_dirs.append(p)
# partial_path = ''
# print path_dirs
# for path_element in path_dirs[::-1]:
# partial_path = os.path.join(partial_path, path_element)
# print partial_path
# if not (os.path.isdir(partial_path) or os.path.isfile(partial_path) or os.path.islink(partial_path)):
# os.mkdir(partial_path)
# else:
# if not silent:
# print()
# 'Path {} already exists.'.format(path_to_check)
def establish_path(path_to_check, silent=False):
    """
    Create the directory <path_to_check> (including any missing parents)
    if it does not already exist. The <silent> flag is accepted for
    backward compatibility but is currently unused.
    """
    if os.path.exists(path_to_check):
        return
    os.makedirs(path_to_check)
def bootstrap(seq, n):
    """
    Draw <n> samples from <seq> uniformly at random with replacement.
    """
    return [random.choice(seq) for _ in range(n)]
def flatten_list(nested_list):
    """
    Flatten one level of nesting: concatenate the sublists of
    <nested_list> into a single list.
    """
    return [item for sublist in nested_list for item in sublist]
def tsv(filename):
    """
    Open <filename> and return a csv.reader configured for tab-separated
    values, or None (after printing the error) if the file cannot be
    opened. Note: the underlying file handle stays open for the lifetime
    of the returned reader.
    """
    try:
        # 'rU' mode was removed in Python 3.11; the csv module documents
        # newline='' as the correct way to open reader input files.
        in_file = open(filename, 'r', newline='')
        return csv.reader(in_file, dialect='excel-tab')
    except IOError as io:
        # Fixed broken 2to3 conversions of the error messages; args may
        # contain an int errno, so stringify before joining.
        print("I/O error attempting to open {}".format(filename))
        print(", ".join(str(arg) for arg in io.args))
        return None
def convert(input, type):
    """
    Convert <input> to the type named by the string <type> ('float',
    'int' or 'str'); returns None for any other type name.
    """
    casters = {'float': float, 'int': int, 'str': str}
    caster = casters.get(type)
    return caster(input) if caster else None
def smart_convert(data_string):
    """
    Convert a raw string to the first matching type among int, float,
    bool ('True'/'False'), falling back to str. Leading/trailing
    whitespace is stripped first.
    """
    value = data_string.strip()
    for caster in (int, float):
        try:
            return caster(value)
        except ValueError:
            continue
    if value == 'True':
        return True
    if value == 'False':
        return False
    return str(value)
def sliding_mean(a, window_size=1):
    """
    Mean filter: each output element is the sum of input elements within
    <window_size> positions divided by the full window length
    (2 * window_size + 1), so edges are effectively zero-padded.
    """
    denom = float(window_size * 2 + 1)
    out = numpy.zeros(len(a))
    for center in range(len(a)):
        lo = max(0, center - window_size)
        hi = min(len(a), center + window_size + 1)
        out[center] = numpy.sum(a[lo:hi]) / denom
    return out
def freq(input_iterable, case_sensitive=True):
    """
    Count occurrences of each item in <input_iterable>; returns a dict
    mapping item -> count. When case_sensitive is False, items are
    lowercased before counting (items must then be strings).
    """
    counts = {}
    for item in input_iterable:
        if not case_sensitive:
            item = item.lower()
        counts[item] = counts.get(item, 0) + 1
    return counts
def unique(input_iterable, case_sensitive=True):
    """
    Return a list of all distinct items in <input_iterable> (order not
    specified). When case_sensitive is False, items are lowercased first.
    """
    if case_sensitive:
        items = input_iterable
    else:
        items = (element.lower() for element in input_iterable)
    return list(set(items))
def common_items(iterable_of_iterables):
    """
    Return the combined intersection (as a set) of all iterables within
    <iterable_of_iterables>.
    """
    sets = [set(it) for it in iterable_of_iterables]
    shared = set(sets[0])
    for other in sets[1:]:
        shared &= other
    return shared
def nCk(n, k):
    """
    Returns the number of combinations of n choose k (binomial coefficient).
    """
    # functools.reduce is required on Python 3, where reduce is no longer
    # a builtin (this previously raised NameError).
    from functools import reduce
    mul = lambda x, y: x * y
    return int(round(reduce(mul, (float(n - i) / (i + 1) for i in range(k)), 1)))
def partial_shuffle(sequence, n=None):
    """
    Efficiently return <n> random members of <sequence> without
    replacement (a partial Fisher-Yates shuffle). Defaults to a full
    shuffle of the sequence when n is None.

    :raises AssertionError: if n exceeds the sequence length.
    """
    if n is None:  # identity check instead of '== None' (PEP 8)
        n = len(sequence)
    pool = list(sequence)
    assert n <= len(pool)
    draw = []
    for _ in range(n):
        r = random.randint(0, len(pool) - 1)
        draw.append(pool[r])
        # Swap-and-pop keeps each removal O(1).
        if r == len(pool) - 1:
            pool.pop()
        else:
            pool[r] = pool.pop()
    return draw
def geomean(iterable):
    """
    Returns the geometric mean (the n-th root of the product of n terms)
    of a non-empty iterable.

    :raises ValueError: on an empty iterable (previously this crashed
        with a confusing UnboundLocalError / ZeroDivisionError).
    """
    n = 0
    product = 1
    for x in iterable:
        if n == 0:
            product = x
        else:
            product *= x
        n += 1
    if n == 0:
        raise ValueError('geomean() requires a non-empty iterable')
    return product ** (1 / float(n))
def confusion_matrix(precision, recall, positives, universe):
    """
    Given precision and recall for a test, as well as the number of positive results and the size of the tested space (universe),
    return a dictionary with the expected fraction of true positives, false positives, true negatives and false negatives, as well as their
    absolute numbers given the size of the universe, as well as estimates of the Real Positives and Real Negatives.

    Keys: TP/FP/TN/FN (expected counts), their *F variants (fractions of
    the universe), RP/RN (estimated real positives/negatives with
    fractions RPF/RNF), TPR (= recall = sensitivity), TNR
    (= specificity) and FPR.
    """
    assert recall > 0  # otherwise size of false negatives becomes infinite
    TP = precision * positives  # expected true positives among the calls
    TPF = TP / float(universe)
    FP = (1 - precision) * positives  # expected false positives among the calls
    FPF = FP / float(universe)
    TN = universe - TP - FP
    TNF = TN / float(universe)
    FN = TP * (1 - recall) / recall  # missed positives implied by recall
    FNF = FN / float(universe)
    RP = max(0, min(universe, TP + FN))  # estimated real positives, clamped to [0, universe]
    RPF = RP / float(universe)
    RN = universe - RP
    RNF = RN / float(universe)
    TPR = recall
    TNR = TN / float(RN)
    FPR = FP / float(RN)
    return {'TP': TP, 'TPF': TPF, 'FP': FP, 'FPF': FPF, 'TN': TN, 'TNF': TNF, 'FN': FN, 'FNF': FNF, 'RP': RP,
            'RPF': RPF, 'RN': RN, 'RNF': RNF, 'TPR': TPR, 'TNR': TNR, 'specificity': TNR, 'FPR': FPR,
            'sensitivity': recall}
def jaccard(iterable_1, iterable_2):
    """Jaccard similarity |A & B| / |A | B| of two iterables."""
    first = set(iterable_1)
    second = set(iterable_2)
    return len(first & second) / len(first | second)
def expected_overlap(universe_size, precision_A, recall_A, positives_A, precision_B, recall_B, positives_B,
                     split_values=False, search_space_integration_method='min'):
    """
    Given precision, recall, number of called positives and size of tested space (universe) for two datasets,
    A & B, return the number of expected overlapping values (intersection of positives in A with positives in B).
    If split_values is True, return the overlapping true positives and overlapping false positives as a tuple of (Ov_TP, Ov_FP)

    Note: the datasets must be filtered to include only the hits present in the intersection of the tested spaces before calculating the
    input parameters - otherwise the results are invalid.

    Note: The expectation of overlap assumes conditional independence of the errors of the two datasets - which is rare.
    Dependence will lead to an observed overlap greater than the expectation calculated here.

    <search_space_integration_method> specifies the function used to integrate the two estimates of the size of RP and RN
    for the two datasets:

        armean = arithmetic mean
        geomean = geometric mean
        min = minimum
        max = maximum

    :raises ValueError: on an unrecognized search_space_integration_method.
    """
    # print 'recall_A: {}, recall_B:{}'.format(recall_A, recall_B)
    # Zero recall on either side means zero expected true-positive overlap
    # and an undefined confusion matrix, so short-circuit.
    if recall_A == 0 or recall_B == 0:
        return 0
    # print ('recalls are OK')
    matrixA = confusion_matrix(precision_A, recall_A, positives_A, universe_size)
    matrixB = confusion_matrix(precision_B, recall_B, positives_B, universe_size)
    # Combine the two per-dataset estimates of real positives (RP) and
    # real negatives (RN) using the requested integration function.
    if search_space_integration_method == 'armean':
        consensusRP = (matrixA['RP'] + matrixB['RP']) / 2
        consensusRN = (matrixA['RN'] + matrixB['RN']) / 2
    elif search_space_integration_method == 'geomean':
        # print 'RP:'
        # print matrixA['RP'], matrixB['RP']
        # print 'RN'
        # print matrixA['RN'], matrixB['RN']
        consensusRP = math.sqrt(matrixA['RP'] * matrixB['RP'])
        consensusRN = math.sqrt(matrixA['RN'] * matrixB['RN'])
    elif search_space_integration_method == 'min':
        consensusRP = min(matrixA['RP'], matrixB['RP'])
        consensusRN = min(matrixA['RN'], matrixB['RN'])
    elif search_space_integration_method == 'max':
        consensusRP = max(matrixA['RP'], matrixB['RP'])
        consensusRN = max(matrixA['RN'], matrixB['RN'])
    else:
        raise ValueError(
            "Invalid argument for search_space_integration_method: {}".format(search_space_integration_method))
    # Expected overlap = jointly-recalled true positives plus jointly
    # false-called negatives (assuming conditional independence).
    overlapsTP = recall_A * recall_B * consensusRP
    overlapsFP = matrixA['FPR'] * matrixB['FPR'] * consensusRN
    if split_values:
        return overlapsTP, overlapsFP
    else:
        # print overlapsTP + overlapsFP
        return overlapsTP + overlapsFP
def expected_overlap_FDR(precision_A, recall_A, positives_A, FDR_A, precision_B, recall_B, positives_B, FDR_B):
    """
    Expected number of dataset-A calls that overlap dataset B, modeled as
    (true hits of A confirmed by B) + (false hits of A matched by B's FDR).

    NOTE: precision_A, recall_B, positives_B and FDR_B are currently unused
    (the symmetric B-side estimate is commented out below).
    """
    true_hit_component = positives_A * precision_B * recall_A
    false_hit_component = positives_A * (1 - precision_B) * FDR_A
    # expected_overlap_B = positives_B * precision_A * recall_B + positives_B * (1 - precision_A) * FDR_B
    return true_hit_component + false_hit_component  # , expected_overlap_B
def group_iter(lst, n):
    """group_iter([0,3,4,10,2,3], 2) => iterator

    Partition a sequence into an iterator of n-tuples; any incomplete
    trailing tuple is discarded, e.g.

    >>> list(group_iter(range(10), 3))
    [(0, 1, 2), (3, 4, 5), (6, 7, 8)]

    Author: Brian Quinlan
    Date: 2004
    URL: http://code.activestate.com/recipes/303060-group-a-list-into-sequential-n-tuples/
    """
    # n strided slices, offset 0..n-1, re-interleaved by zip
    strided_slices = [itertools.islice(lst, offset, None, n) for offset in range(n)]
    return zip(*strided_slices)
def reshape(seq, how):
    """Reshape the sequence according to the template in ``how``.
    Examples
    ========
    >>> from sympy.utilities import reshape
    >>> seq = range(1, 9)
    >>> reshape(seq, [4]) # lists of 4
    [[1, 2, 3, 4], [5, 6, 7, 8]]
    >>> reshape(seq, (4,)) # tuples of 4
    [(1, 2, 3, 4), (5, 6, 7, 8)]
    >>> reshape(seq, (2, 2)) # tuples of 4
    [(1, 2, 3, 4), (5, 6, 7, 8)]
    >>> reshape(seq, (2, [2])) # (i, i, [i, i])
    [(1, 2, [3, 4]), (5, 6, [7, 8])]
    >>> reshape(seq, ((2,), [2])) # etc....
    [((1, 2), [3, 4]), ((5, 6), [7, 8])]
    >>> reshape(seq, (1, [2], 1))
    [(1, [2, 3], 4), (5, [6, 7], 8)]
    >>> reshape(tuple(seq), ([[1], 1, (2,)],))
    (([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
    >>> reshape(tuple(seq), ([1], 1, (2,)))
    (([1], 2, (3, 4)), ([5], 6, (7, 8)))
    >>> reshape(range(12), [2, [3, set([2])], (1, (3,), 1)])
    [[0, 1, [2, 3, 4, set([5, 6])], (7, (8, 9, 10), 11)]]
    Author: Chris Smith
    Date: 14 Sep 2012
    URL: http://code.activestate.com/recipes/578262-reshape-a-sequence/
    """
    # `flatten` is supplied elsewhere in this module/its imports; it must
    # flatten the arbitrarily nested template so its integers can be summed.
    m = sum(flatten(how))
    n, rem = divmod(len(seq), m)
    if m < 0 or rem:
        raise ValueError('template must sum to positive number '
                         'that divides the length of the sequence')
    i = 0  # running cursor into seq
    how_type = type(how)
    rv = [None] * n
    for k in range(len(rv)):
        rv[k] = []
        for hi in how:
            if type(hi) is int:
                # plain integer: take the next `hi` items verbatim
                rv[k].extend(seq[i: i + hi])
                i += hi
            else:
                # nested template: recurse, then coerce to the template's own type
                n = sum(flatten(hi))
                hi_type = type(hi)
                rv[k].append(hi_type(reshape(seq[i: i + n], hi)[0]))
                i += n
        rv[k] = how_type(rv[k])
    # result container mirrors the input sequence's type (list -> list, tuple -> tuple)
    return type(seq)(rv)
def group(iterator, n=2, partial_final_item=False):
    """Yield sub-lists of *n* items from *iterator*.

    A final chunk shorter than *n* is yielded only when
    *partial_final_item* is True.

    inspired by http://countergram.com/python-group-iterator-list-function
    Author: Sandro Tosi
    Date: 11 Apr 2011
    URL: http://sandrotosi.blogspot.com/2011/04/python-group-list-in-sub-lists-of-n.html
    Modified slightly with option to return partial final items or not by Dylan Skola Oct 02, 2014
    """
    chunk = []
    for element in iterator:
        chunk.append(element)
        if len(chunk) == n:
            yield chunk
            chunk = []
    # leftover items (if any) are emitted only when allowed
    if len(chunk) != 0 and (len(chunk) == n or partial_final_item):
        yield chunk
def finite_difference(signal):
    """
    Return the first-order forward difference of *signal* as a float array:
    output[i] = signal[i+1] - signal[i] for i < len(signal) - 1.

    The final element is left at 0 so the output keeps the input's length,
    matching the original loop-based implementation.
    """
    output = numpy.zeros(len(signal))
    if len(signal) > 1:
        # numpy.diff computes the same pairwise differences in C instead of
        # a per-element Python loop
        output[:-1] = numpy.diff(signal)
    return output
def find_0_crossings(signal, start_pos, rising_falling=''):
    """
    Find all indices at which the <signal> vector crosses the 0 axis,
    scanning from <start_pos> onward.

    If <rising_falling> is 'rising', report only ascending crossings.
    If <rising_falling> is 'falling', report only descending crossings.
    If empty (default), report both.

    Bug fix: the original if/elif chain routed the empty-string case into
    the rising branch only, so descending crossings were never reported
    when no filter was requested. The two checks are now independent (a
    single index can never satisfy both, since that would require
    prev_val == 0 with signal[i] simultaneously > 0 and < 0).
    """
    if rising_falling:
        assert rising_falling in ('rising', 'falling')
    crossings = []
    prev_val = signal[start_pos]
    for i in range(start_pos, len(signal)):
        if rising_falling != 'falling' and prev_val <= 0 and signal[i] > 0:
            crossings.append(i)
        if rising_falling != 'rising' and prev_val >= 0 and signal[i] < 0:
            crossings.append(i)
        prev_val = signal[i]
    return crossings
def merge_dfs(df_sequence):
    """
    Merge a sequence of pandas DataFrames into one.

    Row and column indices of the result are the union of the inputs';
    where several inputs supply a value for the same cell, the value from
    the earliest DataFrame in the sequence wins (combine_first semantics).
    """
    merged = df_sequence[0]
    for later_df in df_sequence[1:]:
        merged = merged.combine_first(later_df)
    return merged
class Raveller(object):
    """
    Convert between scalar indices and (page, row, col) addresses within a
    fixed page/row/column hierarchy. Indices are assumed non-negative.
    """

    def __init__(self, rows_per_page, cols_per_row):
        self.cols_per_row = cols_per_row
        self.rows_per_page = rows_per_page
        # number of cells on one page
        self.items_per_page = rows_per_page * cols_per_row

    def ravel(self, page, row, col):
        """Flatten a (page, row, col) address into a single scalar index."""
        assert row < self.rows_per_page
        assert col < self.cols_per_row
        flat = page * self.items_per_page + row * self.cols_per_row + col
        return int(flat)

    def unravel(self, index):
        """Expand a scalar index back into a (page, row, col) address."""
        remainder = int(index)
        page, remainder = divmod(remainder, self.items_per_page)
        row, col = divmod(remainder, self.cols_per_row)
        return page, row, col
def robust_pcc(vector_1, vector_2, return_pval=False):
    """
    Pearson correlation that tolerates messy input:

      * positions where either vector is NaN are dropped
      * both inputs are coerced to float64 (scipy.stats.pearsonr is only
        reliable on that dtype)

    NOTE(review): a second, different `robust_pcc` is defined later in this
    module and shadows this one at import time.

    :param return_pval: if True, return the (r, p-value) pair instead of r.
    """
    v1 = numpy.asarray(vector_1, dtype=numpy.float64)
    v2 = numpy.asarray(vector_2, dtype=numpy.float64)
    valid_positions = numpy.nonzero(~(numpy.isnan(v1) | numpy.isnan(v2)))[0]
    pcc_tuple = scipy.stats.pearsonr(v1[valid_positions], v2[valid_positions])
    return pcc_tuple if return_pval else pcc_tuple[0]
# def remove_nans(vector):
# """
# Simply return a new vector with all NaN values stripped. Easier than masking.
# :param vector:
# :return:
# """
# return vector[numpy.nonzero(numpy.equal(numpy.isnan(vector), False))[0]]
def clean_array(arr):
    """
    Return a copy of 1-D :param:`arr` with all NaN, +inf and -inf entries
    removed.
    """
    # numpy.isinf is already True for both +inf and -inf, so the original's
    # extra isneginf() test was redundant.
    return arr[numpy.nonzero(~(numpy.isnan(arr) | numpy.isinf(arr)))[0]]
def remove_joint_nans(vector_1, vector_2):
    """
    Return the pair of vectors restricted to the positions where neither
    input is NaN.
    """
    keep_mask = ~(numpy.isnan(vector_1) | numpy.isnan(vector_2))
    keep_positions = numpy.nonzero(keep_mask)[0]
    return vector_1[keep_positions], vector_2[keep_positions]
def random_identifier(length, allowed_chars=ALPHANUMERIC):
    """
    Return a random identifier of *length* characters drawn from
    *allowed_chars*.

    Fix: uses random.choices (sampling WITH replacement). The original
    random.sample never repeated a character, which reduced entropy and
    raised ValueError whenever length > len(allowed_chars).
    """
    return ''.join(random.choices(allowed_chars, k=length))
class MemMap(object):
    """
    Wrap an ndarray in a disk-backed numpy memmap stored in a uniquely
    named temporary .npy file; the backing file is deleted when the
    wrapper is garbage-collected.
    """

    def __init__(self, arr, read_only=False, tmp_dir=TMP_DIR):
        # establish_path() and TMP_DIR are provided elsewhere in this module
        establish_path(tmp_dir)
        self.fname = os.path.join(tmp_dir, '{}.npy'.format(random_identifier(32)))
        numpy.save(self.fname, arr=arr)
        self.array = numpy.load(self.fname, mmap_mode='r' if read_only else 'r+')

    def __del__(self):
        try:
            os.remove(self.fname)
        except Exception as ex:
            # Fixed: the original was a broken 2to3 conversion that printed a
            # blank line and left the message as a dangling expression.
            print('Tried to remove temporary memmap file {} but caught {} instead!'.format(self.fname, ex))
def replace_with_mem_map(arr, read_only=True, tmp_dir=TMP_DIR):
    """
    Copy *arr* into a temporary on-disk MemMap and return the
    memory-mapped array view.

    NOTE(review): only the .array attribute is kept, so the MemMap wrapper
    is garbage-collected immediately and its __del__ unlinks the backing
    file. The returned memmap likely keeps working through the open file
    handle on POSIX, but this looks fragile -- confirm intended lifetime.
    """
    return MemMap(arr, read_only=read_only, tmp_dir=tmp_dir).array
def get_open_fds():
    '''
    Return the number of open file descriptors for the current process,
    counted by parsing `lsof -Ff` output (lines starting with 'f' followed
    by a numeric descriptor).

    .. warning: will only work on UNIX-like os-es (requires lsof).
    '''
    import subprocess
    pid = os.getpid()
    # Fix: check_output returns bytes on Python 3, so it must be decoded
    # before splitting on a str newline.
    procs = subprocess.check_output(
        ["lsof", '-w', '-Ff', "-p", str(pid)]).decode('utf-8', errors='replace')
    nprocs = len(
        [s for s in procs.split('\n') if s and s[0] == 'f' and s[1:].isdigit()]
    )
    return nprocs
def flexible_split(arr, num_splits, view=True):
    """
    Performs much like numpy.split() but doesn't raise an exception if the array cannot be split perfectly evenly.
    Instead the last sub-array will be of slightly-different size.
    If <num_splits> is greater than the length of <arr>, remaining sub-arrays will be empty.
    If <view> is true, return a list of views into the original array
    :param arr:
    :param num_splits:
    :return:
    """
    # NOTE(review): the <view> parameter is never consulted below; numpy
    # inputs always yield views (basic slicing) and lists always yield
    # copies, regardless of this flag.
    l = len(arr)
    # iround() is defined elsewhere in this module (integer rounding)
    offset = iround(l / float(num_splits))
    sub_arrs = []
    for i in range(num_splits):
        start_pos = i * offset
        if i < num_splits - 1:
            end_pos = (i + 1) * offset
        else:
            # the final slice absorbs any rounding remainder
            end_pos = l
        sub_arrs.append(arr[start_pos:end_pos])
    return sub_arrs
def string_compare(string_1, string_2):
    """
    Element-wise equality of two equal-length strings: returns a boolean
    array whose i-th entry is True iff string_1[i] == string_2[i].

    Fixes: dtype=numpy.bool, an alias removed in numpy >= 1.24 (use the
    builtin `bool`), and replaces the per-index Python loop with
    numpy.fromiter over zipped characters.
    """
    assert len(string_1) == len(string_2)
    return numpy.fromiter(
        (c1 == c2 for c1, c2 in zip(string_1, string_2)),
        dtype=bool, count=len(string_1))
# Some convenience functions for similarity metrics
def sse(vec_a, vec_b):
    """Sum of squared errors between two vectors."""
    residuals = vec_a - vec_b
    return (residuals ** 2).sum()
def mse(vec_a, vec_b):
    """Mean squared error between two vectors."""
    # sum-of-squared-errors inlined so this block is self-contained
    return ((vec_a - vec_b) ** 2).sum() / float(len(vec_a))
def rmse(vec_a, vec_b):
    """Root mean squared error between two vectors."""
    # mean-squared-error inlined so this block is self-contained
    mean_squared = ((vec_a - vec_b) ** 2).sum() / float(len(vec_a))
    return numpy.sqrt(mean_squared)
# Deprecated because numerically unstable at small values:
# def cosine_similarity(vec_a, vec_b):
# return numpy.dot(vec_a, vec_b) / (numpy.linalg.norm(vec_a) * numpy.linalg.norm(vec_b))
def cosine_similarity(vec_a, vec_b):
    # Cosine similarity via scipy's cosine *distance* (1 - similarity), which
    # is numerically steadier at small magnitudes than the raw dot/norm form
    # deprecated in the comment block above.
    # NOTE(review): this definition is shadowed by a later cosine_similarity
    # further down in this module, so it is dead code at import time.
    return 1 - scipy.spatial.distance.cosine(vec_a, vec_b)
def robust_pcc(vec_a, vec_b):
    """
    Pearson correlation computed directly from means and (population)
    standard deviations, so numerical overflow/underflow propagates as
    inf/-inf instead of raising.

    Bug fix: the covariance term was divided by a hard-coded 100 instead of
    the vector length, so the result was only a true correlation for
    100-element inputs; it now divides by len(vec_a).

    NOTE(review): this definition shadows the earlier robust_pcc (which has
    a different signature and NaN handling) -- probably unintentional.
    """
    a_m, b_m = vec_a.mean(), vec_b.mean()
    a_s, b_s = vec_a.std(), vec_b.std()
    return (vec_a - a_m).dot(vec_b - b_m) / (a_s * b_s) / len(vec_a)
def sign_weighed_cosine(arr_1, arr_2, alpha=0.5):
    """
    Cosine-like similarity in [-1, 1] where each position is weighted by
    the sign of (arr_1 - arr_2).

    If arr_2 is the truth and arr_1 the prediction, larger :param:`alpha`
    weights positive prediction error (false positives) more heavily than
    negative error. alpha = 0.5 reduces to plain cosine similarity.

    :param:`alpha`: a value between 0 and 1
    """
    assert 0 <= alpha <= 1
    prediction = numpy.array(arr_1)
    truth = numpy.array(arr_2)
    residual = prediction - truth
    # alpha where prediction overshoots, (1 - alpha) where it undershoots,
    # 0.5 where they agree exactly
    position_weights = numpy.where(residual > 0, alpha,
                                   numpy.where(residual < 0, 1 - alpha, 0.5))
    weighted_norm_1 = numpy.sqrt((prediction ** 2 * position_weights).sum())
    weighted_norm_2 = numpy.sqrt((truth ** 2 * position_weights).sum())
    return (prediction * truth * position_weights).sum() / (weighted_norm_1 * weighted_norm_2)
def pearson_correlation(vec_a, vec_b):
    """Pearson correlation coefficient (r only; the p-value is discarded)."""
    r, _pval = scipy.stats.pearsonr(vec_a, vec_b)
    return r
def spearman_correlation(vec_a, vec_b):
    """Spearman rank correlation (rho only; the p-value is discarded)."""
    rho, _pval = scipy.stats.spearmanr(vec_a, vec_b)
    return rho
class Serializer(object):
    """
    Bidirectional name <-> integer-index registry; indices are assigned
    sequentially starting at 0.

    NOTE(review): a second, simpler Serializer class defined later in this
    module shadows this one at import time.
    """

    def __init__(self):
        self.cur_index = -1
        self.index_to_name = []
        self.name_to_index = {}

    def add_item(self, name):
        """Register *name* (must not already exist) and return its index."""
        self.cur_index += 1
        self.index_to_name.append(name)
        assert name not in self.name_to_index
        self.name_to_index[name] = self.cur_index
        return self.cur_index

    def get_index(self, name):
        '''
        Return the existing index for <name>, registering it first if absent.
        :param name:
        :return:
        '''
        if name not in self.name_to_index:
            self.cur_index += 1
            self.name_to_index[name] = self.cur_index
            self.index_to_name.append(name)
        return self.name_to_index[name]
def semi_pcc(x, y, mean_x, mean_y):
    """
    Pearson-style correlation computed against externally supplied means
    rather than the sample means of x and y.
    """
    dev_x = x - mean_x
    dev_y = y - mean_y
    denominator = numpy.sqrt(numpy.dot(dev_x, dev_x)) * numpy.sqrt(numpy.dot(dev_y, dev_y))
    return numpy.dot(dev_x, dev_y) / denominator
# def l2_norm(arr):
# """
# Returns the L2 norm of <arr> much faster than numpy.linalg.norm
# Update: As of 08/07/2017 no longer seems faster than the numpy function (at least for ~200K inputs)
# :param x:
# :param y:
# :return:
# """
# return numpy.sqrt(numpy.dot(arr, arr))
def cosine_similarity(x, y):
    """
    Returns the cosine similarity of two vectors
    :param x:
    :param y:
    :return:
    """
    # NOTE(review): this redefinition shadows the scipy-based
    # cosine_similarity earlier in the module and reinstates exactly the
    # dot/norm formula that an earlier comment block deprecated as
    # numerically unstable at small values -- confirm which is intended.
    return numpy.dot(x, y) / (numpy.linalg.norm(x) * numpy.linalg.norm(y))
def numerical_string_sort(sequence_to_sort, reverse=False):
    """
    Returns a sorted version of <sequence_to_sort> that sorts any aligned numerical
    components of the strings in numerical, not lexicographical order.

    Raises IndexError if <sequence_to_sort> is empty (the first decomposed
    key is accessed unconditionally below).
    """
    # splits each string into alternating runs of letters and digits
    digit_parser = re.compile(r'[A-Za-z]+|\d+')
    def maybe_int(s):
        """
        Returns an integer representation of :param:`s` if a legal one exists, otherwise
        returns the string representation of :param:`s`.
        """
        try:
            return int(str(s))
        except ValueError:
            return str(s)
    def get_type_layout(key_tuple):
        """
        Returns the type of each element in :param:`key_tuple`.
        """
        return [type(element) for element in key_tuple]
    def apply_type_layout(key_tuple, layout_tuple):
        """
        Converts each element in :param:`key_tuple` using the
        corresponding type function in :param:`layout_tuple`
        """
        return [layout_element(key_element) for key_element, layout_element in zip(key_tuple, layout_tuple)]
    # decompose every item into a tuple of ints / strings
    decomposed_keys = {x:tuple([maybe_int(s) for s in re.findall(digit_parser, str(x))]) for x in sequence_to_sort}
    # trim all keys to have the same (minimal) layout of decomposed elements
    # (a field becomes str if ANY key has a string there, so int/str never compare)
    minimal_layout=get_type_layout(list(decomposed_keys.values())[0])
    for key in list(decomposed_keys.values())[1:]:
        this_layout = get_type_layout(key)
        min_len = min(len(this_layout), len(minimal_layout))
        this_layout = this_layout[:min_len]
        for field_idx in range(min_len):
            if this_layout[field_idx] == str:
                minimal_layout[field_idx] = str
    # re-cast every decomposed key into the agreed common layout
    decomposed_keys = {original_key:apply_type_layout(decomposed_key, minimal_layout) for original_key, decomposed_key in decomposed_keys.items()}
    return sorted(sequence_to_sort, key=lambda x:decomposed_keys[x], reverse=reverse)
def unmean(cur_mean, cur_N, value_to_remove):
    """
    Undo one sample's contribution to a running mean: given the mean of
    cur_N samples, return the mean of the first cur_N - 1 samples,
    assuming value_to_remove was the cur_N-th sample.
    """
    remaining = float(cur_N - 1)
    return cur_mean * (cur_N / remaining) - value_to_remove / remaining
def find_file_gzipped(base_filename, mode='r'):
    """
    Look for <base_filename>.gz first, then the plain <base_filename>;
    return an open file handle for whichever exists, or None if neither
    can be opened.
    :param base_filename:
    :return:
    """
    candidates = ((gzip.open, base_filename + '.gz'),
                  (open, base_filename))
    for opener, path in candidates:
        try:
            return opener(path, mode)
        except (IOError, OSError):
            continue
    return None
def reverse_map_dict(my_dict):
    """
    Invert a dict of iterables: the result maps each value element back to
    the list of original keys whose iterables contained it.
    """
    reversed_dict = {}
    for original_key, elements in my_dict.items():
        for element in elements:
            reversed_dict.setdefault(element, []).append(original_key)
    return reversed_dict
def argmax2d(arr):
    """
    Return the (row, col) coordinates of the maximum value of a 2D
    array-like. Ties resolve to the first occurrence in row-major order
    and NaNs are ignored; an all-NaN or empty array yields (0, 0) -- all
    matching the original Python-loop implementation (which used a strict
    `>` against -inf), but computed with a single vectorized scan.
    """
    arr = numpy.asarray(arr)
    if arr.size == 0:
        return 0, 0
    try:
        flat_index = int(numpy.nanargmax(arr))
    except ValueError:
        # every entry is NaN; the original loop never updated and fell
        # through to its (0, 0) initializers
        return 0, 0
    n_cols = arr.shape[1]
    return flat_index // n_cols, flat_index % n_cols
def invert_dict(dictionary, multi_value=False):
    """
    Return a dict keyed by the values of *dictionary*.

    With multi_value=False each value maps back to its key (later keys
    overwrite earlier ones on duplicate values). With multi_value=True the
    values are assumed iterable, and each contained element maps to the
    SET of keys whose iterables included it.
    """
    flipped_dict = {}
    if multi_value:
        for key, elements in dictionary.items():
            for element in elements:
                flipped_dict.setdefault(element, set()).add(key)
    else:
        for key, value in dictionary.items():
            flipped_dict[value] = key
    return flipped_dict
class CaselessDict(object):
    """
    Dict-like wrapper whose key operations are case-insensitive; the
    original casing of each key is preserved for iteration/keys()/items().

    Fixes relative to the original:
      * __setitem__ under a new casing of an existing key no longer leaves
        a stale entry filed under the old casing.
      * __delitem__ accepts any casing (it previously required the exact
        stored casing, breaking the caseless contract).
      * __bool__ alias added for the Python-2-only __nonzero__ (harmless
        before, since __len__ already drove truthiness in Python 3).
    """

    def __init__(self, base_dict):
        self._original_dict = base_dict
        # lowercased key -> stored (original-case) key
        self._case_translation = {}
        for key in self._original_dict.keys():
            if key.lower() in self._case_translation:
                raise ValueError('Key collision: {} in original dictionary matches existing lower case {}'.format(key, key.lower()))
            else:
                self._case_translation[key.lower()] = key

    def __getitem__(self, item_key):
        return self._original_dict[self._case_translation[item_key.lower()]]

    def __iter__(self):
        for key in self._original_dict.keys():
            yield key

    def __contains__(self, key):
        return key.lower() in self._case_translation

    def __setitem__(self, new_key, new_value):
        lower_key = new_key.lower()
        stored_key = self._case_translation.get(lower_key)
        if stored_key is not None and stored_key != new_key:
            # drop the previously stored casing so lookups stay unique
            del self._original_dict[stored_key]
        self._original_dict[new_key] = new_value
        self._case_translation[lower_key] = new_key

    def __delitem__(self, del_key):
        # resolve through the caseless index so any casing works
        stored_key = self._case_translation.pop(del_key.lower())
        del self._original_dict[stored_key]

    def __len__(self):
        return len(self._original_dict)

    def __nonzero__(self):
        return len(self._original_dict) > 0

    __bool__ = __nonzero__

    def keys(self):
        return self._original_dict.keys()

    def items(self):
        return self._original_dict.items()

    def values(self):
        return self._original_dict.values()
def array_max(arrays):
    """
    Return the element-wise maximum across a sequence of arrays.

    Fix: the original indexed arrays[1] unconditionally and therefore
    crashed (IndexError) on a single-element sequence; that case now
    returns a copy of the lone array.
    """
    running_max = numpy.array(arrays[0])  # copy, so the input is never aliased
    for arr in arrays[1:]:
        running_max = numpy.maximum(running_max, arr)
    return running_max
def symmetrize(a):
    """
    Mirror a triangular (upper or lower) matrix across its diagonal to
    produce a full symmetric matrix.
    """
    mirrored = a + a.T
    # the diagonal was summed twice above, so remove one copy of it
    return mirrored - numpy.diag(a.diagonal())
def my_normal_pdf(arr, mean=0, sigma=1):
    """
    Normal (Gaussian) probability density evaluated at every value of
    :param:`arr`. Noticeably faster than scipy.stats.norm.pdf because the
    constants are hoisted and only vectorized numpy ops remain.
    """
    normalizer = 1 / (sigma * (2 * numpy.pi) ** 0.5)
    exponent = -((arr - mean) ** 2) / (2 * sigma ** 2)
    return normalizer * numpy.exp(exponent)
def binary_int_min(func, bounds, max_iter=None, verbose=False):
    """
    Given a concave up (has one minimum and no inflection points) function of one integer, :param:`func`,
    will use gradient descent (sort of) to find the global minimum. This is useful for finding the
    index of the smallest value in an array of values generated from a concave up function.
    :param:`bounds`: (left, right) half-open integer search interval
    :param:`max_iter`: terminate if no solution found within this number of iterations
    :param:`verbose`: print extra status messages
    :return: the value of x that minimizes the value of func(x)
    """
    left, right = bounds
    done = False
    i = 0
    f_left = func(left)
    f_right = func(right-1)
    while not done:
        # probe the two middle points; their ordering tells us which half
        # contains the minimum of a concave-up function
        mid = int((right + left)/2)
        f_mid_left = func(mid)
        f_mid_right = func(mid+1)
        if verbose:
            print(i)
            print(left, mid, mid+1, right)
            print(f_left, f_mid_left, f_mid_right, f_right)
        # print(left >= mid - 1, right <= mid +2)
        if left >= mid - 1 and right <= mid +2:
            # interval has shrunk to at most the four tracked candidates
            done=True
        else:
            if f_mid_left < f_mid_right:
                # minimum lies at or left of mid
                right = mid
                f_right = f_mid_left
            else:
                # minimum lies right of mid
                left= mid + 1
                f_left = f_mid_right
            i += 1
            if max_iter and i > max_iter:
                done=True
    # return whichever of the four tracked candidates evaluated smallest
    return (left, mid, mid+1, right)[numpy.argmin((f_left, f_mid_left, f_mid_right, f_right))]
def split_half(num):
    """
    Partition the (non-negative) integer *num* into two near-equal halves,
    returned as (left_width, right_width); the right half receives the
    extra unit when num is odd.
    """
    # ToDo: Replace with Bisect module
    left_width = int(num / 2)  # truncating division
    return (left_width, num - left_width)
def hypergeometric_test(a, b, universe):
    """
    One-sided Fisher's exact test p-value for the overlap of *a* and *b*
    within *universe* (H_alt: the overlap is larger than chance).
    """
    set_a, set_b, set_universe = set(a), set(b), set(universe)
    n_overlap = len(set_a & set_b)
    n_only_a = len(set_a) - n_overlap
    n_only_b = len(set_b) - n_overlap
    n_neither = len(set_universe - (set_a | set_b))
    contingency_table = numpy.array([[n_overlap, n_only_a],
                                     [n_only_b, n_neither]])
    return scipy.stats.fisher_exact(contingency_table, alternative='greater')[1]
def welchs_ttest_onesided(a, b, alternative='greater', alpha=0.05):
    """
    One-sided Welch's t-test built on scipy.stats.ttest_ind (which is
    two-sided): the two-sided p-value is halved and the sign of t checked.

    'greater': H_1 is mean(a) > mean(b); 'lesser': H_1 is mean(a) < mean(b).
    Returns a boolean: reject H_0 at the given *alpha*.
    """
    # parameter validation inlined (was check_params) with an identical message
    assert alternative in ('greater', 'lesser'), 'Parameter {} received an invalid value {}. Valid choices: {}'.format('alternative', alternative, ','.join(('greater', 'lesser')))
    t_stat, p_two_sided = scipy.stats.ttest_ind(a, b, equal_var=False)
    required_sign = 1 if alternative == 'greater' else -1
    return p_two_sided / 2 < alpha and t_stat * required_sign > 0
def check_params(parameter_name, parameter_passed_value, valid_values):
    """
    Utility function to automate input validation and associated status messages
    """
    # Raises AssertionError with a descriptive message when the value is not
    # among the allowed choices; returns None otherwise.
    # NOTE(review): assert statements are stripped under `python -O`, so this
    # validation silently disappears in optimized runs.
    assert parameter_passed_value in valid_values, 'Parameter {} received an invalid value {}. Valid choices: {}'.format(parameter_name, parameter_passed_value, ','.join(valid_values))
def fisher_overlap(set_a, set_b, universe_size, alternative='greater'):
    """
    Fisher's exact test p-value on the overlap between set_a and set_b,
    given the total universe size. The default alternative hypothesis is
    'greater' (more overlap than expected by chance).
    """
    overlap = len(set(set_a).intersection(set_b))
    union = len(set(set_a).union(set_b))
    contingency_table = [[overlap, len(set_a) - overlap],
                         [len(set_b) - overlap, universe_size - union]]
    _oddsratio, pvalue = scipy.stats.fisher_exact(contingency_table, alternative)
    return pvalue
class Serializer():
    """
    Minimal counter-based serializer: each query hands out the next integer
    in sequence, beginning at *start*.

    NOTE(review): this class shadows the richer Serializer defined earlier
    in this module -- probably unintentional.
    """

    def __init__(self, start=0):
        self.start = start
        self.counter = start

    def enumerate_item(self, item):
        """
        Return (serial_number, item), consuming one serial number.
        """
        assigned = self.counter
        self.counter = assigned + 1
        return (assigned, item)

    def get_value(self):
        """Consume and return the next serial number."""
        assigned = self.counter
        self.counter = assigned + 1
        return assigned
class InProgress():
    """
    Status-line helper: prints '<task_message> ... ' immediately on
    construction, then appends 'done in <elapsed>' on the same line when
    .done() is called.
    """

    def __init__(self, task_message):
        self.start_time = datetime.datetime.now()
        print('{} ... '.format(task_message), end='', flush=True)

    def done(self):
        print('done in {}'.format(datetime.datetime.now() - self.start_time))
def clear_screen():
    """
    Clears the terminal buffer
    """
    # '\033c' is the ANSI/VT100 full-reset escape sequence (RIS); terminals
    # interpret it as a clear, so this is effectively a no-op when stdout is
    # not a TTY.
    print('\033c')
def group_similarity(data_matrix, group_a_columns, group_b_columns, all_corrs = None, corr_method='pearson'):
    """
    Compute pairwise sample-similarity coefficients within group A, within
    group B, and between the two groups, returned as a tuple of numpy
    arrays in that order.

    :param:`all_corrs` should be a sample-by-sample similarity matrix; if
    omitted, it is derived from *data_matrix* with
    DataFrame.corr(method=corr_method). The two groups must be disjoint.
    """
    combined_samples = sorted(set(group_a_columns).union(group_b_columns))
    # disjointness check: union size must equal the sum of group sizes
    assert len(combined_samples) == len(group_a_columns) + len(group_b_columns)
    if all_corrs is None:
        all_corrs = data_matrix.loc[:, combined_samples].corr(method=corr_method)
    between_group_corrs = numpy.array(
        [all_corrs.loc[sample_a, sample_b]
         for sample_a, sample_b in itertools.product(group_a_columns, group_b_columns)])
    group_a_corrs = numpy.array(
        [all_corrs.loc[left, right] for left, right in itertools.combinations(group_a_columns, 2)])
    group_b_corrs = numpy.array(
        [all_corrs.loc[left, right] for left, right in itertools.combinations(group_b_columns, 2)])
    return group_a_corrs, group_b_corrs, between_group_corrs
def group_similarity_test(data_matrix, group_a_columns, group_b_columns, corr_method='pearson', num_runs=50000, tail='right'):
    """
    Return an empirical p-value from a permutation test of the null hypothesis that the mean similarity between
    samples in groups a and b is the same as within each group.
    :param:`tail` If 'left', tests the alternative hypothesis that the between-group differences are less than within group, if 'right', that they are greater, if 'both', well, both.

    NOTE(review): despite the docstring, the `tail` parameter is ignored --
    the final call below hard-codes tail='right'. Confirm intent.
    NOTE(review): relies on `toolbox.empirical_p_val`; `toolbox` is not
    visible in this file's surroundings -- verify the import exists (a
    self-import would be suspicious if this module IS toolbox).
    """
    all_samples = sorted(set(group_a_columns).union(group_b_columns))
    assert len(all_samples) == len(group_a_columns) + len(group_b_columns)  # make sure no samples in both groups
    # compute the sample-by-sample correlation matrix once and reuse it for
    # every permutation
    all_corrs = data_matrix.loc[:,all_samples].corr(method=corr_method)
    real_group_a_corrs, real_group_b_corrs, real_between_group_corrs = group_similarity(data_matrix=data_matrix,
                                                                                        group_a_columns=group_a_columns,
                                                                                        group_b_columns=group_b_columns,
                                                                                        all_corrs=all_corrs,
                                                                                        corr_method=corr_method)
    # test statistic: mean within-group similarity minus mean between-group similarity
    real_diff = numpy.concatenate((real_group_a_corrs, real_group_b_corrs), axis=0).mean() - real_between_group_corrs.mean()
    shuff_diffs = numpy.zeros(num_runs)
    for i in range(num_runs):
        # permute group labels while keeping group sizes fixed
        numpy.random.shuffle(all_samples)
        shuff_group_a_samples = all_samples[:len(group_a_columns)]
        shuff_group_b_samples = all_samples[len(group_a_columns):]
        shuff_group_a_corrs, shuff_group_b_corrs, shuff_between_group_corrs = group_similarity(data_matrix=data_matrix,
                                                                                               group_a_columns=shuff_group_a_samples,
                                                                                               group_b_columns=shuff_group_b_samples,
                                                                                               all_corrs=all_corrs,
                                                                                               corr_method=corr_method)
        this_diff = numpy.concatenate((shuff_group_a_corrs, shuff_group_b_corrs), axis=0).mean() - shuff_between_group_corrs.mean()
        shuff_diffs[i] = this_diff
    real_similarity_pval = toolbox.empirical_p_val(shuff_diffs, real_diff, tail='right')
    return real_similarity_pval
def qnorm(df, axis=0):
    """
    Quantile normalize the columns (or rows, if :param:`axis`=1 (not tested)) of a pandas DataFrame :param:`df`.
    Copypasted from stackoverflow user "ayhan" (http://stackoverflow.com/questions/37935920/quantile-normalization-on-pandas-dataframe)
    Returns the normalized dataframe.
    """
    # Each value is replaced by the mean of all values that share its
    # within-column rank; ranks use method='first' to break ties by position,
    # then method='min' when mapping back.
    # NOTE(review): DataFrame.groupby(..., axis=...) is deprecated in pandas
    # 2.x -- this will warn/break on upgrade; confirm the pandas pin.
    rank_mean = df.stack().groupby(df.rank(method='first').stack().astype(int), axis=axis).mean()
    return df.rank(method='min').stack().astype(int).map(rank_mean).unstack()
def znorm(arr):
    """
    Z-score transform of :param:`arr`: subtract the mean, divide by the
    (population) standard deviation.
    """
    centered = arr - arr.mean()
    return centered / arr.std()
def l2norm(arr):
    """
    Scale :param:`arr` to unit Euclidean (L2) length.
    """
    euclidean_length = numpy.linalg.norm(arr)
    return arr / euclidean_length
def l1norm(arr):
    """
    Divide :param:`arr` by its sum so the entries total 1.0 (a true L1
    normalization when the input is non-negative).
    """
    total = arr.sum()
    return arr / total
def mean_norm(arr):
    """
    Divide :param:`arr` by its mean so the result has a mean of 1.0.
    """
    return arr / arr.mean()
def generate_contingency_table(items_1, items_2, universe):
    """
    Build a 2x2 contingency table (numpy array) for two item sets drawn
    from *universe*:

        [[in neither set,      only in items_1],
         [only in items_2,     in both        ]]

    (An unused `items_union` local in the original was removed.)
    """
    items_1 = set(items_1)
    items_2 = set(items_2)
    universe = set(universe)
    cont_table = numpy.array([[len(universe.difference(items_1.union(items_2))), len(items_1.difference(items_2))],
                              [len(items_2.difference(items_1)), len(items_1.intersection(items_2))]])
    return cont_table
def walk_up(arr, start_pos):
    """
    Greedy hill-climb: from :param:`start_pos`, repeatedly step toward the
    larger neighbor until a local maximum of :param:`arr` is reached, and
    return that index. The initial direction is picked by comparing the
    two neighbors of the start position.

    Bug fixes vs. the original:
      * start_pos == 0 read arr[-1] (Python wraparound) when choosing a
        direction, and start_pos == len(arr) - 1 raised IndexError;
      * the leftward walk could read arr[-1] at index 0 and step to -1;
      * the rightward walk read arr[len(arr)] at the last index.
    """
    n = len(arr)
    cur_pos = start_pos
    # choose the climb direction, handling the edges explicitly
    if cur_pos == 0:
        go_left = False
    elif cur_pos == n - 1:
        go_left = True
    else:
        go_left = arr[cur_pos - 1] > arr[cur_pos + 1]
    if go_left:
        while cur_pos > 0 and arr[cur_pos - 1] > arr[cur_pos]:
            cur_pos -= 1
    else:
        while cur_pos < n - 1 and arr[cur_pos + 1] > arr[cur_pos]:
            cur_pos += 1
    return cur_pos
def gzip_pickle_load(fname):
    """
    Load and return an object from a gzipped pickle file.

    Fix: the original left the gzip handle open (resource leak); a `with`
    block now guarantees it is closed.
    """
    with gzip.open(fname, 'rb') as handle:
        return pickle.load(handle)
def gzip_pickle_save(obj, fname):
    """
    Pickle *obj* into a gzipped file at *fname*.

    Fix: the original left the gzip handle open (resource leak, and the
    compressed stream may not be flushed until interpreter exit); a `with`
    block now closes and flushes it deterministically.
    """
    with gzip.open(fname, 'wb') as handle:
        pickle.dump(obj, handle)
def reflect_triu(df):
    """
    Copy the strict upper triangle of a square matrix (ndarray or
    DataFrame) into its lower triangle, reflecting across the diagonal.
    """
    assert df.shape[0] == df.shape[1]
    result = df.copy()
    upper_rows, upper_cols = numpy.triu_indices(df.shape[0], 1)
    for r, c in zip(upper_rows, upper_cols):
        try:
            result.iloc[c, r] = result.iloc[r, c]  # pandas path
        except AttributeError:
            result[c, r] = result[r, c]            # plain ndarray path
    return result
def my_diag_indices(n, k=0):
    """
    Index arrays (rows, cols) addressing the k-th diagonal of an n x n
    matrix (k > 0 above the main diagonal, k < 0 below). Provided because
    numpy has no built-in for offset diagonals.
    """
    if k > 0:
        rows = numpy.arange(0, n - k)
        cols = numpy.arange(k, n)
    else:
        rows = numpy.arange(-k, n)
        cols = numpy.arange(0, n + k)
    return (rows, cols)
def pairwise_apply(df, func, axis=1):
    """
    Returns a square matrix containing the application of a two parameter function
    to each pair of columns in :param:`df`
    """
    # NOTE(review): `axis` only sizes the output; the selection below always
    # uses df.iloc[:, idx] (columns), so axis=0 appears unsupported -- confirm
    # before relying on it.
    n = df.shape[axis]
    results = numpy.zeros((n, n))
    # evaluate func on the upper triangle (including the diagonal) only
    rows, cols = numpy.triu_indices(n, 0)
    for i in range(len(rows)):
        r = rows[i]
        c = cols[i]
        results[r, c] = func(df.iloc[:,r], df.iloc[:,c])
    # reflect_triu (defined elsewhere in this module) mirrors the computed
    # upper triangle into the lower triangle
    return reflect_triu(results)
def pairwise_apply_vec(data_vector, func):
    """
    Apply a vectorized binary function to every ordered pair of elements
    of *data_vector*, returning the full n x n result matrix.
    """
    n = len(data_vector)
    # row-constant tiling of the input; its transpose is column-constant
    tiled = numpy.repeat(numpy.array(data_vector), n).reshape(n, n)
    return func(tiled, tiled.T)
def subdivide(dividand, num_bins):
    """
    Split the integer *dividand* into *num_bins* integer parts as evenly
    as possible; the earliest bins absorb the remainder.
    """
    quotient, remainder = numpy.divmod(dividand, num_bins)
    parts = numpy.full(num_bins, fill_value=int(quotient), dtype=int)
    parts[:int(remainder)] += 1
    return parts
def roundto(num, nearest):
    """
    Round *num* to the closest multiple of *nearest* (ties round up).
    """
    shifted = num + (nearest / 2)
    return int(shifted // nearest * nearest)
def validate_param(param_name, value_received, allowable_values):
    # Assert-based argument validation; functionally parallel to
    # check_params() above, but with a differently worded message.
    # NOTE(review): stripped under `python -O`, like any assert.
    assert value_received in allowable_values, 'Received invalid value \'{}\' for parameter {}. Allowable values: {}'.format(value_received, param_name, ', '.join(allowable_values))
def truncate_array_tuple(array_tuple, prefix_trim, suffix_trim):
    """
    Trim *prefix_trim* leading and *suffix_trim* trailing elements from
    every array in *array_tuple*. A trim count of 0 (or less) leaves that
    end alone; when neither end is trimmed, the original tuple is
    returned unchanged.
    """
    start = prefix_trim if prefix_trim > 0 else None
    stop = -suffix_trim if suffix_trim > 0 else None
    if start is None and stop is None:
        return array_tuple
    return tuple([arr[start:stop] for arr in array_tuple])
def mux_2d_points(paired_coords, n):
    """
    Encode a pair of equal-length coordinate arrays (x, y) as one scalar
    array: each point becomes x * n + y.
    """
    xs, ys = paired_coords
    return n * xs + ys
def demux_2d_points(muxed_points, n):
    """
    Decode scalars produced by mux_2d_points (x * n + y) back into a
    2-tuple of (x, y) coordinate arrays.
    """
    xs, ys = numpy.divmod(muxed_points, n)
    return (xs, ys)
def glue_matrix(matrix, start_diagonal=1, truncate_rows_more=True):
    """
    Given a square matrix :param:`matrix`, return a new matrix
    consisting of the upper and lower triangles of the original
    matrix, truncated at diagonal :param:`start_diagonal`.
    If :param:`truncate_rows_more` is True, returns a matrix that is one
    column wider than tall. If False, returned matrix is one row taller
    than wide.
    """
    assert matrix.shape[0] == matrix.shape[1]
    n = matrix.shape[0]
    assert start_diagonal >= 0
    # output shape shrinks by start_diagonal, then one axis gains a unit so
    # the two shifted triangles can abut without overlapping
    if truncate_rows_more:
        new_shape = n - start_diagonal, n - start_diagonal + 1
    else:
        new_shape = n - start_diagonal + 1, n - start_diagonal
    glued_matrix = numpy.empty(new_shape, dtype=matrix.dtype)
    # coordinates of the source triangles, offset +/- start_diagonal
    row_tui, col_tui = numpy.triu_indices(n, start_diagonal)
    row_tli, col_tli = numpy.tril_indices(n, -start_diagonal)
    if truncate_rows_more:
        # shift the upper triangle left, the lower triangle up
        glued_matrix[(row_tui, col_tui - start_diagonal + 1)] = matrix[(row_tui, col_tui)]
        glued_matrix[(row_tli - start_diagonal, col_tli)] = matrix[(row_tli, col_tli)]
    else:
        glued_matrix[(row_tui, col_tui - start_diagonal)] = matrix[(row_tui, col_tui)]
        glued_matrix[(row_tli - start_diagonal + 1, col_tli)] = matrix[(row_tli, col_tli)]
    return glued_matrix
def rescale(data):
    """
    Return a copy of *data* linearly mapped onto the interval [0, 1].

    The minimum of *data* maps to 0 and the maximum to 1. The input array
    is left unmodified — the previous implementation used augmented
    assignment (``data -= ...``) and mutated the caller's array in place,
    contradicting its own docstring.

    Note: a constant input (max == min) divides by zero, as before.
    """
    data_max, data_min = data.max(), data.min()
    # Plain (non-augmented) arithmetic allocates a fresh array.
    return (data - data_min) / (data_max - data_min)
def replace_nans_diagonal_means(matrix, start_diagonal=0, end_diagonal=0):
    """
    Returns a copy of :param:`matrix` where all NaN values are replaced
    by the mean of that cell's diagonal vector (computed without NaNs).
    Requires that no diagonals consist only of NaNs (run trim_matrix_edges first)
    """
    # Diagonals are indexed by offset, so the matrix must be square.
    assert matrix.shape[0] == matrix.shape[1]
    n = matrix.shape[0]
    # Default (end_diagonal == 0) means "process every diagonal": offsets
    # -(n-1) .. n-1. Note that an explicitly passed start_diagonal is
    # overwritten in this branch.
    if end_diagonal == 0:
        end_diagonal = n - 1
        start_diagonal = -end_diagonal
    filled_matrix = matrix.copy()
    # NOTE(review): range() excludes end_diagonal itself, so the last
    # (top-right corner) diagonal is never processed — confirm intended.
    for diag_idx in range(start_diagonal, end_diagonal):
        # my_diag_indices is defined elsewhere in this module; presumably it
        # returns (row, col) index arrays for the diag_idx-th diagonal of an
        # n x n matrix — verify against its definition.
        diag_indices = my_diag_indices(n, diag_idx)
        diag_vector = matrix[diag_indices]
        bad_locs = numpy.isnan(diag_vector)
        good_locs = numpy.logical_not(bad_locs)
        # Mean over the non-NaN entries only; NaN results if the whole
        # diagonal is NaN (hence the precondition in the docstring).
        diag_mean = diag_vector[good_locs].mean()
        diag_vector[bad_locs] = diag_mean
        filled_matrix[diag_indices] = diag_vector
    return filled_matrix
def compute_matrix_trim_points(x):
    """
    Return ``(row_start, row_end, col_start, col_end)`` such that
    ``x[row_start:row_end, col_start:col_end]`` removes all *leading and
    trailing* rows / columns of *x* that consist entirely of NaNs.
    Interior all-NaN bands are left in place.

    Fixes over the original:
    - A row of x has x.shape[1] entries (and a column x.shape[0]); the
      original compared the per-row/per-column NaN counts against the
      wrong axis length, so it misbehaved on non-square matrices.
    - ``row_end_trim_point`` lacked the ``+1`` needed to convert a
      transition index into an exclusive slice bound (the column path
      already had it), trimming one valid row too many.
    """
    nan_mask = numpy.isnan(x)
    # --- rows: a row is "empty" when all x.shape[1] of its entries are NaN
    nan_rows = (nan_mask.sum(axis=1) == x.shape[1]).astype(int)
    # diff < 0 marks an empty->valid transition, diff > 0 a valid->empty one
    row_transitions = numpy.diff(nan_rows)
    row_candidate_start_trim_points = numpy.nonzero(row_transitions < 0)[0]
    if nan_rows[0] == 1 and len(row_candidate_start_trim_points) > 0:
        # first valid row is just after the first empty->valid transition
        row_start_trim_point = row_candidate_start_trim_points[0] + 1
    else:
        row_start_trim_point = 0
    row_candidate_end_trim_points = numpy.nonzero(row_transitions > 0)[0]
    if nan_rows[-1] == 1 and len(row_candidate_end_trim_points) > 0:
        # +1 converts the transition index into an exclusive slice bound
        row_end_trim_point = row_candidate_end_trim_points[-1] + 1
    else:
        row_end_trim_point = x.shape[0]
    # --- cols: a column is "empty" when all x.shape[0] of its entries are NaN
    nan_cols = (nan_mask.sum(axis=0) == x.shape[0]).astype(int)
    col_transitions = numpy.diff(nan_cols)
    col_candidate_start_trim_points = numpy.nonzero(col_transitions < 0)[0]
    if nan_cols[0] == 1 and len(col_candidate_start_trim_points) > 0:
        col_start_trim_point = col_candidate_start_trim_points[0] + 1
    else:
        col_start_trim_point = 0
    col_candidate_end_trim_points = numpy.nonzero(col_transitions > 0)[0]
    if nan_cols[-1] == 1 and len(col_candidate_end_trim_points) > 0:
        col_end_trim_point = col_candidate_end_trim_points[-1] + 1
    else:
        col_end_trim_point = x.shape[1]
    return row_start_trim_point, row_end_trim_point, col_start_trim_point, col_end_trim_point
def trim_matrix_edges(matrix):
    """
    Slice away any leading/trailing all-NaN rows and columns of *matrix*,
    as located by compute_matrix_trim_points(); interior NaN bands remain.
    """
    r_start, r_end, c_start, c_end = compute_matrix_trim_points(matrix)
    return matrix[r_start:r_end, c_start:c_end]
def isbad(data):
    """
    Return a boolean array, same shape as *data*, that is True wherever an
    entry is NaN, +inf or -inf.

    Simplified: ``numpy.isinf`` is already True for both signed infinities,
    so the original's extra ``isneginf`` term was redundant; the single
    ``~isfinite`` test covers all three cases in one pass.
    """
    return numpy.logical_not(numpy.isfinite(data))
def clean_matrix(matrix):
    """
    Return the largest contiguous submatrix of :param:`matrix` that contains
    no NaN / inf / -inf entries.

    Greedy heuristic: repeatedly drop the single row or column holding the
    most bad entries (rows win ties) until none remain.

    Fix: the original called ``plt.imshow``/``plt.show`` on every loop
    iteration — leftover debugging that popped a blocking plot window per
    removal inside a library routine. Removed.
    """
    while True:
        # Equivalent to isbad(): True for NaN and both signed infinities.
        bad = numpy.logical_not(numpy.isfinite(matrix))
        rowsums = bad.sum(axis=1)
        colsums = bad.sum(axis=0)
        max_rowsum = rowsums.max()
        max_colsum = colsums.max()
        if max_rowsum == 0 and max_colsum == 0:
            return matrix
        if max_rowsum >= max_colsum:
            # drop every row tied for the worst bad-count
            matrix = matrix[rowsums != max_rowsum, :]
        else:
            matrix = matrix[:, colsums != max_colsum]
def force_odd(num):
    """Return *num* unchanged if it is odd, otherwise the next larger integer."""
    return num if num % 2 else num + 1
def force_even(num):
    """Return *num* unchanged if it is even, otherwise the next larger integer."""
    return num + 1 if num % 2 else num
def pairwise_min_distance(vec_a, vec_b):
    """
    For two 1-D vectors, return a pair of vectors: for each element of
    vec_a the distance to its nearest element of vec_b, and for each
    element of vec_b the distance to its nearest element of vec_a.
    """
    # Full |a_i - b_j| table, shape (len(vec_a), len(vec_b)).
    abs_diffs = numpy.abs(numpy.subtract.outer(vec_a, vec_b))
    a_closest = abs_diffs.min(axis=1)
    b_closest = abs_diffs.min(axis=0)
    return a_closest, b_closest
def empirical_dx(arr, bandwidth=5):
    """
    Given a numpy.array :param:`arr` representing the
    output of a function over a uniform input, return the
    empirical derivative of that array derived
    from the finite difference of that array smoothed by
    a Gaussian kernel with scale :param:`bandwidth`.
    """
    # Smooth first so the finite difference is not dominated by noise.
    # toolbox.gaussian_kernel is defined elsewhere in this module;
    # presumably it returns a normalized 1-D Gaussian window — verify.
    # NOTE(review): scipy.convolve is a deprecated alias of numpy.convolve
    # in modern SciPy — confirm against the pinned scipy version.
    # mode='same' keeps the smoothed array the same length as arr
    # (edges are affected by zero-padding).
    smoothed_arr = scipy.convolve(arr, toolbox.gaussian_kernel(bandwidth), mode='same')
    # Result has length len(arr) - 1, one value per adjacent pair.
    arr_dx = numpy.diff(smoothed_arr)
    return arr_dx
def zero_crossings(arr):
    """
    Return the indices i at which *arr* crosses zero, i.e. where the sign
    (positive vs. non-positive) of arr[i + 1] differs from that of arr[i].
    """
    positive = arr > 0
    sign_flips = positive[1:] != positive[:-1]
    return numpy.flatnonzero(sign_flips)
def pca_reconstruction(transformed_data, pca_object, pcs_to_remove=()):
    """
    Map PCA-transformed points back to the original feature space,
    optionally dropping some principal components first.

    :param transformed_data: array of shape (n_samples, n_components),
        i.e. the output of ``pca_object.transform(...)``.
    :param pca_object: a fitted PCA object exposing ``components_`` and
        ``mean_`` (e.g. sklearn.decomposition.PCA).
    :param pcs_to_remove: indices of components to exclude from the
        reconstruction. Default changed from the mutable ``[]`` to the
        immutable ``()`` (mutable-default anti-pattern); behavior for
        callers is unchanged.
    :returns: reconstructed data of shape (n_samples, n_features).
    """
    kept_scores = numpy.delete(transformed_data, pcs_to_remove, axis=1)
    kept_components = numpy.delete(pca_object.components_, pcs_to_remove, axis=0)
    return numpy.dot(kept_scores, kept_components) + pca_object.mean_
def remove_pcs(data, pcs_to_remove=[]):
    """
    Returns a reconstruction of :param:`data` with the principal components
    listed in :param:`pcs_to_remove` removed: fits a PCA on the data,
    transforms it, and inverts the transform without those components.
    If :param:`data` is a pandas.DataFrame, the result keeps its index and
    columns; otherwise a plain array is returned.
    """
    # NOTE(review): mutable default argument; harmless here because it is
    # never mutated, but an immutable () would be safer.
    # PCA here comes from a module-level import (presumably
    # sklearn.decomposition.PCA) — not visible in this chunk.
    this_pca = PCA()
    this_pca.fit(data)
    reconstructed_data = pca_reconstruction(transformed_data=this_pca.transform(data),
                                            pca_object=this_pca,
                                            pcs_to_remove=pcs_to_remove)
    # Restore DataFrame labels when the input had them; plain ndarrays
    # (no .index/.columns) fall through unchanged.
    try:
        reconstructed_data = pandas.DataFrame(reconstructed_data, index=data.index, columns=data.columns)
    except AttributeError:
        pass
    return reconstructed_data
def scale_vec(vec, new_min, new_max):
    """
    Linearly map *vec* so that its minimum lands on *new_min* and its
    maximum on *new_max*.
    """
    lo, hi = numpy.min(vec), numpy.max(vec)
    normalized = (vec - lo) / (hi - lo)
    return normalized * (new_max - new_min) + new_min
def scale_vec_span(vec, new_magnitude=1):
    """
    Rescale *vec* so that its span (max - min) becomes *new_magnitude*.
    """
    span = numpy.max(vec) - numpy.min(vec)
    return vec / span * new_magnitude
def weight_matched_sampling(target_weights, query_weights, num_samples=0, num_bins='auto', pseudocount=1, replace=True):
    """
    Given two pandas.Series objects, :param:`target_weights` and
    :param:`query_weights`, return a numpy array of indices into
    query_weights, sampled randomly, such that the weight distribution of
    the sample approximates the weight distribution of the target.

    :param num_samples: number of indices to draw (default: len(query_weights)).
    :param num_bins: histogram bin spec for numpy.histogram, or the string
        'kde' to use Gaussian KDE density ratios instead of histograms.
    :param pseudocount: added to query bin counts to avoid division by zero.
    :param replace: sample with replacement (passed to numpy.random.choice).

    Bug fix: the body previously referenced an undefined name
    ``num_hist_bins`` instead of the ``num_bins`` parameter, raising
    NameError on every call.
    """
    if not num_samples:
        num_samples = len(query_weights)
    if num_bins == 'kde':
        # Smooth density estimates; each query point is weighted by the
        # target/query density ratio at its weight.
        smoothed_target_distro = scipy.stats.gaussian_kde(target_weights)
        smoothed_query_distro = scipy.stats.gaussian_kde(query_weights)
        prob_vector = smoothed_target_distro.pdf(query_weights.values) / smoothed_query_distro.pdf(query_weights.values)
        prob_vector /= prob_vector.sum()
    else:
        # Histogram both series over the *target's* bin edges so the
        # frequency ratios are comparable bin-by-bin.
        target_counts, bins = numpy.histogram(target_weights, bins=num_bins)
        target_freqs = target_counts / target_counts.sum()
        query_counts, _ = numpy.histogram(query_weights, bins=bins)
        query_freqs = (query_counts + pseudocount) / query_counts.sum()
        # Map each query weight to its bin; digitize against the left edges
        # so out-of-range values clamp to the first/last bin.
        query_bin_membership = numpy.digitize(query_weights, bins=bins[:-1]) - 1
        prob_vector = target_freqs[query_bin_membership] / query_freqs[query_bin_membership]
        prob_vector /= prob_vector.sum()
    return numpy.random.choice(a=query_weights.index, size=num_samples, replace=replace, p=prob_vector)
def weight_matched_sampling2(target_weights, query_weights, num_samples=0, num_bins='auto', pseudocount=1, replace=True):
    """
    Given two pandas.Series objets, :param:`target_weights` and :param:`query_weights`,
    return a numpy.Array of indices in query weights, sampled randomly with replacement,
    such that the distribution of weights in the query sample approximates the distribution of weights
    in the target.
    """
    # Variant of weight_matched_sampling: instead of weighting a single
    # choice() call, it computes how many samples each target bin needs and
    # draws that many from the query members of the bin.
    # NOTE(review): work in progress — bins with no query members are simply
    # skipped (see the print below), so fewer than num_samples indices may be
    # returned. pseudocount is accepted but unused here.
    if not num_samples:
        num_samples = len(query_weights)
    target_counts, bins = numpy.histogram(target_weights, bins=num_bins)
    target_freqs = target_counts / target_counts.sum()
    # Per-bin sample quota, proportional to the target frequency.
    needed_query_counts = numpy.round(target_freqs * num_samples).astype(int)
    query_bin_membership = pandas.Series(numpy.digitize(query_weights, bins=bins[:-1]) - 1, index=query_weights.index)
    query_samples = []
    ## Needs work to account for empty bins. Can't just do adjacent. But don't want to go too far either.
    # candidate_samples = {}
    # for bin_num in range(len(needed_query_counts)):
    #     these_candidates = query_bin_membership.loc[query_bin_membership == bin_num].index
    #     if len(these_candidates) == 0: # if no samples availalable in query, move its sample requirements to adjacent bins.
    #         if bin_num > 0:
    #             if bin_num < len(needed_query_counts) - 1:
    #                 left, right = toolbox.split_half(needed_query_counts[bin_num])
    #                 if numpy.random.rand(1) > 0.5: # prevent systematic bias toward larger bin
    #                     left, right = right, left
    #                 needed_query_counts[bin_num - 1] += left
    #                 needed_query_counts[bin_num + 1] += right
    #             needed_query_counts[bin_num - 1] += needed_query_counts[bin_num]
    #         else:
    #             needed_query_counts[bin_num + 1] += needed_query_counts[bin_num]
    #         needed_query_counts[bin_num]
    #     candidate_samples[bin_num] = these_candidates
    # for bin_num in range(len(needed_query_counts)):
    #     if candidate_samples[bin_num]
    for bin_num, this_count in enumerate(needed_query_counts):
        candidate_samples = query_bin_membership.loc[query_bin_membership == bin_num].index
        if len(candidate_samples) > 0:
            # print(bin_num, candidate_samples)
            # print('picking {} samples'.format(this_count))
            query_samples += list(numpy.random.choice(a=candidate_samples, size=this_count, replace=replace))
        else:
            print('no query samples found for bin {} [{},{})'.format(bin_num, bins[bin_num], bins[bin_num+1]))
    return query_samples
def convert_categorical_to_boolean(categorical_series):
    """
    One-hot encode *categorical_series* into a boolean DataFrame: one
    column per distinct value (ordered by first appearance), True where
    the series equals that value.
    """
    indicator_frame = pandas.DataFrame(index=categorical_series.index, dtype=bool)
    for category in categorical_series.unique():
        indicator_frame[category] = (categorical_series == category)
    return indicator_frame
|
phageghost/pg_tools
|
pgtools/toolbox.py
|
Python
|
mit
| 105,168
|
[
"Brian",
"Gaussian",
"VisIt"
] |
43244ed6291f33cc8dba69a15d3c0ad4ad011e7f39f0f17a78d579c2ba6456c3
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from nose.tools import * # noqa PEP8 asserts
from osf_tests import factories
from tests.base import OsfTestCase
from website.util import api_url_for
from website.views import find_bookmark_collection
class TestSearchViews(OsfTestCase):
    """Functional tests for the OSF search API views: contributor search
    (with pagination), project/node search, and user search (including
    social-profile URL expansion)."""
    def setUp(self):
        super(TestSearchViews, self).setUp()
        import website.search.search as search
        # Start from an empty search index so counts below are deterministic.
        search.delete_all()
        robbie = factories.UserFactory(fullname='Robbie Williams')
        self.project = factories.ProjectFactory(creator=robbie)
        self.contrib = factories.UserFactory(fullname='Brian May')
        # 12 users matching the prefix 'fr' + Robbie's 'Freddie'-free name:
        # with a page size of 5 this yields 3 pages (5, 5, 4) below.
        for i in range(0, 12):
            factories.UserFactory(fullname='Freddie Mercury{}'.format(i))
        self.user_one = factories.AuthUserFactory()
        self.user_two = factories.AuthUserFactory()
        # Four same-titled projects covering the private/public x owner grid,
        # used to exercise the includePublic flag.
        self.project_private_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=False)
        self.project_private_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=False)
        self.project_public_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=True)
        self.project_public_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=True)
    def tearDown(self):
        super(TestSearchViews, self).tearDown()
        import website.search.search as search
        # Leave a clean index for the next test class.
        search.delete_all()
    def test_search_views(self):
        #Test search contributor
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': self.contrib.fullname})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        assert_equal(len(result), 1)
        brian = result[0]
        assert_equal(brian['fullname'], self.contrib.fullname)
        assert_in('profile_image_url', brian)
        assert_equal(brian['registered'], self.contrib.is_registered)
        assert_equal(brian['active'], self.contrib.is_active)
        #Test search pagination
        res = self.app.get(url, {'query': 'fr'})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(pages, 3)
        assert_equal(page, 0)
        #Test default page 1
        res = self.app.get(url, {'query': 'fr', 'page': 1})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(page, 1)
        #Test default page 2
        res = self.app.get(url, {'query': 'fr', 'page': 2})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        page = res.json['page']
        assert_equal(len(result), 4)
        assert_equal(page, 2)
        #Test smaller pages
        res = self.app.get(url, {'query': 'fr', 'size': 5})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(page, 0)
        assert_equal(pages, 3)
        #Test smaller pages page 2
        res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 4)
        assert_equal(page, 2)
        assert_equal(pages, 3)
        #Test search projects
        url = '/search/'
        res = self.app.get(url, {'q': self.project.title})
        assert_equal(res.status_code, 200)
        #Test search node
        res = self.app.post_json(
            api_url_for('search_node'),
            {'query': self.project.title},
            auth=factories.AuthUserFactory().auth
        )
        assert_equal(res.status_code, 200)
        #Test search node includePublic true
        res = self.app.post_json(
            api_url_for('search_node'),
            {'query': 'a', 'includePublic': True},
            auth=self.user_one.auth
        )
        node_ids = [node['id'] for node in res.json['nodes']]
        assert_in(self.project_private_user_one._id, node_ids)
        assert_in(self.project_public_user_one._id, node_ids)
        assert_in(self.project_public_user_two._id, node_ids)
        # other users' private projects must never leak into results
        assert_not_in(self.project_private_user_two._id, node_ids)
        #Test search node includePublic false
        res = self.app.post_json(
            api_url_for('search_node'),
            {'query': 'a', 'includePublic': False},
            auth=self.user_one.auth
        )
        node_ids = [node['id'] for node in res.json['nodes']]
        assert_in(self.project_private_user_one._id, node_ids)
        assert_in(self.project_public_user_one._id, node_ids)
        assert_not_in(self.project_public_user_two._id, node_ids)
        assert_not_in(self.project_private_user_two._id, node_ids)
        #Test search user
        url = '/api/v1/search/user/'
        res = self.app.get(url, {'q': 'Umwali'})
        assert_equal(res.status_code, 200)
        assert_false(res.json['results'])
        user_one = factories.AuthUserFactory(fullname='Joe Umwali')
        user_two = factories.AuthUserFactory(fullname='Joan Uwase')
        res = self.app.get(url, {'q': 'Umwali'})
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json['results']), 1)
        assert_false(res.json['results'][0]['social'])
        # Social handles should be expanded into full profile URLs by the view.
        user_one.social = {
            'github': user_one.given_name,
            'twitter': user_one.given_name,
            'ssrn': user_one.given_name
        }
        user_one.save()
        res = self.app.get(url, {'q': 'Umwali'})
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json['results']), 1)
        assert_not_in('Joan', res.body)
        assert_true(res.json['results'][0]['social'])
        assert_equal(res.json['results'][0]['names']['fullname'], user_one.fullname)
        assert_equal(res.json['results'][0]['social']['github'], 'http://github.com/{}'.format(user_one.given_name))
        assert_equal(res.json['results'][0]['social']['twitter'], 'http://twitter.com/{}'.format(user_one.given_name))
        assert_equal(res.json['results'][0]['social']['ssrn'], 'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id={}'.format(user_one.given_name))
        user_two.social = {
            'profileWebsites': ['http://me.com/{}'.format(user_two.given_name)],
            'orcid': user_two.given_name,
            'linkedIn': user_two.given_name,
            'scholar': user_two.given_name,
            'impactStory': user_two.given_name,
            'baiduScholar': user_two.given_name
        }
        user_two.save()
        user_three = factories.AuthUserFactory(fullname='Janet Umwali')
        user_three.social = {
            'github': user_three.given_name,
            'ssrn': user_three.given_name
        }
        user_three.save()
        res = self.app.get(url, {'q': 'Umwali'})
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json['results']), 2)
        assert_true(res.json['results'][0]['social'])
        assert_true(res.json['results'][1]['social'])
        # two distinct Umwali users: their expanded URLs must not collide
        assert_not_equal(res.json['results'][0]['social']['ssrn'], res.json['results'][1]['social']['ssrn'])
        assert_not_equal(res.json['results'][0]['social']['github'], res.json['results'][1]['social']['github'])
        res = self.app.get(url, {'q': 'Uwase'})
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json['results']), 1)
        assert_true(res.json['results'][0]['social'])
        assert_not_in('ssrn', res.json['results'][0]['social'])
        assert_equal(res.json['results'][0]['social']['profileWebsites'][0], 'http://me.com/{}'.format(user_two.given_name))
        assert_equal(res.json['results'][0]['social']['impactStory'], 'https://impactstory.org/u/{}'.format(user_two.given_name))
        assert_equal(res.json['results'][0]['social']['orcid'], 'http://orcid.org/{}'.format(user_two.given_name))
        assert_equal(res.json['results'][0]['social']['baiduScholar'], 'http://xueshu.baidu.com/scholarID/{}'.format(user_two.given_name))
        assert_equal(res.json['results'][0]['social']['linkedIn'], 'https://www.linkedin.com/{}'.format(user_two.given_name))
        assert_equal(res.json['results'][0]['social']['scholar'], 'http://scholar.google.com/citations?user={}'.format(user_two.given_name))
class TestODMTitleSearch(OsfTestCase):
    """ Docs from original method:
    :arg term: The substring of the title.
    :arg category: Category of the node.
    :arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg includePublic: yes or no. Whether the projects listed should include public projects.
    :arg includeContributed: yes or no. Whether the search should include projects the current user has
        contributed to.
    :arg ignoreNode: a list of nodes that should not be included in the search.
    :return: a list of dictionaries of projects
    """
    def setUp(self):
        super(TestODMTitleSearch, self).setUp()
        # One node of each flavor, with distinct single-word titles so each
        # query below matches exactly the node(s) intended.
        self.user = factories.AuthUserFactory()
        self.user_two = factories.AuthUserFactory()
        self.project = factories.ProjectFactory(creator=self.user, title="foo")
        self.project_two = factories.ProjectFactory(creator=self.user_two, title="bar")
        self.public_project = factories.ProjectFactory(creator=self.user_two, is_public=True, title="baz")
        self.registration_project = factories.RegistrationFactory(creator=self.user, title="qux")
        self.folder = factories.CollectionFactory(creator=self.user, title="quux", category='project')
        # The bookmark collection doubles as a "dashboard" folder here.
        self.dashboard = find_bookmark_collection(self.user)
        self.dashboard.category = 'project'
        self.dashboard.save()
        self.url = api_url_for('search_projects_by_title')
    def test_search_projects_by_title(self):
        # own project found by exact title
        res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # another user's public project, public-only search
        res = self.app.get(self.url,
                           {
                               'term': self.public_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # own (contributed) project, contributed-only search
        res = self.app.get(self.url,
                           {
                               'term': self.project.title,
                               'includePublic': 'no',
                               'includeContributed': 'yes'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.project.title,
                               'includePublic': 'no',
                               'includeContributed': 'yes',
                               'isRegistration': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'either'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.public_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'either'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # 'either' matches both the registration and its source project
        res = self.app.get(self.url,
                           {
                               'term': self.registration_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'either'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 2)
        res = self.app.get(self.url,
                           {
                               'term': self.registration_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # folders are not searchable: asking for them is a 404
        res = self.app.get(self.url,
                           {
                               'term': self.folder.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'yes'
                           }, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
        res = self.app.get(self.url,
                           {
                               'term': self.folder.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 0)
        res = self.app.get(self.url,
                           {
                               'term': self.dashboard.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 0)
        res = self.app.get(self.url,
                           {
                               'term': self.dashboard.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'yes'
                           }, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
|
TomBaxter/osf.io
|
osf_tests/test_search_views.py
|
Python
|
apache-2.0
| 15,093
|
[
"Brian"
] |
b0dfa51c8ad881dcb11c359738e410bfbf98e66d2b2d23df2e9bd4308772dfdb
|
import os
from flask import render_template, flash, request
from subprocess import check_output
from uuid import uuid1
from app import app
from app.config import UPLOAD_FOLDER, SECRET_KEY
from werkzeug.utils import secure_filename
# --- Module-level Flask configuration (runs once at import time) ---
app.secret_key = SECRET_KEY  # required for flash() messages / sessions
# File extensions accepted by allowed_file(); lower-case only.
ALLOWED_EXTENSIONS = ['faa', 'txt', 'fasta', 'fa']
print("Uploads folder: {}".format(UPLOAD_FOLDER))
#Configure Flask process
# Ensure the upload directory exists before the first request arrives.
if not os.path.isdir(UPLOAD_FOLDER):
    os.mkdir(UPLOAD_FOLDER)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16 MB Limit
# A utility function. We only want some types of files uploaded. This returns a boolean if the file extensions matches.
def allowed_file(filename):
    """
    Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS.

    The comparison is now case-insensitive: ALLOWED_EXTENSIONS holds
    lower-case entries, so uploads like 'SEQS.FAA' were previously rejected
    for no good reason.
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# A utility function to save a file object that has been put into our web form.
def save_file(request_file_obj, batch_id):
    """
    Persist an uploaded file under this batch's upload folder.

    Returns the full path of the saved file, or None when the upload is
    missing or its extension is not whitelisted.
    """
    # Guard clause: nothing to save, or disallowed file type.
    if not request_file_obj or not allowed_file(request_file_obj.filename):
        return None
    # secure_filename strips path tricks like "../../../etc/passwd".
    safe_name = secure_filename(request_file_obj.filename)
    destination = os.path.join(app.config['UPLOAD_FOLDER'], batch_id, safe_name)
    request_file_obj.save(destination)
    return destination
@app.route('/find_orthologs', methods=['GET', 'POST'])
def find_orthologs_view():
    """
    GET: render the upload form. POST: store the two submitted amino-acid
    files under a fresh batch folder and (placeholder) echo them back.
    """
    if request.method == 'POST':
        # Fresh batch ID so concurrent uploads never overwrite each other.
        batch_id = str(uuid1())
        os.mkdir(os.path.join(app.config['UPLOAD_FOLDER'], batch_id))
        # Persist both form files; save_file returns None on bad uploads.
        path_a = save_file(request.files['file1'], batch_id)
        path_b = save_file(request.files['file2'], batch_id)
        print("PathA is: {}, PathB is: {}".format(path_a, path_b))
        if path_a is None or path_b is None:
            # Fall through to re-render the form with a flash message.
            print("File Check Failed. Paths are missing")
            flash("Please check the file type and try again. Supported extensions: {}".format(ALLOWED_EXTENSIONS))
        else:
            #------------------- Place Ortholog Script Here -------------------
            # Hook point for the reciprocal-BLAST routine; the uploaded files
            # are at path_a / path_b. check_output is called with an argument
            # list (shell=False semantics) — keep it that way; do NOT switch
            # to a shell string (injection risk).
            return '''Our Batch # was <b>{}</b> <br> Concatenated files:<br> {}'''.format(batch_id,
                                                                                         check_output(["cat",
                                                                                                       path_a,
                                                                                                       path_b]))
    return render_template('find_orthologs.html')
|
Raghavan-Lab/BioDashboard
|
app/views/find_orthologs_view.py
|
Python
|
gpl-2.0
| 3,586
|
[
"BLAST"
] |
9200f5787d0495c8b1ffaab1d8ecf891c7076dabcfbeacc69149b332dc209e9c
|
#!/usr/bin/python3
# Online Python Tutor
# Copyright (C) 2010-2011 Philip J. Guo (philip@pgbovine.net)
# https://github.com/pgbovine/OnlinePythonTutor/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# A full logger for Python program execution
# (based on pdb, the standard Python debugger)
# This is the meat of the Online Python Tutor back-end. It implements a
# full logger for Python program execution (based on pdb, the standard
# Python debugger imported via the bdb module), printing out the values
# of all in-scope data structures after each executed instruction.
# Note that I've only tested this logger on Python 2.5, so it will
# probably fail in subtle ways on other Python 2.X (and will DEFINITELY
# fail on Python 3.X).
# upper-bound on the number of executed lines, in order to guard against
# infinite loops
MAX_EXECUTED_LINES = 200
def set_max_executed_lines(m):
    """Override the global cap on executed lines (the infinite-loop guard)."""
    global MAX_EXECUTED_LINES
    MAX_EXECUTED_LINES = m
import sys
import bdb # the KEY import here!
import os
import re
import traceback
import io
import p4_encoder
# Bookkeeping names injected into user frames by the logger/harness that
# should never be rendered in the visualization.
IGNORE_VARS = set(('__stdout__', '__builtins__', '__name__', '__exception__', '__locals__', '__qualname__'))
def get_user_stdout(frame):
    """
    Return everything the user's program has written to stdout so far.

    Assumes the harness stored a StringIO-like object (anything with
    .getvalue()) under '__stdout__' in the frame's globals — that setup
    happens outside this function (presumably in _runscript; confirm).

    Cleanup: the original bound ``frame.f_globals`` to a local it never
    used and carried commented-out debug prints; both removed.
    """
    return frame.f_globals['__stdout__'].getvalue()
def get_user_globals(frame):
    """Return the frame's globals minus bookkeeping names and '__return__'."""
    filtered = filter_var_dict(frame.f_globals)
    # '__return__' is meaningful only as a function-local (stashed return
    # value); at module scope it is noise, so drop it here but NOT in
    # get_user_locals.
    filtered.pop('__return__', None)
    return filtered
def get_user_locals(frame):
    """Return the frame's locals with bookkeeping names (IGNORE_VARS) removed."""
    return filter_var_dict(frame.f_locals)
def filter_var_dict(d):
    """Return a shallow copy of *d* without the names listed in IGNORE_VARS."""
    return {name: value for name, value in d.items() if name not in IGNORE_VARS}
# -----------EPW Postprocessor to remove some aliases ------------
# Maps compound-value tags (as emitted by the encoder) to their "P_"-prefixed
# alias tags; used by make_aliases_explicit() below to mark repeat renderings
# of an already-seen structure.
remap_tags = {'LIST':'P_LIST',
              'SET':'P_SET',
              'DICT':'P_DICT',
              'TUPLE':'P_TUPLE',
              'INSTANCE': 'P_INSTANCE',
              'CLASS' : 'P_CLASS'}
def make_aliases_explicit(trace_snapshot):
    """ For hi-fidelity and for teaching aliases / pass-by-reference,
    we prefer to only render an aliased structure only once per snapshot.
    So we look through the trace datastructure and reduce
    duplicate instances to alias equivalents.
    """
    # Encoded values are lists shaped [TAG, id, elem, elem, ...]; the first
    # full rendering of each id is kept, later occurrences collapse to the
    # two-element alias form [P_TAG, id].
    def reduce_aliases(bindings, seen_ids):
        """ process a dictionary of bindings. """
        new_bindings = {}
        if type(bindings) != dict:
            return bindings # an alias into the old structure
        for (name, val) in bindings.items():
            new_bindings[name] = walk_structure(val, seen_ids)
        return new_bindings
    def walk_structure(val, seen_ids):
        """ Process a potentially nested data structure """
        if type(val) is not list: return val # an alias into the old structure
        if len(val) <= 2: return val # an alias into the old structure
        tag = val[0]
        if tag not in remap_tags: return val # an alias into the old structure
        id = val[1]
        # Already rendered somewhere earlier in this snapshot: emit alias.
        if id in seen_ids:
            return [remap_tags[tag], id]
        result = val[:2]
        for xs in val[2:]: # walk all elems in the structure
            result.append(walk_structure(xs, seen_ids))
        # remember that we've dealt with this structure
        # (in-place |= so the single seen_ids set is shared across the whole
        # traversal, including across stack frames)
        seen_ids |= {id}
        return result
    seen_ids = set()
    new_snapshot = {}
    ## print("trace_snapshot is ", trace_snapshot);
    ## print("type of trace_snapshot is", type(trace_snapshot))
    # Order matters: globals are walked first, then each stack frame, so the
    # first rendering wins deterministically.
    for (key, val) in trace_snapshot.items():
        if key == "globals":
            new_snapshot[key] = reduce_aliases(val, seen_ids)
        elif key == "stack_locals":
            newframes = []
            for frme in val:
                framename = frme[0]
                old_bindings = frme[1]
                newframes.append([framename, reduce_aliases(old_bindings, seen_ids)])
            new_snapshot[key] = newframes
        else:
            # line / event / stdout etc. pass through untouched
            new_snapshot[key] = val
    return new_snapshot
#------------------------------------------------------------------
class PGLogger(bdb.Bdb):
def __init__(self, finalizer_func, ignore_id=False):
bdb.Bdb.__init__(self)
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# a function that takes the output trace as a parameter and
# processes it
self.finalizer_func = finalizer_func
# each entry contains a dict with the information for a single
# executed line
self.trace = []
# don't print out a custom ID for each object
# (for regression testing)
self.ignore_id = ignore_id
    def reset(self):
        """Reset bdb's internal debugger state plus this logger's cached stack."""
        bdb.Bdb.reset(self)
        self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
    def setup(self, f, t):
        """Capture the call stack for frame *f* / traceback *t* and select
        the current frame (bdb.get_stack returns the stack plus an index)."""
        self.forget()
        self.stack, self.curindex = self.get_stack(f, t)
        self.curframe = self.stack[self.curindex][0]
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
self.interaction(frame, None, 'call')
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.canonic(frame.f_code.co_filename) != "<string>" or
frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
self.interaction(frame, None, 'step_line')
    def user_return(self, frame, return_value):
        """This function is called when a return trap is set here."""
        # Stash the return value under '__return__' in the frame's locals so
        # interaction() encodes it alongside the other locals.
        frame.f_locals['__return__'] = return_value
        self.interaction(frame, None, 'return')
def user_exception(self, frame, exc_info):
exc_type, exc_value, exc_traceback = exc_info
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
self.interaction(frame, exc_traceback, 'exception')
# General interaction function
    def interaction(self, frame, traceback, event_type):
        """Record one execution step: snapshot the stack, encode globals and
        per-frame locals into a JSON-friendly form, and append a trace entry.
        Enforces the MAX_EXECUTED_LINES cap."""
        if (frame.f_code.co_filename != '<string>'): return # don't simulate non-user code
        self.setup(frame, traceback)
        # tos = (frame, lineno) for the frame being recorded
        tos = self.stack[self.curindex]
        lineno = tos[1]
        # each element is a pair of (function name, ENCODED locals dict)
        encoded_stack_locals = []
        # climb up until you find '<module>', which is (hopefully) the global scope
        i = self.curindex
        while True:
            cur_frame = self.stack[i][0]
            cur_name = cur_frame.f_code.co_name
            if cur_name == '<module>':
                break
            # special case for lambdas - grab their line numbers too
            if cur_name == '<lambda>':
                cur_name = 'lambda on line ' + str(cur_frame.f_code.co_firstlineno)
            elif cur_name == '':
                cur_name = 'unnamed function'
            # encode in a JSON-friendly format now, in order to prevent ill
            # effects of aliasing later down the line ...
            encoded_locals = {}
            for (k, v) in get_user_locals(cur_frame).items():
                # don't display some built-in locals ...
                if k not in { '__module__', '__doc__' } : # (EPW: suppress __doc__ in class defn )
                    encoded_locals[k] = p4_encoder.encode(v, self.ignore_id)
            encoded_stack_locals.append((cur_name, encoded_locals))
            i -= 1
        # encode in a JSON-friendly format now, in order to prevent ill
        # effects of aliasing later down the line ...
        encoded_globals = {}
        for (k, v) in get_user_globals(tos[0]).items():
            #print("getting user globals %s --> %s" % (k,v))
            if k not in { '__doc__'} : # (EPW: suppress __doc__ at module level)
                encoded_globals[k] = p4_encoder.encode(v, self.ignore_id)
        #print("Got trace_entry")
        #print(type(tos), len(tos))
        frame = tos[0]
        #print("type elem1", type(frame))
        f_code = frame.f_code
        #print("type f_code", type(f_code))
        co_name = f_code.co_name
        #print("type co_name", type(co_name), co_name)
        trace_entry = dict({'line':lineno,
                            'event':event_type,
                            'func_name':co_name,
                            'globals':encoded_globals,
                            'stack_locals':encoded_stack_locals,
                            'stdout':get_user_stdout(tos[0])} )
        #print("Looking for exception")
        #print(trace_entry)
        # if there's an exception, then record its info:
        if event_type == 'exception':
            # always check in f_locals
            exc = frame.f_locals['__exception__']
            trace_entry['exception_msg'] = exc[0].__name__ + ': ' + str(exc[1])
        # when instantiating objects there is a noop step
        # due to hiding of __qualname__, don't show it.
        '''dup = False
        if len(self.trace) > 0:
            import copy
            newView = copy.copy(trace_entry)
            newView["event"]="-"
            oldView = copy.copy(self.trace[-1])
            oldView["event"]="-"
            dup = (newView == oldView)'''
        #if (not dup):
        self.trace.append(trace_entry)
        # Runaway-program guard: append a sentinel entry and abort the run
        # once the step budget is exhausted.
        if len(self.trace) >= MAX_EXECUTED_LINES:
            self.trace.append(dict(event='instruction_limit_reached', exception_msg='(stopped after ' + str(MAX_EXECUTED_LINES) + ' steps to prevent possible infinite loop)'))
            self.force_terminate()
        self.forget()
def _runscript(self, script_str):
    """Compile and trace *script_str* inside a restricted namespace.

    Redirects the traced program's stdout into an in-memory buffer,
    runs the code under the Bdb tracer, and converts any uncaught
    exception into a final 'uncaught_exception' trace event.
    """
    # When bdb sets tracing, a number of call and line events happens
    # BEFORE debugger even reaches user's code (and the exact sequence of
    # events depends on python version). So we take special measures to
    # avoid stopping before we reach the main script (see user_line and
    # user_call for details).
    self._wait_for_mainpyfile = True
    # ok, let's try to sorta 'sandbox' the user script by not
    # allowing certain potentially dangerous operations
    # (historically names like 'open' / 'eval' were filtered here;
    # imports are allowed now, so every builtin is passed through):
    user_builtins = {}
    for (k,v) in __builtins__.items():
        user_builtins[k] = v
    # redirect stdout of the user program to a memory buffer.
    # This slightly more elaborate version than just using __stdout__
    # is recommended in the Python3 docs, and also works when running
    # in the PyScripter IDE.
    self.saved_stdout = sys.stdout  # save stdout for later restoration
    user_stdout = io.StringIO()
    sys.stdout = user_stdout  # redirect
    user_globals = {"__name__" : "__main__",
                    "__builtins__" : user_builtins,
                    "__stdout__" : user_stdout}
    try:
        self.run(script_str, user_globals, user_globals)
    except SystemExit:
        # the traced program called sys.exit: propagate a clean exit
        sys.exit(0)
    except:
        traceback.print_exc()  # uncomment this to see the REAL exception msg
        # record the uncaught exception as the last trace event
        trace_entry = dict(event='uncaught_exception')
        exc = sys.exc_info()[1]
        if hasattr(exc, 'lineno'):
            trace_entry['line'] = exc.lineno
        if hasattr(exc, 'offset'):
            trace_entry['offset'] = exc.offset
        if hasattr(exc, 'msg'):
            trace_entry['exception_msg'] = "Error: " + exc.msg
        else:
            trace_entry['exception_msg'] = "Unknown error"
        self.trace.append(trace_entry)
        self.finalize()
        sys.exit(0)  # need to forceably STOP execution
def force_terminate(self):
    """Finalize the collected trace and abort execution immediately."""
    self.finalize()
    sys.exit(0)  # need to forceably STOP execution
def finalize(self):
    """Post-process self.trace and hand it to the finalizer callback.

    Restores the real stdout, rewrites every entry with explicit
    aliases, truncates everything after the module-level 'return'
    event, and drops a trailing module return that directly follows
    an exception.
    """
    old_trace_sz = len(self.trace)  # kept for debugging; not otherwise used
    sys.stdout = self.saved_stdout  # restore the original stream
    assert len(self.trace) <= (MAX_EXECUTED_LINES + 2)
    # filter all entries after 'return' from '<module>', since they
    # seem extraneous:
    res = []
    for e in self.trace:
        # make aliases explicit in each entry before emitting it
        res.append(make_aliases_explicit(e))
        if e['event'] == 'return' and e['func_name'] == '<module>':
            break
    # another hack: if the SECOND to last entry is an 'exception'
    # and the last entry is return from <module>, then axe the last
    # entry, for aesthetic reasons :)
    if len(res) >= 2 and \
       res[-2]['event'] == 'exception' and \
       res[-1]['event'] == 'return' and res[-1]['func_name'] == '<module>':
        res.pop()
    self.trace = res  # use this if you don't want singletons for aliases
    self.finalizer_func(self.trace)
# the MAIN meaty function!!!
def exec_script_str(script_str, finalizer_func, ignore_id=False, stdin=""):
    """Trace *script_str* and report the result through *finalizer_func*.

    The traced program's standard input is fed from the *stdin*
    string.  Fix: the original replaced sys.stdin and never restored
    it; it is now restored even when the traced program terminates
    via SystemExit.
    """
    logger = PGLogger(finalizer_func, ignore_id)
    saved_stdin = sys.stdin
    sys.stdin = io.StringIO(stdin)  # feed the user program its input
    try:
        logger._runscript(script_str)
        logger.finalize()
    finally:
        sys.stdin = saved_stdin
import pprint
def exec_file_and_pretty_print(mainpyfile):
    """Trace the given script file and pretty-print every trace entry."""
    if not os.path.exists(mainpyfile):
        print('Error: ' + mainpyfile + ' does not exist')
        sys.exit(1)

    def show_entries(entries):
        # Finalizer callback: dump each trace event to stdout.
        for entry in entries:
            pprint.pprint(entry)

    source = open(mainpyfile).read()
    output_lst = exec_script_str(source, show_entries)
def exec_file_and_dump(rootname, isString = False):
    """Trace a program and dump its code + trace as JS-style mock data.

    If *isString* is false, *rootname* names a file '<rootname>.py'
    and the output goes to '<rootname>.trace'; otherwise *rootname*
    IS the program text and the output goes to stdout.

    Fix: the original unconditionally called outf.close(), which
    closed sys.stdout in the isString case; the stream is now closed
    only when this function opened it.
    """
    mainpyfile = rootname + ".py"
    outputfile = rootname + ".trace"
    if not isString:
        if not os.path.exists(mainpyfile):
            print('Error: ' + mainpyfile + ' does not exist')
            sys.exit(1)

    def dump_to_outf(output_lst):
        # Finalizer callback: write the trace as a JS array literal.
        import json
        sep = "the_trace = [\n"
        for e in output_lst:
            ppje = json.dumps(e)
            outf.write(sep)
            outf.write(ppje)
            outf.write('\n')
            sep = ','
        outf.write(']\n')

    if isString:
        prog = rootname
    else:
        prog = open(mainpyfile).read()
    # escape the source so it can live inside a double-quoted JS string
    newtext = prog.replace("\n", "\\n")
    newtext = newtext.replace('"', "'")
    if isString:
        outf = sys.stdout
    else:
        outf = open(outputfile, "w")
    outf.write('// mock data for UI, generated by a tool.\n\n')
    outf.write('the_code = "')
    outf.write(newtext)
    outf.write('"\n\n')
    output_lst = exec_script_str(prog, dump_to_outf)
    if not isString:
        outf.close()
if __name__ == '__main__':
    # need this round-about import to get __builtins__ to work :0
    # Without this, only on the command line, __builtins__ near
    # the top of _runscript resolves to the
    # module rather than the dict. (Under PyScripter
    # execution, it works ok).
    import p4_logger
    # Read the whole program text from stdin, trace it, and dump the
    # mock-data output to stdout (isString=True).
    p4_logger.exec_file_and_dump("\n".join(sys.stdin.readlines()), True)
|
cemc/python3jail
|
static/maketrace/p4_logger.py
|
Python
|
gpl-3.0
| 16,405
|
[
"EPW"
] |
ef4b2a752521983621babfe30fc116e80ad5f5aea9a26f341c1c6c0accf8f970
|
import math
import heapq
import vmdutil
import re
from collections import namedtuple
from vmdutil import vmddef
from vmdutil import pmxutil
from vmdutil import pmxdef
from vmdutil import vmdmotion
FRAME_MIN = 0  # smallest valid VMD frame number
FRAME_MAX = 4294967295  # UINT32_MAX
class PriorityQueue():
    """Thin min-heap wrapper whose pop/top return None when empty."""

    def __init__(self):
        # NOTE(review): 'items' is never used by this class; kept so
        # the public attribute set stays unchanged.
        self.items = set()
        self.queue = []

    def push(self, n):
        """Insert *n* into the heap."""
        heapq.heappush(self.queue, n)

    def pop(self):
        """Remove and return the smallest item, or None when empty."""
        return heapq.heappop(self.queue) if self.queue else None

    def top(self):
        """Return the smallest item without removing it, or None when empty."""
        return self.queue[0] if self.queue else None
class FrameRange():
    """Set of inclusive [begin, end] frame intervals.

    A FrameRange built with frame_ranges=None is unbounded: every
    frame is in range and no frame is over the maximum.

    Fixes: replaced the `True if x else False` anti-idiom and the
    manual loop-with-early-return with a direct boolean / any().
    """

    def __init__(self, frame_ranges=None):
        # frame_ranges: list of (begin, end) inclusive pairs, or None
        self.frame_ranges = frame_ranges
        if frame_ranges is not None:
            self.min_frame = min([r[0] for r in frame_ranges])
            self.max_frame = max([r[1] for r in frame_ranges])
        else:
            self.min_frame = 0
            self.max_frame = FRAME_MAX

    def is_in_range(self, frame_no):
        """Return True when *frame_no* falls inside any interval."""
        if self.frame_ranges is None:
            return True
        return any(r[0] <= frame_no <= r[1] for r in self.frame_ranges)

    def is_over_max(self, frame_no):
        """Return True when *frame_no* is past the last interval's end."""
        if self.frame_ranges is None:
            return False
        return frame_no > self.max_frame
def replace_bonedef_position(bone1, bone2, axis):
    """Return bone1 with its position replaced by bone2's on given axes.

    *axis* is a collection of component indexes (0=x, 1=y, 2=z) that
    should be taken from bone2; every other component keeps bone1's
    value.  Both bones must be namedtuple-like (support _replace).
    """
    merged = tuple(
        bone2.position[i] if i in axis else bone1.position[i]
        for i in range(len(bone1.position)))
    return bone1._replace(position=merged)
# One scheduled event in the processing queue, ordered by frame_no.
MotionFrame = namedtuple('MotionFrame', 'frame_no type model_id bone_name')
# type:
#  'o': key frames of overwrite bones
#  'b': bones(watcher, target, ext)
#  'c': camera frames of 'cut'
#  'v': camera frames
#  'r': delay
#  'u': additional
class LookAt():
def __init__(self, watcher_pmx_name, watcher_vmd_name):
    """Configure a look-at solver for the watching model and motion.

    watcher_pmx_name: path of the watching model (.pmx)
    watcher_vmd_name: path of the watcher's motion (.vmd)
    All other settings start from defaults and are adjusted through
    the set_* methods before calling look_at().
    """
    self.watcher_pmx_name = watcher_pmx_name
    self.watcher_vmd_name = watcher_vmd_name
    self.target_pos = (0, 0, 0)  # fixed target (target_mode 'FIXED')
    self.frame_ranges = FrameRange()  # unbounded by default
    self.target_vmd_name = None
    self.target_pmx_name = None
    self.target_mode = 'FIXED'  # 'FIXED' | 'CAMERA' | 'MODEL'
    self.point_mode = 'FACE'  # 'FACE' | 'ARM'
    # bones whose rotation gets overwritten: neck, head, both-eyes
    self.overwrite_bones = ['首', '頭', '両目']
    self.target_bone = '両目'  # bone watched in 'MODEL' mode
    self.target_bone_has_motion = False
    # constraint = [(max rotation in degrees per axis), (per-axis weight)]
    self.DEFAULT_CONTSTRAINT = [(179.0, 179.0, 179.0), (1, 1, .5)]
    self.constraints = {
        '首': [(10, 20, 10), (1, 1, .8)],
        '頭': [(30, 40, 20), (1, 1, .8)],
        '両目': [(20, 30, 0), (1, 1, 0)],
    }
    # per-axis ratios of original VMD motion mixed into the result
    self.vmd_blend_ratios = {
        '首': (0, 0, 0),
        '頭': (0, 0, 0),
        '両目': (0, 0, 0),
    }
    self.forward_dirs = {
        '首': (0, 0, -1),
        '頭': (0, 0, -1),
        '両目': (0, 0, -1),
    }
    self.up_blend_weight = {
        '首': 1.0,
        '頭': 1.0,
        '両目': 1.0,
    }
    self.watcher_extlink = None
    self.ignore_zone = math.radians(140)  # yaw beyond which target is ignored
    self.global_up = (0, 1, 0)
    self.omega_limit = math.pi / 40  # max rotation speed (rad/frame)
    self.additional_frame_nos = []
    self.near_mode = False
    self.vmd_lerp = False
    self.use_vmd_interpolation = False
    # keys into self.bone_defs / self.bone_dict
    self.WATCHER = 0
    self.TARGET = 1
    self.WATCHER_EX = 2
    self.bone_defs = {}
    self.bone_dict = {}
def set_target_pos(self, pos):
    """Set the fixed world position to watch (target_mode 'FIXED')."""
    self.target_pos = pos
def set_target_vmd(self, vmd_name):
    """Set the target motion (.vmd) file path."""
    self.target_vmd_name = vmd_name
def set_target_pmx(self, pmx_name):
    """Set the target model (.pmx) file path."""
    self.target_pmx_name = pmx_name
def set_point_mode(self, mode='FACE'):
    """Choose how bones point at the target: 'FACE' or 'ARM'."""
    self.point_mode = mode
def set_overwrite_bones(self, bone_names, constraints=None):
    """Choose the bones to overwrite, with optional per-bone constraints.

    A bone with an entry in *constraints* uses it; a bone without any
    pre-existing constraint falls back to DEFAULT_CONTSTRAINT; a bone
    that already has a constraint keeps it.
    """
    self.overwrite_bones = bone_names
    for name in bone_names:
        if constraints and name in constraints:
            self.constraints[name] = constraints[name]
        elif name not in self.constraints:
            self.constraints[name] = self.DEFAULT_CONTSTRAINT
def set_target_bone(self, bone_name):
    """Set the target model's bone to watch (target_mode 'MODEL')."""
    self.target_bone = bone_name
def set_frame_ranges(self, frame_ranges):
    """Restrict processing to the given inclusive (begin, end) ranges."""
    self.frame_ranges = FrameRange(frame_ranges)
def set_omega_limit(self, limit):
    """Set the maximum allowed rotation speed in radians per frame."""
    self.omega_limit = limit
def set_ignore_zone(self, zone):
    """Set the yaw angle (radians) beyond which the target is ignored."""
    self.ignore_zone = zone
def set_constraint(self, bone_name, constraint):
    """Replace the constraint of *bone_name*; unknown names are ignored."""
    if bone_name not in self.constraints:
        return
    self.constraints[bone_name] = constraint
def set_vmd_blend_ratio(self, bone_name, ratio):
    """Set per-axis blend ratios of original VMD motion for a bone."""
    self.vmd_blend_ratios[bone_name] = ratio
def set_forward_dir(self, bone_name, dir):
    """Set (and normalize) the bone's forward direction vector."""
    self.forward_dirs[bone_name] = vmdutil.normalize_v(dir)
def set_up_blend_weight(self, bone_name, weight):
    """Set the weight applied to upward VMD pitch when blending."""
    self.up_blend_weight[bone_name] = weight
def set_near_mode(self, b):
    """Enable/disable near-target tracking from the leaf bone."""
    self.near_mode = b
def set_vmd_lerp(self, b):
    """Enable/disable mixing look-at angles with original VMD angles."""
    self.vmd_lerp = b
def set_use_vmd_interpolation(self, b):
    """Enable/disable reuse of VMD interpolation (per-bone pass mode)."""
    self.use_vmd_interpolation = b
def set_additional_frames(self, frame_nos):
    """Set extra frame numbers to evaluate besides existing key frames."""
    self.additional_frame_nos = frame_nos
def set_watcher_external_link(self, bone_name, pmx_name, vmd_name):
    """Link the watcher to a bone of another model/motion pair."""
    self.watcher_extlink = (bone_name, pmx_name, vmd_name)
def add_frames(self, queue):
    """Queue the user-requested extra frame numbers as 'u' events."""
    for frame_no in self.additional_frame_nos:
        queue.push(MotionFrame(frame_no, 'u', -1, 'A'))
def need_vmd_blend(self):
    """Return True when any VMD blend ratio component is positive.

    Blending never applies when VMD-interpolation mode is active.
    Fix: replaced the nested loop-with-early-return with any().
    """
    if self.use_vmd_interpolation:
        return False
    return any(
        component > 0
        for ratio in self.vmd_blend_ratios.values()
        for component in ratio)
def load(self):
    """Load all configured PMX/VMD files and cache bone defs/motions.

    Determines target_mode from the target VMD header: camera motion
    data switches to 'CAMERA'; otherwise a target PMX is required and
    the mode becomes 'MODEL'.  Raises Exception when a model-target
    VMD is given without its PMX.
    """
    self.watcher_pmx = pmxutil.Pmxio()
    self.watcher_pmx.load(self.watcher_pmx_name)
    self.watcher_vmd = vmdutil.Vmdio()
    self.watcher_vmd.load(self.watcher_vmd_name)
    self.bone_defs[self.WATCHER] = self.watcher_pmx.get_elements('bones')
    self.watcher_motions = self.watcher_vmd.get_frames('bones')
    if self.target_vmd_name:
        self.target_vmd = vmdutil.Vmdio()
        self.target_vmd.load(self.target_vmd_name)
        if vmdutil.is_camera_header(self.target_vmd.header):
            self.target_mode = 'CAMERA'
            self.target_motions = self.target_vmd.get_frames('cameras')
        else:
            if not self.target_pmx_name:
                raise Exception('pmx not setted')
            else:
                self.target_pmx = pmxutil.Pmxio()
                self.target_pmx.load(self.target_pmx_name)
                self.target_mode = 'MODEL'
                self.target_motions = self.target_vmd.get_frames('bones')
                self.bone_defs[self.TARGET] = self.target_pmx.get_elements(
                    'bones')
    if self.watcher_extlink is not None:
        # external link: load the other model/motion pair as well
        self.watcher_extlink_pmx = pmxutil.Pmxio()
        self.watcher_extlink_pmx.load(self.watcher_extlink[1])
        self.watcher_extlink_vmd = vmdutil.Vmdio()
        self.watcher_extlink_vmd.load(self.watcher_extlink[2])
        self.bone_defs[self.WATCHER_EX] = (
            self.watcher_extlink_pmx.get_elements('bones'))
def check_bones(self, bone_names, bone_dict):
    """Return True when every name in *bone_names* exists in *bone_dict*.

    Fix: replaced the manual loop-with-early-return with all().
    """
    return all(name in bone_dict for name in bone_names)
def make_arm_dir(self):
    """Derive a base direction for every overwrite bone from PMX display
    directions, propagating each leaf's direction to its ancestors.

    Returns a dict {bone_index: direction vector}.
    """
    base_dirs = {}
    leaf_indexes = self.watcher_transform.leaf_indexes
    graph = self.watcher_transform.transform_bone_graph
    bone_defs = self.watcher_transform.bone_defs
    for leaf_index in leaf_indexes:
        if leaf_index in self.overwrite_indexes:
            bone_def = bone_defs[leaf_index]
            if (bone_def.flag & pmxdef.BONE_DISP_DIR ==
                    pmxdef.BONE_DISP_DIR):
                # display direction points at another bone: use the
                # offset between the two bone positions
                disp_to_bone_index = bone_def.disp_dir
                base_dir = vmdutil.sub_v(
                    bone_defs[disp_to_bone_index].position,
                    bone_def.position)
            else:
                base_dir = bone_def.disp_dir
            base_dirs[leaf_index] = base_dir
            degree = graph.in_degree(leaf_index)
            if degree <= 0:
                continue
            # walk up the bone graph, assigning the leaf's direction
            # to every ancestor that is also an overwrite bone
            parent_index = next(iter(graph.preds[leaf_index]))
            while True:
                if parent_index in self.overwrite_indexes:
                    base_dirs[parent_index] = base_dir
                degree = graph.in_degree(parent_index)
                if degree <= 0:
                    break
                parent_index = next(iter(graph.preds[parent_index]))
    return base_dirs
def setup_watcher_extlink(self, queue):
    """Prepare the external-link transform and queue its key frames.

    Raises Exception when the linked bone is missing from the PMX.
    """
    bone_defs = self.bone_defs[self.WATCHER_EX]
    ext_bone = self.watcher_extlink[0]
    self.bone_dict[self.WATCHER_EX] = bone_dict = pmxutil.make_index_dict(
        bone_defs)
    if not self.check_bones([ext_bone], bone_dict):
        raise Exception('external link bone is not in pmx')
    self.watcher_extlink_transform = extt = vmdmotion.BoneTransformation(
        bone_defs, self.watcher_extlink_vmd.get_frames('bones'),
        [ext_bone], True)
    # every key frame of the linked bone chain becomes a 'b' event
    for bone_index in extt.transform_bone_indexes:
        bone_name = bone_defs[bone_index].name_jp
        for motion in extt.motion_name_dict[bone_name]:
            queue.push(MotionFrame(
                motion.frame, 'b', self.WATCHER_EX, bone_name))
    return self.watcher_extlink_transform
def setup_watcher(self, queue):
    """Prepare the watcher's transform, constraints, base directions and
    queue all of its key-frame events.

    Overwrite-bone key frames get type 'o'; everything else type 'b'.
    Raises Exception when an overwrite bone is missing from the PMX.
    """
    bone_defs = self.bone_defs[self.WATCHER]
    self.bone_dict[self.WATCHER] = bone_dict = pmxutil.make_index_dict(
        bone_defs)
    if '両目' in self.overwrite_bones:
        # use the right eye's height for the both-eyes bone position
        bone_defs[bone_dict['両目']] = replace_bonedef_position(
            bone_defs[bone_dict['両目']],
            bone_defs[bone_dict['右目']], [1])
    # constraints are configured in degrees; convert to radians
    self.constraints_rad = {
        bone_name:
        [math.radians(k) for k in self.constraints[bone_name][0]]
        for bone_name in self.overwrite_bones}
    if not self.check_bones(self.overwrite_bones, bone_dict):
        raise Exception('bones to be overwritten are not in pmx.')
    # bone_graph
    self.watcher_transform = vmdmotion.BoneTransformation(
        bone_defs, self.watcher_motions, self.overwrite_bones, True)
    self.overwrite_indexes = [
        self.watcher_transform.bone_name_to_index[bone_name]
        for bone_name in self.overwrite_bones]
    # re-sort indexes/names into transform order
    self.overwrite_indexes = pmxutil.get_transform_order(
        self.overwrite_indexes, bone_defs)
    self.overwrite_bones = [
        bone_defs[bone_index].name_jp for
        bone_index in self.overwrite_indexes]
    # make dir
    if 'ARM' == self.point_mode:
        self.base_dirs = self.make_arm_dir()
    else:
        self.base_dirs = {}
        for index in self.overwrite_indexes:
            bone_name = bone_defs[index].name_jp
            dir = self.forward_dirs.get(bone_name)
            if dir is not None:
                self.base_dirs[index] = dir
            else:
                self.base_dirs[index] = (0, 0, -1)
    # queue frames
    for bone_index in self.watcher_transform.transform_bone_indexes:
        bone_def = bone_defs[bone_index]
        bone_name = bone_def.name_jp
        for motion in (
                self.watcher_transform.motion_name_dict[bone_name]):
            if bone_name not in self.overwrite_bones:
                queue.push(MotionFrame(
                    motion.frame, 'b', self.WATCHER, bone_name))
            else:
                queue.push(MotionFrame(
                    motion.frame, 'o', self.WATCHER, bone_name))
    if self.watcher_extlink is not None:
        transform = self.setup_watcher_extlink(queue)
        self.watcher_transform.set_external_link(
            transform, self.watcher_extlink[0])
    return
def setup_target(self, queue):
    """Prepare the target's transform and queue its key-frame events.

    'CAMERA' mode queues 'v' events ('c' when two camera frames are
    adjacent, i.e. a cut); 'MODEL' mode queues 'b' events for the
    target bone chain.  'FIXED' mode needs no setup.
    """
    if 'CAMERA' == self.target_mode:
        self.target_transform = vmdmotion.VmdMotion(self.target_motions)
        sorted_motions = self.target_transform.sorted_motions
        for i, motion in enumerate(sorted_motions):
            # consecutive camera frames mark a hard 'cut'
            type = 'c' if (
                i > 0 and
                sorted_motions[i - 1].frame == motion.frame - 1) else 'v'
            queue.push(MotionFrame(
                motion.frame, type, self.TARGET, 'CAMERA'))
        return
    elif 'MODEL' == self.target_mode:
        bone_defs = self.bone_defs[self.TARGET]
        self.bone_dict[self.TARGET] = d = (
            pmxutil.make_index_dict(bone_defs))
        if self.target_bone not in d:
            raise Exception('target bone is not in pmx.')
        if self.target_bone == '両目':
            # watch a point at the right eye's height/depth
            bone_defs[d['両目']] = replace_bonedef_position(
                bone_defs[d['両目']],
                bone_defs[d['右目']], [1, 2])
        # pmx
        self.target_transform = vmdmotion.BoneTransformation(
            bone_defs, self.target_motions, [self.target_bone], True)
        for bone_index in self.target_transform.transform_bone_indexes:
            bone_def = bone_defs[bone_index]
            bone_name = bone_def.name_jp
            for motion in (
                    self.target_transform.motion_name_dict[bone_name]):
                queue.push(
                    MotionFrame(motion.frame, 'b', self.TARGET, bone_name))
        return
def get_camera_pos(self, rotation, position, distance):
    """Return the camera's world position from its VMD parameters."""
    direction = vmdutil.camera_direction(rotation, distance)
    return vmdutil.add_v(position, direction)
def get_target_camera_pos(self, frame_no):
    """Return the camera position at *frame_no* (target_mode 'CAMERA')."""
    rotation, position, distance, angle_of_view = (
        self.target_transform.get_vmd_transform(frame_no))
    pos = self.get_camera_pos(rotation, position, distance)
    return pos
def get_target_model_pos(self, frame_no):
    """Return the target bone's global position at *frame_no* ('MODEL')."""
    bone_dict = self.target_transform.bone_name_to_index
    global_target, vmd_target, additional_transform = (
        self.target_transform.do_transform(
            frame_no, bone_dict[self.target_bone]))
    return global_target[1]
def get_target_pos(self, frame_no):
    """Return the look-target's world position for the current mode."""
    if self.target_mode == 'FIXED':
        return self.target_pos
    if self.target_mode == 'CAMERA':
        return self.get_target_camera_pos(frame_no)
    if self.target_mode == 'MODEL':
        return self.get_target_model_pos(frame_no)
def check_ignore_case(self, body_dir, look_dir):
    """Return True when the target is outside the allowed yaw zone.

    Both directions are projected onto the horizontal plane (normal =
    self.global_up) and the angle between them is compared against
    self.ignore_zone; a non-positive zone disables the check.
    """
    if self.ignore_zone <= 0:
        return False
    body_dir_y = vmdutil.project_to_plane_v(
        body_dir, self.global_up)
    look_dir_y = vmdutil.project_to_plane_v(
        look_dir, self.global_up)
    angle_around_y = vmdutil.angle_v(
        body_dir_y, look_dir_y)
    return angle_around_y > self.ignore_zone
def scale_turn(self, bone_name, turn, r=False):
    """Scale a per-axis turn by the bone's constraint weights.

    With r=True the complementary weights (1 - w) are applied instead.
    Returns a new list; *turn* itself is not modified.
    """
    weight = self.constraints[bone_name][1]
    if r:
        weight = [1 - w for w in weight]
    return [angle * w for angle, w in zip(turn, weight)]
def apply_constraints(self, bone_name, turn):
    """Clamp each turn component to the bone's per-axis limit (radians)."""
    constraint_rad = self.constraints_rad[bone_name]
    turn = [vmdutil.clamp(turn[i],
                          -constraint_rad[i], constraint_rad[i])
            for i in range(len(turn))]
    return turn
def copy_vmd_of_overwrite_bones(
        self, frame_no, frame_type, bone_name=None):
    """Return the original VMD frames of overwrite bones at *frame_no*.

    Only acts when the merged frame type contains 'o'; with
    *bone_name* given, only that bone's frame is returned (if any).
    """
    if 'o' not in frame_type:
        return []
    new_frames = list()
    if bone_name is not None:
        frame = self.watcher_transform.get_vmd_frame(frame_no, bone_name)
        if frame is not None:
            return [frame]
        else:
            return []
    for bone_name in self.overwrite_bones:
        frame = self.watcher_transform.get_vmd_frame(frame_no, bone_name)
        if frame is not None:
            new_frames.append(frame)
    return new_frames
def get_watcher_center_transform(self, frame_no):
    """Return the watcher's center bone global (rotation, position).

    Falls back to the identity transform when no data is available.
    """
    bone_dict = self.watcher_transform.bone_name_to_index
    global_center, vmd_center, additional_center = (
        self.watcher_transform.do_transform(frame_no, bone_dict['センター']))
    if global_center is None:
        global_center = (vmdutil.QUATERNION_IDENTITY, (0, 0, 0))
    return global_center
def apply_near_mode(self, bone_index, rotation, target_pos):
    """Shift *target_pos* by the rotated offset from the first overwrite
    leaf under *bone_index* back to the bone itself, so near targets
    are tracked consistently along the chain."""
    bone_defs = self.watcher_transform.bone_defs
    leaves = self.watcher_transform.transform_bone_graph.get_leaves(
        bone_index)
    for ow_index in self.overwrite_indexes:
        if ow_index in leaves:
            delta = vmdutil.sub_v(
                bone_defs[bone_index].position,
                bone_defs[ow_index].position)
            delta = vmdutil.rotate_v3q(delta, rotation)
            target_pos = vmdutil.add_v(target_pos, delta)
            break  # first leaf in sorted overwrite-bones
    return target_pos
def get_face_rotation(
        self, frame_type, frame_no, bone_index, parent_index,
        watcher_v, watcher_dir, watcher_pos, up,
        target_v, target_pos):
    """Compute the overwrite quaternion for a free-axis (face) bone.

    Returns None when the target falls in the ignore zone.
    """
    bone_defs = self.watcher_transform.bone_defs
    bone_name = bone_defs[bone_index].name_jp
    look_dir = vmdutil.sub_v(target_pos, watcher_pos)
    if self.check_ignore_case(watcher_dir, look_dir):
        return None
    turn = vmdutil.look_at(
        watcher_dir, up, look_dir, self.global_up)
    if (self.vmd_lerp and
            bone_index not in self.watcher_transform.leaf_indexes):
        # mix the look-at yaw/pitch with the original VMD angles,
        # using complementary constraint weights for the VMD part
        vmd_rot = self.watcher_transform.get_vmd_transform(
            frame_no, bone_index)[0]
        vmd_euler = vmdutil.quaternion_to_euler(vmd_rot)
        turn = [turn[0], turn[1], 0]
        turn = self.scale_turn(bone_name, turn)
        vmd_euler = self.scale_turn(bone_name, vmd_euler, True)
        turn = vmdutil.add_v(turn, vmd_euler)
    else:
        turn = self.scale_turn(bone_name, turn)
    turn = self.apply_constraints(bone_name, turn)
    hrot = tuple(vmdutil.euler_to_quaternion(turn))
    return hrot
def get_arm_rotation(
        self, frame_type, frame_no, bone_index, parent_index,
        watcher_v, watcher_dir, watcher_pos, watcher_axis, watcher_up,
        target_v, target_pos):
    """Compute the overwrite quaternion for a fixed-axis (arm) bone.

    Only the rotation angle around the bone's fixed axis is solved;
    it is clamped against the first component of the constraint.
    """
    bone_defs = self.watcher_transform.bone_defs
    bone_name = bone_defs[bone_index].name_jp
    look_dir = vmdutil.sub_v(target_pos, watcher_pos)
    turn = vmdutil.look_at_fixed_axis(
        watcher_dir, watcher_up, look_dir)
    turn = self.apply_constraints(
        bone_name, [turn, 0, 0])[0]
    hrot = tuple(vmdutil.quaternion(watcher_axis, turn))
    return hrot
def get_rotation(self, frame_no, frame_type, bone_index,
                 watcher_v, target_v, target_pos):
    """Compute the look-at quaternion for *bone_index* at *frame_no*.

    Builds the bone's global frame from its parent's transform, then
    dispatches to get_arm_rotation for fixed-axis bones and
    get_face_rotation otherwise.  Returns None in the ignore case.
    Raises Exception for root bones, which must not be overwritten.
    """
    bone_graph = self.watcher_transform.transform_bone_graph
    bone_defs = self.watcher_transform.bone_defs
    bone_def = bone_defs[bone_index]
    if bone_graph.in_degree(bone_index) > 0:
        parent_index = next(iter(bone_graph.preds[bone_index]))
        global_parent, vmd_parent, add_parent = (
            self.watcher_transform.do_transform(
                frame_no, parent_index))
        add_trans = self.watcher_transform.get_additional_transform(
            frame_no, bone_index)
        neck_rotation, neck_pos = vmdmotion.get_global_transform(
            (vmdutil.QUATERNION_IDENTITY, [0, 0, 0]), bone_def,
            vmd_parent, bone_defs[parent_index],
            global_parent, add_trans)
    else:
        raise Exception('overwrite bone should not be root.')
    forward_dir = self.base_dirs[bone_index]
    base_dir = vmdutil.rotate_v3q(forward_dir, global_parent[0])
    if self.near_mode:
        target_pos = self.apply_near_mode(
            bone_index, neck_rotation, target_pos)
    if (
            bone_def.flag & pmxdef.BONE_AXIS_IS_FIXED ==
            pmxdef.BONE_AXIS_IS_FIXED):
        # fixed-axis bone (e.g. twist bones): solve angle around axis
        axis = bone_def.fixed_axis
        up = vmdutil.rotate_v3q(axis, global_parent[0])
        hrot = self.get_arm_rotation(
            frame_type, frame_no, bone_index,
            parent_index,
            watcher_v, base_dir, neck_pos, axis, up,
            target_v, target_pos)
    else:
        # free bone: build an up vector perpendicular to forward_dir
        up = (0, -forward_dir[2], forward_dir[1])
        up = vmdutil.rotate_v3q(up, global_parent[0])
        hrot = self.get_face_rotation(
            frame_type, frame_no, bone_index,
            parent_index,
            watcher_v, base_dir, neck_pos, up,
            target_v, target_pos)
    return hrot
def make_look_at_frames(
        self, frame_type, frame_no, target_pos,
        next_frame_no, next_center_transform, next_target_pos,
        bone_index=None):
    """Build overwrite key frames for *frame_no*.

    Velocities of the watcher's center and of the target are
    estimated from the next queued frame.  Processes either a single
    bone (*bone_index* given) or all overwrite bones, and returns []
    as soon as any bone hits the ignore case.
    """
    overwrite_frames = list()
    bone_defs = self.watcher_transform.bone_defs
    if next_frame_no is not None:
        # per-frame velocity of the target position
        target_v = vmdutil.sub_v(next_target_pos, target_pos)
        target_v = vmdutil.scale_v(
            target_v, 1 / (next_frame_no - frame_no))
        # center velocity
        global_center, vmd_center, add_center = (
            self.watcher_transform.do_transform(
                frame_no,
                self.watcher_transform.bone_name_to_index['センター']))
        cpos = global_center[1]
        watcher_v = vmdutil.sub_v(next_center_transform[1], cpos)
        watcher_v = vmdutil.scale_v(
            watcher_v, 1 / (next_frame_no - frame_no))
    else:
        target_v = (0, 0, 0)
        cpos = (0, 0, 0)
        watcher_v = (0, 0, 0)

    def get_lookat_frame(b_index):
        # Compute one bone's overwrite frame; fall back to the VMD
        # frame (or nothing) in the ignore case.
        result = list()
        bone_def = bone_defs[b_index]
        bone_name = bone_def.name_jp
        hrot = self.get_rotation(
            frame_no, frame_type, b_index,
            watcher_v, target_v, target_pos)
        if hrot is None:  # ignore_case
            if (self.use_vmd_interpolation and
                    b_index not in self.watcher_transform.leaf_indexes):
                vmd_frame = self.watcher_transform.get_vmd_frame(
                    frame_no, bone_name)
                if vmd_frame:
                    result.append(vmd_frame)
            return result
        self.watcher_transform.do_transform(
            frame_no, b_index, (hrot, (0, 0, 0)))
        if (not self.use_vmd_interpolation or
                b_index in self.watcher_transform.leaf_indexes):
            # emit a fresh key frame carrying only the rotation
            result.append(vmddef.BONE_SAMPLE._replace(
                frame=frame_no,
                name=bone_name.encode(vmddef.ENCODING),
                rotation=hrot))
        else:
            # reuse the original frame (keeps its interpolation data)
            vmd_frame = self.watcher_transform.get_vmd_frame(
                frame_no, bone_name)
            if vmd_frame:
                result.append(vmd_frame._replace(
                    rotation=hrot))
        return result

    if bone_index is not None:
        result = get_lookat_frame(bone_index)
        if 0 == len(result):
            return []
        else:
            overwrite_frames.extend(result)
    else:
        for bone_index in self.overwrite_indexes:
            result = get_lookat_frame(bone_index)
            if 0 == len(result):
                return []
            else:
                overwrite_frames.extend(result)
    # vmd_blend
    if self.need_vmd_blend():
        overwrite_frames = self.blend_vmd(
            frame_no, frame_type, overwrite_frames,
            watcher_v, target_v, target_pos)
    return overwrite_frames
def blend_vmd(self, frame_no, frame_type, overwrite_frames,
              watcher_v, target_v, target_pos):
    """Blend original VMD rotations into the computed look-at frames.

    Non-leaf bones get their VMD euler angles mixed in by
    vmd_blend_ratios (upward pitch additionally weighted by
    up_blend_weight); leaf bones (the eyes) are recomputed from the
    look-at solution after the parents changed.
    """
    def find_frame(bone_name):
        # Pop the frame whose encoded name matches *bone_name*.
        for index, frame in enumerate(overwrite_frames):
            if vmdutil.b_to_str(frame.name) == bone_name:
                return overwrite_frames.pop(index)
    bone_defs = self.watcher_transform.bone_defs
    # remove transformation data from db
    self.watcher_transform.delete_descendants(
        frame_no, self.overwrite_indexes[0])
    # blend vmd
    for bone_index in self.overwrite_indexes:
        bone_name = bone_defs[bone_index].name_jp
        if bone_index in self.watcher_transform.leaf_indexes:  # eyes
            # lookat
            hrot = self.get_rotation(
                frame_no, frame_type, bone_index,
                watcher_v, target_v, target_pos)
            if hrot is not None:
                frame = find_frame(bone_name)
                frame = frame._replace(rotation=hrot)
                self.watcher_transform.do_transform(
                    frame_no, bone_index, (hrot, (0, 0, 0)))
                overwrite_frames.append(frame)
        else:
            ratio = self.vmd_blend_ratios.get(bone_name, (0, 0, 0))
            # blend
            frame = find_frame(bone_name)
            if ratio[0] > 0 or ratio[1] > 0 or ratio[2] > 0:
                vmd_rot = self.watcher_transform.get_vmd_transform(
                    frame_no, bone_index)[0]
                vmd_euler = vmdutil.quaternion_to_euler(vmd_rot)
                if vmd_euler[0] > 0:  # up
                    weight = self.up_blend_weight.get(bone_name, 1.0)
                    vmd_euler = (
                        vmd_euler[0] * weight,
                        vmd_euler[1], vmd_euler[2])
                vmd_euler = [i * j for i, j in zip(vmd_euler, ratio)]
                look_euler = vmdutil.quaternion_to_euler(frame.rotation)
                # blend
                look_euler = [i + j for i, j in zip(look_euler, vmd_euler)]
                look_euler = self.apply_constraints(bone_name, look_euler)
                hrot = tuple(vmdutil.euler_to_quaternion(look_euler))
                frame = frame._replace(rotation=hrot)
                self.watcher_transform.do_transform(
                    frame_no, bone_index, (hrot, (0, 0, 0)))
            else:
                self.watcher_transform.do_transform(
                    frame_no, bone_index, (frame.rotation, (0, 0, 0)))
            overwrite_frames.append(frame)
    return overwrite_frames
def camera_delay(
        self, frame_no, frame_type, overwrite_frames, queue, prev):
    """Suppress frames after a camera cut until rotation speed is legal.

    On a 'c' (cut) event: when the largest per-bone rotation from the
    previously emitted frame exceeds self.omega_limit (rad/frame),
    drop this frame, discard queued non-'o' events before the
    catch-up time and schedule an 'r' (delay) event instead.
    """
    if prev['frame_no'] < 0:
        return overwrite_frames
    if 'c' in frame_type:
        # largest rotation angle of any bone since the previous frame
        maxrot = max(
            [math.acos(vmdutil.clamp(vmdutil.diff_q(
                motion.rotation, prev['frames'][motion.name].rotation)[3],
                -1, 1))
             for motion in overwrite_frames if
             prev['frames'].get(motion.name) is not None])
        omega = maxrot / (frame_no - prev['frame_no'])
        if omega > self.omega_limit:
            delay_to = math.ceil(maxrot / self.omega_limit) + frame_no
            while True:
                peek = queue.top()
                if (peek is None or delay_to <= peek.frame_no or
                        'o' in peek.type):
                    break
                queue.pop()
            queue.push(MotionFrame(delay_to, 'r', -1, 'DELAY'))
            return []
        else:
            return overwrite_frames
    else:
        return overwrite_frames
def look_at_npath(self):
    """Compute overwrite frames bone by bone, reusing VMD interpolation.

    Each overwrite bone gets its own pass over a copy of the event
    queue; the frames produced for a bone replace its original VMD
    frames before the next pass so parent transforms stay consistent.
    Returns the flattened list of all new frames.
    """
    self.load()
    queue = PriorityQueue()
    self.setup_watcher(queue)
    self.setup_target(queue)
    self.add_frames(queue)
    new_frames = dict()
    bone_defs = self.watcher_transform.bone_defs
    queue_backup = queue.queue
    for bone_index in self.overwrite_indexes:
        bone_name = bone_defs[bone_index].name_jp
        queue.queue = queue_backup[:]  # fresh copy per bone
        new_frames[bone_index] = list()
        is_leaf = bone_index in self.watcher_transform.leaf_indexes
        prev_overwrites = {'frame_no': -1, 'frames': []}
        while True:
            motion_frame = queue.pop()
            if motion_frame is None:
                break
            frame_no = motion_frame.frame_no
            frame_type = motion_frame.type
            # merge all events sharing this frame number
            while (queue.top() is not None and
                    queue.top().frame_no == frame_no):
                dummy = queue.pop()
                frame_type += dummy.type
            if not self.frame_ranges.is_in_range(frame_no):
                # outside the requested ranges: keep the VMD as-is
                new_frames[bone_index].extend(
                    self.copy_vmd_of_overwrite_bones(
                        frame_no, frame_type, bone_name))
                continue
            if (not is_leaf and self.watcher_transform.get_vmd_frame(
                    frame_no, bone_name) is None):
                continue
            target_pos = self.get_target_pos(frame_no)
            next_frame = queue.top()
            if next_frame is not None:
                next_frame_no = next_frame.frame_no
                next_center_transform = (
                    self.get_watcher_center_transform(next_frame_no))
                # TODO reuse
                next_target_pos = self.get_target_pos(next_frame_no)
            else:
                next_frame_no = None
                next_center_transform = None
                next_target_pos = None
            overwrite_frames = self.make_look_at_frames(
                frame_type, frame_no, target_pos,
                next_frame_no, next_center_transform,
                next_target_pos,
                bone_index)
            if len(overwrite_frames) <= 0:
                continue
            if (is_leaf and 'CAMERA' == self.target_mode and
                    self.omega_limit > 0):
                overwrite_frames = self.camera_delay(
                    frame_no, frame_type, overwrite_frames,
                    queue, prev_overwrites)
                if len(overwrite_frames) > 0:
                    prev_overwrites['frame_no'] = frame_no
                    prev_overwrites['frames'] = {
                        frame.name: frame for frame in overwrite_frames}
            new_frames[bone_index].extend(overwrite_frames)
        # make this bone's new frames visible to later passes
        self.watcher_transform.replace_vmd_frames(new_frames[bone_index])
    return [f for inner_list in new_frames.values() for f in inner_list]
def look_at(self):
    """Compute and return all overwrite bone frames for the motion.

    Delegates to look_at_npath() in VMD-interpolation mode; otherwise
    processes the merged event queue frame by frame, producing new
    key frames for every overwrite bone.
    """
    if self.use_vmd_interpolation:
        return self.look_at_npath()
    self.load()
    queue = PriorityQueue()
    self.setup_watcher(queue)
    self.setup_target(queue)
    self.add_frames(queue)
    new_frames = list()
    prev_overwrites = {'frame_no': -1, 'frames': []}
    o_frame_pattern = re.compile('^o*$')
    vmd_blend = self.need_vmd_blend()
    while True:
        motion_frame = queue.pop()
        if motion_frame is None:
            break
        frame_no = motion_frame.frame_no
        frame_type = motion_frame.type
        # merge all events sharing this frame number
        while queue.top() is not None and queue.top().frame_no == frame_no:
            dummy = queue.pop()
            frame_type += dummy.type
        if not self.frame_ranges.is_in_range(frame_no):
            # outside the requested ranges: keep the VMD as-is
            new_frames.extend(
                self.copy_vmd_of_overwrite_bones(frame_no, frame_type))
            continue
        if (not vmd_blend and not self.vmd_lerp and
                not self.use_vmd_interpolation and
                o_frame_pattern.match(frame_type)):
            # frames carrying only overwrite keys can be skipped
            continue
        target_pos = self.get_target_pos(frame_no)
        next_frame = queue.top()
        if next_frame is not None:
            next_frame_no = next_frame.frame_no
            next_center_transform = (
                self.get_watcher_center_transform(next_frame_no))
            # TODO reuse
            next_target_pos = self.get_target_pos(next_frame_no)
        else:
            next_frame_no = None
            next_center_transform = None
            next_target_pos = None
        overwrite_frames = self.make_look_at_frames(
            frame_type, frame_no, target_pos,
            next_frame_no, next_center_transform, next_target_pos)
        if len(overwrite_frames) == 0:
            continue
        if 'CAMERA' == self.target_mode and self.omega_limit > 0:
            overwrite_frames = self.camera_delay(
                frame_no, frame_type, overwrite_frames,
                queue, prev_overwrites)
            if len(overwrite_frames) > 0:
                prev_overwrites['frame_no'] = frame_no
                prev_overwrites['frames'] = {
                    frame.name: frame for frame in overwrite_frames}
        new_frames.extend(overwrite_frames)
        # free per-frame transform caches we no longer need
        self.watcher_transform.delete(frame_no)
        if 'MODEL' == self.target_mode:
            self.target_transform.delete(frame_no)
    return new_frames
if __name__ == '__main__':
    # This module is a library; run one of the driver scripts instead.
    print('use trace_camera.py or trace_model.py.')
|
Hashi4/vmdgadgets
|
vmdgadgets/lookat.py
|
Python
|
apache-2.0
| 34,404
|
[
"VMD"
] |
a3f1f7600c064dc7d0f8d5dc75df4a60f1dc587e1a712cfa166df7471ce853f3
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Michael Cook <mcook@mackal.net>
#
# GPLv3
"""
Processes an eqlog file and generates SQL to update factions
Should work with a full log, but cleaning up the log will be quicker
The file needs at least the zone enter messages, faction messages,
and slain messages in their full to work
IMPORTANT: faction messages from non-kills should be filtered out ...
File prep:
I just did a $ grep 'faction\\|slain\\|entered' on the log file
to clean up the log for processing
"""
import re, sys, os
import collections
# Maps faction name -> faction ID. IDs are kept as strings so SQL text
# generation needs no int-to-str conversion.
factiontable = {
"Agents of Dreadspire": "396",
"Agents of Mistmoore": "1",
"Agnarr": "2",
"Ak'Anon Strike Force V": "497",
"Akheva": "3",
"Allize Taeew": "4",
"Allize Volew": "5",
"Ancestors of the Crypt": "499",
"Ancestors of Valdeholm": "498",
"Anchorites of Brell Serilis": "6",
"Ancient Cyclops": "481",
"Ankhefenmut": "397",
"Anti-mage": "8",
"Antonius Bayle": "9",
"Arboreans of Faydark": "10",
"Arcane Scientists": "11",
"Army of Light": "494",
"Ashen Order": "12",
"Askr the Lost": "13",
"Aviak": "14",
"Banker": "15",
"Battalion of Marr": "16",
"Beetle": "457",
"Befallen Inhabitants": "17",
"Bertoxxulous": "382",
"Beta Neutral": "18",
"Betrayers of Di`Zok": "19",
"Bloodgills": "20",
"Bloodsabers": "21",
"Broken Skull Clan": "22",
"Brood of Di`Zok": "23",
"Brood of Kotiz": "24",
"Brood of Ssraeshza": "25",
"Brownie": "26",
"Burning Dead": "27",
"Burynai Legion": "28",
"Butcherblock Bandits": "29",
"Cabilis Residents": "30",
"Carson McCabe": "31",
"Cazic Thule": "368",
"Chetari": "32",
"Children of Dranik": "398",
"Circle Of Unseen Hands": "33",
"Citizens of Froststone": "399",
"Citizens of Gukta": "35",
"Citizens of Qeynos": "36",
"Citizens of Seru": "37",
"Citizens of Sharvahl": "483",
"Citizens of Takish-Hiz": "38",
"Clan Grikbar": "39",
"Clan Kolbok": "40",
"Clan Runnyeye": "41",
"Class 41": "377",
"Claws of Veeshan": "42",
"Cleaving Tooth Clan": "383",
"Clerics of Tunare": "43",
"Clerics of Underfoot": "44",
"Clockwork Gnome": "45",
"Clurg": "46",
"Coalition of Tradefolk": "47",
"Coalition of TradeFolk III": "369",
"Coalition of Tradefolk Underground": "48",
"Coldain": "49",
"Combine Empire": "50",
"Commons Residents": "51",
"Concillium Universus": "52",
"Corrupt Qeynos Guards": "53",
"Coterie Elite": "54",
"Coterie of the Eternal Night": "55",
"Craftkeepers": "56",
"Craknek Warriors": "57",
"Creatures of Darkhollow": "400",
"Creatures of Gloomingdeep": "401",
"Creatures of Justice": "58",
"Creatures of Taelosia": "59",
"Creep Reapers": "402",
"Crescent Guards": "493",
"Crimson Hands": "60",
"Critters of Jaggedpine": "61",
"Crusaders of Greenmist": "62",
"Crushbone Orcs": "63",
"Crystal Caverns Terrors/Spiders/Crawlers": "395",
"Cult of the Arisen": "64",
"Cult of the Great Saprophyte": "65",
"Cursed Drakes": "403",
"DaBashers": "66",
"Dain Frostreaver IV": "67",
"Dar Khura": "68",
"Dark Bargainers": "69",
"Dark Ones": "70",
"Dark Reflection": "71",
"Dark Reign": "404",
"Dark Sendings": "72",
"Darkpaws of Jaggedpine": "73",
"Dawnhoppers": "74",
"Death Fist Orcs": "405",
"Deathfist Orcs": "75",
"Deep Muses": "76",
"Deep Sporali": "406",
"Deeppockets": "77",
"Deepshade Collective": "78",
"Deepwater Knights": "79",
"Defective Clockwork": "80",
"Defenders of the Broodlands": "407",
"Defenders of the Haven": "81",
"Deklean Korgad": "408",
"Denizens of Discord": "409",
"Denizens of Fear": "82",
"Denizens of Mischief": "391",
"Dervish Cutthroats": "83",
"Disciples of Kerafyrm": "84",
"Disciples of Rhag`Zadune": "85",
"Dismal Rage": "86",
"Dranik Loyalists": "410",
"Dreadguard Inner": "87",
"Dreadguard Outer": "88",
"Drusella Sathir": "89",
"Dulaks Clan": "459",
"Ebon Mask": "90",
"Eldritch Collective": "91",
"Elementals": "374",
"Emerald Warriors": "92",
"Emperor Ssraeshza": "93",
"Erudite Citizen": "380",
"EvilEye": "94",
"Exiled Frogloks": "95",
"Expedition 328": "411",
"Eye of Seru": "96",
"Faerie": "97",
"Fallen Guard of Illsalin": "412",
"Fallen of Bloody Kithicor": "98",
"Faydarks Champions": "99",
"FelGuard": "100",
"Firiona Vie": "101",
"Fizzlethorp": "414",
"Fizzlethorpe": "102",
"Followers of Korucust": "103",
"Forgotten Guktan Spirits": "104",
"Free Traders of Malgrinnor": "415",
"The Freeport Militia": "105",
"Frogloks of Guk": "106",
"Frogloks of Krup": "107",
"Frogloks of Kunark": "108",
"Frogloks of Sebilis": "109",
"Frostfoot Goblins": "110",
"FungusMan": "111",
"Gate Callers": "112",
"Gate Keepers": "113",
"Gelistial": "114",
"Gem Choppers": "115",
"Geonid Collective": "116",
"Ghouls of Neriak": "117",
"Giant Spider": "386",
"Gladiators of Mata Muram": "416",
"Goblin": "118",
"Goblins of Cleaving Tooth": "119",
"Goblins of Fire Peak": "120",
"Goblins of Mountain Death": "121",
"Gor Taku": "122",
"Gralloks": "123",
"Greater Brann Giants": "124",
"Greater Jord Giants": "125",
"Greater Vann Giants": "126",
"Greater Vind Giants": "127",
"Green Blood Knights": "128",
"Greenfoot Goblins": "417",
"Grieg": "129",
"Grimlings of the Forest": "392",
"Grimlings of the Moor": "130",
"Grobb Merchants": "131",
"Guardians of Shar Vahl": "132",
"Guardians of the Vale": "133",
"Guardians of Veeshan": "134",
"Guards of Gloomingdeep": "475",
"Guards of Qeynos": "135",
"Guktan Elders": "136",
"Guktan Suppliers": "484",
"Gunthaks Clan": "458",
"Hall of the Ebon Mask": "137",
"Hand Legionnaries": "138",
"Hand of Seru": "139",
"Harbingers Clan": "373",
"Haven Defenders": "140",
"Haven Smugglers": "141",
"Heart of Seru": "142",
"Heretics": "143",
"Hexxt": "144",
"High Council of Erudin": "145",
"High Council of Gukta": "146",
"High Guard of Erudin": "147",
"HighHold Citizens": "148",
"Highpass Guards": "149",
"HoHMaiden": "471",
"Holgresh": "150",
"Horde of Xalgoz": "151",
"House of Fordel": "152",
"House of Midst": "153",
"House of Stout": "154",
"Iksar": "371",
"Indifferent": "463",
"Indigo Brotherhood": "155",
"Inhabitants of Air": "464",
"Inhabitants of Firiona Vie": "418",
"Inhabitants of Hate": "156",
"Inhabitants of Tanaan": "157",
"Innoruuk's Curse of the Cauldron": "158",
"Invaders of the Moor": "503",
"Jaggedpine Treefolk": "159",
"Jaled-Dar": "160",
"Johanius Barleou": "161",
"Kaladim Citizens": "162",
"Kaladim Merchants": "419",
"Kane Bayle": "164",
"Karana": "165",
"Karana Bandits": "166",
"Karana Residents": "167",
"Katta Castellum Citizens": "168",
"Kazon Stormhammer": "169",
"Kedge": "420",
"Keepers of the Art": "170",
"Keepers of the Claw": "171",
"Kejek Village": "172",
"Kejekan": "173",
"Kelethin Merchants": "174",
"Kerra": "421",
"Kerra Isle": "175",
"Kessdona": "422",
"Khati Sha": "423",
"King Ak'Anon": "176",
"King Aythox Thex": "379",
"King Naythox Thex": "177",
"King Tearis Thex": "178",
"King Tormax": "179",
"King Xorbb": "180",
"Kingdom of Above and Below": "181",
"Kithicor Residents": "182",
"Knights of Thunder": "183",
"Knights of Truth": "184",
"Kobold": "185",
"Kobolds of Fire Pit": "186",
"Kobolds of Gloomingdeep": "424",
"Koka'Vor Tribe": "501",
"KOS": "366",
"KOS Inhabitants of Air": "465",
"KOS Plane of Disease": "466",
"KOS Plane of Innovation": "468",
"KOS Plane of Nightmare": "467",
"KOS Plane of Storms": "489",
"KOS Plane of Time": "469",
"KOS_animal": "367",
"Krag": "187",
"Kromrif": "188",
"Kromzek": "189",
"Kunark Fire Giants": "190",
"Lake Recondite Bandits": "191",
"Lanys T`Vyl": "425",
"League of Antonican Bards": "192",
"Legion of Cabilis": "193",
"Legion of Mata Muram": "194",
"Lesser Brann Giants": "195",
"Lesser Jord Giants": "196",
"Lesser Vann Giants": "197",
"Lesser Vind Giants": "198",
"Lithiniath": "199",
"Lizard Man": "200",
"Lodikai": "201",
"Lorekeepers of Gukta": "202",
"Lost Kingdom of Lok": "203",
"Lost Minions of Miragul": "204",
"Loyals": "454",
"Luclin": "205",
"Madmen": "480",
"Magus Conlegium": "206",
"Mayong Mistmoore": "207",
"Mayor Gubbin": "208",
"Meldrath": "209",
"Merchants of Ak'Anon": "210",
"Merchants of Erudin": "211",
"Merchants of Felwithe": "212",
"Merchants of Halas": "213",
"Merchants of Highpass": "214",
"Merchants of Kaladim": "215",
"Merchants of Ogguk": "216",
"Merchants of Qeynos": "217",
"Merchants of Rivervale": "218",
"Mermaid": "426",
"Mermaids": "375",
"Miners Guild 249": "219",
"Miners Guild 628": "220",
"Minions of Scale": "221",
"Minions of the Sunlord": "222",
"Minions of Tirranun": "427",
"Minions of Underfoot": "223",
"Mountain Death Clan": "384",
"Mucktail Gnolls": "224",
"Murrissa Sandwhisper": "372",
"Nadox Clan": "472",
"Nadox Initiate": "225",
"Nagafen": "226",
"Najena": "227",
"Nathyn Illuminious": "228",
"Needlite": "460",
"Neriak Merchants": "486",
"Neriak Ogre": "378",
"Neriak Trolls": "229",
"Nest Guardians": "428",
"New Alliance of Stone": "230",
"Nihil": "231",
"Nitram": "474",
"Noobie Monsters KOS to Guards": "394",
"Norrath's Keepers": "429",
"Oggok Citizens": "233",
"Oggok Guards": "232",
"Ogguk Residents": "430",
"Ogre": "431",
"Ogre Warriors": "234",
"OmensBatRat": "485",
"OmensMurks": "487",
"Opal Dark Briar": "235",
"Oracle of Karnon": "236",
"Oracle of Marud": "237",
"Orc": "238",
"Order of Autarkic Umbrage": "239",
"Order of Three": "240",
"Orphans": "452",
"Othmir": "241",
"Outcasts and Mutants": "242",
"Overlord Mata Muram": "432",
"Owlbears of the Moor": "505",
"Pack of Tomar": "243",
"Paebala": "244",
"Paladins of Gukta": "245",
"Paladins of Underfoot": "246",
"Paludal_Mushrooms": "490",
"Paludal_Underbulk": "491",
"Peace Keepers": "247",
"Phingel Autropos": "433",
"Phinigel Autropos": "248",
"Pickclaw Goblins": "249",
"Pirates of Gunthak": "250",
"Pirates of Iceclad": "251",
"Pirates of the Pine": "252",
"Pixie": "253",
"Pixtt": "254",
"Planar Collective": "455",
"Planes_Neutral": "488",
"Prexuz": "255",
"Priests of Innoruuk": "256",
"Priests of Life": "257",
"Priests of Marr": "258",
"Priests of Mischief": "259",
"Primordial Malice": "260",
"Prisoners of Justice": "261",
"Progeny": "262",
"Protectors of Growth": "263",
"Protectors of Gukta": "264",
"Protectors of Pine": "265",
"Qeynos Citizens": "434",
"QRG Protected Animals": "267",
"Queen Cristanos Thex": "268",
"Rallos Zek": "269",
"Rav": "270",
"Residents of Gloomingdeep": "476",
"Residents of Jaggedpine": "271",
"Residents of Karanas": "272",
"Riftseekers": "435",
"Rikkukin": "436",
"Ring of Scale": "273",
"Riptide Goblins": "274",
"Rogues of the White Rose": "275",
"Root of Innuruuk": "276",
"Rujarkian Slavers": "277",
"Rygorr Clan Snow Orcs": "278",
"Sabertooths of Blackburrow": "279",
"Sandworkers": "280",
"Sarnak Collective": "281",
"Scaled Mystics": "282",
"Scions of Dreadspire": "437",
"Scorchclaw Goblins": "438",
"Seru": "284",
"Servants of Aero": "285",
"Servants of Hydro": "286",
"Servants of Inferno": "287",
"Servants of Saryrn": "288",
"Servants of Terra": "289",
"Servants of Tunare": "290",
"Shadowed Men": "291",
"Shadowknights of Night Keep": "292",
"Shak Dratha": "293",
"Shamen of Justice": "294",
"Shamen of War": "295",
"Shei Vinitras": "296",
"Shik Nar": "297",
"Shoulders of Seru": "298",
"Shralok Orcs": "299",
"Silent Fist Clan": "300",
"Silla Herald": "496",
"Sirens of the Grotto": "301",
"Sky Talons": "439",
"Skytalons": "302",
"Snowfang Gnolls": "303",
"Soldiers of Tunare": "304",
"Solusek Mining Co": "305",
"Song Weavers": "306",
"Spider": "500",
"Spire Spirits": "388",
"Spirits of Katta Castellum": "307",
"Spirocs of Timorous": "308",
"Splitpaw Clan": "309",
"Sporali": "310",
"Sporali Collective": "440",
"Steel Warriors": "311",
"Steelslaves": "312",
"Stillmoon Acolytes": "441",
"Stone Hive Bixies": "313",
"Storm Guard": "314",
"Storm Guardians": "315",
"Storm Reapers": "316",
"Sustainers": "453",
"Swamp Giants of Kunark": "370",
"Swift Tails": "317",
"Syrik Iceblood": "318",
"Tarmok Tribe": "390",
"Taruun": "319",
"Temple Of Sol Ro": "442",
"Temple of Solusek Ro": "320",
"The Bloodtribe": "389",
"The Cral Ligi Clan": "321",
"The Dark Alliance": "443",
"The Dead": "322",
"The Forsaken": "323",
"The Grol Baku Clan": "324",
"The Guardians": "444",
"The HotWingz": "325",
"The Kromdek": "326",
"The Kromdul": "327",
"The Rainkeeper": "328",
"The Recuso": "329",
"The Sambata Tribe": "330",
"The Spurned": "331",
"The Tro Jeg Clan": "332",
"The Truth": "333",
"The Vas Ren Clan": "334",
"The_Angry_Sambata": "492",
"Thought Leeches": "335",
"Thrall of Kly": "336",
"Thunder Guardians": "445",
"Tirranun": "446",
"TizmakClan": "337",
"Traders of the Haven": "338",
"Trakanon": "339",
"Treants of Jaggedpine": "340",
"Tribe Vrodak": "341",
"True Spirit": "342",
"Trusik Tribe": "447",
"Tserrina Syl'Tor": "343",
"Tunare's Scouts": "283",
"Tunarean Court": "344",
"Ulthork": "345",
"Undead Frogloks of Guk": "346",
"Undead Residents of Kithicor": "381",
"Underbulks": "461",
"Unkempt Druids": "347",
"Unrest Inhabitants": "376",
"VahShir Crusaders": "348",
"Valdanov Zevfeer": "349",
"Validus Custodus": "350",
"Veeshan": "351",
"Velketor": "352",
"Venril Sathir": "353",
"Verish Mal": "456",
"VillagerRoom": "482",
"Vishimtar": "448",
"Volkara": "449",
"Volkara's Brood": "450",
"Vornol Transon": "354",
"Vox": "355",
"Warlord Ngrub": "473",
"Wayfarers Brotherhood": "356",
"WehateThelin": "470",
"Werewolf": "357",
"Whisperling": "358",
"Whistling Fist Brotherhood": "359",
"Wisps": "462",
"Witnesses of Hate": "393",
"Wizards of Gukta": "360",
"Wolves of the Moor": "504",
"Wolves of the North": "361",
"Yar`lir": "451",
"Yelinak": "362",
"Yunjo Slave Resistance": "363",
"Zazamoukh": "364",
"Zlandicar": "365",
"Zordakalicus Ragefire": "385",
"Zun'Muram": "502",
"Human": "506",
"Donovon":"507",
}
# NOTE: this table contains duplicate keys (e.g. "Befallen", "The Open Sea",
# "Muramite Proving Grounds"); in a dict literal the later entry silently
# overwrites the earlier one, so only one zone ID survives per name.
zonetable = {
"The Abysmal Sea": 279,
"The Acrylia Caverns": 154,
"The Plane of Sky": 71,
"Ak'Anon": 55,
"The Akheva Ruins": 179,
"Anguish, the Fallen Palace": 317,
"Designer Apprentice": 999,
"Arcstone, Isle of Spirits": 369,
"The Arena": 77,
"The Arena Two": 180,
"Art Testing Domain": 996,
"Ashengate, Reliquary of the Scale": 406,
"Jewel of Atiiki": 418,
"Aviak Village": 53,
"Barindu, Hanging Gardens": 283,
"Barren Coast": 422,
"The Barter Hall": 346,
"The Bazaar": 151,
"Befallen": 36,
"Befallen": 411,
"The Gorge of King Xorbb": 16,
"Temple of Bertoxxulous": 469,
"Blackburrow": 17,
"Blacksail Folly": 428,
"The Bloodfields": 301,
"Bloodmoon Keep": 445,
"Bastion of Thunder": 209,
"The Broodlands": 337,
"The Buried Sea": 423,
"The Burning Wood": 87,
"Butcherblock Mountains": 68,
"Cabilis East": 106,
"Cabilis West": 82,
"Dagnor's Cauldron": 70,
"Nobles' Causeway": 303,
"Accursed Temple of CazicThule": 48,
"Muramite Proving Grounds": 304,
"Muramite Proving Grounds": 305,
"Muramite Proving Grounds": 306,
"Muramite Proving Grounds": 307,
"Muramite Proving Grounds": 308,
"Muramite Proving Grounds": 309,
"The Howling Stones": 105,
"Chardok": 103,
"Chardok: The Halls of Betrayal": 277,
"The City of Mist": 90,
"Loading": 190,
"Cobaltscar": 117,
"The Crypt of Decay": 200,
"The Commonlands": 408,
"West Commonlands": 21,
"Corathus Creep": 365,
"Sporali Caverns": 366,
"The Corathus Mines": 367,
"Crescent Reach": 394,
"Crushbone": 58,
"Crypt of Shade": 449,
"The Crystal Caverns": 121,
"Crystallos, Lair of the Awakened": 446,
"Sunset Home": 26,
"The Crypt of Dalnir": 104,
"The Dawnshroud Peaks": 174,
"Deadbone Reef": 427,
"Lavaspinner's Lair": 341,
"Tirranun's Delve": 342,
"The Seething Wall": 373,
"The Devastation": 372,
"Direwind Cliffs": 405,
"Korafax, Home of the Riders": 470,
"Citadel of the Worldslayer": 471,
"The Hive": 354,
"The Hatchery": 355,
"The Cocoons": 356,
"Queen Sendaii`s Lair": 357,
"Dragonscale Hills": 442,
"Deepscar's Den": 451,
"The Ruined City of Dranik": 336,
"Catacombs of Dranik": 328,
"Catacombs of Dranik": 329,
"Catacombs of Dranik": 330,
"Dranik's Hollows": 318,
"Dranik's Hollows": 319,
"Dranik's Hollows": 320,
"Sewers of Dranik": 331,
"Sewers of Dranik": 332,
"Sewers of Dranik": 333,
"Dranik's Scar": 302,
"The Dreadlands": 86,
"Dreadspire Keep": 351,
"The Temple of Droga": 81,
"Dulak's Harbor": 225,
"Eastern Plains of Karana": 15,
"The Undershore": 362,
"Snarlstone Dens": 363,
"Eastern Wastes": 116,
"The Echo Caverns": 153,
"East Commonlands": 22,
"The Elddar Forest": 378,
"Tunare's Shrine": 379,
"The Emerald Jungle": 94,
"Erudin": 24,
"The Erudin Palace": 23,
"Erud's Crossing": 98,
"Marauders Mire": 130,
"Everfrost Peaks": 30,
"The Plane of Fear": 72,
"The Feerrott": 47,
"Northern Felwithe": 61,
"Southern Felwithe": 62,
"Ferubi, Forgotten Temple of Taelosia": 284,
"The Forgotten Halls": 998,
"The Field of Bone": 78,
"Firiona Vie": 84,
"Academy of Arcane Sciences": 385,
"Arena": 388,
"City Hall": 389,
"East Freeport": 382,
"Hall of Truth: Bounty": 391,
"Freeport Militia House: My Precious": 387,
"Freeport Sewers": 384,
"Temple of Marr": 386,
"Theater of the Tranquil": 390,
"West Freeport": 383,
"East Freeport": 10,
"North Freeport": 8,
"West Freeport": 9,
"Frontier Mountains": 92,
"Frostcrypt, Throne of the Shade King": 402,
"The Tower of Frozen Shadow": 111,
"The Fungus Grove": 157,
"The Greater Faydark": 54,
"The Great Divide": 118,
"Grieg's End": 163,
"Grimling Forest": 167,
"Grobb": 52,
"The Plane of Growth": 127,
"The Mechamatic Guardian": 447,
"Guild Hall": 345,
"Guild Lobby": 344,
"Deepest Guk: Cauldron of Lost Souls": 229,
"The Drowning Crypt": 234,
"The Ruins of Old Guk": 66,
"Deepest Guk: Ancient Aqueducts": 239,
"The Mushroom Grove": 244,
"Deepest Guk: The Curse Reborn": 249,
"Deepest Guk: Chapel of the Witnesses": 254,
"The Root Garden": 259,
"Deepest Guk: Accursed Sanctuary": 264,
"The City of Guk": 65,
"The Gulf of Gunthak": 224,
"Gyrospire Beza": 440,
"Gyrospire Zeka": 441,
"Halas": 29,
"Harbinger's Spire": 335,
"Plane of Hate": 76,
"The Plane of Hate": 186,
"Hate's Fury": 228,
"High Keep": 6,
"Highpass Hold": 5,
"Highpass Hold": 407,
"HighKeep": 412,
"Hills of Shade": 444,
"The Halls of Honor": 211,
"The Temple of Marr": 220,
"The Hole": 39,
"Hollowshade Moor": 166,
"The Iceclad Ocean": 110,
"Icefall Glacier": 400,
"Ikkinz, Chambers of Transcendence": 294,
"Ruins of Illsalin": 347,
"Illsalin Marketplace": 348,
"Temple of Korlach": 349,
"The Nargil Pits": 350,
"Inktu'Ta, the Unmasked Chapel": 296,
"Innothule Swamp": 46,
"The Innothule Swamp": 413,
"The Jaggedpine Forest": 181,
"Jardel's Hook": 424,
"Kael Drakkel": 113,
"Kaesora": 88,
"South Kaladim": 60,
"North Kaladim": 67,
"Karnor's Castle": 102,
"Katta Castellum": 160,
"Katta Castrum": 416,
"Kedge Keep": 64,
"Kerra Isle": 74,
"Kithicor Forest": 410,
"Kithicor Forest": 20,
"Kod'Taz, Broken Trial Grounds": 293,
"Korascian Warrens": 476,
"Kurn's Tower": 97,
"Lake of Ill Omen": 85,
"Lake Rathetear": 51,
"The Lavastorm Mountains": 27,
"Mons Letalis": 169,
"The Lesser Faydark": 57,
"Loading Zone": 184,
"New Loading Zone": 185,
"Loping Plains": 443,
"The Maiden's Eye": 173,
"Maiden's Grave": 429,
"Meldrath's Majestic Mansion": 437,
"Fortress Mechanotus": 436,
"Goru`kar Mesa": 397,
"Miragul's Menagerie: Silent Gallery": 232,
"Miragul's Menagerie: Frozen Nightmare": 237,
"The Spider Den": 242,
"Miragul's Menagerie: Hushed Banquet": 247,
"The Frosted Halls": 252,
"The Forgotten Wastes": 257,
"Miragul's Menagerie: Heart of the Menagerie": 262,
"The Morbid Laboratory": 267,
"The Theater of Imprisoned Horror": 271,
"Miragul's Menagerie: Grand Library": 275,
"The Plane of Mischief": 126,
"The Castle of Mistmoore": 59,
"Misty Thicket": 33,
"The Misty Thicket": 415,
"Mistmoore's Catacombs: Forlorn Caverns": 233,
"Mistmoore's Catacombs: Dreary Grotto": 238,
"Mistmoore's Catacombs: Struggles within the Progeny": 243,
"Mistmoore's Catacombs: Chambers of Eternal Affliction": 248,
"Mistmoore's Catacombs: Sepulcher of the Damned": 253,
"Mistmoore's Catacombs: Scion Lair of Fury": 258,
"Mistmoore's Catacombs: Cesspits of Putrescence": 263,
"Mistmoore's Catacombs: Aisles of Blood": 268,
"Mistmoore's Catacombs: Halls of Sanguinary Rites": 272,
"Mistmoore's Catacombs: Infernal Sanctuary": 276,
"Monkey Rock": 425,
"Blightfire Moors": 395,
"Marus Seru": 168,
"The Crypt of Nadox": 227,
"Najena": 44,
"Natimbi, the Broken Shores": 280,
"Dragon Necropolis": 123,
"Nedaria's Landing": 182,
"Nektropos": 28,
"The Nektulos Forest": 25,
"Shadowed Grove": 368,
"Neriak - Foreign Quarter": 40,
"Neriak - Commons": 41,
"Neriak - 3rd Gate": 42,
"Neriak Palace": 43,
"Netherbian Lair": 161,
"Nexus": 152,
"The Lair of Terris Thule": 221,
"The Northern Plains of Karana": 13,
"North Desert of Ro": 392,
"Northern Desert of Ro": 34,
"The Mines of Nurga": 107,
"Oasis of Marr": 37,
"Oceangreen Hills": 466,
"Oceangreen Village": 467,
"The Ocean of Tears": 409,
"Oggok": 49,
"BlackBurrow": 468,
"Old Bloodfields": 472,
"Old Commonlands": 457,
"City of Dranik": 474,
"Field of Scale": 452,
"Highpass Hold": 458,
"Kaesora Library": 453,
"Kaesora Hatchery": 454,
"Bloody Kithicor": 456,
"Kurn's Tower": 455,
"Ocean of Tears": 69,
"The Overthere": 93,
"Paineel": 75,
"The Paludal Caverns": 156,
"The Lair of the Splitpaw": 18,
"The Permafrost Caverns": 73,
"The Plane of Air": 215,
"The Plane of Disease": 205,
"The Plane of Earth": 218,
"The Plane of Earth": 222,
"The Plane of Fire": 217,
"The Plane of Innovation": 206,
"The Plane of Justice": 201,
"The Plane of Knowledge": 202,
"The Plane of Nightmares": 204,
"The Plane of Storms": 210,
"Drunder, the Fortress of Zek": 214,
"The Plane of Time": 219,
"The Plane of Time": 223,
"Torment, the Plane of Pain": 207,
"The Plane of Tranquility": 203,
"The Plane of Valor": 208,
"Plane of War": 213,
"The Plane of Water": 216,
"The Precipice of War": 473,
"Muramite Provinggrounds": 316,
"The Qeynos Aqueduct System": 45,
"The Western Plains of Karana": 12,
"South Qeynos": 1,
"North Qeynos": 2,
"The Qeynos Hills": 4,
"Qinimi, Court of Nihilia": 281,
"The Surefall Glade": 3,
"Qvic, Prayer Grounds of Calling": 295,
"Qvic, the Hidden Vault": 299,
"Sverag, Stronghold of Rage": 374,
"Razorthorn, Tower of Sullon Zek": 375,
"Rathe Council Chamber": 477,
"The Rathe Mountains": 50,
"Redfeather Isle": 430,
"Relic, the Artifact City": 370,
"Riftseekers' Sanctum": 334,
"Rivervale": 19,
"Riwwi, Coliseum of Games": 282,
"Blackfeather Roost": 398,
"The Rujarkian Hills: Bloodied Quarries": 230,
"The Rujarkian Hills: Halls of War": 235,
"The Rujarkian Hills: Wind Bridges": 240,
"The Rujarkian Hills: Prison Break": 245,
"The Rujarkian Hills: Drudge Hollows": 250,
"The Rujarkian Hills: Fortified Lair of the Taskmasters": 255,
"The Rujarkian Hills: Hidden Vale of Deceit": 260,
"The Rujarkian Hills: Blazing Forge ": 265,
"The Rujarkian Hills: Arena of Chance": 269,
"The Rujarkian Hills: Barracks of War": 273,
"The Liberated Citadel of Runnyeye": 11,
"The Scarlet Desert": 175,
"The Ruins of Sebilis": 89,
"Shadeweaver's Thicket": 165,
"Shadow Haven": 150,
"Shadowrest": 187,
"Shadow Spine": 364,
"The City of Shar Vahl": 155,
"The Open Sea": 435,
"The Open Sea": 431,
"The Open Sea": 432,
"The Open Sea": 433,
"The Open Sea": 434,
"S.H.I.P. Workshop": 439,
"Silyssar, New Chelsith": 420,
"Siren's Grotto": 125,
"The Skyfire Mountains": 91,
"Skylance": 371,
"Skyshrine": 114,
"The Sleeper's Tomb": 128,
"Sewers of Nihilia, Emanating Cre": 288,
"Sewers of Nihilia, Lair of Trapp": 286,
"Sewers of Nihilia, Purifying Pla": 287,
"Sewers of Nihilia, Pool of Sludg": 285,
"Solusek's Eye": 31,
"Nagafen's Lair": 32,
"The Caverns of Exile": 278,
"The Tower of Solusek Ro": 212,
"The Temple of Solusek Ro": 80,
"Solteris, the Throne of Ro": 421,
"The Southern Plains of Karana": 14,
"South Desert of Ro": 393,
"Southern Desert of Ro": 35,
"Sanctus Seru": 159,
"Ssraeshza Temple": 162,
"The Steam Factory": 438,
"Steamfont Mountains": 56,
"The Steamfont Mountains": 448,
"The Steppes": 399,
"Stillmoon Temple": 338,
"The Ascent": 339,
"The Stonebrunt Mountains": 100,
"Stone Hive": 396,
"Suncrest Isle": 426,
"Sunderock Springs": 403,
"The Swamp of No Hope": 83,
"Tacvi, The Broken Temple": 298,
"Takish-Hiz: Sunken Library": 231,
"Takish-Hiz: Shifting Tower": 236,
"Takish-Hiz: Fading Temple": 241,
"Takish-Hiz: Royal Observatory": 246,
"Takish-Hiz: River of Recollection": 251,
"Takish-Hiz: Sandfall Corridors": 256,
"Takish-Hiz: Balancing Chamber": 261,
"Takish-Hiz: Sweeping Tides": 266,
"Takish-Hiz: Antiquated Palace": 270,
"Ruins of Takish-Hiz": 376,
"The Root of Ro": 377,
"Takish-Hiz: Prismatic Corridors": 274,
"The Temple of Veeshan": 124,
"The Tenebrous Mountains": 172,
"Thalassius, the Coral Keep": 417,
"Theater of Blood": 380,
"Deathknell, Tower of Dissonance": 381,
"The Deep": 164,
"The Grey": 171,
"The Nest": 343,
"The Void": 459,
"The Void": 460,
"The Void": 461,
"The Void": 462,
"The Void": 463,
"The Void": 464,
"The Void": 465,
"Thundercrest Isles": 340,
"The City of Thurgadin": 115,
"Icewell Keep": 129,
"Timorous Deep": 96,
"Tipt, Treacherous Crags": 289,
"The Torgiran Mines": 226,
"Toskirakk": 475,
"Toxxulia Forest": 38,
"Toxxulia Forest": 414,
"Trakanon's Teeth": 95,
"EverQuest Tutorial": 183,
"The Mines of Gloomingdeep": 188,
"The Mines of Gloomingdeep": 189,
"The Twilight Sea": 170,
"Txevu, Lair of the Elite": 297,
"The Umbral Plains": 176,
"The Estate of Unrest": 63,
"Uqua, the Ocean God Chantry": 292,
"Valdeholm": 401,
"Veeshan's Peak": 108,
"Veksar": 109,
"Velketor's Labyrinth": 112,
"Vergalid Mines": 404,
"Vex Thal": 158,
"Vxed, the Crumbling Caverns": 290,
"The Wakening Land": 119,
"Wall of Slaughter": 300,
"The Warrens": 101,
"The Warsliks Woods": 79,
"Stoneroot Falls": 358,
"Prince's Manor": 359,
"Caverns of the Lost": 360,
"Lair of the Korlach": 361,
"The Western Wastes": 120,
"Yxtta, Pulpit of Exiles ": 291,
"Zhisza, the Shissar Sanctuary": 419,
"The Nektulos Forest": 25,
"Brell's Rest": 480,
"The Cooling Chamber": 483,
"Pellucid Grotto": 488,
"Arthicrex": 485,
"The Foundation": 486,
"The Underquarry": 482,
"Brell's Arena": 492,
"Volska's Husk": 489,
"The Convorteum": 491,
"The Library": 704,
"Morell's Castle": 707,
"Al'Kabor's Nightmare": 709,
"Erudin Burning": 706,
"The Feerrott": 700,
"The Grounds": 703,
"Miragul's Nightmare": 710,
"Sanctum Somnium": 708,
"Fear Itself": 711,
"House of Thule": 701,
"House of Thule, Upper Floors": 702,
"The Well": 705,
"Sunrise Hills": 712,
"Argath, Bastion of Illdaera": 724,
"Valley of Lunanyn": 725,
"Sarith, City of Tides": 726,
"Rubak Oseka, Temple of the Sea": 727,
"Beasts' Domain": 728,
"The Resplendent Temple": 729,
"Pillars of Alra": 730,
"Windsong Sanctuary": 731,
"Erillion, City of Bronze": 732,
"Sepulcher of Order": 733,
"Sepulcher East": 734,
"Sepulcher West": 735,
"Wedding Chapel": 493,
"Wedding Chapel": 494,
"Lair of the Risen": 495,
"The Bazaar": 151,
"Brell's Temple": 490,
"Fungal Forest": 481,
"Lichen Creep": 487,
"Kernagir, the Shining City": 484,
"The Breeding Grounds": 757,
"Chapterhouse of the Fallen": 760,
"The Crystal Caverns: Fragment of Fear": 756,
"East Wastes: Zeixshi-Kar's Awakening": 755,
"Evantil, the Vile Oak": 758,
"Grelleth's Palace, the Chateau of Filth": 759,
"Kael Drakkel: The King's Madness": 754,
"Shard's Landing": 752,
"Valley of King Xorbb": 753,
}
def factionsetname(item):
    """Build a compact faction-set identifier from a (name, hit value) pair.

    Spaces are stripped from the faction name and the minus sign from the
    hit value, e.g. ('Agents of Mistmoore', '-5') -> 'AgentsofMistmoore5'.
    """
    faction, value = item
    return faction.replace(' ', '') + value.replace('-', '')
def cleanmobname(name):
    """Return *name* with every space turned into an underscore for DB lookups."""
    return name.replace(' ', '_')
class FactionSet(object):
    """
    A named set of faction hits awarded by killing one kind of mob.

    name: name of the faction set (must be unique per set; equality and
        hashing are based on it alone)
    primary: primary faction ID (string)
    hits: faction hits, assumes a dict-like object mapping
        faction ID -> hit value (both strings)
    """
    def __init__(self, name, primid, hits):
        self.name = name
        self.primary = primid
        # Defensive copy: the caller (main) reuses and clears its hits dict.
        self.hits = hits.copy()

    def __repr__(self):
        return str((self.name, self.primary, self.hits))

    # factionsets[name].hits[key] == factionsets[name][key]
    def __getitem__(self, key):
        return self.hits[key]

    # Names need to be unique to the set for equality to make sense.
    def __eq__(self, other):
        return self.name == other.name

    # Defining __eq__ alone would make instances unhashable; keep hashing
    # consistent with equality (both based on name only).
    def __hash__(self):
        return hash(self.name)

    def __contains__(self, key):
        "Wrapper to `key in hits`."
        return key in self.hits

    def generate_sql(self):
        """Return SQL inserting this faction set and its hit entries.

        npc_value is 1 for negative hits and 0 otherwise, mirroring the
        hit's sign as the mob's own attitude flag.
        """
        statement = ('INSERT INTO npc_faction (name, primaryfaction) VALUES '
                     '(\'{}\', \'{}\');\n'.format(self.name, self.primary) +
                     'SELECT id INTO @setid FROM npc_faction WHERE name = '
                     '\'{}\' LIMIT 1;\n'.format(self.name))
        for hit in self.hits:
            statement += ('INSERT INTO npc_faction_entries '
                          '(npc_faction_id, faction_id, value, npc_value) '
                          'VALUES (@setid, \'{}\', \'{}\', \'{}\');\n'
                          .format(hit, self.hits[hit],
                                  1 if int(self.hits[hit]) < 0 else 0))
        return statement
class Mob(object):
    """
    A slain mob and the faction set its kill produced.

    name: name of mob as it appeared in the slain message
    zone: zone ID for the mob. NOTE(review): main() falls back to the raw
        zone *name* (a string) when the zone is missing from zonetable,
        which would break generate_sql's id arithmetic -- confirm inputs.
    faction: faction set name (used as an SQL variable, @<name>)
    """
    def __init__(self, name, zone, faction):
        self.name = name
        self.zone = zone
        self.faction = faction

    def __repr__(self):
        return str((self.name, self.zone, self.faction))

    # A mob is identified by its name within a zone; faction is payload.
    def __eq__(self, other):
        return self.name == other.name and self.zone == other.zone

    # Defining __eq__ alone would make instances unhashable; keep hashing
    # consistent with equality.
    def __hash__(self):
        return hash((self.name, self.zone))

    def generate_sql(self):
        """Return SQL pointing this mob's npc_types rows at its faction set.

        The id range filter assumes PEQ's convention of npc ids being
        zone_id * 1000 .. zone_id * 1000 + 999.
        """
        return ('UPDATE npc_types SET npc_faction_id = @{} WHERE '
                'name RLIKE \'{}\' AND id >= {} AND id <= {};'
                .format(self.faction, cleanmobname(self.name), self.zone * 1000,
                        self.zone * 1000 + 999))
def main(filename):
    """Process an eqlog file and print SQL to update mob factions.

    Tracks the current zone from "You have entered" messages, accumulates
    faction hits, and on each slain message attributes the accumulated hits
    (as a FactionSet) to the slain mob. Mobs killed with no faction hits
    are reported as comments (likely pets).

    Returns 0 on success; exits with -1 if *filename* does not exist.
    """
    if not os.path.exists(filename):
        print(filename + ' not found')
        sys.exit(-1)
    pfaction = re.compile(r'\[.*\] Your faction standing with (.*) has been '
                          r'adjusted by (.*)\.')
    pslain1 = re.compile(r'\[.*\] You have slain (.*)!')
    pslain2 = re.compile(r'\[.*\] (.*) has been slain by .*!')
    penter = re.compile(r'\[.*\] You have entered (.*)\.')
    factions = {}  # mob name: Mob object
    factionsets = {}  # set name: FactionSet object
    hits = collections.OrderedDict()  # faction ID: hit value
    nohits = []  # mobs slain with no faction hits
    setname = None
    primary = None
    zone = None
    # `with` guarantees the log is closed even if parsing raises.
    with open(filename, 'r') as eqlog:
        for line in eqlog:
            m = penter.match(line)
            if m:
                # Skip "You have entered ..." lines that are really PvP-area
                # or levitation-effect messages, not zone changes.
                if not re.search('PvP|levitation', line):
                    # Fall back to the raw zone name when it is unknown.
                    zone = zonetable.get(m.group(1), m.group(1))
                continue
            m = pfaction.match(line)
            if m:
                # First hit of a kill group names the set / primary faction.
                if not setname and not hits:
                    setname = factionsetname(m.groups())
                    primary = factiontable[m.group(1)]
                hits[factiontable[m.group(1)]] = m.group(2)
                continue
            m = pslain1.match(line)
            if not m:
                m = pslain2.match(line)
            if m:
                # hits will be empty if no faction hits, so we skip it
                if m.group(1) not in factions and hits:
                    factions[m.group(1)] = Mob(m.group(1), zone, setname)
                    if setname not in factionsets:
                        factionsets[setname] = FactionSet(setname, primary,
                                                          hits)
                elif not hits:
                    nohits.append(m.group(1))
                hits.clear()
                setname = None
                primary = None
                continue
    print('-- Faction set entries')
    for fset in factionsets.values():
        print(fset.generate_sql())
    print('-- Mob entries')
    for setname in factionsets:
        print('SELECT id INTO @{0} FROM npc_faction WHERE name = \'{0}\' '
              'LIMIT 1;'.format(setname))
    print()
    # The zone limiting assumes the mob ids follow PEQ's scheme.
    for mob in factions.values():
        print(mob.generate_sql())
    # This might output some pets
    if nohits:
        print('-- some of these might be pets')
        for mob in nohits:
            print('-- no faction hit {}'.format(mob))
    return 0
if __name__ == '__main__':
    # Usage: python faction.py <eqlog file>
    if len(sys.argv) != 2:
        print('Incorrect arguments. python ' + sys.argv[0] + ' filename')
        exit(-1)
    main(sys.argv[1])
|
mackal/faction.py
|
faction3.py
|
Python
|
gpl-3.0
| 35,996
|
[
"CRYSTAL"
] |
3ff706a08803752823f369f7536a476e173f20303ca77523475fc50eea930399
|
#!/usr/bin/python
'''This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
import time
import RPi.GPIO as GPIO
# BCM pin numbers of the reed-switch vehicle sensors, one per approach.
GPIO_inputs = [26,19,13,6,5,11]
# BCM pin numbers driving the traffic-light lamps.
GPIO_outputs = [20,21,12,7,8,25,24,23,18,2,3,4,17,27,22,10,9,14] # Pin 14 not used due to hardware failure on my board.
# Settings
max_timeout = 20 # Maximum amount of time traffic is permitted to wait.
green_time = 8 # Initial time a light is green.
amber_time = 3 # Time a light stays amber before going red.
extend = 3 # Time green light is extended by if cars still present
max_iteration = 3 # Maximum amount of times green light is extended
bounce = 100 # Reed switch debounce time in ms (RPi.GPIO bouncetime). Must be greater than 0.
# Timers
t_now = 0
red1time = 0
red2time = 0
red3time = 0
red4time = 0
red5time = 0
red6time = 0
# States: 0 = Green, 1 = Amber, 2 = Red
state1 = 0
state2 = 0
state3 = 0
state4 = 0
state5 = 0
state6 = 0
# Sensor states (booleans, updated by sensor_event): False = free, True = occupied
sense1 = False
sense2 = False
sense3 = False
sense4 = False
sense5 = False
sense6 = False
def setup():
    """Configure all GPIO pins and attach the vehicle-sensor edge callbacks."""
    GPIO.setmode(GPIO.BCM)  # Use BCM chip numbering
    # Sensors are inputs pulled low; lamp drivers are plain outputs.
    GPIO.setup(GPIO_inputs, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    GPIO.setup(GPIO_outputs, GPIO.OUT)
    # One BOTH-edge listener per reed switch so arrivals and departures
    # are both reported to sensor_event.
    for sensor_pin in GPIO_inputs:
        GPIO.add_event_detect(sensor_pin, GPIO.BOTH, callback=sensor_event,
                              bouncetime=bounce)
def sensor_event(channel):
    """GPIO edge callback: record whether a car is sitting on *channel*'s sensor.

    Updates the module-level sense1..sense6 flag for the light that owns the
    channel, and logs the transition.
    """
    channel_to_light = {26: 1, 19: 2, 13: 3, 6: 4, 5: 5, 11: 6}
    light_no = channel_to_light.get(channel)
    if GPIO.input(channel):  # Sensor high: vehicle present
        if light_no is not None:
            globals()['sense%d' % light_no] = True
        print("Car detected: ", channel)
    else:  # Sensor low: vehicle gone
        if light_no is not None:
            globals()['sense%d' % light_no] = False
        print("Car no longer detected: ", channel)
def priority():
    """Run one full cycle of the four traffic phases.

    Each phase turns one compatible (non-conflicting) set of lights green,
    holds them for ``green_time`` — extending up to ``max_iteration`` times
    while any sensor in the set still reports a car and no red light has
    exceeded ``max_timeout`` (checked via ``limit()``) — then cycles the
    SAME set through amber back to red.

    Bug fix: the original ambered/reddened a different set of lights than it
    had greened in phases 2 and 4 (copy-paste error), leaving lights 4/5
    stuck green after phase 2 and lights 5/6 stuck amber after phase 4.
    """
    makeway()
    # One tuple of compatible lights per phase; the sensed set is the same
    # as the greened set, exactly as in the original's while-conditions.
    phases = ((1, 3, 5), (3, 4, 5), (1, 2, 3), (1, 5, 6))
    for lights in phases:
        for light_no in lights:
            green(light_no)
        time.sleep(green_time)
        iteration = 0
        # Extend the green while any sensor in this phase is occupied,
        # unless a waiting red light forces a preemption (limit() -> False).
        while (any(globals()['sense%d' % light_no] for light_no in lights)
               and iteration < max_iteration and limit()):
            time.sleep(extend)
            print("Extending time requested by sensor")
            iteration += 1
        for light_no in lights:
            amber(light_no, 0)
        time.sleep(amber_time)
        for light_no in lights:
            red(light_no, 0)
def limit():
    """Preempt in favour of any light that has been red for >= max_timeout.

    Returns False when a preemption happened (callers should abandon their
    current phase), True when no light has waited too long.
    """
    now = time.time()
    for light_no in range(1, 7):
        waited = now - globals()['red%dtime' % light_no]
        if waited >= max_timeout:
            print("Preempting light: " + str(light_no))
            # Clear the junction, give the starved light a full green
            # interval, then clear again before returning control.
            makeway()
            green(light_no)
            time.sleep(green_time)
            makeway()
            return False
    return True
def red(n, t=3):
    """Switch light *n* to red, passing through amber for *t* seconds if it
    is currently green. Records the red-transition timestamp used by limit().

    A light that is already red is left untouched (silent no-op).
    """
    # (red_pin, amber_pin, green_pin) for each light.
    lamp_pins = {1: (14, 20, 21), 2: (8, 7, 12), 3: (23, 24, 25),
                 4: (2, 3, 18), 5: (4, 17, 27), 6: (22, 10, 9)}
    if n in lamp_pins:
        current = globals()['state%d' % n]
        if current == 0:
            # Green lights must pass through amber before going red.
            amber(n, t)
        elif current != 1:
            # Already red (state 2): nothing to do, and no log line either.
            return
        red_pin, amber_pin, green_pin = lamp_pins[n]
        GPIO.output(red_pin, True)
        GPIO.output(amber_pin, False)
        GPIO.output(green_pin, False)
        globals()['red%dtime' % n] = time.time()
        globals()['state%d' % n] = 2
    print("Light change: Red ", n)
def amber(n, t=3):
    """Switch light *n* to amber and hold it for *t* seconds."""
    # (red_pin, amber_pin, green_pin) for each light.
    lamp_pins = {1: (14, 20, 21), 2: (8, 7, 12), 3: (23, 24, 25),
                 4: (2, 3, 18), 5: (4, 17, 27), 6: (22, 10, 9)}
    if n in lamp_pins:
        red_pin, amber_pin, green_pin = lamp_pins[n]
        GPIO.output(red_pin, False)
        GPIO.output(amber_pin, True)
        GPIO.output(green_pin, False)
        globals()['state%d' % n] = 1
        time.sleep(t)
    print("Light change: Amber ", n)
def green(n):
    """Switch light *n* to green immediately (no intermediate phase)."""
    # (red_pin, amber_pin, green_pin) for each light.
    lamp_pins = {1: (14, 20, 21), 2: (8, 7, 12), 3: (23, 24, 25),
                 4: (2, 3, 18), 5: (4, 17, 27), 6: (22, 10, 9)}
    if n in lamp_pins:
        red_pin, amber_pin, green_pin = lamp_pins[n]
        GPIO.output(red_pin, False)
        GPIO.output(amber_pin, False)
        GPIO.output(green_pin, True)
        globals()['state%d' % n] = 0
    print("Light change: Green ", n)
def off(n, t=3):
    """Turn all lamps of light *n* off for *t* seconds (lamp-test helper).

    The state is recorded as 1 (amber) so that a later red() call skips the
    amber phase, matching the original behaviour.
    """
    # (red_pin, amber_pin, green_pin) for each light.
    lamp_pins = {1: (14, 20, 21), 2: (8, 7, 12), 3: (23, 24, 25),
                 4: (2, 3, 18), 5: (4, 17, 27), 6: (22, 10, 9)}
    if n in lamp_pins:
        for pin in lamp_pins[n]:
            GPIO.output(pin, False)
        globals()['state%d' % n] = 1
        time.sleep(t)
    print("Light change: Off ", n)
def getstate(a):
    """Return light *a*'s state (0 green, 1 amber, 2 red), or None if *a*
    is not a valid light number."""
    if a in (1, 2, 3, 4, 5, 6):
        return globals()['state%d' % a]
    return None
def init(): # Initialization program
    # Lamp test on startup: everything off, then all amber for 3 s,
    # then settle every light on red before normal operation begins.
    for a in range(1, 7):
        off(a, 0)
    for a in range(1, 7):
        amber(a, 0)
    time.sleep(3)
    for a in range(1, 7):
        red(a)
def makeway(): # Clear junction of traffic
    # Drop every currently-green light to amber without a per-light delay,
    # then share one 3 s amber interval across all of them.
    go_sleep = False
    for a in range(1, 7):
        if getstate(a) == 0:
            amber(a, 0)
            go_sleep = True
    if go_sleep:
        time.sleep(3)
    # Everything goes red; 2 s all-red gap lets the junction empty.
    for a in range(1, 7):
        red(a)
    time.sleep(2)
try:
    setup() # Hardware setup
    init() # Go into service
    while True: # Main program loop
        limit()
        priority()
        time.sleep(0.5)
finally:
    GPIO.cleanup() # Always release the GPIO pins, even on Ctrl-C or a crash
|
Thymo-/PiTraffic
|
pitraffic.py
|
Python
|
gpl-3.0
| 12,759
|
[
"Amber"
] |
3bc30025f859aada5979bc0ebeacf648f8c24d9eaaf09639d7175b170ec0c5ab
|
import os
import sys
from ase import Atom
from gpaw import GPAW
from gpaw.cluster import Cluster
from gpaw.test import equal
# Restart-file regression test: the PBE energy and the non-self-consistent
# TPSS (MGGA) correction must agree whether the calculation is restarted
# from a file with wavefunctions (fwfname) or without them (fname).
fname='H2_PBE.gpw'
fwfname='H2_wf_PBE.gpw'
txt = None
# write first if needed
try:
    c = GPAW(fname, txt=txt)
    c = GPAW(fwfname, txt=txt)
except:
    # NOTE(review): bare except deliberately treats any read failure as
    # "restart files missing" and regenerates them — consider narrowing.
    s = Cluster([Atom('H'), Atom('H', [0,0,1])])
    s.minimal_box(3.)
    c = GPAW(xc='PBE', h=.3, convergence={'density':1e-4, 'eigenstates':1e-6})
    c.calculate(s)
    c.write(fname)
    c.write(fwfname, 'all')
# full information
c = GPAW(fwfname, txt=txt)
E_PBE = c.get_potential_energy()
try: # number of iterations needed in restart
    niter_PBE = c.get_number_of_iterations()
except: pass
dE = c.get_xc_difference('TPSS')
E_1 = E_PBE + dE
print "E PBE, TPSS=", E_PBE, E_1
# no wfs
c = GPAW(fname, txt=txt)
E_PBE_no_wfs = c.get_potential_energy()
try: # number of iterations needed in restart
    niter_PBE_no_wfs = c.get_number_of_iterations()
except: pass
dE = c.get_xc_difference('TPSS')
E_2 = E_PBE_no_wfs + dE
print "E PBE, TPSS=", E_PBE_no_wfs, E_2
print "diff=", E_1 - E_2
# Both restart paths must give the same TPSS total energy within 5 meV.
assert abs(E_1 - E_2) < 0.005
energy_tolerance = 0.00008
niter_tolerance = 0
# Reference values pin the absolute energies against regressions.
equal(E_PBE, -5.33901, energy_tolerance)
equal(E_PBE_no_wfs, -5.33901, energy_tolerance)
equal(E_1, -5.57685, energy_tolerance)
equal(E_2, -5.57685, energy_tolerance)
|
robwarm/gpaw-symm
|
gpaw/test/mgga_restart.py
|
Python
|
gpl-3.0
| 1,316
|
[
"ASE",
"GPAW"
] |
8232fea05d1d070a225d955052581ce1cbe5fd2e1916dc756b5847cb0c336882
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os.path
import re
import shutil
import sys
import textwrap
import time
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections
)
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
# Config keys recognised per [galaxy_server.<name>] ini section (or the
# matching ANSIBLE_GALAXY_SERVER_* env vars); the bool marks whether the
# key is required.
SERVER_DEF = [
    ('url', True),
    ('username', False),
    ('password', False),
    ('token', False),
    ('auth_url', False),
    ('v3', False),
    ('validate_certs', False),
    ('client_id', False),
]
def with_collection_artifacts_manager(wrapped_method):
    """Inject an artifacts manager if not passed explicitly.

    This decorator constructs a ConcreteArtifactsManager and maintains
    the related temporary directory auto-cleanup around the target
    method invocation.
    """
    import functools

    # functools.wraps preserves the wrapped method's __name__/__doc__,
    # which the original wrapper clobbered (hurting help() and tracebacks).
    @functools.wraps(wrapped_method)
    def method_wrapper(*args, **kwargs):
        # Caller supplied a manager explicitly -- just delegate.
        if 'artifacts_manager' in kwargs:
            return wrapped_method(*args, **kwargs)

        # Otherwise build one backed by a temp dir that is cleaned up
        # automatically when the wrapped call returns.
        with ConcreteArtifactsManager.under_tmpdir(
                C.DEFAULT_LOCAL_TMP,
                validate_certs=not context.CLIARGS['ignore_certs'],
        ) as concrete_artifact_cm:
            kwargs['artifacts_manager'] = concrete_artifact_cm
            return wrapped_method(*args, **kwargs)
    return method_wrapper
def _display_header(path, h1, h2, w1=10, w2=7):
    """Print a two-column table header for *path* with dashed underlines."""
    # Each underline is at least as wide as its header text or the fixed
    # column width, whichever is larger.
    rule1 = '-' * max(len(h1), w1)
    rule2 = '-' * max(len(h2), w2)
    display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
        path,
        h1,
        h2,
        rule1,
        rule2,
        cwidth=w1,
        vwidth=w2,
    ))
def _display_role(gr):
    """Print one installed role as '- <name>, <version>'."""
    info = gr.install_info
    version = info.get("version", None) if info else None
    # Any falsy version (missing install info, empty string, None) is
    # rendered as the placeholder text.
    if not version:
        version = "(unknown version)"
    display.display("- %s, %s" % (gr.name, version))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
    """Print one collection row, padding each column to at least its header width."""
    name_width = max(cwidth, min_cwidth)
    ver_width = max(vwidth, min_vwidth)
    display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
        fqcn=to_text(collection.fqcn),
        version=collection.ver,
        cwidth=name_width,
        vwidth=ver_width,
    ))
def _get_collection_widths(collections):
    """Return (widest fqcn length, widest version length) over *collections*.

    Accepts a single collection object or any iterable of them.
    """
    if not is_iterable(collections):
        collections = (collections, )
    fqcn_length = max(len(to_text(c.fqcn)) for c in collections)
    version_length = max(len(to_text(c.ver)) for c in collections)
    return fqcn_length, version_length
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
    def __init__(self, args):
        """Normalise raw argv for backwards compatibility and set up CLI state.

        ``args`` is the full argv list; a legacy invocation that names no
        'role'/'collection' type gets 'role' injected as an implicit subcommand.
        """
        self._raw_args = args
        self._implicit_role = False
        if len(args) > 1:
            # Inject role into sys.argv[1] as a backwards compatibility step
            if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
                # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
                # Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
                idx = 2 if args[1].startswith('-v') else 1
                args.insert(idx, 'role')
                self._implicit_role = True
            # since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
            if args[1:3] == ['role', 'login']:
                display.error(
                    "The login command was removed in late 2020. An API key is now required to publish roles or collections "
                    "to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
                    "ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
                    "command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
                sys.exit(1)
        # Populated lazily in run(); one GalaxyAPI per configured server.
        self.api_servers = []
        self.galaxy = None
        self._api = None
        super(GalaxyCLI, self).__init__(args)
    def init_parser(self):
        ''' create an options parser for bin/ansible

        Builds the TYPE (role/collection) subparsers and registers every
        action subcommand, sharing common argument groups via ``parents``.
        '''
        super(GalaxyCLI, self).init_parser(
            desc="Perform various Role and Collection related operations.",
        )
        # Common arguments that apply to more than 1 action
        common = opt_help.argparse.ArgumentParser(add_help=False)
        common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
        common.add_argument('--token', '--api-key', dest='api_key',
                            help='The Ansible Galaxy API key which can be found at '
                                 'https://galaxy.ansible.com/me/preferences.')
        common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
                            default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
        opt_help.add_verbosity_options(common)
        force = opt_help.argparse.ArgumentParser(add_help=False)
        force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
                           help='Force overwriting an existing role or collection')
        github = opt_help.argparse.ArgumentParser(add_help=False)
        github.add_argument('github_user', help='GitHub username')
        github.add_argument('github_repo', help='GitHub repository')
        offline = opt_help.argparse.ArgumentParser(add_help=False)
        offline.add_argument('--offline', dest='offline', default=False, action='store_true',
                             help="Don't query the galaxy API when creating roles")
        default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
        roles_path = opt_help.argparse.ArgumentParser(add_help=False)
        roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
                                default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
                                help='The path to the directory containing your roles. The default is the first '
                                     'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
        collections_path = opt_help.argparse.ArgumentParser(add_help=False)
        collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
                                      default=AnsibleCollectionConfig.collection_paths,
                                      action=opt_help.PrependListAction,
                                      help="One or more directories to search for collections in addition "
                                           "to the default COLLECTIONS_PATHS. Separate multiple paths "
                                           "with '{0}'.".format(os.path.pathsep))
        cache_options = opt_help.argparse.ArgumentParser(add_help=False)
        cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
                                   default=False, help='Clear the existing server response cache.')
        cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
                                   help='Do not use the server response cache.')
        # Add sub parser for the Galaxy role type (role or collection)
        type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
        type_parser.required = True
        # Add sub parser for the Galaxy collection actions
        collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
        collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
        collection_parser.required = True
        self.add_download_options(collection_parser, parents=[common, cache_options])
        self.add_init_options(collection_parser, parents=[common, force])
        self.add_build_options(collection_parser, parents=[common, force])
        self.add_publish_options(collection_parser, parents=[common])
        self.add_install_options(collection_parser, parents=[common, force, cache_options])
        self.add_list_options(collection_parser, parents=[common, collections_path])
        self.add_verify_options(collection_parser, parents=[common, collections_path])
        # Add sub parser for the Galaxy role actions
        role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
        role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
        role_parser.required = True
        self.add_init_options(role_parser, parents=[common, force, offline])
        self.add_remove_options(role_parser, parents=[common, roles_path])
        self.add_delete_options(role_parser, parents=[common, github])
        self.add_list_options(role_parser, parents=[common, roles_path])
        self.add_search_options(role_parser, parents=[common])
        self.add_import_options(role_parser, parents=[common, github])
        self.add_setup_options(role_parser, parents=[common, roles_path])
        self.add_info_options(role_parser, parents=[common, roles_path, offline])
        self.add_install_options(role_parser, parents=[common, force, roles_path])
    def add_download_options(self, parser, parents=None):
        """Register the 'download' subcommand (collections only)."""
        download_parser = parser.add_parser('download', parents=parents,
                                            help='Download collections and their dependencies as a tarball for an '
                                                 'offline install.')
        download_parser.set_defaults(func=self.execute_download)
        download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
        download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                     help="Don't download collection(s) listed as dependencies.")
        download_parser.add_argument('-p', '--download-path', dest='download_path',
                                     default='./collections',
                                     help='The directory to download the collections to.')
        download_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                     help='A file containing a list of collections to be downloaded.')
        download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                     help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
    def add_init_options(self, parser, parents=None):
        """Register the 'init' subcommand for either roles or collections.

        The galaxy type is inferred from the parent subparser's metavar.
        """
        galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
        init_parser = parser.add_parser('init', parents=parents,
                                        help='Initialize new {0} with the base structure of a '
                                             '{0}.'.format(galaxy_type))
        init_parser.set_defaults(func=self.execute_init)
        init_parser.add_argument('--init-path', dest='init_path', default='./',
                                 help='The path in which the skeleton {0} will be created. The default is the '
                                      'current working directory.'.format(galaxy_type))
        init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
                                 default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
                                 help='The path to a {0} skeleton that the new {0} should be based '
                                      'upon.'.format(galaxy_type))
        obj_name_kwargs = {}
        if galaxy_type == 'collection':
            # Collection names are validated eagerly at parse time.
            obj_name_kwargs['type'] = validate_collection_name
        init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
                                 **obj_name_kwargs)
        if galaxy_type == 'role':
            init_parser.add_argument('--type', dest='role_type', action='store', default='default',
                                     help="Initialize using an alternate role type. Valid types include: 'container', "
                                          "'apb' and 'network'.")
    def add_remove_options(self, parser, parents=None):
        """Register the 'remove' subcommand (roles only)."""
        remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
        remove_parser.set_defaults(func=self.execute_remove)
        remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
    def add_delete_options(self, parser, parents=None):
        """Register the 'delete' subcommand (server-side role removal)."""
        delete_parser = parser.add_parser('delete', parents=parents,
                                          help='Removes the role from Galaxy. It does not remove or alter the actual '
                                               'GitHub repository.')
        delete_parser.set_defaults(func=self.execute_delete)
    def add_list_options(self, parser, parents=None):
        """Register the 'list' subcommand for roles or collections."""
        galaxy_type = 'role'
        if parser.metavar == 'COLLECTION_ACTION':
            galaxy_type = 'collection'
        list_parser = parser.add_parser('list', parents=parents,
                                        help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
        list_parser.set_defaults(func=self.execute_list)
        list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
        if galaxy_type == 'collection':
            # Output format selection only exists for collections.
            list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
                                     help="Format to display the list of collections in.")
    def add_search_options(self, parser, parents=None):
        """Register the 'search' subcommand (roles only)."""
        search_parser = parser.add_parser('search', parents=parents,
                                          help='Search the Galaxy database by tags, platforms, author and multiple '
                                               'keywords.')
        search_parser.set_defaults(func=self.execute_search)
        search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
        search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
        search_parser.add_argument('--author', dest='author', help='GitHub username')
        search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
    def add_import_options(self, parser, parents=None):
        """Register the 'import' subcommand (roles only)."""
        import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
        import_parser.set_defaults(func=self.execute_import)
        import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                                   help="Don't wait for import results.")
        import_parser.add_argument('--branch', dest='reference',
                                   help='The name of a branch to import. Defaults to the repository\'s default branch '
                                        '(usually master)')
        import_parser.add_argument('--role-name', dest='role_name',
                                   help='The name the role should have, if different than the repo name')
        import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
                                   help='Check the status of the most recent import request for given github_'
                                        'user/github_repo.')
    def add_setup_options(self, parser, parents=None):
        """Register the 'setup' subcommand (Galaxy/source integrations)."""
        setup_parser = parser.add_parser('setup', parents=parents,
                                         help='Manage the integration between Galaxy and the given source.')
        setup_parser.set_defaults(func=self.execute_setup)
        setup_parser.add_argument('--remove', dest='remove_id', default=None,
                                  help='Remove the integration matching the provided ID value. Use --list to see '
                                       'ID values.')
        setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
                                  help='List all of your integrations.')
        setup_parser.add_argument('source', help='Source')
        setup_parser.add_argument('github_user', help='GitHub username')
        setup_parser.add_argument('github_repo', help='GitHub repository')
        setup_parser.add_argument('secret', help='Secret')
    def add_info_options(self, parser, parents=None):
        """Register the 'info' subcommand (roles only)."""
        info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
        info_parser.set_defaults(func=self.execute_info)
        info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
    def add_verify_options(self, parser, parents=None):
        """Register the 'verify' subcommand (collections only)."""
        galaxy_type = 'collection'
        verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
                                          'found on the server and the installed copy. This does not verify dependencies.')
        verify_parser.set_defaults(func=self.execute_verify)
        verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The collection(s) name or '
                                   'path/url to a tar.gz collection artifact. This is mutually exclusive with --requirements-file.')
        verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                   help='Ignore errors during verification and continue with the next specified collection.')
        verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
                                   help='Validate collection integrity locally without contacting server for '
                                        'canonical manifest hash.')
        verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                   help='A file containing a list of collections to be verified.')
    def add_install_options(self, parser, parents=None):
        """Register the 'install' subcommand for roles or collections.

        Help texts and extra options differ per galaxy type, which is
        inferred from the parent subparser's metavar.
        """
        galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
        args_kwargs = {}
        if galaxy_type == 'collection':
            args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
                                  'mutually exclusive with --requirements-file.'
            ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
                                 'collection. This will not ignore dependency conflict errors.'
        else:
            args_kwargs['help'] = 'Role name, URL or tar file'
            ignore_errors_help = 'Ignore errors and continue with the next specified role.'
        install_parser = parser.add_parser('install', parents=parents,
                                           help='Install {0}(s) from file(s), URL(s) or Ansible '
                                                'Galaxy'.format(galaxy_type))
        install_parser.set_defaults(func=self.execute_install)
        install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
        install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                    help=ignore_errors_help)
        # --no-deps and --force-with-deps are contradictory, so argparse
        # enforces mutual exclusion.
        install_exclusive = install_parser.add_mutually_exclusive_group()
        install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                       help="Don't download {0}s listed as dependencies.".format(galaxy_type))
        install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
                                       help="Force overwriting an existing {0} and its "
                                            "dependencies.".format(galaxy_type))
        if galaxy_type == 'collection':
            install_parser.add_argument('-p', '--collections-path', dest='collections_path',
                                        default=self._get_default_collection_path(),
                                        help='The path to the directory containing your collections.')
            install_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                        help='A file containing a list of collections to be installed.')
            install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                        help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
            install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
                                        help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
        else:
            install_parser.add_argument('-r', '--role-file', dest='requirements',
                                        help='A file containing a list of roles to be installed.')
            install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                                        default=False,
                                        help='Use tar instead of the scm archive option when packaging the role.')
    def add_build_options(self, parser, parents=None):
        """Register the 'build' subcommand (collections only)."""
        build_parser = parser.add_parser('build', parents=parents,
                                         help='Build an Ansible collection artifact that can be published to Ansible '
                                              'Galaxy.')
        build_parser.set_defaults(func=self.execute_build)
        build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
                                  help='Path to the collection(s) directory to build. This should be the directory '
                                       'that contains the galaxy.yml file. The default is the current working '
                                       'directory.')
        build_parser.add_argument('--output-path', dest='output_path', default='./',
                                  help='The path in which the collection is built to. The default is the current '
                                       'working directory.')
    def add_publish_options(self, parser, parents=None):
        """Register the 'publish' subcommand (collections only)."""
        publish_parser = parser.add_parser('publish', parents=parents,
                                           help='Publish a collection artifact to Ansible Galaxy.')
        publish_parser.set_defaults(func=self.execute_publish)
        publish_parser.add_argument('args', metavar='collection_path',
                                    help='The path to the collection tarball to publish.')
        publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                                    help="Don't wait for import validation results.")
        publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
                                    help="The time to wait for the collection import process to finish.")
    def post_process_args(self, options):
        """Finish option processing and propagate verbosity to the display."""
        options = super(GalaxyCLI, self).post_process_args(options)
        display.verbosity = options.verbosity
        return options
    def run(self):
        """Entry point after argument parsing.

        Builds the Galaxy context, assembles the list of Galaxy API servers
        from configuration (C.GALAXY_SERVER_LIST) and command-line overrides,
        then dispatches to the sub-command handler stored in CLIARGS['func'].
        """
        super(GalaxyCLI, self).run()

        self.galaxy = Galaxy()

        def server_config_def(section, key, required):
            # Build a config definition for one [galaxy_server.<section>] ini
            # key (plus its matching ANSIBLE_GALAXY_SERVER_* env var) so it
            # can be registered with the config manager dynamically below.
            return {
                'description': 'The %s of the %s Galaxy server' % (key, section),
                'ini': [
                    {
                        'section': 'galaxy_server.%s' % section,
                        'key': key,
                    }
                ],
                'env': [
                    {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
                ],
                'required': required,
            }

        # --ignore-certs only applies when a server does not set its own
        # validate_certs value.
        validate_certs_fallback = not context.CLIARGS['ignore_certs']
        galaxy_options = {}
        for optional_key in ['clear_response_cache', 'no_cache']:
            if optional_key in context.CLIARGS:
                galaxy_options[optional_key] = context.CLIARGS[optional_key]

        config_servers = []

        # Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
        server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
        for server_priority, server_key in enumerate(server_list, start=1):
            # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
            # section [galaxy_server.<server>] for the values url, username, password, and token.
            config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in SERVER_DEF)
            defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
            C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)

            server_options = C.config.get_plugin_options('galaxy_server', server_key)
            # auth_url is used to create the token, but not directly by GalaxyAPI, so
            # it doesn't need to be passed as kwarg to GalaxyApi
            auth_url = server_options.pop('auth_url', None)
            client_id = server_options.pop('client_id', None)
            token_val = server_options['token'] or NoTokenSentinel
            username = server_options['username']
            # NOTE(review): unused local; retained as-is.
            available_api_versions = None
            v3 = server_options.pop('v3', None)
            validate_certs = server_options['validate_certs']
            if validate_certs is None:
                validate_certs = validate_certs_fallback
            server_options['validate_certs'] = validate_certs
            if v3:
                # This allows a user to explicitly indicate the server uses the /v3 API
                # This was added for testing against pulp_ansible and I'm not sure it has
                # a practical purpose outside of this use case. As such, this option is not
                # documented as of now
                server_options['available_api_versions'] = {'v3': '/v3'}

            # default case if no auth info is provided.
            server_options['token'] = None

            if username:
                server_options['token'] = BasicAuthToken(username,
                                                         server_options['password'])
            else:
                if token_val:
                    if auth_url:
                        server_options['token'] = KeycloakToken(access_token=token_val,
                                                                auth_url=auth_url,
                                                                validate_certs=validate_certs,
                                                                client_id=client_id)
                    else:
                        # The galaxy v1 / github / django / 'Token'
                        server_options['token'] = GalaxyToken(token=token_val)

            server_options.update(galaxy_options)
            config_servers.append(GalaxyAPI(
                self.galaxy, server_key,
                priority=server_priority,
                **server_options
            ))

        cmd_server = context.CLIARGS['api_server']
        cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
        if cmd_server:
            # Cmd args take precedence over the config entry but first check if the arg was a name and use that config
            # entry, otherwise create a new API entry for the server specified.
            config_server = next((s for s in config_servers if s.name == cmd_server), None)
            if config_server:
                self.api_servers.append(config_server)
            else:
                self.api_servers.append(GalaxyAPI(
                    self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
                    priority=len(config_servers) + 1,
                    **galaxy_options
                ))
        else:
            self.api_servers = config_servers

        # Default to C.GALAXY_SERVER if no servers were defined
        if len(self.api_servers) == 0:
            self.api_servers.append(GalaxyAPI(
                self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
                priority=0,
                **galaxy_options
            ))

        return context.CLIARGS['func']()
@property
def api(self):
if self._api:
return self._api
for server in self.api_servers:
try:
if u'v1' in server.available_api_versions:
self._api = server
break
except Exception:
continue
if not self._api:
self._api = self.api_servers[0]
return self._api
    def _get_default_collection_path(self):
        """Return the first configured collections path (the install default)."""
        return C.COLLECTIONS_PATHS[0]
    def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None):
        """
        Parses an Ansible requirement.yml file and returns all the roles and/or collections defined in it. There are 2
        requirements file format:

            # v1 (roles only)
            - src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
              name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
              scm: If src is a URL, specify the SCM. Only git or hd are supported and defaults to git.
              version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
              include: Path to additional requirements.yml files.

            # v2 (roles and collections)
            ---
            roles:
            # Same as v1 format just under the roles key

            collections:
            - namespace.collection
            - name: namespace.collection
              version: version identifier, multiple identifiers are separated by ','
              source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
              type: git|file|url|galaxy

        :param requirements_file: The path to the requirements file.
        :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
        :param artifacts_manager: Artifacts manager.
        :return: a dict containing roles and collections to found in the requirements file.
        """
        requirements = {
            'roles': [],
            'collections': [],
        }

        b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
        if not os.path.exists(b_requirements_file):
            raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))

        display.vvv("Reading requirement file at '%s'" % requirements_file)
        with open(b_requirements_file, 'rb') as req_obj:
            try:
                file_requirements = yaml_load(req_obj)
            except YAMLError as err:
                raise AnsibleError(
                    "Failed to parse the requirements yml at '%s' with the following error:\n%s"
                    % (to_native(requirements_file), to_native(err)))

        if file_requirements is None:
            raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))

        def parse_role_req(requirement):
            # Convert one v1-style role entry into GalaxyRole objects. An
            # 'include' entry pulls in and parses another requirements file.
            if "include" not in requirement:
                role = RoleRequirement.role_yaml_parse(requirement)
                display.vvv("found role %s in yaml file" % to_text(role))
                if "name" not in role and "src" not in role:
                    raise AnsibleError("Must specify name or src for role")
                return [GalaxyRole(self.galaxy, self.api, **role)]
            else:
                b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
                if not os.path.isfile(b_include_path):
                    raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
                                       % (to_native(b_include_path), to_native(requirements_file)))

                with open(b_include_path, 'rb') as f_include:
                    try:
                        return [GalaxyRole(self.galaxy, self.api, **r) for r in
                                (RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
                    except Exception as e:
                        raise AnsibleError("Unable to load data from include requirements file: %s %s"
                                           % (to_native(requirements_file), to_native(e)))

        if isinstance(file_requirements, list):
            # Older format that contains only roles
            if not allow_old_format:
                raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
                                   "a list of collections to install")

            for role_req in file_requirements:
                requirements['roles'] += parse_role_req(role_req)

        else:
            # Newer format with a collections and/or roles key
            extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
            if extra_keys:
                raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
                                   "file. Found: %s" % (to_native(", ".join(extra_keys))))

            for role_req in file_requirements.get('roles') or []:
                requirements['roles'] += parse_role_req(role_req)

            # Normalise each collection entry (strings become dicts, plain
            # galaxy sources resolve to API servers) before building the
            # Requirement objects.
            requirements['collections'] = [
                Requirement.from_requirement_dict(
                    self._init_coll_req_dict(collection_req),
                    artifacts_manager,
                )
                for collection_req in file_requirements.get('collections') or []
            ]

        return requirements
def _init_coll_req_dict(self, coll_req):
if not isinstance(coll_req, dict):
# Assume it's a string:
return {'name': coll_req}
if (
'name' not in coll_req or
not coll_req.get('source') or
coll_req.get('type', 'galaxy') != 'galaxy'
):
return coll_req
# Try and match up the requirement source with our list of Galaxy API
# servers defined in the config, otherwise create a server with that
# URL without any auth.
coll_req['source'] = next(
iter(
srvr for srvr in self.api_servers
if coll_req['source'] in {srvr.name, srvr.api_server}
),
GalaxyAPI(
self.galaxy,
'explicit_requirement_{name!s}'.format(
name=coll_req['name'],
),
coll_req['source'],
validate_certs=not context.CLIARGS['ignore_certs'],
),
)
return coll_req
    @staticmethod
    def exit_without_ignore(rc=1):
        """
        Exits with the specified return code unless the
        option --ignore-errors was specified

        NOTE(review): despite the name and the *rc* parameter (currently
        unused), this raises AnsibleError rather than exiting directly; the
        CLI entry point converts that into a non-zero exit status.
        """
        if not context.CLIARGS['ignore_errors']:
            raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
# Get the top-level 'description' first, falling back to galaxy_info['galaxy_info']['description'].
galaxy_info = role_info.get('galaxy_info', {})
description = role_info.get('description', galaxy_info.get('description', ''))
text.append(u"\tdescription: %s" % description)
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
# make sure we have a trailing newline returned
text.append(u"")
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
    @staticmethod
    def _get_skeleton_galaxy_yml(template_path, inject_data):
        """Render the galaxy.yml template used by the default collection skeleton.

        Splits the collection metadata definitions into required/optional
        lists, fills in default empty values per declared type, and templates
        *template_path* with them.

        :param template_path: Path to the galaxy.yml.j2 template file.
        :param inject_data: Pre-populated values keyed by metadata entry name.
        :return: The rendered galaxy.yml text.
        """
        with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
            meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')

        galaxy_meta = get_collections_galaxy_meta_info()

        required_config = []
        optional_config = []
        for meta_entry in galaxy_meta:
            config_list = required_config if meta_entry.get('required', False) else optional_config

            value = inject_data.get(meta_entry['key'], None)
            if not value:
                # No injected value: use an empty value of the declared type
                # so the template always has something to render.
                meta_type = meta_entry.get('type', 'str')

                if meta_type == 'str':
                    value = ''
                elif meta_type == 'list':
                    value = []
                elif meta_type == 'dict':
                    value = {}

            meta_entry['value'] = value
            config_list.append(meta_entry)

        # Doc markup: L(text, url) becomes "text <url>", C(name) becomes 'name'.
        link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
        const_pattern = re.compile(r"C\(([^)]+)\)")

        def comment_ify(v):
            # Turn a description (string or list of strings) into a wrapped,
            # '# '-prefixed YAML comment block.
            if isinstance(v, list):
                v = ". ".join([l.rstrip('.') for l in v])

            v = link_pattern.sub(r"\1 <\2>", v)
            v = const_pattern.sub(r"'\1'", v)

            return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)

        loader = DataLoader()
        templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
        templar.environment.filters['comment_ify'] = comment_ify

        meta_value = templar.template(meta_template)

        return meta_value
def _require_one_of_collections_requirements(
self, collections, requirements_file,
artifacts_manager=None,
):
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
elif requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(
requirements_file,
allow_old_format=False,
artifacts_manager=artifacts_manager,
)
else:
requirements = {
'collections': [
Requirement.from_string(coll_input, artifacts_manager)
for coll_input in collections
],
'roles': [],
}
return requirements
############################
# execute actions
############################
    def execute_role(self):
        """
        Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
        as listed below.
        """
        # To satisfy doc build
        # The actual work is dispatched to the execute_* method that the
        # sub-command's set_defaults(func=...) selected during arg parsing.
        pass
    def execute_collection(self):
        """
        Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
        listed below.
        """
        # To satisfy doc build
        # The actual work is dispatched to the execute_* method that the
        # sub-command's set_defaults(func=...) selected during arg parsing.
        pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(
to_text(collection_path, errors='surrogate_or_strict'),
to_text(output_path, errors='surrogate_or_strict'),
force,
)
    @with_collection_artifacts_manager
    def execute_download(self, artifacts_manager=None):
        """Download collections (and, unless --no-deps, their dependencies)
        as tarballs into the requested download directory.

        :param artifacts_manager: Artifacts manager (injected by the decorator).
        :return: 0 on success.
        """
        collections = context.CLIARGS['args']
        no_deps = context.CLIARGS['no_deps']
        download_path = context.CLIARGS['download_path']

        requirements_file = context.CLIARGS['requirements']
        if requirements_file:
            requirements_file = GalaxyCLI._resolve_path(requirements_file)

        # Exactly one of positional collections / -r file must be given.
        requirements = self._require_one_of_collections_requirements(
            collections, requirements_file,
            artifacts_manager=artifacts_manager,
        )['collections']

        download_path = GalaxyCLI._resolve_path(download_path)
        b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
        if not os.path.exists(b_download_path):
            os.makedirs(b_download_path)

        download_collections(
            requirements, download_path, self.api_servers, no_deps,
            context.CLIARGS['allow_pre_release'],
            artifacts_manager=artifacts_manager,
        )

        return 0
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
dependencies=[],
))
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <example@domain.com>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
build_ignore=[],
))
skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
if obj_skeleton is not None:
own_skeleton = False
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
loader = DataLoader()
templar = Templar(loader, variables=inject_data)
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
# Filter out ignored directory names
# Use [:] to mutate the list os.walk uses
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
with open(dest_file, 'wb') as df:
df.write(b_rendered)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
    def execute_info(self):
        """
        prints out detailed information about an installed role as well as info available from the galaxy API.
        """

        roles_path = context.CLIARGS['roles_path']

        data = ''
        for role in context.CLIARGS['args']:

            # NOTE(review): roles_path is the configured list of search paths,
            # not a single path -- confirm this is the intended 'path' value.
            role_info = {'path': roles_path}
            gr = GalaxyRole(self.galaxy, self.api, role)

            install_info = gr.install_info
            if install_info:
                if 'version' in install_info:
                    # Rename so it doesn't clash with the Galaxy-reported version.
                    install_info['installed_version'] = install_info['version']
                    del install_info['version']
                role_info.update(install_info)

            if not context.CLIARGS['offline']:
                remote_data = None
                try:
                    remote_data = self.api.lookup_role_by_name(role, False)
                except AnsibleError as e:
                    if e.http_code == 400 and 'Bad Request' in e.message:
                        # Role does not exist in Ansible Galaxy
                        # Replaces any info gathered so far and stops
                        # processing further roles.
                        data = u"- the role %s was not found" % role
                        break

                    raise AnsibleError("Unable to find info about '%s': %s" % (role, e))

                if remote_data:
                    role_info.update(remote_data)
            elif context.CLIARGS['offline'] and not gr._exists:
                data = u"- the role %s was not found" % role
                break

            if gr.metadata:
                role_info.update(gr.metadata)

            req = RoleRequirement()
            role_spec = req.role_yaml_parse({'role': role})
            if role_spec:
                role_info.update(role_spec)

            data += self._display_role_info(role_info)

        self.pager(data)
@with_collection_artifacts_manager
def execute_verify(self, artifacts_manager=None):
collections = context.CLIARGS['args']
search_paths = context.CLIARGS['collections_path']
ignore_errors = context.CLIARGS['ignore_errors']
local_verify_only = context.CLIARGS['offline']
requirements_file = context.CLIARGS['requirements']
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
artifacts_manager=artifacts_manager,
)['collections']
resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
results = verify_collections(
requirements, resolved_paths,
self.api_servers, ignore_errors,
local_verify_only=local_verify_only,
artifacts_manager=artifacts_manager,
)
if any(result for result in results if not result.success):
return 1
return 0
    @with_collection_artifacts_manager
    def execute_install(self, artifacts_manager=None):
        """
        Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``).
        You can pass in a list (roles or collections) or use the file
        option listed below (these are mutually exclusive). If you pass in a list, it
        can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.

        :param artifacts_manager: Artifacts manager.
        """
        install_items = context.CLIARGS['args']
        requirements_file = context.CLIARGS['requirements']
        collection_path = None

        if requirements_file:
            requirements_file = GalaxyCLI._resolve_path(requirements_file)

        # Pre-rendered warning for a requirements file that mixes roles and
        # collections when only one type will be installed; {0} is filled in
        # with the type being skipped.
        two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
                           "run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
                           "'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)

        # TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
        collection_requirements = []
        role_requirements = []
        if context.CLIARGS['type'] == 'collection':
            collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
            requirements = self._require_one_of_collections_requirements(
                install_items, requirements_file,
                artifacts_manager=artifacts_manager,
            )

            collection_requirements = requirements['collections']
            if requirements['roles']:
                display.vvv(two_type_warning.format('role'))
        else:
            if not install_items and requirements_file is None:
                raise AnsibleOptionsError("- you must specify a user/role name or a roles file")

            if requirements_file:
                if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
                    raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")

                requirements = self._parse_requirements_file(
                    requirements_file,
                    artifacts_manager=artifacts_manager,
                )
                role_requirements = requirements['roles']

                # We can only install collections and roles at the same time if the type wasn't specified and the -p
                # argument was not used. If collections are present in the requirements then at least display a msg.
                galaxy_args = self._raw_args
                if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
                                                    '--roles-path' in galaxy_args):

                    # We only want to display a warning if 'ansible-galaxy install -r ... -p ...'. Other cases the user
                    # was explicit about the type and shouldn't care that collections were skipped.
                    display_func = display.warning if self._implicit_role else display.vvv
                    display_func(two_type_warning.format('collection'))
                else:
                    collection_path = self._get_default_collection_path()
                    collection_requirements = requirements['collections']
            else:
                # roles were specified directly, so we'll just go out grab them
                # (and their dependencies, unless the user doesn't want us to).
                for rname in context.CLIARGS['args']:
                    role = RoleRequirement.role_yaml_parse(rname.strip())
                    role_requirements.append(GalaxyRole(self.galaxy, self.api, **role))

        if not role_requirements and not collection_requirements:
            display.display("Skipping install, no requirements found")
            return

        if role_requirements:
            display.display("Starting galaxy role install process")
            self._execute_install_role(role_requirements)

        if collection_requirements:
            display.display("Starting galaxy collection install process")
            # Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
            # the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
            self._execute_install_collection(
                collection_requirements, collection_path,
                artifacts_manager=artifacts_manager,
            )
def _execute_install_collection(
self, requirements, path, artifacts_manager,
):
force = context.CLIARGS['force']
ignore_errors = context.CLIARGS['ignore_errors']
no_deps = context.CLIARGS['no_deps']
force_with_deps = context.CLIARGS['force_with_deps']
# If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
upgrade = context.CLIARGS.get('upgrade', False)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(path), to_text(":".join(collections_path))))
output_path = validate_collection_path(path)
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(
requirements, output_path, self.api_servers, ignore_errors,
no_deps, force, force_with_deps, upgrade,
allow_pre_release=allow_pre_release,
artifacts_manager=artifacts_manager,
)
return 0
    def _execute_install_role(self, requirements):
        """Install the given GalaxyRole requirements, honouring --force and
        --force-with-deps, and (unless --no-deps) their dependencies.

        :param requirements: List of GalaxyRole objects; dependency roles
            discovered during installation are appended to it in place.
        :return: 0 on completion.
        """
        role_file = context.CLIARGS['requirements']
        no_deps = context.CLIARGS['no_deps']
        force_deps = context.CLIARGS['force_with_deps']

        force = context.CLIARGS['force'] or force_deps

        # Note: requirements grows while we iterate it -- Python list
        # iteration picks up roles appended below, which is how dependencies
        # get installed in the same pass.
        for role in requirements:
            # only process roles in roles files when names matches if given
            if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
                display.vvv('Skipping role %s' % role.name)
                continue

            display.vvv('Processing role %s ' % role.name)

            # query the galaxy API for the role data
            if role.install_info is not None:
                if role.install_info['version'] != role.version or force:
                    if force:
                        display.display('- changing role %s from %s to %s' %
                                        (role.name, role.install_info['version'], role.version or "unspecified"))
                        role.remove()
                    else:
                        display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                        (role.name, role.install_info['version'], role.version or "unspecified"))
                        continue
                else:
                    if not force:
                        display.display('- %s is already installed, skipping.' % str(role))
                        continue

            try:
                installed = role.install()
            except AnsibleError as e:
                display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
                self.exit_without_ignore()
                continue

            # install dependencies, if we want them
            if not no_deps and installed:
                if not role.metadata:
                    display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
                else:
                    role_dependencies = (role.metadata.get('dependencies') or []) + role.requirements
                    for dep in role_dependencies:
                        display.debug('Installing dep %s' % dep)
                        dep_req = RoleRequirement()
                        dep_info = dep_req.role_yaml_parse(dep)
                        dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
                        if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                            # we know we can skip this, as it's not going to
                            # be found on galaxy.ansible.com
                            continue
                        if dep_role.install_info is None:
                            if dep_role not in requirements:
                                display.display('- adding dependency: %s' % to_text(dep_role))
                                requirements.append(dep_role)
                            else:
                                display.display('- dependency %s already pending installation.' % dep_role.name)
                        else:
                            if dep_role.install_info['version'] != dep_role.version:
                                if force_deps:
                                    display.display('- changing dependent role %s from %s to %s' %
                                                    (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
                                    dep_role.remove()
                                    requirements.append(dep_role)
                                else:
                                    display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
                                                    (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
                            else:
                                if force_deps:
                                    requirements.append(dep_role)
                                else:
                                    display.display('- dependency %s is already installed, skipping.' % dep_role.name)

            if not installed:
                display.warning("- %s was NOT installed successfully." % role.name)
                self.exit_without_ignore()

        return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
List installed collections or roles
"""
if context.CLIARGS['type'] == 'role':
self.execute_list_role()
elif context.CLIARGS['type'] == 'collection':
self.execute_list_collection()
def execute_list_role(self):
"""
List all roles installed on the local system or a specific role
"""
path_found = False
role_found = False
warnings = []
roles_search_paths = context.CLIARGS['roles_path']
role_name = context.CLIARGS['role']
for path in roles_search_paths:
role_path = GalaxyCLI._resolve_path(path)
if os.path.isdir(path):
path_found = True
else:
warnings.append("- the configured path {0} does not exist.".format(path))
continue
if role_name:
# show the requested role, if it exists
gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
if os.path.isdir(gr.path):
role_found = True
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
break
warnings.append("- the role %s was not found" % role_name)
else:
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
if not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
# Do not warn if the role was found in any of the search paths
if role_found and role_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
    @with_collection_artifacts_manager
    def execute_list_collection(self, artifacts_manager=None):
        """
        List all collections installed on the local system

        :param artifacts_manager: Artifacts manager.
        """
        output_format = context.CLIARGS['output_format']
        collections_search_paths = set(context.CLIARGS['collections_path'])
        collection_name = context.CLIARGS['collection']
        default_collections_path = AnsibleCollectionConfig.collection_paths
        # maps collection_path -> {fqcn: {'version': ...}} for yaml/json output
        collections_in_paths = {}
        warnings = []
        path_found = False
        collection_found = False
        for path in collections_search_paths:
            collection_path = GalaxyCLI._resolve_path(path)
            # NOTE(review): existence is checked on the unresolved `path`
            # while the warning prints the resolved path — confirm intended.
            if not os.path.exists(path):
                if path in default_collections_path:
                    # don't warn for missing default paths
                    continue
                warnings.append("- the configured path {0} does not exist.".format(collection_path))
                continue
            if not os.path.isdir(collection_path):
                warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
                continue
            path_found = True
            if collection_name:
                # list a specific collection
                validate_collection_name(collection_name)
                namespace, collection = collection_name.split('.')
                collection_path = validate_collection_path(collection_path)
                b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
                if not os.path.exists(b_collection_path):
                    warnings.append("- unable to find {0} in collection paths".format(collection_name))
                    continue
                if not os.path.isdir(collection_path):
                    warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
                    continue
                collection_found = True
                try:
                    # load the on-disk collection as a Requirement of unknown origin
                    collection = Requirement.from_dir_path_as_unknown(
                        b_collection_path,
                        artifacts_manager,
                    )
                except ValueError as val_err:
                    six.raise_from(AnsibleError(val_err), val_err)
                if output_format in {'yaml', 'json'}:
                    collections_in_paths[collection_path] = {
                        collection.fqcn: {'version': collection.ver}
                    }
                    continue
                fqcn_width, version_width = _get_collection_widths([collection])
                _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
                _display_collection(collection, fqcn_width, version_width)
            else:
                # list all collections
                collection_path = validate_collection_path(path)
                if os.path.isdir(collection_path):
                    display.vvv("Searching {0} for collections".format(collection_path))
                    collections = list(find_existing_collections(
                        collection_path, artifacts_manager,
                    ))
                else:
                    # There was no 'ansible_collections/' directory in the path,
                    # so there are no collections here.
                    display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
                    continue
                if not collections:
                    display.vvv("No collections found at {0}".format(collection_path))
                    continue
                if output_format in {'yaml', 'json'}:
                    collections_in_paths[collection_path] = {
                        collection.fqcn: {'version': collection.ver} for collection in collections
                    }
                    continue
                # Display header
                fqcn_width, version_width = _get_collection_widths(collections)
                _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
                # Sort collections by the namespace and name
                for collection in sorted(collections, key=to_text):
                    _display_collection(collection, fqcn_width, version_width)
        # Do not warn if the specific collection was found in any of the search paths
        if collection_found and collection_name:
            warnings = []
        for w in warnings:
            display.warning(w)
        if not path_found:
            raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
        if output_format == 'json':
            display.display(json.dumps(collections_in_paths))
        elif output_format == 'yaml':
            display.display(yaml_dump(collections_in_paths))
        return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
    def execute_search(self):
        ''' searches for roles on the Ansible Galaxy server'''
        # one request page; results beyond this are only counted, not shown
        page_size = 1000
        search = None
        if context.CLIARGS['args']:
            # join positional terms into a single '+'-separated query string
            search = '+'.join(context.CLIARGS['args'])
        if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
            raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
        response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
                                         tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
        if response['count'] == 0:
            display.display("No roles match your search.", color=C.COLOR_ERROR)
            return True
        # accumulate output lines, then hand the whole page to the pager
        data = [u'']
        if response['count'] > page_size:
            data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
        else:
            data.append(u"Found %d roles matching your search:" % response['count'])
        # column width = longest 'username.name' among the results
        max_len = []
        for role in response['results']:
            max_len.append(len(role['username'] + '.' + role['name']))
        name_len = max(max_len)
        format_str = u" %%-%ds %%s" % name_len
        data.append(u'')
        data.append(format_str % (u"Name", u"Description"))
        data.append(format_str % (u"----", u"-----------"))
        for role in response['results']:
            data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
        data = u'\n'.join(data)
        self.pager(data)
        return True
    def execute_import(self):
        """ used to import a role into Ansible Galaxy """
        # map server-side message levels to display colors
        colors = {
            'INFO': 'normal',
            'WARNING': C.COLOR_WARN,
            'ERROR': C.COLOR_ERROR,
            'SUCCESS': C.COLOR_OK,
            'FAILED': C.COLOR_ERROR,
        }
        github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
        github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
        if context.CLIARGS['check_status']:
            # only query the status of an existing import task
            task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
        else:
            # Submit an import request
            task = self.api.create_import_task(github_user, github_repo,
                                               reference=context.CLIARGS['reference'],
                                               role_name=context.CLIARGS['role_name'])
            if len(task) > 1:
                # found multiple roles associated with github_user/github_repo
                display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
                                color='yellow')
                display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
                for t in task:
                    display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
                display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                                color=C.COLOR_CHANGED)
                # ambiguous mapping: bail out without waiting
                return 0
            # found a single role as expected
            display.display("Successfully submitted import request %d" % task[0]['id'])
            if not context.CLIARGS['wait']:
                display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
                display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
        if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
            # Get the status of the import; poll every 10s, printing each
            # server message exactly once, until the task reaches a final state
            msg_list = []
            finished = False
            while not finished:
                task = self.api.get_import_task(task_id=task[0]['id'])
                for msg in task[0]['summary_fields']['task_messages']:
                    if msg['id'] not in msg_list:
                        display.display(msg['message_text'], color=colors[msg['message_type']])
                        msg_list.append(msg['id'])
                if task[0]['state'] in ['SUCCESS', 'FAILED']:
                    finished = True
                else:
                    time.sleep(10)
        return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
abadger/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 80,811
|
[
"Galaxy"
] |
8ac2584b2a52cd4eba3dba91710dad0906eb868192a82a1843934bdebebdb5ff
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-get-wn
# Author : Philippe Charpentier
########################################################################
"""
Get WNs for a selection of jobs
Usage:
dirac-wms-get-wn [options] ... LFN|File
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import datetime
import DIRAC
import DIRAC.Core.Base.Script as Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
  """Select jobs at a site and report the worker nodes (WNs) they ran on.

  Switches filter by site, status, minor status, WN name, batch job ID and
  date range; with --Full (implied by --WorkerNode/--BatchID) the individual
  jobs are listed, otherwise only per-WN / per-status counts are printed.
  """
  # Defaults for the selection switches
  site = 'BOINC.World.org'
  status = ["Running"]
  minorStatus = None
  workerNodes = None
  since = None
  date = 'today'
  full = False
  until = None
  batchIDs = None
  Script.registerSwitch('', 'Site=', ' Select site (default: %s)' % site)
  Script.registerSwitch('', 'Status=', ' Select status (default: %s)' % status)
  Script.registerSwitch('', 'MinorStatus=', ' Select minor status')
  Script.registerSwitch('', 'WorkerNode=', ' Select WN')
  Script.registerSwitch('', 'BatchID=', ' Select batch jobID')
  Script.registerSwitch('', 'Since=', ' Date since when to select jobs, or number of days (default: today)')
  Script.registerSwitch('', 'Date=', ' Specify the date (check for a full day)')
  Script.registerSwitch('', 'Full', ' Printout full list of job (default: False except if --WorkerNode)')
  Script.parseCommandLine()
  from DIRAC import gLogger
  from DIRAC.Interfaces.API.Dirac import Dirac
  from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
  switches = Script.getUnprocessedSwitches()
  for switch in switches:
    if switch[0] == 'Site':
      site = switch[1]
    elif switch[0] == 'MinorStatus':
      minorStatus = switch[1]
    elif switch[0] == 'Status':
      if switch[1].lower() == 'all':
        status = [None]
      else:
        status = switch[1].split(',')
    elif switch[0] == 'WorkerNode':
      workerNodes = switch[1].split(',')
    elif switch[0] == 'BatchID':
      try:
        # 'jid' instead of 'id' to avoid shadowing the builtin
        batchIDs = [int(jid) for jid in switch[1].split(',')]
      except Exception:
        gLogger.error('Invalid jobID', switch[1])
        DIRAC.exit(1)
    elif switch[0] == 'Full':
      full = True
    elif switch[0] == 'Date':
      # a single day: select [since, until)
      since = switch[1].split()[0]
      until = str(datetime.datetime.strptime(since, '%Y-%m-%d') + datetime.timedelta(days=1)).split()[0]
    elif switch[0] == 'Since':
      date = switch[1].lower()
      if date == 'today':
        since = None
      elif date == 'yesterday':
        since = 1
      elif date == 'ever':
        since = 2 * 365
      elif date.isdigit():
        since = int(date)
        date += ' days'
      else:
        since = date
      if isinstance(since, int):
        # turn a number of days into a 'YYYY-MM-DD' string
        since = str(datetime.datetime.now() - datetime.timedelta(days=since)).split()[0]
  if workerNodes or batchIDs:
    # status = [None]
    full = True
  monitoring = JobMonitoringClient()
  dirac = Dirac()
  # Get jobs according to selection
  jobs = set()
  for stat in status:
    res = dirac.selectJobs(site=site, date=since, status=stat, minorStatus=minorStatus)
    if not res['OK']:
      gLogger.error('Error selecting jobs', res['Message'])
      DIRAC.exit(1)
    allJobs = set(int(job) for job in res['Value'])
    if until:
      # subtract jobs newer than the end of the requested day
      res = dirac.selectJobs(site=site, date=until, status=stat)
      if not res['OK']:
        gLogger.error('Error selecting jobs', res['Message'])
        DIRAC.exit(1)
      allJobs -= set(int(job) for job in res['Value'])
    jobs.update(allJobs)
  if not jobs:
    gLogger.always('No jobs found...')
    DIRAC.exit(0)
  # res = monitoring.getJobsSummary( jobs )
  # print eval( res['Value'] )[jobs[0]]
  allJobs = set()
  result = {}
  wnJobs = {}
  gLogger.always('%d jobs found' % len(jobs))
  # Get host name and batch ID for each job, applying the WN/batch filters
  for job in jobs:
    res = monitoring.getJobParameter(job, 'HostName')
    node = res.get('Value', {}).get('HostName', 'Unknown')
    res = monitoring.getJobParameter(job, 'LocalJobID')
    batchID = res.get('Value', {}).get('LocalJobID', 'Unknown')
    if workerNodes:
      if not [wn for wn in workerNodes if node.startswith(wn)]:
        continue
      allJobs.add(job)
    if batchIDs:
      if batchID not in batchIDs:
        continue
      allJobs.add(job)
    if full or status == [None]:
      allJobs.add(job)
    result.setdefault(job, {})['Status'] = status
    result[job]['Node'] = node
    result[job]['LocalJobID'] = batchID
    wnJobs[node] = wnJobs.setdefault(node, 0) + 1
  # If necessary get jobs' status
  statusCounters = {}
  if allJobs:
    allJobs = sorted(allJobs, reverse=True)
    res = monitoring.getJobsStates(allJobs)
    if not res['OK']:
      gLogger.error('Error getting job parameter', res['Message'])
    else:
      jobStates = res['Value']
      for job in allJobs:
        stat = jobStates.get(job, {}).get('Status', 'Unknown') + '; ' + \
            jobStates.get(job, {}).get('MinorStatus', 'Unknown') + '; ' + \
            jobStates.get(job, {}).get('ApplicationStatus', 'Unknown')
        result[job]['Status'] = stat
        statusCounters[stat] = statusCounters.setdefault(stat, 0) + 1
  elif not workerNodes and not batchIDs:
    allJobs = sorted(jobs, reverse=True)
  # Print out result
  if workerNodes or batchIDs:
    gLogger.always('Found %d jobs at %s, WN %s (since %s):' % (len(allJobs), site, workerNodes, date))
    if allJobs:
      gLogger.always('List of jobs:', ','.join([str(job) for job in allJobs]))
  else:
    if status == [None]:
      gLogger.always('Found %d jobs at %s (since %s):' % (len(allJobs), site, date))
      for stat in sorted(statusCounters):
        gLogger.always('%d jobs %s' % (statusCounters[stat], stat))
    else:
      gLogger.always('Found %d jobs %s at %s (since %s):' % (len(allJobs), status, site, date))
    # Bug fix: sorted() lost its 'cmp' keyword in Python 3; the original
    # comparator sorted by descending job count, which key=-count reproduces.
    gLogger.always('List of WNs:', ','.join(['%s (%d)' % (node, wnJobs[node])
                                             for node in sorted(wnJobs, key=lambda n: -wnJobs[n])]))
  if full:
    if workerNodes or batchIDs:
      # group jobs per (short) node name for per-WN listing
      nodeJobs = {}
      for job in allJobs:
        status = result[job]['Status']
        node = result[job]['Node'].split('.')[0]
        jobID = result[job].get('LocalJobID')
        nodeJobs.setdefault(node, []).append((jobID, job, status))
      if not workerNodes:
        workerNodes = sorted(nodeJobs)
      for node in workerNodes:
        for job in nodeJobs.get(node.split('.')[0], []):
          gLogger.always('%s ' % node + '(%s): %s - %s' % job)
    else:
      for job in allJobs:
        status = result[job]['Status']
        node = result[job]['Node']
        jobID = result[job].get('LocalJobID')
        gLogger.always('%s (%s): %s - %s' % (node, jobID, job, status))
# Script entry point: the DIRACScript-decorated main() handles CLI parsing.
if __name__ == "__main__":
  main()
|
yujikato/DIRAC
|
src/DIRAC/WorkloadManagementSystem/scripts/dirac_wms_get_wn.py
|
Python
|
gpl-3.0
| 6,911
|
[
"DIRAC"
] |
1b159a5bfd0054a33d5f45269fa51a29d845d582fd82f9f8ac6899c41e65f862
|
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NEURON, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import re
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.easyblocks.generic.pythonpackage import det_pylibdir
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_NEURON(ConfigureMake):
    """Support for building/installing NEURON."""

    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for NEURON."""
        super(EB_NEURON, self).__init__(*args, **kwargs)
        # host CPU type (e.g. x86_64), determined in configure_step via config.guess
        self.hostcpu = 'UNKNOWN'
        # whether Python bindings are built (set when a Python dependency is found)
        self.with_python = False
        # Python library subdirectory (lib/pythonX.Y/site-packages), set in configure_step
        self.pylibdir = 'UNKNOWN'

    @staticmethod
    def extra_options():
        """Custom easyconfig parameters for NEURON."""
        extra_vars = {
            'paranrn': [True, "Enable support for distributed simulations.", CUSTOM],
        }
        return ConfigureMake.extra_options(extra_vars)

    def configure_step(self):
        """Custom configuration procedure for NEURON."""
        # enable support for distributed simulations if desired
        if self.cfg['paranrn']:
            self.cfg.update('configopts', '--with-paranrn')
        # specify path to InterViews if it is available as a dependency
        interviews_root = get_software_root('InterViews')
        if interviews_root:
            self.cfg.update('configopts', "--with-iv=%s" % interviews_root)
        else:
            self.cfg.update('configopts', "--without-iv")
        # optionally enable support for Python as alternative interpreter
        python_root = get_software_root('Python')
        if python_root:
            self.with_python = True
            self.cfg.update('configopts', "--with-nrnpython=%s/bin/python" % python_root)
        # determine host CPU type, used to locate NEURON's arch-specific bin/lib dirs
        cmd = "./config.guess"
        (out, ec) = run_cmd(cmd, simple=False)
        self.hostcpu = out.split('\n')[0].split('-')[0]
        self.log.debug("Determined host CPU type as %s" % self.hostcpu)
        # determine Python lib dir
        self.pylibdir = det_pylibdir()
        # complete configuration with configure_method of parent
        super(EB_NEURON, self).configure_step()

    def install_step(self):
        """Custom install procedure for NEURON."""
        super(EB_NEURON, self).install_step()
        # install the Python bindings (built in src/nrnpython) when enabled
        if self.with_python:
            pypath = os.path.join('src', 'nrnpython')
            try:
                pwd = os.getcwd()
                os.chdir(pypath)
            except OSError, err:
                raise EasyBuildError("Failed to change to %s: %s", pypath, err)
            cmd = "python setup.py install --prefix=%s" % self.installdir
            run_cmd(cmd, simple=True, log_all=True, log_ok=True)
            try:
                os.chdir(pwd)
            except OSError, err:
                raise EasyBuildError("Failed to change back to %s: %s", pwd, err)

    def sanity_check_step(self):
        """Custom sanity check for NEURON."""
        shlib_ext = get_shared_lib_ext()
        # binaries/libraries live under an arch-specific subdirectory
        binpath = os.path.join(self.hostcpu, 'bin')
        libpath = os.path.join(self.hostcpu, 'lib', 'lib%s.' + shlib_ext)
        custom_paths = {
            'files': [os.path.join(binpath, x) for x in ["bbswork.sh", "hel2mos1.sh", "hoc_ed", "ivoc", "memacs",
                                                         "mkthreadsafe", "modlunit", "mos2nrn", "mos2nrn2.sh",
                                                         "neurondemo", "nocmodl", "oc"]] +
                     [os.path.join(binpath, "nrn%s" % x) for x in ["gui", "iv", "iv_makefile", "ivmodl",
                                                                   "mech_makefile", "oc", "oc_makefile", "ocmodl"]] +
                     [libpath % x for x in ["ivoc", "ivos", "memacs", "meschach", "neuron_gnu", "nrniv", "nrnmpi",
                                            "nrnoc", "nrnpython", "oc", "ocxt", "scopmath", "sparse13", "sundials"]],
            'dirs': ['include/nrn', 'share/nrn'],
        }
        super(EB_NEURON, self).sanity_check_step(custom_paths=custom_paths)
        # load the (fake) module so the installed binaries are on PATH for the runtime checks
        try:
            fake_mod_data = self.load_fake_module()
        except EasyBuildError, err:
            self.log.debug("Loading fake module failed: %s" % err)
        # test NEURON demo: drive neurondemo through stdin and check the printed voltages
        inp = '\n'.join([
            "demo(3) // load the pyramidal cell model.",
            "init() // initialise the model",
            "t // should be zero",
            "soma.v // will print -65",
            "run() // run the simulation",
            "t // should be 5, indicating that 5ms were simulated",
            "soma.v // will print a value other than -65, indicating that the simulation was executed",
            "quit()",
        ])
        (out, ec) = run_cmd("neurondemo", simple=False, log_all=True, log_output=True, inp=inp)
        # NOTE(review): -68.134337 is presumably the expected membrane voltage after
        # 5ms for this model/NEURON version — confirm when bumping NEURON versions.
        validate_regexp = re.compile("^\s+-65\s*\n\s+5\s*\n\s+-68.134337", re.M)
        if ec or not validate_regexp.search(out):
            raise EasyBuildError("Validation of NEURON demo run failed.")
        else:
            self.log.info("Validation of NEURON demo OK!")
        # run the bundled MPI hello-world and check every rank reported in
        nproc = self.cfg['parallel']
        try:
            cwd = os.getcwd()
            os.chdir(os.path.join(self.cfg['start_dir'], 'src', 'parallel'))
            cmd = self.toolchain.mpi_cmd_for("nrniv -mpi test0.hoc", nproc)
            (out, ec) = run_cmd(cmd, simple=False, log_all=True, log_output=True)
            os.chdir(cwd)
        except OSError, err:
            raise EasyBuildError("Failed to run parallel hello world: %s", err)
        valid = True
        for i in range(0, nproc):
            validate_regexp = re.compile("I am %d of %d" % (i, nproc))
            if not validate_regexp.search(out):
                valid = False
                break
        if ec or not valid:
            raise EasyBuildError("Validation of parallel hello world run failed.")
        else:
            self.log.info("Parallel hello world OK!")
        # cleanup
        self.clean_up_fake_module(fake_mod_data)

    def make_module_req_guess(self):
        """Custom guesses for environment variables (PATH, ...) for NEURON."""
        guesses = super(EB_NEURON, self).make_module_req_guess()
        guesses.update({
            'PATH': [os.path.join(self.hostcpu, 'bin')],
        })
        if self.with_python:
            guesses.update({
                'PYTHONPATH': [self.pylibdir],
            })
        return guesses

    def make_module_extra(self):
        """Define extra module entries required."""
        txt = super(EB_NEURON, self).make_module_extra()
        # we need to make sure the correct compiler is set in the environment,
        # since NEURON features compilation at runtime
        for var in ['CC', 'MPICH_CC']:
            val = os.getenv(var)
            if val:
                txt += self.module_generator.set_environment(var, val)
                self.log.debug("%s set to %s, adding it to module" % (var, val))
            else:
                self.log.debug("%s not set: %s" % (var, os.environ.get(var, None)))
        return txt
|
valtandor/easybuild-easyblocks
|
easybuild/easyblocks/n/neuron.py
|
Python
|
gpl-2.0
| 8,594
|
[
"NEURON"
] |
68c85e9c91b8fae45106c22ac657ca384b6a7ea97fab077e697a1bbc2b85af40
|
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy.sparse as ss
import scipy.sparse.csgraph as ssc
from scipy.linalg import solve
from collections import deque
from ..mini_six import range
class TransformMixin(object):
  '''Graph-transformation methods mixed into the Graph classes:
  reweighting kernels, subgraph extraction, and cycle-removal algorithms.'''

  def kernelize(self, kernel):
    '''Re-weight according to a specified kernel function.

    kernel : str, {none, binary, rbf}
      none -> no reweighting
      binary -> all edges are given weight 1
      rbf -> applies a gaussian function to edge weights
    '''
    if kernel == 'none':
      return self
    if kernel == 'binary':
      if self.is_weighted():
        return self._update_edges(1, copy=True)
      return self
    if kernel == 'rbf':
      w = self.edge_weights()
      r = np.exp(-w / w.std())
      return self._update_edges(r, copy=True)
    raise ValueError('Invalid kernel type: %r' % kernel)

  def barycenter_edge_weights(self, X, copy=True, reg=1e-3):
    '''Re-weight such that the sum of each vertex's edge weights is 1.
    The resulting weighted graph is suitable for locally linear embedding.

    reg : amount of regularization to keep the problem well-posed
    '''
    new_weights = []
    for i, adj in enumerate(self.adj_list()):
      # local Gram matrix of neighbor offsets, regularized for stability
      C = X[adj] - X[i]
      G = C.dot(C.T)
      trace = np.trace(G)
      r = reg * trace if trace > 0 else reg
      G.flat[::G.shape[1] + 1] += r
      # NOTE(review): scipy >= 1.11 removed the sym_pos kwarg (use
      # assume_a='pos'); left as-is for compatibility with older scipy.
      w = solve(G, np.ones(G.shape[0]), sym_pos=True,
                overwrite_a=True, overwrite_b=True)
      w /= w.sum()
      new_weights.extend(w.tolist())
    return self.reweight(new_weights, copy=copy)

  def connected_subgraphs(self, directed=True, ordered=False):
    '''Generates connected components as subgraphs.
    When ordered=True, subgraphs are ordered by number of vertices.
    '''
    num_ccs, labels = self.connected_components(directed=directed)
    # check the trivial case first
    if num_ccs == 1:
      yield self
      # Bug fix: `raise StopIteration` inside a generator is converted to a
      # RuntimeError under PEP 479 (Python 3.7+); a plain return is correct.
      return
    if ordered:
      # sort by descending size (num vertices)
      order = np.argsort(np.bincount(labels))[::-1]
    else:
      order = range(num_ccs)
    # don't use self.subgraph() here, because we can reuse adj
    adj = self.matrix('dense', 'csr', 'csc')
    for c in order:
      mask = labels == c
      sub_adj = adj[mask][:,mask]
      yield self.__class__.from_adj_matrix(sub_adj)

  def shortest_path_subtree(self, start_idx, directed=True):
    '''Returns a subgraph containing only the shortest paths from start_idx to
    every other vertex.
    '''
    adj = self.matrix()
    _, pred = ssc.dijkstra(adj, directed=directed, indices=start_idx,
                           return_predecessors=True)
    adj = ssc.reconstruct_path(adj, pred, directed=directed)
    if not directed:
      adj = adj + adj.T
    return self.__class__.from_adj_matrix(adj)

  def minimum_spanning_subtree(self):
    '''Returns the (undirected) minimum spanning tree subgraph.'''
    # missing edges must not look like zero-weight edges to the MST routine
    dist = self.matrix('dense', copy=True)
    dist[dist==0] = np.inf
    np.fill_diagonal(dist, 0)
    mst = ssc.minimum_spanning_tree(dist)
    return self.__class__.from_adj_matrix(mst + mst.T)

  def neighborhood_subgraph(self, start_idx, radius=1, weighted=True,
                            directed=True, return_mask=False):
    '''Returns a subgraph containing only vertices within a given
    geodesic radius of start_idx.'''
    adj = self.matrix('dense', 'csr', 'csc')
    dist = ssc.dijkstra(adj, directed=directed, indices=start_idx,
                        unweighted=(not weighted), limit=radius)
    # finite distance <=> reachable within `radius`
    mask = np.isfinite(dist)
    sub_adj = adj[mask][:,mask]
    g = self.__class__.from_adj_matrix(sub_adj)
    if return_mask:
      return g, mask
    return g

  def isograph(self, min_weight=None):
    '''Remove short-circuit edges using the Isograph algorithm.

    min_weight : float, optional
      Minimum weight of edges to consider removing. Defaults to max(MST).

    From "Isograph: Neighbourhood Graph Construction Based On Geodesic Distance
    For Semi-Supervised Learning" by Ghazvininejad et al., 2011.
    Note: This uses the non-iterative algorithm which removes edges
    rather than reweighting them.
    '''
    W = self.matrix('dense')
    # get candidate edges: all edges - MST edges
    tree = self.minimum_spanning_subtree()
    candidates = np.argwhere((W - tree.matrix('dense')) > 0)
    cand_weights = W[candidates[:,0], candidates[:,1]]
    # order by increasing edge weight
    order = np.argsort(cand_weights)
    cand_weights = cand_weights[order]
    # disregard edges shorter than a threshold
    if min_weight is None:
      min_weight = tree.edge_weights().max()
    idx = np.searchsorted(cand_weights, min_weight)
    cand_weights = cand_weights[idx:]
    candidates = candidates[order[idx:]]
    # check each candidate edge: remove it when no alternate two-hop path
    # using only strictly shorter edges connects its endpoints
    to_remove = np.zeros_like(cand_weights, dtype=bool)
    for i, (u,v) in enumerate(candidates):
      W_uv = np.where(W < cand_weights[i], W, 0)
      len_uv = ssc.dijkstra(W_uv, indices=u, unweighted=True, limit=2)[v]
      if len_uv > 2:
        to_remove[i] = True
    ii, jj = candidates[to_remove].T
    return self.remove_edges(ii, jj, copy=True)

  def circle_tear(self, spanning_tree='mst', cycle_len_thresh=5, spt_idx=None,
                  copy=True):
    '''Circular graph tearing.

    spanning_tree: one of {'mst', 'spt'}
    cycle_len_thresh: int, length of longest allowable cycle
    spt_idx: int, start vertex for shortest_path_subtree, random if None

    From "How to project 'circular' manifolds using geodesic distances?"
    by Lee & Verleysen, ESANN 2004.
    See also: shortest_path_subtree, minimum_spanning_subtree
    '''
    # make the initial spanning tree graph
    if spanning_tree == 'mst':
      tree = self.minimum_spanning_subtree().matrix()
    elif spanning_tree == 'spt':
      if spt_idx is None:
        spt_idx = np.random.choice(self.num_vertices())
      tree = self.shortest_path_subtree(spt_idx, directed=False).matrix()
    # find edges in self but not in the tree
    potential_edges = np.argwhere(ss.triu(self.matrix() - tree))
    # remove edges that induce large cycles
    ii, jj = _find_cycle_inducers(tree, potential_edges, cycle_len_thresh)
    return self.remove_edges(ii, jj, symmetric=True, copy=copy)

  def cycle_cut(self, cycle_len_thresh=12, directed=False, copy=True):
    '''CycleCut algorithm: removes bottleneck edges.

    Paper DOI: 10.1.1.225.5335
    '''
    symmetric = not directed
    adj = self.kernelize('binary').matrix('csr', 'dense', copy=True)
    if symmetric:
      adj = adj + adj.T
    # repeatedly find and cut atomic cycles until none remain
    removed_edges = []
    while True:
      c = _atomic_cycle(adj, cycle_len_thresh, directed=directed)
      if c is None:
        break
      # remove edges in the cycle
      ii, jj = c.T
      adj[ii,jj] = 0
      if symmetric:
        adj[jj,ii] = 0
      removed_edges.extend(c)
    #XXX: if _atomic_cycle changes, may need to do this on each loop
    if ss.issparse(adj):
      adj.eliminate_zeros()
    # select only the necessary cuts
    ii, jj = _find_cycle_inducers(adj, removed_edges, cycle_len_thresh,
                                  directed=directed)
    # remove the bad edges
    return self.remove_edges(ii, jj, symmetric=symmetric, copy=copy)
def _atomic_cycle(adj, length_thresh, directed=False):
  """Search for a single "atomic" cycle of length >= length_thresh in adj.

  Starts a BFS from a random vertex; whenever the BFS revisits a vertex, an
  inner (reverse-edge) BFS checks whether the closing edge completes a cycle.
  Returns an array of (u, v) edge pairs forming the cycle, or None if no
  sufficiently long cycle is found from the chosen start vertex.
  """
  # TODO: make this more efficient
  start_vertex = np.random.choice(adj.shape[0])
  # run BFS
  q = deque([start_vertex])
  visited_vertices = set([start_vertex])
  visited_edges = set()
  while q:
    a = q.popleft()
    # works for dense arrays and scipy sparse rows alike
    nbrs = adj[a].nonzero()[-1]
    for b in nbrs:
      if b not in visited_vertices:
        q.append(b)
        visited_vertices.add(b)
        visited_edges.add((a,b))
        if not directed:
          visited_edges.add((b,a))
        continue
      # run an inner BFS, following only already-visited edges backwards,
      # to see if the repeated vertex b closes a cycle back to a
      inner_q = deque([b])
      inner_visited = set([b])
      parent_vertices = {b: -1}
      while inner_q:
        c = inner_q.popleft()
        inner_nbrs = adj[c].nonzero()[-1]
        for d in inner_nbrs:
          if d in inner_visited or (d,c) not in visited_edges:
            continue
          parent_vertices[d] = c
          inner_q.append(d)
          inner_visited.add(d)
          if d != a:
            continue
          # atomic cycle found: walk parent pointers back to b
          cycle = []
          while parent_vertices[d] != -1:
            x, d = d, parent_vertices[d]
            cycle.append((x, d))
          cycle.append((d, a))
          if len(cycle) >= length_thresh:
            return np.array(cycle)
          else:
            # abort the inner BFS
            inner_q.clear()
            break
      # finished considering edge a->b
      visited_edges.add((a,b))
      if not directed:
        visited_edges.add((b,a))
  # no cycles found
  return None
def _find_cycle_inducers(adj, potential_edges, length_thresh, directed=False):
# remove edges that induce large cycles
path_dist = ssc.dijkstra(adj, directed=directed, return_predecessors=False,
unweighted=True)
remove_ii, remove_jj = [], []
for i,j in potential_edges:
if length_thresh < path_dist[i,j] < np.inf:
remove_ii.append(i)
remove_jj.append(j)
else:
# keeping this edge: update path lengths
tmp = (path_dist[:,i] + 1)[:,None] + path_dist[j,:]
ii, jj = np.nonzero(tmp < path_dist)
new_lengths = tmp[ii, jj]
path_dist[ii,jj] = new_lengths
if not directed:
path_dist[jj,ii] = new_lengths
return remove_ii, remove_jj
|
all-umass/graphs
|
graphs/mixins/transformation.py
|
Python
|
mit
| 9,542
|
[
"Gaussian"
] |
36a9643692fb3b35ecf53c7fe37c840c56832d795266c71ad2cebe820b7e3937
|
# python3
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a simple TF classifier for MNIST dataset.
This example comes from the cloudml-samples keras demo.
github.com/GoogleCloudPlatform/cloudml-samples/blob/master/census/tf-keras
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import urllib
import tempfile
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
DATA_DIR = os.path.join(tempfile.gettempdir(), "taxi_data")
DATA_URL = ("https://storage.googleapis.com/cloud-samples-data/ml-engine/chicago_taxi/training/small/")
TRAINING_FILE = "taxi_trips_train.csv"
EVAL_FILE = "taxi_trips_eval.csv"
TRAINING_URL = os.path.join(DATA_URL, TRAINING_FILE)
EVAL_URL = os.path.join(DATA_URL, EVAL_FILE)
_CSV_COLUMNS = [
"tip", "trip_miles", "trip_seconds", "fare", "trip_start_month",
"trip_start_hour", "trip_start_day", "pickup_community_area", "dropoff_community_area",
"pickup_census_tract", "dropoff_census_tract", "pickup_latitude", "pickup_longitude",
"dropoff_latitude", "dropoff_longitude", "payment_type", "company",
]
_LABEL_COLUMN = "tip"
_CATEGORICAL_TYPES = {
"payment_type": pd.api.types.CategoricalDtype(categories=[
'No Charge', 'Credit Card', 'Cash', 'Unknown', 'Dispute'
]),
"company": pd.api.types.CategoricalDtype(categories=[
'Northwest Management LLC', 'Blue Ribbon Taxi Association Inc.',
'Taxi Affiliation Services', 'Dispatch Taxi Affiliation',
'Top Cab Affiliation', 'Choice Taxi Association', '5129 - 87128',
'KOAM Taxi Association', 'Chicago Medallion Leasing INC',
'Chicago Medallion Management', '3201 - C&D Cab Co Inc',
'1247 - 72807 Daniel Ayertey', '5776 - Mekonen Cab Company',
'2092 - 61288 Sbeih company', '0694 - 59280 Chinesco Trans Inc',
'4197 - Royal Star', 'C & D Cab Co Inc', '3591 - 63480 Chuks Cab',
'4053 - Adwar H. Nikola', '3141 - Zip Cab',
'6742 - 83735 Tasha ride inc', '0118 - 42111 Godfrey S.Awir',
'3385 - Eman Cab', '4053 - 40193 Adwar H. Nikola',
'3152 - 97284 Crystal Abernathy', '2823 - 73307 Seung Lee',
'6574 - Babylon Express Inc.', '5724 - 75306 KYVI Cab Inc',
'5074 - 54002 Ahzmi Inc', '2733 - 74600 Benny Jona',
'3253 - 91138 Gaither Cab Co.', '3152 - Crystal Abernathy',
'5437 - Great American Cab Co', '1085 - N and W Cab Co',
'6488 - 83287 Zuha Taxi', '2192 - 73487 Zeymane Corp',
'0118 - Godfrey S.Awir', '4197 - 41842 Royal Star',
'3319 - C&D Cab Company', '4787 - Reny Cab Co',
'1085 - 72312 N and W Cab Co', "3591- 63480 Chuk's Cab",
'6743 - 78771 Luhak Corp', '3623-Arrington Enterprises',
'3623 - 72222 Arrington Enterprises', '3141 - 87803 Zip Cab',
'5074 - Ahzmi Inc', '3897 - Ilie Malec', '2092 - Sbeih company',
'6057 - 24657 Richard Addo', '5006 - 39261 Salifu Bawa',
'3620 - David K. Cab Corp.', '3556 - 36214 RC Andrews Cab',
'2733 - Benny Jona', '4615 - 83503 Tyrone Henderson',
'5129 - 98755 Mengisti Taxi', '5724 - 72965 KYVI Cab Inc',
'585 - 88805 Valley Cab Co', '5997 - 65283 AW Services Inc.',
'2809 - 95474 C & D Cab Co Inc.', '6743 - Luhak Corp',
'5874 - 73628 Sergey Cab Corp.', '3897 - 57856 Ilie Malec',
'3319 - CD Cab Co', '6747 - Mueen Abdalla']),
}
def _download_and_clean_file(filename, url):
  """Downloads data from url, and makes changes to match the CSV format.

  The CSVs may use spaces after the comma delimiters (non-standard) or include
  rows which do not represent well-formed examples. This function strips out
  some of these problems.

  Args:
    filename: filename to save url to
    url: URL of resource to download
  """
  tmp_path, _ = urllib.request.urlretrieve(url)
  with tf.io.gfile.GFile(tmp_path, "r") as src:
    with tf.io.gfile.GFile(filename, "w") as dst:
      for raw in src:
        # normalize "a, b" -> "a,b" and drop surrounding whitespace
        row = raw.strip().replace(", ", ",")
        if not row or "," not in row:
          # skip blank or malformed rows
          continue
        # drop a single trailing period left over from sentence-style rows
        if row[-1] == ".":
          row = row[:-1]
        dst.write(row + "\n")
  tf.io.gfile.remove(tmp_path)
def download(data_dir):
  """Downloads the taxi CSV files if they are not already present.

  Args:
    data_dir: directory where we will access/save the data

  Returns:
    A tuple (training_file_path, eval_file_path) of local paths to the
    downloaded training and eval CSV files.
  """
  tf.io.gfile.makedirs(data_dir)
  training_file_path = os.path.join(data_dir, TRAINING_FILE)
  if not tf.io.gfile.exists(training_file_path):
    _download_and_clean_file(training_file_path, TRAINING_URL)
  eval_file_path = os.path.join(data_dir, EVAL_FILE)
  if not tf.io.gfile.exists(eval_file_path):
    _download_and_clean_file(eval_file_path, EVAL_URL)
  return training_file_path, eval_file_path
def upload(train_df, eval_df, train_path, eval_path):
  """Writes the train/eval dataframes as headerless CSVs beside the given paths."""
  train_dir = os.path.dirname(train_path)
  eval_dir = os.path.dirname(eval_path)
  train_df.to_csv(os.path.join(train_dir, TRAINING_FILE),
                  index=False, header=False)
  eval_df.to_csv(os.path.join(eval_dir, EVAL_FILE),
                 index=False, header=False)
def preprocess(dataframe):
  """Converts categorical features to numeric. Removes unused columns.

  Args:
    dataframe: Pandas dataframe with raw data

  Returns:
    Dataframe with preprocessed data
  """
  # Integer columns become float32 so downstream TF treats them as continuous.
  int_cols = dataframe.select_dtypes(["int64"]).columns
  dataframe[int_cols] = dataframe[int_cols].astype("float32")
  # Object columns are mapped onto their fixed category vocabularies and then
  # replaced by their integer category codes.
  obj_cols = dataframe.select_dtypes(["object"]).columns
  dataframe[obj_cols] = dataframe[obj_cols].apply(
      lambda col: col.astype(_CATEGORICAL_TYPES[col.name]))
  dataframe[obj_cols] = dataframe[obj_cols].apply(lambda col: col.cat.codes)
  return dataframe
def standardize(dataframe):
  """Scales numerical columns using their means and standard deviation.

  Args:
    dataframe: Pandas dataframe

  Returns:
    Input dataframe with the numerical columns scaled to z-scores
  """
  for name, dtype in zip(dataframe.dtypes.index, dataframe.dtypes.astype(str)):
    if dtype != "float32":
      # only continuous (float32) features are standardized
      continue
    col = dataframe[name]
    dataframe[name] = (col - col.mean()) / col.std()
  return dataframe
def load_data(train_path="", eval_path=""):
"""Loads data into preprocessed (train_x, train_y, eval_y, eval_y) dataframes.
Args:
train_path: Local or GCS path to uploaded train data to.
eval_path: Local or GCS path to uploaded eval data to.
Returns:
A tuple (train_x, train_y, eval_x, eval_y), where train_x and eval_x are
Pandas dataframes with features for training and train_y and eval_y are
numpy arrays with the corresponding labels.
"""
# Download Census dataset: Training and eval csv files.
training_file_path, eval_file_path = download(DATA_DIR)
train_df = pd.read_csv(training_file_path)
eval_df = pd.read_csv(eval_file_path)
train_df = preprocess(train_df)
eval_df = preprocess(eval_df)
# Split train and eval data with labels. The pop method copies and removes
# the label column from the dataframe.
train_x, train_y = train_df, train_df.pop(_LABEL_COLUMN)
eval_x, eval_y = eval_df, eval_df.pop(_LABEL_COLUMN)
# Join train_x and eval_x to normalize on overall means and standard
# deviations. Then separate them again.
all_x = pd.concat([train_x, eval_x], keys=["train", "eval"])
all_x = standardize(all_x)
train_x, eval_x = all_x.xs("train"), all_x.xs("eval")
# Rejoin features and labels and upload to GCS.
if train_path and eval_path:
train_df = train_x.copy()
train_df[_LABEL_COLUMN] = train_y
eval_df = eval_x.copy()
eval_df[_LABEL_COLUMN] = eval_y
upload(train_df, eval_df, train_path, eval_path)
# Reshape label columns for use with tf.data.Dataset
train_y = np.asarray(train_y).astype("float32").reshape((-1, 1))
eval_y = np.asarray(eval_y).astype("float32").reshape((-1, 1))
return train_x, train_y, eval_x, eval_y
|
GoogleCloudPlatform/ml-pipeline-generator-python
|
examples/taxi/xgb/model/taxi_preprocess.py
|
Python
|
apache-2.0
| 8,947
|
[
"CRYSTAL"
] |
4e7a005c867dc7cc5b785b5f8a0b2b9f80b505207b1d4d1c96d0dde01ccf6ccd
|
import numpy as np
from signal_likelihood import SignalLikelihood
import unittest
from numpy.testing import assert_array_almost_equal,assert_almost_equal, assert_equal
import math
"""
Models the ambient audio scenery with multiple, independent
Gaussian distributions. Based on that model we can distinguish
between the ambient sounds and sounds that are
unlikely to occur naturally.
This model requires the assumption that the amplitudes
of frequencies are independent. Most likely we will need
to use a model that allows for correlations (multivariate
gaussian). For now, this is the simplest solution to the
problem.
Under the assumption of independence, we model each frequency
amplitude with a gaussian. We just need to save the mean
and variance of each frequency amplitude indepdently.
To test a signal, we calculate the probability of each of the
tested signal's frequency amplitude. Their product (independence)
will be our meassure of the overall probability of hte signal
being ambient noise.
"""
class Gaussian(SignalLikelihood):
    """Per-frequency independent Gaussian model of the ambient sound scenery.

    train() updates the running mean/variance online (Knuth's algorithm);
    calculate_prob() scores how likely a feature vector is under the model.
    """
    def __init__(self):
        # Running statistics; each is a vector with one entry per frequency bin.
        self.mean = None
        self.var = None
        self.sumSquareDif = None
        self.n = 0
    def train(self, features):
        """
        Updates the mean and variance of the gaussian model capturing the
        ambient sound scenery.
        """
        if self.mean is None:
            # no previous mean or variance exist
            self.mean = features
            # we need a zero vector with the size of the feature vector
            self.sumSquareDif = np.zeros_like(features)
            self.var = np.zeros_like(features)
            self.n = 1
        else:
            # previous mean is old_sum / old_n => new_sum = (old_sum * old_n) + new values
            old_mean = self.mean
            old_sum = old_mean * self.n
            new_sum = old_sum + features
            self.n = self.n + 1
            self.mean = new_sum / self.n
            # vectorized adaptation of Knuth's online variance algorithm
            # the original algorithm can be found here:
            # Donald E. Knuth (1998). The Art of Computer Programming, volume 2:
            # Seminumerical Algorithms, 3rd edn., p. 232. Boston: Addison-Wesley.
            # update sum of square differences
            self.sumSquareDif = self.sumSquareDif + (features - old_mean) * (features - self.mean)
            # update variance (sample variance, divisor n - 1)
            self.var = self.sumSquareDif / (self.n - 1)
    def calculate_prob(self, features):
        """
        Calculates the probability that the signal described by the
        features is an ambient sound.
        Returns 0 while any frequency bin still has zero variance.
        """
        if np.any(self.var == 0):
            return 0
        # this is a vectorized version of the pdf of a normal distribution for each frequency amplitude
        # it returns one probability for each of the signal's frequency amplitudes
        # NOTE(review): this formula uses self.var where a normal pdf expects
        # the standard deviation (sigma), even though train() accumulates the
        # *variance*. Looks like a bug -- but test_probability_calculation
        # below pins the current behaviour, so confirm before changing.
        probs = np.exp(-(features-self.mean)**2/(2.*self.var**2)) / (math.sqrt(math.pi * 2.) * self.var)
        # simplification: assumption of independent frequencies => product
        return np.prod(probs)
class GaussianTests(unittest.TestCase):
    """Unit tests for the online mean/variance updates and the pdf product."""
    def train(self, data):
        # Helper: feed each feature vector to a fresh model and return it.
        gaussian = Gaussian()
        for datum in data:
            gaussian.train(datum)
        return gaussian
    def checkMean(self, data, expectedMean):
        gaussian = self.train(data)
        assert_almost_equal(gaussian.mean, expectedMean)
    def checkVariance(self, data, expectedVar):
        gaussian = self.train(data)
        assert_almost_equal(gaussian.var, expectedVar)
    def test_mean_for_one_feature(self):
        data = [np.array([0.]), np.array([6.]), np.array([10.]), np.array([8.])]
        expectedMean = np.array([6.])
        self.checkMean(data, expectedMean)
    def test_mean_for_multiple_features(self):
        data = [np.array([0., 3.]), np.array([6., 8.]), np.array([10., 4.]), np.array([8., 7.])]
        expectedMean = np.array([6., 5.5])
        self.checkMean(data, expectedMean)
    def test_variance_for_one_feature(self):
        data = [np.array([1.]), np.array([0.]), np.array([2.]), np.array([1.]), np.array([0.])]
        expectedVariance = np.array([0.7])
        self.checkVariance(data, expectedVariance)
    # BUG FIX: this test previously reused the name test_variance_for_one_feature,
    # silently shadowing the single-feature case above so it never ran.
    def test_variance_for_multiple_features(self):
        data = [np.array([1., 0.]), np.array([0., 2.]), np.array([2., 1.]), np.array([1., 0.]), np.array([0., 1.])]
        expectedVariance = np.array([0.7, 0.7])
        self.checkVariance(data, expectedVariance)
    def test_probability_calculation(self):
        gaussian = Gaussian()
        gaussian.mean = np.array([5., 3.])
        gaussian.var = np.array([2., 1.])
        x = np.array([4.,4.])
        expected = 0.0426
        actual = gaussian.calculate_prob(x)
        assert_almost_equal(actual,expected, decimal=4)
|
rfcx/defunct
|
sound-localization/localization/gaussian.py
|
Python
|
apache-2.0
| 4,871
|
[
"Gaussian"
] |
474c5cfadb85ce0f381f62396f4590011fd6b42fa4c29642d8df84e4844216ff
|
"""
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
General purpose:
................
The class ModelSpectraStacks is dedicated to modelling and extracting information from stacks of spectra.
"""
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import os
import astropy.cosmology as co
cosmo=co.Planck13 #co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.units as u
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import astropy.io.fits as fits
from lineListVac import *
allLinesList = n.array([ [Ne3,Ne3_3869,"Ne3_3869","left"], [Ne3,Ne3_3968,"Ne3_3968","left"], [O3,O3_4363,"O3_4363","right"], [O3,O3_4960,"O3_4960","left"], [O3,O3_5007,"O3_5007","right"], [N2,N2_6549,"N2_6549","left"], [N2,N2_6585,"N2_6585","right"], [H1,H1_3970,"H1_3970","right"], [H1,H1_4102,"H1_4102","right"], [H1,H1_4341,"H1_4341","right"], [H1,H1_4862,"H1_4862","left"], [H1,H1_6564,"H1_6564","left"]])
# other lines that are optional
# , [S2,S2_6718,"S2_6718","left"], [S2,S2_6732,"S2_6732","right"], [Ar3,Ar3_7137,"Ar3_7137","left"], [H1,H1_1216,"H1_1216","right"]
doubletList = n.array([[O2_3727,"O2_3727",O2_3729,"O2_3729",O2_mean]])
# import the fitting routines
import LineFittingLibrary as lineFit
#O2a=3727.092
#O2b=3729.875
#O2=(O2a+O2b)/2.
#Hg=4102.892
#Hd=4341.684
#Hb=4862.683
#O3a=4960.295
#O3b=5008.240
#Ha=6564.61
# AB magnitude -> spectral flux density conversions
fnu = lambda mAB: 10 ** (-(mAB + 48.6) / 2.5)  # erg/cm2/s/Hz
flambda = lambda mAB, ll: 10 ** 10 * c * 1000 * fnu(mAB) / ll ** 2.  # erg/cm2/s/A
# Calzetti attenuation curve k(lambda): blue (kla) and red (klb) branches
kla = lambda ll: 2.659 * (-2.156 + 1.509 / ll - 0.198 / ll ** 2 + 0.011 / ll ** 3) + 4.05
klb = lambda ll: 2.659 * (-1.857 + 1.040 / ll) + 4.05
def kl(ll):
	"""Calzetti extinction law: red branch above 6300, blue branch below."""
	return klb(ll) if ll > 6300 else kla(ll)
class ModelSpectraStacks:
"""
This class fits the emission lines on the continuum-subtracted stack.
:param spec_file: fits file generated with a LF in a luminosity bin.
:param cosmo: cosmology class from astropy
:param sdss_min_wavelength: minimum wavelength considered by firefly (default : 1000)
:param sdss_max_wavelength: minimum wavelength considered by firefly (default : 7500)
:param dV: default value that hold the place (default : -9999.99)
"""
def __init__(self, spec_file, mode="MILES", cosmo=cosmo, sdss_min_wavelength= 1000., sdss_max_wavelength=7500., dV=-9999.99, version="stellarpop-m11-salpeter"):
self.mode = mode
self.spec_file = spec_file
self.spec_file_base = os.path.basename(spec_file)
spl = self.spec_file_base.split('.')[0].split('-')
self.plate = spl[1]
self.mjd = spl[2]
self.fiber = spl[3]
self.spec_model_file = os.path.join( os.environ['SDSSDR12_DIR'], version, "stellarpop", self.plate, self.spec_file_base[:-5] + "-SPM-MILES.fits")
outPutDir = os.path.join( os.environ['SDSSDR12_DIR'], version, "model", self.plate)
if os.path.isdir(outPutDir)==False:
os.mkdir(outPutDir)
self.outFile = os.path.join( outPutDir, self.spec_file_base[:-5] + ".model")
self.cosmo = cosmo
self.sdss_max_wavelength = sdss_max_wavelength
self.sdss_min_wavelength = sdss_min_wavelength
self.dV = dV
self.side = ''
hdus = fits.open(self.spec_file)
self.hdR = hdus[0].header
self.hdu1 = hdus[1]
self.z = hdus[2].data['Z'][0]
print "Loads the data"
self.wl = 10**self.hdu1.data['loglam']
self.fl = self.hdu1.data['flux']
self.flErr = self.hdu1.data['ivar']**(-0.5)
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
print "loads model"
hdus = fits.open(self.spec_model_file)
self.hdu2 = hdus[1]
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
def interpolate_stack(self):
"""
Divides the measured stack in overlapping and non-overlapping parts with the model.
"""
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# bluer than model
self.stBlue = (self.wl<=self.sdss_min_wavelength)
# optical
self.stOpt = (self.wl<self.sdss_max_wavelength)& (self.wl> self.sdss_min_wavelength)
# redder than model
self.stRed = (self.wl>=self.sdss_max_wavelength)
if len(self.wl)<50 :
print "no data, skips spectrum"
return 0.
if len(self.wl[self.stBlue])>0:
self.contBlue=n.median(self.fl[self.stBlue])
self.side='blue'
if len(self.wl[self.stRed])>0:
self.contRed=n.median(self.fl[self.stRed])
self.side='red'
if len(self.wl[self.stRed])>0 and len(self.wl[self.stBlue])>0:
self.contRed=n.median(self.fl[self.stRed])
self.contBlue=n.median(self.fl[self.stBlue])
self.side='both'
if len(self.wl[self.stRed])==0 and len(self.wl[self.stBlue])==0:
self.side='none'
def interpolate_model(self):
"""
Interpolates the model to an array with the same coverage as the stack.
"""
# overlap region with stack
print "interpolate model"
self.mdOK =(self.wlModel>n.min(self.wl))&(self.wlModel<n.max(self.wl))
mdBlue=(self.wlModel<=n.min(self.wl)) # bluer part than data
mdRed=(self.wlModel>=n.max(self.wl)) # redder part than data
okRed=(self.wlModel>4650)&(self.wlModel<self.sdss_max_wavelength)
# Correction model => stack
CORRection=n.sum((self.wl[self.stOpt][1:]-self.wl[self.stOpt][:-1])* self.fl[self.stOpt][1:]) / n.sum((self.wlModel[ self.mdOK ][1:]-self.wlModel[ self.mdOK ][:-1])* self.flModel [ self.mdOK ][1:])
print "Correction", CORRection
if self.side=='red':
self.model=interp1d(n.hstack((self.wlModel[ self.mdOK ],n.arange(self.wlModel[ self.mdOK ].max()+0.5, stack.x.max(), 0.5))), n.hstack(( self.flModel [ self.mdOK ]*CORRection, n.ones_like(n.arange( self.wlModel[ self.mdOK ].max() + 0.5, stack.x.max(), 0.5))*contRed )) )
elif self.side=='blue':
self.model=interp1d(n.hstack((n.arange(stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5),self.wlModel[ self.mdOK ])),n.hstack(( n.ones_like(n.arange(stack.x.min() ,self.wlModel[ self.mdOK ].min() -1.,0.5))* contBlue, self.flModel [ self.mdOK ]*CORRection )) )
elif self.side=='both':
x1=n.hstack((n.arange(stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5), self.wlModel[ self.mdOK ]))
y1=n.hstack(( n.ones_like(n.arange(stack.x.min(),self.wlModel[ self.mdOK ].min()- 1.,0.5))*contBlue, self.flModel [ self.mdOK ]*CORRection ))
x2=n.hstack((x1,n.arange(self.wlModel[ self.mdOK ].max()+0.5,stack.x.max(),0.5)))
y2=n.hstack((y1,n.ones_like(n.arange(self.wlModel[ self.mdOK ].max()+0.5, stack.x.max(), 0.5))*contRed ))
self.model=interp1d(x2,y2)
elif self.side=='none':
self.model=interp1d(self.wlModel[ self.mdOK ], self.flModel [ self.mdOK ])
def subtract_continuum_model(self):
"""
Creates the continuum substracted spectrum: the 'line' spectrum.
"""
self.interpolate_stack()
self.interpolate_model()
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
print "range probed", self.wlLineSpectrum[0], self.wlLineSpectrum[-1], len( self.wlLineSpectrum)
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
def fit_lines_to_lineSpectrum(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to the line spectrum"
lfit = lineFit.LineFittingLibrary()
#self.subtract_continuum_model()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, a0= O2_3727*(1 + self.z) , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
# measure line properties from the mean weighted stack
print li[2]
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, li[1]*(1 + self.z), lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
data.append(dat_mean)
h.append(hI)
heading="".join(h)
out=n.hstack((data))
#print "out", out
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.lineSpec_cols = fits.ColDefs([col0, col1])
#print self.lineSpec_cols
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.lineSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
#print self.lineSpec_cols
self.lineSpec_tb_hdu = fits.BinTableHDU.from_columns(self.lineSpec_cols)
def fit_lines_to_fullSpectrum(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to full spectrum"
lfit = lineFit.LineFittingLibrary()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.fl, self.flErr, a0= O2_3727*(1 + self.z) , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
print li[2]
# measure line properties from the mean weighted stack
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.fl, self.flErr, li[1]*(1 + self.z), lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
data.append(dat_mean)
#print li[2], dat_mean
h.append(hI)
heading="".join(h)
out=n.hstack((data))
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.fullSpec_cols = fits.ColDefs([col0, col1])
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.fullSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
self.fullSpec_tb_hdu = fits.BinTableHDU.from_columns(self.fullSpec_cols)
def save_spectrum(self):
"""
Saves the stack spectrum, the model and derived quantities in a single fits file with different hdus.
"""
wavelength = fits.Column(name="wavelength",format="D", unit="Angstrom", array= self.wlLineSpectrum)
flux = fits.Column(name="flux",format="D", unit="Angstrom", array= self.flLineSpectrum)
fluxErr = fits.Column(name="fluxErr",format="D", unit="Angstrom", array= self.flErrLineSpectrum)
# new columns
cols = fits.ColDefs([wavelength, flux, fluxErr])
lineSptbhdu = fits.BinTableHDU.from_columns(cols)
# previous file
prihdu = fits.PrimaryHDU(header=self.hdR)
thdulist = fits.HDUList([prihdu, self.hdu1, self.hdu2, lineSptbhdu, self.lineSpec_tb_hdu, self.fullSpec_tb_hdu])
if os.path.isfile(self.outFile):
os.remove(self.outFile)
thdulist.writeto(self.outFile)
|
JohanComparat/pySU
|
galaxy/python/ModelSdssSpectra.py
|
Python
|
cc0-1.0
| 12,042
|
[
"Firefly",
"Gaussian"
] |
17b7adbaf9ca2784a20b71ab16da237c5075194955ad883399191a9f35f9aec2
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
""" run_sim.py - an example python-meep simulation of a dielectric sphere scattering a broadband impulse,
illustrating the use of the convenient functions provided by meep_utils.py
(c) 2014 Filip Dominec, see http://fzu.cz/~dominecf/meep/ for more information """
import numpy as np
import time, sys, os
import meep_utils, meep_materials
from meep_utils import in_sphere, in_xcyl, in_ycyl, in_zcyl, in_xslab, in_yslab, in_zslab, in_ellipsoid
import meep_mpi as meep
#import meep
c = 2.997e8
sim_param, model_param = meep_utils.process_param(sys.argv[1:])
class spdc_model(meep_utils.AbstractMeepModel): #{{{
    """Defines the 2-D SPDC simulation model: cell size, materials, source
    band and monitor positions."""
    def __init__(self, comment="", simtime=15e-12, resolution=3e-6, size_x=1350e-6, size_y=1350e-6, size_z=0,
            wgwidth=10e-6, wgheight=20e-6, monzd=180e-6):
        meep_utils.AbstractMeepModel.__init__(self)        ## Base class initialisation
        self.simulation_name = "SPDC"
        # NOTE(review): monzd is overwritten with size_z here, so the caller's
        # monzd argument is ignored -- confirm this is intended.
        monzd=size_z
        self.register_locals(locals())          ## Remember the parameters
        ## Constants for the simulation
        substrate_z = size_x / 3
        self.pml_thickness = 10e-6
        self.monitor_z1, self.monitor_z2 = (-(monzd)/2, (monzd)/2)
        self.simtime = simtime      # [s]
        self.srcFreq, self.srcWidth = 5000e9, 5000e9     # [Hz] (note: gaussian source ends at t=10/srcWidth)
        self.interesting_frequencies = (0e9, 2000e9)    # Which frequencies will be saved to disk
        self.Kx = 0; self.Ky = 0; self.padding=0
        self.size_x = size_x
        self.size_y = size_y
        self.size_z = size_z
        ## Define materials
        self.materials = [meep_materials.material_dielectric(eps=4., where = self.where_diel)]
        #self.materials += [meep_materials.material_dielectric(eps=4., where = self.where_substr)]
        self.TestMaterials()
        # highest frequency that the grid can resolve (Courant-limited)
        f_c = c / np.pi/self.resolution/meep_utils.meep.use_Courant()
        meep_utils.plot_eps(self.materials, mark_freq=[f_c])
    # each material has one callback, used by all its polarizabilities (thus materials should never overlap)
    def where_diel(self, r):
        # NOTE(review): the waveguide geometry below is commented out, so this
        # callback currently reports vacuum everywhere (always returns 0).
        #curve1 = self.size_x/np.pi * np.tanh(r.y()/self.size_y*3)
        #curve2 = - self.size_x/np.pi * np.tanh(r.y()/self.size_y*3)
        #if r.x() > curve1-self.wgwidth/2 and r.x() < curve1+self.wgwidth/2:
            #return self.return_value             # (do not change this line)
        #if r.x() > curve2-self.wgwidth/2 and r.x() < curve2+self.wgwidth/2 and r.y()>0:
            #return self.return_value             # (do not change this line)
        return 0
#}}}
# Model selection
model = spdc_model(**model_param)
if sim_param['frequency_domain']: model.simulation_name += ("_frequency=%.4e" % sim_param['frequency'])
## Initialize volume and structure according to the model
#XXX vol = meep.vol2d(model.size_x, model.size_y, 1./model.resolution)
vol = meep.vol2d(model.size_x, model.size_y, 1./model.resolution)
vol.center_origin()
#s = meep_utils.init_structure(model=model, volume=vol, sim_param=sim_param, pml_axes=meep.Z)
s = meep_utils.init_structure(model=model, volume=vol, sim_param=sim_param, pml_axes="All")
## Create fields with Bloch-periodic boundaries (any transversal component of k-vector is allowed, but may not radiate)
f = meep.fields(s)
## Add a source of the plane wave (see meep_utils for definition of arbitrary source shape)
if not sim_param['frequency_domain']: ## Select the source dependence on time
src_time_type = meep.band_src_time(model.srcFreq/c / 2 , model.srcWidth/c, model.simtime*c/1.1)
#src_time_type = meep.gaussian_src_time(model.srcFreq/c, model.srcWidth/c)
else:
src_time_type = meep.continuous_src_time(sim_param['frequency']/c)
# XXX srcvolume = meep.volume(
#meep.vec(-model.wgheight/2, -model.size_y/4-model.wgwidth/2, -model.size_z/2+model.pml_thickness),
#meep.vec(+model.wgheight/2, -model.size_y/4+model.wgwidth/2, -model.size_z/2+model.pml_thickness))
srcvolume = meep.volume(
meep.vec(-model.size_x/2, -model.size_y/2+model.pml_thickness),
meep.vec( model.size_x/2, -model.size_y/2+model.pml_thickness))
## Replace the f.add_volume_source(meep.Ex, srctype, srcvolume) line with following:
## Option for a custom source (e.g. exciting some waveguide mode)
class SrcAmplitudeFactor(meep.Callback):
    ## The source amplitude is complex -> phase factor modifies its direction
    ## todo: implement in MEEP: we should define an AmplitudeVolume() object and reuse it for monitors later
    # NOTE(review): Kx and Ky are accepted but never stored, so the amplitude
    # below is pure random noise with no transversal phase factor -- confirm.
    def __init__(self, Kx=0, Ky=0): meep.Callback.__init__(self)
    def complex_vec(self, vec):   ## Note: the 'vec' coordinates are _relative_ to the source center
        # random complex amplitude with real/imag parts uniform in [-0.5, 0.5)
        return (np.random.random()-.5) + 1j*(np.random.random()-.5)
af = SrcAmplitudeFactor(Kx=model.Kx, Ky=model.Ky)
meep.set_AMPL_Callback(af.__disown__())
f.add_volume_source(meep.Ez, src_time_type, srcvolume, meep.AMPL)
## Secondary (pump) source
# BUG FIX: the pump source time profile was assigned to 'src_time_type' but
# passed below as the undefined name 'src_time_type2', raising a NameError
# (and clobbering the primary source's time profile). Use a distinct name.
src_time_type2 = meep.continuous_src_time(model.srcFreq/c)
f.add_volume_source(meep.Ez, src_time_type2, srcvolume)
## Define monitors planes and visualisation output
#monitor_options = {'size_x':model.size_x, 'size_y':model.size_y, 'Kx':model.Kx, 'Ky':model.Ky}
#monitor1_Ex = meep_utils.AmplitudeMonitorPlane(comp=meep.Ex, z_position=model.monitor_z1, **monitor_options)
#monitor1_Hy = meep_utils.AmplitudeMonitorPlane(comp=meep.Hy, z_position=model.monitor_z1, **monitor_options)
#monitor2_Ex = meep_utils.AmplitudeMonitorPlane(comp=meep.Ex, z_position=model.monitor_z2, **monitor_options)
#monitor2_Hy = meep_utils.AmplitudeMonitorPlane(comp=meep.Hy, z_position=model.monitor_z2, **monitor_options)
#XXX TODO
slice_makers = [meep_utils.Slice(model=model, field=f, components=(meep.Dielectric), at_t=0, name='EPS')]
slice_makers += [meep_utils.Slice(model=model, field=f, components=meep.Ez, at_t=[0e-12, 100e-12], min_timestep=.025e-12, outputgif=True)]
slice_makers += [meep_utils.Slice(model=model, field=f, components=meep.Ez, at_t=2.5e-12)]
if not sim_param['frequency_domain']: ## time-domain computation
f.step()
dt = (f.time()/c)
meep_utils.lorentzian_unstable_check_new(model, dt)
timer = meep_utils.Timer(simtime=model.simtime); meep.quiet(True) # use custom progress messages
while (f.time()/c < model.simtime): # timestepping cycle
f.step()
timer.print_progress(f.time()/c)
#for monitor in (monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy): monitor.record(field=f)
for slice_maker in slice_makers: slice_maker.poll(f.time()/c)
for slice_maker in slice_makers: slice_maker.finalize()
meep_utils.notify(model.simulation_name, run_time=timer.get_time())
else: ## frequency-domain computation
f.step()
f.solve_cw(sim_param['MaxTol'], sim_param['MaxIter'], sim_param['BiCGStab'])
#for monitor in (monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy): monitor.record(field=f)
for slice_maker in slice_makers: slice_maker.finalize()
meep_utils.notify(model.simulation_name)
## Get the reflection and transmission of the structure
#if meep.my_rank() == 0:
#freq, s11, s12 = meep_utils.get_s_parameters(monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy,
#frequency_domain=sim_param['frequency_domain'], frequency=sim_param['frequency'],
#maxf=model.srcFreq+model.srcWidth, pad_zeros=1.0, Kx=model.Kx, Ky=model.Ky)
#meep_utils.savetxt(freq=freq, s11=s11, s12=s12, model=model)
#import effparam # process effective parameters for metamaterials
with open("./last_simulation_name.dat", "w") as outfile: outfile.write(model.simulation_name)
meep.all_wait() # Wait until all file operations are finished
|
FilipDominec/python-meep-utils
|
spdc.py
|
Python
|
gpl-2.0
| 7,804
|
[
"Gaussian",
"exciting"
] |
b858154cbd483a1a5152320ac002ce32050a575b8b1948e6c83e8c17b2334421
|
#
# Copyright 2016-2019 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""A representation of the a hardware device"""
import inspect
from .device import DeviceBase
class HWDevice(DeviceBase):
    """A hardware device node in the instrument device tree."""

    def __init__(self, name, origin=None, parent=None):
        super(HWDevice, self).__init__(
            name, origin=origin, parent=parent
        )

    def config_info(self):
        """Return the configuration of this device and all of its children."""
        return visit(self)

    def get_properties(self):
        """Collect the values of all properties declared on this class."""
        meta = self.init_config_info()
        for key, prop in inspect.getmembers(self.__class__):
            if isinstance(prop, property):
                # Prefer the '.value' attribute of wrapped values; fall back
                # to the raw attribute for plain properties.
                # BUG FIX: this was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit and masked unrelated errors.
                try:
                    meta[key] = getattr(self, key).value
                except AttributeError:
                    meta[key] = getattr(self, key)
        return meta

    def init_config_info(self):
        # Base metadata; subclasses extend this with their properties.
        return dict(name=self.name)

    def end_config_info(self, meta):
        # Append the names of child devices, if any.
        if self.children:
            meta['children'] = self.children.keys()
        return meta

    def configure_me(self, value):
        """Apply a flat {attribute: value} mapping to this device."""
        for key in value:
            setattr(self, key, value[key])

    def configure(self, info):
        """Dispatch per-device configuration mappings to named sub-devices."""
        for key, value in info.items():
            node = self.get_device(key)
            if node:
                node.configure_me(value)
def visit(node, root='', meta=None):
    """Collect the properties of *node* and all its descendants into *meta*,
    keyed by dot-separated paths rooted at *root* (e.g. 'parent.child')."""
    if meta is None:
        meta = {}
    # a nameless node is labelled 'unknown' in the path
    label = node.name if node.name is not None else 'unknown'
    qualified = label if root == '' else root + '.' + label
    meta[qualified] = node.get_properties()
    for child in node.children.values():
        visit(child, root=qualified, meta=meta)
    return meta
|
guaix-ucm/numina
|
numina/instrument/hwdevice.py
|
Python
|
gpl-3.0
| 1,772
|
[
"VisIt"
] |
78be0ca8f15468575f4b7ba85866e4364cef03089e6fb19425db974b853ba7c2
|
# -*- coding: utf-8 -*-
"""
Bible
-----
A minimalist app to store and recall bible verses
:copyright: (c) 2015 by Brian Kim
:license: BSD
"""
from flask import Flask
from api import api
import model
def create_app(conf='conf/debug.cfg'):
    """
    Build and return a configured Flask application instance.

    *conf* is the path of a pyfile-style config loaded into the app.
    """
    application = Flask(__name__)
    application.config.from_pyfile(conf)
    # Bind the database layer to this app and make sure the schema exists.
    model.db.init_app(application)
    with application.app_context():
        model.db.create_all()
    # Mount the versioned REST API.
    application.register_blueprint(api, url_prefix='/api/v1')
    return application
if __name__=="__main__":
    # Development entry point: serve with the debug config, bound to all
    # interfaces (not suitable for production as-is).
    create_app().run(host='0.0.0.0')
|
briansan/bible
|
bible/__init__.py
|
Python
|
bsd-2-clause
| 656
|
[
"Brian"
] |
1cc8bf0d2512d6f4f27c5d5053d171cc84dfab243df43d2bbc6291fc7a41417d
|
#!/usr/bin/python
# Use 1 or 2 arguments:
# 1 (read): Gaussian output file
# 2 (read): Gaussian fchk file
# 3 (write): generic output file (default: append ".gen" to Gaussian output)
# NOTE: Python 2 script (uses "print >>", file.next(), list-returning map()).
import sys
import re
#=============================
# Get input files
try:
    gau_output = sys.argv[1]
except IndexError:
    sys.exit("Missing input file")
try:
    gau_fchk = sys.argv[2]
except IndexError:
    sys.exit("Missing input file")
try:
    gen_output = sys.argv[3]
except IndexError:
    # No explicit output name: derive it from the Gaussian output file name.
    gen_output = gau_output + ".gen"
#=============================
# Read the data from the output file(s)
# All quantities below are accumulated in atomic units.
Q_natoms = 0           # number of atoms
Q_charge = 0           # total charge
Q_multiplicity = 0     # spin multiplicity
Q_energy = 0           # total energy (charge self-energy subtracted later)
Q_energy_lower = 0     # energy of the lower state (conical intersections)
Q_selfenergy = 0       # self energy of the point charges
Q_dipole = [0, 0, 0]   # dipole moment vector
Q_mulliken = []        # Mulliken atomic charges
Q_esp = []             # ESP-fitted atomic charges
Q_gradient = []        # Cartesian gradient
Q_gradient_lower = []  # Cartesian gradient of the lower state
Q_hessian = []         # Cartesian Hessian (lower triangle, as stored in fchk)
Q_potfile = ""         # Fortran unit holding the electrostatic potential
Q_potential = []       # electrostatic potential values
# Read the data available in the fchk file, in atomic units
file_gau = open(gau_fchk, "r")
for line in file_gau:
    if re.match("Number of atoms", line):
        Q_natoms = int(line.rstrip().split()[4])
    elif re.match("Charge", line):
        Q_charge = int(line.rstrip().split()[2])
    elif re.match("Multiplicity", line):
        Q_multiplicity = int(line.rstrip().split()[2])
    elif re.match("Total Energy", line):
        Q_energy = float(line.rstrip().split()[3])
    elif re.match("Dipole Moment", line):
        # The three components are on the following line.
        Q_dipole = map(float, file_gau.next().split())
    elif re.match("Cartesian Gradient", line):
        # fchk arrays hold 5 values per line: read ceil(N/5) lines
        # (integer division is intentional Python 2 floor division).
        num = (int(line.split()[4])-1)/5+1
        line = ""
        for i in range(num):
            line += file_gau.next()
        Q_gradient = map(float, line.split())
    elif re.match("Cartesian Force Constants", line):
        # Lower-triangular Hessian, again 5 values per line.
        num = (int(line.split()[5])-1)/5+1
        line = ""
        for i in range(num):
            line += file_gau.next()
        Q_hessian = map(float, line.split())
file_gau.close()
# Read the data in the Gaussian output file
file_gau = open(gau_output, "r")
for line in file_gau:
    # Self energy of the charges
    if re.search("Self energy of the charges", line):
        Q_selfenergy = float(line.rstrip().split()[6])
        # Remove the charge-charge self energy from the total energy.
        Q_energy -= Q_selfenergy
    # Energy difference and gradients for a conical intersection
    elif re.search("Energy difference", line):
        Q_energy_lower = Q_energy + float(line.split()[2])
    elif re.match("\s*Gradient of iOther State", line):
        # One line of x/y/z components per atom follows the header.
        del Q_gradient_lower[:]
        for i in range(Q_natoms):
            Q_gradient_lower.extend(map(float, file_gau.next().split()))
    elif re.match("\s*Gradient of iVec State", line):
        # This gradient overrides the one read from the fchk file.
        del Q_gradient[:]
        for i in range(Q_natoms):
            Q_gradient.extend(map(float, file_gau.next().split()))
    # Mulliken charges
    elif (re.match("\s*Total atomic charges", line) or re.match("\s*Mulliken atomic charges", line)):
        file_gau.next()  # skip the column-header line
        for i in range(Q_natoms):
            Q_mulliken.append(float(file_gau.next().split()[2]))
    # ESP charges
    elif re.search("Charges from ESP fit,", line):
        file_gau.next()
        file_gau.next()
        del Q_esp[:]
        for i in range(Q_natoms):
            Q_esp.append(float(file_gau.next().split()[2]))
    # ESP charges calculated externally (added by a script)
    elif re.search("ESP Charges \(Molden\)", line):
        del Q_esp[:]
        for i in range(Q_natoms):
            Q_esp.append(float(file_gau.next().split()[1]))
    # Fortran unit where the electrostatic potential is written
    elif re.search("Compute potential derivative range", line):
        Q_potfile = int(line.split()[11])
file_gau.close()
# Read the potential
if (Q_potfile):
    # The potential values are in the 4th column of fort.<unit>.
    potfile = open("fort.%i" % Q_potfile, "r")
    for line in potfile:
        Q_potential.append(float(line.split()[3]))
    potfile.close()
#=============================
# Write the generic output file, in atomic units
# Charges are written one value per line; gradients, Hessian and
# potential are written five values per line.
file_gen = open(gen_output, "w")
print >> file_gen, "Number of atoms\n%4d\n" % Q_natoms
print >> file_gen, "Charge\n%4d\n" % Q_charge
print >> file_gen, "Multiplicity\n%4d\n" % Q_multiplicity
print >> file_gen, "Energy\n%20.12E\n" % Q_energy
if (Q_energy_lower):
    print >> file_gen, "Energy (lower state)\n%20.12E\n" % Q_energy_lower
print >> file_gen, "Dipole moment\n%20.12f %20.12f %20.12f\n" % tuple(Q_dipole)
if (Q_mulliken):
    print >> file_gen, "Mulliken charges"
    for i in range(0, len(Q_mulliken), 1):
        nums = tuple(Q_mulliken[i:i+1])
        print >> file_gen, (len(nums)*"%20.12f ")[:-1] % nums
    print >> file_gen, ""
if (Q_esp):
    print >> file_gen, "ESP charges"
    for i in range(0, len(Q_esp), 1):
        nums = tuple(Q_esp[i:i+1])
        print >> file_gen, (len(nums)*"%20.12f ")[:-1] % nums
    print >> file_gen, ""
if (Q_gradient):
    print >> file_gen, "Cartesian gradient"
    for i in range(0, len(Q_gradient), 5):
        nums = tuple(Q_gradient[i:i+5])
        print >> file_gen, (len(nums)*"%20.12E ")[:-1] % nums
    print >> file_gen, ""
if (Q_gradient_lower):
    print >> file_gen, "Cartesian gradient (lower state)"
    for i in range(0, len(Q_gradient_lower), 5):
        nums = tuple(Q_gradient_lower[i:i+5])
        print >> file_gen, (len(nums)*"%20.12E ")[:-1] % nums
    print >> file_gen, ""
if (Q_hessian):
    print >> file_gen, "Cartesian Hessian"
    for i in range(0, len(Q_hessian), 5):
        nums = tuple(Q_hessian[i:i+5])
        print >> file_gen, (len(nums)*"%20.12E ")[:-1] % nums
    print >> file_gen, ""
if (Q_potential):
    print >> file_gen, "Electrostatic potential"
    for i in range(0, len(Q_potential), 5):
        nums = tuple(Q_potential[i:i+5])
        print >> file_gen, (len(nums)*"%20.12E ")[:-1] % nums
    print >> file_gen, ""
file_gen.close()
|
Jellby/ASEP-MD
|
Tests/scripts/gaussian2gen.py
|
Python
|
gpl-3.0
| 5,391
|
[
"Gaussian"
] |
033744360fcee3ee4f523bd80817601c170d618ed6a96077db53594919288e4c
|
"""Implements API endpoints under ``/api/admin``"""
from typing import Any
from datetime import datetime
from flask import Blueprint, jsonify
from werkzeug.exceptions import abort
from shrunk.client import ShrunkClient
from shrunk.util.decorators import require_login, request_schema
__all__ = ['bp']

# Admin API blueprint, mounted at /api/v1/admin.
bp = Blueprint('admin', __name__, url_prefix='/api/v1/admin')

# JSON schema for POST /stats/overview: an optional "range" object
# restricting the statistics to the window [begin, end] (ISO date-times).
OVERVIEW_STATS_SCHEMA = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'range': {
            'type': 'object',
            'additionalProperties': False,
            'required': ['begin', 'end'],
            'properties': {
                'begin': {'type': 'string', 'format': 'date-time'},
                'end': {'type': 'string', 'format': 'date-time'},
            },
        },
    },
}
@bp.route('/stats/overview', methods=['POST'])
@request_schema(OVERVIEW_STATS_SCHEMA)
@require_login
def get_overview_stats(netid: str, client: ShrunkClient, req: Any) -> Any:
    """``POST /api/stats/overview``

    Admin-only. Returns Shrunk-wide statistics, optionally restricted to
    a time window. Request format:

    .. code-block:: json

       { "range?": { "begin": "date-time", "end": "date-time" } }

    Response format:

    .. code-block:: json

       { "links": "number", "visits": "number", "users": "number" }

    :param netid:
    :param client:
    :param req:
    """
    if not client.roles.has('admin', netid):
        abort(403)
    if 'range' not in req:
        stats = client.admin_stats()
    else:
        window = req['range']
        stats = client.admin_stats(
            begin=datetime.fromisoformat(window['begin']),
            end=datetime.fromisoformat(window['end']))
    return jsonify(stats)
@bp.route('/stats/endpoint', methods=['GET'])
@require_login
def get_endpoint_stats(netid: str, client: ShrunkClient) -> Any:
    """``GET /api/stats/endpoint``

    Admin-only. Returns per-Flask-endpoint visit statistics:

    .. code-block:: json

       { "stats": [ { "endpoint": "string", "total_visits": "number", "unique_visits": "number" } ] }

    :param netid:
    :param client:
    """
    if not client.roles.has('admin', netid):
        abort(403)
    return jsonify({'stats': client.endpoint_stats()})
|
oss/shrunk
|
backend/shrunk/api/admin.py
|
Python
|
mit
| 2,263
|
[
"VisIt"
] |
46b70d33506cb912785cfe7c071787557f991c2d9bf8b0ef88f501783faa4e30
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
def parse_arg(arg):
    """Evaluate a literal AST node to its Python value.

    Supports numbers, strings, booleans, tuples of literals and negated
    numbers, on both old (Num/Str/Name) and modern (ast.Constant,
    Python >= 3.8) node types.

    Raises ValueError for any other node instead of dropping into a
    debugger (the print + ipdb.set_trace() was debug leftover).
    """
    t = type(arg).__name__
    if t == 'Constant':
        # Python >= 3.8 folds Num/Str/NameConstant into ast.Constant.
        return arg.value
    elif t == 'Num':
        return arg.n
    elif t == 'Str':
        return arg.s
    elif t == 'Tuple':
        return tuple(parse_arg(e) for e in arg.elts)
    elif t == 'Name' and arg.id in ('True', 'False'):
        return arg.id == 'True'
    elif t == 'UnaryOp' and type(arg.op).__name__ == 'USub':
        # Necessary because "-3" parses as USub applied to a literal;
        # recurse so both Num and Constant operands work.
        return -parse_arg(arg.operand)
    else:
        raise ValueError('cannot evaluate expression node of type %s' % t)
class CallParser(ast.NodeVisitor):
    """AST visitor that records the callee name and the evaluated
    literal arguments of the (last) call expression it visits."""
    def __init__(self):
        # Filled in by visit_Call.
        self.name = None
        self.values = []
    def visit_Call(self, node):
        # Assumes the callee is a bare name (ast.Name); attribute calls
        # like a.b() would raise AttributeError on node.func.id.
        self.name = node.func.id
        self.values = tuple([parse_arg(arg) for arg in node.args])
def parse_call(exp):
    """Parse source string *exp* containing a call and return the pair
    (callee name, tuple of literal argument values)."""
    collector = CallParser()
    collector.visit(ast.parse(exp))
    return collector.name, collector.values
class ExprParser(ast.NodeVisitor):
    """AST visitor that evaluates the literal value of an expression
    statement via parse_arg."""
    def __init__(self):
        # Value of the last visited expression statement.
        self.v = None
    def visit_Expr(self, node):
        self.v = parse_arg(node.value)
def parse_expression(exp):
    """Parse source string *exp* and return the literal value of its
    expression statement."""
    collector = ExprParser()
    collector.visit(ast.parse(exp))
    return collector.v
|
debiatan/utbp
|
parser.py
|
Python
|
gpl-3.0
| 1,225
|
[
"VisIt"
] |
c22120034eab19aa3a902204a3b79c4c8b7d1a7d88ccdaf68e53cf1211d43e2b
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import itertools
import numpy as np
from numpy.testing import assert_allclose
import pytest
import inspect
import hyperspy.api as hs
from hyperspy.models.model1d import Model1D
from hyperspy.misc.test_utils import ignore_warning
from hyperspy import components1d
from hyperspy.component import Component
# All four (bool, bool) combinations; used to parametrize tests over
# (only_current, binned).
TRUE_FALSE_2_TUPLE = [p for p in itertools.product((True, False), repeat=2)]
def get_components1d_name_list():
    """Return the names of all Component subclasses exported by
    components1d, minus EELSCLEdge (tested elsewhere)."""
    names = []
    for name, obj in inspect.getmembers(components1d, inspect.isclass):
        if issubclass(obj, Component):
            names.append(name)
    # Remove EELSCLEdge, since it is tested elsewhere more appropriate
    names.remove('EELSCLEdge')
    return names
@pytest.mark.parametrize('component_name', get_components1d_name_list())
def test_creation_components1d(component_name):
    """Smoke test: every 1D component can be instantiated, evaluated,
    and appended to a model."""
    s = hs.signals.Signal1D(np.zeros(1024))
    s.axes_manager[0].offset = 100
    s.axes_manager[0].scale = 0.01
    kwargs = {}
    if component_name == 'ScalableFixedPattern':
        # Needs a reference signal to scale.
        kwargs['signal1D'] = s
    elif component_name == 'Expression':
        # Needs an expression string and a component name.
        kwargs.update({'expression': "a*x+b", "name": "linear"})
    component = getattr(components1d, component_name)(**kwargs)
    component.function(np.arange(1, 100))
    m = s.create_model()
    m.append(component)
class TestPowerLaw:
    """Tests for the PowerLaw 1D component."""

    def setup_method(self, method):
        # Model with a single PowerLaw (A=1000, r=4) on a 1024-point axis.
        s = hs.signals.Signal1D(np.zeros(1024))
        s.axes_manager[0].offset = 100
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.PowerLaw())
        m[0].A.value = 1000
        m[0].r.value = 4
        self.m = m
        self.s = s

    @pytest.mark.parametrize(("only_current", "binned"), TRUE_FALSE_2_TUPLE)
    def test_estimate_parameters(self, only_current, binned):
        self.m.signal.metadata.Signal.binned = binned
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        assert s.metadata.Signal.binned == binned
        g = hs.model.components1D.PowerLaw()
        g.estimate_parameters(s, None, None, only_current=only_current)
        # Reference values depend on whether the signal is binned.
        A_value = 1008.4913 if binned else 1006.4378
        r_value = 4.001768 if binned else 4.001752
        assert_allclose(g.A.value, A_value)
        assert_allclose(g.r.value, r_value)
        if only_current:
            # With only_current=True the second navigation index stays unset.
            A_value, r_value = 0, 0
        # Test that it all works when calling it with a different signal
        s2 = hs.stack((s, s))
        g.estimate_parameters(s2, None, None, only_current=only_current)
        assert_allclose(g.A.map["values"][1], A_value)
        assert_allclose(g.r.map["values"][1], r_value)

    def test_EDS_missing_data(self):
        # Estimation must not crash on a spectrum without full metadata.
        g = hs.model.components1D.PowerLaw()
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s2 = hs.signals.EDSTEMSpectrum(s.data)
        g.estimate_parameters(s2, None, None)

    def test_function_grad_cutoff(self):
        # Below left_cutoff the function and all gradients must be zero.
        pl = self.m[0]
        pl.left_cutoff.value = 105.0
        axis = self.s.axes_manager[0].axis
        for attr in ['function', 'grad_A', 'grad_r', 'grad_origin']:
            values = getattr(pl, attr)((axis))
            assert_allclose(values[:501], np.zeros((501)))
            assert getattr(pl, attr)((axis))[500] == 0
            # NOTE(review): missing ``assert`` — this comparison is
            # computed and discarded, so it checks nothing.
            getattr(pl, attr)((axis))[502] > 0

    def test_exception_gradient_calculation(self):
        # if this doesn't warn, it means that sympy can compute the gradients
        # and the power law component can be updated.
        with pytest.warns(UserWarning):
            hs.model.components1D.PowerLaw(compute_gradients=True)
class TestDoublePowerLaw:
    """Tests for the DoublePowerLaw 1D component."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(1024))
        s.axes_manager[0].offset = 100
        s.axes_manager[0].scale = 0.1
        m = s.create_model()
        m.append(hs.model.components1D.DoublePowerLaw())
        m[0].A.value = 1000
        m[0].r.value = 4
        m[0].ratio.value = 200
        self.m = m

    @pytest.mark.parametrize(("binned"), (True, False))
    def test_fit(self, binned):
        # Fitting a fresh component (with ratio fixed to the true value)
        # must recover the generating parameters.
        self.m.signal.metadata.Signal.binned = binned
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        assert s.metadata.Signal.binned == binned
        g = hs.model.components1D.DoublePowerLaw()
        # Fix the ratio parameter to test the fit
        g.ratio.free = False
        g.ratio.value = 200
        m = s.create_model()
        m.append(g)
        m.fit_component(g, signal_range=(None, None))
        assert_allclose(g.A.value, 1000.0)
        assert_allclose(g.r.value, 4.0)
        assert_allclose(g.ratio.value, 200.)
class TestOffset:
    """Tests for the Offset (constant) 1D component."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(10))
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.Offset())
        m[0].offset.value = 10
        self.m = m

    @pytest.mark.parametrize(("only_current", "binned"), TRUE_FALSE_2_TUPLE)
    def test_estimate_parameters(self, only_current, binned):
        # Estimation must recover offset=10 in all four combinations.
        self.m.signal.metadata.Signal.binned = binned
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        assert s.metadata.Signal.binned == binned
        o = hs.model.components1D.Offset()
        o.estimate_parameters(s, None, None, only_current=only_current)
        assert_allclose(o.offset.value, 10)

    def test_function_nd(self):
        # function_nd must reproduce the stacked data point-for-point.
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s = hs.stack([s] * 2)
        o = hs.model.components1D.Offset()
        o.estimate_parameters(s, None, None, only_current=False)
        axis = s.axes_manager.signal_axes[0]
        assert_allclose(o.function_nd(axis.axis), s.data)
class TestPolynomial:
    """Tests for the Polynomial 1D component (order 2) on 1D, 2D and 3D
    navigation shapes."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(1024))
        s.axes_manager[0].offset = -5
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.Polynomial(order=2))
        # Generating coefficients 0.5*x^2 + 2*x + 3 for all models below.
        coeff_values = (0.5, 2, 3)
        self.m = m
        s_2d = hs.signals.Signal1D(np.arange(1000).reshape(10, 100))
        self.m_2d = s_2d.create_model()
        self.m_2d.append(hs.model.components1D.Polynomial(order=2))
        s_3d = hs.signals.Signal1D(np.arange(1000).reshape(2, 5, 100))
        self.m_3d = s_3d.create_model()
        self.m_3d.append(hs.model.components1D.Polynomial(order=2))
        # if same component is pased, axes_managers get mixed up, tests
        # sometimes randomly fail
        for _m in [self.m, self.m_2d, self.m_3d]:
            _m[0].coefficients.value = coeff_values

    def test_gradient(self):
        c = self.m[0]
        # Gradient w.r.t. coefficients at x=1 is [x^2, x, 1] ... evaluated
        # per coefficient row.
        np.testing.assert_array_almost_equal(c.grad_coefficients(1),
                                             np.array([[6, ], [4.5], [3.5]]))
        assert c.grad_coefficients(np.arange(10)).shape == (3, 10)

    @pytest.mark.parametrize(("only_current", "binned"), TRUE_FALSE_2_TUPLE)
    def test_estimate_parameters(self, only_current, binned):
        self.m.signal.metadata.Signal.binned = binned
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        assert s.metadata.Signal.binned == binned
        g = hs.model.components1D.Polynomial(order=2)
        g.estimate_parameters(s, None, None, only_current=only_current)
        assert_allclose(g.coefficients.value[0], 0.5)
        assert_allclose(g.coefficients.value[1], 2)
        assert_allclose(g.coefficients.value[2], 3)

    def test_2d_signal(self):
        # This code should run smoothly, any exceptions should trigger failure
        s = self.m_2d.as_signal(show_progressbar=None, parallel=False)
        model = Model1D(s)
        p = hs.model.components1D.Polynomial(order=2)
        model.append(p)
        p.estimate_parameters(s, 0, 100, only_current=False)
        np.testing.assert_allclose(p.coefficients.map['values'],
                                   np.tile([0.5, 2, 3], (10, 1)))

    def test_3d_signal(self):
        # This code should run smoothly, any exceptions should trigger failure
        s = self.m_3d.as_signal(show_progressbar=None, parallel=False)
        model = Model1D(s)
        p = hs.model.components1D.Polynomial(order=2)
        model.append(p)
        p.estimate_parameters(s, 0, 100, only_current=False)
        np.testing.assert_allclose(p.coefficients.map['values'],
                                   np.tile([0.5, 2, 3], (2, 5, 1)))
# For https://github.com/hyperspy/hyperspy/pull/1989
# def test_function_nd(self):
# s = self.m.as_signal(show_progressbar=None, parallel=False)
# s = hs.stack([s]*2)
# p = hs.model.components1D.Polynomial(order=2)
# p.estimate_parameters(s, None, None, only_current=False)
# axis = s.axes_manager.signal_axes[0]
# assert_allclose(p.function_nd(axis.axis), s.data)
class TestGaussian:
    """Tests for the Gaussian 1D component."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(1024))
        s.axes_manager[0].offset = -5
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m[0].sigma.value = 0.5
        m[0].centre.value = 1
        m[0].A.value = 2
        self.m = m

    @pytest.mark.parametrize(("only_current", "binned"), TRUE_FALSE_2_TUPLE)
    def test_estimate_parameters_binned(self, only_current, binned):
        # Estimation must recover sigma/A/centre in all four combinations.
        self.m.signal.metadata.Signal.binned = binned
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        assert s.metadata.Signal.binned == binned
        g = hs.model.components1D.Gaussian()
        g.estimate_parameters(s, None, None, only_current=only_current)
        assert_allclose(g.sigma.value, 0.5)
        assert_allclose(g.A.value, 2)
        assert_allclose(g.centre.value, 1)

    @pytest.mark.parametrize("binned", (True, False))
    def test_function_nd(self, binned):
        self.m.signal.metadata.Signal.binned = binned
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s2 = hs.stack([s] * 2)
        g = hs.model.components1D.Gaussian()
        g.estimate_parameters(s2, None, None, only_current=False)
        assert g.binned == binned
        axis = s.axes_manager.signal_axes[0]
        # Binned data differ from the continuous function by the bin width.
        factor = axis.scale if binned else 1
        assert_allclose(g.function_nd(axis.axis) * factor, s2.data)
class TestExpression:
    """Tests for the Expression component built from a Gaussian formula."""

    def setup_method(self, method):
        self.g = hs.model.components1D.Expression(
            expression="height * exp(-(x - x0) ** 2 * 4 * log(2)/ fwhm ** 2)",
            name="Gaussian",
            position="x0",
            height=1,
            fwhm=1,
            x0=0,
            module="numpy")

    def test_name(self):
        assert self.g.name == "Gaussian"

    def test_position(self):
        # The "position" keyword must bind the x0 parameter.
        assert self.g._position is self.g.x0

    def test_f(self):
        # Peak height at the centre.
        assert self.g.function(0) == 1

    def test_grad_height(self):
        assert_allclose(
            self.g.grad_height(2),
            1.5258789062500007e-05)

    def test_grad_x0(self):
        assert_allclose(
            self.g.grad_x0(2),
            0.00016922538587889289)

    def test_grad_fwhm(self):
        assert_allclose(
            self.g.grad_fwhm(2),
            0.00033845077175778578)

    def test_function_nd(self):
        assert self.g.function_nd(0) == 1
def test_expression_substitution():
    """Substitutions after ';' are folded into the expression, so only
    the remaining free symbol (c) becomes a parameter."""
    expr = 'A / B; A = x+2; B = x-c'
    comp = hs.model.components1D.Expression(expr, name='testcomp',
                                            autodoc=True,
                                            c=2)
    assert ''.join(p.name for p in comp.parameters) == 'c'
    # (1+2) / (1-2) == -3
    assert comp.function(1) == -3
class TestScalableFixedPattern:
    """Tests that ScalableFixedPattern recovers the correct yscale for
    every binned/unbinned combination of signal and pattern."""

    def setup_method(self, method):
        # Signal is 100x the pattern, so the expected yscale is 100
        # (modulo binning corrections below).
        s = hs.signals.Signal1D(np.linspace(0., 100., 10))
        s1 = hs.signals.Signal1D(np.linspace(0., 1., 10))
        s.axes_manager[0].scale = 0.1
        s1.axes_manager[0].scale = 0.1
        self.s = s
        self.pattern = s1

    def test_both_unbinned(self):
        s = self.s
        s1 = self.pattern
        s.metadata.Signal.binned = False
        s1.metadata.Signal.binned = False
        m = s.create_model()
        fp = hs.model.components1D.ScalableFixedPattern(s1)
        m.append(fp)
        with ignore_warning(message="invalid value encountered in sqrt",
                            category=RuntimeWarning):
            m.fit()
        assert abs(fp.yscale.value - 100) <= 0.1

    def test_both_binned(self):
        s = self.s
        s1 = self.pattern
        s.metadata.Signal.binned = True
        s1.metadata.Signal.binned = True
        m = s.create_model()
        fp = hs.model.components1D.ScalableFixedPattern(s1)
        m.append(fp)
        with ignore_warning(message="invalid value encountered in sqrt",
                            category=RuntimeWarning):
            m.fit()
        assert abs(fp.yscale.value - 100) <= 0.1

    def test_pattern_unbinned_signal_binned(self):
        # Mixed binning rescales the fit by 1/scale = 10.
        s = self.s
        s1 = self.pattern
        s.metadata.Signal.binned = True
        s1.metadata.Signal.binned = False
        m = s.create_model()
        fp = hs.model.components1D.ScalableFixedPattern(s1)
        m.append(fp)
        with ignore_warning(message="invalid value encountered in sqrt",
                            category=RuntimeWarning):
            m.fit()
        assert abs(fp.yscale.value - 1000) <= 1

    def test_pattern_binned_signal_unbinned(self):
        s = self.s
        s1 = self.pattern
        s.metadata.Signal.binned = False
        s1.metadata.Signal.binned = True
        m = s.create_model()
        fp = hs.model.components1D.ScalableFixedPattern(s1)
        m.append(fp)
        with ignore_warning(message="invalid value encountered in sqrt",
                            category=RuntimeWarning):
            m.fit()
        assert abs(fp.yscale.value - 10) <= .1
class TestHeavisideStep:
    """Tests for the HeavisideStep component (0 below the step, 0.5 at
    the step, 1 above)."""

    def setup_method(self, method):
        self.c = hs.model.components1D.HeavisideStep()

    def test_integer_values(self):
        c = self.c
        np.testing.assert_array_almost_equal(c.function([-1, 0, 2]),
                                             [0, 0.5, 1])

    def test_float_values(self):
        c = self.c
        np.testing.assert_array_almost_equal(c.function([-0.5, 0.5, 2]),
                                             [0, 1, 1])

    def test_not_sorted(self):
        # The function must be evaluated element-wise, independent of order.
        c = self.c
        np.testing.assert_array_almost_equal(c.function([3, -0.1, 0]),
                                             [1, 0, 0.5])

    def test_gradients(self):
        c = self.c
        np.testing.assert_array_almost_equal(c.A.grad([3, -0.1, 0]),
                                             [1, 1, 1])
        np.testing.assert_array_almost_equal(c.n.grad([3, -0.1, 0]),
                                             [1, 0, 0.5])
|
francisco-dlp/hyperspy
|
hyperspy/tests/component/test_components.py
|
Python
|
gpl-3.0
| 15,650
|
[
"Gaussian"
] |
a276f90b3f8a1d7bec86fc2a891181c5922c82d455b9876be000867c4f56b933
|
from PyML.utils import misc
from baseClassifiers import Classifier,IteratorClassifier
from composite import CompositeClassifier
from PyML.containers import ker
from PyML.classifiers import svm
'''classes for model selection'''
__docformat__ = "restructuredtext en"
class Param (IteratorClassifier) :
    """
    A class for training a classifier with several values of a parameter.
    Training trains a classifier for each value of the parameter.
    Testing returns a list evaluating each trained classifier on the given
    dataset.
    Example::

      p = Param(svm.SVM(), 'C', [0.1, 1, 10, 100, 1000])
    """

    def __init__(self, arg, attribute = 'C', values = [0.1, 1, 10, 100, 1000]) :
        """
        :Parameters:
          - `arg` - another Param object, or the classifier to be used,
            or a plain list of classifiers
          - `attribute` - the attribute of the classifier that needs tuning
          - `values` - a list of values to try
        """
        if arg.__class__ == self.__class__ :
            # Copy constructor: clone the other Param's classifiers and
            # re-apply the attribute values.
            other = arg
            self.attribute = other.attribute
            self.values = other.values[:]
            self.classifiers = [classifier.__class__(classifier)
                                for classifier in other.classifiers]
            for i in range(len(self)) :
                misc.mysetattr(self.classifiers[i], self.attribute, self.values[i])
        elif hasattr(arg, 'type') and arg.type == 'classifier' :
            # One clone of the template classifier per parameter value.
            self.attribute = attribute
            self.values = values
            self.classifiers = [arg.__class__(arg)
                                for i in range(len(self.values))]
            for i in range(len(self)) :
                misc.mysetattr(self.classifiers[i], self.attribute, self.values[i])
        elif type(arg) == type([]) :
            # A pre-built list of classifiers: no attribute/values are set
            # in this case.
            self.classifiers = [arg[i].__class__(arg[i])
                                for i in range(len(arg))]

    def __len__(self) :
        # Number of parameter values / classifiers under consideration.
        return len(self.classifiers)

    def __repr__(self) :
        rep = '<' + self.__class__.__name__ + ' instance>\n'
        rep += 'classifier:\n'
        rep += self.classifiers[0].__repr__()
        rep += 'attribute: %s\n' % self.attribute
        rep += 'values:' + str(self.values) + '\n'
        return rep

    def train(self, data, **args) :
        # Train every classifier (one per parameter value) on the same data.
        for classifier in self.classifiers :
            classifier.train(data, **args)
        #self.log.trainingTime = self.getTrainingTime()
class ParamGrid (Param) :
    """
    A class for training and testing a classifier on a grid of parameter
    values for two attributes of the classifier.
    Example::

      p = ParamGrid(svm.SVM(ker.Gaussian()), 'C', [0.1, 1, 10, 100, 1000],
                    'kernel.gamma', [0.001, 0.01, 0.1, 1, 10])
    """

    def __init__(self, arg,
                 attribute1 = 'C', values1 = [0.1, 1, 10, 100, 1000],
                 attribute2 = 'kernel.gamma', values2 = [0.001, 0.01, 0.1, 1, 10]) :
        """
        :Parameters:
          - `arg` - another ParamGrid object, or the classifier to be used
          - `attribute1` - the first attribute of the classifier that needs tuning
          - `values1` - a list of values to try for attribute1
          - `attribute2` - the second attribute
          - `values2` - a list of values to try for attribute2
        """
        if arg.__class__ == self.__class__ :
            # Copy constructor.
            other = arg
            self.attribute1 = other.attribute1
            self.values1 = other.values1[:]
            self.attribute2 = other.attribute2
            self.values2 = other.values2[:]
            self.classifiers = [classifier.__class__(classifier)
                                for classifier in other.classifiers]
        elif hasattr(arg, 'type') and arg.type == 'classifier' :
            self.attribute1 = attribute1
            self.values1 = values1
            self.attribute2 = attribute2
            self.values2 = values2
            self.classifiers = [arg.__class__(arg)
                                for i in range(len(values1) * len(values2))]
            for i in range(len(self.values1)) :
                for j in range(len(self.values2)) :
                    # Classifiers are laid out row-major:
                    # index = i * len(values2) + j.
                    classifierID = i * len(self.values2) + j
                    misc.mysetattr(self.classifiers[classifierID],
                                   self.attribute1,
                                   self.values1[i])
                    misc.mysetattr(self.classifiers[classifierID],
                                   self.attribute2,
                                   self.values2[j])

    def __repr__(self) :
        rep = '<' + self.__class__.__name__ + ' instance>\n'
        rep += 'classifier:\n'
        rep += self.classifiers[0].__repr__()
        rep += 'attribute1: %s\n' % self.attribute1
        rep += 'values1:' + str(self.values1) + '\n'
        rep += 'attribute2: %s\n' % self.attribute2
        rep += 'values2:' + str(self.values2) + '\n'
        return rep
class ModelSelector (CompositeClassifier) :
    """
    A model selector decides on the best classifier parameters
    using the param object it receives as input.
    Parameters are chosen according to the success rate in CV (or success
    on a dataset provided to the train method.
    """

    # Default configurable attributes (picked up by the Classifier base).
    attributes = {'numFolds' : 5,
                  'measure' : 'balancedSuccessRate',
                  'foldsToPerform' : 5,}

    def __init__(self, arg, **args) :
        """
        :Parameters:
          - `arg` - another ModelSelector or a Param object

        :Keywords:
          - `measure` - which measure of accuracy to use for selecting the
            best classifier (default = 'balancedSuccessRate')
            supported measures are: 'balancedSuccessRate', 'successRate',
            'roc', 'roc50' (you can substitute any number instead of 50)
          - `numFolds` - number of CV folds to use when performing model selection
          - `foldsToPerform` - the number of folds to actually perform
        """
        Classifier.__init__(self, **args)
        if arg.__class__ == self.__class__ :
            # Copy constructor.
            self.param = arg.param.__class__(arg.param)
            self.measure = arg.measure
            self.numFolds = arg.numFolds
        elif arg.__class__.__name__.find('Param') >= 0 :
            # Accepts Param or ParamGrid (duck-typed by class name).
            self.param = arg.__class__(arg)
        else :
            raise ValueError, 'wrong type of input for ModelSelector'
        self.classifier = None

    def __repr__(self) :
        rep = '<' + self.__class__.__name__ + ' instance>\n'
        if self.classifier is not None :
            rep += self.classifier.__repr__()
        else :
            rep += self.param.__repr__()
        return rep

    def train(self, data, **args) :
        """
        Run cross-validation over all candidate classifiers in the Param
        object and keep (and optionally retrain) the best one.

        :Keywords:
          - `train` - boolean - whether to train the best classifier
            (default: True)
        """
        Classifier.train(self, data, **args)
        maxSuccessRate = 0
        bestClassifier = None
        classifierIdx = 0
        args['numFolds'] = self.numFolds
        args['foldsToPerform'] = self.foldsToPerform
        # One CV result per candidate classifier; keep the best score.
        for r in self.param.stratifiedCV(data, **args) :
            successRate = getattr(r, self.measure)
            if successRate > maxSuccessRate :
                bestClassifier = classifierIdx
                maxSuccessRate = successRate
            classifierIdx += 1
        self.log.maxSuccessRate = maxSuccessRate
        # Clone the winning classifier so later training does not touch
        # the candidates held by the Param object.
        self.classifier = self.param.classifiers[bestClassifier].__class__(
            self.param.classifiers[bestClassifier])
        if 'train' not in args or args['train'] is True :
            self.classifier.train(data, **args)
            self.classifier.log.trainingTime = self.getTrainingTime()
            self.classifier.log.classifier = self.classifier.__class__(self.classifier)

    def save(self, fileHandle) :
        # Persist only the selected classifier.
        self.classifier.save(fileHandle)
class SVMselect (ModelSelector) :
    """
    A model selector for searching for best parameters for an
    SVM classifier with a Gaussian kernel
    Its search strategy is as follows:
    First optimize the width of the Gaussian (gamma) for a fixed (low)
    value of C, and then optimize C.
    """

    # Default search grids and CV settings.
    attributes = {'C' : [0.01, 0.1, 1, 10, 100, 1000],
                  'gamma' : [0.001, 0.01, 0.1, 1, 10],
                  'Clow' : 10,
                  'numFolds' : 5,
                  'measure' : 'balancedSuccessRate'}

    def __init__(self, arg = None, **args) :
        """
        :Parameters:
          - `arg` - another ModelSelector object

        :Keywords:
          - `C` - a list of values to try for C
          - `gamma` - a list of value to try for gamma
          - `measure` - which measure of accuracy to use for selecting the
            best classifier (default = 'balancedSuccessRate')
            supported measures are: 'balancedSuccessRate', 'successRate',
            'roc', 'roc50' (you can substitute another number instead of 50)
          - `numFolds` - number of CV folds to use when performing model selection
        """
        Classifier.__init__(self, arg, **args)
        self.classifier = None

    def __repr__(self) :
        rep = '<' + self.__class__.__name__ + ' instance>\n'
        if self.classifier is not None :
            rep += self.classifier.__repr__()
        rep += 'C: ' + str(self.C) + '\n'
        rep += 'gamma: ' + str(self.gamma) + '\n'
        return rep

    def train(self, data, **args) :
        """
        Two-stage search: tune kernel.gamma at C=Clow, then tune C with
        the chosen gamma; the winning SVM is stored in self.classifier.

        :Keywords:
          - `train` - boolean - whether to train the best classifier
            (default: True)
          - `vdata` - data to use for testing instead of using cross-validation
            (not implemented yet)
        """
        Classifier.train(self, data, **args)
        # Stage 1: pick gamma with C fixed at the (low) value Clow.
        kernel = ker.Gaussian()
        gammaSelect = ModelSelector(Param(svm.SVM(kernel, C = self.Clow),
                                          'kernel.gamma', self.gamma),
                                    measure = self.measure,
                                    numFolds = self.numFolds)
        gammaSelect.train(data)
        # Stage 2: pick C using the best gamma found above.
        kernel = ker.Gaussian(gamma = gammaSelect.classifier.kernel.gamma)
        cSelect = ModelSelector(Param(svm.SVM(kernel), 'C', self.C),
                                measure = self.measure,
                                numFolds = self.numFolds)
        cSelect.train(data)
        self.classifier = cSelect.classifier.__class__(cSelect.classifier)
        if 'train' not in args or args['train'] is True :
            self.classifier.train(data, **args)
            self.classifier.log.trainingTime = self.getTrainingTime()
            self.classifier.log.classifier = self.classifier.__class__(self.classifier)
|
cathywu/Sentiment-Analysis
|
PyML-0.7.9/PyML/classifiers/modelSelection.py
|
Python
|
gpl-2.0
| 10,797
|
[
"Gaussian"
] |
4115c5b9936cec98a903e3c72aee38c82d43b60f9eb65e1c39fa100edfd4b82c
|
##
## teem.py: automatically-generated ctypes python wrappers for Teem
## Copyright (C) 2013, 2012, 2011, 2010, 2009 University of Chicago
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
##############################################################
##############################################################
#### NOTE: This teem.py file is automatically produced by
#### teem/python/ctypes/gen-teem.py. Necessary changes to
#### teem.py should be made in gen-teem.py, not here.
##############################################################
##############################################################
from ctypes import *
import ctypes.util
import sys, os
def load_library(libname, loader_path=""):
    """Load the shared library *libname* via ctypes.CDLL.

    Parameters:
        libname: base name of the library (e.g. "libteem").  If it has
            no file extension, the platform-appropriate one is appended
            (.dll on Windows, .dylib on macOS, .so on Linux).
        loader_path: optional directory containing the library, or a
            path to any file inside that directory; when empty, the
            dynamic loader's default search path is used.

    Returns the ctypes.CDLL handle.
    Raises OSError if the library cannot be found or loaded.
    """
    ext = os.path.splitext(libname)[1]
    if not ext:
        # Append the platform-specific shared-library suffix.
        if sys.platform == 'win32':
            libname_ext = '%s.dll' % libname
        elif sys.platform == 'darwin':
            libname_ext = '%s.dylib' % libname
        elif sys.platform.startswith('linux'):
            # sys.platform is 'linux2' on Python 2 but 'linux' on
            # Python >= 3.3; startswith() covers both.
            libname_ext = '%s.so' % libname
        else:
            libname_ext = libname
    else:
        # BUGFIX: the original never assigned libname_ext when the
        # caller supplied an explicit extension, causing a NameError.
        libname_ext = libname
    if loader_path != "":
        loader_path = os.path.abspath(loader_path)
        # Accept either the directory itself or a path to a file in it.
        if os.path.isdir(loader_path):
            libdir = loader_path
        else:
            libdir = os.path.dirname(loader_path)
    else:
        libdir = loader_path
    # Let any OSError from CDLL propagate to the caller unchanged
    # (the original try/except merely re-raised it).
    libpath = os.path.join(libdir, libname_ext)
    return CDLL(libpath)
# Load the Teem shared library at import time.  On failure, print a hint
# about pointing load_library()'s loader_path at <teem-install-dir>/lib/
# and convert the OSError into ImportError so importing teem.py fails
# cleanly.  (NOTE: Python 2 print-statement syntax, as generated.)
try:
    libteem = load_library('libteem')
except OSError:
    print "**"
    print "** teem.py couldn't find and load the \"libteem\" shared library."
    print "**"
    print "** try setting optional loader_path argument in the load_library() call above to '<teem-install-dir>/lib/'"
    print "**"
    raise ImportError
# =============================================================
# Utility types and classes to help teem.py be platform-independent.
# Alias for C "char *" arguments/returns.
STRING = c_char_p
# Opaque stand-in for C's FILE type; only ever used through pointers
# (FILE* parameters such as stdin/stdout/stderr).
class FILE(Structure):
    pass
# oddly, size_t is in ctypes, but not ptrdiff_t
# which is probably a bug
# Define ptrdiff_t as a signed integer matching this process's pointer width.
if sizeof(c_void_p) == 4:
    ptrdiff_t = c_int32
elif sizeof(c_void_p) == 8:
    ptrdiff_t = c_int64
# =============================================================
# What follows are all the functions, struct definitions, globals,
# enum values, and typedefs in Teem. This is generated by ctypeslib:
# http://svn.python.org/projects/ctypes/branches/ctypeslib-gccxml-0.9
# followed by further post-processing and filtering.
# See end of this file for definitions of stderr, stdin, stdout
alanParmMinAverageChange = 16
unrrduScaleAdd = 4
nrrdTypeULLong = 8
pullInfoIsovalueHessian = 21
pullInfoNegativeTangent1 = 17
tenAniso_Tr = 26
pullInfoTangent1 = 15
tenGageTraceHessianFrob = 91
pullInfoLiveThresh3 = 14
pullInfoLiveThresh2 = 13
pullInfoSeedThresh = 11
alanParmK = 14
tenGageCp1Hessian = 187
pullInfoHeightHessian = 8
tenDwiGageTensorWLSLikelihood = 13
pullInfoHeightGradient = 7
nrrdIoStateUnknown = 0
pullInfoNegativeTangent2 = 18
alanParmDiffB = 12
tenGageModeHessianEval1 = 127
unrrduScaleDivide = 3
pullFlagStartSkipsPoints = 14
gageSclFlowlineCurv = 31
pullFlagPopCntlEnoughTest = 9
nrrdBlind8BitRangeState = 3
pullFlagUseBetaForGammaLearn = 3
pullInitMethodGivenPos = 4
gageVecCurlNorm = 11
gageSclCurvDir1 = 29
tenGageDetNormal = 44
gageSclMeanCurv = 27
tenGageCp1HessianEvec0 = 193
alanParmHomogAniso = 5
tenGageTraceHessianEvec1 = 89
pullConstraintFailLast = 7
pullConstraintFailTravel = 6
pullConstraintFailIterMaxed = 5
limnSplineInfoLast = 7
limnSplineInfoQuaternion = 6
limnSplineInfo4Vector = 5
pullConstraintFailHessZeroA = 1
tenDwiGageTensorWLSError = 11
gageSclShapeTrace = 25
pullInfoHeightLaplacian = 9
nrrdMeasureLast = 31
limnSplineInfoScalar = 1
alanParmNumThreads = 3
nrrdField_units = 18
tenGageFiberDispersion = 206
nrrdBinaryOpNormalRandScaleAdd = 22
nrrdFormatTypeText = 5
nrrdFormatTypeVTK = 4
tenGageDetGradMag = 43
nrrdBinaryOpEqual = 18
nrrdBinaryOpCompare = 17
nrrdBinaryOpGTE = 16
alanParmVerbose = 1
nrrdBinaryOpLT = 13
nrrdBinaryOpMax = 12
nrrdBinaryOpFmod = 9
tenGageCp1HessianEvec = 192
alanParmUnknown = 0
pushEnergyTypeLast = 6
pushEnergyTypeCotan = 4
pushEnergyTypeCoulomb = 3
tenGageCa1HessianEvec1 = 203
pullTraceStopUnknown = 0
nrrdBinaryOpAdd = 1
tenDwiGageTensorWLS = 10
nrrdBinaryOpUnknown = 0
pullFlagZeroZ = 15
tenAniso_Conf = 1
nrrdMeasureHistoSD = 30
nrrdField_min = 19
pullCondConstraintFail = 5
alanParmF = 15
pullInfoTensorInverse = 2
unrrduScaleUnknown = 0
nrrdMeasureHistoVariance = 29
tenGageCa1HessianEval1 = 199
echoMatterGlassIndex = 0
tenGageCp1HessianEval2 = 191
nrrdField_labels = 17
nrrdBlind8BitRangeUnknown = 0
tenDwiGageTensorLLSLikelihood = 9
nrrdMeasureHistoSum = 27
tenGageCa1HessianEval = 197
nrrdFormatTypePNG = 3
gageSclHessEval1 = 13
tenGageCp1HessianEvec2 = 195
gageSigmaSamplingUniformTau = 2
nrrdField_space_directions = 14
gageSigmaSamplingUnknown = 0
gageSclHessEval0 = 12
hestSourceLast = 3
tenGageCp1HessianEvec1 = 194
tenGageCp1HessianEval1 = 190
tenGageTraceHessianEvec0 = 88
tijk_class_last = 4
coilMethodTypeFinish = 8
coilMethodTypeSelf = 7
coilMethodTypeCurvatureFlow = 6
tenGageTraceHessianEvec = 87
coilMethodTypeModifiedCurvatureRings = 5
nrrdField_kinds = 16
coilMethodTypeModifiedCurvature = 4
gageSclLaplacian = 9
coilMethodTypePeronaMalik = 3
coilMethodTypeHomogeneous = 2
nrrdMeasureHistoL2 = 28
tenGageBGradMag = 40
nrrdBasicInfoKeyValuePairs = 15
echoMatterMetalKd = 2
tenGageTraceHessianEval1 = 85
nrrdMeasureLineIntercept = 19
hestSourceUser = 2
nrrdBlind8BitRangeLast = 4
nrrdBlind8BitRangeFalse = 2
tenGageCp1HessianEval0 = 189
nrrdField_space_dimension = 8
nrrdBlind8BitRangeTrue = 1
tijk_class_unknown = 0
tenGageCp1HessianEval = 188
tenGageTraceHessianEval = 83
gageSclNProj = 5
nrrdSpaceLast = 13
tenGageTraceHessian = 82
nrrdMeasureCoV = 16
nrrdFormatTypeNRRD = 1
tenGageHessian = 81
pullSysParmLast = 20
pullSysParmFracNeighNixedMax = 19
nrrdBasicInfoComments = 14
pullSysParmWall = 18
pullSysParmConstraintStepMin = 17
pullSysParmEnergyDecreaseMin = 16
pullSysParmEnergyIncreasePermit = 15
tenGageClpmin2 = 80
pullSysParmEnergyDecreasePopCntlMin = 14
nrrdKind4Color = 17
pullSysParmBackStepScale = 13
nrrdMeasureVariance = 14
pullSysParmOpporStepScale = 12
pullSysParmProbeProb = 11
nrrdSpace3DLeftHanded = 10
pullSysParmNeighborTrueProb = 10
pullSysParmBinWidthSpace = 9
tenGageCa2 = 79
pullSysParmRadiusScale = 8
pullSysParmRadiusSpace = 7
nrrdMeasureLinf = 13
pullSysParmStepInitial = 6
pullSysParmTheta = 5
nrrdField_content = 2
pullSysParmBeta = 2
pullSysParmAlpha = 1
pullEnergyTypeBspln = 3
pullSysParmUnknown = 0
nrrdUnaryOpRoundDown = 22
pullFlagAllowCodimension3Constraints = 12
nrrdFormatTypeUnknown = 0
pullFlagBinSingle = 11
tenGageClpmin1 = 76
nrrdBasicInfoOldMax = 13
pullEnergyTypeSpring = 1
pullFlagConvergenceIgnoresPopCntl = 10
limnCameraPathTrackFrom = 1
nrrdMeasureL2 = 9
tenGageCl1HessianEval = 179
limnCameraPathTrackUnknown = 0
pullFlagConstraintBeforeSeedThresh = 7
tenAniso_Clpmin2 = 11
pushEnergyTypeGauss = 2
gageSclHessEvec = 15
pullFlagNixAtVolumeEdgeSpace = 6
tenGageNormGradMag = 37
pullFlagEnergyFromStrength = 5
tenGageRotTans = 70
nrrdMeasureMode = 5
pullFlagRestrictiveAddToBins = 4
tenAniso_Q = 18
tenGageInvarRGrads = 68
pullFlagNoPopCntlWithZeroAlpha = 2
tijk_class_efs = 3
tenGageOmegaHessianEval1 = 137
pullPropNeighTanCovar = 13
tenGageInvarKGrads = 66
pullFlagUnknown = 0
tenGageOmegaNormal = 65
airTypeString = 10
tenAniso_B = 17
echoMatterLightUnit = 1
pullPropScale = 10
tenGageOmegaGradVec = 63
pullPropNeighDistMean = 9
pullPropForce = 8
tenGageTensorRThetaPhiLinear = 166
tenGageCp1GradVec = 170
tenGageThetaGradMag = 61
tenGageTensorQuatGeoLoxR = 165
limnCameraPathTrackAt = 2
seekTypeValleySurfaceOP = 10
tenGageModeGradMag = 58
tenGageCovarianceKGRT = 162
nrrdTypeFloat = 9
nrrdField_spacings = 10
airEndianLast = 4322
pullPropIdCC = 2
pullCondLast = 8
nrrdKindXYZColor = 16
pullCondEnergyTry = 4
pullIterParmUnknown = 0
pullPropIdtag = 1
pullCondConstraintSatB = 3
pullCondConstraintSatA = 2
pullCondOld = 1
pullCondUnknown = 0
nrrdKindLast = 32
pullEnergyTypeLast = 14
coilKindTypeScalar = 1
tijk_class_esh = 2
pullEnergyTypeQuarticWell = 10
tenGageFANormal = 53
pullEnergyTypeBetterCubicWell = 9
pullEnergyTypeCubicWell = 8
pullStatusNewbie = 2
pullEnergyTypeQuartic = 7
pullEnergyTypeCubic = 6
gageSigmaSamplingLast = 4
tenGageConfGradVecDotEvec0 = 157
pullEnergyTypeButterworth = 4
pullEnergyTypeGauss = 2
pullEnergyTypeUnknown = 0
pullPropLast = 18
pullPropStability = 17
pullInterTypeLast = 5
pullPropNeighCovar7Ten = 12
pullPropNeighCovar = 11
pullPropStuck = 6
pullPropStepConstr = 5
pullPropStepEnergy = 4
limnEdgeTypeBorder = 6
pullPropEnergy = 3
tenGageOmegaGradVecDotEvec0 = 154
airEndianBig = 4321
airEndianLittle = 1234
pullPropUnknown = 0
coilKindTypeUnknown = 0
nrrdKind2DMaskedSymMatrix = 25
pullFlagScaleIsTau = 13
echoMatterLast = 5
echoMatterLight = 4
nrrdSpaceLeftAnteriorSuperior = 2
echoMatterMetal = 3
echoMatterGlass = 2
echoMatterPhong = 1
echoMatterUnknown = 0
pullInterTypeUnknown = 0
airMopAlways = 3
airTypeSize_t = 6
airMopNever = 0
tenAniso_Ct2 = 13
tenGageTensorGradRotE = 176
nrrdSpaceUnknown = 0
miteStageOpLast = 5
nrrdMeasureSD = 15
miteStageOpAdd = 3
miteStageOpMax = 2
tenGageEvalGrads = 72
miteStageOpMin = 1
pullConstraintFailHessZeroB = 2
mossFlagKernel = 1
limnSplineInfoNormal = 4
airNoDio_disable = 12
airNoDio_test = 11
airNoDio_fpos = 9
hooverErrSample = 6
hooverErrRayBegin = 5
nrrdKindRGBAColor = 18
airNoDio_fd = 4
hooverErrInit = 1
limnSplineInfo2Vector = 2
tenDwiGage2TensorPeledAndError = 34
tenAniso_Cs2 = 12
tenDwiGage2TensorPeled = 32
tenDwiGage2TensorQSegAndError = 31
tenDwiGage2TensorQSeg = 29
tenDwiGageTensorAllDWIError = 28
tenFiberTypeLast = 7
tenDwiGageFA = 27
tenDwiGageTensorError = 23
tenGageOmegaLaplacian = 143
tenDwiGageTensorMLELikelihood = 21
nrrdKindHSVColor = 15
tenDwiGageTensorMLEErrorLog = 20
pullInfoInside = 4
nrrdField_dimension = 6
tenDwiGageTensorMLE = 18
airFP_NEG_ZERO = 10
airFP_POS_ZERO = 9
tenDwiGageTensorNLS = 14
airFP_NEG_NORM = 6
airFP_POS_NORM = 5
airFP_NEG_INF = 4
airFP_POS_INF = 3
airFP_QNAN = 2
tenGageFA2ndDD = 112
airFP_SNAN = 1
limnPrimitiveTriangles = 2
airFP_Unknown = 0
tenDwiGageTensorLLS = 6
tenDwiGageMeanDWIValue = 5
tenDwiGageADC = 4
seekTypeValleyLine = 5
tenDwiGageJustDWI = 3
nrrdKind2Vector = 12
tenDwiGageB0 = 2
airNoDio_ptr = 8
airMopOnOkay = 2
baneMeasrFlowlineCurv = 8
tenGageCa1HessianEval2 = 200
tenGageOmegaHessianEvec = 139
baneMeasrTotalCurv = 7
baneMeasr2ndDD = 6
baneMeasrLaplacian = 5
baneMeasrGradMag = 4
baneMeasrValueAnywhere = 3
baneMeasrValueZeroCentered = 2
tenGageOmegaHessianEval2 = 138
baneMeasrValuePositive = 1
echoTypeAABBox = 8
baneMeasrUnknown = 0
nrrdField_block_size = 5
nrrdBinaryOpIf = 21
alanParmTextureType = 2
nrrdSpaceLeftPosteriorSuperior = 3
gageErrLast = 7
nrrdBinaryOpExists = 20
gageErrStackUnused = 6
limnPrimitiveNoop = 1
nrrdBinaryOpNotEqual = 19
tenAniso_Ca2 = 10
tenGageCl1Hessian = 178
tenGageCp1 = 74
nrrdFormatTypePNM = 2
tenFiberTypePureLine = 5
limnCameraPathTrackLast = 4
gageSclHessRidgeness = 34
tenGlyphTypeSuperquad = 4
nrrdField_type = 4
gageSclGeomTens = 20
tenGlyphTypeBox = 1
tenGlyphTypeUnknown = 0
nrrdBasicInfoBlocksize = 3
miteStageOpMultiply = 4
nrrdBinaryOpLTE = 14
tenAniso_Cp2 = 9
tenGageEvalHessian = 177
airNoDio_size = 7
alanTextureTypeLast = 3
alanTextureTypeTuring = 1
tenFiberParmUseIndexSpace = 2
limnPrimitiveUnknown = 0
airTypeLast = 13
airTypeEnum = 11
tenAnisoUnknown = 0
airTypeChar = 9
airTypeDouble = 8
gageSclHessValleyness = 33
pushEnergyTypeSpring = 1
airTypeFloat = 7
pushEnergyTypeUnknown = 0
echoTypeIsosurface = 7
airTypeULongInt = 5
echoMatterMetalR0 = 0
airTypeLongInt = 4
airTypeBool = 1
tenGageModeHessianEval0 = 126
tenFiberParmVerbose = 4
tenGageModeHessianEval = 125
tenFiberParmWPunct = 3
tenAniso_Cl2 = 8
airNoDio_small = 6
mossFlagLast = 2
tenFiberParmUnknown = 0
tenFiberParmStepSize = 1
pushEnergyTypeZero = 5
nrrdOriginStatusOkay = 4
mossFlagImage = 0
nrrdOriginStatusDirection = 1
nrrdOriginStatusUnknown = 0
nrrdSpacingStatusLast = 5
nrrdFFTWPlanRigorLast = 5
nrrdSpacingStatusDirection = 4
tenGageDelNormR2 = 26
nrrdSpacingStatusScalarWithSpace = 3
nrrdSpacingStatusScalarNoSpace = 2
nrrdBinaryOpPow = 5
nrrdSpacingStatusNone = 1
nrrdBasicInfoLast = 16
nrrdBinaryOpDivide = 4
tenAniso_Ct1 = 7
tenDwiFiberType1Evec0 = 1
pullCountLast = 15
tenGageCa1Normal = 175
airNoDio_dioinfo = 5
nrrdBasicInfoOldMin = 12
nrrdBasicInfoMeasurementFrame = 11
nrrdBasicInfoSpaceOrigin = 10
nrrdBinaryOpMultiply = 3
nrrdBasicInfoSpaceDimension = 8
nrrdBasicInfoSpace = 7
hooverErrRenderEnd = 10
nrrdBinaryOpSubtract = 2
nrrdBasicInfoType = 2
nrrdBasicInfoData = 1
tenGageDelNormR1 = 25
nrrdBasicInfoUnknown = 0
pullInfoIsovalueGradient = 20
hooverErrRayEnd = 7
tenAniso_Cs1 = 6
pullCountIteration = 14
limnSplineTypeLast = 6
nrrdKindQuaternion = 23
limnSplineTypeCubicBezier = 4
limnSpaceDevice = 4
limnSplineTypeHermite = 3
limnSpaceLast = 5
limnSplineTypeLinear = 1
limnSplineTypeUnknown = 0
nrrdTypeUInt = 6
limnSpaceView = 2
alanStopConverged = 4
hooverErrThreadBegin = 4
pullInfoIsovalue = 19
alanStopMaxIteration = 2
tenGageFARidgeSurfaceAlignment = 111
limnSpaceWorld = 1
limnSpaceUnknown = 0
airNoDio_std = 3
tenDwiGageLast = 36
tenDwiGage2TensorPeledLevmarInfo = 35
alanParmWrapAround = 21
alanParmConstantFilename = 20
alanParmAlpha = 18
tenGageFALaplacian = 108
pullCountPointsStuck = 11
alanParmReact = 13
airNoDio_okay = 0
nrrdCenterLast = 3
alanParmDiffA = 11
alanParmDeltaX = 10
tenGageCl1GradVec = 167
airTypeInt = 2
alanParmDeltaT = 9
pullCountNixing = 10
alanParmRandRange = 8
alanParmSaveInterval = 6
dyeSpaceLAB = 5
alanParmFrameInterval = 4
tenDwiGageTensorLLSErrorLog = 8
limnEdgeTypeLast = 8
airNoDio_format = 2
dyeSpaceXYZ = 4
pullCountConstraintSatisfy = 8
tenDwiGage2TensorQSegError = 30
nrrdMeasureL4 = 10
pullConstraintFailProjGradZeroB = 4
nrrdCenterCell = 2
limnEdgeTypeFrontCrease = 4
dyeSpaceLast = 7
nrrdIoStateLast = 10
nrrdIoStateBzip2BlockSize = 9
nrrdIoStateZlibStrategy = 8
limnEdgeTypeContour = 3
nrrdIoStateZlibLevel = 7
airNoDio_arch = 1
tenGageCa1GradMag = 174
nrrdIoStateKeepNrrdDataFileOpen = 6
nrrdIoStateSkipData = 5
nrrdIoStateValsPerLine = 4
nrrdIoStateCharsPerLine = 3
nrrdIoStateBareText = 2
pullCountEnergyFromImage = 3
nrrdIoStateDetachedHeader = 1
tenGageFAHessianEval0 = 98
limnEdgeTypeBackFacet = 1
pullCountDescent = 1
nrrdBasicInfoDimension = 4
nrrdCenterNode = 1
pullInfoTangent2 = 16
tenGageQHessian = 95
tenAniso_Cl1 = 2
tenGageCl1HessianEvec2 = 186
tenGageCa1HessianEvec = 201
tenDwiGageTensorMLEError = 19
limnSplineInfo3Vector = 3
nrrdCenterUnknown = 0
nrrdEncodingTypeAscii = 2
pullCountUnknown = 0
tenGageCovariance = 160
tenDwiGageTensorNLSLikelihood = 17
tenTripleTypeMoment = 2
gageSigmaSamplingUniformSigma = 1
tenDwiGageTensorNLSError = 15
tenInterpTypeLast = 12
limnDeviceGL = 2
airFP_POS_DENORM = 7
tenInterpTypeRThetaPhiLinear = 11
pullIterParmMax = 2
echoTypeLast = 12
echoTypeInstance = 11
tenDwiGageTensorWLSErrorLog = 12
echoTypeList = 10
echoTypeSplit = 9
pullInfoInsideGradient = 5
echoTypeTriMesh = 6
echoTypeRectangle = 5
ell_cubic_root_last = 5
echoTypeTriangle = 4
echoTypeCube = 3
tenInterpTypeLoxR = 8
echoTypeCylinder = 1
echoTypeSphere = 0
echoTypeUnknown = -1
ell_cubic_root_three = 4
pullFlagPermuteOnRebin = 1
tenInterpTypeLoxK = 7
echoTypeSuperquad = 2
ell_cubic_root_single_double = 3
pullConstraintFailUnknown = 0
ell_cubic_root_triple = 2
ell_cubic_root_single = 1
tenDwiGageTensorLLSError = 7
echoJitterRandom = 3
pullCountProbe = 7
echoJitterJitter = 2
echoJitterGrid = 1
ell_cubic_root_unknown = 0
echoJitterNone = 0
echoJitterUnknown = -1
tenInterpTypeLogLinear = 2
limnDeviceUnknown = 0
limnSplineInfoUnknown = 0
gageKernelLast = 8
tenGageEval1 = 17
pullInfoLiveThresh = 12
tenInterpTypeUnknown = 0
nrrdBinaryOpLast = 24
coilMethodTypeLast = 9
tenDwiGageUnknown = 0
tenGageFAHessianFrob = 105
gageSigmaSamplingOptimal3DL2L2 = 3
tenFiberIntgLast = 4
tenTripleTypeRThetaZ = 4
tenGageTensorQuatGeoLoxK = 164
echoMatterGlassFuzzy = 3
echoMatterGlassKa = 1
gageKernel11 = 3
pullEnergyTypeButterworthParabola = 13
gageKernel10 = 2
tenFiberStopLast = 11
miteValGTdotV = 16
tenFiberStopMinNumSteps = 10
tenFiberStopMinLength = 9
gageKernel00 = 1
tenFiberStopStub = 8
tenFiberStopFraction = 7
tenFiberStopRadius = 5
miteValVrefN = 15
tenFiberStopConfidence = 4
tenFiberStopNumSteps = 3
baneIncUnknown = 0
nrrdAxisInfoLast = 11
nrrdAxisInfoUnits = 10
nrrdAxisInfoLabel = 9
nrrdFormatTypeLast = 7
nrrdAxisInfoKind = 8
miteValNdotL = 14
nrrdAxisInfoCenter = 7
nrrdAxisInfoSpaceDirection = 6
tenGageFAGradMag = 52
nrrdAxisInfoMax = 5
nrrdAxisInfoMin = 4
nrrdAxisInfoThickness = 3
pullInfoSeedPreThresh = 10
nrrdAxisInfoSpacing = 2
miteValNdotV = 13
nrrdAxisInfoSize = 1
nrrdAxisInfoUnknown = 0
tenGageFAGradVec = 51
tenGageTraceHessianEvec2 = 90
gagePvlFlagQuery = 2
limnDeviceLast = 3
limnDevicePS = 1
miteRangeSP = 8
gagePvlFlagUnknown = 0
tenFiberIntgMidpoint = 2
miteRangeKs = 7
miteValTw = 9
nrrdMeasureMean = 3
gageCtxFlagLast = 7
miteRangeKd = 6
miteValRi = 8
tenFiberStopAniso = 1
tenGageFAHessian = 96
tenFiberStopUnknown = 0
tenGageCl1HessianEvec = 183
tenFiberTypeZhukov = 6
tenGageSGradVec = 45
tenFiberTypeTensorLine = 4
tenGageTensorLogEuclidean = 163
tenFiberTypeEvec2 = 3
tenFiberTypeEvec1 = 2
tenFiberTypeEvec0 = 1
tenFiberStopBounds = 6
miteValZw = 5
gageCtxFlagNeedK = 3
miteValYi = 4
gageCtxFlagK3Pack = 2
nrrdZlibStrategyLast = 4
tenGageBNormal = 41
nrrdZlibStrategyHuffman = 2
nrrdZlibStrategyDefault = 1
nrrdZlibStrategyUnknown = 0
limnPolyDataInfoLast = 5
gageCtxFlagUnknown = 0
limnPolyDataInfoTang = 4
limnPolyDataInfoTex2 = 3
limnPolyDataInfoNorm = 2
miteValXw = 1
limnPolyDataInfoUnknown = 0
tenEstimate1MethodLast = 5
tenEstimate1MethodNLS = 3
tenEstimate1MethodWLS = 2
tenEstimate1MethodLLS = 1
tenEstimate1MethodUnknown = 0
tenGageNormNormal = 38
gageSclHessEvec2 = 18
tenFiberIntgUnknown = 0
gageItemPackPartHessEvec2 = 11
tenAnisoLast = 30
tenAniso_eval2 = 29
tenAniso_eval0 = 27
nrrdEncodingTypeHex = 3
tenAniso_Omega = 24
tenAniso_Th = 23
tenGageNormGradVec = 36
tenAniso_Mode = 22
tenGageSHessian = 94
tenAniso_Skew = 21
seekTypeUnknown = 0
tenAniso_S = 20
nrrdBinaryOpRicianRand = 23
tenGageTraceNormal = 35
tenGageCovarianceRGRT = 161
tenAniso_FA = 15
tenAniso_RA = 14
gageParmStackNormalizeRecon = 12
gageErrStackSearch = 5
tenGageAniso = 207
gageParmStackNormalizeDerivBias = 11
tenAniso_Clpmin1 = 5
gageParmStackNormalizeDeriv = 10
tenGageCa1HessianEval0 = 198
pullInfoHeight = 6
tenGageTensorGradMag = 31
miteStageOpUnknown = 0
pullEnergyTypeZero = 12
gageParmDefaultCenter = 8
gagePvlFlagVolume = 1
nrrdSpace3DLeftHandedTime = 12
tenGageRNormal = 56
tenGageCl1HessianEvec0 = 184
miteValZi = 6
tenGageCl1HessianEval2 = 182
tenGageCl1HessianEval1 = 181
gageErrStackIntegral = 4
limnQN11octa = 11
gageParmCurvNormalSide = 6
tenGageCa1GradVec = 173
tenGageCp1Normal = 172
gageParmGradMagCurvMin = 5
tenGageCl1Normal = 169
tenGageR = 10
nrrdEncodingTypeRaw = 1
limnPrimitiveLast = 8
nrrdTypeBlock = 11
limnPrimitiveLineStrip = 6
gageParmK3Pack = 4
limnPrimitiveQuads = 5
limnPrimitiveTriangleFan = 4
limnPrimitiveTriangleStrip = 3
tenEstimate2MethodQSegLLS = 1
nrrdKindNormal = 8
gageParmCheckIntegrals = 3
tenGageConfDiffusionFraction = 159
tenGageRGradMag = 55
tenGageOmegaDiffusionFraction = 156
tenGageOmegaDiffusionAlign = 155
tenGageFADiffusionFraction = 153
gageParmRenormalize = 2
tenGageFADiffusionAlign = 152
tenGageFAGradVecDotEvec0 = 151
tenGageTraceDiffusionFraction = 150
tenGageTraceDiffusionAlign = 149
tenGageTraceGradVecDotEvec0 = 148
tenGageOmegaHessianContrTenEvec2 = 147
gageParmVerbose = 1
tenGageOmegaHessianContrTenEvec1 = 146
tenGageEvec2 = 22
tenAniso_R = 19
nrrdEncodingTypeUnknown = 0
tenGageOmegaHessianEval0 = 136
tenGageEvec1 = 21
tenGageModeHessianFrob = 133
tenGageModeHessianEvec2 = 132
nrrdTypeInt = 5
tenGageModeHessianEvec0 = 130
tenGageModeHessianEvec = 129
tenGageModeHessianEval2 = 128
tenGageConfDiffusionAlign = 158
tenGageRGradVec = 54
tenGageModeHessian = 124
pullInitMethodPointPerVoxel = 3
tenGageFAFlowlineCurv = 122
tenGageFACurvDir2 = 121
tenGageCl1HessianEvec1 = 185
tenGageFACurvDir1 = 120
gageErrBoundsStack = 3
tenGageFAGaussCurv = 119
nrrdTypeShort = 3
tenGageFAMeanCurv = 118
tenGageFAShapeIndex = 117
tenGageEval2 = 18
gageSclHessEval2 = 14
tenGageFATotalCurv = 116
tenGageFAKappa2 = 115
tenGlyphTypeCylinder = 3
tenGageFAKappa1 = 114
tenGageFAGeomTens = 113
nrrdTypeUChar = 2
tenGageModeGradVec = 57
unrrduScaleSubtract = 5
tenGageFARidgeLineAlignment = 110
tenGageFAHessianEvalMode = 109
tenGageQ = 8
tenGageFAValleySurfaceStrength = 107
tenGageFARidgeSurfaceStrength = 106
mossFlagUnknown = -1
pullInitMethodUnknown = 0
tenGageFAHessianEvec2 = 104
tenGageFAHessianEvec0 = 102
tenGageFAHessianEval2 = 100
tenGageFAHessianEval1 = 99
tenGageEval = 15
tenTripleTypeJ = 6
tenGageFAHessianEval = 97
nrrdEncodingTypeLast = 6
nrrdEncodingTypeBzip2 = 5
nrrdEncodingTypeGzip = 4
unrrduScaleMultiply = 2
gageErrBoundsSpace = 2
unrrduScaleNothing = 1
tenInterpTypeQuatGeoLoxR = 10
pullSourceLast = 3
airInsane_AIR_NAN = 7
tenInterpTypeGeoLoxK = 5
tenGageTheta = 12
tenInterpTypeAffineInvariant = 3
tenGageCl1HessianEval0 = 180
nrrdTernaryOpGTSmooth = 8
tenGageMode = 11
miteValWdotD = 19
miteValVdefTdotV = 18
airInsane_FltDblFPClass = 5
nrrdKindPoint = 5
gageCtxFlagRadius = 5
miteValView = 11
miteValTi = 10
tenGageFA = 9
miteValRw = 7
gageErrNone = 1
coilKindTypeLast = 4
pullTraceStopConstrFail = 2
miteValYw = 3
miteValXi = 2
nrrdField_comment = 1
pullTraceStopSpeeding = 1
airInsane_endian = 1
hooverErrLast = 11
tenEstimate2MethodPeled = 2
nrrdTypeDouble = 10
nrrdKindList = 4
tenEstimate2MethodUnknown = 0
pullEnergyTypeHepticWell = 11
gageCtxFlagKernel = 4
nrrdKind2DSymMatrix = 24
nrrdTypeUShort = 4
nrrdTypeChar = 1
nrrdTypeDefault = 0
gageErrUnknown = 0
dyeSpaceHSL = 2
airInsane_dio = 8
coilKindType7Tensor = 3
gageSclHessEval = 11
airInsane_NaNExists = 4
airInsane_nInfExists = 3
tenEstimate2MethodLast = 3
airInsane_pInfExists = 2
tenGageConfidence = 2
airInsane_not = 0
pullInfoUnknown = 0
nrrdBinaryOpMod = 8
limnQN8octa = 16
limnQN8checker = 15
limnQN9octa = 14
limnQN10octa = 13
alanTextureTypeGrayScott = 2
limnQN12octa = 10
dyeSpaceLUV = 6
limnQN15octa = 5
limnSplineTypeBC = 5
limnQN16checker = 3
limnQN16border1 = 2
hooverErrThreadJoin = 9
limnQN16simple = 1
limnQNUnknown = 0
alanStopLast = 6
pullEnergyTypeCotan = 5
echoMatterGlassKd = 2
baneClipLast = 5
baneClipAbsolute = 1
airTypeOther = 12
seekTypeLast = 12
nrrdBinaryOpGT = 15
seekTypeValleySurfaceT = 11
seekTypeRidgeSurfaceT = 9
hooverErrThreadEnd = 8
nrrdBoundaryWeight = 4
nrrdBoundaryWrap = 3
tenTripleTypeLast = 10
seekTypeMinimalSurface = 6
alanStopDiverged = 5
nrrdBoundaryUnknown = 0
seekTypeRidgeSurface = 2
seekTypeIsocontour = 1
airFP_NEG_DENORM = 8
baneRangeLast = 5
baneRangeAnywhere = 4
tenAniso_Ca1 = 4
baneRangeZeroCentered = 3
baneRangePositive = 1
baneRangeUnknown = 0
gageItemPackPartLast = 12
limnQNLast = 17
gageItemPackPartHessEvec1 = 10
gageItemPackPartHessEval1 = 7
gageItemPackPartHessEval0 = 6
gageItemPackPartHessian = 5
pullSysParmSeparableGammaLearnRescale = 4
gageItemPackPartNormal = 4
nrrdKindUnknown = 0
miteShadeMethodLast = 4
gageItemPackPartGradVec = 2
gageItemPackPartScalar = 1
miteShadeMethodNone = 1
miteShadeMethodUnknown = 0
pullSysParmGamma = 3
gageVecMGEvec = 31
tijk_class_tensor = 1
gageVecMGEval = 30
gageVecMGFrob = 29
gageVecMultiGrad = 28
gageVecGradient0 = 25
gageVecProjHelGradient = 24
gageVecDirHelDeriv = 23
gageVecHelGradient = 22
gageVecNCurlNormGrad = 21
gageVecDivGradient = 18
airTypeUnknown = 0
gageVecHessian = 17
limnSplineTypeTimeWarp = 2
gageVecImaginaryPart = 16
gageVecSOmega = 14
gageVecNormHelicity = 13
gageVecHelicity = 12
pullProcessModeLast = 5
gageVecCurl = 10
gageVecDivergence = 9
pullProcessModeNeighLearn = 2
pullProcessModeDescent = 1
pullProcessModeUnknown = 0
nrrdBoundaryLast = 6
gageVecVector2 = 4
gageVecVector0 = 2
gageVecUnknown = 0
gageSclLast = 37
gageSclHessMode = 36
gageSclHessDotPeakness = 35
gageSclNPerp = 6
gageSclMedian = 32
limnQN13octa = 8
gageSclCurvDir2 = 30
gageSclGaussCurv = 28
gageSclShapeIndex = 26
echoMatterMetalFuzzy = 3
limnQN14octa = 7
gageSclTotalCurv = 24
gageSclK2 = 23
gageSclK1 = 22
gageSclGeomTensTen = 21
gageScl2ndDD = 19
limnQN14checker = 6
nrrdTypeLast = 12
gageSclHessEvec1 = 17
gageSclHessEvec0 = 16
tenFiberParmLast = 5
nrrdMeasureHistoProduct = 26
nrrdBoundaryMirror = 5
nrrdMeasureHistoMode = 25
nrrdMeasureHistoMedian = 24
nrrdMeasureHistoMean = 23
tenDwiFiberType2Evec0 = 2
nrrdMeasureHistoMax = 22
nrrdMeasureHistoMin = 21
gageSclHessianTen = 8
gageSclHessian = 7
limnQN16octa = 4
nrrdMeasureLineSlope = 18
nrrdMeasureSkew = 17
gageSclNormal = 4
gageSclGradMag = 3
nrrdKind3DMaskedSymMatrix = 29
gageSclGradVec = 2
gageSclValue = 1
nrrdMeasureRootMeanSquare = 12
nrrdMeasureNormalizedL2 = 11
nrrdBinaryOpMin = 11
nrrdMeasureL1 = 8
tenGageCl1 = 73
nrrdMeasureSum = 7
nrrdMeasureProduct = 6
nrrdMeasureMedian = 4
nrrdMeasureMax = 2
nrrdMeasureMin = 1
nrrdMeasureUnknown = 0
tenFiberStopLength = 2
limnQN10checker = 12
pullInterTypeJustR = 1
pullStatusLast = 5
pullStatusEdge = 4
pullStatusNixMe = 3
tenGageEvec0 = 20
pullStatusStuck = 1
pullStatusUnknown = 0
echoJitterLast = 4
nrrdSpaceRightAnteriorSuperior = 1
nrrdBinaryOpAtan2 = 10
tenGageCa1Hessian = 196
miteValUnknown = 0
pullCondNew = 7
tenAniso_Cp1 = 3
nrrdKind2DMaskedMatrix = 27
tenDwiGage2TensorPeledError = 33
alanTextureTypeUnknown = 0
baneClipTopN = 4
hooverErrThreadCreate = 3
pullInfoStrength = 22
baneClipPercentile = 3
pullInfoTensor = 1
nrrdBoundaryBleed = 2
baneClipPeakRatio = 2
pullInfoHessian = 3
nrrdKind2DMatrix = 26
pullIterParmLast = 10
baneClipUnknown = 0
pullIterParmEnergyIncreasePermitHalfLife = 9
pullIterParmSnap = 8
pullIterParmCallback = 7
pullIterParmAddDescent = 6
nrrdOriginStatusNoMaxOrSpacing = 3
pullIterParmPopCntlPeriod = 5
pullIterParmConstraintMax = 4
pullIterParmStuckMax = 3
hooverErrRenderBegin = 2
pullIterParmMin = 1
nrrdOriginStatusNoMin = 2
tenTripleTypeWheelParm = 9
tenTripleTypeR = 8
tenTripleTypeK = 7
tenTripleTypeRThetaPhi = 5
tenTripleTypeXYZ = 3
tenGlyphTypeLast = 7
tenTripleTypeEigenvalue = 1
tenTripleTypeUnknown = 0
dyeSpaceRGB = 3
miteRangeLast = 9
nrrdSpace3DRightHandedTime = 11
miteRangeKa = 5
nrrdBinaryOpFlippedSgnPow = 7
miteRangeEmissivity = 4
miteRangeBlue = 3
miteRangeGreen = 2
miteRangeRed = 1
miteRangeAlpha = 0
miteRangeUnknown = -1
baneIncLast = 5
baneIncStdv = 4
tenGageOmegaHessianContrTenEvec0 = 145
airFP_Last = 11
tenGlyphTypePolarPlot = 6
nrrdKindCovariantVector = 7
limnPrimitiveLines = 7
gageSclUnknown = 0
nrrdSpacingStatusUnknown = 0
nrrdKindVector = 6
tenAniso_VF = 16
nrrdBinaryOpSgnPow = 6
hooverErrNone = 0
pullPropNeighCovarDet = 16
tenGageOmega2ndDD = 144
tenDwiFiberTypeLast = 4
nrrdKindTime = 3
tenDwiFiberType12BlendEvec0 = 3
tenGlyphTypeBetterquad = 5
nrrdKindSpace = 2
tenGageEvec = 19
dyeSpaceHSV = 1
nrrdKindDomain = 1
miteValLast = 20
tenDwiFiberTypeUnknown = 0
airEndianUnknown = 0
seekTypeRidgeSurfaceOP = 8
pullPropNeighCovarTrace = 15
gageParmStackUse = 9
tenGageFAHessianEvec = 101
seekTypeMaximalSurface = 7
pullCondEnergyBad = 6
airInsane_DLSize = 11
dyeSpaceUnknown = 0
pullInfoLast = 24
tenInterpTypeQuatGeoLoxK = 9
nrrdBoundaryPad = 1
nrrdBasicInfoSpaceUnits = 9
seekTypeRidgeLine = 4
pullSourceUnknown = 0
tenGageFAHessianEvec1 = 103
seekTypeValleySurface = 3
pullPropNeighInterNum = 14
tenGageOmegaHessianEvec2 = 142
limnCameraPathTrackBoth = 3
tenGageCl1GradMag = 168
airInsane_FISize = 10
nrrdBasicInfoSampleUnits = 6
tenGageOmegaHessian = 134
nrrdBasicInfoContent = 5
pullInfoQuality = 23
nrrdResampleNonExistentRenormalize = 2
tenAniso_eval1 = 28
pullFlagLast = 16
nrrdSpace3DRightHanded = 9
nrrdResampleNonExistentUnknown = 0
tenGageOmegaHessianEvec1 = 141
gageParmKernelIntegralNearZero = 7
alanParmMaxIteration = 7
tenEstimate1MethodMLE = 4
airInsane_UCSize = 9
tenGageLast = 208
tenGlyphTypeSphere = 2
coilMethodTypeTesting = 1
nrrdSpaceScannerXYZ = 7
nrrdKind3Gradient = 20
nrrdFormatTypeEPS = 6
tenDwiGageConfidence = 26
nrrdSpaceLeftPosteriorSuperiorTime = 6
miteValVdefT = 17
echoMatterMetalKa = 1
airTypeUInt = 3
nrrdFFTWPlanRigorPatient = 3
nrrdFFTWPlanRigorMeasure = 2
tenGageOmegaHessianEvec0 = 140
nrrdHasNonExistLast = 4
nrrdHasNonExistOnly = 2
nrrdFFTWPlanRigorEstimate = 1
nrrdHasNonExistTrue = 1
nrrdHasNonExistFalse = 0
coilMethodTypeUnknown = 0
tenDwiGageTensorLikelihood = 25
nrrdSpaceLeftAnteriorSuperiorTime = 5
nrrdTypeLLong = 7
pullConstraintFailProjGradZeroA = 3
tenGageRHessian = 123
nrrdField_last = 33
limnEdgeTypeLone = 7
nrrdField_data_file = 32
nrrdField_measurement_frame = 31
baneRangeNegative = 2
nrrdField_space_origin = 30
nrrdField_space_units = 29
nrrdField_sample_units = 28
nrrdField_keyvalue = 27
nrrdField_byte_skip = 26
nrrdField_line_skip = 25
nrrdField_encoding = 24
nrrdField_endian = 23
nrrdField_old_max = 22
nrrdField_old_min = 21
nrrdField_max = 20
tenFiberTypeUnknown = 0
tenGageDetHessian = 93
nrrdMeasureLineError = 20
tenGageBHessian = 92
nrrdField_centers = 15
airNoDio_setfl = 10
tenDwiGageTensorErrorLog = 24
pullInitMethodHalton = 2
nrrdSpaceRightAnteriorSuperiorTime = 4
nrrdField_axis_maxs = 13
nrrdField_axis_mins = 12
nrrdField_thicknesses = 11
tenGageTraceHessianEval2 = 86
nrrdUnaryOpRoundUp = 21
nrrdField_sizes = 9
tenGageTraceHessianEval0 = 84
nrrdField_space = 7
nrrdTernaryOpExists = 12
coilKindType3Color = 2
nrrdField_number = 3
nrrdOriginStatusLast = 5
tenGageCp2 = 78
tenGageCl2 = 77
tenDwiGageAll = 1
nrrdField_unknown = 0
nrrdTernaryOpLerp = 11
tenGageCa1 = 75
baneMeasrLast = 9
tenGageRotTanMags = 71
tenGageInvarRGradMags = 69
tenGageFiberCurving = 205
tenGageInvarKGradMags = 67
tenGageOmegaGradMag = 64
nrrdTernaryOpClamp = 9
echoMatterLightPower = 0
tenGageThetaNormal = 62
tenInterpTypeWang = 4
tenGageThetaGradVec = 60
tenGageModeNormal = 59
echoMatterPhongSp = 3
hestSourceUnknown = 0
echoMatterPhongKs = 2
echoMatterPhongKd = 1
nrrdKind3DMaskedMatrix = 31
nrrdKind3DMatrix = 30
nrrdKind3DSymMatrix = 28
limnEdgeTypeFrontFacet = 5
pullInterTypeAdditive = 4
pullInterTypeSeparable = 3
gageItemPackPartHessEvec0 = 9
pullInterTypeUnivariate = 2
nrrdTernaryOpMaxSmooth = 6
nrrdKind4Vector = 22
nrrdKind3Normal = 21
gageItemPackPartHessEval2 = 8
nrrdKind3Vector = 19
nrrdTernaryOpMax = 5
airInsane_QNaNHiBit = 6
tenGageCa1HessianEvec2 = 204
tenGageCp1GradMag = 171
nrrdKindRGBColor = 14
nrrdKind3Color = 13
nrrdKindScalar = 10
nrrdKindStub = 9
tenDwiGageTensor = 22
nrrdTypeUnknown = 0
nrrdTernaryOpMin = 3
gageVecCurlNormGrad = 20
nrrdUnaryOpCeil = 19
tenFiberIntgRK4 = 3
nrrdSpaceScannerXYZTime = 8
tenFiberIntgEuler = 1
gageItemPackPartGradMag = 3
nrrdTernaryOpUnknown = 0
limnSpaceScreen = 3
miteShadeMethodLitTen = 3
pullCountCC = 13
pullCountPoints = 12
miteShadeMethodPhong = 2
pullCountAdding = 9
pullCountForceFromPoints = 6
pullCountEnergyFromPoints = 5
pullCountForceFromImage = 4
miteValNormal = 12
gageItemPackPartUnknown = 0
limnEdgeTypeBackCrease = 2
pullCountTestStep = 2
limnEdgeTypeUnknown = 0
pullPropPosition = 7
tenGageOmegaHessianEval = 135
gageVecLast = 32
tenGageCa1HessianEvec0 = 202
airMopOnError = 1
nrrdZlibStrategyFiltered = 3
tenInterpTypeLinear = 1
gageKernelStack = 7
gageKernel22 = 6
gageKernel21 = 5
gageKernel20 = 4
baneIncPercentile = 3
baneIncRangeRatio = 2
baneIncAbsolute = 1
gageKernelUnknown = 0
nrrdUnaryOpNormalRand = 27
gagePvlFlagLast = 4
gagePvlFlagNeedD = 3
tenGageQNormal = 50
tenGageQGradMag = 49
tenGageQGradVec = 48
tenGageSNormal = 47
gageCtxFlagShape = 6
limnQN12checker = 9
gageVecGradient2 = 27
tenGageDetGradVec = 42
gageCtxFlagNeedD = 1
nrrdUnaryOpExists = 25
tenGageBGradVec = 39
gageParmLast = 16
gageParmTwoDimZeroZ = 15
gageVecGradient1 = 26
gageParmGenerateErrStr = 14
gageParmOrientationFromSpacing = 13
tenGageTraceGradMag = 34
tenGageTraceGradVec = 33
tenGageTensorGradMagMag = 32
tenGageTensorGrad = 30
tenGageDelNormPhi3 = 29
nrrdUnaryOpAbs = 23
tenGageDelNormPhi2 = 28
tenGageDelNormPhi1 = 27
tenGageDelNormK3 = 24
tenGageDelNormK2 = 23
gageParmUnknown = 0
pullInitMethodLast = 5
alanStopNonExist = 3
pullFlagNoAdd = 8
unrrduScaleLast = 8
unrrduScaleExact = 7
unrrduScaleAspectRatio = 6
pullInitMethodRandom = 1
tenGageEval0 = 16
tenGageOmega = 14
tenGageModeWarp = 13
pullTraceStopLast = 6
pullTraceStopStub = 5
pullTraceStopLength = 4
pullTraceStopBounds = 3
alanStopNot = 1
tenGageS = 7
tenGageDet = 6
tenGageB = 5
tenGageNorm = 4
tenGageTrace = 3
alanStopUnknown = 0
tenGageTensor = 1
tenGageUnknown = 0
nrrdUnaryOpErf = 17
gageVecCurlGradient = 19
echoMatterPhongKa = 0
nrrdKindComplex = 11
gageVecLambda2 = 15
nrrdHasNonExistUnknown = 3
echoJittableLast = 7
echoJittableMotionB = 6
tenGageModeHessianEvec1 = 131
echoJittableMotionA = 5
echoJittableNormalB = 4
echoJittableNormalA = 3
echoJittableLens = 2
echoJittableLight = 1
echoJittablePixel = 0
echoJittableUnknown = -1
tenDwiGageTensorNLSErrorLog = 16
limnPolyDataInfoRGBA = 1
pullSourceProp = 2
pullSourceGage = 1
pullProcessModeNixing = 4
nrrdResampleNonExistentLast = 4
nrrdResampleNonExistentWeight = 3
nrrdResampleNonExistentNoop = 1
pullProcessModeAdding = 3
nrrdFFTWPlanRigorExhaustive = 4
nrrdFFTWPlanRigorUnknown = 0
gageVecStrain = 8
nrrdTernaryOpLast = 17
nrrdTernaryOpRician = 16
nrrdTernaryOpGaussian = 15
nrrdTernaryOpInClosed = 14
nrrdTernaryOpInOpen = 13
gageVecJacobian = 7
nrrdTernaryOpIfElse = 10
hestSourceDefault = 1
nrrdTernaryOpLTSmooth = 7
gageVecNormalized = 6
nrrdTernaryOpMinSmooth = 4
nrrdTernaryOpMultiply = 2
nrrdTernaryOpAdd = 1
gageVecLength = 5
tenAniso_Det = 25
nrrdUnaryOpLast = 33
nrrdUnaryOpSigmaOfTau = 32
alanParmLast = 22
nrrdUnaryOpTauOfSigma = 31
nrrdUnaryOpOne = 30
nrrdUnaryOpZero = 29
nrrdUnaryOpIf = 28
nrrdUnaryOpRand = 26
nrrdUnaryOpSgn = 24
gageVecVector1 = 3
nrrdUnaryOpFloor = 20
nrrdUnaryOpNerf = 18
nrrdUnaryOpCbrt = 16
nrrdUnaryOpSqrt = 15
nrrdUnaryOpExpm1 = 14
tenInterpTypeGeoLoxR = 6
nrrdUnaryOpLog1p = 13
nrrdUnaryOpLog10 = 12
gageVecVector = 1
nrrdUnaryOpLog2 = 11
gageSclHessFrob = 10
nrrdUnaryOpLog = 10
nrrdUnaryOpExp = 9
nrrdUnaryOpAtan = 8
nrrdUnaryOpAcos = 7
nrrdUnaryOpAsin = 6
nrrdUnaryOpTan = 5
nrrdUnaryOpCos = 4
nrrdUnaryOpSin = 3
nrrdUnaryOpReciprocal = 2
alanParmMaxPixelChange = 17
nrrdUnaryOpNegative = 1
tenGageSGradMag = 46
nrrdUnaryOpUnknown = 0
alanParmBeta = 19
airLLong = c_longlong
airULLong = c_ulonglong
# Union of pointer-to-pointer views over the basic C scalar types, mirroring
# Teem's airPtrPtrUnion.  Declared empty first, then _fields_ assigned — the
# generator emits this two-step pattern uniformly so that self/forward
# references in other structs work; keep it as-is.
class airPtrPtrUnion(Union):
    pass
airPtrPtrUnion._fields_ = [
    ('uc', POINTER(POINTER(c_ubyte))),
    ('sc', POINTER(POINTER(c_byte))),
    ('c', POINTER(STRING)),
    ('cp', POINTER(POINTER(STRING))),
    ('us', POINTER(POINTER(c_ushort))),
    ('s', POINTER(POINTER(c_short))),
    ('ui', POINTER(POINTER(c_uint))),
    ('i', POINTER(POINTER(c_int))),
    ('f', POINTER(POINTER(c_float))),
    ('d', POINTER(POINTER(c_double))),
    ('v', POINTER(c_void_p)),
]
# ctypes mirror of Teem's airEnum: a string<->int value mapping table
# (name, value count M, value/description/equivalent-string arrays, and a
# case-sensitivity flag 'sense').  Field layout must match the C struct.
class airEnum(Structure):
    pass
airEnum._fields_ = [
    ('name', STRING),
    ('M', c_uint),
    ('str', POINTER(STRING)),
    ('val', POINTER(c_int)),
    ('desc', POINTER(STRING)),
    ('strEqv', POINTER(STRING)),
    ('valEqv', POINTER(c_int)),
    ('sense', c_int),
]
# airEnum accessor functions: each binding fetches the symbol from libteem
# and declares restype/argtypes so ctypes marshals arguments correctly.
airEnumUnknown = libteem.airEnumUnknown
airEnumUnknown.restype = c_int
airEnumUnknown.argtypes = [POINTER(airEnum)]
airEnumValCheck = libteem.airEnumValCheck
airEnumValCheck.restype = c_int
airEnumValCheck.argtypes = [POINTER(airEnum), c_int]
airEnumStr = libteem.airEnumStr
airEnumStr.restype = STRING
airEnumStr.argtypes = [POINTER(airEnum), c_int]
airEnumDesc = libteem.airEnumDesc
airEnumDesc.restype = STRING
airEnumDesc.argtypes = [POINTER(airEnum), c_int]
airEnumVal = libteem.airEnumVal
airEnumVal.restype = c_int
airEnumVal.argtypes = [POINTER(airEnum), STRING]
airEnumFmtDesc = libteem.airEnumFmtDesc
airEnumFmtDesc.restype = STRING
airEnumFmtDesc.argtypes = [POINTER(airEnum), c_int, c_int, STRING]
airEnumPrint = libteem.airEnumPrint
airEnumPrint.restype = None
airEnumPrint.argtypes = [POINTER(FILE), POINTER(airEnum)]
airEnumCheck = libteem.airEnumCheck
airEnumCheck.restype = c_int
airEnumCheck.argtypes = [STRING, POINTER(airEnum)]
# in_dll reads a global variable (not a function) out of the shared library.
airEndian = (POINTER(airEnum)).in_dll(libteem, 'airEndian')
airMyEndian = libteem.airMyEndian
airMyEndian.restype = c_int
airMyEndian.argtypes = []
# ctypes mirror of Teem's airArray: a resizable array with optional
# per-element allocate/free/init/done callbacks.  'incr' is the growth
# increment, 'unit' the per-element byte size.
class airArray(Structure):
    pass
airArray._fields_ = [
    ('data', c_void_p),
    ('dataP', POINTER(c_void_p)),
    ('len', c_uint),
    ('lenP', POINTER(c_uint)),
    ('incr', c_uint),
    ('size', c_uint),
    ('unit', c_size_t),
    ('noReallocWhenSmaller', c_int),
    ('allocCB', CFUNCTYPE(c_void_p)),
    ('freeCB', CFUNCTYPE(c_void_p, c_void_p)),
    ('initCB', CFUNCTYPE(None, c_void_p)),
    ('doneCB', CFUNCTYPE(None, c_void_p)),
]
# airArray lifecycle and length-management functions.  Teem convention:
# *Nix frees the container only, *Nuke frees container plus contents.
airArrayNew = libteem.airArrayNew
airArrayNew.restype = POINTER(airArray)
airArrayNew.argtypes = [POINTER(c_void_p), POINTER(c_uint), c_size_t, c_uint]
airArrayStructCB = libteem.airArrayStructCB
airArrayStructCB.restype = None
airArrayStructCB.argtypes = [POINTER(airArray), CFUNCTYPE(None, c_void_p), CFUNCTYPE(None, c_void_p)]
airArrayPointerCB = libteem.airArrayPointerCB
airArrayPointerCB.restype = None
airArrayPointerCB.argtypes = [POINTER(airArray), CFUNCTYPE(c_void_p), CFUNCTYPE(c_void_p, c_void_p)]
airArrayLenSet = libteem.airArrayLenSet
airArrayLenSet.restype = None
airArrayLenSet.argtypes = [POINTER(airArray), c_uint]
airArrayLenPreSet = libteem.airArrayLenPreSet
airArrayLenPreSet.restype = None
airArrayLenPreSet.argtypes = [POINTER(airArray), c_uint]
airArrayLenIncr = libteem.airArrayLenIncr
airArrayLenIncr.restype = c_uint
airArrayLenIncr.argtypes = [POINTER(airArray), c_int]
airArrayNix = libteem.airArrayNix
airArrayNix.restype = POINTER(airArray)
airArrayNix.argtypes = [POINTER(airArray)]
airArrayNuke = libteem.airArrayNuke
airArrayNuke.restype = POINTER(airArray)
airArrayNuke.argtypes = [POINTER(airArray)]
# ctypes mirror of Teem's airHeap: a binary heap keyed by doubles, built on
# airArray storage, with index/inverse-index arrays for keyed lookup.
class airHeap(Structure):
    pass
airHeap._fields_ = [
    ('key_a', POINTER(airArray)),
    ('data_a', POINTER(airArray)),
    ('idx_a', POINTER(airArray)),
    ('invidx_a', POINTER(airArray)),
    ('key', POINTER(c_double)),
    ('data', c_void_p),
    ('idx', POINTER(c_uint)),
    ('invidx', POINTER(c_uint)),
]
# airHeap construction, insertion, peek/pop and update functions.
airHeapNew = libteem.airHeapNew
airHeapNew.restype = POINTER(airHeap)
airHeapNew.argtypes = [c_size_t, c_uint]
airHeapFromArray = libteem.airHeapFromArray
airHeapFromArray.restype = POINTER(airHeap)
airHeapFromArray.argtypes = [POINTER(airArray), POINTER(airArray)]
airHeapNix = libteem.airHeapNix
airHeapNix.restype = POINTER(airHeap)
airHeapNix.argtypes = [POINTER(airHeap)]
airHeapLength = libteem.airHeapLength
airHeapLength.restype = c_uint
airHeapLength.argtypes = [POINTER(airHeap)]
airHeapInsert = libteem.airHeapInsert
airHeapInsert.restype = c_uint
airHeapInsert.argtypes = [POINTER(airHeap), c_double, c_void_p]
airHeapMerge = libteem.airHeapMerge
airHeapMerge.restype = c_uint
airHeapMerge.argtypes = [POINTER(airHeap), POINTER(airHeap)]
airHeapFrontPeek = libteem.airHeapFrontPeek
airHeapFrontPeek.restype = c_double
airHeapFrontPeek.argtypes = [POINTER(airHeap), c_void_p]
airHeapFrontPop = libteem.airHeapFrontPop
airHeapFrontPop.restype = c_double
airHeapFrontPop.argtypes = [POINTER(airHeap), c_void_p]
airHeapFrontUpdate = libteem.airHeapFrontUpdate
airHeapFrontUpdate.restype = c_int
airHeapFrontUpdate.argtypes = [POINTER(airHeap), c_double, c_void_p]
airHeapFind = libteem.airHeapFind
airHeapFind.restype = c_int
airHeapFind.argtypes = [POINTER(airHeap), POINTER(c_uint), c_void_p]
airHeapRemove = libteem.airHeapRemove
airHeapRemove.restype = c_int
airHeapRemove.argtypes = [POINTER(airHeap), c_uint]
airHeapUpdate = libteem.airHeapUpdate
airHeapUpdate.restype = c_int
airHeapUpdate.argtypes = [POINTER(airHeap), c_uint, c_double, c_void_p]
# Threading support.  airThread/airThreadMutex/airThreadCond are opaque:
# their fields are never declared here, so Python code can only hold
# pointers to them (the underscored class is the incomplete C struct tag).
airThreadCapable = (c_int).in_dll(libteem, 'airThreadCapable')
airThreadNoopWarning = (c_int).in_dll(libteem, 'airThreadNoopWarning')
class _airThread(Structure):
    pass
airThread = _airThread
class _airThreadMutex(Structure):
    pass
airThreadMutex = _airThreadMutex
class _airThreadCond(Structure):
    pass
airThreadCond = _airThreadCond
# Barrier built from a mutex + condition variable; layout mirrors the C struct.
class airThreadBarrier(Structure):
    pass
airThreadBarrier._fields_ = [
    ('numUsers', c_uint),
    ('numDone', c_uint),
    ('doneMutex', POINTER(airThreadMutex)),
    ('doneCond', POINTER(airThreadCond)),
]
# Thread / mutex / condition-variable / barrier functions.
airThreadNew = libteem.airThreadNew
airThreadNew.restype = POINTER(airThread)
airThreadNew.argtypes = []
airThreadStart = libteem.airThreadStart
airThreadStart.restype = c_int
airThreadStart.argtypes = [POINTER(airThread), CFUNCTYPE(c_void_p, c_void_p), c_void_p]
airThreadJoin = libteem.airThreadJoin
airThreadJoin.restype = c_int
airThreadJoin.argtypes = [POINTER(airThread), POINTER(c_void_p)]
airThreadNix = libteem.airThreadNix
airThreadNix.restype = POINTER(airThread)
airThreadNix.argtypes = [POINTER(airThread)]
airThreadMutexNew = libteem.airThreadMutexNew
airThreadMutexNew.restype = POINTER(airThreadMutex)
airThreadMutexNew.argtypes = []
airThreadMutexLock = libteem.airThreadMutexLock
airThreadMutexLock.restype = c_int
airThreadMutexLock.argtypes = [POINTER(airThreadMutex)]
airThreadMutexUnlock = libteem.airThreadMutexUnlock
airThreadMutexUnlock.restype = c_int
airThreadMutexUnlock.argtypes = [POINTER(airThreadMutex)]
airThreadMutexNix = libteem.airThreadMutexNix
airThreadMutexNix.restype = POINTER(airThreadMutex)
airThreadMutexNix.argtypes = [POINTER(airThreadMutex)]
airThreadCondNew = libteem.airThreadCondNew
airThreadCondNew.restype = POINTER(airThreadCond)
airThreadCondNew.argtypes = []
airThreadCondWait = libteem.airThreadCondWait
airThreadCondWait.restype = c_int
airThreadCondWait.argtypes = [POINTER(airThreadCond), POINTER(airThreadMutex)]
airThreadCondSignal = libteem.airThreadCondSignal
airThreadCondSignal.restype = c_int
airThreadCondSignal.argtypes = [POINTER(airThreadCond)]
airThreadCondBroadcast = libteem.airThreadCondBroadcast
airThreadCondBroadcast.restype = c_int
airThreadCondBroadcast.argtypes = [POINTER(airThreadCond)]
airThreadCondNix = libteem.airThreadCondNix
airThreadCondNix.restype = POINTER(airThreadCond)
airThreadCondNix.argtypes = [POINTER(airThreadCond)]
airThreadBarrierNew = libteem.airThreadBarrierNew
airThreadBarrierNew.restype = POINTER(airThreadBarrier)
airThreadBarrierNew.argtypes = [c_uint]
airThreadBarrierWait = libteem.airThreadBarrierWait
airThreadBarrierWait.restype = c_int
airThreadBarrierWait.argtypes = [POINTER(airThreadBarrier)]
airThreadBarrierNix = libteem.airThreadBarrierNix
airThreadBarrierNix.restype = POINTER(airThreadBarrier)
airThreadBarrierNix.argtypes = [POINTER(airThreadBarrier)]
# Type-punning unions: view a float/double and its raw integer bits through
# the same storage (used by the FP-classification helpers below).
class airFloat(Union):
    pass
airFloat._fields_ = [
    ('i', c_uint),
    ('f', c_float),
]
class airDouble(Union):
    pass
# _pack_ = 4 reproduces the C compiler's alignment for this union; generated.
airDouble._pack_ = 4
airDouble._fields_ = [
    ('i', airULLong),
    ('d', c_double),
]
# IEEE-754 utilities: split/assemble sign|exponent|fraction parts, classify
# values (NaN/Inf/etc.), and pre-built special-value constants from libteem.
airMyQNaNHiBit = (c_int).in_dll(libteem, 'airMyQNaNHiBit')
airFPPartsToVal_f = libteem.airFPPartsToVal_f
airFPPartsToVal_f.restype = c_float
airFPPartsToVal_f.argtypes = [c_uint, c_uint, c_uint]
airFPValToParts_f = libteem.airFPValToParts_f
airFPValToParts_f.restype = None
airFPValToParts_f.argtypes = [POINTER(c_uint), POINTER(c_uint), POINTER(c_uint), c_float]
airFPPartsToVal_d = libteem.airFPPartsToVal_d
airFPPartsToVal_d.restype = c_double
airFPPartsToVal_d.argtypes = [c_uint, c_uint, c_uint, c_uint]
airFPValToParts_d = libteem.airFPValToParts_d
airFPValToParts_d.restype = None
airFPValToParts_d.argtypes = [POINTER(c_uint), POINTER(c_uint), POINTER(c_uint), POINTER(c_uint), c_double]
airFPGen_f = libteem.airFPGen_f
airFPGen_f.restype = c_float
airFPGen_f.argtypes = [c_int]
airFPGen_d = libteem.airFPGen_d
airFPGen_d.restype = c_double
airFPGen_d.argtypes = [c_int]
airFPClass_f = libteem.airFPClass_f
airFPClass_f.restype = c_int
airFPClass_f.argtypes = [c_float]
airFPClass_d = libteem.airFPClass_d
airFPClass_d.restype = c_int
airFPClass_d.argtypes = [c_double]
airFPFprintf_f = libteem.airFPFprintf_f
airFPFprintf_f.restype = None
airFPFprintf_f.argtypes = [POINTER(FILE), c_float]
airFPFprintf_d = libteem.airFPFprintf_d
airFPFprintf_d.restype = None
airFPFprintf_d.argtypes = [POINTER(FILE), c_double]
airFloatQNaN = (airFloat).in_dll(libteem, 'airFloatQNaN')
airFloatSNaN = (airFloat).in_dll(libteem, 'airFloatSNaN')
airFloatPosInf = (airFloat).in_dll(libteem, 'airFloatPosInf')
airFloatNegInf = (airFloat).in_dll(libteem, 'airFloatNegInf')
airNaN = libteem.airNaN
airNaN.restype = c_float
airNaN.argtypes = []
airIsNaN = libteem.airIsNaN
airIsNaN.restype = c_int
airIsNaN.argtypes = [c_double]
airIsInf_f = libteem.airIsInf_f
airIsInf_f.restype = c_int
airIsInf_f.argtypes = [c_float]
airIsInf_d = libteem.airIsInf_d
airIsInf_d.restype = c_int
airIsInf_d.argtypes = [c_double]
airExists = libteem.airExists
airExists.restype = c_int
airExists.argtypes = [c_double]
# Mersenne-Twister RNG state: the standard 624-word state vector plus a
# cursor ('pNext') and remaining-word count ('left').
class airRandMTState(Structure):
    pass
airRandMTState._fields_ = [
    ('state', c_uint * 624),
    ('pNext', POINTER(c_uint)),
    ('left', c_uint),
]
# RNG API.  The _r suffix variants take an explicit state pointer; the
# others use the shared global state (airRandMTStateGlobal).
airRandMTStateGlobal = (POINTER(airRandMTState)).in_dll(libteem, 'airRandMTStateGlobal')
airRandMTStateGlobalInit = libteem.airRandMTStateGlobalInit
airRandMTStateGlobalInit.restype = None
airRandMTStateGlobalInit.argtypes = []
airRandMTStateNew = libteem.airRandMTStateNew
airRandMTStateNew.restype = POINTER(airRandMTState)
airRandMTStateNew.argtypes = [c_uint]
airRandMTStateNix = libteem.airRandMTStateNix
airRandMTStateNix.restype = POINTER(airRandMTState)
airRandMTStateNix.argtypes = [POINTER(airRandMTState)]
airSrandMT_r = libteem.airSrandMT_r
airSrandMT_r.restype = None
airSrandMT_r.argtypes = [POINTER(airRandMTState), c_uint]
airDrandMT_r = libteem.airDrandMT_r
airDrandMT_r.restype = c_double
airDrandMT_r.argtypes = [POINTER(airRandMTState)]
airUIrandMT_r = libteem.airUIrandMT_r
airUIrandMT_r.restype = c_uint
airUIrandMT_r.argtypes = [POINTER(airRandMTState)]
airDrandMT53_r = libteem.airDrandMT53_r
airDrandMT53_r.restype = c_double
airDrandMT53_r.argtypes = [POINTER(airRandMTState)]
airRandInt = libteem.airRandInt
airRandInt.restype = c_uint
airRandInt.argtypes = [c_uint]
airRandInt_r = libteem.airRandInt_r
airRandInt_r.restype = c_uint
airRandInt_r.argtypes = [POINTER(airRandMTState), c_uint]
airSrandMT = libteem.airSrandMT
airSrandMT.restype = None
airSrandMT.argtypes = [c_uint]
airDrandMT = libteem.airDrandMT
airDrandMT.restype = c_double
airDrandMT.argtypes = []
airRandMTSanity = libteem.airRandMTSanity
airRandMTSanity.restype = c_int
airRandMTSanity.argtypes = []
# String-parsing helpers: airParseStr<T> parse up to N values of type T out
# of a string; airParseStr is the per-airType dispatch table (13 entries).
airAtod = libteem.airAtod
airAtod.restype = c_double
airAtod.argtypes = [STRING]
airSingleSscanf = libteem.airSingleSscanf
airSingleSscanf.restype = c_int
airSingleSscanf.argtypes = [STRING, STRING, c_void_p]
airBool = (POINTER(airEnum)).in_dll(libteem, 'airBool')
airParseStrB = libteem.airParseStrB
airParseStrB.restype = c_uint
airParseStrB.argtypes = [POINTER(c_int), STRING, STRING, c_uint]
airParseStrI = libteem.airParseStrI
airParseStrI.restype = c_uint
airParseStrI.argtypes = [POINTER(c_int), STRING, STRING, c_uint]
airParseStrUI = libteem.airParseStrUI
airParseStrUI.restype = c_uint
airParseStrUI.argtypes = [POINTER(c_uint), STRING, STRING, c_uint]
airParseStrZ = libteem.airParseStrZ
airParseStrZ.restype = c_uint
airParseStrZ.argtypes = [POINTER(c_size_t), STRING, STRING, c_uint]
airParseStrF = libteem.airParseStrF
airParseStrF.restype = c_uint
airParseStrF.argtypes = [POINTER(c_float), STRING, STRING, c_uint]
airParseStrD = libteem.airParseStrD
airParseStrD.restype = c_uint
airParseStrD.argtypes = [POINTER(c_double), STRING, STRING, c_uint]
airParseStrC = libteem.airParseStrC
airParseStrC.restype = c_uint
airParseStrC.argtypes = [STRING, STRING, STRING, c_uint]
airParseStrS = libteem.airParseStrS
airParseStrS.restype = c_uint
airParseStrS.argtypes = [POINTER(STRING), STRING, STRING, c_uint]
airParseStrE = libteem.airParseStrE
airParseStrE.restype = c_uint
airParseStrE.argtypes = [POINTER(c_int), STRING, STRING, c_uint]
airParseStr = (CFUNCTYPE(c_uint, c_void_p, STRING, STRING, c_uint) * 13).in_dll(libteem, 'airParseStr')
# C-string utilities (dup/len/cmp/tokenize/case-convert/line-read).
airStrdup = libteem.airStrdup
airStrdup.restype = STRING
airStrdup.argtypes = [STRING]
airStrlen = libteem.airStrlen
airStrlen.restype = c_size_t
airStrlen.argtypes = [STRING]
airStrcmp = libteem.airStrcmp
airStrcmp.restype = c_int
airStrcmp.argtypes = [STRING, STRING]
airStrtokQuoting = (c_int).in_dll(libteem, 'airStrtokQuoting')
airStrtok = libteem.airStrtok
airStrtok.restype = STRING
airStrtok.argtypes = [STRING, STRING, POINTER(STRING)]
airStrntok = libteem.airStrntok
airStrntok.restype = c_uint
airStrntok.argtypes = [STRING, STRING]
airStrtrans = libteem.airStrtrans
airStrtrans.restype = STRING
airStrtrans.argtypes = [STRING, c_char, c_char]
airStrcpy = libteem.airStrcpy
airStrcpy.restype = STRING
airStrcpy.argtypes = [STRING, c_size_t, STRING]
airEndsWith = libteem.airEndsWith
airEndsWith.restype = c_int
airEndsWith.argtypes = [STRING, STRING]
airUnescape = libteem.airUnescape
airUnescape.restype = STRING
airUnescape.argtypes = [STRING]
airOneLinify = libteem.airOneLinify
airOneLinify.restype = STRING
airOneLinify.argtypes = [STRING]
airToLower = libteem.airToLower
airToLower.restype = STRING
airToLower.argtypes = [STRING]
airToUpper = libteem.airToUpper
airToUpper.restype = STRING
airToUpper.argtypes = [STRING]
airOneLine = libteem.airOneLine
airOneLine.restype = c_uint
airOneLine.argtypes = [POINTER(FILE), STRING, c_uint]
# Library sanity checks and version/release metadata exported by libteem.
airInsaneErr = libteem.airInsaneErr
airInsaneErr.restype = STRING
airInsaneErr.argtypes = [c_int]
airSanity = libteem.airSanity
airSanity.restype = c_int
airSanity.argtypes = []
airTeemVersion = (STRING).in_dll(libteem, 'airTeemVersion')
airTeemReleaseDone = (c_int).in_dll(libteem, 'airTeemReleaseDone')
airTeemReleaseDate = (STRING).in_dll(libteem, 'airTeemReleaseDate')
airTeemVersionSprint = libteem.airTeemVersionSprint
airTeemVersionSprint.restype = None
airTeemVersionSprint.argtypes = [STRING]
# Miscellaneous utilities: pointer/memory helpers, FILE wrappers,
# formatted printing of sizes, index/clamp helpers, wall-clock time,
# and per-airType name/size lookup tables.
airNull = libteem.airNull
airNull.restype = c_void_p
airNull.argtypes = []
airSetNull = libteem.airSetNull
airSetNull.restype = c_void_p
airSetNull.argtypes = [POINTER(c_void_p)]
airFree = libteem.airFree
airFree.restype = c_void_p
airFree.argtypes = [c_void_p]
airFopen = libteem.airFopen
airFopen.restype = POINTER(FILE)
airFopen.argtypes = [STRING, POINTER(FILE), STRING]
airFclose = libteem.airFclose
airFclose.restype = POINTER(FILE)
airFclose.argtypes = [POINTER(FILE)]
airSinglePrintf = libteem.airSinglePrintf
airSinglePrintf.restype = c_int
airSinglePrintf.argtypes = [POINTER(FILE), STRING, STRING]
airSprintSize_t = libteem.airSprintSize_t
airSprintSize_t.restype = STRING
airSprintSize_t.argtypes = [STRING, c_size_t]
airSprintVecSize_t = libteem.airSprintVecSize_t
airSprintVecSize_t.restype = STRING
airSprintVecSize_t.argtypes = [STRING, POINTER(c_size_t), c_uint]
airPrettySprintSize_t = libteem.airPrettySprintSize_t
airPrettySprintSize_t.restype = STRING
airPrettySprintSize_t.argtypes = [STRING, c_size_t]
airSprintPtrdiff_t = libteem.airSprintPtrdiff_t
airSprintPtrdiff_t.restype = STRING
airSprintPtrdiff_t.argtypes = [STRING, ptrdiff_t]
airPresent = (c_int).in_dll(libteem, 'airPresent')
airStderr = libteem.airStderr
airStderr.restype = POINTER(FILE)
airStderr.argtypes = []
airStdout = libteem.airStdout
airStdout.restype = POINTER(FILE)
airStdout.argtypes = []
airStdin = libteem.airStdin
airStdin.restype = POINTER(FILE)
airStdin.argtypes = []
airIndex = libteem.airIndex
airIndex.restype = c_uint
airIndex.argtypes = [c_double, c_double, c_double, c_uint]
airIndexClamp = libteem.airIndexClamp
airIndexClamp.restype = c_uint
airIndexClamp.argtypes = [c_double, c_double, c_double, c_uint]
airIndexULL = libteem.airIndexULL
airIndexULL.restype = airULLong
airIndexULL.argtypes = [c_double, c_double, c_double, airULLong]
airIndexClampULL = libteem.airIndexClampULL
airIndexClampULL.restype = airULLong
airIndexClampULL.argtypes = [c_double, c_double, c_double, airULLong]
airDoneStr = libteem.airDoneStr
airDoneStr.restype = STRING
airDoneStr.argtypes = [c_double, c_double, c_double, STRING]
airTime = libteem.airTime
airTime.restype = c_double
airTime.argtypes = []
# Lookup tables indexed by airType (13 entries): type names and byte sizes.
airTypeStr = (c_char * 129 * 13).in_dll(libteem, 'airTypeStr')
airTypeSize = (c_size_t * 13).in_dll(libteem, 'airTypeSize')
# Math helpers (equivalence classes, fast exp, normal/shuffle RNG helpers,
# sign/power, Bessel/Rician functions, tau/sigma transforms, low-discrepancy
# sequences, CRC32) followed by the direct-I/O (dio) interface.
airEqvAdd = libteem.airEqvAdd
airEqvAdd.restype = None
airEqvAdd.argtypes = [POINTER(airArray), c_uint, c_uint]
airEqvMap = libteem.airEqvMap
airEqvMap.restype = c_uint
airEqvMap.argtypes = [POINTER(airArray), POINTER(c_uint), c_uint]
airEqvSettle = libteem.airEqvSettle
airEqvSettle.restype = c_uint
airEqvSettle.argtypes = [POINTER(c_uint), c_uint]
airFastExp = libteem.airFastExp
airFastExp.restype = c_double
airFastExp.argtypes = [c_double]
airExp = libteem.airExp
airExp.restype = c_double
airExp.argtypes = [c_double]
airNormalRand = libteem.airNormalRand
airNormalRand.restype = None
airNormalRand.argtypes = [POINTER(c_double), POINTER(c_double)]
airNormalRand_r = libteem.airNormalRand_r
airNormalRand_r.restype = None
airNormalRand_r.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(airRandMTState)]
airShuffle = libteem.airShuffle
airShuffle.restype = None
airShuffle.argtypes = [POINTER(c_uint), c_uint, c_int]
airShuffle_r = libteem.airShuffle_r
airShuffle_r.restype = None
airShuffle_r.argtypes = [POINTER(airRandMTState), POINTER(c_uint), c_uint, c_int]
airCbrt = libteem.airCbrt
airCbrt.restype = c_double
airCbrt.argtypes = [c_double]
airMode3 = libteem.airMode3
airMode3.restype = c_double
airMode3.argtypes = [c_double, c_double, c_double]
airMode3_d = libteem.airMode3_d
airMode3_d.restype = c_double
airMode3_d.argtypes = [POINTER(c_double)]
airSgnPow = libteem.airSgnPow
airSgnPow.restype = c_double
airSgnPow.argtypes = [c_double, c_double]
airFlippedSgnPow = libteem.airFlippedSgnPow
airFlippedSgnPow.restype = c_double
airFlippedSgnPow.argtypes = [c_double, c_double]
airIntPow = libteem.airIntPow
airIntPow.restype = c_double
airIntPow.argtypes = [c_double, c_int]
airSgn = libteem.airSgn
airSgn.restype = c_int
airSgn.argtypes = [c_double]
airLog2 = libteem.airLog2
airLog2.restype = c_int
airLog2.argtypes = [c_size_t]
airErfc = libteem.airErfc
airErfc.restype = c_double
airErfc.argtypes = [c_double]
airErf = libteem.airErf
airErf.restype = c_double
airErf.argtypes = [c_double]
airGaussian = libteem.airGaussian
airGaussian.restype = c_double
airGaussian.argtypes = [c_double, c_double, c_double]
airBesselI0 = libteem.airBesselI0
airBesselI0.restype = c_double
airBesselI0.argtypes = [c_double]
airBesselI1 = libteem.airBesselI1
airBesselI1.restype = c_double
airBesselI1.argtypes = [c_double]
airBesselI0ExpScaled = libteem.airBesselI0ExpScaled
airBesselI0ExpScaled.restype = c_double
airBesselI0ExpScaled.argtypes = [c_double]
airBesselI1ExpScaled = libteem.airBesselI1ExpScaled
airBesselI1ExpScaled.restype = c_double
airBesselI1ExpScaled.argtypes = [c_double]
airLogBesselI0 = libteem.airLogBesselI0
airLogBesselI0.restype = c_double
airLogBesselI0.argtypes = [c_double]
airLogRician = libteem.airLogRician
airLogRician.restype = c_double
airLogRician.argtypes = [c_double, c_double, c_double]
airRician = libteem.airRician
airRician.restype = c_double
airRician.argtypes = [c_double, c_double, c_double]
airBesselI1By0 = libteem.airBesselI1By0
airBesselI1By0.restype = c_double
airBesselI1By0.argtypes = [c_double]
airBesselIn = libteem.airBesselIn
airBesselIn.restype = c_double
airBesselIn.argtypes = [c_int, c_double]
airBesselInExpScaled = libteem.airBesselInExpScaled
airBesselInExpScaled.restype = c_double
airBesselInExpScaled.argtypes = [c_int, c_double]
airTauOfTime = libteem.airTauOfTime
airTauOfTime.restype = c_double
airTauOfTime.argtypes = [c_double]
airTimeOfTau = libteem.airTimeOfTau
airTimeOfTau.restype = c_double
airTimeOfTau.argtypes = [c_double]
airSigmaOfTau = libteem.airSigmaOfTau
airSigmaOfTau.restype = c_double
airSigmaOfTau.argtypes = [c_double]
airTauOfSigma = libteem.airTauOfSigma
airTauOfSigma.restype = c_double
airTauOfSigma.argtypes = [c_double]
airVanDerCorput = libteem.airVanDerCorput
airVanDerCorput.restype = c_double
airVanDerCorput.argtypes = [c_uint, c_uint]
airHalton = libteem.airHalton
airHalton.restype = None
airHalton.argtypes = [POINTER(c_double), c_uint, POINTER(c_uint), c_uint]
airPrimeList = (c_uint * 1000).in_dll(libteem, 'airPrimeList')
airCRC32 = libteem.airCRC32
airCRC32.restype = c_uint
airCRC32.argtypes = [POINTER(c_ubyte), c_size_t, c_size_t, c_int]
# Direct I/O (unbuffered file I/O) support.
airNoDioErr = libteem.airNoDioErr
airNoDioErr.restype = STRING
airNoDioErr.argtypes = [c_int]
airMyDio = (c_int).in_dll(libteem, 'airMyDio')
airDisableDio = (c_int).in_dll(libteem, 'airDisableDio')
airDioInfo = libteem.airDioInfo
airDioInfo.restype = None
airDioInfo.argtypes = [POINTER(c_int), POINTER(c_int), POINTER(c_int), c_int]
airDioTest = libteem.airDioTest
airDioTest.restype = c_int
airDioTest.argtypes = [c_int, c_void_p, c_size_t]
airDioMalloc = libteem.airDioMalloc
airDioMalloc.restype = c_void_p
airDioMalloc.argtypes = [c_size_t, c_int]
airDioRead = libteem.airDioRead
airDioRead.restype = c_size_t
airDioRead.argtypes = [c_int, c_void_p, c_size_t]
airDioWrite = libteem.airDioWrite
airDioWrite.restype = c_size_t
airDioWrite.argtypes = [c_int, c_void_p, c_size_t]
# "Mop" cleanup-stack interface: register pointers with a mopper callback
# so they can be freed en masse on error (airMopError) or success
# (airMopOkay).  airMopper is the cleanup-callback signature.
airMopper = CFUNCTYPE(c_void_p, c_void_p)
class airMop(Structure):
    pass
airMop._fields_ = [
    ('ptr', c_void_p),
    ('mop', airMopper),
    ('when', c_int),
]
airMopNew = libteem.airMopNew
airMopNew.restype = POINTER(airArray)
airMopNew.argtypes = []
airMopAdd = libteem.airMopAdd
airMopAdd.restype = c_int
airMopAdd.argtypes = [POINTER(airArray), c_void_p, airMopper, c_int]
airMopSub = libteem.airMopSub
airMopSub.restype = None
airMopSub.argtypes = [POINTER(airArray), c_void_p, airMopper]
airMopMem = libteem.airMopMem
airMopMem.restype = None
airMopMem.argtypes = [POINTER(airArray), c_void_p, c_int]
airMopUnMem = libteem.airMopUnMem
airMopUnMem.restype = None
airMopUnMem.argtypes = [POINTER(airArray), c_void_p]
airMopPrint = libteem.airMopPrint
airMopPrint.restype = None
airMopPrint.argtypes = [POINTER(airArray), c_void_p, c_int]
airMopDone = libteem.airMopDone
airMopDone.restype = None
airMopDone.argtypes = [POINTER(airArray), c_int]
airMopError = libteem.airMopError
airMopError.restype = None
airMopError.argtypes = [POINTER(airArray)]
airMopOkay = libteem.airMopOkay
airMopOkay.restype = None
airMopOkay.argtypes = [POINTER(airArray)]
airMopDebug = libteem.airMopDebug
airMopDebug.restype = None
airMopDebug.argtypes = [POINTER(airArray)]
airMopSingleDone = libteem.airMopSingleDone
airMopSingleDone.restype = None
airMopSingleDone.argtypes = [POINTER(airArray), c_void_p, c_int]
airMopSingleError = libteem.airMopSingleError
airMopSingleError.restype = None
airMopSingleError.argtypes = [POINTER(airArray), c_void_p]
airMopSingleOkay = libteem.airMopSingleOkay
airMopSingleOkay.restype = None
airMopSingleOkay.argtypes = [POINTER(airArray), c_void_p]
# alan (reaction-diffusion texture) library types.  alan_t is the scalar
# type used throughout.  Note the ordering: Nrrd is forward-declared empty
# here so alanContext_t._fields_ can reference it; Nrrd's own _fields_ are
# filled in further below.  Do not reorder these statements.
alan_t = c_float
class alanContext_t(Structure):
    pass
class Nrrd(Structure):
    pass
alanContext_t._fields_ = [
    ('dim', c_uint),
    ('size', c_uint * 3),
    ('verbose', c_int),
    ('wrap', c_int),
    ('textureType', c_int),
    ('oversample', c_int),
    ('homogAniso', c_int),
    ('numThreads', c_int),
    ('frameInterval', c_int),
    ('saveInterval', c_int),
    ('maxIteration', c_int),
    ('constFilename', c_int),
    ('K', alan_t),
    ('F', alan_t),
    ('deltaX', alan_t),
    ('minAverageChange', alan_t),
    ('maxPixelChange', alan_t),
    ('alpha', alan_t),
    ('beta', alan_t),
    ('react', alan_t),
    ('deltaT', alan_t),
    ('initA', alan_t),
    ('initB', alan_t),
    ('diffA', alan_t),
    ('diffB', alan_t),
    ('randRange', alan_t),
    ('nten', POINTER(Nrrd)),
    ('perIteration', CFUNCTYPE(c_int, POINTER(alanContext_t), c_int)),
    ('iter', c_int),
    ('_nlev', POINTER(Nrrd) * 2),
    ('nlev', POINTER(Nrrd)),
    ('nparm', POINTER(Nrrd)),
    ('averageChange', alan_t),
    ('changeCount', c_int),
    ('changeMutex', POINTER(airThreadMutex)),
    ('iterBarrier', POINTER(airThreadBarrier)),
    ('stop', c_int),
]
alanPresent = (c_int).in_dll(libteem, 'alanPresent')
alanBiffKey = (STRING).in_dll(libteem, 'alanBiffKey')
alanContextNew = libteem.alanContextNew
alanContextNew.restype = POINTER(alanContext)
alanContextNew.argtypes = []
alanContextNix = libteem.alanContextNix
alanContextNix.restype = POINTER(alanContext)
alanContextNix.argtypes = [POINTER(alanContext)]
alanDimensionSet = libteem.alanDimensionSet
alanDimensionSet.restype = c_int
alanDimensionSet.argtypes = [POINTER(alanContext), c_int]
alan2DSizeSet = libteem.alan2DSizeSet
alan2DSizeSet.restype = c_int
alan2DSizeSet.argtypes = [POINTER(alanContext), c_int, c_int]
alan3DSizeSet = libteem.alan3DSizeSet
alan3DSizeSet.restype = c_int
alan3DSizeSet.argtypes = [POINTER(alanContext), c_int, c_int, c_int]
alanTensorSet = libteem.alanTensorSet
alanTensorSet.restype = c_int
alanTensorSet.argtypes = [POINTER(alanContext), POINTER(Nrrd), c_int]
alanParmSet = libteem.alanParmSet
alanParmSet.restype = c_int
alanParmSet.argtypes = [POINTER(alanContext), c_int, c_double]
alanStop = (POINTER(airEnum)).in_dll(libteem, 'alanStop')
alanUpdate = libteem.alanUpdate
alanUpdate.restype = c_int
alanUpdate.argtypes = [POINTER(alanContext)]
# nrrd (N-dimensional raster data) core types.  NrrdAxisInfo describes one
# axis; Nrrd (forward-declared earlier) gets its _fields_ here.  The array
# bounds (axis * 16, space dims * 8) mirror the NRRD compile-time limits
# in the generated headers; _pack_ = 4 reproduces the C struct alignment.
class NrrdAxisInfo(Structure):
    pass
NrrdAxisInfo._pack_ = 4
NrrdAxisInfo._fields_ = [
    ('size', c_size_t),
    ('spacing', c_double),
    ('thickness', c_double),
    ('min', c_double),
    ('max', c_double),
    ('spaceDirection', c_double * 8),
    ('center', c_int),
    ('kind', c_int),
    ('label', STRING),
    ('units', STRING),
]
Nrrd._pack_ = 4
Nrrd._fields_ = [
    ('data', c_void_p),
    ('type', c_int),
    ('dim', c_uint),
    ('axis', NrrdAxisInfo * 16),
    ('content', STRING),
    ('sampleUnits', STRING),
    ('space', c_int),
    ('spaceDim', c_uint),
    ('spaceUnits', STRING * 8),
    ('spaceOrigin', c_double * 8),
    ('measurementFrame', c_double * 8 * 8),
    ('blockSize', c_size_t),
    ('oldMin', c_double),
    ('oldMax', c_double),
    ('ptr', c_void_p),
    ('cmt', POINTER(STRING)),
    ('cmtArr', POINTER(airArray)),
    ('kvp', POINTER(STRING)),
    ('kvpArr', POINTER(airArray)),
]
alanInit = libteem.alanInit
alanInit.restype = c_int
alanInit.argtypes = [POINTER(alanContext), POINTER(Nrrd), POINTER(Nrrd)]
alanRun = libteem.alanRun
alanRun.restype = c_int
alanRun.argtypes = [POINTER(alanContext)]
# bane (histogram-volume / transfer-function) callback-table types.  Each
# struct bundles a name, a type id, parameters, and C function pointers
# implementing the behavior; layouts mirror the C structs.
class baneRange(Structure):
    pass
baneRange._pack_ = 4
baneRange._fields_ = [
    ('name', c_char * 129),
    ('type', c_int),
    ('center', c_double),
    ('answer', CFUNCTYPE(c_int, POINTER(c_double), POINTER(c_double), c_double, c_double)),
]
class baneInc_t(Structure):
    pass
baneInc_t._pack_ = 4
baneInc_t._fields_ = [
    ('name', c_char * 129),
    ('type', c_int),
    ('S', c_double),
    ('SS', c_double),
    ('num', c_int),
    ('nhist', POINTER(Nrrd)),
    ('range', POINTER(baneRange)),
    ('parm', c_double * 5),
    ('process', CFUNCTYPE(None, POINTER(baneInc_t), c_double) * 2),
    ('answer', CFUNCTYPE(c_int, POINTER(c_double), POINTER(c_double), POINTER(Nrrd), POINTER(c_double), POINTER(baneRange))),
]
baneInc = baneInc_t
class baneClip(Structure):
    pass
baneClip._pack_ = 4
baneClip._fields_ = [
    ('name', c_char * 129),
    ('type', c_int),
    ('parm', c_double * 5),
    ('answer', CFUNCTYPE(c_int, POINTER(c_int), POINTER(Nrrd), POINTER(c_double))),
]
class baneMeasr_t(Structure):
    pass
# gageQuery: fixed-size bitmask of gage query items (32 bytes).
gageQuery = c_ubyte * 32
baneMeasr_t._pack_ = 4
baneMeasr_t._fields_ = [
    ('name', c_char * 129),
    ('type', c_int),
    ('parm', c_double * 5),
    ('query', gageQuery),
    ('range', POINTER(baneRange)),
    ('offset0', c_int),
    ('answer', CFUNCTYPE(c_double, POINTER(baneMeasr_t), POINTER(c_double), POINTER(c_double))),
]
# bane aggregate types: a histogram-volume axis (resolution + measure +
# inclusion strategy), the NrrdKernel reconstruction-kernel table, and the
# full histogram-volume parameter struct that composes them.
baneMeasr = baneMeasr_t
class baneAxis(Structure):
    pass
baneAxis._fields_ = [
    ('res', c_uint),
    ('measr', POINTER(baneMeasr)),
    ('inc', POINTER(baneInc)),
]
class baneHVolParm(Structure):
    pass
# NrrdKernel: a named kernel with support/integral queries and
# single-value / array evaluators in float and double precision.
class NrrdKernel(Structure):
    pass
NrrdKernel._fields_ = [
    ('name', c_char * 129),
    ('numParm', c_uint),
    ('support', CFUNCTYPE(c_double, POINTER(c_double))),
    ('integral', CFUNCTYPE(c_double, POINTER(c_double))),
    ('eval1_f', CFUNCTYPE(c_float, c_float, POINTER(c_double))),
    ('evalN_f', CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), c_size_t, POINTER(c_double))),
    ('eval1_d', CFUNCTYPE(c_double, c_double, POINTER(c_double))),
    ('evalN_d', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), c_size_t, POINTER(c_double))),
]
baneHVolParm._pack_ = 4
baneHVolParm._fields_ = [
    ('verbose', c_int),
    ('makeMeasrVol', c_int),
    ('renormalize', c_int),
    ('k3pack', c_int),
    ('k', POINTER(NrrdKernel) * 8),
    ('kparm', c_double * 8 * 8),
    ('clip', POINTER(baneClip)),
    ('incLimit', c_double),
    ('axis', baneAxis * 3),
    ('measrVol', POINTER(Nrrd)),
    ('measrVolDone', c_int),
]
# bane globals (defaults and state knobs exported by libteem) followed by
# the range / inclusion / clip / measure object APIs (New/Copy/Answer/Nix).
baneBiffKey = (STRING).in_dll(libteem, 'baneBiffKey')
baneDefVerbose = (c_int).in_dll(libteem, 'baneDefVerbose')
baneDefMakeMeasrVol = (c_int).in_dll(libteem, 'baneDefMakeMeasrVol')
baneDefIncLimit = (c_double).in_dll(libteem, 'baneDefIncLimit')
baneDefRenormalize = (c_int).in_dll(libteem, 'baneDefRenormalize')
baneDefPercHistBins = (c_int).in_dll(libteem, 'baneDefPercHistBins')
baneStateHistEqBins = (c_int).in_dll(libteem, 'baneStateHistEqBins')
baneStateHistEqSmart = (c_int).in_dll(libteem, 'baneStateHistEqSmart')
baneHack = (c_int).in_dll(libteem, 'baneHack')
baneRangeNew = libteem.baneRangeNew
baneRangeNew.restype = POINTER(baneRange)
baneRangeNew.argtypes = [c_int]
baneRangeCopy = libteem.baneRangeCopy
baneRangeCopy.restype = POINTER(baneRange)
baneRangeCopy.argtypes = [POINTER(baneRange)]
baneRangeAnswer = libteem.baneRangeAnswer
baneRangeAnswer.restype = c_int
baneRangeAnswer.argtypes = [POINTER(baneRange), POINTER(c_double), POINTER(c_double), c_double, c_double]
baneRangeNix = libteem.baneRangeNix
baneRangeNix.restype = POINTER(baneRange)
baneRangeNix.argtypes = [POINTER(baneRange)]
baneIncNew = libteem.baneIncNew
baneIncNew.restype = POINTER(baneInc)
baneIncNew.argtypes = [c_int, POINTER(baneRange), POINTER(c_double)]
baneIncProcess = libteem.baneIncProcess
baneIncProcess.restype = None
baneIncProcess.argtypes = [POINTER(baneInc), c_int, c_double]
baneIncAnswer = libteem.baneIncAnswer
baneIncAnswer.restype = c_int
baneIncAnswer.argtypes = [POINTER(baneInc), POINTER(c_double), POINTER(c_double)]
baneIncCopy = libteem.baneIncCopy
baneIncCopy.restype = POINTER(baneInc)
baneIncCopy.argtypes = [POINTER(baneInc)]
baneIncNix = libteem.baneIncNix
baneIncNix.restype = POINTER(baneInc)
baneIncNix.argtypes = [POINTER(baneInc)]
baneClipNew = libteem.baneClipNew
baneClipNew.restype = POINTER(baneClip)
baneClipNew.argtypes = [c_int, POINTER(c_double)]
baneClipAnswer = libteem.baneClipAnswer
baneClipAnswer.restype = c_int
baneClipAnswer.argtypes = [POINTER(c_int), POINTER(baneClip), POINTER(Nrrd)]
baneClipCopy = libteem.baneClipCopy
baneClipCopy.restype = POINTER(baneClip)
baneClipCopy.argtypes = [POINTER(baneClip)]
baneClipNix = libteem.baneClipNix
baneClipNix.restype = POINTER(baneClip)
baneClipNix.argtypes = [POINTER(baneClip)]
baneMeasrNew = libteem.baneMeasrNew
baneMeasrNew.restype = POINTER(baneMeasr)
baneMeasrNew.argtypes = [c_int, POINTER(c_double)]
# Opaque forward declaration: the real gageContext layout is defined by the C
# library; bindings only ever pass pointers to it, so no _fields_ are needed here.
class gageContext_t(Structure):
    pass
gageContext = gageContext_t
# --- bane: measures, histogram-volume construction, and opacity functions ---
baneMeasrAnswer = libteem.baneMeasrAnswer
baneMeasrAnswer.restype = c_double
baneMeasrAnswer.argtypes = [POINTER(baneMeasr), POINTER(gageContext)]
baneMeasrCopy = libteem.baneMeasrCopy
baneMeasrCopy.restype = POINTER(baneMeasr)
baneMeasrCopy.argtypes = [POINTER(baneMeasr)]
baneMeasrNix = libteem.baneMeasrNix
baneMeasrNix.restype = POINTER(baneMeasr)
baneMeasrNix.argtypes = [POINTER(baneMeasr)]
banePresent = (c_int).in_dll(libteem, 'banePresent')
# baneHVolParm: parameters controlling histogram-volume generation.
baneHVolParmNew = libteem.baneHVolParmNew
baneHVolParmNew.restype = POINTER(baneHVolParm)
baneHVolParmNew.argtypes = []
baneHVolParmGKMSInit = libteem.baneHVolParmGKMSInit
baneHVolParmGKMSInit.restype = None
baneHVolParmGKMSInit.argtypes = [POINTER(baneHVolParm)]
baneHVolParmAxisSet = libteem.baneHVolParmAxisSet
baneHVolParmAxisSet.restype = None
baneHVolParmAxisSet.argtypes = [POINTER(baneHVolParm), c_uint, c_uint, POINTER(baneMeasr), POINTER(baneInc)]
baneHVolParmClipSet = libteem.baneHVolParmClipSet
baneHVolParmClipSet.restype = None
baneHVolParmClipSet.argtypes = [POINTER(baneHVolParm), POINTER(baneClip)]
baneHVolParmNix = libteem.baneHVolParmNix
baneHVolParmNix.restype = POINTER(baneHVolParm)
baneHVolParmNix.argtypes = [POINTER(baneHVolParm)]
# Validation helpers: each returns a biff-style error flag (c_int).
baneInputCheck = libteem.baneInputCheck
baneInputCheck.restype = c_int
baneInputCheck.argtypes = [POINTER(Nrrd), POINTER(baneHVolParm)]
baneHVolCheck = libteem.baneHVolCheck
baneHVolCheck.restype = c_int
baneHVolCheck.argtypes = [POINTER(Nrrd)]
baneInfoCheck = libteem.baneInfoCheck
baneInfoCheck.restype = c_int
baneInfoCheck.argtypes = [POINTER(Nrrd), c_int]
banePosCheck = libteem.banePosCheck
banePosCheck.restype = c_int
banePosCheck.argtypes = [POINTER(Nrrd), c_int]
baneBcptsCheck = libteem.baneBcptsCheck
baneBcptsCheck.restype = c_int
baneBcptsCheck.argtypes = [POINTER(Nrrd)]
baneProbe = libteem.baneProbe
baneProbe.restype = None
baneProbe.argtypes = [POINTER(c_double), POINTER(Nrrd), POINTER(baneHVolParm), POINTER(gageContext), c_uint, c_uint, c_uint]
baneFindInclusion = libteem.baneFindInclusion
baneFindInclusion.restype = c_int
baneFindInclusion.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(Nrrd), POINTER(baneHVolParm), POINTER(gageContext)]
baneMakeHVol = libteem.baneMakeHVol
baneMakeHVol.restype = c_int
baneMakeHVol.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(baneHVolParm)]
baneGKMSHVol = libteem.baneGKMSHVol
baneGKMSHVol.restype = POINTER(Nrrd)
baneGKMSHVol.argtypes = [POINTER(Nrrd), c_float, c_float]
# Opacity / transfer-function computation helpers.
baneOpacInfo = libteem.baneOpacInfo
baneOpacInfo.restype = c_int
baneOpacInfo.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_int]
bane1DOpacInfoFrom2D = libteem.bane1DOpacInfoFrom2D
bane1DOpacInfoFrom2D.restype = c_int
bane1DOpacInfoFrom2D.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
baneSigmaCalc = libteem.baneSigmaCalc
baneSigmaCalc.restype = c_int
baneSigmaCalc.argtypes = [POINTER(c_float), POINTER(Nrrd)]
banePosCalc = libteem.banePosCalc
banePosCalc.restype = c_int
banePosCalc.argtypes = [POINTER(Nrrd), c_float, c_float, POINTER(Nrrd)]
baneOpacCalc = libteem.baneOpacCalc
baneOpacCalc.restype = c_int
baneOpacCalc.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
baneRawScatterplots = libteem.baneRawScatterplots
baneRawScatterplots.restype = c_int
baneRawScatterplots.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), c_int]
# --- unrrdu command descriptors and baneGkms sub-commands ---
class unrrduCmd(Structure):
    pass
# Opaque: parsed/used only through pointers by hest (command-line parsing).
class hestParm(Structure):
    pass
unrrduCmd._fields_ = [
    ('name', STRING),
    ('info', STRING),
    # main() entry point of one sub-command: (argc, argv, me, hparm) -> int
    ('main', CFUNCTYPE(c_int, c_int, POINTER(STRING), STRING, POINTER(hestParm))),
    ('hidden', c_int),
]
# One unrrduCmd per "gkms" sub-command exported by the library.
baneGkms_opacCmd = (unrrduCmd).in_dll(libteem, 'baneGkms_opacCmd')
baneGkms_hvolCmd = (unrrduCmd).in_dll(libteem, 'baneGkms_hvolCmd')
baneGkms_scatCmd = (unrrduCmd).in_dll(libteem, 'baneGkms_scatCmd')
baneGkms_infoCmd = (unrrduCmd).in_dll(libteem, 'baneGkms_infoCmd')
baneGkms_txfCmd = (unrrduCmd).in_dll(libteem, 'baneGkms_txfCmd')
baneGkms_miteCmd = (unrrduCmd).in_dll(libteem, 'baneGkms_miteCmd')
baneGkms_pvgCmd = (unrrduCmd).in_dll(libteem, 'baneGkms_pvgCmd')
baneGkmsMeasr = (POINTER(airEnum)).in_dll(libteem, 'baneGkmsMeasr')
# NOTE(review): bound as a zero-length array — the true length is unknown to
# the binding generator; index past 0 only via pointer arithmetic in C style.
baneGkmsCmdList = (POINTER(unrrduCmd) * 0).in_dll(libteem, 'baneGkmsCmdList')
baneGkmsUsage = libteem.baneGkmsUsage
baneGkmsUsage.restype = None
baneGkmsUsage.argtypes = [STRING, POINTER(hestParm)]
# Opaque hest callback descriptor.
class hestCB(Structure):
    pass
baneGkmsHestIncStrategy = (POINTER(hestCB)).in_dll(libteem, 'baneGkmsHestIncStrategy')
baneGkmsHestBEF = (POINTER(hestCB)).in_dll(libteem, 'baneGkmsHestBEF')
baneGkmsHestGthresh = (POINTER(hestCB)).in_dll(libteem, 'baneGkmsHestGthresh')
# --- biff: Teem's per-key error-message accumulation facility ---
class biffMsg(Structure):
    pass
biffMsg._fields_ = [
    ('key', STRING),
    ('err', POINTER(STRING)),
    ('errNum', c_uint),
    ('errArr', POINTER(airArray)),
]
biffPresent = (c_int).in_dll(libteem, 'biffPresent')
# biffMsg object API.
biffMsgNew = libteem.biffMsgNew
biffMsgNew.restype = POINTER(biffMsg)
biffMsgNew.argtypes = [STRING]
biffMsgNix = libteem.biffMsgNix
biffMsgNix.restype = POINTER(biffMsg)
biffMsgNix.argtypes = [POINTER(biffMsg)]
biffMsgAdd = libteem.biffMsgAdd
biffMsgAdd.restype = None
biffMsgAdd.argtypes = [POINTER(biffMsg), STRING]
biffMsgClear = libteem.biffMsgClear
biffMsgClear.restype = None
biffMsgClear.argtypes = [POINTER(biffMsg)]
biffMsgLineLenMax = libteem.biffMsgLineLenMax
biffMsgLineLenMax.restype = c_uint
biffMsgLineLenMax.argtypes = [POINTER(biffMsg)]
biffMsgMove = libteem.biffMsgMove
biffMsgMove.restype = None
biffMsgMove.argtypes = [POINTER(biffMsg), POINTER(biffMsg), STRING]
# NOTE(review): the C *f variants are printf-style varargs; only the fixed
# leading arguments are declared here, as is usual for generated bindings.
biffMsgAddf = libteem.biffMsgAddf
biffMsgAddf.restype = None
biffMsgAddf.argtypes = [POINTER(biffMsg), STRING]
biffMsgMovef = libteem.biffMsgMovef
biffMsgMovef.restype = None
biffMsgMovef.argtypes = [POINTER(biffMsg), POINTER(biffMsg), STRING]
biffMsgErrNum = libteem.biffMsgErrNum
biffMsgErrNum.restype = c_uint
biffMsgErrNum.argtypes = [POINTER(biffMsg)]
biffMsgStrlen = libteem.biffMsgStrlen
biffMsgStrlen.restype = c_uint
biffMsgStrlen.argtypes = [POINTER(biffMsg)]
biffMsgStrSet = libteem.biffMsgStrSet
biffMsgStrSet.restype = None
biffMsgStrSet.argtypes = [STRING, POINTER(biffMsg)]
biffMsgStrAlloc = libteem.biffMsgStrAlloc
biffMsgStrAlloc.restype = STRING
biffMsgStrAlloc.argtypes = [POINTER(biffMsg)]
biffMsgStrGet = libteem.biffMsgStrGet
biffMsgStrGet.restype = STRING
biffMsgStrGet.argtypes = [POINTER(biffMsg)]
biffMsgNoop = (POINTER(biffMsg)).in_dll(libteem, 'biffMsgNoop')
# Global (keyed) biff API.
biffAdd = libteem.biffAdd
biffAdd.restype = None
biffAdd.argtypes = [STRING, STRING]
biffAddf = libteem.biffAddf
biffAddf.restype = None
biffAddf.argtypes = [STRING, STRING]
biffMaybeAdd = libteem.biffMaybeAdd
biffMaybeAdd.restype = None
biffMaybeAdd.argtypes = [STRING, STRING, c_int]
biffMaybeAddf = libteem.biffMaybeAddf
biffMaybeAddf.restype = None
biffMaybeAddf.argtypes = [c_int, STRING, STRING]
biffGet = libteem.biffGet
biffGet.restype = STRING
biffGet.argtypes = [STRING]
biffGetStrlen = libteem.biffGetStrlen
biffGetStrlen.restype = c_uint
biffGetStrlen.argtypes = [STRING]
biffSetStr = libteem.biffSetStr
biffSetStr.restype = None
biffSetStr.argtypes = [STRING, STRING]
biffCheck = libteem.biffCheck
biffCheck.restype = c_uint
biffCheck.argtypes = [STRING]
biffMove = libteem.biffMove
biffMove.restype = None
biffMove.argtypes = [STRING, STRING, STRING]
biffMovef = libteem.biffMovef
biffMovef.restype = None
biffMovef.argtypes = [STRING, STRING, STRING]
biffSetStrDone = libteem.biffSetStrDone
biffSetStrDone.restype = None
biffSetStrDone.argtypes = [STRING, STRING]
biffDone = libteem.biffDone
biffDone.restype = None
biffDone.argtypes = [STRING]
biffGetDone = libteem.biffGetDone
biffGetDone.restype = STRING
biffGetDone.argtypes = [STRING]
# --- coil: multi-threaded volume filtering (types + API) ---
# coil's scalar value type is a 32-bit float.
coil_t = c_float
class coilMethod(Structure):
    pass
coilMethod._fields_ = [
    ('name', c_char * 129),
    ('type', c_int),
    ('numParm', c_int),
]
class coilKind(Structure):
    pass
coilKind._fields_ = [
    ('name', c_char * 129),
    ('valLen', c_uint),
    # 9 per-method filter callbacks, plus one update callback.
    ('filter', CFUNCTYPE(None, POINTER(coil_t), c_int, c_int, c_int, POINTER(POINTER(coil_t)), POINTER(c_double), POINTER(c_double)) * 9),
    ('update', CFUNCTYPE(None, POINTER(coil_t), POINTER(coil_t))),
]
# Forward declarations: coilTask and coilContext_t reference each other.
class coilTask(Structure):
    pass
class coilContext_t(Structure):
    pass
coilTask._fields_ = [
    ('cctx', POINTER(coilContext_t)),
    ('thread', POINTER(airThread)),
    ('threadIdx', c_uint),
    ('_iv3', POINTER(coil_t)),
    ('iv3', POINTER(POINTER(coil_t))),
    ('iv3Fill', CFUNCTYPE(None, POINTER(POINTER(coil_t)), POINTER(coil_t), c_uint, c_int, c_int, c_int, c_int, c_int, c_int, c_int)),
    ('returnPtr', c_void_p),
]
# _pack_ must be assigned before _fields_ to take effect (matches C layout).
coilContext_t._pack_ = 4
coilContext_t._fields_ = [
    ('nin', POINTER(Nrrd)),
    ('kind', POINTER(coilKind)),
    ('method', POINTER(coilMethod)),
    ('radius', c_uint),
    ('numThreads', c_uint),
    ('verbose', c_int),
    ('parm', c_double * 6),
    ('iter', c_uint),
    ('size', c_size_t * 3),
    ('nextSlice', c_size_t),
    ('spacing', c_double * 3),
    ('nvol', POINTER(Nrrd)),
    ('finished', c_int),
    ('todoFilter', c_int),
    ('todoUpdate', c_int),
    ('nextSliceMutex', POINTER(airThreadMutex)),
    ('task', POINTER(POINTER(coilTask))),
    ('filterBarrier', POINTER(airThreadBarrier)),
    ('updateBarrier', POINTER(airThreadBarrier)),
]
coilContext = coilContext_t
# Library globals and enums.
coilPresent = (c_int).in_dll(libteem, 'coilPresent')
coilBiffKey = (STRING).in_dll(libteem, 'coilBiffKey')
coilDefaultRadius = (c_int).in_dll(libteem, 'coilDefaultRadius')
coilVerbose = (c_int).in_dll(libteem, 'coilVerbose')
coilMethodType = (POINTER(airEnum)).in_dll(libteem, 'coilMethodType')
coilKindType = (POINTER(airEnum)).in_dll(libteem, 'coilKindType')
coilKindScalar = (POINTER(coilKind)).in_dll(libteem, 'coilKindScalar')
coilKindArray = (POINTER(coilKind) * 4).in_dll(libteem, 'coilKindArray')
coilKind7Tensor = (POINTER(coilKind)).in_dll(libteem, 'coilKind7Tensor')
coilMethodTesting = (POINTER(coilMethod)).in_dll(libteem, 'coilMethodTesting')
coilMethodArray = (POINTER(coilMethod) * 9).in_dll(libteem, 'coilMethodArray')
# coil API: context lifecycle and start/iterate/finish driver.
coilContextNew = libteem.coilContextNew
coilContextNew.restype = POINTER(coilContext)
coilContextNew.argtypes = []
coilVolumeCheck = libteem.coilVolumeCheck
coilVolumeCheck.restype = c_int
coilVolumeCheck.argtypes = [POINTER(Nrrd), POINTER(coilKind)]
coilContextAllSet = libteem.coilContextAllSet
coilContextAllSet.restype = c_int
coilContextAllSet.argtypes = [POINTER(coilContext), POINTER(Nrrd), POINTER(coilKind), POINTER(coilMethod), c_uint, c_uint, c_int, POINTER(c_double)]
coilOutputGet = libteem.coilOutputGet
coilOutputGet.restype = c_int
coilOutputGet.argtypes = [POINTER(Nrrd), POINTER(coilContext)]
coilContextNix = libteem.coilContextNix
coilContextNix.restype = POINTER(coilContext)
coilContextNix.argtypes = [POINTER(coilContext)]
coilStart = libteem.coilStart
coilStart.restype = c_int
coilStart.argtypes = [POINTER(coilContext)]
coilIterate = libteem.coilIterate
coilIterate.restype = c_int
coilIterate.argtypes = [POINTER(coilContext), c_int]
coilFinish = libteem.coilFinish
coilFinish.restype = c_int
coilFinish.argtypes = [POINTER(coilContext)]
# --- dye: color representation and color-space conversions ---
class dyeColor(Structure):
    pass
dyeColor._fields_ = [
    ('val', c_float * 3 * 2),
    ('xWhite', c_float),
    ('yWhite', c_float),
    ('spc', c_byte * 2),
    ('ii', c_byte),
]
dyePresent = (c_int).in_dll(libteem, 'dyePresent')
dyeBiffKey = (STRING).in_dll(libteem, 'dyeBiffKey')
# NOTE(review): zero-length array binding — actual length known only to the C side.
dyeSpaceToStr = (c_char * 129 * 0).in_dll(libteem, 'dyeSpaceToStr')
dyeStrToSpace = libteem.dyeStrToSpace
dyeStrToSpace.restype = c_int
dyeStrToSpace.argtypes = [STRING]
# dyeColor object API.
dyeColorInit = libteem.dyeColorInit
dyeColorInit.restype = POINTER(dyeColor)
dyeColorInit.argtypes = [POINTER(dyeColor)]
dyeColorSet = libteem.dyeColorSet
dyeColorSet.restype = POINTER(dyeColor)
dyeColorSet.argtypes = [POINTER(dyeColor), c_int, c_float, c_float, c_float]
dyeColorGet = libteem.dyeColorGet
dyeColorGet.restype = c_int
dyeColorGet.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(dyeColor)]
dyeColorGetAs = libteem.dyeColorGetAs
dyeColorGetAs.restype = c_int
dyeColorGetAs.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(dyeColor), c_int]
dyeColorNew = libteem.dyeColorNew
dyeColorNew.restype = POINTER(dyeColor)
dyeColorNew.argtypes = []
dyeColorCopy = libteem.dyeColorCopy
dyeColorCopy.restype = POINTER(dyeColor)
dyeColorCopy.argtypes = [POINTER(dyeColor), POINTER(dyeColor)]
dyeColorNix = libteem.dyeColorNix
dyeColorNix.restype = POINTER(dyeColor)
dyeColorNix.argtypes = [POINTER(dyeColor)]
dyeColorParse = libteem.dyeColorParse
dyeColorParse.restype = c_int
dyeColorParse.argtypes = [POINTER(dyeColor), STRING]
dyeColorSprintf = libteem.dyeColorSprintf
dyeColorSprintf.restype = STRING
dyeColorSprintf.argtypes = [STRING, POINTER(dyeColor)]
# Conversion functions: all share the signature (out0, out1, out2, in0, in1, in2).
dyeConverter = CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float)
dyeRGBtoHSV = libteem.dyeRGBtoHSV
dyeRGBtoHSV.restype = None
dyeRGBtoHSV.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeHSVtoRGB = libteem.dyeHSVtoRGB
dyeHSVtoRGB.restype = None
dyeHSVtoRGB.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeRGBtoHSL = libteem.dyeRGBtoHSL
dyeRGBtoHSL.restype = None
dyeRGBtoHSL.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeHSLtoRGB = libteem.dyeHSLtoRGB
dyeHSLtoRGB.restype = None
dyeHSLtoRGB.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeRGBtoXYZ = libteem.dyeRGBtoXYZ
dyeRGBtoXYZ.restype = None
dyeRGBtoXYZ.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeXYZtoRGB = libteem.dyeXYZtoRGB
dyeXYZtoRGB.restype = None
dyeXYZtoRGB.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeXYZtoLAB = libteem.dyeXYZtoLAB
dyeXYZtoLAB.restype = None
dyeXYZtoLAB.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeXYZtoLUV = libteem.dyeXYZtoLUV
dyeXYZtoLUV.restype = None
dyeXYZtoLUV.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeLABtoXYZ = libteem.dyeLABtoXYZ
dyeLABtoXYZ.restype = None
dyeLABtoXYZ.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
dyeLUVtoXYZ = libteem.dyeLUVtoXYZ
dyeLUVtoXYZ.restype = None
dyeLUVtoXYZ.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, c_float, c_float]
# 7x7 table of direct converters between the simple color spaces.
dyeSimpleConvert = (dyeConverter * 7 * 7).in_dll(libteem, 'dyeSimpleConvert')
dyeConvert = libteem.dyeConvert
dyeConvert.restype = c_int
dyeConvert.argtypes = [POINTER(dyeColor), c_int]
# --- echo: ray-tracer scalar types and render-state structs ---
# Positions are double precision; colors are single precision.
echoPos_t = c_double
echoCol_t = c_float
class echoRTParm(Structure):
    pass
echoRTParm._pack_ = 4
echoRTParm._fields_ = [
    ('jitterType', c_int),
    ('reuseJitter', c_int),
    ('permuteJitter', c_int),
    ('textureNN', c_int),
    ('numSamples', c_int),
    ('imgResU', c_int),
    ('imgResV', c_int),
    ('maxRecDepth', c_int),
    ('renderLights', c_int),
    ('renderBoxes', c_int),
    ('seedRand', c_int),
    ('sqNRI', c_int),
    ('numThreads', c_int),
    ('sqTol', echoPos_t),
    ('shadow', echoCol_t),
    ('glassC', echoCol_t),
    ('aperture', c_float),
    ('timeGamma', c_float),
    ('boxOpac', c_float),
    ('maxRecCol', echoCol_t * 3),
]
class echoGlobalState(Structure):
    pass
# Forward declarations needed by echoGlobalState's fields.
class limnCamera_t(Structure):
    pass
limnCamera = limnCamera_t
class echoScene_t(Structure):
    pass
echoGlobalState._pack_ = 4
echoGlobalState._fields_ = [
    ('verbose', c_int),
    ('time', c_double),
    ('nraw', POINTER(Nrrd)),
    ('cam', POINTER(limnCamera)),
    ('scene', POINTER(echoScene_t)),
    ('parm', POINTER(echoRTParm)),
    ('workIdx', c_int),
    ('workMutex', POINTER(airThreadMutex)),
]
class echoThreadState(Structure):
    pass
echoThreadState._fields_ = [
    ('thread', POINTER(airThread)),
    ('gstate', POINTER(echoGlobalState)),
    ('verbose', c_int),
    ('threadIdx', c_int),
    ('depth', c_int),
    ('nperm', POINTER(Nrrd)),
    ('njitt', POINTER(Nrrd)),
    ('permBuff', POINTER(c_uint)),
    ('jitt', POINTER(echoPos_t)),
    ('chanBuff', POINTER(echoCol_t)),
    ('rst', POINTER(airRandMTState)),
    ('returnPtr', c_void_p),
]
# --- echo: scene-object structs. Each concrete object repeats echoObject's
# leading fields (type/matter/rgba/mat/ntext) so a pointer to any of them can
# be treated as an echoObject*, C-style "inheritance by layout". ---
class echoObject(Structure):
    pass
echoObject._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
]
class echoSphere(Structure):
    pass
echoSphere._pack_ = 4
echoSphere._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
    ('pos', echoPos_t * 3),
    ('rad', echoPos_t),
]
class echoCylinder(Structure):
    pass
echoCylinder._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
    ('axis', c_int),
]
class echoSuperquad(Structure):
    pass
echoSuperquad._pack_ = 4
echoSuperquad._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
    ('axis', c_int),
    ('A', echoPos_t),
    ('B', echoPos_t),
]
class echoCube(Structure):
    pass
echoCube._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
]
class echoTriangle(Structure):
    pass
echoTriangle._pack_ = 4
echoTriangle._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
    ('vert', echoPos_t * 3 * 3),
]
class echoRectangle(Structure):
    pass
echoRectangle._pack_ = 4
echoRectangle._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
    ('origin', echoPos_t * 3),
    ('edge0', echoPos_t * 3),
    ('edge1', echoPos_t * 3),
]
class echoTriMesh(Structure):
    pass
echoTriMesh._pack_ = 4
echoTriMesh._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
    ('meanvert', echoPos_t * 3),
    ('min', echoPos_t * 3),
    ('max', echoPos_t * 3),
    ('numV', c_int),
    ('numF', c_int),
    ('pos', POINTER(echoPos_t)),
    ('vert', POINTER(c_int)),
]
class echoIsosurface(Structure):
    pass
echoIsosurface._fields_ = [
    ('type', c_byte),
    ('matter', c_ubyte),
    ('rgba', echoCol_t * 4),
    ('mat', echoCol_t * 4),
    ('ntext', POINTER(Nrrd)),
    ('volume', POINTER(Nrrd)),
    ('value', c_float),
]
# Container/acceleration objects share only the leading 'type' byte.
class echoAABBox(Structure):
    pass
echoAABBox._pack_ = 4
echoAABBox._fields_ = [
    ('type', c_byte),
    ('obj', POINTER(echoObject)),
    ('min', echoPos_t * 3),
    ('max', echoPos_t * 3),
]
class echoSplit(Structure):
    pass
echoSplit._pack_ = 4
echoSplit._fields_ = [
    ('type', c_byte),
    ('axis', c_int),
    ('min0', echoPos_t * 3),
    ('max0', echoPos_t * 3),
    ('min1', echoPos_t * 3),
    ('max1', echoPos_t * 3),
    ('obj0', POINTER(echoObject)),
    ('obj1', POINTER(echoObject)),
]
class echoList(Structure):
    pass
echoList._fields_ = [
    ('type', c_byte),
    ('obj', POINTER(POINTER(echoObject))),
    ('objArr', POINTER(airArray)),
]
class echoInstance(Structure):
    pass
echoInstance._pack_ = 4
echoInstance._fields_ = [
    ('type', c_byte),
    ('Mi', echoPos_t * 16),
    ('M', echoPos_t * 16),
    ('obj', POINTER(echoObject)),
]
# --- echo: scene container, ray, and intersection records ---
echoScene_t._fields_ = [
    ('cat', POINTER(POINTER(echoObject))),
    ('catArr', POINTER(airArray)),
    ('rend', POINTER(POINTER(echoObject))),
    ('rendArr', POINTER(airArray)),
    ('light', POINTER(POINTER(echoObject))),
    ('lightArr', POINTER(airArray)),
    ('nrrd', POINTER(POINTER(Nrrd))),
    ('nrrdArr', POINTER(airArray)),
    ('envmap', POINTER(Nrrd)),
    ('ambi', echoCol_t * 3),
    ('bkgr', echoCol_t * 3),
]
echoScene = echoScene_t
class echoRay(Structure):
    pass
echoRay._pack_ = 4
echoRay._fields_ = [
    # 'from' is the C member name; it is a Python keyword, so access this
    # field via getattr(ray, 'from') rather than attribute syntax.
    ('from', echoPos_t * 3),
    ('dir', echoPos_t * 3),
    ('neer', echoPos_t),
    ('faar', echoPos_t),
    ('shadow', c_int),
    ('transp', echoCol_t),
]
class echoIntx(Structure):
    pass
echoIntx._pack_ = 4
echoIntx._fields_ = [
    ('obj', POINTER(echoObject)),
    ('t', echoPos_t),
    ('u', echoPos_t),
    ('v', echoPos_t),
    ('norm', echoPos_t * 3),
    ('view', echoPos_t * 3),
    ('refl', echoPos_t * 3),
    ('pos', echoPos_t * 3),
    ('face', c_int),
    ('boxhits', c_int),
]
# Union of the pointer-to-pointer-to-pointer flavors used by echo internals.
class echoPtrPtrUnion(Union):
    pass
echoPtrPtrUnion._fields_ = [
    ('obj', POINTER(POINTER(POINTER(echoObject)))),
    ('nrd', POINTER(POINTER(POINTER(Nrrd)))),
    ('v', POINTER(c_void_p)),
]
# --- echo: library globals and API function bindings ---
echoJitter = (POINTER(airEnum)).in_dll(libteem, 'echoJitter')
echoType = (POINTER(airEnum)).in_dll(libteem, 'echoType')
echoMatter = (POINTER(airEnum)).in_dll(libteem, 'echoMatter')
echoPresent = (c_int).in_dll(libteem, 'echoPresent')
echoBiffKey = (STRING).in_dll(libteem, 'echoBiffKey')
# State lifecycle (New/Nix pairs).
echoRTParmNew = libteem.echoRTParmNew
echoRTParmNew.restype = POINTER(echoRTParm)
echoRTParmNew.argtypes = []
echoRTParmNix = libteem.echoRTParmNix
echoRTParmNix.restype = POINTER(echoRTParm)
echoRTParmNix.argtypes = [POINTER(echoRTParm)]
echoGlobalStateNew = libteem.echoGlobalStateNew
echoGlobalStateNew.restype = POINTER(echoGlobalState)
echoGlobalStateNew.argtypes = []
echoGlobalStateNix = libteem.echoGlobalStateNix
echoGlobalStateNix.restype = POINTER(echoGlobalState)
echoGlobalStateNix.argtypes = [POINTER(echoGlobalState)]
echoThreadStateNew = libteem.echoThreadStateNew
echoThreadStateNew.restype = POINTER(echoThreadState)
echoThreadStateNew.argtypes = []
echoThreadStateNix = libteem.echoThreadStateNix
echoThreadStateNix.restype = POINTER(echoThreadState)
echoThreadStateNix.argtypes = [POINTER(echoThreadState)]
echoSceneNew = libteem.echoSceneNew
echoSceneNew.restype = POINTER(echoScene)
echoSceneNew.argtypes = []
echoSceneNix = libteem.echoSceneNix
echoSceneNix.restype = POINTER(echoScene)
echoSceneNix.argtypes = [POINTER(echoScene)]
# Object creation and scene assembly.
echoObjectNew = libteem.echoObjectNew
echoObjectNew.restype = POINTER(echoObject)
echoObjectNew.argtypes = [POINTER(echoScene), c_byte]
echoObjectAdd = libteem.echoObjectAdd
echoObjectAdd.restype = c_int
echoObjectAdd.argtypes = [POINTER(echoScene), POINTER(echoObject)]
echoObjectNix = libteem.echoObjectNix
echoObjectNix.restype = POINTER(echoObject)
echoObjectNix.argtypes = [POINTER(echoObject)]
echoRoughSphereNew = libteem.echoRoughSphereNew
echoRoughSphereNew.restype = POINTER(echoObject)
echoRoughSphereNew.argtypes = [POINTER(echoScene), c_int, c_int, POINTER(echoPos_t)]
echoBoundsGet = libteem.echoBoundsGet
echoBoundsGet.restype = None
echoBoundsGet.argtypes = [POINTER(echoPos_t), POINTER(echoPos_t), POINTER(echoObject)]
echoListAdd = libteem.echoListAdd
echoListAdd.restype = None
echoListAdd.argtypes = [POINTER(echoObject), POINTER(echoObject)]
echoListSplit = libteem.echoListSplit
echoListSplit.restype = POINTER(echoObject)
echoListSplit.argtypes = [POINTER(echoScene), POINTER(echoObject), c_int]
echoListSplit3 = libteem.echoListSplit3
echoListSplit3.restype = POINTER(echoObject)
echoListSplit3.argtypes = [POINTER(echoScene), POINTER(echoObject), c_int]
# Per-object-kind parameter setters.
echoSphereSet = libteem.echoSphereSet
echoSphereSet.restype = None
echoSphereSet.argtypes = [POINTER(echoObject), echoPos_t, echoPos_t, echoPos_t, echoPos_t]
echoCylinderSet = libteem.echoCylinderSet
echoCylinderSet.restype = None
echoCylinderSet.argtypes = [POINTER(echoObject), c_int]
echoSuperquadSet = libteem.echoSuperquadSet
echoSuperquadSet.restype = None
echoSuperquadSet.argtypes = [POINTER(echoObject), c_int, echoPos_t, echoPos_t]
echoRectangleSet = libteem.echoRectangleSet
echoRectangleSet.restype = None
echoRectangleSet.argtypes = [POINTER(echoObject), echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t]
echoTriangleSet = libteem.echoTriangleSet
echoTriangleSet.restype = None
echoTriangleSet.argtypes = [POINTER(echoObject), echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t, echoPos_t]
echoTriMeshSet = libteem.echoTriMeshSet
echoTriMeshSet.restype = None
echoTriMeshSet.argtypes = [POINTER(echoObject), c_int, POINTER(echoPos_t), c_int, POINTER(c_int)]
echoInstanceSet = libteem.echoInstanceSet
echoInstanceSet.restype = None
echoInstanceSet.argtypes = [POINTER(echoObject), POINTER(echoPos_t), POINTER(echoObject)]
echoObjectHasMatter = (c_int * 12).in_dll(libteem, 'echoObjectHasMatter')
# Material assignment.
echoColorSet = libteem.echoColorSet
echoColorSet.restype = None
echoColorSet.argtypes = [POINTER(echoObject), echoCol_t, echoCol_t, echoCol_t, echoCol_t]
echoMatterPhongSet = libteem.echoMatterPhongSet
echoMatterPhongSet.restype = None
echoMatterPhongSet.argtypes = [POINTER(echoScene), POINTER(echoObject), echoCol_t, echoCol_t, echoCol_t, echoCol_t]
echoMatterGlassSet = libteem.echoMatterGlassSet
echoMatterGlassSet.restype = None
echoMatterGlassSet.argtypes = [POINTER(echoScene), POINTER(echoObject), echoCol_t, echoCol_t, echoCol_t, echoCol_t]
echoMatterMetalSet = libteem.echoMatterMetalSet
echoMatterMetalSet.restype = None
echoMatterMetalSet.argtypes = [POINTER(echoScene), POINTER(echoObject), echoCol_t, echoCol_t, echoCol_t, echoCol_t]
echoMatterLightSet = libteem.echoMatterLightSet
echoMatterLightSet.restype = None
echoMatterLightSet.argtypes = [POINTER(echoScene), POINTER(echoObject), echoCol_t, echoCol_t]
echoMatterTextureSet = libteem.echoMatterTextureSet
echoMatterTextureSet.restype = None
echoMatterTextureSet.argtypes = [POINTER(echoScene), POINTER(echoObject), POINTER(Nrrd)]
# Lighting, texturing, intersection shading.
echoLightPosition = libteem.echoLightPosition
echoLightPosition.restype = None
echoLightPosition.argtypes = [POINTER(echoPos_t), POINTER(echoObject), POINTER(echoThreadState)]
echoLightColor = libteem.echoLightColor
echoLightColor.restype = None
echoLightColor.argtypes = [POINTER(echoCol_t), echoPos_t, POINTER(echoObject), POINTER(echoRTParm), POINTER(echoThreadState)]
echoEnvmapLookup = libteem.echoEnvmapLookup
echoEnvmapLookup.restype = None
echoEnvmapLookup.argtypes = [POINTER(echoCol_t), POINTER(echoPos_t), POINTER(Nrrd)]
echoTextureLookup = libteem.echoTextureLookup
echoTextureLookup.restype = None
echoTextureLookup.argtypes = [POINTER(echoCol_t), POINTER(Nrrd), echoPos_t, echoPos_t, POINTER(echoRTParm)]
echoIntxMaterialColor = libteem.echoIntxMaterialColor
echoIntxMaterialColor.restype = None
echoIntxMaterialColor.argtypes = [POINTER(echoCol_t), POINTER(echoIntx), POINTER(echoRTParm)]
echoIntxLightColor = libteem.echoIntxLightColor
echoIntxLightColor.restype = None
echoIntxLightColor.argtypes = [POINTER(echoCol_t), POINTER(echoCol_t), POINTER(echoCol_t), echoCol_t, POINTER(echoIntx), POINTER(echoScene), POINTER(echoRTParm), POINTER(echoThreadState)]
echoIntxFuzzify = libteem.echoIntxFuzzify
echoIntxFuzzify.restype = None
echoIntxFuzzify.argtypes = [POINTER(echoIntx), echoCol_t, POINTER(echoThreadState)]
echoRayIntx = libteem.echoRayIntx
echoRayIntx.restype = c_int
echoRayIntx.argtypes = [POINTER(echoIntx), POINTER(echoRay), POINTER(echoScene), POINTER(echoRTParm), POINTER(echoThreadState)]
echoIntxColor = libteem.echoIntxColor
echoIntxColor.restype = None
echoIntxColor.argtypes = [POINTER(echoCol_t), POINTER(echoIntx), POINTER(echoScene), POINTER(echoRTParm), POINTER(echoThreadState)]
# Rendering driver.
echoThreadStateInit = libteem.echoThreadStateInit
echoThreadStateInit.restype = c_int
echoThreadStateInit.argtypes = [c_int, POINTER(echoThreadState), POINTER(echoRTParm), POINTER(echoGlobalState)]
echoJitterCompute = libteem.echoJitterCompute
echoJitterCompute.restype = None
echoJitterCompute.argtypes = [POINTER(echoRTParm), POINTER(echoThreadState)]
echoRayColor = libteem.echoRayColor
echoRayColor.restype = None
echoRayColor.argtypes = [POINTER(echoCol_t), POINTER(echoRay), POINTER(echoScene), POINTER(echoRTParm), POINTER(echoThreadState)]
echoChannelAverage = libteem.echoChannelAverage
echoChannelAverage.restype = None
echoChannelAverage.argtypes = [POINTER(echoCol_t), POINTER(echoRTParm), POINTER(echoThreadState)]
echoRTRenderCheck = libteem.echoRTRenderCheck
echoRTRenderCheck.restype = c_int
echoRTRenderCheck.argtypes = [POINTER(Nrrd), POINTER(limnCamera), POINTER(echoScene), POINTER(echoRTParm), POINTER(echoGlobalState)]
echoRTRender = libteem.echoRTRender
echoRTRender.restype = c_int
echoRTRender.argtypes = [POINTER(Nrrd), POINTER(limnCamera), POINTER(echoScene), POINTER(echoRTParm), POINTER(echoGlobalState)]
elfPresent = (c_int).in_dll(libteem, 'elfPresent')
class limnPolyData(Structure):
pass
class tijk_type_t(Structure):
pass
tijk_type = tijk_type_t
elfGlyphHOME = libteem.elfGlyphHOME
elfGlyphHOME.restype = c_float
elfGlyphHOME.argtypes = [POINTER(limnPolyData), c_char, POINTER(c_float), POINTER(tijk_type), STRING, c_char]
elfGlyphPolar = libteem.elfGlyphPolar
elfGlyphPolar.restype = c_float
elfGlyphPolar.argtypes = [POINTER(limnPolyData), c_char, POINTER(c_float), POINTER(tijk_type), STRING, c_char, c_char, POINTER(c_ubyte), POINTER(c_ubyte)]
elfGlyphKDE = libteem.elfGlyphKDE
elfGlyphKDE.restype = c_float
elfGlyphKDE.argtypes = [POINTER(limnPolyData), c_char, POINTER(c_float), c_size_t, c_float, c_char]
elfColorGlyphMaxima = libteem.elfColorGlyphMaxima
elfColorGlyphMaxima.restype = c_int
elfColorGlyphMaxima.argtypes = [POINTER(limnPolyData), c_char, POINTER(c_int), c_uint, POINTER(c_float), POINTER(tijk_type), c_char, c_float]
class elfMaximaContext(Structure):
pass
class tijk_refine_rank1_parm_t(Structure):
pass
tijk_refine_rank1_parm = tijk_refine_rank1_parm_t
elfMaximaContext._fields_ = [
('num', c_uint),
('type', POINTER(tijk_type)),
('parm', POINTER(tijk_refine_rank1_parm)),
('refine', c_int),
('neighbors', POINTER(c_int)),
('nbstride', c_uint),
('vertices_f', POINTER(c_float)),
('vertices_d', POINTER(c_double)),
]
elfMaximaContextNew = libteem.elfMaximaContextNew
elfMaximaContextNew.restype = POINTER(elfMaximaContext)
elfMaximaContextNew.argtypes = [POINTER(tijk_type), c_uint]
elfMaximaContextNix = libteem.elfMaximaContextNix
elfMaximaContextNix.restype = POINTER(elfMaximaContext)
elfMaximaContextNix.argtypes = [POINTER(elfMaximaContext)]
elfMaximaParmSet = libteem.elfMaximaParmSet
elfMaximaParmSet.restype = None
elfMaximaParmSet.argtypes = [POINTER(elfMaximaContext), POINTER(tijk_refine_rank1_parm)]
elfMaximaRefineSet = libteem.elfMaximaRefineSet
elfMaximaRefineSet.restype = None
elfMaximaRefineSet.argtypes = [POINTER(elfMaximaContext), c_int]
elfMaximaFind_d = libteem.elfMaximaFind_d
elfMaximaFind_d.restype = c_int
elfMaximaFind_d.argtypes = [POINTER(POINTER(c_double)), POINTER(POINTER(c_double)), POINTER(c_double), POINTER(elfMaximaContext)]
elfMaximaFind_f = libteem.elfMaximaFind_f
elfMaximaFind_f.restype = c_int
elfMaximaFind_f.argtypes = [POINTER(POINTER(c_float)), POINTER(POINTER(c_float)), POINTER(c_float), POINTER(elfMaximaContext)]
elfCart2Thetaphi_d = libteem.elfCart2Thetaphi_d
elfCart2Thetaphi_d.restype = None
elfCart2Thetaphi_d.argtypes = [POINTER(c_double), POINTER(c_double), c_uint]
elfCart2Thetaphi_f = libteem.elfCart2Thetaphi_f
elfCart2Thetaphi_f.restype = None
elfCart2Thetaphi_f.argtypes = [POINTER(c_float), POINTER(c_float), c_uint]
elfESHEstimMatrix_d = libteem.elfESHEstimMatrix_d
elfESHEstimMatrix_d.restype = c_int
elfESHEstimMatrix_d.argtypes = [POINTER(c_double), POINTER(c_double), c_uint, POINTER(c_double), c_uint, c_double, POINTER(c_double)]
elfESHEstimMatrix_f = libteem.elfESHEstimMatrix_f
elfESHEstimMatrix_f.restype = c_int
elfESHEstimMatrix_f.argtypes = [POINTER(c_float), POINTER(c_float), c_uint, POINTER(c_float), c_uint, c_float, POINTER(c_float)]
elfTenEstimMatrix_d = libteem.elfTenEstimMatrix_d
elfTenEstimMatrix_d.restype = c_int
elfTenEstimMatrix_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(tijk_type), POINTER(c_double), c_uint, POINTER(c_double)]
elfTenEstimMatrix_f = libteem.elfTenEstimMatrix_f
elfTenEstimMatrix_f.restype = c_int
elfTenEstimMatrix_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(tijk_type), POINTER(c_float), c_uint, POINTER(c_float)]
class elfSingleShellDWI(Structure):
pass
elfSingleShellDWI._fields_ = [
('b0', c_float),
('b', c_float),
('dwis', POINTER(c_float)),
('grads', POINTER(c_float)),
('dwino', c_uint),
]
elfKernelStick_f = libteem.elfKernelStick_f
elfKernelStick_f.restype = c_int
elfKernelStick_f.argtypes = [POINTER(c_float), c_uint, c_float, c_float, c_int]
elfBallStickODF_f = libteem.elfBallStickODF_f
elfBallStickODF_f.restype = c_int
elfBallStickODF_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(elfSingleShellDWI), POINTER(c_float), c_uint, c_int]
class elfBallStickParms(Structure):
    """ctypes mirror of the Teem C struct elfBallStickParms (ball-and-stick model parameters).

    Used by elfBallStickPredict_f / elfBallStickOptimize_f below.
    """
    pass
elfBallStickParms._pack_ = 4  # 4-byte packing to match the C compiler's layout of this struct
elfBallStickParms._fields_ = [
    ('d', c_float),
    ('fiberct', c_uint),       # presumably number of fiber compartments (fs has 4 slots) -- TODO confirm
    ('fs', c_float * 4),       # fixed-size array of 4 floats
    ('vs', c_float * 9),       # fixed-size array of 9 floats (3 vectors of 3? -- TODO confirm)
    ('stopreason', c_int),
    ('sqrerr', c_double),      # squared error, per the name -- TODO confirm semantics
    ('itr', c_double),
]
elfBallStickPredict_f = libteem.elfBallStickPredict_f
elfBallStickPredict_f.restype = c_int
elfBallStickPredict_f.argtypes = [POINTER(elfBallStickParms), POINTER(c_float), POINTER(tijk_type), c_uint, c_float, c_float]
elfBallStickOptimize_f = libteem.elfBallStickOptimize_f
elfBallStickOptimize_f.restype = c_int
elfBallStickOptimize_f.argtypes = [POINTER(elfBallStickParms), POINTER(elfSingleShellDWI)]
ellPresent = (c_int).in_dll(libteem, 'ellPresent')
ell_biff_key = (STRING).in_dll(libteem, 'ell_biff_key')
ell_cubic_root = (POINTER(airEnum)).in_dll(libteem, 'ell_cubic_root')
ell_debug = (c_int).in_dll(libteem, 'ell_debug')
ell_3m_print_f = libteem.ell_3m_print_f
ell_3m_print_f.restype = None
ell_3m_print_f.argtypes = [POINTER(FILE), POINTER(c_float)]
ell_3v_print_f = libteem.ell_3v_print_f
ell_3v_print_f.restype = None
ell_3v_print_f.argtypes = [POINTER(FILE), POINTER(c_float)]
ell_3m_print_d = libteem.ell_3m_print_d
ell_3m_print_d.restype = None
ell_3m_print_d.argtypes = [POINTER(FILE), POINTER(c_double)]
ell_3v_print_d = libteem.ell_3v_print_d
ell_3v_print_d.restype = None
ell_3v_print_d.argtypes = [POINTER(FILE), POINTER(c_double)]
ell_4m_print_f = libteem.ell_4m_print_f
ell_4m_print_f.restype = None
ell_4m_print_f.argtypes = [POINTER(FILE), POINTER(c_float)]
ell_4v_print_f = libteem.ell_4v_print_f
ell_4v_print_f.restype = None
ell_4v_print_f.argtypes = [POINTER(FILE), POINTER(c_float)]
ell_4m_print_d = libteem.ell_4m_print_d
ell_4m_print_d.restype = None
ell_4m_print_d.argtypes = [POINTER(FILE), POINTER(c_double)]
ell_4v_print_d = libteem.ell_4v_print_d
ell_4v_print_d.restype = None
ell_4v_print_d.argtypes = [POINTER(FILE), POINTER(c_double)]
ell_4v_norm_f = libteem.ell_4v_norm_f
ell_4v_norm_f.restype = None
ell_4v_norm_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3v_perp_f = libteem.ell_3v_perp_f
ell_3v_perp_f.restype = None
ell_3v_perp_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3v_perp_d = libteem.ell_3v_perp_d
ell_3v_perp_d.restype = None
ell_3v_perp_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_3mv_mul_f = libteem.ell_3mv_mul_f
ell_3mv_mul_f.restype = None
ell_3mv_mul_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_3mv_mul_d = libteem.ell_3mv_mul_d
ell_3mv_mul_d.restype = None
ell_3mv_mul_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_4mv_mul_f = libteem.ell_4mv_mul_f
ell_4mv_mul_f.restype = None
ell_4mv_mul_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_4mv_mul_d = libteem.ell_4mv_mul_d
ell_4mv_mul_d.restype = None
ell_4mv_mul_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3v_angle_f = libteem.ell_3v_angle_f
ell_3v_angle_f.restype = c_float
ell_3v_angle_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3v_angle_d = libteem.ell_3v_angle_d
ell_3v_angle_d.restype = c_double
ell_3v_angle_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_3v_area_spherical_d = libteem.ell_3v_area_spherical_d
ell_3v_area_spherical_d.restype = c_double
ell_3v_area_spherical_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3v_barycentric_spherical_d = libteem.ell_3v_barycentric_spherical_d
ell_3v_barycentric_spherical_d.restype = None
ell_3v_barycentric_spherical_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3m_mul_f = libteem.ell_3m_mul_f
ell_3m_mul_f.restype = None
ell_3m_mul_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_3m_mul_d = libteem.ell_3m_mul_d
ell_3m_mul_d.restype = None
ell_3m_mul_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3m_pre_mul_f = libteem.ell_3m_pre_mul_f
ell_3m_pre_mul_f.restype = None
ell_3m_pre_mul_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3m_pre_mul_d = libteem.ell_3m_pre_mul_d
ell_3m_pre_mul_d.restype = None
ell_3m_pre_mul_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_3m_post_mul_f = libteem.ell_3m_post_mul_f
ell_3m_post_mul_f.restype = None
ell_3m_post_mul_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3m_post_mul_d = libteem.ell_3m_post_mul_d
ell_3m_post_mul_d.restype = None
ell_3m_post_mul_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_3m_det_f = libteem.ell_3m_det_f
ell_3m_det_f.restype = c_float
ell_3m_det_f.argtypes = [POINTER(c_float)]
ell_3m_det_d = libteem.ell_3m_det_d
ell_3m_det_d.restype = c_double
ell_3m_det_d.argtypes = [POINTER(c_double)]
ell_3m_inv_f = libteem.ell_3m_inv_f
ell_3m_inv_f.restype = None
ell_3m_inv_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3m_inv_d = libteem.ell_3m_inv_d
ell_3m_inv_d.restype = None
ell_3m_inv_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_4m_mul_f = libteem.ell_4m_mul_f
ell_4m_mul_f.restype = None
ell_4m_mul_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_4m_mul_d = libteem.ell_4m_mul_d
ell_4m_mul_d.restype = None
ell_4m_mul_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_4m_pre_mul_f = libteem.ell_4m_pre_mul_f
ell_4m_pre_mul_f.restype = None
ell_4m_pre_mul_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_4m_post_mul_f = libteem.ell_4m_post_mul_f
ell_4m_post_mul_f.restype = None
ell_4m_post_mul_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_4m_post_mul_d = libteem.ell_4m_post_mul_d
ell_4m_post_mul_d.restype = None
ell_4m_post_mul_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_4m_det_f = libteem.ell_4m_det_f
ell_4m_det_f.restype = c_float
ell_4m_det_f.argtypes = [POINTER(c_float)]
ell_4m_det_d = libteem.ell_4m_det_d
ell_4m_det_d.restype = c_double
ell_4m_det_d.argtypes = [POINTER(c_double)]
ell_4m_inv_f = libteem.ell_4m_inv_f
ell_4m_inv_f.restype = None
ell_4m_inv_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_4m_inv_d = libteem.ell_4m_inv_d
ell_4m_inv_d.restype = None
ell_4m_inv_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_6m_mul_d = libteem.ell_6m_mul_d
ell_6m_mul_d.restype = None
ell_6m_mul_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3m_rotate_between_d = libteem.ell_3m_rotate_between_d
ell_3m_rotate_between_d.restype = None
ell_3m_rotate_between_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3m_to_q_f = libteem.ell_3m_to_q_f
ell_3m_to_q_f.restype = None
ell_3m_to_q_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3m_to_q_d = libteem.ell_3m_to_q_d
ell_3m_to_q_d.restype = None
ell_3m_to_q_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_4m_to_q_f = libteem.ell_4m_to_q_f
ell_4m_to_q_f.restype = None
ell_4m_to_q_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_4m_to_q_d = libteem.ell_4m_to_q_d
ell_4m_to_q_d.restype = None
ell_4m_to_q_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_q_to_3m_f = libteem.ell_q_to_3m_f
ell_q_to_3m_f.restype = None
ell_q_to_3m_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_q_to_3m_d = libteem.ell_q_to_3m_d
ell_q_to_3m_d.restype = None
ell_q_to_3m_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_q_to_4m_f = libteem.ell_q_to_4m_f
ell_q_to_4m_f.restype = None
ell_q_to_4m_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_q_to_4m_d = libteem.ell_q_to_4m_d
ell_q_to_4m_d.restype = None
ell_q_to_4m_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_q_to_aa_f = libteem.ell_q_to_aa_f
ell_q_to_aa_f.restype = c_float
ell_q_to_aa_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_q_to_aa_d = libteem.ell_q_to_aa_d
ell_q_to_aa_d.restype = c_double
ell_q_to_aa_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_aa_to_q_f = libteem.ell_aa_to_q_f
ell_aa_to_q_f.restype = None
ell_aa_to_q_f.argtypes = [POINTER(c_float), c_float, POINTER(c_float)]
ell_aa_to_q_d = libteem.ell_aa_to_q_d
ell_aa_to_q_d.restype = None
ell_aa_to_q_d.argtypes = [POINTER(c_double), c_double, POINTER(c_double)]
ell_aa_to_3m_f = libteem.ell_aa_to_3m_f
ell_aa_to_3m_f.restype = None
ell_aa_to_3m_f.argtypes = [POINTER(c_float), c_float, POINTER(c_float)]
ell_aa_to_3m_d = libteem.ell_aa_to_3m_d
ell_aa_to_3m_d.restype = None
ell_aa_to_3m_d.argtypes = [POINTER(c_double), c_double, POINTER(c_double)]
ell_aa_to_4m_f = libteem.ell_aa_to_4m_f
ell_aa_to_4m_f.restype = None
ell_aa_to_4m_f.argtypes = [POINTER(c_float), c_float, POINTER(c_float)]
ell_aa_to_4m_d = libteem.ell_aa_to_4m_d
ell_aa_to_4m_d.restype = None
ell_aa_to_4m_d.argtypes = [POINTER(c_double), c_double, POINTER(c_double)]
ell_3m_to_aa_f = libteem.ell_3m_to_aa_f
ell_3m_to_aa_f.restype = c_float
ell_3m_to_aa_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_3m_to_aa_d = libteem.ell_3m_to_aa_d
ell_3m_to_aa_d.restype = c_double
ell_3m_to_aa_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_4m_to_aa_f = libteem.ell_4m_to_aa_f
ell_4m_to_aa_f.restype = c_float
ell_4m_to_aa_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_4m_to_aa_d = libteem.ell_4m_to_aa_d
ell_4m_to_aa_d.restype = c_double
ell_4m_to_aa_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_q_mul_f = libteem.ell_q_mul_f
ell_q_mul_f.restype = None
ell_q_mul_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_q_mul_d = libteem.ell_q_mul_d
ell_q_mul_d.restype = None
ell_q_mul_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_q_inv_f = libteem.ell_q_inv_f
ell_q_inv_f.restype = None
ell_q_inv_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_q_inv_d = libteem.ell_q_inv_d
ell_q_inv_d.restype = None
ell_q_inv_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_q_pow_f = libteem.ell_q_pow_f
ell_q_pow_f.restype = None
ell_q_pow_f.argtypes = [POINTER(c_float), POINTER(c_float), c_float]
ell_q_pow_d = libteem.ell_q_pow_d
ell_q_pow_d.restype = None
ell_q_pow_d.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
ell_q_div_f = libteem.ell_q_div_f
ell_q_div_f.restype = None
ell_q_div_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_q_div_d = libteem.ell_q_div_d
ell_q_div_d.restype = None
ell_q_div_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_q_exp_f = libteem.ell_q_exp_f
ell_q_exp_f.restype = None
ell_q_exp_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_q_exp_d = libteem.ell_q_exp_d
ell_q_exp_d.restype = None
ell_q_exp_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_q_log_f = libteem.ell_q_log_f
ell_q_log_f.restype = None
ell_q_log_f.argtypes = [POINTER(c_float), POINTER(c_float)]
ell_q_log_d = libteem.ell_q_log_d
ell_q_log_d.restype = None
ell_q_log_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_q_3v_rotate_f = libteem.ell_q_3v_rotate_f
ell_q_3v_rotate_f.restype = None
ell_q_3v_rotate_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_q_3v_rotate_d = libteem.ell_q_3v_rotate_d
ell_q_3v_rotate_d.restype = None
ell_q_3v_rotate_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_q_4v_rotate_f = libteem.ell_q_4v_rotate_f
ell_q_4v_rotate_f.restype = None
ell_q_4v_rotate_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
ell_q_4v_rotate_d = libteem.ell_q_4v_rotate_d
ell_q_4v_rotate_d.restype = None
ell_q_4v_rotate_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_q_avg4_d = libteem.ell_q_avg4_d
ell_q_avg4_d.restype = c_int
ell_q_avg4_d.argtypes = [POINTER(c_double), POINTER(c_uint), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, c_uint]
ell_q_avgN_d = libteem.ell_q_avgN_d
ell_q_avgN_d.restype = c_int
ell_q_avgN_d.argtypes = [POINTER(c_double), POINTER(c_uint), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_uint, c_double, c_uint]
ell_Nm_check = libteem.ell_Nm_check
ell_Nm_check.restype = c_int
ell_Nm_check.argtypes = [POINTER(Nrrd), c_int]
ell_Nm_tran = libteem.ell_Nm_tran
ell_Nm_tran.restype = c_int
ell_Nm_tran.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
ell_Nm_mul = libteem.ell_Nm_mul
ell_Nm_mul.restype = c_int
ell_Nm_mul.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
ell_Nm_inv = libteem.ell_Nm_inv
ell_Nm_inv.restype = c_int
ell_Nm_inv.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
ell_Nm_pseudo_inv = libteem.ell_Nm_pseudo_inv
ell_Nm_pseudo_inv.restype = c_int
ell_Nm_pseudo_inv.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
ell_Nm_wght_pseudo_inv = libteem.ell_Nm_wght_pseudo_inv
ell_Nm_wght_pseudo_inv.restype = c_int
ell_Nm_wght_pseudo_inv.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
ell_cubic = libteem.ell_cubic
ell_cubic.restype = c_int
ell_cubic.argtypes = [POINTER(c_double), c_double, c_double, c_double, c_int]
ell_2m_1d_nullspace_d = libteem.ell_2m_1d_nullspace_d
ell_2m_1d_nullspace_d.restype = None
ell_2m_1d_nullspace_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_3m_1d_nullspace_d = libteem.ell_3m_1d_nullspace_d
ell_3m_1d_nullspace_d.restype = None
ell_3m_1d_nullspace_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_3m_2d_nullspace_d = libteem.ell_3m_2d_nullspace_d
ell_3m_2d_nullspace_d.restype = None
ell_3m_2d_nullspace_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3m_eigenvalues_d = libteem.ell_3m_eigenvalues_d
ell_3m_eigenvalues_d.restype = c_int
ell_3m_eigenvalues_d.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
ell_3m_eigensolve_d = libteem.ell_3m_eigensolve_d
ell_3m_eigensolve_d.restype = c_int
ell_3m_eigensolve_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
ell_3m2sub_eigenvalues_d = libteem.ell_3m2sub_eigenvalues_d
ell_3m2sub_eigenvalues_d.restype = c_int
ell_3m2sub_eigenvalues_d.argtypes = [POINTER(c_double), POINTER(c_double)]
ell_3m2sub_eigensolve_d = libteem.ell_3m2sub_eigensolve_d
ell_3m2sub_eigensolve_d.restype = c_int
ell_3m2sub_eigensolve_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
ell_3m_svd_d = libteem.ell_3m_svd_d
ell_3m_svd_d.restype = c_int
ell_3m_svd_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
ell_6ms_eigensolve_d = libteem.ell_6ms_eigensolve_d
ell_6ms_eigensolve_d.restype = c_int
ell_6ms_eigensolve_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
class gageItemEntry(Structure):
    """ctypes mirror of the Teem C struct gageItemEntry.

    One row of a gageKind's item table (see gageKind_t.table below).
    """
    pass
gageItemEntry._fields_ = [
    ('enumVal', c_int),
    ('answerLength', c_uint),
    ('needDeriv', c_int),
    ('prereq', c_int * 8),     # fixed array of up to 8 prerequisite item ids -- TODO confirm semantics
    ('parentItem', c_int),
    ('parentIndex', c_int),
    ('needData', c_int),
]
class gageShape_t(Structure):
    """ctypes mirror of the Teem C struct gageShape_t (volume geometry/orientation)."""
    pass
gageShape_t._pack_ = 4  # match the C compiler's 4-byte packing
gageShape_t._fields_ = [
    ('defaultCenter', c_int),
    ('orientationFromSpacing', c_int),
    ('center', c_int),
    ('fromOrientation', c_int),
    ('size', c_uint * 3),              # per-axis sample counts
    ('spacing', c_double * 3),         # per-axis sample spacing
    ('ItoW', c_double * 16),           # 4x4 index-to-world transform (flattened)
    ('WtoI', c_double * 16),           # 4x4 world-to-index transform (flattened)
    ('ItoWSubInvTransp', c_double * 9),# 3x3 matrix (flattened)
    ('ItoWSubInv', c_double * 9),      # 3x3 matrix (flattened)
]
# C-style alias: Teem's typedef "gageShape" for "struct gageShape_t".
gageShape = gageShape_t
class gageParm_t(Structure):
    """ctypes mirror of the Teem C struct gageParm_t (per-context probing parameters).

    Defaults for most of these are exposed as gageDef* globals further below.
    """
    pass
gageParm_t._pack_ = 4  # match the C compiler's 4-byte packing
gageParm_t._fields_ = [
    ('renormalize', c_int),
    ('checkIntegrals', c_int),
    ('k3pack', c_int),
    ('gradMagCurvMin', c_double),
    ('kernelIntegralNearZero', c_double),
    ('stackNormalizeDerivBias', c_double),
    ('curvNormalSide', c_int),
    ('defaultCenter', c_int),
    ('stackUse', c_int),
    ('stackNormalizeRecon', c_int),
    ('stackNormalizeDeriv', c_int),
    ('orientationFromSpacing', c_int),
    ('generateErrStr', c_int),
    ('twoDimZeroZ', c_int),
]
# C-style alias: Teem's typedef "gageParm" for "struct gageParm_t".
gageParm = gageParm_t
class gagePoint_t(Structure):
    """ctypes mirror of the Teem C struct gagePoint_t (last probed location state)."""
    pass
gagePoint_t._pack_ = 4  # match the C compiler's 4-byte packing
gagePoint_t._fields_ = [
    ('frac', c_double * 4),           # fractional coordinates (4th entry presumably stack/scale -- TODO confirm)
    ('idx', c_uint * 4),              # integer sample indices
    ('stackFwNonZeroNum', c_uint),
]
# C-style alias: Teem's typedef "gagePoint" for "struct gagePoint_t".
gagePoint = gagePoint_t
# Forward declarations: both structs are referenced by gageContext_t below;
# their _fields_ layouts are attached later in the file (NrrdKernelSpec near
# the gageOptimSig bindings, gagePerVolume_t after gageKind_t).
class NrrdKernelSpec(Structure):
    pass
class gagePerVolume_t(Structure):
    pass
# Field layout for gageContext_t (the class itself is declared earlier in the
# file, before this chunk). This is the central per-probe state of the gage API.
gageContext_t._pack_ = 4  # match the C compiler's 4-byte packing
gageContext_t._fields_ = [
    ('verbose', c_int),
    ('parm', gageParm),                          # embedded parameter struct (by value)
    ('ksp', POINTER(NrrdKernelSpec) * 8),        # one kernel spec slot per gageKernel* role -- TODO confirm
    ('pvl', POINTER(POINTER(gagePerVolume_t))),  # array of attached per-volume states
    ('pvlNum', c_uint),
    ('pvlArr', POINTER(airArray)),
    ('shape', POINTER(gageShape)),
    ('stackPos', POINTER(c_double)),
    ('stackFsl', POINTER(c_double)),
    ('stackFw', POINTER(c_double)),
    ('flag', c_int * 7),
    ('needD', c_int * 3),                        # which derivative orders are needed -- TODO confirm
    ('needK', c_int * 8),                        # which kernels are needed -- TODO confirm
    ('radius', c_uint),
    ('fsl', POINTER(c_double)),
    ('fw', POINTER(c_double)),
    ('off', POINTER(c_uint)),
    ('point', gagePoint),                        # embedded last-probed-point state (by value)
    ('errStr', c_char * 513),                    # fixed-size C error-message buffer
    ('errNum', c_int),
    ('edgeFrac', c_double),
]
class gageKind_t(Structure):
    """ctypes mirror of the Teem C struct gageKind_t (a "kind" of probeable volume).

    Holds the item table plus C callback slots; declared empty first because
    its _fields_ (below) reference gageKind_t itself in the CFUNCTYPE signatures.
    """
    pass
# C-style alias: Teem's typedef "gagePerVolume" for "struct gagePerVolume_t".
gagePerVolume = gagePerVolume_t
gageKind_t._fields_ = [
    ('dynamicAlloc', c_int),
    ('name', c_char * 129),      # fixed-size C name buffer
    ('enm', POINTER(airEnum)),
    ('baseDim', c_uint),
    ('valLen', c_uint),
    ('itemMax', c_int),
    ('table', POINTER(gageItemEntry)),  # the kind's item table (rows described by gageItemEntry)
    # C function-pointer slots implementing this kind's behavior:
    ('iv3Print', CFUNCTYPE(None, POINTER(FILE), POINTER(gageContext), POINTER(gagePerVolume))),
    ('filter', CFUNCTYPE(None, POINTER(gageContext), POINTER(gagePerVolume))),
    ('answer', CFUNCTYPE(None, POINTER(gageContext), POINTER(gagePerVolume))),
    ('pvlDataNew', CFUNCTYPE(c_void_p, POINTER(gageKind_t))),
    ('pvlDataCopy', CFUNCTYPE(c_void_p, POINTER(gageKind_t), c_void_p)),
    ('pvlDataNix', CFUNCTYPE(c_void_p, POINTER(gageKind_t), c_void_p)),
    ('pvlDataUpdate', CFUNCTYPE(c_int, POINTER(gageKind_t), POINTER(gageContext), POINTER(gagePerVolume), c_void_p)),
    ('data', c_void_p),          # opaque kind-specific payload
]
# Field layout for the gagePerVolume_t forward-declared above
# (state attached to one input Nrrd within a gageContext).
gagePerVolume_t._fields_ = [
    ('verbose', c_int),
    ('kind', POINTER(gageKind_t)),
    ('query', gageQuery),
    ('needD', c_int * 3),
    ('nin', POINTER(Nrrd)),              # the input volume this state belongs to
    ('flag', c_int * 4),
    ('iv3', POINTER(c_double)),          # intermediate value caches -- TODO confirm semantics
    ('iv2', POINTER(c_double)),
    ('iv1', POINTER(c_double)),
    ('lup', CFUNCTYPE(c_double, c_void_p, c_size_t)),  # C value-lookup callback
    ('answer', POINTER(c_double)),
    ('directAnswer', POINTER(POINTER(c_double))),
    ('data', c_void_p),                  # opaque kind-specific payload
]
# C-style alias: Teem's typedef "gageKind" for "struct gageKind_t".
gageKind = gageKind_t
class gageItemSpec(Structure):
    """ctypes mirror of the Teem C struct gageItemSpec: one (kind, item) pair."""
    pass
gageItemSpec._fields_ = [
    ('kind', POINTER(gageKind)),
    ('item', c_int),
]
class gageItemPack(Structure):
    """ctypes mirror of the Teem C struct gageItemPack: a kind plus a fixed set of 12 items.

    The 12 slots presumably correspond to the gageItemPackPart enum exposed below -- TODO confirm.
    """
    pass
gageItemPack._fields_ = [
    ('kind', POINTER(gageKind)),
    ('item', c_int * 12),
]
class gageStackBlurParm(Structure):
    """ctypes mirror of the Teem C struct gageStackBlurParm (scale-stack blurring parameters).

    Configured via the gageStackBlurParm*Set functions bound further below.
    """
    pass
# Forward declaration; its layout is attached later, next to
# gageStackBlurParmBoundarySpecSet.
class NrrdBoundarySpec(Structure):
    pass
gageStackBlurParm._pack_ = 4  # match the C compiler's 4-byte packing
gageStackBlurParm._fields_ = [
    ('num', c_uint),                    # number of stack samples -- TODO confirm
    ('sigmaRange', c_double * 2),       # [min, max] sigma
    ('sigmaSampling', c_int),           # value from the gageSigmaSampling enum -- TODO confirm
    ('sigma', POINTER(c_double)),       # per-sample sigma values
    ('kspec', POINTER(NrrdKernelSpec)),
    ('renormalize', c_int),
    ('bspec', POINTER(NrrdBoundarySpec)),
    ('oneDim', c_int),
    ('needSpatialBlur', c_int),
    ('verbose', c_int),
    ('dgGoodSigmaMax', c_double),
]
class gageOptimSigContext(Structure):
    """ctypes mirror of the Teem C struct gageOptimSigContext (sigma-optimization state).

    Created/destroyed and driven by the gageOptimSig* functions bound below.
    """
    pass
gageOptimSigContext._pack_ = 4  # match the C compiler's 4-byte packing
gageOptimSigContext._fields_ = [
    # --- problem specification ---
    ('dim', c_uint),
    ('sampleNumMax', c_uint),
    ('trueImgNum', c_uint),
    ('sigmaRange', c_double * 2),        # [min, max] sigma
    ('cutoff', c_double),
    ('kssSpec', POINTER(NrrdKernelSpec)),
    ('sampleNum', c_uint),
    ('maxIter', c_uint),
    ('imgMeasr', c_int),
    ('allMeasr', c_int),
    ('convEps', c_double),               # convergence epsilon -- TODO confirm
    # --- working state / buffers ---
    ('sx', c_uint),
    ('sy', c_uint),
    ('sz', c_uint),                      # sx/sy/sz: per-axis sizes -- TODO confirm
    ('nerr', POINTER(Nrrd)),
    ('ninterp', POINTER(Nrrd)),
    ('ndiff', POINTER(Nrrd)),
    ('rhoRange', c_double * 2),
    ('kloc', POINTER(c_double)),
    ('kern', POINTER(c_double)),
    ('ktmp1', POINTER(c_double)),
    ('ktmp2', POINTER(c_double)),
    ('kone', c_double * 1),              # single-element array, matching the C declaration
    ('gctx', POINTER(gageContext)),
    ('pvlBase', POINTER(gagePerVolume)),
    ('pvlSS', POINTER(POINTER(gagePerVolume))),
    ('nsampleImg', POINTER(POINTER(Nrrd))),
    ('sampleSigma', POINTER(c_double)),
    ('sampleRho', POINTER(c_double)),
    ('sampleTmp', POINTER(c_double)),
    ('sampleErrMax', POINTER(c_double)),
    ('step', POINTER(c_double)),
    # --- result ---
    ('finalErr', c_double),
]
gageBiffKey = (STRING).in_dll(libteem, 'gageBiffKey')
gageDefVerbose = (c_int).in_dll(libteem, 'gageDefVerbose')
gageDefGradMagCurvMin = (c_double).in_dll(libteem, 'gageDefGradMagCurvMin')
gageDefRenormalize = (c_int).in_dll(libteem, 'gageDefRenormalize')
gageDefCheckIntegrals = (c_int).in_dll(libteem, 'gageDefCheckIntegrals')
gageDefK3Pack = (c_int).in_dll(libteem, 'gageDefK3Pack')
gageDefCurvNormalSide = (c_int).in_dll(libteem, 'gageDefCurvNormalSide')
gageDefKernelIntegralNearZero = (c_double).in_dll(libteem, 'gageDefKernelIntegralNearZero')
gageDefDefaultCenter = (c_int).in_dll(libteem, 'gageDefDefaultCenter')
gageDefStackUse = (c_int).in_dll(libteem, 'gageDefStackUse')
gageDefStackNormalizeRecon = (c_int).in_dll(libteem, 'gageDefStackNormalizeRecon')
gageDefStackNormalizeDeriv = (c_int).in_dll(libteem, 'gageDefStackNormalizeDeriv')
gageDefStackNormalizeDerivBias = (c_double).in_dll(libteem, 'gageDefStackNormalizeDerivBias')
gageDefOrientationFromSpacing = (c_int).in_dll(libteem, 'gageDefOrientationFromSpacing')
gageDefGenerateErrStr = (c_int).in_dll(libteem, 'gageDefGenerateErrStr')
gageDefTwoDimZeroZ = (c_int).in_dll(libteem, 'gageDefTwoDimZeroZ')
gagePresent = (c_int).in_dll(libteem, 'gagePresent')
gageZeroNormal = (c_double * 3).in_dll(libteem, 'gageZeroNormal')
gageErr = (POINTER(airEnum)).in_dll(libteem, 'gageErr')
gageKernel = (POINTER(airEnum)).in_dll(libteem, 'gageKernel')
gageItemPackPart = (POINTER(airEnum)).in_dll(libteem, 'gageItemPackPart')
gageParmReset = libteem.gageParmReset
gageParmReset.restype = None
gageParmReset.argtypes = [POINTER(gageParm)]
gagePointReset = libteem.gagePointReset
gagePointReset.restype = None
gagePointReset.argtypes = [POINTER(gagePoint)]
gageItemSpecNew = libteem.gageItemSpecNew
gageItemSpecNew.restype = POINTER(gageItemSpec)
gageItemSpecNew.argtypes = []
gageItemSpecInit = libteem.gageItemSpecInit
gageItemSpecInit.restype = None
gageItemSpecInit.argtypes = [POINTER(gageItemSpec)]
gageItemSpecNix = libteem.gageItemSpecNix
gageItemSpecNix.restype = POINTER(gageItemSpec)
gageItemSpecNix.argtypes = [POINTER(gageItemSpec)]
gageKindCheck = libteem.gageKindCheck
gageKindCheck.restype = c_int
gageKindCheck.argtypes = [POINTER(gageKind)]
gageKindTotalAnswerLength = libteem.gageKindTotalAnswerLength
gageKindTotalAnswerLength.restype = c_uint
gageKindTotalAnswerLength.argtypes = [POINTER(gageKind)]
gageKindAnswerLength = libteem.gageKindAnswerLength
gageKindAnswerLength.restype = c_uint
gageKindAnswerLength.argtypes = [POINTER(gageKind), c_int]
gageKindAnswerOffset = libteem.gageKindAnswerOffset
gageKindAnswerOffset.restype = c_int
gageKindAnswerOffset.argtypes = [POINTER(gageKind), c_int]
gageKindVolumeCheck = libteem.gageKindVolumeCheck
gageKindVolumeCheck.restype = c_int
gageKindVolumeCheck.argtypes = [POINTER(gageKind), POINTER(Nrrd)]
gageQueryPrint = libteem.gageQueryPrint
gageQueryPrint.restype = None
gageQueryPrint.argtypes = [POINTER(FILE), POINTER(gageKind), POINTER(c_ubyte)]
gageScl3PFilter_t = CFUNCTYPE(None, POINTER(gageShape), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_int))
gageScl3PFilter2 = libteem.gageScl3PFilter2
gageScl3PFilter2.restype = None
gageScl3PFilter2.argtypes = [POINTER(gageShape), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_int)]
gageScl3PFilter4 = libteem.gageScl3PFilter4
gageScl3PFilter4.restype = None
gageScl3PFilter4.argtypes = [POINTER(gageShape), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_int)]
gageScl3PFilter6 = libteem.gageScl3PFilter6
gageScl3PFilter6.restype = None
gageScl3PFilter6.argtypes = [POINTER(gageShape), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_int)]
gageScl3PFilter8 = libteem.gageScl3PFilter8
gageScl3PFilter8.restype = None
gageScl3PFilter8.argtypes = [POINTER(gageShape), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_int)]
gageScl3PFilterN = libteem.gageScl3PFilterN
gageScl3PFilterN.restype = None
gageScl3PFilterN.argtypes = [POINTER(gageShape), c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_int)]
gageScl = (POINTER(airEnum)).in_dll(libteem, 'gageScl')
gageKindScl = (POINTER(gageKind)).in_dll(libteem, 'gageKindScl')
gageItemPackSclValue = (POINTER(gageItemPack)).in_dll(libteem, 'gageItemPackSclValue')
gageVec = (POINTER(airEnum)).in_dll(libteem, 'gageVec')
gageKindVec = (POINTER(gageKind)).in_dll(libteem, 'gageKindVec')
gageShapeReset = libteem.gageShapeReset
gageShapeReset.restype = None
gageShapeReset.argtypes = [POINTER(gageShape)]
gageShapeNew = libteem.gageShapeNew
gageShapeNew.restype = POINTER(gageShape)
gageShapeNew.argtypes = []
gageShapeCopy = libteem.gageShapeCopy
gageShapeCopy.restype = POINTER(gageShape)
gageShapeCopy.argtypes = [POINTER(gageShape)]
gageShapeNix = libteem.gageShapeNix
gageShapeNix.restype = POINTER(gageShape)
gageShapeNix.argtypes = [POINTER(gageShape)]
gageShapeSet = libteem.gageShapeSet
gageShapeSet.restype = c_int
gageShapeSet.argtypes = [POINTER(gageShape), POINTER(Nrrd), c_int]
gageShapeWtoI = libteem.gageShapeWtoI
gageShapeWtoI.restype = None
gageShapeWtoI.argtypes = [POINTER(gageShape), POINTER(c_double), POINTER(c_double)]
gageShapeItoW = libteem.gageShapeItoW
gageShapeItoW.restype = None
gageShapeItoW.argtypes = [POINTER(gageShape), POINTER(c_double), POINTER(c_double)]
gageShapeEqual = libteem.gageShapeEqual
gageShapeEqual.restype = c_int
gageShapeEqual.argtypes = [POINTER(gageShape), STRING, POINTER(gageShape), STRING]
gageShapeBoundingBox = libteem.gageShapeBoundingBox
gageShapeBoundingBox.restype = None
gageShapeBoundingBox.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(gageShape)]
gageVolumeCheck = libteem.gageVolumeCheck
gageVolumeCheck.restype = c_int
gageVolumeCheck.argtypes = [POINTER(gageContext), POINTER(Nrrd), POINTER(gageKind)]
gagePerVolumeNew = libteem.gagePerVolumeNew
gagePerVolumeNew.restype = POINTER(gagePerVolume)
gagePerVolumeNew.argtypes = [POINTER(gageContext), POINTER(Nrrd), POINTER(gageKind)]
gagePerVolumeNix = libteem.gagePerVolumeNix
gagePerVolumeNix.restype = POINTER(gagePerVolume)
gagePerVolumeNix.argtypes = [POINTER(gagePerVolume)]
gageAnswerPointer = libteem.gageAnswerPointer
gageAnswerPointer.restype = POINTER(c_double)
gageAnswerPointer.argtypes = [POINTER(gageContext), POINTER(gagePerVolume), c_int]
gageAnswerLength = libteem.gageAnswerLength
gageAnswerLength.restype = c_uint
gageAnswerLength.argtypes = [POINTER(gageContext), POINTER(gagePerVolume), c_int]
gageQueryReset = libteem.gageQueryReset
gageQueryReset.restype = c_int
gageQueryReset.argtypes = [POINTER(gageContext), POINTER(gagePerVolume)]
gageQuerySet = libteem.gageQuerySet
gageQuerySet.restype = c_int
gageQuerySet.argtypes = [POINTER(gageContext), POINTER(gagePerVolume), POINTER(c_ubyte)]
gageQueryAdd = libteem.gageQueryAdd
gageQueryAdd.restype = c_int
gageQueryAdd.argtypes = [POINTER(gageContext), POINTER(gagePerVolume), POINTER(c_ubyte)]
gageQueryItemOn = libteem.gageQueryItemOn
gageQueryItemOn.restype = c_int
gageQueryItemOn.argtypes = [POINTER(gageContext), POINTER(gagePerVolume), c_int]
gageOptimSigSet = libteem.gageOptimSigSet
gageOptimSigSet.restype = c_int
gageOptimSigSet.argtypes = [POINTER(c_double), c_uint, c_uint]
gageOptimSigContextNew = libteem.gageOptimSigContextNew
gageOptimSigContextNew.restype = POINTER(gageOptimSigContext)
gageOptimSigContextNew.argtypes = [c_uint, c_uint, c_uint, c_double, c_double, c_double]
gageOptimSigContextNix = libteem.gageOptimSigContextNix
gageOptimSigContextNix.restype = POINTER(gageOptimSigContext)
gageOptimSigContextNix.argtypes = [POINTER(gageOptimSigContext)]
# Field layout for the NrrdKernelSpec forward-declared earlier:
# a kernel plus its fixed-size parameter vector.
NrrdKernelSpec._pack_ = 4  # match the C compiler's 4-byte packing
NrrdKernelSpec._fields_ = [
    ('kernel', POINTER(NrrdKernel)),
    ('parm', c_double * 8),  # up to 8 kernel parameters
]
gageOptimSigCalculate = libteem.gageOptimSigCalculate
gageOptimSigCalculate.restype = c_int
gageOptimSigCalculate.argtypes = [POINTER(gageOptimSigContext), POINTER(c_double), c_uint, POINTER(NrrdKernelSpec), c_int, c_int, c_uint, c_double]
gageOptimSigErrorPlot = libteem.gageOptimSigErrorPlot
gageOptimSigErrorPlot.restype = c_int
gageOptimSigErrorPlot.argtypes = [POINTER(gageOptimSigContext), POINTER(Nrrd), POINTER(c_double), c_uint, POINTER(NrrdKernelSpec), c_int]
gageOptimSigErrorPlotSliding = libteem.gageOptimSigErrorPlotSliding
gageOptimSigErrorPlotSliding.restype = c_int
gageOptimSigErrorPlotSliding.argtypes = [POINTER(gageOptimSigContext), POINTER(Nrrd), c_double, c_uint, POINTER(NrrdKernelSpec), c_int]
gageStackWtoI = libteem.gageStackWtoI
gageStackWtoI.restype = c_double
gageStackWtoI.argtypes = [POINTER(gageContext), c_double, POINTER(c_int)]
gageStackItoW = libteem.gageStackItoW
gageStackItoW.restype = c_double
gageStackItoW.argtypes = [POINTER(gageContext), c_double, POINTER(c_int)]
gageStackPerVolumeNew = libteem.gageStackPerVolumeNew
gageStackPerVolumeNew.restype = c_int
gageStackPerVolumeNew.argtypes = [POINTER(gageContext), POINTER(POINTER(gagePerVolume)), POINTER(POINTER(Nrrd)), c_uint, POINTER(gageKind)]
gageStackPerVolumeAttach = libteem.gageStackPerVolumeAttach
gageStackPerVolumeAttach.restype = c_int
gageStackPerVolumeAttach.argtypes = [POINTER(gageContext), POINTER(gagePerVolume), POINTER(POINTER(gagePerVolume)), POINTER(c_double), c_uint]
gageStackProbe = libteem.gageStackProbe
gageStackProbe.restype = c_int
gageStackProbe.argtypes = [POINTER(gageContext), c_double, c_double, c_double, c_double]
gageStackProbeSpace = libteem.gageStackProbeSpace
gageStackProbeSpace.restype = c_int
gageStackProbeSpace.argtypes = [POINTER(gageContext), c_double, c_double, c_double, c_double, c_int, c_int]
gageSigmaSampling = (POINTER(airEnum)).in_dll(libteem, 'gageSigmaSampling')
gageStackBlurParmNew = libteem.gageStackBlurParmNew
gageStackBlurParmNew.restype = POINTER(gageStackBlurParm)
gageStackBlurParmNew.argtypes = []
gageStackBlurParmCopy = libteem.gageStackBlurParmCopy
gageStackBlurParmCopy.restype = c_int
gageStackBlurParmCopy.argtypes = [POINTER(gageStackBlurParm), POINTER(gageStackBlurParm)]
gageStackBlurParmInit = libteem.gageStackBlurParmInit
gageStackBlurParmInit.restype = None
gageStackBlurParmInit.argtypes = [POINTER(gageStackBlurParm)]
gageStackBlurParmNix = libteem.gageStackBlurParmNix
gageStackBlurParmNix.restype = POINTER(gageStackBlurParm)
gageStackBlurParmNix.argtypes = [POINTER(gageStackBlurParm)]
gageStackBlurParmCompare = libteem.gageStackBlurParmCompare
gageStackBlurParmCompare.restype = c_int
gageStackBlurParmCompare.argtypes = [POINTER(gageStackBlurParm), STRING, POINTER(gageStackBlurParm), STRING, POINTER(c_int), STRING]
gageStackBlurParmScaleSet = libteem.gageStackBlurParmScaleSet
gageStackBlurParmScaleSet.restype = c_int
gageStackBlurParmScaleSet.argtypes = [POINTER(gageStackBlurParm), c_uint, c_double, c_double, c_int, c_int]
gageStackBlurParmSigmaSet = libteem.gageStackBlurParmSigmaSet
gageStackBlurParmSigmaSet.restype = c_int
gageStackBlurParmSigmaSet.argtypes = [POINTER(gageStackBlurParm), c_uint, c_double, c_double, c_int]
gageStackBlurParmKernelSet = libteem.gageStackBlurParmKernelSet
gageStackBlurParmKernelSet.restype = c_int
gageStackBlurParmKernelSet.argtypes = [POINTER(gageStackBlurParm), POINTER(NrrdKernelSpec)]
gageStackBlurParmRenormalizeSet = libteem.gageStackBlurParmRenormalizeSet
gageStackBlurParmRenormalizeSet.restype = c_int
gageStackBlurParmRenormalizeSet.argtypes = [POINTER(gageStackBlurParm), c_int]
gageStackBlurParmDgGoodSigmaMaxSet = libteem.gageStackBlurParmDgGoodSigmaMaxSet
gageStackBlurParmDgGoodSigmaMaxSet.restype = c_int
gageStackBlurParmDgGoodSigmaMaxSet.argtypes = [POINTER(gageStackBlurParm), c_double]
gageStackBlurParmBoundarySet = libteem.gageStackBlurParmBoundarySet
gageStackBlurParmBoundarySet.restype = c_int
gageStackBlurParmBoundarySet.argtypes = [POINTER(gageStackBlurParm), c_int, c_double]
# Field layout for the NrrdBoundarySpec forward-declared earlier:
# a boundary-handling method plus the pad value it may need.
NrrdBoundarySpec._pack_ = 4  # match the C compiler's 4-byte packing
NrrdBoundarySpec._fields_ = [
    ('boundary', c_int),     # boundary-method enum value -- TODO confirm which enum
    ('padValue', c_double),  # value used when padding -- TODO confirm
]
gageStackBlurParmBoundarySpecSet = libteem.gageStackBlurParmBoundarySpecSet
gageStackBlurParmBoundarySpecSet.restype = c_int
gageStackBlurParmBoundarySpecSet.argtypes = [POINTER(gageStackBlurParm), POINTER(NrrdBoundarySpec)]
gageStackBlurParmNeedSpatialBlurSet = libteem.gageStackBlurParmNeedSpatialBlurSet
gageStackBlurParmNeedSpatialBlurSet.restype = c_int
gageStackBlurParmNeedSpatialBlurSet.argtypes = [POINTER(gageStackBlurParm), c_int]
gageStackBlurParmVerboseSet = libteem.gageStackBlurParmVerboseSet
gageStackBlurParmVerboseSet.restype = c_int
gageStackBlurParmVerboseSet.argtypes = [POINTER(gageStackBlurParm), c_int]
gageStackBlurParmOneDimSet = libteem.gageStackBlurParmOneDimSet
gageStackBlurParmOneDimSet.restype = c_int
gageStackBlurParmOneDimSet.argtypes = [POINTER(gageStackBlurParm), c_int]
gageStackBlurParmCheck = libteem.gageStackBlurParmCheck
gageStackBlurParmCheck.restype = c_int
gageStackBlurParmCheck.argtypes = [POINTER(gageStackBlurParm)]
gageStackBlurParmParse = libteem.gageStackBlurParmParse
gageStackBlurParmParse.restype = c_int
gageStackBlurParmParse.argtypes = [POINTER(gageStackBlurParm), POINTER(c_int), POINTER(STRING), STRING]
gageHestStackBlurParm = (POINTER(hestCB)).in_dll(libteem, 'gageHestStackBlurParm')
gageStackBlurParmSprint = libteem.gageStackBlurParmSprint
gageStackBlurParmSprint.restype = c_int
gageStackBlurParmSprint.argtypes = [STRING, POINTER(gageStackBlurParm), POINTER(c_int), STRING]
gageStackBlur = libteem.gageStackBlur
gageStackBlur.restype = c_int
gageStackBlur.argtypes = [POINTER(POINTER(Nrrd)), POINTER(gageStackBlurParm), POINTER(Nrrd), POINTER(gageKind)]
gageStackBlurCheck = libteem.gageStackBlurCheck
gageStackBlurCheck.restype = c_int
gageStackBlurCheck.argtypes = [POINTER(POINTER(Nrrd)), POINTER(gageStackBlurParm), POINTER(Nrrd), POINTER(gageKind)]
gageStackBlurGet = libteem.gageStackBlurGet
gageStackBlurGet.restype = c_int
gageStackBlurGet.argtypes = [POINTER(POINTER(Nrrd)), POINTER(c_int), POINTER(gageStackBlurParm), STRING, POINTER(Nrrd), POINTER(gageKind)]
class NrrdEncoding_t(Structure):
    """Opaque forward declaration of Teem's NrrdEncoding struct.

    No field layout is assigned here; the type only needs to exist so
    that ``POINTER(NrrdEncoding)`` can appear in function signatures
    (e.g. ``gageStackBlurManage`` below).
    """
# Canonical alias matching the C typedef name.
NrrdEncoding = NrrdEncoding_t
# -- gage: context lifecycle, probing, and deconvolution ----------------------
# Auto-generated ctypes prototypes for the core gage API: creating and
# copying gageContext objects, attaching per-volume state, setting kernels,
# probing at (x, y, z) positions, and deconvolution helpers.
gageStackBlurManage = libteem.gageStackBlurManage
gageStackBlurManage.restype = c_int
gageStackBlurManage.argtypes = [POINTER(POINTER(POINTER(Nrrd))), POINTER(c_int), POINTER(gageStackBlurParm), STRING, c_int, POINTER(NrrdEncoding), POINTER(Nrrd), POINTER(gageKind)]
gageContextNew = libteem.gageContextNew
gageContextNew.restype = POINTER(gageContext)
gageContextNew.argtypes = []
gageContextCopy = libteem.gageContextCopy
gageContextCopy.restype = POINTER(gageContext)
gageContextCopy.argtypes = [POINTER(gageContext)]
gageContextNix = libteem.gageContextNix
gageContextNix.restype = POINTER(gageContext)
gageContextNix.argtypes = [POINTER(gageContext)]
gageParmSet = libteem.gageParmSet
gageParmSet.restype = None
gageParmSet.argtypes = [POINTER(gageContext), c_int, c_double]
gagePerVolumeIsAttached = libteem.gagePerVolumeIsAttached
gagePerVolumeIsAttached.restype = c_int
gagePerVolumeIsAttached.argtypes = [POINTER(gageContext), POINTER(gagePerVolume)]
gagePerVolumeAttach = libteem.gagePerVolumeAttach
gagePerVolumeAttach.restype = c_int
gagePerVolumeAttach.argtypes = [POINTER(gageContext), POINTER(gagePerVolume)]
gagePerVolumeDetach = libteem.gagePerVolumeDetach
gagePerVolumeDetach.restype = c_int
gagePerVolumeDetach.argtypes = [POINTER(gageContext), POINTER(gagePerVolume)]
gageKernelSet = libteem.gageKernelSet
gageKernelSet.restype = c_int
gageKernelSet.argtypes = [POINTER(gageContext), c_int, POINTER(NrrdKernel), POINTER(c_double)]
gageKernelReset = libteem.gageKernelReset
gageKernelReset.restype = None
gageKernelReset.argtypes = [POINTER(gageContext)]
gageProbe = libteem.gageProbe
gageProbe.restype = c_int
gageProbe.argtypes = [POINTER(gageContext), c_double, c_double, c_double]
gageProbeSpace = libteem.gageProbeSpace
gageProbeSpace.restype = c_int
gageProbeSpace.argtypes = [POINTER(gageContext), c_double, c_double, c_double, c_int, c_int]
gageUpdate = libteem.gageUpdate
gageUpdate.restype = c_int
gageUpdate.argtypes = [POINTER(gageContext)]
gageStructureTensor = libteem.gageStructureTensor
gageStructureTensor.restype = c_int
gageStructureTensor.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_int, c_int]
gageDeconvolve = libteem.gageDeconvolve
gageDeconvolve.restype = c_int
gageDeconvolve.argtypes = [POINTER(Nrrd), POINTER(c_double), POINTER(Nrrd), POINTER(gageKind), POINTER(NrrdKernelSpec), c_int, c_uint, c_int, c_double, c_double, c_int]
gageDeconvolveSeparableKnown = libteem.gageDeconvolveSeparableKnown
gageDeconvolveSeparableKnown.restype = c_int
gageDeconvolveSeparableKnown.argtypes = [POINTER(NrrdKernelSpec)]
gageDeconvolveSeparable = libteem.gageDeconvolveSeparable
gageDeconvolveSeparable.restype = c_int
gageDeconvolveSeparable.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(gageKind), POINTER(NrrdKernelSpec), c_int]
# hest: callback table for parsing/destroying values of a custom option
# type.  (The hestCB class itself is forward-declared earlier in the file;
# its layout is filled in here.)
hestCB._fields_ = [
    ('size', c_size_t),    # presumably sizeof one parsed value -- confirm against Teem hest docs
    ('type', STRING),
    ('parse', CFUNCTYPE(c_int, c_void_p, STRING, STRING)),
    ('destroy', CFUNCTYPE(c_void_p, c_void_p)),
]
class hestOpt(Structure):
    """One command-line option record for Teem's hest option parser.

    Declared with its fields inline: hestOpt contains no self-referential
    pointers, so the two-step forward-declare/assign-``_fields_`` pattern
    used elsewhere in this file is unnecessary here.
    """
    _fields_ = [
        ('flag', STRING),
        ('name', STRING),
        ('type', c_int),
        ('min', c_uint),
        ('max', c_int),
        ('valueP', c_void_p),
        ('dflt', STRING),
        ('info', STRING),
        ('sawP', POINTER(c_uint)),
        ('enm', POINTER(airEnum)),
        ('CB', POINTER(hestCB)),
        ('kind', c_int),
        ('alloc', c_int),
        ('source', c_int),
    ]
# hest: global parser behaviour settings.  (hestParm is forward-declared
# earlier in the file.)  Most fields are boolean toggles controlling how
# usage information is elided; the trailing c_char fields are the special
# characters recognised on the command line.
hestParm._fields_ = [
    ('verbosity', c_int),
    ('respFileEnable', c_int),
    ('elideSingleEnumType', c_int),
    ('elideSingleOtherType', c_int),
    ('elideSingleOtherDefault', c_int),
    ('elideSingleNonExistFloatDefault', c_int),
    ('elideMultipleNonExistFloatDefault', c_int),
    ('elideSingleEmptyStringDefault', c_int),
    ('elideMultipleEmptyStringDefault', c_int),
    ('noArgsIsNoProblem', c_int),
    ('greedySingleString', c_int),
    ('cleverPluralizeOtherY', c_int),
    ('columns', c_uint),
    ('respFileFlag', c_char),
    ('respFileComment', c_char),
    ('varParamStopFlag', c_char),
    ('multiFlagSep', c_char),
]
# -- hest: library-level defaults and parsing API -----------------------------
# The in_dll() bindings below expose libteem's global default values for the
# corresponding hestParm fields; the function prototypes that follow form
# the hest command-line parsing API.
hestVerbosity = (c_int).in_dll(libteem, 'hestVerbosity')
hestRespFileEnable = (c_int).in_dll(libteem, 'hestRespFileEnable')
hestElideSingleEnumType = (c_int).in_dll(libteem, 'hestElideSingleEnumType')
hestElideSingleOtherType = (c_int).in_dll(libteem, 'hestElideSingleOtherType')
hestElideSingleOtherDefault = (c_int).in_dll(libteem, 'hestElideSingleOtherDefault')
hestElideSingleNonExistFloatDefault = (c_int).in_dll(libteem, 'hestElideSingleNonExistFloatDefault')
hestElideMultipleNonExistFloatDefault = (c_int).in_dll(libteem, 'hestElideMultipleNonExistFloatDefault')
hestElideSingleEmptyStringDefault = (c_int).in_dll(libteem, 'hestElideSingleEmptyStringDefault')
hestElideMultipleEmptyStringDefault = (c_int).in_dll(libteem, 'hestElideMultipleEmptyStringDefault')
hestNoArgsIsNoProblem = (c_int).in_dll(libteem, 'hestNoArgsIsNoProblem')
hestGreedySingleString = (c_int).in_dll(libteem, 'hestGreedySingleString')
hestCleverPluralizeOtherY = (c_int).in_dll(libteem, 'hestCleverPluralizeOtherY')
hestColumns = (c_uint).in_dll(libteem, 'hestColumns')
hestRespFileFlag = (c_char).in_dll(libteem, 'hestRespFileFlag')
hestRespFileComment = (c_char).in_dll(libteem, 'hestRespFileComment')
hestVarParamStopFlag = (c_char).in_dll(libteem, 'hestVarParamStopFlag')
hestMultiFlagSep = (c_char).in_dll(libteem, 'hestMultiFlagSep')
hestPresent = (c_int).in_dll(libteem, 'hestPresent')
hestParmNew = libteem.hestParmNew
hestParmNew.restype = POINTER(hestParm)
hestParmNew.argtypes = []
hestParmFree = libteem.hestParmFree
hestParmFree.restype = POINTER(hestParm)
hestParmFree.argtypes = [POINTER(hestParm)]
hestOptAdd = libteem.hestOptAdd
hestOptAdd.restype = c_uint
hestOptAdd.argtypes = [POINTER(POINTER(hestOpt)), STRING, STRING, c_int, c_int, c_int, c_void_p, STRING, STRING]
hestOptFree = libteem.hestOptFree
hestOptFree.restype = POINTER(hestOpt)
hestOptFree.argtypes = [POINTER(hestOpt)]
hestOptCheck = libteem.hestOptCheck
hestOptCheck.restype = c_int
hestOptCheck.argtypes = [POINTER(hestOpt), POINTER(STRING)]
hestParse = libteem.hestParse
hestParse.restype = c_int
hestParse.argtypes = [POINTER(hestOpt), c_int, POINTER(STRING), POINTER(STRING), POINTER(hestParm)]
hestParseFree = libteem.hestParseFree
hestParseFree.restype = c_void_p
hestParseFree.argtypes = [POINTER(hestOpt)]
hestParseOrDie = libteem.hestParseOrDie
hestParseOrDie.restype = None
hestParseOrDie.argtypes = [POINTER(hestOpt), c_int, POINTER(STRING), POINTER(hestParm), STRING, STRING, c_int, c_int, c_int]
hestMinNumArgs = libteem.hestMinNumArgs
hestMinNumArgs.restype = c_int
hestMinNumArgs.argtypes = [POINTER(hestOpt)]
hestUsage = libteem.hestUsage
hestUsage.restype = None
hestUsage.argtypes = [POINTER(FILE), POINTER(hestOpt), STRING, POINTER(hestParm)]
hestGlossary = libteem.hestGlossary
hestGlossary.restype = None
hestGlossary.argtypes = [POINTER(FILE), POINTER(hestOpt), POINTER(hestParm)]
hestInfo = libteem.hestInfo
hestInfo.restype = None
hestInfo.argtypes = [POINTER(FILE), STRING, STRING, POINTER(hestParm)]
# -- hoover: renderer callback signatures -------------------------------------
# CFUNCTYPE aliases for the callback slots of hooverContext (see the
# renderBegin/threadBegin/rayBegin/sample/rayEnd/threadEnd/renderEnd fields
# declared below).  The names suggest begin/end hooks at render, thread,
# and per-ray granularity, with hooverSample_t invoked per sample.
hooverRenderBegin_t = CFUNCTYPE(c_int, POINTER(c_void_p), c_void_p)
hooverThreadBegin_t = CFUNCTYPE(c_int, POINTER(c_void_p), c_void_p, c_void_p, c_int)
hooverRayBegin_t = CFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int, c_int, c_double, POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double))
hooverSample_t = CFUNCTYPE(c_double, c_void_p, c_void_p, c_void_p, c_int, c_double, c_int, POINTER(c_double), POINTER(c_double))
hooverRayEnd_t = CFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p)
hooverThreadEnd_t = CFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p)
hooverRenderEnd_t = CFUNCTYPE(c_int, c_void_p, c_void_p)
class hooverContext(Structure):
    """State for a hoover multi-threaded render: camera, volume/image
    geometry, worker bookkeeping, and the user-supplied callback slots
    (see the hoover*_t CFUNCTYPE aliases above)."""
    # _pack_ must be set before _fields_ is processed by the ctypes
    # metaclass, hence it precedes the field list in the class body.
    _pack_ = 4
    _fields_ = [
        ('cam', POINTER(limnCamera)),
        ('volSize', c_int * 3),
        ('volSpacing', c_double * 3),
        ('volCentering', c_int),
        ('shape', POINTER(gageShape)),
        ('imgSize', c_int * 2),
        ('imgCentering', c_int),
        ('user', c_void_p),
        ('numThreads', c_uint),
        ('workIdx', c_int),
        ('workMutex', POINTER(airThreadMutex)),
        ('renderBegin', POINTER(hooverRenderBegin_t)),
        ('threadBegin', POINTER(hooverThreadBegin_t)),
        ('rayBegin', POINTER(hooverRayBegin_t)),
        ('sample', POINTER(hooverSample_t)),
        ('rayEnd', POINTER(hooverRayEnd_t)),
        ('threadEnd', POINTER(hooverThreadEnd_t)),
        ('renderEnd', POINTER(hooverRenderEnd_t)),
    ]
# -- hoover: globals and rendering API ----------------------------------------
# Library globals (biff key, centering defaults, error enum) followed by the
# context lifecycle functions, the main hooverRender entry point, and the
# hooverStub* no-op callback implementations usable as placeholders.
hooverPresent = (c_int).in_dll(libteem, 'hooverPresent')
hooverBiffKey = (STRING).in_dll(libteem, 'hooverBiffKey')
hooverDefVolCentering = (c_int).in_dll(libteem, 'hooverDefVolCentering')
hooverDefImgCentering = (c_int).in_dll(libteem, 'hooverDefImgCentering')
hooverErr = (POINTER(airEnum)).in_dll(libteem, 'hooverErr')
hooverContextNew = libteem.hooverContextNew
hooverContextNew.restype = POINTER(hooverContext)
hooverContextNew.argtypes = []
hooverContextCheck = libteem.hooverContextCheck
hooverContextCheck.restype = c_int
hooverContextCheck.argtypes = [POINTER(hooverContext)]
hooverContextNix = libteem.hooverContextNix
hooverContextNix.restype = None
hooverContextNix.argtypes = [POINTER(hooverContext)]
hooverRender = libteem.hooverRender
hooverRender.restype = c_int
hooverRender.argtypes = [POINTER(hooverContext), POINTER(c_int), POINTER(c_int)]
hooverStubRenderBegin = libteem.hooverStubRenderBegin
hooverStubRenderBegin.restype = c_int
hooverStubRenderBegin.argtypes = [POINTER(c_void_p), c_void_p]
hooverStubThreadBegin = libteem.hooverStubThreadBegin
hooverStubThreadBegin.restype = c_int
hooverStubThreadBegin.argtypes = [POINTER(c_void_p), c_void_p, c_void_p, c_int]
hooverStubRayBegin = libteem.hooverStubRayBegin
hooverStubRayBegin.restype = c_int
hooverStubRayBegin.argtypes = [c_void_p, c_void_p, c_void_p, c_int, c_int, c_double, POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
hooverStubSample = libteem.hooverStubSample
hooverStubSample.restype = c_double
hooverStubSample.argtypes = [c_void_p, c_void_p, c_void_p, c_int, c_double, c_int, POINTER(c_double), POINTER(c_double)]
hooverStubRayEnd = libteem.hooverStubRayEnd
hooverStubRayEnd.restype = c_int
hooverStubRayEnd.argtypes = [c_void_p, c_void_p, c_void_p]
hooverStubThreadEnd = libteem.hooverStubThreadEnd
hooverStubThreadEnd.restype = c_int
hooverStubThreadEnd.argtypes = [c_void_p, c_void_p, c_void_p]
hooverStubRenderEnd = libteem.hooverStubRenderEnd
hooverStubRenderEnd.restype = c_int
hooverStubRenderEnd.argtypes = [c_void_p, c_void_p]
# limn: camera description.  (limnCamera_t is forward-declared earlier in
# the file; its layout is filled in here with 4-byte packing.)
# Spellings such as 'neer'/'faar' mirror the upstream C struct, which
# presumably avoids the Windows near/far reserved words -- confirm against
# the Teem headers.
limnCamera_t._pack_ = 4
limnCamera_t._fields_ = [
    # NOTE: 'from' is a Python keyword, so this field cannot be accessed
    # as cam.from; use getattr(cam, 'from') instead.
    ('from', c_double * 3),
    ('at', c_double * 3),
    ('up', c_double * 3),
    ('uRange', c_double * 2),
    ('vRange', c_double * 2),
    ('fov', c_double),
    ('aspect', c_double),
    ('neer', c_double),
    ('faar', c_double),
    ('dist', c_double),
    ('atRelative', c_int),
    ('orthographic', c_int),
    ('rightHanded', c_int),
    ('W2V', c_double * 16),    # 4x4 world-to-view transform -- presumably row-major; confirm
    ('V2W', c_double * 16),
    ('U', c_double * 4),
    ('V', c_double * 4),
    ('N', c_double * 4),
    ('vspNeer', c_double),
    ('vspFaar', c_double),
    ('vspDist', c_double),
]
class limnLight(Structure):
    """Lighting state: one ambient colour plus eight individual lights.

    The per-light arrays below are indexed by light number (0..7); see
    limnLightSet / limnLightSwitch / limnLightAmbientSet further down for
    the functions that populate them.
    """
    _fields_ = [
        ('amb', c_float * 4),        # ambient RGBA
        ('_dir', c_float * 4 * 8),   # per-light direction (internal copy alongside 'dir')
        ('dir', c_float * 4 * 8),
        ('col', c_float * 4 * 8),    # per-light RGBA colour
        ('on', c_int * 8),           # per-light enable flags
        ('vsp', c_int * 8),          # presumably view-space flags -- confirm against Teem docs
    ]
class limnOptsPS(Structure):
    """Options controlling limn's PostScript output (see limnWindow.ps)."""
    _fields_ = [
        ('lineWidth', c_float * 8),
        ('creaseAngle', c_float),
        ('bg', c_float * 3),         # background RGB
        ('edgeColor', c_float * 3),
        ('showpage', c_int),
        ('wireFrame', c_int),
        ('noBackground', c_int),
    ]
class limnWindow(Structure):
    """Output "window" for limn rendering: device, scaling, bounding box,
    and the destination stdio FILE, plus embedded PostScript options."""
    _fields_ = [
        ('ps', limnOptsPS),
        ('device', c_int),
        ('scale', c_float),
        ('bbox', c_float * 4),
        ('yFlip', c_int),
        ('file', POINTER(FILE)),
    ]
class limnLook(Structure):
    """Surface appearance record used by limnObject (see the 'look' array
    there): RGBA colour, three shading coefficients, and specular power."""
    _fields_ = [
        ('rgba', c_float * 4),
        ('kads', c_float * 3),   # presumably ambient/diffuse/specular weights -- confirm
        ('spow', c_float),
    ]
class limnVertex(Structure):
    """One vertex of a limnObject: homogeneous world position, colour,
    (transformed) coordinates, and world-space normal."""
    _fields_ = [
        ('world', c_float * 4),
        ('rgba', c_float * 4),
        ('coord', c_float * 4),
        ('worldNormal', c_float * 3),
    ]
class limnEdge_t(Structure):
    """One edge of a limnObject, referring to vertices, a look, and the
    (up to two) faces it borders, all by index."""
    _fields_ = [
        ('vertIdx', c_uint * 2),
        ('lookIdx', c_uint),
        ('partIdx', c_uint),
        ('type', c_int),
        ('faceIdx', c_int * 2),
        ('once', c_int),
    ]
# Alias matching the C typedef name.
limnEdge = limnEdge_t
class limnFace_t(Structure):
    """One polygonal face of a limnObject: normals, index arrays for its
    'sideNum' vertices/edges, owning part, visibility flag and depth."""
    _fields_ = [
        ('worldNormal', c_float * 3),
        ('screenNormal', c_float * 3),
        ('vertIdx', POINTER(c_uint)),
        ('edgeIdx', POINTER(c_uint)),
        ('sideNum', c_uint),
        ('lookIdx', c_uint),
        ('partIdx', c_uint),
        ('visible', c_int),
        ('depth', c_float),
    ]
# Alias matching the C typedef name.
limnFace = limnFace_t
class limnPart_t(Structure):
    """One part of a limnObject: growable index lists (backed by airArray)
    of the vertices, edges, and faces belonging to the part."""
    _fields_ = [
        ('vertIdx', POINTER(c_uint)),
        ('vertIdxNum', c_uint),
        ('vertIdxArr', POINTER(airArray)),
        ('edgeIdx', POINTER(c_uint)),
        ('edgeIdxNum', c_uint),
        ('edgeIdxArr', POINTER(airArray)),
        ('faceIdx', POINTER(c_uint)),
        ('faceIdxNum', c_uint),
        ('faceIdxArr', POINTER(airArray)),
        ('lookIdx', c_int),
        ('depth', c_float),
    ]
# Alias matching the C typedef name.
limnPart = limnPart_t
class limnObject(Structure):
    """Polygonal model: growable arrays (each with its airArray manager)
    of vertices, edges, faces, parts, and looks, plus sorting scratch
    space and behaviour flags."""
    _fields_ = [
        ('vert', POINTER(limnVertex)),
        ('vertNum', c_uint),
        ('vertArr', POINTER(airArray)),
        ('edge', POINTER(limnEdge)),
        ('edgeNum', c_uint),
        ('edgeArr', POINTER(airArray)),
        ('face', POINTER(limnFace)),
        ('faceNum', c_uint),
        ('faceArr', POINTER(airArray)),
        ('faceSort', POINTER(POINTER(limnFace))),   # scratch for depth sorting (see limnObjectDepthSortFaces)
        ('part', POINTER(POINTER(limnPart))),
        ('partNum', c_uint),
        ('partArr', POINTER(airArray)),
        ('partPool', POINTER(POINTER(limnPart))),
        ('partPoolNum', c_uint),
        ('partPoolArr', POINTER(airArray)),
        ('look', POINTER(limnLook)),
        ('lookNum', c_uint),
        ('lookArr', POINTER(airArray)),
        ('vertSpace', c_int),
        ('setVertexRGBAFromLook', c_int),
        ('doEdges', c_int),
        ('incr', c_uint),
    ]
# limn: polydata layout.  (limnPolyData is forward-declared earlier in the
# file.)  Per-vertex attribute arrays each come with their element count;
# 'indx'/'type'/'icnt' describe the primitives.  Note the generated field
# order puts indxNum before indx, unlike the other Num-after-pointer pairs.
limnPolyData._fields_ = [
    ('xyzw', POINTER(c_float)),      # per-vertex homogeneous positions
    ('xyzwNum', c_uint),
    ('rgba', POINTER(c_ubyte)),
    ('rgbaNum', c_uint),
    ('norm', POINTER(c_float)),
    ('normNum', c_uint),
    ('tex2', POINTER(c_float)),
    ('tex2Num', c_uint),
    ('tang', POINTER(c_float)),
    ('tangNum', c_uint),
    ('indxNum', c_uint),
    ('indx', POINTER(c_uint)),
    ('primNum', c_uint),
    ('type', POINTER(c_ubyte)),      # per-primitive type (see limnPrimitive enum below)
    ('icnt', POINTER(c_uint)),
]
class limnSpline_t(Structure):
    """A spline through control points stored in an Nrrd, with type/info
    codes, looping flag, and the B/C parameters (meaningful for BC-family
    splines -- see limnSplineTypeSpec below)."""
    # 4-byte packing must precede _fields_ processing.
    _pack_ = 4
    _fields_ = [
        ('type', c_int),
        ('info', c_int),
        ('loop', c_int),
        ('B', c_double),
        ('C', c_double),
        ('ncpt', POINTER(Nrrd)),
        ('time', POINTER(c_double)),
    ]
# Alias matching the C typedef name.
limnSpline = limnSpline_t
class limnSplineTypeSpec_t(Structure):
    """Spline type selector plus the B/C shape parameters, used when
    constructing limnSpline objects (see limnSplineTypeSpecNew below)."""
    # 4-byte packing must precede _fields_ processing.
    _pack_ = 4
    _fields_ = [
        ('type', c_int),
        ('B', c_double),
        ('C', c_double),
    ]
# Alias matching the C typedef name.
limnSplineTypeSpec = limnSplineTypeSpec_t
# -- limn: library globals, enums, and quantized-normal tables ----------------
# The limnQN* tables are indexed by quantized-normal scheme (17 entries) and
# convert between packed normals and float/double vectors.
limnPresent = (c_int).in_dll(libteem, 'limnPresent')
limnBiffKey = (STRING).in_dll(libteem, 'limnBiffKey')
limnDefCameraAtRelative = (c_int).in_dll(libteem, 'limnDefCameraAtRelative')
limnDefCameraOrthographic = (c_int).in_dll(libteem, 'limnDefCameraOrthographic')
limnDefCameraRightHanded = (c_int).in_dll(libteem, 'limnDefCameraRightHanded')
limnSpace = (POINTER(airEnum)).in_dll(libteem, 'limnSpace')
limnPolyDataInfo = (POINTER(airEnum)).in_dll(libteem, 'limnPolyDataInfo')
limnCameraPathTrack = (POINTER(airEnum)).in_dll(libteem, 'limnCameraPathTrack')
limnPrimitive = (POINTER(airEnum)).in_dll(libteem, 'limnPrimitive')
limnQNBins = (c_uint * 17).in_dll(libteem, 'limnQNBins')
limnQNtoV_f = (CFUNCTYPE(None, POINTER(c_float), c_uint) * 17).in_dll(libteem, 'limnQNtoV_f')
limnQNtoV_d = (CFUNCTYPE(None, POINTER(c_double), c_uint) * 17).in_dll(libteem, 'limnQNtoV_d')
limnVtoQN_f = (CFUNCTYPE(c_uint, POINTER(c_float)) * 17).in_dll(libteem, 'limnVtoQN_f')
limnVtoQN_d = (CFUNCTYPE(c_uint, POINTER(c_double)) * 17).in_dll(libteem, 'limnVtoQN_d')
# -- limn: lighting, environment maps, camera, and window ---------------------
# Prototypes for light setup (limnLight*), environment-map filling, camera
# lifecycle/update, window lifecycle, hest integration, and camera paths.
limnQNDemo = libteem.limnQNDemo
limnQNDemo.restype = c_int
limnQNDemo.argtypes = [POINTER(Nrrd), c_uint, c_int]
limnLightSet = libteem.limnLightSet
limnLightSet.restype = None
limnLightSet.argtypes = [POINTER(limnLight), c_int, c_int, c_float, c_float, c_float, c_float, c_float, c_float]
limnLightAmbientSet = libteem.limnLightAmbientSet
limnLightAmbientSet.restype = None
limnLightAmbientSet.argtypes = [POINTER(limnLight), c_float, c_float, c_float]
limnLightSwitch = libteem.limnLightSwitch
limnLightSwitch.restype = None
limnLightSwitch.argtypes = [POINTER(limnLight), c_int, c_int]
limnLightReset = libteem.limnLightReset
limnLightReset.restype = None
limnLightReset.argtypes = [POINTER(limnLight)]
limnLightUpdate = libteem.limnLightUpdate
limnLightUpdate.restype = c_int
limnLightUpdate.argtypes = [POINTER(limnLight), POINTER(limnCamera)]
# Callback type used by limnEnvMapFill to colour each direction.
limnEnvMapCB = CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), c_void_p)
limnEnvMapFill = libteem.limnEnvMapFill
limnEnvMapFill.restype = c_int
limnEnvMapFill.argtypes = [POINTER(Nrrd), limnEnvMapCB, c_int, c_void_p]
limnLightDiffuseCB = libteem.limnLightDiffuseCB
limnLightDiffuseCB.restype = None
limnLightDiffuseCB.argtypes = [POINTER(c_float), POINTER(c_float), c_void_p]
limnEnvMapCheck = libteem.limnEnvMapCheck
limnEnvMapCheck.restype = c_int
limnEnvMapCheck.argtypes = [POINTER(Nrrd)]
limnLightNew = libteem.limnLightNew
limnLightNew.restype = POINTER(limnLight)
limnLightNew.argtypes = []
limnCameraInit = libteem.limnCameraInit
limnCameraInit.restype = None
limnCameraInit.argtypes = [POINTER(limnCamera)]
limnLightNix = libteem.limnLightNix
limnLightNix.restype = POINTER(limnLight)
limnLightNix.argtypes = [POINTER(limnLight)]
limnCameraNew = libteem.limnCameraNew
limnCameraNew.restype = POINTER(limnCamera)
limnCameraNew.argtypes = []
limnCameraNix = libteem.limnCameraNix
limnCameraNix.restype = POINTER(limnCamera)
limnCameraNix.argtypes = [POINTER(limnCamera)]
limnWindowNew = libteem.limnWindowNew
limnWindowNew.restype = POINTER(limnWindow)
limnWindowNew.argtypes = [c_int]
limnWindowNix = libteem.limnWindowNix
limnWindowNix.restype = POINTER(limnWindow)
limnWindowNix.argtypes = [POINTER(limnWindow)]
limnHestCameraOptAdd = libteem.limnHestCameraOptAdd
limnHestCameraOptAdd.restype = None
limnHestCameraOptAdd.argtypes = [POINTER(POINTER(hestOpt)), POINTER(limnCamera), STRING, STRING, STRING, STRING, STRING, STRING, STRING, STRING, STRING]
limnCameraAspectSet = libteem.limnCameraAspectSet
limnCameraAspectSet.restype = c_int
limnCameraAspectSet.argtypes = [POINTER(limnCamera), c_uint, c_uint, c_int]
limnCameraUpdate = libteem.limnCameraUpdate
limnCameraUpdate.restype = c_int
limnCameraUpdate.argtypes = [POINTER(limnCamera)]
limnCameraPathMake = libteem.limnCameraPathMake
limnCameraPathMake.restype = c_int
limnCameraPathMake.argtypes = [POINTER(limnCamera), c_int, POINTER(limnCamera), POINTER(c_double), c_int, c_int, POINTER(limnSplineTypeSpec), POINTER(limnSplineTypeSpec), POINTER(limnSplineTypeSpec), POINTER(limnSplineTypeSpec)]
# -- limn: limnObject construction --------------------------------------------
# Prototypes for building limnObject models incrementally: lifecycle,
# preallocation, and adding looks/parts/vertices/edges/faces.
limnObjectLookAdd = libteem.limnObjectLookAdd
limnObjectLookAdd.restype = c_int
limnObjectLookAdd.argtypes = [POINTER(limnObject)]
limnObjectNew = libteem.limnObjectNew
limnObjectNew.restype = POINTER(limnObject)
limnObjectNew.argtypes = [c_int, c_int]
limnObjectNix = libteem.limnObjectNix
limnObjectNix.restype = POINTER(limnObject)
limnObjectNix.argtypes = [POINTER(limnObject)]
limnObjectEmpty = libteem.limnObjectEmpty
limnObjectEmpty.restype = None
limnObjectEmpty.argtypes = [POINTER(limnObject)]
limnObjectPreSet = libteem.limnObjectPreSet
limnObjectPreSet.restype = c_int
limnObjectPreSet.argtypes = [POINTER(limnObject), c_uint, c_uint, c_uint, c_uint, c_uint]
limnObjectPartAdd = libteem.limnObjectPartAdd
limnObjectPartAdd.restype = c_int
limnObjectPartAdd.argtypes = [POINTER(limnObject)]
limnObjectVertexNumPreSet = libteem.limnObjectVertexNumPreSet
limnObjectVertexNumPreSet.restype = c_int
limnObjectVertexNumPreSet.argtypes = [POINTER(limnObject), c_uint, c_uint]
limnObjectVertexAdd = libteem.limnObjectVertexAdd
limnObjectVertexAdd.restype = c_int
limnObjectVertexAdd.argtypes = [POINTER(limnObject), c_uint, c_float, c_float, c_float]
limnObjectEdgeAdd = libteem.limnObjectEdgeAdd
limnObjectEdgeAdd.restype = c_int
limnObjectEdgeAdd.argtypes = [POINTER(limnObject), c_uint, c_uint, c_uint, c_uint, c_uint]
limnObjectFaceNumPreSet = libteem.limnObjectFaceNumPreSet
limnObjectFaceNumPreSet.restype = c_int
limnObjectFaceNumPreSet.argtypes = [POINTER(limnObject), c_uint, c_uint]
limnObjectFaceAdd = libteem.limnObjectFaceAdd
limnObjectFaceAdd.restype = c_int
limnObjectFaceAdd.argtypes = [POINTER(limnObject), c_uint, c_uint, c_uint, POINTER(c_uint)]
# -- limn: limnPolyData API ---------------------------------------------------
# Prototypes for limnPolyData: lifecycle and allocation, copying,
# transforms, normals, primitive queries, shape generators (cube, sphere,
# superquadric, ...), clipping/compression/joining, connectivity queries,
# tube wrapping, and smoothing.
limnPolyDataNew = libteem.limnPolyDataNew
limnPolyDataNew.restype = POINTER(limnPolyData)
limnPolyDataNew.argtypes = []
limnPolyDataNix = libteem.limnPolyDataNix
limnPolyDataNix.restype = POINTER(limnPolyData)
limnPolyDataNix.argtypes = [POINTER(limnPolyData)]
limnPolyDataInfoBitFlag = libteem.limnPolyDataInfoBitFlag
limnPolyDataInfoBitFlag.restype = c_uint
limnPolyDataInfoBitFlag.argtypes = [POINTER(limnPolyData)]
limnPolyDataAlloc = libteem.limnPolyDataAlloc
limnPolyDataAlloc.restype = c_int
limnPolyDataAlloc.argtypes = [POINTER(limnPolyData), c_uint, c_uint, c_uint, c_uint]
limnPolyDataSize = libteem.limnPolyDataSize
limnPolyDataSize.restype = c_size_t
limnPolyDataSize.argtypes = [POINTER(limnPolyData)]
limnPolyDataCopy = libteem.limnPolyDataCopy
limnPolyDataCopy.restype = c_int
limnPolyDataCopy.argtypes = [POINTER(limnPolyData), POINTER(limnPolyData)]
limnPolyDataCopyN = libteem.limnPolyDataCopyN
limnPolyDataCopyN.restype = c_int
limnPolyDataCopyN.argtypes = [POINTER(limnPolyData), POINTER(limnPolyData), c_uint]
limnPolyDataTransform_f = libteem.limnPolyDataTransform_f
limnPolyDataTransform_f.restype = None
limnPolyDataTransform_f.argtypes = [POINTER(limnPolyData), POINTER(c_float)]
limnPolyDataTransform_d = libteem.limnPolyDataTransform_d
limnPolyDataTransform_d.restype = None
limnPolyDataTransform_d.argtypes = [POINTER(limnPolyData), POINTER(c_double)]
limnPolyDataPolygonNumber = libteem.limnPolyDataPolygonNumber
limnPolyDataPolygonNumber.restype = c_uint
limnPolyDataPolygonNumber.argtypes = [POINTER(limnPolyData)]
limnPolyDataVertexNormals = libteem.limnPolyDataVertexNormals
limnPolyDataVertexNormals.restype = c_int
limnPolyDataVertexNormals.argtypes = [POINTER(limnPolyData)]
limnPolyDataVertexNormalsNO = libteem.limnPolyDataVertexNormalsNO
limnPolyDataVertexNormalsNO.restype = c_int
limnPolyDataVertexNormalsNO.argtypes = [POINTER(limnPolyData)]
limnPolyDataPrimitiveTypes = libteem.limnPolyDataPrimitiveTypes
limnPolyDataPrimitiveTypes.restype = c_uint
limnPolyDataPrimitiveTypes.argtypes = [POINTER(limnPolyData)]
limnPolyDataPrimitiveVertexNumber = libteem.limnPolyDataPrimitiveVertexNumber
limnPolyDataPrimitiveVertexNumber.restype = c_int
limnPolyDataPrimitiveVertexNumber.argtypes = [POINTER(Nrrd), POINTER(limnPolyData)]
limnPolyDataPrimitiveArea = libteem.limnPolyDataPrimitiveArea
limnPolyDataPrimitiveArea.restype = c_int
limnPolyDataPrimitiveArea.argtypes = [POINTER(Nrrd), POINTER(limnPolyData)]
limnPolyDataRasterize = libteem.limnPolyDataRasterize
limnPolyDataRasterize.restype = c_int
limnPolyDataRasterize.argtypes = [POINTER(Nrrd), POINTER(limnPolyData), POINTER(c_double), POINTER(c_double), POINTER(c_size_t), c_int]
limnPolyDataColorSet = libteem.limnPolyDataColorSet
limnPolyDataColorSet.restype = None
limnPolyDataColorSet.argtypes = [POINTER(limnPolyData), c_ubyte, c_ubyte, c_ubyte, c_ubyte]
limnPolyDataCube = libteem.limnPolyDataCube
limnPolyDataCube.restype = c_int
limnPolyDataCube.argtypes = [POINTER(limnPolyData), c_uint, c_int]
limnPolyDataCubeTriangles = libteem.limnPolyDataCubeTriangles
limnPolyDataCubeTriangles.restype = c_int
limnPolyDataCubeTriangles.argtypes = [POINTER(limnPolyData), c_uint, c_int]
limnPolyDataOctahedron = libteem.limnPolyDataOctahedron
limnPolyDataOctahedron.restype = c_int
limnPolyDataOctahedron.argtypes = [POINTER(limnPolyData), c_uint, c_int]
limnPolyDataCone = libteem.limnPolyDataCone
limnPolyDataCone.restype = c_int
limnPolyDataCone.argtypes = [POINTER(limnPolyData), c_uint, c_uint, c_int]
limnPolyDataCylinder = libteem.limnPolyDataCylinder
limnPolyDataCylinder.restype = c_int
limnPolyDataCylinder.argtypes = [POINTER(limnPolyData), c_uint, c_uint, c_int]
limnPolyDataSuperquadric = libteem.limnPolyDataSuperquadric
limnPolyDataSuperquadric.restype = c_int
limnPolyDataSuperquadric.argtypes = [POINTER(limnPolyData), c_uint, c_float, c_float, c_uint, c_uint]
limnPolyDataSpiralBetterquadric = libteem.limnPolyDataSpiralBetterquadric
limnPolyDataSpiralBetterquadric.restype = c_int
limnPolyDataSpiralBetterquadric.argtypes = [POINTER(limnPolyData), c_uint, c_float, c_float, c_float, c_float, c_uint, c_uint]
limnPolyDataSpiralSuperquadric = libteem.limnPolyDataSpiralSuperquadric
limnPolyDataSpiralSuperquadric.restype = c_int
limnPolyDataSpiralSuperquadric.argtypes = [POINTER(limnPolyData), c_uint, c_float, c_float, c_uint, c_uint]
limnPolyDataPolarSphere = libteem.limnPolyDataPolarSphere
limnPolyDataPolarSphere.restype = c_int
limnPolyDataPolarSphere.argtypes = [POINTER(limnPolyData), c_uint, c_uint, c_uint]
limnPolyDataSpiralSphere = libteem.limnPolyDataSpiralSphere
limnPolyDataSpiralSphere.restype = c_int
limnPolyDataSpiralSphere.argtypes = [POINTER(limnPolyData), c_uint, c_uint, c_uint]
limnPolyDataIcoSphere = libteem.limnPolyDataIcoSphere
limnPolyDataIcoSphere.restype = c_int
limnPolyDataIcoSphere.argtypes = [POINTER(limnPolyData), c_uint, c_uint]
limnPolyDataPlane = libteem.limnPolyDataPlane
limnPolyDataPlane.restype = c_int
limnPolyDataPlane.argtypes = [POINTER(limnPolyData), c_uint, c_uint, c_uint]
limnPolyDataSquare = libteem.limnPolyDataSquare
limnPolyDataSquare.restype = c_int
limnPolyDataSquare.argtypes = [POINTER(limnPolyData), c_uint]
limnPolyDataEdgeHalve = libteem.limnPolyDataEdgeHalve
limnPolyDataEdgeHalve.restype = c_int
limnPolyDataEdgeHalve.argtypes = [POINTER(limnPolyData), POINTER(limnPolyData)]
limnPolyDataVertexWindingFix = libteem.limnPolyDataVertexWindingFix
limnPolyDataVertexWindingFix.restype = c_int
limnPolyDataVertexWindingFix.argtypes = [POINTER(limnPolyData), c_int]
limnPolyDataClip = libteem.limnPolyDataClip
limnPolyDataClip.restype = c_int
limnPolyDataClip.argtypes = [POINTER(limnPolyData), POINTER(Nrrd), c_double]
limnPolyDataClipMulti = libteem.limnPolyDataClipMulti
limnPolyDataClipMulti.restype = c_int
limnPolyDataClipMulti.argtypes = [POINTER(limnPolyData), POINTER(Nrrd), POINTER(c_double)]
limnPolyDataCompress = libteem.limnPolyDataCompress
limnPolyDataCompress.restype = POINTER(limnPolyData)
limnPolyDataCompress.argtypes = [POINTER(limnPolyData)]
limnPolyDataJoin = libteem.limnPolyDataJoin
limnPolyDataJoin.restype = POINTER(limnPolyData)
limnPolyDataJoin.argtypes = [POINTER(POINTER(limnPolyData)), c_uint]
limnPolyDataVertexWindingFlip = libteem.limnPolyDataVertexWindingFlip
limnPolyDataVertexWindingFlip.restype = c_int
limnPolyDataVertexWindingFlip.argtypes = [POINTER(limnPolyData)]
limnPolyDataCCFind = libteem.limnPolyDataCCFind
limnPolyDataCCFind.restype = c_int
limnPolyDataCCFind.argtypes = [POINTER(limnPolyData)]
limnPolyDataPrimitiveSort = libteem.limnPolyDataPrimitiveSort
limnPolyDataPrimitiveSort.restype = c_int
limnPolyDataPrimitiveSort.argtypes = [POINTER(limnPolyData), POINTER(Nrrd)]
limnPolyDataPrimitiveSelect = libteem.limnPolyDataPrimitiveSelect
limnPolyDataPrimitiveSelect.restype = c_int
limnPolyDataPrimitiveSelect.argtypes = [POINTER(limnPolyData), POINTER(limnPolyData), POINTER(Nrrd)]
limnPolyDataNeighborList = libteem.limnPolyDataNeighborList
limnPolyDataNeighborList.restype = c_int
limnPolyDataNeighborList.argtypes = [POINTER(POINTER(c_uint)), POINTER(c_size_t), POINTER(c_uint), POINTER(limnPolyData)]
limnPolyDataNeighborArray = libteem.limnPolyDataNeighborArray
limnPolyDataNeighborArray.restype = c_int
limnPolyDataNeighborArray.argtypes = [POINTER(POINTER(c_int)), POINTER(c_uint), POINTER(limnPolyData)]
limnPolyDataNeighborArrayComp = libteem.limnPolyDataNeighborArrayComp
limnPolyDataNeighborArrayComp.restype = c_int
limnPolyDataNeighborArrayComp.argtypes = [POINTER(POINTER(c_int)), POINTER(POINTER(c_int)), POINTER(limnPolyData)]
limnPolyDataSpiralTubeWrap = libteem.limnPolyDataSpiralTubeWrap
limnPolyDataSpiralTubeWrap.restype = c_int
limnPolyDataSpiralTubeWrap.argtypes = [POINTER(limnPolyData), POINTER(limnPolyData), c_uint, POINTER(Nrrd), c_uint, c_uint, c_double]
limnPolyDataSmoothHC = libteem.limnPolyDataSmoothHC
limnPolyDataSmoothHC.restype = c_int
limnPolyDataSmoothHC.argtypes = [POINTER(limnPolyData), POINTER(c_int), POINTER(c_int), c_double, c_double, c_int]
# -- limn: object/polydata I/O ------------------------------------------------
# Prototypes for reading and writing limnObject and limnPolyData in the
# OFF, IV, LMPD, and VTK formats, plus the hest callbacks for parsing
# polydata filenames on the command line.
limnObjectDescribe = libteem.limnObjectDescribe
limnObjectDescribe.restype = c_int
limnObjectDescribe.argtypes = [POINTER(FILE), POINTER(limnObject)]
limnObjectReadOFF = libteem.limnObjectReadOFF
limnObjectReadOFF.restype = c_int
limnObjectReadOFF.argtypes = [POINTER(limnObject), POINTER(FILE)]
limnObjectWriteOFF = libteem.limnObjectWriteOFF
limnObjectWriteOFF.restype = c_int
limnObjectWriteOFF.argtypes = [POINTER(FILE), POINTER(limnObject)]
limnPolyDataWriteIV = libteem.limnPolyDataWriteIV
limnPolyDataWriteIV.restype = c_int
limnPolyDataWriteIV.argtypes = [POINTER(FILE), POINTER(limnPolyData)]
limnPolyDataWriteLMPD = libteem.limnPolyDataWriteLMPD
limnPolyDataWriteLMPD.restype = c_int
limnPolyDataWriteLMPD.argtypes = [POINTER(FILE), POINTER(limnPolyData)]
limnPolyDataReadLMPD = libteem.limnPolyDataReadLMPD
limnPolyDataReadLMPD.restype = c_int
limnPolyDataReadLMPD.argtypes = [POINTER(limnPolyData), POINTER(FILE)]
limnPolyDataWriteVTK = libteem.limnPolyDataWriteVTK
limnPolyDataWriteVTK.restype = c_int
limnPolyDataWriteVTK.argtypes = [POINTER(FILE), POINTER(limnPolyData)]
limnPolyDataReadOFF = libteem.limnPolyDataReadOFF
limnPolyDataReadOFF.restype = c_int
limnPolyDataReadOFF.argtypes = [POINTER(limnPolyData), POINTER(FILE)]
limnPolyDataSave = libteem.limnPolyDataSave
limnPolyDataSave.restype = c_int
limnPolyDataSave.argtypes = [STRING, POINTER(limnPolyData)]
limnHestPolyDataLMPD = (POINTER(hestCB)).in_dll(libteem, 'limnHestPolyDataLMPD')
limnHestPolyDataOFF = (POINTER(hestCB)).in_dll(libteem, 'limnHestPolyDataOFF')
# -- limn: limnObject shape generators, transforms, and rendering -------------
# Prototypes for adding primitive shapes to a limnObject, computing
# normals, applying camera/space transforms, depth sorting, and drawing
# to a limnWindow (including PostScript output).
limnObjectCubeAdd = libteem.limnObjectCubeAdd
limnObjectCubeAdd.restype = c_int
limnObjectCubeAdd.argtypes = [POINTER(limnObject), c_uint]
limnObjectSquareAdd = libteem.limnObjectSquareAdd
limnObjectSquareAdd.restype = c_int
limnObjectSquareAdd.argtypes = [POINTER(limnObject), c_uint]
limnObjectCylinderAdd = libteem.limnObjectCylinderAdd
limnObjectCylinderAdd.restype = c_int
limnObjectCylinderAdd.argtypes = [POINTER(limnObject), c_uint, c_uint, c_uint]
limnObjectPolarSphereAdd = libteem.limnObjectPolarSphereAdd
limnObjectPolarSphereAdd.restype = c_int
limnObjectPolarSphereAdd.argtypes = [POINTER(limnObject), c_uint, c_uint, c_uint, c_uint]
limnObjectConeAdd = libteem.limnObjectConeAdd
limnObjectConeAdd.restype = c_int
limnObjectConeAdd.argtypes = [POINTER(limnObject), c_uint, c_uint, c_uint]
limnObjectPolarSuperquadAdd = libteem.limnObjectPolarSuperquadAdd
limnObjectPolarSuperquadAdd.restype = c_int
limnObjectPolarSuperquadAdd.argtypes = [POINTER(limnObject), c_uint, c_uint, c_float, c_float, c_uint, c_uint]
limnObjectPolarSuperquadFancyAdd = libteem.limnObjectPolarSuperquadFancyAdd
limnObjectPolarSuperquadFancyAdd.restype = c_int
limnObjectPolarSuperquadFancyAdd.argtypes = [POINTER(limnObject), c_uint, c_uint, c_float, c_float, c_float, c_float, c_uint, c_uint]
limnObjectWorldHomog = libteem.limnObjectWorldHomog
limnObjectWorldHomog.restype = c_int
limnObjectWorldHomog.argtypes = [POINTER(limnObject)]
limnObjectFaceNormals = libteem.limnObjectFaceNormals
limnObjectFaceNormals.restype = c_int
limnObjectFaceNormals.argtypes = [POINTER(limnObject), c_int]
limnObjectVertexNormals = libteem.limnObjectVertexNormals
limnObjectVertexNormals.restype = c_int
limnObjectVertexNormals.argtypes = [POINTER(limnObject)]
limnObjectSpaceTransform = libteem.limnObjectSpaceTransform
limnObjectSpaceTransform.restype = c_int
limnObjectSpaceTransform.argtypes = [POINTER(limnObject), POINTER(limnCamera), POINTER(limnWindow), c_int]
limnObjectPartTransform = libteem.limnObjectPartTransform
limnObjectPartTransform.restype = c_int
limnObjectPartTransform.argtypes = [POINTER(limnObject), c_uint, POINTER(c_float)]
limnObjectDepthSortParts = libteem.limnObjectDepthSortParts
limnObjectDepthSortParts.restype = c_int
limnObjectDepthSortParts.argtypes = [POINTER(limnObject)]
limnObjectDepthSortFaces = libteem.limnObjectDepthSortFaces
limnObjectDepthSortFaces.restype = c_int
limnObjectDepthSortFaces.argtypes = [POINTER(limnObject)]
limnObjectFaceReverse = libteem.limnObjectFaceReverse
limnObjectFaceReverse.restype = c_int
limnObjectFaceReverse.argtypes = [POINTER(limnObject)]
limnObjectRender = libteem.limnObjectRender
limnObjectRender.restype = c_int
limnObjectRender.argtypes = [POINTER(limnObject), POINTER(limnCamera), POINTER(limnWindow)]
limnObjectPSDraw = libteem.limnObjectPSDraw
limnObjectPSDraw.restype = c_int
limnObjectPSDraw.argtypes = [POINTER(limnObject), POINTER(limnCamera), POINTER(Nrrd), POINTER(limnWindow)]
limnObjectPSDrawConcave = libteem.limnObjectPSDrawConcave
limnObjectPSDrawConcave.restype = c_int
limnObjectPSDrawConcave.argtypes = [POINTER(limnObject), POINTER(limnCamera), POINTER(Nrrd), POINTER(limnWindow)]
limnSplineTypeSpecNew = libteem.limnSplineTypeSpecNew
limnSplineTypeSpecNew.restype = POINTER(limnSplineTypeSpec)
limnSplineTypeSpecNew.argtypes = [c_int]
limnSplineTypeSpecNix = libteem.limnSplineTypeSpecNix
limnSplineTypeSpecNix.restype = POINTER(limnSplineTypeSpec)
limnSplineTypeSpecNix.argtypes = [POINTER(limnSplineTypeSpec)]
limnSplineNew = libteem.limnSplineNew
limnSplineNew.restype = POINTER(limnSpline)
limnSplineNew.argtypes = [POINTER(Nrrd), c_int, POINTER(limnSplineTypeSpec)]
limnSplineNix = libteem.limnSplineNix
limnSplineNix.restype = POINTER(limnSpline)
limnSplineNix.argtypes = [POINTER(limnSpline)]
limnSplineNrrdCleverFix = libteem.limnSplineNrrdCleverFix
limnSplineNrrdCleverFix.restype = c_int
limnSplineNrrdCleverFix.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_int]
limnSplineCleverNew = libteem.limnSplineCleverNew
limnSplineCleverNew.restype = POINTER(limnSpline)
limnSplineCleverNew.argtypes = [POINTER(Nrrd), c_int, POINTER(limnSplineTypeSpec)]
limnSplineUpdate = libteem.limnSplineUpdate
limnSplineUpdate.restype = c_int
limnSplineUpdate.argtypes = [POINTER(limnSpline), POINTER(Nrrd)]
limnSplineType = (POINTER(airEnum)).in_dll(libteem, 'limnSplineType')
limnSplineInfo = (POINTER(airEnum)).in_dll(libteem, 'limnSplineInfo')
limnSplineParse = libteem.limnSplineParse
limnSplineParse.restype = POINTER(limnSpline)
limnSplineParse.argtypes = [STRING]
limnSplineTypeSpecParse = libteem.limnSplineTypeSpecParse
limnSplineTypeSpecParse.restype = POINTER(limnSplineTypeSpec)
limnSplineTypeSpecParse.argtypes = [STRING]
limnHestSpline = (POINTER(hestCB)).in_dll(libteem, 'limnHestSpline')
limnHestSplineTypeSpec = (POINTER(hestCB)).in_dll(libteem, 'limnHestSplineTypeSpec')
limnSplineInfoSize = (c_uint * 7).in_dll(libteem, 'limnSplineInfoSize')
limnSplineTypeHasImplicitTangents = (c_int * 6).in_dll(libteem, 'limnSplineTypeHasImplicitTangents')
limnSplineNumPoints = libteem.limnSplineNumPoints
limnSplineNumPoints.restype = c_int
limnSplineNumPoints.argtypes = [POINTER(limnSpline)]
limnSplineMinT = libteem.limnSplineMinT
limnSplineMinT.restype = c_double
limnSplineMinT.argtypes = [POINTER(limnSpline)]
limnSplineMaxT = libteem.limnSplineMaxT
limnSplineMaxT.restype = c_double
limnSplineMaxT.argtypes = [POINTER(limnSpline)]
limnSplineBCSet = libteem.limnSplineBCSet
limnSplineBCSet.restype = None
limnSplineBCSet.argtypes = [POINTER(limnSpline), c_double, c_double]
limnSplineEvaluate = libteem.limnSplineEvaluate
limnSplineEvaluate.restype = None
limnSplineEvaluate.argtypes = [POINTER(c_double), POINTER(limnSpline), c_double]
limnSplineNrrdEvaluate = libteem.limnSplineNrrdEvaluate
limnSplineNrrdEvaluate.restype = c_int
limnSplineNrrdEvaluate.argtypes = [POINTER(Nrrd), POINTER(limnSpline), POINTER(Nrrd)]
limnSplineSample = libteem.limnSplineSample
limnSplineSample.restype = c_int
limnSplineSample.argtypes = [POINTER(Nrrd), POINTER(limnSpline), c_double, c_size_t, c_double]
meetPresent = (c_int).in_dll(libteem, 'meetPresent')
meetBiffKey = (STRING).in_dll(libteem, 'meetBiffKey')
meetAirEnumAll = libteem.meetAirEnumAll
meetAirEnumAll.restype = POINTER(POINTER(airEnum))
meetAirEnumAll.argtypes = []
meetAirEnumAllPrint = libteem.meetAirEnumAllPrint
meetAirEnumAllPrint.restype = None
meetAirEnumAllPrint.argtypes = [POINTER(FILE)]
meetAirEnumAllCheck = libteem.meetAirEnumAllCheck
meetAirEnumAllCheck.restype = c_int
meetAirEnumAllCheck.argtypes = []
meetTeemLibs = (STRING * 0).in_dll(libteem, 'meetTeemLibs')
meetNrrdKernelAll = libteem.meetNrrdKernelAll
meetNrrdKernelAll.restype = POINTER(POINTER(NrrdKernel))
meetNrrdKernelAll.argtypes = []
meetNrrdKernelAllCheck = libteem.meetNrrdKernelAllCheck
meetNrrdKernelAllCheck.restype = c_int
meetNrrdKernelAllCheck.argtypes = []
meetGageKindParse = libteem.meetGageKindParse
meetGageKindParse.restype = POINTER(gageKind)
meetGageKindParse.argtypes = [STRING]
meetConstGageKindParse = libteem.meetConstGageKindParse
meetConstGageKindParse.restype = POINTER(gageKind)
meetConstGageKindParse.argtypes = [STRING]
meetHestGageKind = (POINTER(hestCB)).in_dll(libteem, 'meetHestGageKind')
meetHestConstGageKind = (POINTER(hestCB)).in_dll(libteem, 'meetHestConstGageKind')
# meetPullVol: ctypes mirror of the C struct describing one volume used by
# the pull system.  _pack_ / _fields_ are assigned after class creation — the
# generated-binding pattern that also permits self-referential field types.
class meetPullVol(Structure):
    pass
meetPullVol._pack_ = 4
meetPullVol._fields_ = [
    ('kind', POINTER(gageKind)),
    ('fileName', STRING),
    ('volName', STRING),
    ('sbp', POINTER(gageStackBlurParm)),
    ('leeching', c_int),
    ('derivNormSS', c_int),
    ('recomputedSS', c_int),
    ('derivNormBiasSS', c_double),
    ('nin', POINTER(Nrrd)),
    ('ninSS', POINTER(POINTER(Nrrd))),
]
# meetPullInfo: ctypes mirror of the C struct describing one pull "info"
# specification (same deferred-_fields_ pattern as above).
class meetPullInfo(Structure):
    pass
meetPullInfo._pack_ = 4
meetPullInfo._fields_ = [
    ('info', c_int),
    ('source', c_int),
    ('prop', c_int),
    ('constraint', c_int),
    ('volName', STRING),
    ('itemStr', STRING),
    ('zero', c_double),
    ('scale', c_double),
]
# Lifecycle / parsing prototypes operating on meetPullVol and meetPullInfo.
meetPullVolNew = libteem.meetPullVolNew
meetPullVolNew.restype = POINTER(meetPullVol)
meetPullVolNew.argtypes = []
meetPullVolCopy = libteem.meetPullVolCopy
meetPullVolCopy.restype = POINTER(meetPullVol)
meetPullVolCopy.argtypes = [POINTER(meetPullVol)]
meetPullVolParse = libteem.meetPullVolParse
meetPullVolParse.restype = c_int
meetPullVolParse.argtypes = [POINTER(meetPullVol), STRING]
meetPullVolLeechable = libteem.meetPullVolLeechable
meetPullVolLeechable.restype = c_int
meetPullVolLeechable.argtypes = [POINTER(meetPullVol), POINTER(meetPullVol), POINTER(c_int), STRING]
meetPullVolNix = libteem.meetPullVolNix
meetPullVolNix.restype = POINTER(meetPullVol)
meetPullVolNix.argtypes = [POINTER(meetPullVol)]
meetHestPullVol = (POINTER(hestCB)).in_dll(libteem, 'meetHestPullVol')
meetPullVolStackBlurParmFinishMulti = libteem.meetPullVolStackBlurParmFinishMulti
meetPullVolStackBlurParmFinishMulti.restype = c_int
meetPullVolStackBlurParmFinishMulti.argtypes = [POINTER(POINTER(meetPullVol)), c_uint, POINTER(c_uint), POINTER(c_uint), POINTER(NrrdKernelSpec), POINTER(NrrdBoundarySpec)]
meetPullVolLoadMulti = libteem.meetPullVolLoadMulti
meetPullVolLoadMulti.restype = c_int
meetPullVolLoadMulti.argtypes = [POINTER(POINTER(meetPullVol)), c_uint, STRING, c_int]
# Opaque forward declaration: pullContext's fields are never filled in here,
# so from Python it is usable only behind a POINTER().
class pullContext_t(Structure):
    pass
pullContext = pullContext_t
meetPullVolAddMulti = libteem.meetPullVolAddMulti
meetPullVolAddMulti.restype = c_int
meetPullVolAddMulti.argtypes = [POINTER(pullContext), POINTER(POINTER(meetPullVol)), c_uint, POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec)]
meetPullInfoNew = libteem.meetPullInfoNew
meetPullInfoNew.restype = POINTER(meetPullInfo)
meetPullInfoNew.argtypes = []
meetPullInfoNix = libteem.meetPullInfoNix
meetPullInfoNix.restype = POINTER(meetPullInfo)
meetPullInfoNix.argtypes = [POINTER(meetPullInfo)]
meetPullInfoParse = libteem.meetPullInfoParse
meetPullInfoParse.restype = c_int
meetPullInfoParse.argtypes = [POINTER(meetPullInfo), STRING]
meetHestPullInfo = (POINTER(hestCB)).in_dll(libteem, 'meetHestPullInfo')
meetPullInfoAddMulti = libteem.meetPullInfoAddMulti
meetPullInfoAddMulti.restype = c_int
meetPullInfoAddMulti.argtypes = [POINTER(pullContext), POINTER(POINTER(meetPullInfo)), c_uint]
# --- "mite" symbols: struct mirrors for the mite renderer ---------------------
# mite_t is the renderer's scalar type; here it maps to C double.
mite_t = c_double
# miteUser: user-facing parameter block for a mite rendering run.
# _pack_/_fields_ assigned after class creation (generated-binding pattern).
class miteUser(Structure):
    pass
miteUser._pack_ = 4
miteUser._fields_ = [
    ('nsin', POINTER(Nrrd)),
    ('nvin', POINTER(Nrrd)),
    ('ntin', POINTER(Nrrd)),
    ('ntxf', POINTER(POINTER(Nrrd))),
    ('nout', POINTER(Nrrd)),
    ('debug', POINTER(c_double)),
    ('debugArr', POINTER(airArray)),
    ('ndebug', POINTER(Nrrd)),
    ('debugIdx', c_int),
    ('ntxfNum', c_int),
    ('shadeStr', c_char * 257),
    ('normalStr', c_char * 257),
    ('rangeInit', mite_t * 9),
    ('refStep', c_double),
    ('rayStep', c_double),
    ('opacMatters', c_double),
    ('opacNear1', c_double),
    ('hctx', POINTER(hooverContext)),
    ('fakeFrom', c_double * 3),
    ('vectorD', c_double * 3),
    ('ksp', POINTER(NrrdKernelSpec) * 8),
    ('shape', POINTER(gageShape)),
    ('gctx0', POINTER(gageContext)),
    ('lit', POINTER(limnLight)),
    ('normalSide', c_int),
    ('verbUi', c_int),
    ('verbVi', c_int),
    ('umop', POINTER(airArray)),
    ('rendTime', c_double),
    ('sampRate', c_double),
]
# miteShadeSpec: shading-method specification (no _pack_ set; default packing).
class miteShadeSpec(Structure):
    pass
miteShadeSpec._fields_ = [
    ('method', c_int),
    ('vec0', POINTER(gageItemSpec)),
    ('vec1', POINTER(gageItemSpec)),
    ('scl0', POINTER(gageItemSpec)),
    ('scl1', POINTER(gageItemSpec)),
]
# miteRender and miteThread_t are mutually referencing, so miteThread_t is
# forward-declared empty here and its _fields_ are filled in further below.
class miteRender(Structure):
    pass
class miteThread_t(Structure):
    pass
miteRender._pack_ = 4
miteRender._fields_ = [
    ('ntxf', POINTER(POINTER(Nrrd))),
    ('ntxfNum', c_int),
    ('sclPvlIdx', c_int),
    ('vecPvlIdx', c_int),
    ('tenPvlIdx', c_int),
    ('shadeSpec', POINTER(miteShadeSpec)),
    ('normalSpec', POINTER(gageItemSpec)),
    ('time0', c_double),
    ('queryMite', gageQuery),
    ('queryMiteNonzero', c_int),
    ('tt', POINTER(miteThread_t) * 512),
    ('rmop', POINTER(airArray)),
]
# miteStage: one stage of the per-sample transfer-function pipeline.
class miteStage(Structure):
    pass
miteStage._pack_ = 4
miteStage._fields_ = [
    ('val', POINTER(c_double)),
    ('size', c_int),
    ('op', c_int),
    ('qn', CFUNCTYPE(c_uint, POINTER(c_double))),  # C function pointer field
    ('min', c_double),
    ('max', c_double),
    ('data', POINTER(mite_t)),
    ('rangeIdx', c_int * 9),
    ('rangeNum', c_int),
    ('label', STRING),
]
# Per-thread rendering state; completes the forward declaration above.
miteThread_t._pack_ = 4
miteThread_t._fields_ = [
    ('gctx', POINTER(gageContext)),
    ('ansScl', POINTER(c_double)),
    ('nPerp', POINTER(c_double)),
    ('geomTens', POINTER(c_double)),
    ('ansVec', POINTER(c_double)),
    ('ansTen', POINTER(c_double)),
    ('ansMiteVal', POINTER(c_double)),
    ('directAnsMiteVal', POINTER(POINTER(c_double))),
    ('_normal', POINTER(c_double)),
    ('shadeVec0', POINTER(c_double)),
    ('shadeVec1', POINTER(c_double)),
    ('shadeScl0', POINTER(c_double)),
    ('shadeScl1', POINTER(c_double)),
    ('verbose', c_int),
    ('skip', c_int),
    ('thrid', c_int),
    ('ui', c_int),
    ('vi', c_int),
    ('raySample', c_int),
    ('samples', c_int),
    ('stage', POINTER(miteStage)),
    ('stageNum', c_int),
    ('range', mite_t * 9),
    ('rayStep', mite_t),
    ('V', mite_t * 3),
    ('RR', mite_t),
    ('GG', mite_t),
    ('BB', mite_t),
    ('TT', mite_t),
    ('ZZ', mite_t),
    ('rmop', POINTER(airArray)),
]
# Public alias matching the C typedef name.
miteThread = miteThread_t
# --- mite globals (defaults, enums) and function prototypes -------------------
mitePresent = (c_int).in_dll(libteem, 'mitePresent')
miteBiffKey = (STRING).in_dll(libteem, 'miteBiffKey')
miteDefRefStep = (c_double).in_dll(libteem, 'miteDefRefStep')
miteDefRenorm = (c_int).in_dll(libteem, 'miteDefRenorm')
miteDefNormalSide = (c_int).in_dll(libteem, 'miteDefNormalSide')
miteDefOpacNear1 = (c_double).in_dll(libteem, 'miteDefOpacNear1')
miteDefOpacMatters = (c_double).in_dll(libteem, 'miteDefOpacMatters')
miteVal = (POINTER(airEnum)).in_dll(libteem, 'miteVal')
miteValGageKind = (POINTER(gageKind)).in_dll(libteem, 'miteValGageKind')
miteStageOp = (POINTER(airEnum)).in_dll(libteem, 'miteStageOp')
miteRangeChar = (c_char * 10).in_dll(libteem, 'miteRangeChar')
miteVariableParse = libteem.miteVariableParse
miteVariableParse.restype = c_int
miteVariableParse.argtypes = [POINTER(gageItemSpec), STRING]
miteVariablePrint = libteem.miteVariablePrint
miteVariablePrint.restype = None
miteVariablePrint.argtypes = [STRING, POINTER(gageItemSpec)]
miteNtxfCheck = libteem.miteNtxfCheck
miteNtxfCheck.restype = c_int
miteNtxfCheck.argtypes = [POINTER(Nrrd)]
miteQueryAdd = libteem.miteQueryAdd
miteQueryAdd.restype = None
miteQueryAdd.argtypes = [POINTER(c_ubyte), POINTER(c_ubyte), POINTER(c_ubyte), POINTER(c_ubyte), POINTER(gageItemSpec)]
miteUserNew = libteem.miteUserNew
miteUserNew.restype = POINTER(miteUser)
miteUserNew.argtypes = []
miteUserNix = libteem.miteUserNix
miteUserNix.restype = POINTER(miteUser)
miteUserNix.argtypes = [POINTER(miteUser)]
miteShadeSpecNew = libteem.miteShadeSpecNew
miteShadeSpecNew.restype = POINTER(miteShadeSpec)
miteShadeSpecNew.argtypes = []
miteShadeSpecNix = libteem.miteShadeSpecNix
miteShadeSpecNix.restype = POINTER(miteShadeSpec)
miteShadeSpecNix.argtypes = [POINTER(miteShadeSpec)]
miteShadeSpecParse = libteem.miteShadeSpecParse
miteShadeSpecParse.restype = c_int
miteShadeSpecParse.argtypes = [POINTER(miteShadeSpec), STRING]
miteShadeSpecPrint = libteem.miteShadeSpecPrint
miteShadeSpecPrint.restype = None
miteShadeSpecPrint.argtypes = [STRING, POINTER(miteShadeSpec)]
miteShadeSpecQueryAdd = libteem.miteShadeSpecQueryAdd
miteShadeSpecQueryAdd.restype = None
miteShadeSpecQueryAdd.argtypes = [POINTER(c_ubyte), POINTER(c_ubyte), POINTER(c_ubyte), POINTER(c_ubyte), POINTER(miteShadeSpec)]
# Renderer callback set: begin/end for render, thread, and ray, plus the
# per-sample callback — prototypes matching the hoover callback signatures.
miteRenderBegin = libteem.miteRenderBegin
miteRenderBegin.restype = c_int
miteRenderBegin.argtypes = [POINTER(POINTER(miteRender)), POINTER(miteUser)]
miteRenderEnd = libteem.miteRenderEnd
miteRenderEnd.restype = c_int
miteRenderEnd.argtypes = [POINTER(miteRender), POINTER(miteUser)]
miteThreadNew = libteem.miteThreadNew
miteThreadNew.restype = POINTER(miteThread)
miteThreadNew.argtypes = []
miteThreadNix = libteem.miteThreadNix
miteThreadNix.restype = POINTER(miteThread)
miteThreadNix.argtypes = [POINTER(miteThread)]
miteThreadBegin = libteem.miteThreadBegin
miteThreadBegin.restype = c_int
miteThreadBegin.argtypes = [POINTER(POINTER(miteThread)), POINTER(miteRender), POINTER(miteUser), c_int]
miteThreadEnd = libteem.miteThreadEnd
miteThreadEnd.restype = c_int
miteThreadEnd.argtypes = [POINTER(miteThread), POINTER(miteRender), POINTER(miteUser)]
miteRayBegin = libteem.miteRayBegin
miteRayBegin.restype = c_int
miteRayBegin.argtypes = [POINTER(miteThread), POINTER(miteRender), POINTER(miteUser), c_int, c_int, c_double, POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
miteSample = libteem.miteSample
miteSample.restype = c_double
miteSample.argtypes = [POINTER(miteThread), POINTER(miteRender), POINTER(miteUser), c_int, c_double, c_int, POINTER(c_double), POINTER(c_double)]
miteRayEnd = libteem.miteRayEnd
miteRayEnd.restype = c_int
miteRayEnd.argtypes = [POINTER(miteThread), POINTER(miteRender), POINTER(miteUser)]
# --- "moss" symbols: 2-D image sampler and transform prototypes ---------------
# mossSampler: ctypes mirror of the moss image-sampling state struct.
# _pack_/_fields_ assigned after class creation (generated-binding pattern).
class mossSampler(Structure):
    pass
mossSampler._pack_ = 4
mossSampler._fields_ = [
    ('image', POINTER(Nrrd)),
    ('kernel', POINTER(NrrdKernel)),
    ('kparm', c_double * 8),
    ('ivc', POINTER(c_float)),
    ('xFslw', POINTER(c_double)),
    ('yFslw', POINTER(c_double)),
    ('fdiam', c_int),
    ('ncol', c_int),
    ('xIdx', POINTER(c_int)),
    ('yIdx', POINTER(c_int)),
    ('bg', POINTER(c_float)),
    ('boundary', c_int),
    ('flag', c_int * 2),
]
# Library globals read via in_dll.
mossBiffKey = (STRING).in_dll(libteem, 'mossBiffKey')
mossDefBoundary = (c_int).in_dll(libteem, 'mossDefBoundary')
mossDefCenter = (c_int).in_dll(libteem, 'mossDefCenter')
mossVerbose = (c_int).in_dll(libteem, 'mossVerbose')
mossPresent = (c_int).in_dll(libteem, 'mossPresent')
# Sampler lifecycle and configuration prototypes.
mossSamplerNew = libteem.mossSamplerNew
mossSamplerNew.restype = POINTER(mossSampler)
mossSamplerNew.argtypes = []
mossSamplerFill = libteem.mossSamplerFill
mossSamplerFill.restype = c_int
mossSamplerFill.argtypes = [POINTER(mossSampler), c_int, c_int]
mossSamplerEmpty = libteem.mossSamplerEmpty
mossSamplerEmpty.restype = None
mossSamplerEmpty.argtypes = [POINTER(mossSampler)]
mossSamplerNix = libteem.mossSamplerNix
mossSamplerNix.restype = POINTER(mossSampler)
mossSamplerNix.argtypes = [POINTER(mossSampler)]
mossImageCheck = libteem.mossImageCheck
mossImageCheck.restype = c_int
mossImageCheck.argtypes = [POINTER(Nrrd)]
mossImageAlloc = libteem.mossImageAlloc
mossImageAlloc.restype = c_int
mossImageAlloc.argtypes = [POINTER(Nrrd), c_int, c_int, c_int, c_int]
mossSamplerImageSet = libteem.mossSamplerImageSet
mossSamplerImageSet.restype = c_int
mossSamplerImageSet.argtypes = [POINTER(mossSampler), POINTER(Nrrd), POINTER(c_float)]
mossSamplerKernelSet = libteem.mossSamplerKernelSet
mossSamplerKernelSet.restype = c_int
mossSamplerKernelSet.argtypes = [POINTER(mossSampler), POINTER(NrrdKernel), POINTER(c_double)]
mossSamplerUpdate = libteem.mossSamplerUpdate
mossSamplerUpdate.restype = c_int
mossSamplerUpdate.argtypes = [POINTER(mossSampler)]
mossSamplerSample = libteem.mossSamplerSample
mossSamplerSample.restype = c_int
mossSamplerSample.argtypes = [POINTER(c_float), POINTER(mossSampler), c_double, c_double]
mossHestTransform = (POINTER(hestCB)).in_dll(libteem, 'mossHestTransform')
mossHestOrigin = (POINTER(hestCB)).in_dll(libteem, 'mossHestOrigin')
# 2-D homogeneous-matrix helpers; most return the destination pointer.
mossMatPrint = libteem.mossMatPrint
mossMatPrint.restype = None
mossMatPrint.argtypes = [POINTER(FILE), POINTER(c_double)]
mossMatRightMultiply = libteem.mossMatRightMultiply
mossMatRightMultiply.restype = POINTER(c_double)
mossMatRightMultiply.argtypes = [POINTER(c_double), POINTER(c_double)]
mossMatLeftMultiply = libteem.mossMatLeftMultiply
mossMatLeftMultiply.restype = POINTER(c_double)
mossMatLeftMultiply.argtypes = [POINTER(c_double), POINTER(c_double)]
mossMatInvert = libteem.mossMatInvert
mossMatInvert.restype = POINTER(c_double)
mossMatInvert.argtypes = [POINTER(c_double), POINTER(c_double)]
mossMatIdentitySet = libteem.mossMatIdentitySet
mossMatIdentitySet.restype = POINTER(c_double)
mossMatIdentitySet.argtypes = [POINTER(c_double)]
mossMatTranslateSet = libteem.mossMatTranslateSet
mossMatTranslateSet.restype = POINTER(c_double)
mossMatTranslateSet.argtypes = [POINTER(c_double), c_double, c_double]
mossMatRotateSet = libteem.mossMatRotateSet
mossMatRotateSet.restype = POINTER(c_double)
mossMatRotateSet.argtypes = [POINTER(c_double), c_double]
mossMatFlipSet = libteem.mossMatFlipSet
mossMatFlipSet.restype = POINTER(c_double)
mossMatFlipSet.argtypes = [POINTER(c_double), c_double]
mossMatShearSet = libteem.mossMatShearSet
mossMatShearSet.restype = POINTER(c_double)
mossMatShearSet.argtypes = [POINTER(c_double), c_double, c_double]
mossMatScaleSet = libteem.mossMatScaleSet
mossMatScaleSet.restype = POINTER(c_double)
mossMatScaleSet.argtypes = [POINTER(c_double), c_double, c_double]
mossMatApply = libteem.mossMatApply
mossMatApply.restype = None
mossMatApply.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, c_double]
mossLinearTransform = libteem.mossLinearTransform
mossLinearTransform.restype = c_int
mossLinearTransform.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_float), POINTER(c_double), POINTER(mossSampler), c_double, c_double, c_double, c_double, c_int, c_int]
# --- "nrrd" symbols: library-wide defaults, state, env-var names, enums -------
# Writable library globals, read/assigned via in_dll (mutating the .value of
# these objects changes the corresponding C global inside libteem).
nrrdDefaultWriteEncodingType = (c_int).in_dll(libteem, 'nrrdDefaultWriteEncodingType')
nrrdDefaultWriteBareText = (c_int).in_dll(libteem, 'nrrdDefaultWriteBareText')
nrrdDefaultWriteCharsPerLine = (c_uint).in_dll(libteem, 'nrrdDefaultWriteCharsPerLine')
nrrdDefaultWriteValsPerLine = (c_uint).in_dll(libteem, 'nrrdDefaultWriteValsPerLine')
nrrdDefaultResampleBoundary = (c_int).in_dll(libteem, 'nrrdDefaultResampleBoundary')
nrrdDefaultResampleType = (c_int).in_dll(libteem, 'nrrdDefaultResampleType')
nrrdDefaultResampleRenormalize = (c_int).in_dll(libteem, 'nrrdDefaultResampleRenormalize')
nrrdDefaultResampleRound = (c_int).in_dll(libteem, 'nrrdDefaultResampleRound')
nrrdDefaultResampleClamp = (c_int).in_dll(libteem, 'nrrdDefaultResampleClamp')
nrrdDefaultResampleCheap = (c_int).in_dll(libteem, 'nrrdDefaultResampleCheap')
nrrdDefaultResamplePadValue = (c_double).in_dll(libteem, 'nrrdDefaultResamplePadValue')
nrrdDefaultResampleNonExistent = (c_int).in_dll(libteem, 'nrrdDefaultResampleNonExistent')
nrrdDefaultKernelParm0 = (c_double).in_dll(libteem, 'nrrdDefaultKernelParm0')
nrrdDefaultCenter = (c_int).in_dll(libteem, 'nrrdDefaultCenter')
nrrdDefaultSpacing = (c_double).in_dll(libteem, 'nrrdDefaultSpacing')
nrrdStateVerboseIO = (c_int).in_dll(libteem, 'nrrdStateVerboseIO')
nrrdStateKeyValuePairsPropagate = (c_int).in_dll(libteem, 'nrrdStateKeyValuePairsPropagate')
nrrdStateBlind8BitRange = (c_int).in_dll(libteem, 'nrrdStateBlind8BitRange')
nrrdStateMeasureType = (c_int).in_dll(libteem, 'nrrdStateMeasureType')
nrrdStateMeasureModeBins = (c_int).in_dll(libteem, 'nrrdStateMeasureModeBins')
nrrdStateMeasureHistoType = (c_int).in_dll(libteem, 'nrrdStateMeasureHistoType')
nrrdStateDisallowIntegerNonExist = (c_int).in_dll(libteem, 'nrrdStateDisallowIntegerNonExist')
nrrdStateAlwaysSetContent = (c_int).in_dll(libteem, 'nrrdStateAlwaysSetContent')
nrrdStateDisableContent = (c_int).in_dll(libteem, 'nrrdStateDisableContent')
nrrdStateUnknownContent = (STRING).in_dll(libteem, 'nrrdStateUnknownContent')
nrrdStateGrayscaleImage3D = (c_int).in_dll(libteem, 'nrrdStateGrayscaleImage3D')
nrrdStateKeyValueReturnInternalPointers = (c_int).in_dll(libteem, 'nrrdStateKeyValueReturnInternalPointers')
nrrdStateKindNoop = (c_int).in_dll(libteem, 'nrrdStateKindNoop')
# Names of the environment variables libteem consults for the above settings.
nrrdEnvVarDefaultWriteEncodingType = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultWriteEncodingType')
nrrdEnvVarDefaultWriteBareText = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultWriteBareText')
nrrdEnvVarDefaultWriteBareTextOld = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultWriteBareTextOld')
nrrdEnvVarDefaultCenter = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultCenter')
nrrdEnvVarDefaultCenterOld = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultCenterOld')
nrrdEnvVarDefaultWriteCharsPerLine = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultWriteCharsPerLine')
nrrdEnvVarDefaultWriteValsPerLine = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultWriteValsPerLine')
nrrdEnvVarDefaultKernelParm0 = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultKernelParm0')
nrrdEnvVarDefaultSpacing = (STRING).in_dll(libteem, 'nrrdEnvVarDefaultSpacing')
nrrdEnvVarStateKindNoop = (STRING).in_dll(libteem, 'nrrdEnvVarStateKindNoop')
nrrdEnvVarStateVerboseIO = (STRING).in_dll(libteem, 'nrrdEnvVarStateVerboseIO')
nrrdEnvVarStateKeyValuePairsPropagate = (STRING).in_dll(libteem, 'nrrdEnvVarStateKeyValuePairsPropagate')
nrrdEnvVarStateBlind8BitRange = (STRING).in_dll(libteem, 'nrrdEnvVarStateBlind8BitRange')
nrrdEnvVarStateAlwaysSetContent = (STRING).in_dll(libteem, 'nrrdEnvVarStateAlwaysSetContent')
nrrdEnvVarStateDisableContent = (STRING).in_dll(libteem, 'nrrdEnvVarStateDisableContent')
nrrdEnvVarStateMeasureType = (STRING).in_dll(libteem, 'nrrdEnvVarStateMeasureType')
nrrdEnvVarStateMeasureModeBins = (STRING).in_dll(libteem, 'nrrdEnvVarStateMeasureModeBins')
nrrdEnvVarStateMeasureHistoType = (STRING).in_dll(libteem, 'nrrdEnvVarStateMeasureHistoType')
nrrdEnvVarStateGrayscaleImage3D = (STRING).in_dll(libteem, 'nrrdEnvVarStateGrayscaleImage3D')
# getenv helpers: parse an environment value into the out-parameter(s).
nrrdGetenvBool = libteem.nrrdGetenvBool
nrrdGetenvBool.restype = c_int
nrrdGetenvBool.argtypes = [POINTER(c_int), POINTER(STRING), STRING]
nrrdGetenvEnum = libteem.nrrdGetenvEnum
nrrdGetenvEnum.restype = c_int
nrrdGetenvEnum.argtypes = [POINTER(c_int), POINTER(STRING), POINTER(airEnum), STRING]
nrrdGetenvInt = libteem.nrrdGetenvInt
nrrdGetenvInt.restype = c_int
nrrdGetenvInt.argtypes = [POINTER(c_int), POINTER(STRING), STRING]
nrrdGetenvUInt = libteem.nrrdGetenvUInt
nrrdGetenvUInt.restype = c_int
nrrdGetenvUInt.argtypes = [POINTER(c_uint), POINTER(STRING), STRING]
nrrdGetenvDouble = libteem.nrrdGetenvDouble
nrrdGetenvDouble.restype = c_int
nrrdGetenvDouble.argtypes = [POINTER(c_double), POINTER(STRING), STRING]
nrrdGetenvString = libteem.nrrdGetenvString
nrrdGetenvString.restype = c_int
nrrdGetenvString.argtypes = [POINTER(STRING), STRING]
nrrdDefaultGetenv = libteem.nrrdDefaultGetenv
nrrdDefaultGetenv.restype = None
nrrdDefaultGetenv.argtypes = []
nrrdStateGetenv = libteem.nrrdStateGetenv
nrrdStateGetenv.restype = None
nrrdStateGetenv.argtypes = []
# airEnum handles for the nrrd enumerations.
nrrdFormatType = (POINTER(airEnum)).in_dll(libteem, 'nrrdFormatType')
nrrdType = (POINTER(airEnum)).in_dll(libteem, 'nrrdType')
nrrdEncodingType = (POINTER(airEnum)).in_dll(libteem, 'nrrdEncodingType')
nrrdCenter = (POINTER(airEnum)).in_dll(libteem, 'nrrdCenter')
nrrdKind = (POINTER(airEnum)).in_dll(libteem, 'nrrdKind')
nrrdField = (POINTER(airEnum)).in_dll(libteem, 'nrrdField')
nrrdSpace = (POINTER(airEnum)).in_dll(libteem, 'nrrdSpace')
nrrdSpacingStatus = (POINTER(airEnum)).in_dll(libteem, 'nrrdSpacingStatus')
nrrdBoundary = (POINTER(airEnum)).in_dll(libteem, 'nrrdBoundary')
nrrdMeasure = (POINTER(airEnum)).in_dll(libteem, 'nrrdMeasure')
nrrdUnaryOp = (POINTER(airEnum)).in_dll(libteem, 'nrrdUnaryOp')
nrrdBinaryOp = (POINTER(airEnum)).in_dll(libteem, 'nrrdBinaryOp')
nrrdTernaryOp = (POINTER(airEnum)).in_dll(libteem, 'nrrdTernaryOp')
nrrdFFTWPlanRigor = (POINTER(airEnum)).in_dll(libteem, 'nrrdFFTWPlanRigor')
nrrdResampleNonExistent = (POINTER(airEnum)).in_dll(libteem, 'nrrdResampleNonExistent')
# Per-type lookup tables, indexed by the 12 nrrd type codes.
nrrdTypePrintfStr = (c_char * 129 * 12).in_dll(libteem, 'nrrdTypePrintfStr')
nrrdTypeSize = (c_size_t * 12).in_dll(libteem, 'nrrdTypeSize')
nrrdTypeMin = (c_double * 12).in_dll(libteem, 'nrrdTypeMin')
nrrdTypeMax = (c_double * 12).in_dll(libteem, 'nrrdTypeMax')
nrrdTypeIsIntegral = (c_int * 12).in_dll(libteem, 'nrrdTypeIsIntegral')
nrrdTypeIsUnsigned = (c_int * 12).in_dll(libteem, 'nrrdTypeIsUnsigned')
nrrdPresent = (c_int).in_dll(libteem, 'nrrdPresent')
# --- nrrd specification-object prototypes -------------------------------------
# NrrdBoundarySpec: new/nix/copy lifecycle plus check/parse/print/compare.
nrrdBoundarySpecNew = libteem.nrrdBoundarySpecNew
nrrdBoundarySpecNew.restype = POINTER(NrrdBoundarySpec)
nrrdBoundarySpecNew.argtypes = []
nrrdBoundarySpecNix = libteem.nrrdBoundarySpecNix
nrrdBoundarySpecNix.restype = POINTER(NrrdBoundarySpec)
nrrdBoundarySpecNix.argtypes = [POINTER(NrrdBoundarySpec)]
nrrdBoundarySpecCopy = libteem.nrrdBoundarySpecCopy
nrrdBoundarySpecCopy.restype = POINTER(NrrdBoundarySpec)
nrrdBoundarySpecCopy.argtypes = [POINTER(NrrdBoundarySpec)]
nrrdBoundarySpecCheck = libteem.nrrdBoundarySpecCheck
nrrdBoundarySpecCheck.restype = c_int
nrrdBoundarySpecCheck.argtypes = [POINTER(NrrdBoundarySpec)]
nrrdBoundarySpecParse = libteem.nrrdBoundarySpecParse
nrrdBoundarySpecParse.restype = c_int
nrrdBoundarySpecParse.argtypes = [POINTER(NrrdBoundarySpec), STRING]
nrrdBoundarySpecSprint = libteem.nrrdBoundarySpecSprint
nrrdBoundarySpecSprint.restype = c_int
nrrdBoundarySpecSprint.argtypes = [STRING, POINTER(NrrdBoundarySpec)]
nrrdBoundarySpecCompare = libteem.nrrdBoundarySpecCompare
nrrdBoundarySpecCompare.restype = c_int
nrrdBoundarySpecCompare.argtypes = [POINTER(NrrdBoundarySpec), POINTER(NrrdBoundarySpec), POINTER(c_int), STRING]
# Opaque forward declaration for the I/O state struct: fields are never
# filled in, so it is only used behind a POINTER() from Python.
class NrrdIoState_t(Structure):
    pass
NrrdIoState = NrrdIoState_t
nrrdIoStateNew = libteem.nrrdIoStateNew
nrrdIoStateNew.restype = POINTER(NrrdIoState)
nrrdIoStateNew.argtypes = []
nrrdIoStateInit = libteem.nrrdIoStateInit
nrrdIoStateInit.restype = None
nrrdIoStateInit.argtypes = [POINTER(NrrdIoState)]
nrrdIoStateNix = libteem.nrrdIoStateNix
nrrdIoStateNix.restype = POINTER(NrrdIoState)
nrrdIoStateNix.argtypes = [POINTER(NrrdIoState)]
# Another opaque struct: resample-info objects are created/freed by libteem.
class NrrdResampleInfo(Structure):
    pass
nrrdResampleInfoNew = libteem.nrrdResampleInfoNew
nrrdResampleInfoNew.restype = POINTER(NrrdResampleInfo)
nrrdResampleInfoNew.argtypes = []
nrrdResampleInfoNix = libteem.nrrdResampleInfoNix
nrrdResampleInfoNix.restype = POINTER(NrrdResampleInfo)
nrrdResampleInfoNix.argtypes = [POINTER(NrrdResampleInfo)]
# NrrdKernelSpec lifecycle and accessors.
nrrdKernelSpecNew = libteem.nrrdKernelSpecNew
nrrdKernelSpecNew.restype = POINTER(NrrdKernelSpec)
nrrdKernelSpecNew.argtypes = []
nrrdKernelSpecCopy = libteem.nrrdKernelSpecCopy
nrrdKernelSpecCopy.restype = POINTER(NrrdKernelSpec)
nrrdKernelSpecCopy.argtypes = [POINTER(NrrdKernelSpec)]
nrrdKernelSpecSet = libteem.nrrdKernelSpecSet
nrrdKernelSpecSet.restype = None
nrrdKernelSpecSet.argtypes = [POINTER(NrrdKernelSpec), POINTER(NrrdKernel), POINTER(c_double)]
nrrdKernelParmSet = libteem.nrrdKernelParmSet
nrrdKernelParmSet.restype = None
nrrdKernelParmSet.argtypes = [POINTER(POINTER(NrrdKernel)), POINTER(c_double), POINTER(NrrdKernelSpec)]
nrrdKernelSpecNix = libteem.nrrdKernelSpecNix
nrrdKernelSpecNix.restype = POINTER(NrrdKernelSpec)
nrrdKernelSpecNix.argtypes = [POINTER(NrrdKernelSpec)]
# --- nrrd core object prototypes ----------------------------------------------
# Nrrd lifecycle: init/new/nix/empty/nuke, wrapping and (maybe-)allocating
# data buffers.  *_nva variants take an array of sizes; *_va are varargs.
nrrdInit = libteem.nrrdInit
nrrdInit.restype = None
nrrdInit.argtypes = [POINTER(Nrrd)]
nrrdNew = libteem.nrrdNew
nrrdNew.restype = POINTER(Nrrd)
nrrdNew.argtypes = []
nrrdNix = libteem.nrrdNix
nrrdNix.restype = POINTER(Nrrd)
nrrdNix.argtypes = [POINTER(Nrrd)]
nrrdEmpty = libteem.nrrdEmpty
nrrdEmpty.restype = POINTER(Nrrd)
nrrdEmpty.argtypes = [POINTER(Nrrd)]
nrrdNuke = libteem.nrrdNuke
nrrdNuke.restype = POINTER(Nrrd)
nrrdNuke.argtypes = [POINTER(Nrrd)]
nrrdWrap_nva = libteem.nrrdWrap_nva
nrrdWrap_nva.restype = c_int
nrrdWrap_nva.argtypes = [POINTER(Nrrd), c_void_p, c_int, c_uint, POINTER(c_size_t)]
# NOTE(review): varargs C functions — ctypes cannot fully describe the
# trailing per-axis size arguments, so argtypes list only the fixed part.
nrrdWrap_va = libteem.nrrdWrap_va
nrrdWrap_va.restype = c_int
nrrdWrap_va.argtypes = [POINTER(Nrrd), c_void_p, c_int, c_uint]
nrrdBasicInfoInit = libteem.nrrdBasicInfoInit
nrrdBasicInfoInit.restype = None
nrrdBasicInfoInit.argtypes = [POINTER(Nrrd), c_int]
nrrdBasicInfoCopy = libteem.nrrdBasicInfoCopy
nrrdBasicInfoCopy.restype = c_int
nrrdBasicInfoCopy.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
nrrdCopy = libteem.nrrdCopy
nrrdCopy.restype = c_int
nrrdCopy.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
nrrdAlloc_nva = libteem.nrrdAlloc_nva
nrrdAlloc_nva.restype = c_int
nrrdAlloc_nva.argtypes = [POINTER(Nrrd), c_int, c_uint, POINTER(c_size_t)]
nrrdAlloc_va = libteem.nrrdAlloc_va
nrrdAlloc_va.restype = c_int
nrrdAlloc_va.argtypes = [POINTER(Nrrd), c_int, c_uint]
nrrdMaybeAlloc_nva = libteem.nrrdMaybeAlloc_nva
nrrdMaybeAlloc_nva.restype = c_int
nrrdMaybeAlloc_nva.argtypes = [POINTER(Nrrd), c_int, c_uint, POINTER(c_size_t)]
nrrdMaybeAlloc_va = libteem.nrrdMaybeAlloc_va
nrrdMaybeAlloc_va.restype = c_int
nrrdMaybeAlloc_va.argtypes = [POINTER(Nrrd), c_int, c_uint]
nrrdCompare = libteem.nrrdCompare
nrrdCompare.restype = c_int
nrrdCompare.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_double, POINTER(c_int), STRING]
nrrdPPM = libteem.nrrdPPM
nrrdPPM.restype = c_int
nrrdPPM.argtypes = [POINTER(Nrrd), c_size_t, c_size_t]
nrrdPGM = libteem.nrrdPGM
nrrdPGM.restype = c_int
nrrdPGM.argtypes = [POINTER(Nrrd), c_size_t, c_size_t]
nrrdKindIsDomain = libteem.nrrdKindIsDomain
nrrdKindIsDomain.restype = c_int
nrrdKindIsDomain.argtypes = [c_int]
nrrdKindSize = libteem.nrrdKindSize
nrrdKindSize.restype = c_uint
nrrdKindSize.argtypes = [c_int]
# Per-axis info: copy/set/get (again with _nva array and _va varargs forms),
# position/index conversion, and range queries.
nrrdAxisInfoCopy = libteem.nrrdAxisInfoCopy
nrrdAxisInfoCopy.restype = c_int
nrrdAxisInfoCopy.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_int), c_int]
nrrdAxisInfoSet_nva = libteem.nrrdAxisInfoSet_nva
nrrdAxisInfoSet_nva.restype = None
nrrdAxisInfoSet_nva.argtypes = [POINTER(Nrrd), c_int, c_void_p]
nrrdAxisInfoSet_va = libteem.nrrdAxisInfoSet_va
nrrdAxisInfoSet_va.restype = None
nrrdAxisInfoSet_va.argtypes = [POINTER(Nrrd), c_int]
nrrdAxisInfoGet_nva = libteem.nrrdAxisInfoGet_nva
nrrdAxisInfoGet_nva.restype = None
nrrdAxisInfoGet_nva.argtypes = [POINTER(Nrrd), c_int, c_void_p]
nrrdAxisInfoGet_va = libteem.nrrdAxisInfoGet_va
nrrdAxisInfoGet_va.restype = None
nrrdAxisInfoGet_va.argtypes = [POINTER(Nrrd), c_int]
nrrdAxisInfoPos = libteem.nrrdAxisInfoPos
nrrdAxisInfoPos.restype = c_double
nrrdAxisInfoPos.argtypes = [POINTER(Nrrd), c_uint, c_double]
nrrdAxisInfoIdx = libteem.nrrdAxisInfoIdx
nrrdAxisInfoIdx.restype = c_double
nrrdAxisInfoIdx.argtypes = [POINTER(Nrrd), c_uint, c_double]
nrrdAxisInfoPosRange = libteem.nrrdAxisInfoPosRange
nrrdAxisInfoPosRange.restype = None
nrrdAxisInfoPosRange.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(Nrrd), c_uint, c_double, c_double]
nrrdAxisInfoIdxRange = libteem.nrrdAxisInfoIdxRange
nrrdAxisInfoIdxRange.restype = None
nrrdAxisInfoIdxRange.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(Nrrd), c_uint, c_double, c_double]
nrrdAxisInfoSpacingSet = libteem.nrrdAxisInfoSpacingSet
nrrdAxisInfoSpacingSet.restype = None
nrrdAxisInfoSpacingSet.argtypes = [POINTER(Nrrd), c_uint]
nrrdAxisInfoMinMaxSet = libteem.nrrdAxisInfoMinMaxSet
nrrdAxisInfoMinMaxSet.restype = None
nrrdAxisInfoMinMaxSet.argtypes = [POINTER(Nrrd), c_uint, c_int]
nrrdAxisInfoCompare = libteem.nrrdAxisInfoCompare
nrrdAxisInfoCompare.restype = c_int
nrrdAxisInfoCompare.argtypes = [POINTER(NrrdAxisInfo), POINTER(NrrdAxisInfo), POINTER(c_int), STRING]
nrrdDomainAxesGet = libteem.nrrdDomainAxesGet
nrrdDomainAxesGet.restype = c_uint
nrrdDomainAxesGet.argtypes = [POINTER(Nrrd), POINTER(c_uint)]
nrrdRangeAxesGet = libteem.nrrdRangeAxesGet
nrrdRangeAxesGet.restype = c_uint
nrrdRangeAxesGet.argtypes = [POINTER(Nrrd), POINTER(c_uint)]
nrrdSpatialAxesGet = libteem.nrrdSpatialAxesGet
nrrdSpatialAxesGet.restype = c_uint
nrrdSpatialAxesGet.argtypes = [POINTER(Nrrd), POINTER(c_uint)]
nrrdNonSpatialAxesGet = libteem.nrrdNonSpatialAxesGet
nrrdNonSpatialAxesGet.restype = c_uint
nrrdNonSpatialAxesGet.argtypes = [POINTER(Nrrd), POINTER(c_uint)]
nrrdSpacingCalculate = libteem.nrrdSpacingCalculate
nrrdSpacingCalculate.restype = c_int
nrrdSpacingCalculate.argtypes = [POINTER(Nrrd), c_uint, POINTER(c_double), POINTER(c_double)]
nrrdOrientationReduce = libteem.nrrdOrientationReduce
nrrdOrientationReduce.restype = c_int
nrrdOrientationReduce.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
nrrdBiffKey = (STRING).in_dll(libteem, 'nrrdBiffKey')
# World-space / orientation metadata accessors.
nrrdSpaceDimension = libteem.nrrdSpaceDimension
nrrdSpaceDimension.restype = c_uint
nrrdSpaceDimension.argtypes = [c_int]
nrrdSpaceSet = libteem.nrrdSpaceSet
nrrdSpaceSet.restype = c_int
nrrdSpaceSet.argtypes = [POINTER(Nrrd), c_int]
nrrdSpaceDimensionSet = libteem.nrrdSpaceDimensionSet
nrrdSpaceDimensionSet.restype = c_int
nrrdSpaceDimensionSet.argtypes = [POINTER(Nrrd), c_uint]
nrrdSpaceOriginGet = libteem.nrrdSpaceOriginGet
nrrdSpaceOriginGet.restype = c_uint
nrrdSpaceOriginGet.argtypes = [POINTER(Nrrd), POINTER(c_double)]
nrrdSpaceOriginSet = libteem.nrrdSpaceOriginSet
nrrdSpaceOriginSet.restype = c_int
nrrdSpaceOriginSet.argtypes = [POINTER(Nrrd), POINTER(c_double)]
nrrdOriginCalculate = libteem.nrrdOriginCalculate
nrrdOriginCalculate.restype = c_int
nrrdOriginCalculate.argtypes = [POINTER(Nrrd), POINTER(c_uint), c_uint, c_int, POINTER(c_double)]
nrrdContentSet_va = libteem.nrrdContentSet_va
nrrdContentSet_va.restype = c_int
nrrdContentSet_va.argtypes = [POINTER(Nrrd), STRING, POINTER(Nrrd), STRING]
nrrdDescribe = libteem.nrrdDescribe
nrrdDescribe.restype = None
nrrdDescribe.argtypes = [POINTER(FILE), POINTER(Nrrd)]
nrrdCheck = libteem.nrrdCheck
nrrdCheck.restype = c_int
nrrdCheck.argtypes = [POINTER(Nrrd)]
nrrdElementSize = libteem.nrrdElementSize
nrrdElementSize.restype = c_size_t
nrrdElementSize.argtypes = [POINTER(Nrrd)]
nrrdElementNumber = libteem.nrrdElementNumber
nrrdElementNumber.restype = c_size_t
nrrdElementNumber.argtypes = [POINTER(Nrrd)]
nrrdSanity = libteem.nrrdSanity
nrrdSanity.restype = c_int
nrrdSanity.argtypes = []
nrrdSameSize = libteem.nrrdSameSize
nrrdSameSize.restype = c_int
nrrdSameSize.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
nrrdSpaceVecCopy = libteem.nrrdSpaceVecCopy
nrrdSpaceVecCopy.restype = None
nrrdSpaceVecCopy.argtypes = [POINTER(c_double), POINTER(c_double)]
nrrdSpaceVecScaleAdd2 = libteem.nrrdSpaceVecScaleAdd2
nrrdSpaceVecScaleAdd2.restype = None
nrrdSpaceVecScaleAdd2.argtypes = [POINTER(c_double), c_double, POINTER(c_double), c_double, POINTER(c_double)]
nrrdSpaceVecScale = libteem.nrrdSpaceVecScale
nrrdSpaceVecScale.restype = None
nrrdSpaceVecScale.argtypes = [POINTER(c_double), c_double, POINTER(c_double)]
nrrdSpaceVecNorm = libteem.nrrdSpaceVecNorm
nrrdSpaceVecNorm.restype = c_double
nrrdSpaceVecNorm.argtypes = [c_uint, POINTER(c_double)]
nrrdSpaceVecExists = libteem.nrrdSpaceVecExists
nrrdSpaceVecExists.restype = c_int
nrrdSpaceVecExists.argtypes = [c_uint, POINTER(c_double)]
nrrdSpaceVecSetNaN = libteem.nrrdSpaceVecSetNaN
nrrdSpaceVecSetNaN.restype = None
nrrdSpaceVecSetNaN.argtypes = [POINTER(c_double)]
nrrdSanityOrDie = libteem.nrrdSanityOrDie
nrrdSanityOrDie.restype = None
nrrdSanityOrDie.argtypes = [STRING]
nrrdSpaceVecSetZero = libteem.nrrdSpaceVecSetZero
nrrdSpaceVecSetZero.restype = None
nrrdSpaceVecSetZero.argtypes = [POINTER(c_double)]
nrrdZeroSet = libteem.nrrdZeroSet
nrrdZeroSet.restype = None
nrrdZeroSet.argtypes = [POINTER(Nrrd)]
nrrdCommentAdd = libteem.nrrdCommentAdd
nrrdCommentAdd.restype = c_int
nrrdCommentAdd.argtypes = [POINTER(Nrrd), STRING]
nrrdCommentClear = libteem.nrrdCommentClear
nrrdCommentClear.restype = None
nrrdCommentClear.argtypes = [POINTER(Nrrd)]
nrrdCommentCopy = libteem.nrrdCommentCopy
nrrdCommentCopy.restype = c_int
nrrdCommentCopy.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
nrrdKeyValueSize = libteem.nrrdKeyValueSize
nrrdKeyValueSize.restype = c_uint
nrrdKeyValueSize.argtypes = [POINTER(Nrrd)]
nrrdKeyValueAdd = libteem.nrrdKeyValueAdd
nrrdKeyValueAdd.restype = c_int
nrrdKeyValueAdd.argtypes = [POINTER(Nrrd), STRING, STRING]
nrrdKeyValueGet = libteem.nrrdKeyValueGet
nrrdKeyValueGet.restype = STRING
nrrdKeyValueGet.argtypes = [POINTER(Nrrd), STRING]
nrrdKeyValueIndex = libteem.nrrdKeyValueIndex
nrrdKeyValueIndex.restype = None
nrrdKeyValueIndex.argtypes = [POINTER(Nrrd), POINTER(STRING), POINTER(STRING), c_uint]
nrrdKeyValueErase = libteem.nrrdKeyValueErase
nrrdKeyValueErase.restype = c_int
nrrdKeyValueErase.argtypes = [POINTER(Nrrd), STRING]
nrrdKeyValueClear = libteem.nrrdKeyValueClear
nrrdKeyValueClear.restype = None
nrrdKeyValueClear.argtypes = [POINTER(Nrrd)]
nrrdKeyValueCopy = libteem.nrrdKeyValueCopy
nrrdKeyValueCopy.restype = c_int
nrrdKeyValueCopy.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
nrrdSwapEndian = libteem.nrrdSwapEndian
nrrdSwapEndian.restype = None
nrrdSwapEndian.argtypes = [POINTER(Nrrd)]
class NrrdFormat(Structure):
    # Opaque-for-now mirror of the C NrrdFormat struct; its _fields_ are
    # assigned below (the two-step pattern lets the struct reference itself
    # and other late-defined types).
    pass
# Field layout of the encoding descriptor (name/suffix buffers plus C
# function pointers for availability check and raw read/write).
NrrdEncoding_t._fields_ = [
    ('name', c_char * 129),
    ('suffix', c_char * 129),
    ('endianMatters', c_int),
    ('isCompression', c_int),
    ('available', CFUNCTYPE(c_int)),
    ('read', CFUNCTYPE(c_int, POINTER(FILE), c_void_p, c_size_t, POINTER(Nrrd), POINTER(NrrdIoState_t))),
    ('write', CFUNCTYPE(c_int, POINTER(FILE), c_void_p, c_size_t, POINTER(Nrrd), POINTER(NrrdIoState_t))),
]
# Field layout of the file-format descriptor: flags plus callbacks used to
# recognize a file (by name or content) and to read/write it.
NrrdFormat._fields_ = [
    ('name', c_char * 129),
    ('isImage', c_int),
    ('readable', c_int),
    ('usesDIO', c_int),
    ('available', CFUNCTYPE(c_int)),
    ('nameLooksLike', CFUNCTYPE(c_int, STRING)),
    ('fitsInto', CFUNCTYPE(c_int, POINTER(Nrrd), POINTER(NrrdEncoding_t), c_int)),
    ('contentStartsLike', CFUNCTYPE(c_int, POINTER(NrrdIoState_t))),
    ('read', CFUNCTYPE(c_int, POINTER(FILE), POINTER(Nrrd), POINTER(NrrdIoState_t))),
    ('write', CFUNCTYPE(c_int, POINTER(FILE), POINTER(Nrrd), POINTER(NrrdIoState_t))),
]
nrrdFormatNRRD = (POINTER(NrrdFormat)).in_dll(libteem, 'nrrdFormatNRRD')
nrrdFormatPNM = (POINTER(NrrdFormat)).in_dll(libteem, 'nrrdFormatPNM')
nrrdFormatPNG = (POINTER(NrrdFormat)).in_dll(libteem, 'nrrdFormatPNG')
nrrdFormatVTK = (POINTER(NrrdFormat)).in_dll(libteem, 'nrrdFormatVTK')
nrrdFormatText = (POINTER(NrrdFormat)).in_dll(libteem, 'nrrdFormatText')
nrrdFormatEPS = (POINTER(NrrdFormat)).in_dll(libteem, 'nrrdFormatEPS')
nrrdFormatUnknown = (POINTER(NrrdFormat)).in_dll(libteem, 'nrrdFormatUnknown')
nrrdFormatArray = (POINTER(NrrdFormat) * 7).in_dll(libteem, 'nrrdFormatArray')
nrrdEncodingRaw = (POINTER(NrrdEncoding)).in_dll(libteem, 'nrrdEncodingRaw')
nrrdEncodingAscii = (POINTER(NrrdEncoding)).in_dll(libteem, 'nrrdEncodingAscii')
nrrdEncodingHex = (POINTER(NrrdEncoding)).in_dll(libteem, 'nrrdEncodingHex')
nrrdEncodingGzip = (POINTER(NrrdEncoding)).in_dll(libteem, 'nrrdEncodingGzip')
nrrdEncodingBzip2 = (POINTER(NrrdEncoding)).in_dll(libteem, 'nrrdEncodingBzip2')
nrrdEncodingUnknown = (POINTER(NrrdEncoding)).in_dll(libteem, 'nrrdEncodingUnknown')
nrrdEncodingArray = (POINTER(NrrdEncoding) * 6).in_dll(libteem, 'nrrdEncodingArray')
nrrdFieldInfoParse = (CFUNCTYPE(c_int, POINTER(FILE), POINTER(Nrrd), POINTER(NrrdIoState), c_int) * 33).in_dll(libteem, 'nrrdFieldInfoParse')
nrrdLineSkip = libteem.nrrdLineSkip
nrrdLineSkip.restype = c_int
nrrdLineSkip.argtypes = [POINTER(FILE), POINTER(NrrdIoState)]
nrrdByteSkip = libteem.nrrdByteSkip
nrrdByteSkip.restype = c_int
nrrdByteSkip.argtypes = [POINTER(FILE), POINTER(Nrrd), POINTER(NrrdIoState)]
nrrdLoad = libteem.nrrdLoad
nrrdLoad.restype = c_int
nrrdLoad.argtypes = [POINTER(Nrrd), STRING, POINTER(NrrdIoState)]
nrrdLoadMulti = libteem.nrrdLoadMulti
nrrdLoadMulti.restype = c_int
nrrdLoadMulti.argtypes = [POINTER(POINTER(Nrrd)), c_uint, STRING, c_uint, POINTER(NrrdIoState)]
nrrdRead = libteem.nrrdRead
nrrdRead.restype = c_int
nrrdRead.argtypes = [POINTER(Nrrd), POINTER(FILE), POINTER(NrrdIoState)]
nrrdStringRead = libteem.nrrdStringRead
nrrdStringRead.restype = c_int
nrrdStringRead.argtypes = [POINTER(Nrrd), STRING, POINTER(NrrdIoState)]
nrrdIoStateSet = libteem.nrrdIoStateSet
nrrdIoStateSet.restype = c_int
nrrdIoStateSet.argtypes = [POINTER(NrrdIoState), c_int, c_int]
nrrdIoStateEncodingSet = libteem.nrrdIoStateEncodingSet
nrrdIoStateEncodingSet.restype = c_int
nrrdIoStateEncodingSet.argtypes = [POINTER(NrrdIoState), POINTER(NrrdEncoding)]
nrrdIoStateFormatSet = libteem.nrrdIoStateFormatSet
nrrdIoStateFormatSet.restype = c_int
nrrdIoStateFormatSet.argtypes = [POINTER(NrrdIoState), POINTER(NrrdFormat)]
nrrdIoStateGet = libteem.nrrdIoStateGet
nrrdIoStateGet.restype = c_int
nrrdIoStateGet.argtypes = [POINTER(NrrdIoState), c_int]
nrrdIoStateEncodingGet = libteem.nrrdIoStateEncodingGet
nrrdIoStateEncodingGet.restype = POINTER(NrrdEncoding)
nrrdIoStateEncodingGet.argtypes = [POINTER(NrrdIoState)]
nrrdIoStateFormatGet = libteem.nrrdIoStateFormatGet
nrrdIoStateFormatGet.restype = POINTER(NrrdFormat)
nrrdIoStateFormatGet.argtypes = [POINTER(NrrdIoState)]
nrrdSave = libteem.nrrdSave
nrrdSave.restype = c_int
nrrdSave.argtypes = [STRING, POINTER(Nrrd), POINTER(NrrdIoState)]
nrrdSaveMulti = libteem.nrrdSaveMulti
nrrdSaveMulti.restype = c_int
nrrdSaveMulti.argtypes = [STRING, POINTER(POINTER(Nrrd)), c_uint, c_uint, POINTER(NrrdIoState)]
nrrdWrite = libteem.nrrdWrite
nrrdWrite.restype = c_int
nrrdWrite.argtypes = [POINTER(FILE), POINTER(Nrrd), POINTER(NrrdIoState)]
nrrdStringWrite = libteem.nrrdStringWrite
nrrdStringWrite.restype = c_int
nrrdStringWrite.argtypes = [POINTER(STRING), POINTER(Nrrd), POINTER(NrrdIoState)]
# Per-type accessor tables: each global is a 12-entry array of C function
# pointers, presumably indexed by the nrrdType enum (12 scalar types) --
# load/store a single value, lookup/insert by linear index, print, min/max
# scan, and comparators.
nrrdDLoad = (CFUNCTYPE(c_double, c_void_p) * 12).in_dll(libteem, 'nrrdDLoad')
nrrdFLoad = (CFUNCTYPE(c_float, c_void_p) * 12).in_dll(libteem, 'nrrdFLoad')
nrrdILoad = (CFUNCTYPE(c_int, c_void_p) * 12).in_dll(libteem, 'nrrdILoad')
nrrdUILoad = (CFUNCTYPE(c_uint, c_void_p) * 12).in_dll(libteem, 'nrrdUILoad')
nrrdDStore = (CFUNCTYPE(c_double, c_void_p, c_double) * 12).in_dll(libteem, 'nrrdDStore')
nrrdFStore = (CFUNCTYPE(c_float, c_void_p, c_float) * 12).in_dll(libteem, 'nrrdFStore')
nrrdIStore = (CFUNCTYPE(c_int, c_void_p, c_int) * 12).in_dll(libteem, 'nrrdIStore')
nrrdUIStore = (CFUNCTYPE(c_uint, c_void_p, c_uint) * 12).in_dll(libteem, 'nrrdUIStore')
nrrdDLookup = (CFUNCTYPE(c_double, c_void_p, c_size_t) * 12).in_dll(libteem, 'nrrdDLookup')
nrrdFLookup = (CFUNCTYPE(c_float, c_void_p, c_size_t) * 12).in_dll(libteem, 'nrrdFLookup')
nrrdILookup = (CFUNCTYPE(c_int, c_void_p, c_size_t) * 12).in_dll(libteem, 'nrrdILookup')
nrrdUILookup = (CFUNCTYPE(c_uint, c_void_p, c_size_t) * 12).in_dll(libteem, 'nrrdUILookup')
nrrdDInsert = (CFUNCTYPE(c_double, c_void_p, c_size_t, c_double) * 12).in_dll(libteem, 'nrrdDInsert')
nrrdFInsert = (CFUNCTYPE(c_float, c_void_p, c_size_t, c_float) * 12).in_dll(libteem, 'nrrdFInsert')
nrrdIInsert = (CFUNCTYPE(c_int, c_void_p, c_size_t, c_int) * 12).in_dll(libteem, 'nrrdIInsert')
nrrdUIInsert = (CFUNCTYPE(c_uint, c_void_p, c_size_t, c_uint) * 12).in_dll(libteem, 'nrrdUIInsert')
nrrdSprint = (CFUNCTYPE(c_int, STRING, c_void_p) * 12).in_dll(libteem, 'nrrdSprint')
nrrdFprint = (CFUNCTYPE(c_int, POINTER(FILE), c_void_p) * 12).in_dll(libteem, 'nrrdFprint')
nrrdMinMaxExactFind = (CFUNCTYPE(None, c_void_p, c_void_p, POINTER(c_int), POINTER(Nrrd)) * 12).in_dll(libteem, 'nrrdMinMaxExactFind')
nrrdValCompare = (CFUNCTYPE(c_int, c_void_p, c_void_p) * 12).in_dll(libteem, 'nrrdValCompare')
nrrdValCompareInv = (CFUNCTYPE(c_int, c_void_p, c_void_p) * 12).in_dll(libteem, 'nrrdValCompareInv')
# Compare two raw arrays of a given type, with tolerance and explanation
# string output.
nrrdArrayCompare = libteem.nrrdArrayCompare
nrrdArrayCompare.restype = c_int
nrrdArrayCompare.argtypes = [c_int, c_void_p, c_void_p, c_size_t, c_double, POINTER(c_int), STRING]
# Axis-manipulation operations.  Common convention throughout: first two
# Nrrd pointers are (output, input); c_int return is a status code.
nrrdAxesInsert = libteem.nrrdAxesInsert
nrrdAxesInsert.restype = c_int
nrrdAxesInsert.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint]
nrrdInvertPerm = libteem.nrrdInvertPerm
nrrdInvertPerm.restype = c_int
nrrdInvertPerm.argtypes = [POINTER(c_uint), POINTER(c_uint), c_uint]
nrrdAxesPermute = libteem.nrrdAxesPermute
nrrdAxesPermute.restype = c_int
nrrdAxesPermute.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_uint)]
nrrdShuffle = libteem.nrrdShuffle
nrrdShuffle.restype = c_int
nrrdShuffle.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, POINTER(c_size_t)]
nrrdAxesSwap = libteem.nrrdAxesSwap
nrrdAxesSwap.restype = c_int
nrrdAxesSwap.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_uint]
nrrdFlip = libteem.nrrdFlip
nrrdFlip.restype = c_int
nrrdFlip.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint]
nrrdJoin = libteem.nrrdJoin
nrrdJoin.restype = c_int
nrrdJoin.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), c_uint, c_uint, c_int]
nrrdReshape_va = libteem.nrrdReshape_va
nrrdReshape_va.restype = c_int
nrrdReshape_va.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint]
nrrdReshape_nva = libteem.nrrdReshape_nva
nrrdReshape_nva.restype = c_int
nrrdReshape_nva.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, POINTER(c_size_t)]
nrrdAxesSplit = libteem.nrrdAxesSplit
nrrdAxesSplit.restype = c_int
nrrdAxesSplit.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_size_t, c_size_t]
nrrdAxesDelete = libteem.nrrdAxesDelete
nrrdAxesDelete.restype = c_int
nrrdAxesDelete.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint]
nrrdAxesMerge = libteem.nrrdAxesMerge
nrrdAxesMerge.restype = c_int
nrrdAxesMerge.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint]
nrrdBlock = libteem.nrrdBlock
nrrdBlock.restype = c_int
nrrdBlock.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
nrrdUnblock = libteem.nrrdUnblock
nrrdUnblock.restype = c_int
nrrdUnblock.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
nrrdTile2D = libteem.nrrdTile2D
nrrdTile2D.restype = c_int
nrrdTile2D.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_uint, c_uint, c_size_t, c_size_t]
nrrdUntile2D = libteem.nrrdUntile2D
nrrdUntile2D.restype = c_int
nrrdUntile2D.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_uint, c_uint, c_size_t, c_size_t]
# hest (Teem's command-line parsing library) callback descriptors exported
# by libteem for parsing nrrd-related option values.
nrrdHestNrrd = (POINTER(hestCB)).in_dll(libteem, 'nrrdHestNrrd')
nrrdHestKernelSpec = (POINTER(hestCB)).in_dll(libteem, 'nrrdHestKernelSpec')
nrrdHestBoundarySpec = (POINTER(hestCB)).in_dll(libteem, 'nrrdHestBoundarySpec')
nrrdHestIter = (POINTER(hestCB)).in_dll(libteem, 'nrrdHestIter')
class NrrdIter(Structure):
    # Opaque handle for nrrd's value iterator (fields never declared on the
    # Python side; only pointers to it are passed around).
    pass
# Iterator lifecycle and configuration: New/Nix allocate and free; an
# iterator can be set to a constant value or to traverse a Nrrd
# ("SetOwnNrrd" vs "SetNrrd" differ in ownership on the C side --
# TODO confirm against Teem docs).
nrrdIterNew = libteem.nrrdIterNew
nrrdIterNew.restype = POINTER(NrrdIter)
nrrdIterNew.argtypes = []
nrrdIterSetValue = libteem.nrrdIterSetValue
nrrdIterSetValue.restype = None
nrrdIterSetValue.argtypes = [POINTER(NrrdIter), c_double]
nrrdIterSetNrrd = libteem.nrrdIterSetNrrd
nrrdIterSetNrrd.restype = None
nrrdIterSetNrrd.argtypes = [POINTER(NrrdIter), POINTER(Nrrd)]
nrrdIterSetOwnNrrd = libteem.nrrdIterSetOwnNrrd
nrrdIterSetOwnNrrd.restype = None
nrrdIterSetOwnNrrd.argtypes = [POINTER(NrrdIter), POINTER(Nrrd)]
nrrdIterValue = libteem.nrrdIterValue
nrrdIterValue.restype = c_double
nrrdIterValue.argtypes = [POINTER(NrrdIter)]
nrrdIterContent = libteem.nrrdIterContent
nrrdIterContent.restype = STRING
nrrdIterContent.argtypes = [POINTER(NrrdIter)]
nrrdIterNix = libteem.nrrdIterNix
nrrdIterNix.restype = POINTER(NrrdIter)
nrrdIterNix.argtypes = [POINTER(NrrdIter)]
class NrrdRange(Structure):
    # Mirror of the C NrrdRange struct: a (min, max) value range plus a
    # flag recording whether non-existent (NaN/inf) values were seen.
    # _fields_ are assigned below after nrrdRangeNew is declared.
    pass
nrrdRangeNew = libteem.nrrdRangeNew
nrrdRangeNew.restype = POINTER(NrrdRange)
nrrdRangeNew.argtypes = [c_double, c_double]
# _pack_ = 4 matches the C compiler's struct packing for this build.
NrrdRange._pack_ = 4
NrrdRange._fields_ = [
    ('min', c_double),
    ('max', c_double),
    ('hasNonExist', c_int),
]
nrrdRangeCopy = libteem.nrrdRangeCopy
nrrdRangeCopy.restype = POINTER(NrrdRange)
nrrdRangeCopy.argtypes = [POINTER(NrrdRange)]
nrrdRangeNix = libteem.nrrdRangeNix
nrrdRangeNix.restype = POINTER(NrrdRange)
nrrdRangeNix.argtypes = [POINTER(NrrdRange)]
nrrdRangeReset = libteem.nrrdRangeReset
nrrdRangeReset.restype = None
nrrdRangeReset.argtypes = [POINTER(NrrdRange)]
# Populate a NrrdRange from a Nrrd's data (plain, percentile-based, and
# "safe" variants; percentile variants return a status code).
nrrdRangeSet = libteem.nrrdRangeSet
nrrdRangeSet.restype = None
nrrdRangeSet.argtypes = [POINTER(NrrdRange), POINTER(Nrrd), c_int]
nrrdRangePercentileSet = libteem.nrrdRangePercentileSet
nrrdRangePercentileSet.restype = c_int
nrrdRangePercentileSet.argtypes = [POINTER(NrrdRange), POINTER(Nrrd), c_double, c_double, c_uint, c_int]
nrrdRangePercentileFromStringSet = libteem.nrrdRangePercentileFromStringSet
nrrdRangePercentileFromStringSet.restype = c_int
nrrdRangePercentileFromStringSet.argtypes = [POINTER(NrrdRange), POINTER(Nrrd), STRING, STRING, c_uint, c_int]
nrrdRangeSafeSet = libteem.nrrdRangeSafeSet
nrrdRangeSafeSet.restype = None
nrrdRangeSafeSet.argtypes = [POINTER(NrrdRange), POINTER(Nrrd), c_int]
nrrdRangeNewSet = libteem.nrrdRangeNewSet
nrrdRangeNewSet.restype = POINTER(NrrdRange)
nrrdRangeNewSet.argtypes = [POINTER(Nrrd), c_int]
nrrdHasNonExist = libteem.nrrdHasNonExist
nrrdHasNonExist.restype = c_int
nrrdHasNonExist.argtypes = [POINTER(Nrrd)]
# Per-type clamping function tables (12 entries, one per scalar type).
nrrdFClamp = (CFUNCTYPE(c_float, c_float) * 12).in_dll(libteem, 'nrrdFClamp')
nrrdDClamp = (CFUNCTYPE(c_double, c_double) * 12).in_dll(libteem, 'nrrdDClamp')
# Type conversion and (un)quantization.
nrrdConvert = libteem.nrrdConvert
nrrdConvert.restype = c_int
nrrdConvert.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
nrrdClampConvert = libteem.nrrdClampConvert
nrrdClampConvert.restype = c_int
nrrdClampConvert.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
nrrdCastClampRound = libteem.nrrdCastClampRound
nrrdCastClampRound.restype = c_int
nrrdCastClampRound.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_int, c_int]
nrrdQuantize = libteem.nrrdQuantize
nrrdQuantize.restype = c_int
nrrdQuantize.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), c_uint]
nrrdUnquantize = libteem.nrrdUnquantize
nrrdUnquantize.restype = c_int
nrrdUnquantize.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
nrrdHistoEq = libteem.nrrdHistoEq
nrrdHistoEq.restype = c_int
nrrdHistoEq.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(POINTER(Nrrd)), c_uint, c_uint, c_float]
# 1D/2D lookup tables, regular and irregular maps, and value substitution.
nrrdApply1DLut = libteem.nrrdApply1DLut
nrrdApply1DLut.restype = c_int
nrrdApply1DLut.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), POINTER(Nrrd), c_int, c_int]
nrrdApplyMulti1DLut = libteem.nrrdApplyMulti1DLut
nrrdApplyMulti1DLut.restype = c_int
nrrdApplyMulti1DLut.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), POINTER(Nrrd), c_int, c_int]
nrrdApply1DRegMap = libteem.nrrdApply1DRegMap
nrrdApply1DRegMap.restype = c_int
nrrdApply1DRegMap.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), POINTER(Nrrd), c_int, c_int]
nrrdApplyMulti1DRegMap = libteem.nrrdApplyMulti1DRegMap
nrrdApplyMulti1DRegMap.restype = c_int
nrrdApplyMulti1DRegMap.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), POINTER(Nrrd), c_int, c_int]
nrrd1DIrregMapCheck = libteem.nrrd1DIrregMapCheck
nrrd1DIrregMapCheck.restype = c_int
nrrd1DIrregMapCheck.argtypes = [POINTER(Nrrd)]
nrrd1DIrregAclGenerate = libteem.nrrd1DIrregAclGenerate
nrrd1DIrregAclGenerate.restype = c_int
nrrd1DIrregAclGenerate.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_size_t]
nrrd1DIrregAclCheck = libteem.nrrd1DIrregAclCheck
nrrd1DIrregAclCheck.restype = c_int
nrrd1DIrregAclCheck.argtypes = [POINTER(Nrrd)]
nrrdApply1DIrregMap = libteem.nrrdApply1DIrregMap
nrrdApply1DIrregMap.restype = c_int
nrrdApply1DIrregMap.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), POINTER(Nrrd), POINTER(Nrrd), c_int, c_int]
nrrdApply1DSubstitution = libteem.nrrdApply1DSubstitution
nrrdApply1DSubstitution.restype = c_int
nrrdApply1DSubstitution.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
nrrdApply2DLut = libteem.nrrdApply2DLut
nrrdApply2DLut.restype = c_int
nrrdApply2DLut.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, POINTER(NrrdRange), POINTER(NrrdRange), POINTER(Nrrd), c_int, c_int, c_int]
# Slicing, cropping, padding, splicing, insetting.
nrrdSlice = libteem.nrrdSlice
nrrdSlice.restype = c_int
nrrdSlice.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_size_t]
nrrdCrop = libteem.nrrdCrop
nrrdCrop.restype = c_int
nrrdCrop.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_size_t), POINTER(c_size_t)]
nrrdSliceSelect = libteem.nrrdSliceSelect
nrrdSliceSelect.restype = c_int
nrrdSliceSelect.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), c_uint, POINTER(Nrrd), c_double]
nrrdSample_nva = libteem.nrrdSample_nva
nrrdSample_nva.restype = c_int
nrrdSample_nva.argtypes = [c_void_p, POINTER(Nrrd), POINTER(c_size_t)]
nrrdSample_va = libteem.nrrdSample_va
nrrdSample_va.restype = c_int
nrrdSample_va.argtypes = [c_void_p, POINTER(Nrrd)]
nrrdSimpleCrop = libteem.nrrdSimpleCrop
nrrdSimpleCrop.restype = c_int
nrrdSimpleCrop.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint]
nrrdCropAuto = libteem.nrrdCropAuto
nrrdCropAuto.restype = c_int
nrrdCropAuto.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_size_t), POINTER(c_size_t), POINTER(c_uint), c_uint, c_int, c_double, c_int]
nrrdSplice = libteem.nrrdSplice
nrrdSplice.restype = c_int
nrrdSplice.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), c_uint, c_size_t]
# Padding variants use ptrdiff_t bounds so margins may be negative.
nrrdPad_nva = libteem.nrrdPad_nva
nrrdPad_nva.restype = c_int
nrrdPad_nva.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(ptrdiff_t), POINTER(ptrdiff_t), c_int, c_double]
nrrdPad_va = libteem.nrrdPad_va
nrrdPad_va.restype = c_int
nrrdPad_va.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(ptrdiff_t), POINTER(ptrdiff_t), c_int]
nrrdSimplePad_nva = libteem.nrrdSimplePad_nva
nrrdSimplePad_nva.restype = c_int
nrrdSimplePad_nva.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_int, c_double]
nrrdSimplePad_va = libteem.nrrdSimplePad_va
nrrdSimplePad_va.restype = c_int
nrrdSimplePad_va.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_int]
nrrdInset = libteem.nrrdInset
nrrdInset.restype = c_int
nrrdInset.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), POINTER(c_size_t)]
# Per-measure scanline reduction table (31 entries) used by nrrdProject.
nrrdMeasureLine = (CFUNCTYPE(None, c_void_p, c_int, c_void_p, c_int, c_size_t, c_double, c_double) * 31).in_dll(libteem, 'nrrdMeasureLine')
nrrdProject = libteem.nrrdProject
nrrdProject.restype = c_int
nrrdProject.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_int, c_int]
# Histogram construction, drawing, per-axis and joint variants, plus
# Otsu threshold computation.
nrrdHisto = libteem.nrrdHisto
nrrdHisto.restype = c_int
nrrdHisto.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), POINTER(Nrrd), c_size_t, c_int]
nrrdHistoCheck = libteem.nrrdHistoCheck
nrrdHistoCheck.restype = c_int
nrrdHistoCheck.argtypes = [POINTER(Nrrd)]
nrrdHistoDraw = libteem.nrrdHistoDraw
nrrdHistoDraw.restype = c_int
nrrdHistoDraw.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_size_t, c_int, c_double]
nrrdHistoAxis = libteem.nrrdHistoAxis
nrrdHistoAxis.restype = c_int
nrrdHistoAxis.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), c_uint, c_size_t, c_int]
nrrdHistoJoint = libteem.nrrdHistoJoint
nrrdHistoJoint.restype = c_int
nrrdHistoJoint.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(POINTER(NrrdRange)), c_uint, POINTER(Nrrd), POINTER(c_size_t), c_int, POINTER(c_int)]
nrrdHistoThresholdOtsu = libteem.nrrdHistoThresholdOtsu
nrrdHistoThresholdOtsu.restype = c_int
nrrdHistoThresholdOtsu.argtypes = [POINTER(c_double), POINTER(Nrrd), c_double]
# Arithmetic on whole nrrds: gamma correction, unary/binary/ternary
# elementwise ops (the c_int selects the operation), affine remapping, and
# NrrdIter-based variants that let any operand be a constant or a Nrrd.
nrrdArithGamma = libteem.nrrdArithGamma
nrrdArithGamma.restype = c_int
nrrdArithGamma.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdRange), c_double]
nrrdArithUnaryOp = libteem.nrrdArithUnaryOp
nrrdArithUnaryOp.restype = c_int
nrrdArithUnaryOp.argtypes = [POINTER(Nrrd), c_int, POINTER(Nrrd)]
nrrdArithBinaryOp = libteem.nrrdArithBinaryOp
nrrdArithBinaryOp.restype = c_int
nrrdArithBinaryOp.argtypes = [POINTER(Nrrd), c_int, POINTER(Nrrd), POINTER(Nrrd)]
nrrdArithTernaryOp = libteem.nrrdArithTernaryOp
nrrdArithTernaryOp.restype = c_int
nrrdArithTernaryOp.argtypes = [POINTER(Nrrd), c_int, POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
nrrdArithAffine = libteem.nrrdArithAffine
nrrdArithAffine.restype = c_int
nrrdArithAffine.argtypes = [POINTER(Nrrd), c_double, POINTER(Nrrd), c_double, c_double, c_double, c_int]
nrrdArithIterBinaryOp = libteem.nrrdArithIterBinaryOp
nrrdArithIterBinaryOp.restype = c_int
nrrdArithIterBinaryOp.argtypes = [POINTER(Nrrd), c_int, POINTER(NrrdIter), POINTER(NrrdIter)]
nrrdArithIterBinaryOpSelect = libteem.nrrdArithIterBinaryOpSelect
nrrdArithIterBinaryOpSelect.restype = c_int
nrrdArithIterBinaryOpSelect.argtypes = [POINTER(Nrrd), c_int, POINTER(NrrdIter), POINTER(NrrdIter), c_uint]
nrrdArithIterTernaryOp = libteem.nrrdArithIterTernaryOp
nrrdArithIterTernaryOp.restype = c_int
nrrdArithIterTernaryOp.argtypes = [POINTER(Nrrd), c_int, POINTER(NrrdIter), POINTER(NrrdIter), POINTER(NrrdIter)]
nrrdArithIterTernaryOpSelect = libteem.nrrdArithIterTernaryOpSelect
nrrdArithIterTernaryOpSelect.restype = c_int
nrrdArithIterTernaryOpSelect.argtypes = [POINTER(Nrrd), c_int, POINTER(NrrdIter), POINTER(NrrdIter), POINTER(NrrdIter), c_uint]
nrrdArithIterAffine = libteem.nrrdArithIterAffine
nrrdArithIterAffine.restype = c_int
nrrdArithIterAffine.argtypes = [POINTER(Nrrd), POINTER(NrrdIter), POINTER(NrrdIter), POINTER(NrrdIter), POINTER(NrrdIter), POINTER(NrrdIter), c_int]
# Data checksum, median filtering, and L2 distance transforms.
nrrdCRC32 = libteem.nrrdCRC32
nrrdCRC32.restype = c_uint
nrrdCRC32.argtypes = [POINTER(Nrrd), c_int]
nrrdCheapMedian = libteem.nrrdCheapMedian
nrrdCheapMedian.restype = c_int
nrrdCheapMedian.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_int, c_uint, c_float, c_uint]
nrrdDistanceL2 = libteem.nrrdDistanceL2
nrrdDistanceL2.restype = c_int
nrrdDistanceL2.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, POINTER(c_int), c_double, c_int]
nrrdDistanceL2Biased = libteem.nrrdDistanceL2Biased
nrrdDistanceL2Biased.restype = c_int
nrrdDistanceL2Biased.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, POINTER(c_int), c_double, c_double, c_int]
nrrdDistanceL2Signed = libteem.nrrdDistanceL2Signed
nrrdDistanceL2Signed.restype = c_int
nrrdDistanceL2Signed.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, POINTER(c_int), c_double, c_int]
class NrrdDeringContext(Structure):
    # Opaque handle for the CT ring-artifact removal ("dering") context;
    # manipulated only through the setter/execute functions below.
    pass
# Context lifecycle.
nrrdDeringContextNew = libteem.nrrdDeringContextNew
nrrdDeringContextNew.restype = POINTER(NrrdDeringContext)
nrrdDeringContextNew.argtypes = []
nrrdDeringContextNix = libteem.nrrdDeringContextNix
nrrdDeringContextNix.restype = POINTER(NrrdDeringContext)
nrrdDeringContextNix.argtypes = [POINTER(NrrdDeringContext)]
# Parameter setters; all return a c_int status code.
nrrdDeringVerboseSet = libteem.nrrdDeringVerboseSet
nrrdDeringVerboseSet.restype = c_int
nrrdDeringVerboseSet.argtypes = [POINTER(NrrdDeringContext), c_int]
nrrdDeringLinearInterpSet = libteem.nrrdDeringLinearInterpSet
nrrdDeringLinearInterpSet.restype = c_int
nrrdDeringLinearInterpSet.argtypes = [POINTER(NrrdDeringContext), c_int]
nrrdDeringVerticalSeamSet = libteem.nrrdDeringVerticalSeamSet
nrrdDeringVerticalSeamSet.restype = c_int
nrrdDeringVerticalSeamSet.argtypes = [POINTER(NrrdDeringContext), c_int]
nrrdDeringInputSet = libteem.nrrdDeringInputSet
nrrdDeringInputSet.restype = c_int
nrrdDeringInputSet.argtypes = [POINTER(NrrdDeringContext), POINTER(Nrrd)]
nrrdDeringCenterSet = libteem.nrrdDeringCenterSet
nrrdDeringCenterSet.restype = c_int
nrrdDeringCenterSet.argtypes = [POINTER(NrrdDeringContext), c_double, c_double]
nrrdDeringClampPercSet = libteem.nrrdDeringClampPercSet
nrrdDeringClampPercSet.restype = c_int
nrrdDeringClampPercSet.argtypes = [POINTER(NrrdDeringContext), c_double, c_double]
nrrdDeringClampHistoBinsSet = libteem.nrrdDeringClampHistoBinsSet
nrrdDeringClampHistoBinsSet.restype = c_int
nrrdDeringClampHistoBinsSet.argtypes = [POINTER(NrrdDeringContext), c_uint]
nrrdDeringRadiusScaleSet = libteem.nrrdDeringRadiusScaleSet
nrrdDeringRadiusScaleSet.restype = c_int
nrrdDeringRadiusScaleSet.argtypes = [POINTER(NrrdDeringContext), c_double]
nrrdDeringThetaNumSet = libteem.nrrdDeringThetaNumSet
nrrdDeringThetaNumSet.restype = c_int
nrrdDeringThetaNumSet.argtypes = [POINTER(NrrdDeringContext), c_uint]
nrrdDeringRadialKernelSet = libteem.nrrdDeringRadialKernelSet
nrrdDeringRadialKernelSet.restype = c_int
nrrdDeringRadialKernelSet.argtypes = [POINTER(NrrdDeringContext), POINTER(NrrdKernel), POINTER(c_double)]
nrrdDeringThetaKernelSet = libteem.nrrdDeringThetaKernelSet
nrrdDeringThetaKernelSet.restype = c_int
nrrdDeringThetaKernelSet.argtypes = [POINTER(NrrdDeringContext), POINTER(NrrdKernel), POINTER(c_double)]
# Run the dering operation, writing the result into the output Nrrd.
nrrdDeringExecute = libteem.nrrdDeringExecute
nrrdDeringExecute.restype = c_int
nrrdDeringExecute.argtypes = [POINTER(NrrdDeringContext), POINTER(Nrrd)]
# --- nrrdResample*: bindings for Teem's axis-resampling API. ---
# nrrdResample_t is the scalar type resampling computes in (double here).
nrrdResample_t = c_double
# Opaque handle; its layout is internal to libteem, so no _fields_ are
# declared and it is only ever used through a POINTER().
class NrrdResampleContext(Structure):
    pass
# Constructor / destructor pair; Nix frees and returns NULL (Teem idiom).
nrrdResampleContextNew = libteem.nrrdResampleContextNew
nrrdResampleContextNew.restype = POINTER(NrrdResampleContext)
nrrdResampleContextNew.argtypes = []
nrrdResampleContextNix = libteem.nrrdResampleContextNix
nrrdResampleContextNix.restype = POINTER(NrrdResampleContext)
nrrdResampleContextNix.argtypes = [POINTER(NrrdResampleContext)]
nrrdResampleDefaultCenterSet = libteem.nrrdResampleDefaultCenterSet
nrrdResampleDefaultCenterSet.restype = c_int
nrrdResampleDefaultCenterSet.argtypes = [POINTER(NrrdResampleContext), c_int]
nrrdResampleNonExistentSet = libteem.nrrdResampleNonExistentSet
nrrdResampleNonExistentSet.restype = c_int
nrrdResampleNonExistentSet.argtypes = [POINTER(NrrdResampleContext), c_int]
nrrdResampleNrrdSet = libteem.nrrdResampleNrrdSet
nrrdResampleNrrdSet.restype = c_int
nrrdResampleNrrdSet.argtypes = [POINTER(NrrdResampleContext), POINTER(Nrrd)]
nrrdResampleInputSet = libteem.nrrdResampleInputSet
nrrdResampleInputSet.restype = c_int
nrrdResampleInputSet.argtypes = [POINTER(NrrdResampleContext), POINTER(Nrrd)]
# Per-axis settings: the c_uint argument selects the axis index.
nrrdResampleKernelSet = libteem.nrrdResampleKernelSet
nrrdResampleKernelSet.restype = c_int
nrrdResampleKernelSet.argtypes = [POINTER(NrrdResampleContext), c_uint, POINTER(NrrdKernel), POINTER(c_double)]
nrrdResampleSamplesSet = libteem.nrrdResampleSamplesSet
nrrdResampleSamplesSet.restype = c_int
nrrdResampleSamplesSet.argtypes = [POINTER(NrrdResampleContext), c_uint, c_size_t]
nrrdResampleRangeSet = libteem.nrrdResampleRangeSet
nrrdResampleRangeSet.restype = c_int
nrrdResampleRangeSet.argtypes = [POINTER(NrrdResampleContext), c_uint, c_double, c_double]
nrrdResampleOverrideCenterSet = libteem.nrrdResampleOverrideCenterSet
nrrdResampleOverrideCenterSet.restype = c_int
nrrdResampleOverrideCenterSet.argtypes = [POINTER(NrrdResampleContext), c_uint, c_int]
nrrdResampleRangeFullSet = libteem.nrrdResampleRangeFullSet
nrrdResampleRangeFullSet.restype = c_int
nrrdResampleRangeFullSet.argtypes = [POINTER(NrrdResampleContext), c_uint]
# Whole-context settings (boundary behavior, output type, rounding, ...).
nrrdResampleBoundarySet = libteem.nrrdResampleBoundarySet
nrrdResampleBoundarySet.restype = c_int
nrrdResampleBoundarySet.argtypes = [POINTER(NrrdResampleContext), c_int]
nrrdResamplePadValueSet = libteem.nrrdResamplePadValueSet
nrrdResamplePadValueSet.restype = c_int
nrrdResamplePadValueSet.argtypes = [POINTER(NrrdResampleContext), c_double]
nrrdResampleBoundarySpecSet = libteem.nrrdResampleBoundarySpecSet
nrrdResampleBoundarySpecSet.restype = c_int
nrrdResampleBoundarySpecSet.argtypes = [POINTER(NrrdResampleContext), POINTER(NrrdBoundarySpec)]
nrrdResampleTypeOutSet = libteem.nrrdResampleTypeOutSet
nrrdResampleTypeOutSet.restype = c_int
nrrdResampleTypeOutSet.argtypes = [POINTER(NrrdResampleContext), c_int]
nrrdResampleRenormalizeSet = libteem.nrrdResampleRenormalizeSet
nrrdResampleRenormalizeSet.restype = c_int
nrrdResampleRenormalizeSet.argtypes = [POINTER(NrrdResampleContext), c_int]
nrrdResampleRoundSet = libteem.nrrdResampleRoundSet
nrrdResampleRoundSet.restype = c_int
nrrdResampleRoundSet.argtypes = [POINTER(NrrdResampleContext), c_int]
nrrdResampleClampSet = libteem.nrrdResampleClampSet
nrrdResampleClampSet.restype = c_int
nrrdResampleClampSet.argtypes = [POINTER(NrrdResampleContext), c_int]
# Runs the configured resampling into the output Nrrd.
nrrdResampleExecute = libteem.nrrdResampleExecute
nrrdResampleExecute.restype = c_int
nrrdResampleExecute.argtypes = [POINTER(NrrdResampleContext), POINTER(Nrrd)]
# Layout of the older (non-context) resample-info struct.  _pack_ = 4
# mirrors the alignment the C headers were compiled with; the 16s below
# correspond to a fixed per-axis array (one entry per supported axis).
NrrdResampleInfo._pack_ = 4
NrrdResampleInfo._fields_ = [
    ('kernel', POINTER(NrrdKernel) * 16),
    ('samples', c_size_t * 16),
    ('parm', c_double * 8 * 16),
    ('min', c_double * 16),
    ('max', c_double * 16),
    ('boundary', c_int),
    ('type', c_int),
    ('renormalize', c_int),
    ('round', c_int),
    ('clamp', c_int),
    ('cheap', c_int),
    ('padValue', c_double),
]
# Legacy one-shot resampling entry points using NrrdResampleInfo.
nrrdSpatialResample = libteem.nrrdSpatialResample
nrrdSpatialResample.restype = c_int
nrrdSpatialResample.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdResampleInfo)]
nrrdSimpleResample = libteem.nrrdSimpleResample
nrrdSimpleResample.restype = c_int
nrrdSimpleResample.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(NrrdKernel), POINTER(c_double), POINTER(c_size_t), POINTER(c_double)]
# --- nrrdCC*: connected-component (CC) analysis bindings. ---
nrrdCCValid = libteem.nrrdCCValid
nrrdCCValid.restype = c_int
nrrdCCValid.argtypes = [POINTER(Nrrd)]
nrrdCCSize = libteem.nrrdCCSize
nrrdCCSize.restype = c_uint
nrrdCCSize.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
nrrdCCMax = libteem.nrrdCCMax
nrrdCCMax.restype = c_uint
nrrdCCMax.argtypes = [POINTER(Nrrd)]
nrrdCCNum = libteem.nrrdCCNum
nrrdCCNum.restype = c_uint
nrrdCCNum.argtypes = [POINTER(Nrrd)]
nrrdCCFind = libteem.nrrdCCFind
nrrdCCFind.restype = c_int
nrrdCCFind.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(Nrrd), c_int, c_uint]
nrrdCCAdjacency = libteem.nrrdCCAdjacency
nrrdCCAdjacency.restype = c_int
nrrdCCAdjacency.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint]
nrrdCCMerge = libteem.nrrdCCMerge
nrrdCCMerge.restype = c_int
nrrdCCMerge.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), c_int, c_uint, c_uint, c_uint]
nrrdCCRevalue = libteem.nrrdCCRevalue
nrrdCCRevalue.restype = c_int
nrrdCCRevalue.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
nrrdCCSettle = libteem.nrrdCCSettle
nrrdCCSettle.restype = c_int
nrrdCCSettle.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(Nrrd)]
# --- FFT support (available only if libteem was built with FFTW; check
# the nrrdFFTWEnabled flag at runtime before relying on these). ---
nrrdFFTWEnabled = (c_int).in_dll(libteem, 'nrrdFFTWEnabled')
# Read/write FFTW "wisdom" (plan cache) from/to a C FILE stream.
nrrdFFTWWisdomRead = libteem.nrrdFFTWWisdomRead
nrrdFFTWWisdomRead.restype = c_int
nrrdFFTWWisdomRead.argtypes = [POINTER(FILE)]
nrrdFFT = libteem.nrrdFFT
nrrdFFT.restype = c_int
nrrdFFT.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_uint), c_uint, c_int, c_int, c_int]
nrrdFFTWWisdomWrite = libteem.nrrdFFTWWisdomWrite
nrrdFFTWWisdomWrite.restype = c_int
nrrdFFTWWisdomWrite.argtypes = [POINTER(FILE)]
# --- nrrdKernel*: exported kernel singletons and kernel utilities. ---
# Each global below is a pointer to a NrrdKernel instance living inside
# libteem, looked up by symbol name with ctypes' in_dll().
# Naming convention (from the symbols themselves): a trailing D/DD/DDD
# appears to denote first/second/third-derivative variants of a kernel --
# confirm against Teem's nrrdKernel documentation.
nrrdKernelTMF = (POINTER(NrrdKernel) * 5 * 5 * 4).in_dll(libteem, 'nrrdKernelTMF')
nrrdKernelTMF_maxD = (c_uint).in_dll(libteem, 'nrrdKernelTMF_maxD')
nrrdKernelTMF_maxC = (c_uint).in_dll(libteem, 'nrrdKernelTMF_maxC')
nrrdKernelTMF_maxA = (c_uint).in_dll(libteem, 'nrrdKernelTMF_maxA')
# Windowed-sinc kernels.
nrrdKernelHann = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelHann')
nrrdKernelHannD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelHannD')
nrrdKernelHannDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelHannDD')
nrrdKernelBlackman = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBlackman')
nrrdKernelBlackmanD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBlackmanD')
nrrdKernelBlackmanDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBlackmanDD')
# B-spline kernels of orders 1-7 (with derivative and approximate-inverse
# variants where libteem exports them).
nrrdKernelBSpline1 = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline1')
nrrdKernelBSpline1D = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline1D')
nrrdKernelBSpline2 = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline2')
nrrdKernelBSpline2D = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline2D')
nrrdKernelBSpline2DD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline2DD')
nrrdKernelBSpline3 = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline3')
nrrdKernelBSpline3D = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline3D')
nrrdKernelBSpline3DD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline3DD')
nrrdKernelBSpline3DDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline3DDD')
nrrdKernelBSpline3ApproxInverse = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline3ApproxInverse')
nrrdKernelBSpline4 = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline4')
nrrdKernelBSpline4D = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline4D')
nrrdKernelBSpline4DD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline4DD')
nrrdKernelBSpline4DDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline4DDD')
nrrdKernelBSpline5 = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline5')
nrrdKernelBSpline5D = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline5D')
nrrdKernelBSpline5DD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline5DD')
nrrdKernelBSpline5DDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline5DDD')
nrrdKernelBSpline5ApproxInverse = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline5ApproxInverse')
nrrdKernelBSpline6 = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline6')
nrrdKernelBSpline6D = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline6D')
nrrdKernelBSpline6DD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline6DD')
nrrdKernelBSpline6DDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline6DDD')
nrrdKernelBSpline7 = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline7')
nrrdKernelBSpline7D = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline7D')
nrrdKernelBSpline7DD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline7DD')
nrrdKernelBSpline7DDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline7DDD')
nrrdKernelBSpline7ApproxInverse = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBSpline7ApproxInverse')
# Simple / debugging kernels.
nrrdKernelZero = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelZero')
nrrdKernelBox = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBox')
nrrdKernelBoxSupportDebug = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBoxSupportDebug')
nrrdKernelCos4SupportDebug = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCos4SupportDebug')
nrrdKernelCos4SupportDebugD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCos4SupportDebugD')
nrrdKernelCos4SupportDebugDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCos4SupportDebugDD')
nrrdKernelCos4SupportDebugDDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCos4SupportDebugDDD')
nrrdKernelCatmullRomSupportDebug = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCatmullRomSupportDebug')
nrrdKernelCatmullRomSupportDebugD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCatmullRomSupportDebugD')
nrrdKernelCatmullRomSupportDebugDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCatmullRomSupportDebugDD')
nrrdKernelCheap = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCheap')
nrrdKernelHermiteScaleSpaceFlag = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelHermiteScaleSpaceFlag')
# Interpolation / finite-difference kernels.
nrrdKernelTent = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelTent')
nrrdKernelForwDiff = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelForwDiff')
nrrdKernelCentDiff = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCentDiff')
nrrdKernelBCCubic = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBCCubic')
nrrdKernelBCCubicD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBCCubicD')
nrrdKernelBCCubicDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelBCCubicDD')
nrrdKernelCatmullRom = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCatmullRom')
nrrdKernelCatmullRomD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCatmullRomD')
nrrdKernelCatmullRomDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelCatmullRomDD')
nrrdKernelAQuartic = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelAQuartic')
nrrdKernelAQuarticD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelAQuarticD')
nrrdKernelAQuarticDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelAQuarticDD')
nrrdKernelC3Quintic = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC3Quintic')
nrrdKernelC3QuinticD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC3QuinticD')
nrrdKernelC3QuinticDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC3QuinticDD')
nrrdKernelC4Hexic = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC4Hexic')
nrrdKernelC4HexicD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC4HexicD')
nrrdKernelC4HexicDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC4HexicDD')
nrrdKernelC4HexicDDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC4HexicDDD')
nrrdKernelC4HexicApproxInverse = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC4HexicApproxInverse')
nrrdKernelC5Septic = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC5Septic')
nrrdKernelC5SepticD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC5SepticD')
nrrdKernelC5SepticDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC5SepticDD')
nrrdKernelC5SepticDDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC5SepticDDD')
nrrdKernelC5SepticApproxInverse = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelC5SepticApproxInverse')
# Gaussian kernels (continuous and discrete).
nrrdKernelGaussian = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelGaussian')
nrrdKernelGaussianD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelGaussianD')
nrrdKernelGaussianDD = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelGaussianDD')
nrrdKernelDiscreteGaussian = (POINTER(NrrdKernel)).in_dll(libteem, 'nrrdKernelDiscreteGaussian')
nrrdKernelDiscreteGaussianGoodSigmaMax = (c_double).in_dll(libteem, 'nrrdKernelDiscreteGaussianGoodSigmaMax')
# Kernel parsing / printing / comparison helpers.  The STRING arguments
# are C char* buffers (parse reads from them, Sprint writes into them).
nrrdKernelParse = libteem.nrrdKernelParse
nrrdKernelParse.restype = c_int
nrrdKernelParse.argtypes = [POINTER(POINTER(NrrdKernel)), POINTER(c_double), STRING]
nrrdKernelSpecParse = libteem.nrrdKernelSpecParse
nrrdKernelSpecParse.restype = c_int
nrrdKernelSpecParse.argtypes = [POINTER(NrrdKernelSpec), STRING]
nrrdKernelSpecSprint = libteem.nrrdKernelSpecSprint
nrrdKernelSpecSprint.restype = c_int
nrrdKernelSpecSprint.argtypes = [STRING, POINTER(NrrdKernelSpec)]
nrrdKernelSprint = libteem.nrrdKernelSprint
nrrdKernelSprint.restype = c_int
nrrdKernelSprint.argtypes = [STRING, POINTER(NrrdKernel), POINTER(c_double)]
nrrdKernelCompare = libteem.nrrdKernelCompare
nrrdKernelCompare.restype = c_int
nrrdKernelCompare.argtypes = [POINTER(NrrdKernel), POINTER(c_double), POINTER(NrrdKernel), POINTER(c_double), POINTER(c_int), STRING]
nrrdKernelSpecCompare = libteem.nrrdKernelSpecCompare
nrrdKernelSpecCompare.restype = c_int
nrrdKernelSpecCompare.argtypes = [POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec), POINTER(c_int), STRING]
nrrdKernelCheck = libteem.nrrdKernelCheck
nrrdKernelCheck.restype = c_int
nrrdKernelCheck.argtypes = [POINTER(NrrdKernel), POINTER(c_double), c_size_t, c_double, c_uint, c_uint, POINTER(NrrdKernel), POINTER(c_double)]
# --- pull library structs (part 1): info specs, points, bins, energies. ---
# Mirrors C struct pullInfoSpec_t.  The class is declared empty first and
# the fields attached afterwards so self-referential pointer fields can be
# expressed (standard ctypeslib output pattern).
class pullInfoSpec_t(Structure):
    pass
pullInfoSpec_t._pack_ = 4
pullInfoSpec_t._fields_ = [
    ('info', c_int),
    ('source', c_int),
    ('volName', STRING),
    ('item', c_int),
    ('prop', c_int),
    ('scale', c_double),
    ('zero', c_double),
    ('constraint', c_int),
    ('volIdx', c_uint),
]
# Public alias matching the C typedef name.
pullInfoSpec = pullInfoSpec_t
# A particle in the pull point system; 'neighPoint' is a pointer array of
# pointers back into pullPoint_t, hence the deferred _fields_ assignment.
class pullPoint_t(Structure):
    pass
pullPoint_t._pack_ = 4
pullPoint_t._fields_ = [
    ('idtag', c_uint),
    ('idCC', c_uint),
    ('neighPoint', POINTER(POINTER(pullPoint_t))),
    ('neighPointNum', c_uint),
    ('neighPointArr', POINTER(airArray)),
    ('neighDistMean', c_double),
    ('neighCovar', c_float * 10),
    ('neighTanCovar', c_float * 6),
    ('stability', c_float),
    ('neighInterNum', c_uint),
    ('stuckIterNum', c_uint),
    ('status', c_int),
    ('pos', c_double * 4),
    ('energy', c_double),
    ('force', c_double * 4),
    ('stepEnergy', c_double),
    ('stepConstr', c_double),
    ('info', c_double * 1),
]
pullPoint = pullPoint_t
# Spatial bin holding a dynamic array of points plus neighbor-bin links.
class pullBin_t(Structure):
    pass
pullBin_t._fields_ = [
    ('point', POINTER(POINTER(pullPoint))),
    ('pointNum', c_uint),
    ('pointArr', POINTER(airArray)),
    ('neighBin', POINTER(POINTER(pullBin_t))),
]
pullBin = pullBin_t
# Inter-particle energy function: name, parameter count, and two C
# callbacks ('well' and 'eval') exposed through CFUNCTYPE.
class pullEnergy(Structure):
    pass
pullEnergy._fields_ = [
    ('name', c_char * 129),
    ('parmNum', c_uint),
    ('well', CFUNCTYPE(c_double, POINTER(c_double), POINTER(c_double))),
    ('eval', CFUNCTYPE(c_double, POINTER(c_double), c_double, POINTER(c_double))),
]
# An energy function paired with its (up to 3) parameters.
class pullEnergySpec(Structure):
    pass
pullEnergySpec._pack_ = 4
pullEnergySpec._fields_ = [
    ('energy', POINTER(pullEnergy)),
    ('parm', c_double * 3),
]
# --- pull library structs (part 2): volumes and per-thread tasks. ---
# A volume (possibly a scale-space stack) that the pull system probes,
# together with its gage measurement context and reconstruction kernels.
class pullVolume(Structure):
    pass
pullVolume._pack_ = 4
pullVolume._fields_ = [
    ('verbose', c_int),
    ('name', STRING),
    ('kind', POINTER(gageKind)),
    ('ninSingle', POINTER(Nrrd)),
    ('ninScale', POINTER(POINTER(Nrrd))),
    ('scaleNum', c_uint),
    ('scalePos', POINTER(c_double)),
    ('scaleDerivNorm', c_int),
    ('scaleDerivNormBias', c_double),
    ('ksp00', POINTER(NrrdKernelSpec)),
    ('ksp11', POINTER(NrrdKernelSpec)),
    ('ksp22', POINTER(NrrdKernelSpec)),
    ('kspSS', POINTER(NrrdKernelSpec)),
    ('pullValQuery', gageQuery),
    ('gctx', POINTER(gageContext)),
    ('gpvl', POINTER(gagePerVolume)),
    ('gpvlSS', POINTER(POINTER(gagePerVolume))),
    ('seedOnly', c_int),
    ('forSeedPreThresh', c_int),
]
# Per-thread worker state.  Declared before pullContext_t's fields exist
# (pullContext_t itself is declared earlier in the file), so the back
# pointer 'pctx' can be typed.
class pullTask_t(Structure):
    pass
pullTask_t._fields_ = [
    ('pctx', POINTER(pullContext_t)),
    ('vol', POINTER(pullVolume) * 4),
    ('ans', POINTER(c_double) * 24),
    ('processMode', c_int),
    ('probeSeedPreThreshOnly', c_int),
    ('thread', POINTER(airThread)),
    ('threadIdx', c_uint),
    ('rng', POINTER(airRandMTState)),
    ('pointBuffer', POINTER(pullPoint)),
    ('neighPoint', POINTER(POINTER(pullPoint))),
    ('addPoint', POINTER(POINTER(pullPoint))),
    ('addPointNum', c_uint),
    ('addPointArr', POINTER(airArray)),
    ('nixPoint', POINTER(POINTER(pullPoint))),
    ('nixPointNum', c_uint),
    ('nixPointArr', POINTER(airArray)),
    ('returnPtr', c_void_p),
    ('stuckNum', c_uint),
]
pullTask = pullTask_t
# --- pull library structs (part 3): parameter bundles embedded by value
# inside pullContext_t (initialization, iteration, system, flags). ---
class pullInitParm(Structure):
    pass
pullInitParm._pack_ = 4
pullInitParm._fields_ = [
    ('method', c_int),
    ('liveThreshUse', c_int),
    ('unequalShapesAllow', c_int),
    ('jitter', c_double),
    ('numInitial', c_uint),
    ('haltonStartIndex', c_uint),
    ('samplesAlongScaleNum', c_uint),
    ('ppvZRange', c_uint * 2),
    ('pointPerVoxel', c_int),
    ('npos', POINTER(Nrrd)),
]
# Iteration counts/periods controlling the optimization loop.
class pullIterParm(Structure):
    pass
pullIterParm._fields_ = [
    ('min', c_uint),
    ('max', c_uint),
    ('popCntlPeriod', c_uint),
    ('addDescent', c_uint),
    ('constraintMax', c_uint),
    ('stuckMax', c_uint),
    ('callback', c_uint),
    ('snap', c_uint),
    ('energyIncreasePermitHalfLife', c_uint),
]
# Continuous system parameters (step sizes, radii, probabilities, ...).
class pullSysParm(Structure):
    pass
pullSysParm._pack_ = 4
pullSysParm._fields_ = [
    ('alpha', c_double),
    ('beta', c_double),
    ('gamma', c_double),
    ('separableGammaLearnRescale', c_double),
    ('theta', c_double),
    ('wall', c_double),
    ('radiusSpace', c_double),
    ('radiusScale', c_double),
    ('binWidthSpace', c_double),
    ('neighborTrueProb', c_double),
    ('probeProb', c_double),
    ('stepInitial', c_double),
    ('opporStepScale', c_double),
    ('backStepScale', c_double),
    ('constraintStepMin', c_double),
    ('energyDecreaseMin', c_double),
    ('energyDecreasePopCntlMin', c_double),
    ('energyIncreasePermit', c_double),
    ('fracNeighNixedMax', c_double),
]
# Boolean behavior switches, each a C int used as a flag.
class pullFlag(Structure):
    pass
pullFlag._fields_ = [
    ('permuteOnRebin', c_int),
    ('noPopCntlWithZeroAlpha', c_int),
    ('useBetaForGammaLearn', c_int),
    ('restrictiveAddToBins', c_int),
    ('energyFromStrength', c_int),
    ('nixAtVolumeEdgeSpace', c_int),
    ('constraintBeforeSeedThresh', c_int),
    ('popCntlEnoughTest', c_int),
    ('convergenceIgnoresPopCntl', c_int),
    ('noAdd', c_int),
    ('binSingle', c_int),
    ('allowCodimension3Constraints', c_int),
    ('scaleIsTau', c_int),
    ('startSkipsPoints', c_int),
    ('zeroZ', c_int),
]
# Field layout for pullContext_t, the master state object of the pull
# particle system (the class itself is declared earlier in the file).
# Field order and types must match the C header exactly; _pack_ = 4
# mirrors the alignment libteem was compiled with.
pullContext_t._pack_ = 4
pullContext_t._fields_ = [
    # Parameter bundles embedded by value (defined just above).
    ('initParm', pullInitParm),
    ('iterParm', pullIterParm),
    ('sysParm', pullSysParm),
    ('flag', pullFlag),
    ('verbose', c_int),
    ('threadNum', c_uint),
    ('rngSeed', c_uint),
    ('progressBinMod', c_uint),
    # User callback invoked during iteration, with its opaque data pointer.
    ('iter_cb', CFUNCTYPE(None, c_void_p)),
    ('data_cb', c_void_p),
    ('vol', POINTER(pullVolume) * 4),
    ('volNum', c_uint),
    ('ispec', POINTER(pullInfoSpec) * 24),
    ('interType', c_int),
    ('energySpecR', POINTER(pullEnergySpec)),
    ('energySpecS', POINTER(pullEnergySpec)),
    ('energySpecWin', POINTER(pullEnergySpec)),
    ('haltonOffset', c_uint),
    ('bboxMin', c_double * 4),
    ('bboxMax', c_double * 4),
    ('infoTotalLen', c_uint),
    ('infoIdx', c_uint * 24),
    ('idtagNext', c_uint),
    ('haveScale', c_int),
    ('constraint', c_int),
    ('constraintDim', c_int),
    ('targetDim', c_int),
    ('finished', c_int),
    ('maxDistSpace', c_double),
    ('maxDistScale', c_double),
    ('voxelSizeSpace', c_double),
    ('voxelSizeScale', c_double),
    ('eipScale', c_double),
    # Binning state and per-thread machinery.
    ('bin', POINTER(pullBin)),
    ('binsEdge', c_uint * 4),
    ('binNum', c_uint),
    ('binNextIdx', c_uint),
    ('tmpPointPerm', POINTER(c_uint)),
    ('tmpPointPtr', POINTER(POINTER(pullPoint))),
    ('tmpPointNum', c_uint),
    ('binMutex', POINTER(airThreadMutex)),
    ('task', POINTER(POINTER(pullTask))),
    ('iterBarrierA', POINTER(airThreadBarrier)),
    ('iterBarrierB', POINTER(airThreadBarrier)),
    ('logAdd', POINTER(FILE)),
    # Run statistics / outputs.
    ('timeIteration', c_double),
    ('timeRun', c_double),
    ('energy', c_double),
    ('addNum', c_uint),
    ('nixNum', c_uint),
    ('stuckNum', c_uint),
    ('pointNum', c_uint),
    ('CCNum', c_uint),
    ('iter', c_uint),
    ('count', c_uint * 15),
]
# --- pullTrace / pullTraceMulti: particle-trace results. ---
# One trace: vertices, strength, and velocity stored as separate Nrrds,
# plus bookkeeping about why tracing stopped.
class pullTrace(Structure):
    pass
pullTrace._pack_ = 4
pullTrace._fields_ = [
    ('seedPos', c_double * 4),
    ('nvert', POINTER(Nrrd)),
    ('nstrn', POINTER(Nrrd)),
    ('nvelo', POINTER(Nrrd)),
    ('seedIdx', c_uint),
    ('whyStop', c_int * 2),
    ('whyNowhere', c_int),
]
# Dynamic collection of traces managed through an airArray.
class pullTraceMulti(Structure):
    pass
pullTraceMulti._fields_ = [
    ('trace', POINTER(POINTER(pullTrace))),
    ('traceNum', c_uint),
    ('traceArr', POINTER(airArray)),
]
# Union giving two typed views of the same pointer-to-pointer storage
# (typed point array vs. generic void*), matching the C union.
class pullPtrPtrUnion(Union):
    pass
pullPtrPtrUnion._fields_ = [
    ('points', POINTER(POINTER(POINTER(pullPoint)))),
    ('v', POINTER(c_void_p)),
]
# --- pull library globals and context-configuration setters. ---
# Library-level flags/keys exported by libteem.
pullPresent = (c_int).in_dll(libteem, 'pullPresent')
pullPhistEnabled = (c_int).in_dll(libteem, 'pullPhistEnabled')
pullBiffKey = (STRING).in_dll(libteem, 'pullBiffKey')
# Point-initialization strategies (random, Halton sequence, per-voxel,
# or positions given in a Nrrd).
pullInitRandomSet = libteem.pullInitRandomSet
pullInitRandomSet.restype = c_int
pullInitRandomSet.argtypes = [POINTER(pullContext), c_uint]
pullInitHaltonSet = libteem.pullInitHaltonSet
pullInitHaltonSet.restype = c_int
pullInitHaltonSet.argtypes = [POINTER(pullContext), c_uint, c_uint]
pullInitPointPerVoxelSet = libteem.pullInitPointPerVoxelSet
pullInitPointPerVoxelSet.restype = c_int
pullInitPointPerVoxelSet.argtypes = [POINTER(pullContext), c_int, c_uint, c_uint, c_uint, c_double]
pullInitGivenPosSet = libteem.pullInitGivenPosSet
pullInitGivenPosSet.restype = c_int
pullInitGivenPosSet.argtypes = [POINTER(pullContext), POINTER(Nrrd)]
pullInitLiveThreshUseSet = libteem.pullInitLiveThreshUseSet
pullInitLiveThreshUseSet.restype = c_int
pullInitLiveThreshUseSet.argtypes = [POINTER(pullContext), c_int]
pullInitUnequalShapesAllowSet = libteem.pullInitUnequalShapesAllowSet
pullInitUnequalShapesAllowSet.restype = c_int
pullInitUnequalShapesAllowSet.argtypes = [POINTER(pullContext), c_int]
# Generic parameter setters: the c_int selects WHICH parameter/flag
# (from the pull enums), the last argument is its new value.
pullIterParmSet = libteem.pullIterParmSet
pullIterParmSet.restype = c_int
pullIterParmSet.argtypes = [POINTER(pullContext), c_int, c_uint]
pullSysParmSet = libteem.pullSysParmSet
pullSysParmSet.restype = c_int
pullSysParmSet.argtypes = [POINTER(pullContext), c_int, c_double]
pullFlagSet = libteem.pullFlagSet
pullFlagSet.restype = c_int
pullFlagSet.argtypes = [POINTER(pullContext), c_int, c_int]
pullVerboseSet = libteem.pullVerboseSet
pullVerboseSet.restype = c_int
pullVerboseSet.argtypes = [POINTER(pullContext), c_int]
pullThreadNumSet = libteem.pullThreadNumSet
pullThreadNumSet.restype = c_int
pullThreadNumSet.argtypes = [POINTER(pullContext), c_uint]
pullRngSeedSet = libteem.pullRngSeedSet
pullRngSeedSet.restype = c_int
pullRngSeedSet.argtypes = [POINTER(pullContext), c_uint]
pullProgressBinModSet = libteem.pullProgressBinModSet
pullProgressBinModSet.restype = c_int
pullProgressBinModSet.argtypes = [POINTER(pullContext), c_uint]
# Registers an iteration callback plus its opaque user-data pointer.
pullCallbackSet = libteem.pullCallbackSet
pullCallbackSet.restype = c_int
pullCallbackSet.argtypes = [POINTER(pullContext), CFUNCTYPE(None, c_void_p), c_void_p]
pullInterEnergySet = libteem.pullInterEnergySet
pullInterEnergySet.restype = c_int
pullInterEnergySet.argtypes = [POINTER(pullContext), c_int, POINTER(pullEnergySpec), POINTER(pullEnergySpec), POINTER(pullEnergySpec)]
pullLogAddSet = libteem.pullLogAddSet
pullLogAddSet.restype = c_int
pullLogAddSet.argtypes = [POINTER(pullContext), POINTER(FILE)]
# airEnum tables and the energy-function singletons exported by libteem.
pullInterType = (POINTER(airEnum)).in_dll(libteem, 'pullInterType')
pullEnergyType = (POINTER(airEnum)).in_dll(libteem, 'pullEnergyType')
pullEnergyUnknown = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyUnknown')
pullEnergySpring = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergySpring')
pullEnergyGauss = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyGauss')
pullEnergyBspln = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyBspln')
pullEnergyButterworth = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyButterworth')
pullEnergyCotan = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyCotan')
pullEnergyCubic = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyCubic')
pullEnergyQuartic = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyQuartic')
pullEnergyCubicWell = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyCubicWell')
pullEnergyBetterCubicWell = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyBetterCubicWell')
pullEnergyQuarticWell = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyQuarticWell')
pullEnergyHepticWell = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyHepticWell')
pullEnergyZero = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyZero')
pullEnergyButterworthParabola = (POINTER(pullEnergy)).in_dll(libteem, 'pullEnergyButterworthParabola')
pullEnergyAll = (POINTER(pullEnergy) * 14).in_dll(libteem, 'pullEnergyAll')
# pullEnergySpec lifecycle and parsing.
pullEnergySpecNew = libteem.pullEnergySpecNew
pullEnergySpecNew.restype = POINTER(pullEnergySpec)
pullEnergySpecNew.argtypes = []
pullEnergySpecSet = libteem.pullEnergySpecSet
pullEnergySpecSet.restype = None
pullEnergySpecSet.argtypes = [POINTER(pullEnergySpec), POINTER(pullEnergy), POINTER(c_double)]
pullEnergySpecCopy = libteem.pullEnergySpecCopy
pullEnergySpecCopy.restype = None
pullEnergySpecCopy.argtypes = [POINTER(pullEnergySpec), POINTER(pullEnergySpec)]
pullEnergySpecNix = libteem.pullEnergySpecNix
pullEnergySpecNix.restype = POINTER(pullEnergySpec)
pullEnergySpecNix.argtypes = [POINTER(pullEnergySpec)]
pullEnergySpecParse = libteem.pullEnergySpecParse
pullEnergySpecParse.restype = c_int
pullEnergySpecParse.argtypes = [POINTER(pullEnergySpec), STRING]
pullHestEnergySpec = (POINTER(hestCB)).in_dll(libteem, 'pullHestEnergySpec')
# --- pullVolume / pullInfoSpec / pullContext lifecycle and output. ---
pullVolumeNew = libteem.pullVolumeNew
pullVolumeNew.restype = POINTER(pullVolume)
pullVolumeNew.argtypes = []
pullVolumeNix = libteem.pullVolumeNix
pullVolumeNix.restype = POINTER(pullVolume)
pullVolumeNix.argtypes = [POINTER(pullVolume)]
# Register a single volume, or a scale-space stack of volumes, with the
# context (name, data, and the reconstruction kernel specs).
pullVolumeSingleAdd = libteem.pullVolumeSingleAdd
pullVolumeSingleAdd.restype = c_int
pullVolumeSingleAdd.argtypes = [POINTER(pullContext), POINTER(gageKind), STRING, POINTER(Nrrd), POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec)]
pullVolumeStackAdd = libteem.pullVolumeStackAdd
pullVolumeStackAdd.restype = c_int
pullVolumeStackAdd.argtypes = [POINTER(pullContext), POINTER(gageKind), STRING, POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(c_double), c_uint, c_int, c_double, POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec), POINTER(NrrdKernelSpec)]
pullVolumeLookup = libteem.pullVolumeLookup
pullVolumeLookup.restype = POINTER(pullVolume)
pullVolumeLookup.argtypes = [POINTER(pullContext), STRING]
pullConstraintScaleRange = libteem.pullConstraintScaleRange
pullConstraintScaleRange.restype = c_int
pullConstraintScaleRange.argtypes = [POINTER(pullContext), POINTER(c_double)]
# airEnum tables naming the pull library's enumerated values.
pullInfo = (POINTER(airEnum)).in_dll(libteem, 'pullInfo')
pullSource = (POINTER(airEnum)).in_dll(libteem, 'pullSource')
pullProp = (POINTER(airEnum)).in_dll(libteem, 'pullProp')
pullProcessMode = (POINTER(airEnum)).in_dll(libteem, 'pullProcessMode')
pullTraceStop = (POINTER(airEnum)).in_dll(libteem, 'pullTraceStop')
pullInitMethod = (POINTER(airEnum)).in_dll(libteem, 'pullInitMethod')
pullCount = (POINTER(airEnum)).in_dll(libteem, 'pullCount')
pullConstraintFail = (POINTER(airEnum)).in_dll(libteem, 'pullConstraintFail')
pullPropLen = libteem.pullPropLen
pullPropLen.restype = c_uint
pullPropLen.argtypes = [c_int]
pullInfoLen = libteem.pullInfoLen
pullInfoLen.restype = c_uint
pullInfoLen.argtypes = [c_int]
pullInfoSpecNew = libteem.pullInfoSpecNew
pullInfoSpecNew.restype = POINTER(pullInfoSpec)
pullInfoSpecNew.argtypes = []
pullInfoSpecNix = libteem.pullInfoSpecNix
pullInfoSpecNix.restype = POINTER(pullInfoSpec)
pullInfoSpecNix.argtypes = [POINTER(pullInfoSpec)]
pullInfoSpecAdd = libteem.pullInfoSpecAdd
pullInfoSpecAdd.restype = c_int
pullInfoSpecAdd.argtypes = [POINTER(pullContext), POINTER(pullInfoSpec)]
pullInfoGet = libteem.pullInfoGet
pullInfoGet.restype = c_int
pullInfoGet.argtypes = [POINTER(Nrrd), c_int, POINTER(pullContext)]
pullInfoSpecSprint = libteem.pullInfoSpecSprint
pullInfoSpecSprint.restype = c_int
pullInfoSpecSprint.argtypes = [STRING, POINTER(pullContext), POINTER(pullInfoSpec)]
# Context constructor / destructor.
pullContextNew = libteem.pullContextNew
pullContextNew.restype = POINTER(pullContext)
pullContextNew.argtypes = []
pullContextNix = libteem.pullContextNix
pullContextNix.restype = POINTER(pullContext)
pullContextNix.argtypes = [POINTER(pullContext)]
# Extract results (point data) from a finished context into Nrrds.
pullOutputGet = libteem.pullOutputGet
pullOutputGet.restype = c_int
pullOutputGet.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), POINTER(c_double), c_double, POINTER(pullContext)]
pullOutputGetFilter = libteem.pullOutputGetFilter
pullOutputGetFilter.restype = c_int
pullOutputGetFilter.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), POINTER(c_double), c_double, POINTER(pullContext), c_uint, c_uint]
pullPositionHistoryGet = libteem.pullPositionHistoryGet
pullPositionHistoryGet.restype = c_int
pullPositionHistoryGet.argtypes = [POINTER(limnPolyData), POINTER(pullContext)]
pullPropGet = libteem.pullPropGet
pullPropGet.restype = c_int
pullPropGet.argtypes = [POINTER(Nrrd), c_int, POINTER(pullContext)]
# --- pull point operations, tracing, and the run loop. ---
pullPointInitializePerVoxel = libteem.pullPointInitializePerVoxel
pullPointInitializePerVoxel.restype = c_int
pullPointInitializePerVoxel.argtypes = [POINTER(pullContext), c_uint, POINTER(pullPoint), POINTER(pullVolume), POINTER(c_int)]
pullPointInitializeRandomOrHalton = libteem.pullPointInitializeRandomOrHalton
pullPointInitializeRandomOrHalton.restype = c_int
pullPointInitializeRandomOrHalton.argtypes = [POINTER(pullContext), c_uint, POINTER(pullPoint), POINTER(pullVolume)]
pullPointInitializeGivenPos = libteem.pullPointInitializeGivenPos
pullPointInitializeGivenPos.restype = c_int
pullPointInitializeGivenPos.argtypes = [POINTER(pullContext), POINTER(c_double), c_uint, POINTER(pullPoint), POINTER(c_int)]
pullPointScalar = libteem.pullPointScalar
pullPointScalar.restype = c_double
pullPointScalar.argtypes = [POINTER(pullContext), POINTER(pullPoint), c_int, POINTER(c_double), POINTER(c_double)]
pullPointNumber = libteem.pullPointNumber
pullPointNumber.restype = c_uint
pullPointNumber.argtypes = [POINTER(pullContext)]
pullPointNumberFilter = libteem.pullPointNumberFilter
pullPointNumberFilter.restype = c_uint
pullPointNumberFilter.argtypes = [POINTER(pullContext), c_uint, c_uint]
pullPointNew = libteem.pullPointNew
pullPointNew.restype = POINTER(pullPoint)
pullPointNew.argtypes = [POINTER(pullContext)]
pullPointNix = libteem.pullPointNix
pullPointNix.restype = POINTER(pullPoint)
pullPointNix.argtypes = [POINTER(pullPoint)]
# Probe the volumes at a point's position (per-thread task required).
pullProbe = libteem.pullProbe
pullProbe.restype = c_int
pullProbe.argtypes = [POINTER(pullTask), POINTER(pullPoint)]
pullBinsPointAdd = libteem.pullBinsPointAdd
pullBinsPointAdd.restype = c_int
pullBinsPointAdd.argtypes = [POINTER(pullContext), POINTER(pullPoint), POINTER(POINTER(pullBin))]
pullBinsPointMaybeAdd = libteem.pullBinsPointMaybeAdd
pullBinsPointMaybeAdd.restype = c_int
pullBinsPointMaybeAdd.argtypes = [POINTER(pullContext), POINTER(pullPoint), POINTER(POINTER(pullBin)), POINTER(c_int)]
# Trace construction, serialization, and plotting helpers.
pullTraceNew = libteem.pullTraceNew
pullTraceNew.restype = POINTER(pullTrace)
pullTraceNew.argtypes = []
pullTraceNix = libteem.pullTraceNix
pullTraceNix.restype = POINTER(pullTrace)
pullTraceNix.argtypes = [POINTER(pullTrace)]
pullTraceMultiSizeof = libteem.pullTraceMultiSizeof
pullTraceMultiSizeof.restype = c_size_t
pullTraceMultiSizeof.argtypes = [POINTER(pullTraceMulti)]
pullTraceSet = libteem.pullTraceSet
pullTraceSet.restype = c_int
pullTraceSet.argtypes = [POINTER(pullContext), POINTER(pullTrace), c_int, c_int, c_double, c_double, c_double, c_uint, POINTER(c_double)]
pullTraceMultiNew = libteem.pullTraceMultiNew
pullTraceMultiNew.restype = POINTER(pullTraceMulti)
pullTraceMultiNew.argtypes = []
pullTraceMultiNix = libteem.pullTraceMultiNix
pullTraceMultiNix.restype = POINTER(pullTraceMulti)
pullTraceMultiNix.argtypes = [POINTER(pullTraceMulti)]
pullTraceMultiAdd = libteem.pullTraceMultiAdd
pullTraceMultiAdd.restype = c_int
pullTraceMultiAdd.argtypes = [POINTER(pullTraceMulti), POINTER(pullTrace), POINTER(c_int)]
pullTraceMultiFilterConcaveDown = libteem.pullTraceMultiFilterConcaveDown
pullTraceMultiFilterConcaveDown.restype = c_int
pullTraceMultiFilterConcaveDown.argtypes = [POINTER(Nrrd), POINTER(pullTraceMulti), c_double]
pullTraceMultiPlotAdd = libteem.pullTraceMultiPlotAdd
pullTraceMultiPlotAdd.restype = c_int
pullTraceMultiPlotAdd.argtypes = [POINTER(Nrrd), POINTER(pullTraceMulti), POINTER(Nrrd), c_int, c_uint, c_uint]
pullTraceMultiWrite = libteem.pullTraceMultiWrite
pullTraceMultiWrite.restype = c_int
pullTraceMultiWrite.argtypes = [POINTER(FILE), POINTER(pullTraceMulti)]
pullTraceMultiRead = libteem.pullTraceMultiRead
pullTraceMultiRead.restype = c_int
pullTraceMultiRead.argtypes = [POINTER(pullTraceMulti), POINTER(FILE)]
pullEnergyPlot = libteem.pullEnergyPlot
pullEnergyPlot.restype = c_int
pullEnergyPlot.argtypes = [POINTER(pullContext), POINTER(Nrrd), c_double, c_double, c_double, c_uint]
pullBinProcess = libteem.pullBinProcess
pullBinProcess.restype = c_int
pullBinProcess.argtypes = [POINTER(pullTask), c_uint]
pullGammaLearn = libteem.pullGammaLearn
pullGammaLearn.restype = c_int
pullGammaLearn.argtypes = [POINTER(pullContext)]
# Main driver sequence: pullStart -> pullRun -> pullFinish.
pullStart = libteem.pullStart
pullStart.restype = c_int
pullStart.argtypes = [POINTER(pullContext)]
pullRun = libteem.pullRun
pullRun.restype = c_int
pullRun.argtypes = [POINTER(pullContext)]
pullFinish = libteem.pullFinish
pullFinish.restype = c_int
pullFinish.argtypes = [POINTER(pullContext)]
# Connected-component analysis over the particle set.
pullCCFind = libteem.pullCCFind
pullCCFind.restype = c_int
pullCCFind.argtypes = [POINTER(pullContext)]
pullCCMeasure = libteem.pullCCMeasure
pullCCMeasure.restype = c_int
pullCCMeasure.argtypes = [POINTER(pullContext), POINTER(Nrrd), c_int, c_double]
pullCCSort = libteem.pullCCSort
pullCCSort.restype = c_int
pullCCSort.argtypes = [POINTER(pullContext), c_int, c_double]
class pushPoint_t(Structure):
    # Forward declaration: _fields_ is attached after the class statement so
    # the struct can hold pointers to its own type (see 'neigh' below).
    pass
# _pack_ = 4 and the exact field order mirror the C struct's memory layout;
# do not reorder or re-pack.
pushPoint_t._pack_ = 4
pushPoint_t._fields_ = [
    ('ttaagg', c_uint),
    ('pos', c_double * 3),
    ('enr', c_double),
    ('frc', c_double * 3),
    ('ten', c_double * 7),
    ('inv', c_double * 7),
    ('cnt', c_double * 3),
    ('grav', c_double),
    ('gravGrad', c_double * 3),
    ('seedThresh', c_double),
    ('neigh', POINTER(POINTER(pushPoint_t))),  # self-referential neighbor list
    ('neighNum', c_uint),
    ('neighArr', POINTER(airArray)),
]
# Alias matching the C typedef name.
pushPoint = pushPoint_t
class pushBin_t(Structure):
    # Forward declaration so 'neighbor' below can point at pushBin_t itself.
    pass
pushBin_t._fields_ = [
    ('pointNum', c_uint),
    ('point', POINTER(POINTER(pushPoint))),
    ('pointArr', POINTER(airArray)),
    ('neighbor', POINTER(POINTER(pushBin_t))),  # self-referential
]
# Alias matching the C typedef name.
pushBin = pushBin_t
class pushTask_t(Structure):
    # Forward declaration; fields attached below.
    pass
class pushContext_t(Structure):
    # Forward declaration: pushTask_t's 'pctx' field needs this name before
    # pushContext_t._fields_ is defined (further down).
    pass
pushTask_t._pack_ = 4
pushTask_t._fields_ = [
    ('pctx', POINTER(pushContext_t)),
    ('gctx', POINTER(gageContext)),
    ('tenAns', POINTER(c_double)),
    ('invAns', POINTER(c_double)),
    ('cntAns', POINTER(c_double)),
    ('gravAns', POINTER(c_double)),
    ('gravGradAns', POINTER(c_double)),
    ('seedThreshAns', POINTER(c_double)),
    ('thread', POINTER(airThread)),
    ('threadIdx', c_uint),
    ('pointNum', c_uint),
    ('energySum', c_double),
    ('deltaFracSum', c_double),
    ('rng', POINTER(airRandMTState)),
    ('returnPtr', c_void_p),
]
# Alias matching the C typedef name.
pushTask = pushTask_t
class pushEnergy(Structure):
    pass
pushEnergy._fields_ = [
    ('name', c_char * 129),
    ('parmNum', c_uint),
    # C function pointers stored in the struct, wrapped with CFUNCTYPE.
    ('eval', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), c_double, POINTER(c_double))),
    ('support', CFUNCTYPE(c_double, POINTER(c_double))),
]
class pushEnergySpec(Structure):
    pass
pushEnergySpec._pack_ = 4
pushEnergySpec._fields_ = [
    ('energy', POINTER(pushEnergy)),
    ('parm', c_double * 3),
]
# Field layout for the pushContext_t forward-declared above. Order and
# _pack_ = 4 mirror the C struct exactly; do not reorder.
pushContext_t._pack_ = 4
pushContext_t._fields_ = [
    ('pointNum', c_uint),
    ('nin', POINTER(Nrrd)),
    ('npos', POINTER(Nrrd)),
    ('stepInitial', c_double),
    ('scale', c_double),
    ('wall', c_double),
    ('cntScl', c_double),
    ('deltaLimit', c_double),
    ('deltaFracMin', c_double),
    ('energyStepFrac', c_double),
    ('deltaFracStepFrac', c_double),
    ('neighborTrueProb', c_double),
    ('probeProb', c_double),
    ('energyImprovMin', c_double),
    ('detReject', c_int),
    ('midPntSmp', c_int),
    ('verbose', c_int),
    ('seedRNG', c_uint),
    ('threadNum', c_uint),
    ('maxIter', c_uint),
    ('snap', c_uint),
    ('gravItem', c_int),
    ('gravGradItem', c_int),
    ('gravScl', c_double),
    ('gravZero', c_double),
    ('seedThreshItem', c_int),
    ('seedThreshSign', c_int),
    ('seedThresh', c_double),
    ('ensp', POINTER(pushEnergySpec)),
    ('binSingle', c_int),
    ('binIncr', c_uint),
    ('ksp00', POINTER(NrrdKernelSpec)),
    ('ksp11', POINTER(NrrdKernelSpec)),
    ('ksp22', POINTER(NrrdKernelSpec)),
    ('ttaagg', c_uint),
    ('nten', POINTER(Nrrd)),
    ('ninv', POINTER(Nrrd)),
    ('nmask', POINTER(Nrrd)),
    ('gctx', POINTER(gageContext)),
    ('tpvl', POINTER(gagePerVolume)),
    ('ipvl', POINTER(gagePerVolume)),
    ('finished', c_int),
    ('dimIn', c_uint),
    ('sliceAxis', c_uint),
    ('bin', POINTER(pushBin)),
    ('binsEdge', c_uint * 3),
    ('binNum', c_uint),
    ('binIdx', c_uint),
    ('binMutex', POINTER(airThreadMutex)),
    ('step', c_double),
    ('maxDist', c_double),
    ('maxEval', c_double),
    ('meanEval', c_double),
    ('maxDet', c_double),
    ('energySum', c_double),
    ('task', POINTER(POINTER(pushTask))),
    ('iterBarrierA', POINTER(airThreadBarrier)),
    ('iterBarrierB', POINTER(airThreadBarrier)),
    ('deltaFrac', c_double),
    ('timeIteration', c_double),
    ('timeRun', c_double),
    ('iter', c_uint),
    ('noutPos', POINTER(Nrrd)),
    ('noutTen', POINTER(Nrrd)),
]
# Alias matching the C typedef name.
pushContext = pushContext_t
class pushPtrPtrUnion(Union):
    # ctypes Union: both members occupy the same storage, mirroring the C union.
    pass
pushPtrPtrUnion._fields_ = [
    ('point', POINTER(POINTER(POINTER(pushPoint)))),
    ('v', POINTER(c_void_p)),
]
# --- Exported globals and ctypes prototypes for libteem's "push" API. ---
# in_dll(...) binds a global variable exported by the shared library;
# the restype/argtypes pairs annotate each exported function.
pushPresent = (c_int).in_dll(libteem, 'pushPresent')
pushBiffKey = (STRING).in_dll(libteem, 'pushBiffKey')
pushPointNew = libteem.pushPointNew
pushPointNew.restype = POINTER(pushPoint)
pushPointNew.argtypes = [POINTER(pushContext)]
pushPointNix = libteem.pushPointNix
pushPointNix.restype = POINTER(pushPoint)
pushPointNix.argtypes = [POINTER(pushPoint)]
pushContextNew = libteem.pushContextNew
pushContextNew.restype = POINTER(pushContext)
pushContextNew.argtypes = []
pushContextNix = libteem.pushContextNix
pushContextNix.restype = POINTER(pushContext)
pushContextNix.argtypes = [POINTER(pushContext)]
pushEnergyType = (POINTER(airEnum)).in_dll(libteem, 'pushEnergyType')
pushEnergyUnknown = (POINTER(pushEnergy)).in_dll(libteem, 'pushEnergyUnknown')
pushEnergySpring = (POINTER(pushEnergy)).in_dll(libteem, 'pushEnergySpring')
pushEnergyGauss = (POINTER(pushEnergy)).in_dll(libteem, 'pushEnergyGauss')
pushEnergyCoulomb = (POINTER(pushEnergy)).in_dll(libteem, 'pushEnergyCoulomb')
pushEnergyCotan = (POINTER(pushEnergy)).in_dll(libteem, 'pushEnergyCotan')
pushEnergyZero = (POINTER(pushEnergy)).in_dll(libteem, 'pushEnergyZero')
pushEnergyAll = (POINTER(pushEnergy) * 6).in_dll(libteem, 'pushEnergyAll')
pushEnergySpecNew = libteem.pushEnergySpecNew
pushEnergySpecNew.restype = POINTER(pushEnergySpec)
pushEnergySpecNew.argtypes = []
pushEnergySpecSet = libteem.pushEnergySpecSet
pushEnergySpecSet.restype = None
pushEnergySpecSet.argtypes = [POINTER(pushEnergySpec), POINTER(pushEnergy), POINTER(c_double)]
pushEnergySpecNix = libteem.pushEnergySpecNix
pushEnergySpecNix.restype = POINTER(pushEnergySpec)
pushEnergySpecNix.argtypes = [POINTER(pushEnergySpec)]
pushEnergySpecParse = libteem.pushEnergySpecParse
pushEnergySpecParse.restype = c_int
pushEnergySpecParse.argtypes = [POINTER(pushEnergySpec), STRING]
pushHestEnergySpec = (POINTER(hestCB)).in_dll(libteem, 'pushHestEnergySpec')
pushStart = libteem.pushStart
pushStart.restype = c_int
pushStart.argtypes = [POINTER(pushContext)]
pushIterate = libteem.pushIterate
pushIterate.restype = c_int
pushIterate.argtypes = [POINTER(pushContext)]
pushRun = libteem.pushRun
pushRun.restype = c_int
pushRun.argtypes = [POINTER(pushContext)]
pushFinish = libteem.pushFinish
pushFinish.restype = c_int
pushFinish.argtypes = [POINTER(pushContext)]
pushBinInit = libteem.pushBinInit
pushBinInit.restype = None
pushBinInit.argtypes = [POINTER(pushBin), c_uint]
pushBinDone = libteem.pushBinDone
pushBinDone.restype = None
pushBinDone.argtypes = [POINTER(pushBin)]
pushBinPointAdd = libteem.pushBinPointAdd
pushBinPointAdd.restype = c_int
pushBinPointAdd.argtypes = [POINTER(pushContext), POINTER(pushPoint)]
pushBinAllNeighborSet = libteem.pushBinAllNeighborSet
pushBinAllNeighborSet.restype = None
pushBinAllNeighborSet.argtypes = [POINTER(pushContext)]
pushRebin = libteem.pushRebin
pushRebin.restype = c_int
pushRebin.argtypes = [POINTER(pushContext)]
pushBinProcess = libteem.pushBinProcess
pushBinProcess.restype = c_int
pushBinProcess.argtypes = [POINTER(pushTask), c_uint]
pushOutputGet = libteem.pushOutputGet
pushOutputGet.restype = c_int
pushOutputGet.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), POINTER(pushContext)]
class seekContext(Structure):
    # Forward declaration; fields attached below.
    pass
# Field layout mirrors the C seekContext struct; order and _pack_ = 4 are
# ABI-critical — do not reorder.
seekContext._pack_ = 4
seekContext._fields_ = [
    ('verbose', c_int),
    ('ninscl', POINTER(Nrrd)),
    ('gctx', POINTER(gageContext)),
    ('pvl', POINTER(gagePerVolume)),
    ('type', c_int),
    ('sclvItem', c_int),
    ('gradItem', c_int),
    ('normItem', c_int),
    ('evalItem', c_int),
    ('evecItem', c_int),
    ('stngItem', c_int),
    ('hessItem', c_int),
    ('lowerInside', c_int),
    ('normalsFind', c_int),
    ('strengthUse', c_int),
    ('strengthSign', c_int),
    ('isovalue', c_double),
    ('strength', c_double),
    ('evalDiffThresh', c_double),
    ('samples', c_size_t * 3),
    ('facesPerVoxel', c_double),
    ('vertsPerVoxel', c_double),
    ('pldArrIncr', c_uint),
    ('flag', POINTER(c_int)),
    ('nin', POINTER(Nrrd)),
    ('baseDim', c_uint),
    ('_shape', POINTER(gageShape)),
    ('shape', POINTER(gageShape)),
    ('nsclDerived', POINTER(Nrrd)),
    ('sclvAns', POINTER(c_double)),
    ('gradAns', POINTER(c_double)),
    ('normAns', POINTER(c_double)),
    ('evalAns', POINTER(c_double)),
    ('evecAns', POINTER(c_double)),
    ('stngAns', POINTER(c_double)),
    ('hessAns', POINTER(c_double)),
    ('reverse', c_int),
    ('txfNormal', c_double * 9),
    ('spanSize', c_size_t),
    ('nspanHist', POINTER(Nrrd)),
    ('range', POINTER(NrrdRange)),
    ('sx', c_size_t),
    ('sy', c_size_t),
    ('sz', c_size_t),
    ('txfIdx', c_double * 16),
    ('vidx', POINTER(c_int)),
    ('facevidx', POINTER(c_int)),
    ('sclv', POINTER(c_double)),
    ('grad', POINTER(c_double)),
    ('eval', POINTER(c_double)),
    ('evec', POINTER(c_double)),
    ('hess', POINTER(c_double)),
    ('t', POINTER(c_double)),
    ('edgealpha', POINTER(c_double)),
    ('edgenorm', POINTER(c_double)),
    ('edgeicoord', POINTER(c_double)),
    ('facecoord', POINTER(c_double)),
    ('facenorm', POINTER(c_double)),
    ('faceicoord', POINTER(c_double)),
    ('gradcontext', POINTER(c_double)),
    ('hesscontext', POINTER(c_double)),
    ('tcontext', POINTER(c_double)),
    ('stngcontext', POINTER(c_double)),
    ('flip', POINTER(c_byte)),
    ('pairs', POINTER(c_byte)),
    ('treated', POINTER(c_byte)),
    ('stng', POINTER(c_double)),
    ('nvidx', POINTER(Nrrd)),
    ('nsclv', POINTER(Nrrd)),
    ('ngrad', POINTER(Nrrd)),
    ('neval', POINTER(Nrrd)),
    ('nevec', POINTER(Nrrd)),
    ('nflip', POINTER(Nrrd)),
    ('nstng', POINTER(Nrrd)),
    ('nhess', POINTER(Nrrd)),
    ('nt', POINTER(Nrrd)),
    ('nfacevidx', POINTER(Nrrd)),
    ('nedgealpha', POINTER(Nrrd)),
    ('nedgenorm', POINTER(Nrrd)),
    ('nfacecoord', POINTER(Nrrd)),
    ('nfacenorm', POINTER(Nrrd)),
    ('npairs', POINTER(Nrrd)),
    ('nedgeicoord', POINTER(Nrrd)),
    ('nfaceicoord', POINTER(Nrrd)),
    ('ngradcontext', POINTER(Nrrd)),
    ('nhesscontext', POINTER(Nrrd)),
    ('ntcontext', POINTER(Nrrd)),
    ('nstngcontext', POINTER(Nrrd)),
    ('ntreated', POINTER(Nrrd)),
    ('voxNum', c_uint),
    ('vertNum', c_uint),
    ('faceNum', c_uint),
    ('strengthSeenMax', c_double),
    ('time', c_double),
]
# --- Exported globals and ctypes prototypes for libteem's "seek" API. ---
seekBiffKey = (STRING).in_dll(libteem, 'seekBiffKey')
seekType = (POINTER(airEnum)).in_dll(libteem, 'seekType')
seekContour3DTopoHackEdge = (c_int * 256).in_dll(libteem, 'seekContour3DTopoHackEdge')
seekContour3DTopoHackTriangle = (c_int * 16 * 256).in_dll(libteem, 'seekContour3DTopoHackTriangle')
seekPresent = (c_int).in_dll(libteem, 'seekPresent')
seekContextNew = libteem.seekContextNew
seekContextNew.restype = POINTER(seekContext)
seekContextNew.argtypes = []
seekContextNix = libteem.seekContextNix
seekContextNix.restype = POINTER(seekContext)
seekContextNix.argtypes = [POINTER(seekContext)]
seekVerboseSet = libteem.seekVerboseSet
seekVerboseSet.restype = None
seekVerboseSet.argtypes = [POINTER(seekContext), c_int]
seekDataSet = libteem.seekDataSet
seekDataSet.restype = c_int
seekDataSet.argtypes = [POINTER(seekContext), POINTER(Nrrd), POINTER(gageContext), c_uint]
seekNormalsFindSet = libteem.seekNormalsFindSet
seekNormalsFindSet.restype = c_int
seekNormalsFindSet.argtypes = [POINTER(seekContext), c_int]
seekStrengthUseSet = libteem.seekStrengthUseSet
seekStrengthUseSet.restype = c_int
seekStrengthUseSet.argtypes = [POINTER(seekContext), c_int]
seekStrengthSet = libteem.seekStrengthSet
seekStrengthSet.restype = c_int
seekStrengthSet.argtypes = [POINTER(seekContext), c_int, c_double]
seekSamplesSet = libteem.seekSamplesSet
seekSamplesSet.restype = c_int
seekSamplesSet.argtypes = [POINTER(seekContext), POINTER(c_size_t)]
seekTypeSet = libteem.seekTypeSet
seekTypeSet.restype = c_int
seekTypeSet.argtypes = [POINTER(seekContext), c_int]
seekLowerInsideSet = libteem.seekLowerInsideSet
seekLowerInsideSet.restype = c_int
seekLowerInsideSet.argtypes = [POINTER(seekContext), c_int]
seekItemScalarSet = libteem.seekItemScalarSet
seekItemScalarSet.restype = c_int
seekItemScalarSet.argtypes = [POINTER(seekContext), c_int]
seekItemStrengthSet = libteem.seekItemStrengthSet
seekItemStrengthSet.restype = c_int
seekItemStrengthSet.argtypes = [POINTER(seekContext), c_int]
seekItemNormalSet = libteem.seekItemNormalSet
seekItemNormalSet.restype = c_int
seekItemNormalSet.argtypes = [POINTER(seekContext), c_int]
seekItemGradientSet = libteem.seekItemGradientSet
seekItemGradientSet.restype = c_int
seekItemGradientSet.argtypes = [POINTER(seekContext), c_int]
seekItemEigensystemSet = libteem.seekItemEigensystemSet
seekItemEigensystemSet.restype = c_int
seekItemEigensystemSet.argtypes = [POINTER(seekContext), c_int, c_int]
seekItemHessSet = libteem.seekItemHessSet
seekItemHessSet.restype = c_int
seekItemHessSet.argtypes = [POINTER(seekContext), c_int]
seekIsovalueSet = libteem.seekIsovalueSet
seekIsovalueSet.restype = c_int
seekIsovalueSet.argtypes = [POINTER(seekContext), c_double]
seekEvalDiffThreshSet = libteem.seekEvalDiffThreshSet
seekEvalDiffThreshSet.restype = c_int
seekEvalDiffThreshSet.argtypes = [POINTER(seekContext), c_double]
seekUpdate = libteem.seekUpdate
seekUpdate.restype = c_int
seekUpdate.argtypes = [POINTER(seekContext)]
seekExtract = libteem.seekExtract
seekExtract.restype = c_int
seekExtract.argtypes = [POINTER(seekContext), POINTER(limnPolyData)]
seekVertexStrength = libteem.seekVertexStrength
seekVertexStrength.restype = c_int
seekVertexStrength.argtypes = [POINTER(Nrrd), POINTER(seekContext), POINTER(limnPolyData)]
seekDescendToDeg = libteem.seekDescendToDeg
seekDescendToDeg.restype = c_int
seekDescendToDeg.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_double, c_char]
seekDescendToDegCell = libteem.seekDescendToDegCell
seekDescendToDegCell.restype = c_int
seekDescendToDegCell.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_double, c_char]
seekDescendToRidge = libteem.seekDescendToRidge
seekDescendToRidge.restype = c_int
seekDescendToRidge.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_double, c_char, c_double]
class tenGlyphParm(Structure):
    # Forward declaration; fields attached below.
    pass
# Field order mirrors the C struct layout; do not reorder.
tenGlyphParm._fields_ = [
    ('verbose', c_int),
    ('nmask', POINTER(Nrrd)),
    ('anisoType', c_int),
    ('onlyPositive', c_int),
    ('confThresh', c_float),
    ('anisoThresh', c_float),
    ('maskThresh', c_float),
    ('glyphType', c_int),
    ('facetRes', c_int),
    ('glyphScale', c_float),
    ('sqdSharp', c_float),
    ('edgeWidth', c_float * 5),
    ('colEvec', c_int),
    ('colAnisoType', c_int),
    ('colMaxSat', c_float),
    ('colIsoGray', c_float),
    ('colGamma', c_float),
    ('colAnisoModulate', c_float),
    ('ADSP', c_float * 4),
    ('sliceAxis', c_uint),
    ('slicePos', c_size_t),
    ('doSlice', c_int),
    ('sliceAnisoType', c_int),
    ('sliceOffset', c_float),
    ('sliceBias', c_float),
    ('sliceGamma', c_float),
]
class tenEvecRGBParm(Structure):
    pass
tenEvecRGBParm._pack_ = 4
tenEvecRGBParm._fields_ = [
    ('which', c_uint),
    ('aniso', c_int),
    ('confThresh', c_double),
    ('anisoGamma', c_double),
    ('gamma', c_double),
    ('bgGray', c_double),
    ('isoGray', c_double),
    ('maxSat', c_double),
    ('typeOut', c_int),
    ('genAlpha', c_int),
]
class tenFiberContext(Structure):
    # Forward declaration; fields attached below.
    pass
# Field order and _pack_ = 4 mirror the C struct layout; do not reorder.
tenFiberContext._pack_ = 4
tenFiberContext._fields_ = [
    ('nin', POINTER(Nrrd)),
    ('ksp', POINTER(NrrdKernelSpec)),
    ('useDwi', c_int),
    ('fiberType', c_int),
    ('fiberProbeItem', c_int),
    ('intg', c_int),
    ('anisoStopType', c_int),
    ('anisoSpeedType', c_int),
    ('stop', c_int),
    ('useIndexSpace', c_int),
    ('verbose', c_int),
    ('anisoThresh', c_double),
    ('anisoSpeedFunc', c_double * 3),
    ('maxNumSteps', c_uint),
    ('minNumSteps', c_uint),
    ('stepSize', c_double),
    ('maxHalfLen', c_double),
    ('minWholeLen', c_double),
    ('confThresh', c_double),
    ('minRadius', c_double),
    ('minFraction', c_double),
    ('wPunct', c_double),
    ('ten2Which', c_uint),
    ('query', gageQuery),
    ('halfIdx', c_int),
    ('mframeUse', c_int),
    ('mframe', c_double * 9),
    ('mframeT', c_double * 9),
    ('wPos', c_double * 3),
    ('wDir', c_double * 3),
    ('lastDir', c_double * 3),
    ('seedEvec', c_double * 3),
    ('lastDirSet', c_int),
    ('lastTenSet', c_int),
    ('ten2Use', c_uint),
    ('gtx', POINTER(gageContext)),
    ('pvl', POINTER(gagePerVolume)),
    ('gageTen', POINTER(c_double)),
    ('gageEval', POINTER(c_double)),
    ('gageEvec', POINTER(c_double)),
    ('gageAnisoStop', POINTER(c_double)),
    ('gageAnisoSpeed', POINTER(c_double)),
    ('gageTen2', POINTER(c_double)),
    ('ten2AnisoStop', c_double),
    ('fiberTen', c_double * 7),
    ('fiberEval', c_double * 3),
    ('fiberEvec', c_double * 9),
    ('fiberAnisoStop', c_double),
    ('fiberAnisoSpeed', c_double),
    ('radius', c_double),
    ('halfLen', c_double * 2),
    ('numSteps', c_uint * 2),
    ('whyStop', c_int * 2),
    ('whyNowhere', c_int),
]
class tenFiberSingle(Structure):
    pass
tenFiberSingle._pack_ = 4
tenFiberSingle._fields_ = [
    ('seedPos', c_double * 3),
    ('dirIdx', c_uint),
    ('dirNum', c_uint),
    ('nvert', POINTER(Nrrd)),
    ('halfLen', c_double * 2),
    ('seedIdx', c_uint),
    ('stepNum', c_uint * 2),
    ('whyStop', c_int * 2),
    ('whyNowhere', c_int),
    ('nval', POINTER(Nrrd)),
    ('measr', c_double * 31),
]
class tenFiberMulti(Structure):
    pass
tenFiberMulti._fields_ = [
    ('fiber', POINTER(tenFiberSingle)),
    ('fiberNum', c_uint),
    ('fiberArr', POINTER(airArray)),
]
class tenEMBimodalParm(Structure):
    pass
tenEMBimodalParm._pack_ = 4
tenEMBimodalParm._fields_ = [
    ('minProb', c_double),
    ('minProb2', c_double),
    ('minDelta', c_double),
    ('minFraction', c_double),
    ('minConfidence', c_double),
    # NOTE(review): 'twoStage' and 'verbose' are c_double here, unlike the
    # c_int verbose fields elsewhere — generated from the C header; confirm
    # against teem's ten.h before "fixing".
    ('twoStage', c_double),
    ('verbose', c_double),
    ('maxIteration', c_uint),
    ('histo', POINTER(c_double)),
    ('pp1', POINTER(c_double)),
    ('pp2', POINTER(c_double)),
    ('vmin', c_double),
    ('vmax', c_double),
    ('delta', c_double),
    ('N', c_int),
    ('stage', c_int),
    ('iteration', c_uint),
    ('mean1', c_double),
    ('stdv1', c_double),
    ('mean2', c_double),
    ('stdv2', c_double),
    ('fraction1', c_double),
    ('confidence', c_double),
    ('threshold', c_double),
]
class tenGradientParm(Structure):
    pass
tenGradientParm._pack_ = 4
tenGradientParm._fields_ = [
    ('initStep', c_double),
    ('jitter', c_double),
    ('minVelocity', c_double),
    ('minPotentialChange', c_double),
    ('minMean', c_double),
    ('minMeanImprovement', c_double),
    ('single', c_int),
    ('insertZeroVec', c_int),
    ('verbose', c_int),
    ('snap', c_uint),
    ('report', c_uint),
    ('expo', c_uint),
    ('seed', c_uint),
    ('maxEdgeShrink', c_uint),
    ('minIteration', c_uint),
    ('maxIteration', c_uint),
    ('expo_d', c_double),
    ('step', c_double),
    ('nudge', c_double),
    ('itersUsed', c_uint),
    ('potential', c_double),
    ('potentialNorm', c_double),
    ('angle', c_double),
    ('edge', c_double),
]
class tenEstimateContext(Structure):
    # Forward declaration; fields attached below.
    pass
# Field order and _pack_ = 4 mirror the C struct layout; do not reorder.
tenEstimateContext._pack_ = 4
tenEstimateContext._fields_ = [
    ('bValue', c_double),
    ('valueMin', c_double),
    ('sigma', c_double),
    ('dwiConfThresh', c_double),
    ('dwiConfSoft', c_double),
    ('_ngrad', POINTER(Nrrd)),
    ('_nbmat', POINTER(Nrrd)),
    ('skipList', POINTER(c_uint)),
    ('skipListArr', POINTER(airArray)),
    ('all_f', POINTER(c_float)),
    ('all_d', POINTER(c_double)),
    ('simulate', c_int),
    ('estimate1Method', c_int),
    ('estimateB0', c_int),
    ('recordTime', c_int),
    ('recordErrorDwi', c_int),
    ('recordErrorLogDwi', c_int),
    ('recordLikelihoodDwi', c_int),
    ('verbose', c_int),
    ('negEvalShift', c_int),
    ('progress', c_int),
    ('WLSIterNum', c_uint),
    ('flag', c_int * 128),
    ('allNum', c_uint),
    ('dwiNum', c_uint),
    ('nbmat', POINTER(Nrrd)),
    ('nwght', POINTER(Nrrd)),
    ('nemat', POINTER(Nrrd)),
    ('knownB0', c_double),
    ('all', POINTER(c_double)),
    ('bnorm', POINTER(c_double)),
    ('allTmp', POINTER(c_double)),
    ('dwiTmp', POINTER(c_double)),
    ('dwi', POINTER(c_double)),
    ('skipLut', POINTER(c_ubyte)),
    ('estimatedB0', c_double),
    ('ten', c_double * 7),
    ('conf', c_double),
    ('mdwi', c_double),
    ('time', c_double),
    ('errorDwi', c_double),
    ('errorLogDwi', c_double),
    ('likelihoodDwi', c_double),
]
class tenDwiGageKindData(Structure):
    pass
tenDwiGageKindData._pack_ = 4
tenDwiGageKindData._fields_ = [
    ('ngrad', POINTER(Nrrd)),
    ('nbmat', POINTER(Nrrd)),
    ('thresh', c_double),
    ('soft', c_double),
    ('bval', c_double),
    ('valueMin', c_double),
    ('est1Method', c_int),
    ('est2Method', c_int),
    ('randSeed', c_uint),
]
class tenDwiGagePvlData(Structure):
    pass
tenDwiGagePvlData._pack_ = 4
tenDwiGagePvlData._fields_ = [
    ('tec1', POINTER(tenEstimateContext)),
    ('tec2', POINTER(tenEstimateContext)),
    ('vbuf', POINTER(c_double)),
    ('wght', POINTER(c_uint)),
    ('qvals', POINTER(c_double)),
    ('qpoints', POINTER(c_double)),
    ('dists', POINTER(c_double)),
    ('weights', POINTER(c_double)),
    ('nten1EigenGrads', POINTER(Nrrd)),
    ('randState', POINTER(airRandMTState)),
    ('randSeed', c_uint),
    ('ten1', c_double * 7),
    ('ten1Evec', c_double * 9),
    ('ten1Eval', c_double * 3),
    ('levmarUseFastExp', c_int),
    ('levmarMaxIter', c_uint),
    ('levmarTau', c_double),
    ('levmarEps1', c_double),
    ('levmarEps2', c_double),
    ('levmarEps3', c_double),
    ('levmarDelta', c_double),
    ('levmarMinCp', c_double),
    ('levmarInfo', c_double * 9),
]
class tenInterpParm(Structure):
    pass
tenInterpParm._pack_ = 4
tenInterpParm._fields_ = [
    ('verbose', c_int),
    ('convStep', c_double),
    ('minNorm', c_double),
    ('convEps', c_double),
    ('wghtSumEps', c_double),
    ('enableRecurse', c_int),
    ('maxIter', c_uint),
    ('numSteps', c_uint),
    ('lengthFancy', c_int),
    ('allocLen', c_uint),
    ('eval', POINTER(c_double)),
    ('evec', POINTER(c_double)),
    ('rtIn', POINTER(c_double)),
    ('rtLog', POINTER(c_double)),
    ('qIn', POINTER(c_double)),
    ('qBuff', POINTER(c_double)),
    ('qInter', POINTER(c_double)),
    ('numIter', c_uint),
    ('convFinal', c_double),
    ('lengthShape', c_double),
    ('lengthOrient', c_double),
]
class tenExperSpec(Structure):
    pass
tenExperSpec._fields_ = [
    ('set', c_int),
    ('imgNum', c_uint),
    ('bval', POINTER(c_double)),
    ('grad', POINTER(c_double)),
]
class tenModelParmDesc(Structure):
    pass
tenModelParmDesc._pack_ = 4
tenModelParmDesc._fields_ = [
    ('name', c_char * 129),
    ('min', c_double),
    ('max', c_double),
    ('cyclic', c_int),
    ('vec3', c_int),
    ('vecIdx', c_uint),
]
class tenModel_t(Structure):
    # Forward declaration: the 'convert' callback below takes a pointer to
    # tenModel_t itself, so the name must exist before _fields_ is set.
    pass
# Struct of C function pointers (wrapped with CFUNCTYPE) describing one
# diffusion model's operations; layout mirrors the C struct exactly.
tenModel_t._fields_ = [
    ('name', c_char * 129),
    ('parmNum', c_uint),
    ('parmDesc', POINTER(tenModelParmDesc)),
    ('simulate', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(tenExperSpec))),
    ('sprint', CFUNCTYPE(STRING, STRING, POINTER(c_double))),
    ('alloc', CFUNCTYPE(POINTER(c_double))),
    ('rand', CFUNCTYPE(None, POINTER(c_double), POINTER(airRandMTState), c_int)),
    ('step', CFUNCTYPE(None, POINTER(c_double), c_double, POINTER(c_double), POINTER(c_double))),
    ('dist', CFUNCTYPE(c_double, POINTER(c_double), POINTER(c_double))),
    ('copy', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double))),
    ('convert', CFUNCTYPE(c_int, POINTER(c_double), POINTER(c_double), POINTER(tenModel_t))),
    ('sqe', CFUNCTYPE(c_double, POINTER(c_double), POINTER(tenExperSpec), POINTER(c_double), POINTER(c_double), c_int)),
    ('sqeGrad', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(tenExperSpec), POINTER(c_double), POINTER(c_double), c_int)),
    ('sqeFit', CFUNCTYPE(c_double, POINTER(c_double), POINTER(c_double), POINTER(c_uint), POINTER(tenExperSpec), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_uint, c_uint, c_double, c_int)),
    ('nll', CFUNCTYPE(c_double, POINTER(c_double), POINTER(tenExperSpec), POINTER(c_double), POINTER(c_double), c_int, c_double, c_int)),
    ('nllGrad', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(tenExperSpec), POINTER(c_double), POINTER(c_double), c_int, c_double)),
    ('nllFit', CFUNCTYPE(c_double, POINTER(c_double), POINTER(tenExperSpec), POINTER(c_double), POINTER(c_double), c_int, c_double, c_int)),
]
# Alias matching the C typedef name.
tenModel = tenModel_t
# --- Exported globals and ctypes prototypes for libteem's "ten" (tensor) API.
# in_dll(...) binds library-exported globals; restype/argtypes pairs annotate
# each exported function. Keep each symbol's three lines together.
tenPresent = (c_int).in_dll(libteem, 'tenPresent')
tenBiffKey = (STRING).in_dll(libteem, 'tenBiffKey')
tenDefFiberKernel = (c_char * 0).in_dll(libteem, 'tenDefFiberKernel')
tenDefFiberStepSize = (c_double).in_dll(libteem, 'tenDefFiberStepSize')
tenDefFiberUseIndexSpace = (c_int).in_dll(libteem, 'tenDefFiberUseIndexSpace')
tenDefFiberMaxNumSteps = (c_int).in_dll(libteem, 'tenDefFiberMaxNumSteps')
tenDefFiberMaxHalfLen = (c_double).in_dll(libteem, 'tenDefFiberMaxHalfLen')
tenDefFiberAnisoStopType = (c_int).in_dll(libteem, 'tenDefFiberAnisoStopType')
tenDefFiberAnisoThresh = (c_double).in_dll(libteem, 'tenDefFiberAnisoThresh')
tenDefFiberIntg = (c_int).in_dll(libteem, 'tenDefFiberIntg')
tenDefFiberWPunct = (c_double).in_dll(libteem, 'tenDefFiberWPunct')
tenTripleConvertSingle_d = libteem.tenTripleConvertSingle_d
tenTripleConvertSingle_d.restype = None
tenTripleConvertSingle_d.argtypes = [POINTER(c_double), c_int, POINTER(c_double), c_int]
tenTripleConvertSingle_f = libteem.tenTripleConvertSingle_f
tenTripleConvertSingle_f.restype = None
tenTripleConvertSingle_f.argtypes = [POINTER(c_float), c_int, POINTER(c_float), c_int]
tenTripleCalcSingle_d = libteem.tenTripleCalcSingle_d
tenTripleCalcSingle_d.restype = None
tenTripleCalcSingle_d.argtypes = [POINTER(c_double), c_int, POINTER(c_double)]
tenTripleCalcSingle_f = libteem.tenTripleCalcSingle_f
tenTripleCalcSingle_f.restype = None
tenTripleCalcSingle_f.argtypes = [POINTER(c_float), c_int, POINTER(c_float)]
tenTripleCalc = libteem.tenTripleCalc
tenTripleCalc.restype = c_int
tenTripleCalc.argtypes = [POINTER(Nrrd), c_int, POINTER(Nrrd)]
tenTripleConvert = libteem.tenTripleConvert
tenTripleConvert.restype = c_int
tenTripleConvert.argtypes = [POINTER(Nrrd), c_int, POINTER(Nrrd), c_int]
tenGradientParmNew = libteem.tenGradientParmNew
tenGradientParmNew.restype = POINTER(tenGradientParm)
tenGradientParmNew.argtypes = []
tenGradientParmNix = libteem.tenGradientParmNix
tenGradientParmNix.restype = POINTER(tenGradientParm)
tenGradientParmNix.argtypes = [POINTER(tenGradientParm)]
tenGradientCheck = libteem.tenGradientCheck
tenGradientCheck.restype = c_int
tenGradientCheck.argtypes = [POINTER(Nrrd), c_int, c_uint]
tenGradientRandom = libteem.tenGradientRandom
tenGradientRandom.restype = c_int
tenGradientRandom.argtypes = [POINTER(Nrrd), c_uint, c_uint]
tenGradientIdealEdge = libteem.tenGradientIdealEdge
tenGradientIdealEdge.restype = c_double
tenGradientIdealEdge.argtypes = [c_uint, c_int]
tenGradientJitter = libteem.tenGradientJitter
tenGradientJitter.restype = c_int
tenGradientJitter.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double]
tenGradientBalance = libteem.tenGradientBalance
tenGradientBalance.restype = c_int
tenGradientBalance.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(tenGradientParm)]
tenGradientMeasure = libteem.tenGradientMeasure
tenGradientMeasure.restype = None
tenGradientMeasure.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(Nrrd), POINTER(tenGradientParm), c_int]
tenGradientDistribute = libteem.tenGradientDistribute
tenGradientDistribute.restype = c_int
tenGradientDistribute.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(tenGradientParm)]
tenGradientGenerate = libteem.tenGradientGenerate
tenGradientGenerate.restype = c_int
tenGradientGenerate.argtypes = [POINTER(Nrrd), c_uint, POINTER(tenGradientParm)]
# airEnum tables exported by the library (string <-> value maps).
tenAniso = (POINTER(airEnum)).in_dll(libteem, 'tenAniso')
tenInterpType = (POINTER(airEnum)).in_dll(libteem, 'tenInterpType')
tenGage = (POINTER(airEnum)).in_dll(libteem, 'tenGage')
tenFiberType = (POINTER(airEnum)).in_dll(libteem, 'tenFiberType')
tenDwiFiberType = (POINTER(airEnum)).in_dll(libteem, 'tenDwiFiberType')
tenFiberStop = (POINTER(airEnum)).in_dll(libteem, 'tenFiberStop')
tenFiberIntg = (POINTER(airEnum)).in_dll(libteem, 'tenFiberIntg')
tenGlyphType = (POINTER(airEnum)).in_dll(libteem, 'tenGlyphType')
tenEstimate1Method = (POINTER(airEnum)).in_dll(libteem, 'tenEstimate1Method')
tenEstimate2Method = (POINTER(airEnum)).in_dll(libteem, 'tenEstimate2Method')
tenTripleType = (POINTER(airEnum)).in_dll(libteem, 'tenTripleType')
tenInterpParmNew = libteem.tenInterpParmNew
tenInterpParmNew.restype = POINTER(tenInterpParm)
tenInterpParmNew.argtypes = []
tenInterpParmCopy = libteem.tenInterpParmCopy
tenInterpParmCopy.restype = POINTER(tenInterpParm)
tenInterpParmCopy.argtypes = [POINTER(tenInterpParm)]
tenInterpParmBufferAlloc = libteem.tenInterpParmBufferAlloc
tenInterpParmBufferAlloc.restype = c_int
tenInterpParmBufferAlloc.argtypes = [POINTER(tenInterpParm), c_uint]
tenInterpParmNix = libteem.tenInterpParmNix
tenInterpParmNix.restype = POINTER(tenInterpParm)
tenInterpParmNix.argtypes = [POINTER(tenInterpParm)]
tenInterpTwo_d = libteem.tenInterpTwo_d
tenInterpTwo_d.restype = None
tenInterpTwo_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_double, POINTER(tenInterpParm)]
tenInterpN_d = libteem.tenInterpN_d
tenInterpN_d.restype = c_int
tenInterpN_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_uint, c_int, POINTER(tenInterpParm)]
tenInterpPathLength = libteem.tenInterpPathLength
tenInterpPathLength.restype = c_double
tenInterpPathLength.argtypes = [POINTER(Nrrd), c_int, c_int, c_int]
tenInterpTwoDiscrete_d = libteem.tenInterpTwoDiscrete_d
tenInterpTwoDiscrete_d.restype = c_int
tenInterpTwoDiscrete_d.argtypes = [POINTER(Nrrd), POINTER(c_double), POINTER(c_double), c_int, c_uint, POINTER(tenInterpParm)]
tenInterpDistanceTwo_d = libteem.tenInterpDistanceTwo_d
tenInterpDistanceTwo_d.restype = c_double
tenInterpDistanceTwo_d.argtypes = [POINTER(c_double), POINTER(c_double), c_int, POINTER(tenInterpParm)]
tenInterpMulti3D = libteem.tenInterpMulti3D
tenInterpMulti3D.restype = c_int
tenInterpMulti3D.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(c_double), c_uint, c_int, POINTER(tenInterpParm)]
tenGlyphParmNew = libteem.tenGlyphParmNew
tenGlyphParmNew.restype = POINTER(tenGlyphParm)
tenGlyphParmNew.argtypes = []
tenGlyphParmNix = libteem.tenGlyphParmNix
tenGlyphParmNix.restype = POINTER(tenGlyphParm)
tenGlyphParmNix.argtypes = [POINTER(tenGlyphParm)]
tenGlyphParmCheck = libteem.tenGlyphParmCheck
tenGlyphParmCheck.restype = c_int
tenGlyphParmCheck.argtypes = [POINTER(tenGlyphParm), POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
tenGlyphGen = libteem.tenGlyphGen
tenGlyphGen.restype = c_int
tenGlyphGen.argtypes = [POINTER(limnObject), POINTER(echoScene), POINTER(tenGlyphParm), POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
tenGlyphBqdZoneEval = libteem.tenGlyphBqdZoneEval
tenGlyphBqdZoneEval.restype = c_uint
tenGlyphBqdZoneEval.argtypes = [POINTER(c_double)]
tenGlyphBqdUvEval = libteem.tenGlyphBqdUvEval
tenGlyphBqdUvEval.restype = None
tenGlyphBqdUvEval.argtypes = [POINTER(c_double), POINTER(c_double)]
tenGlyphBqdEvalUv = libteem.tenGlyphBqdEvalUv
tenGlyphBqdEvalUv.restype = None
tenGlyphBqdEvalUv.argtypes = [POINTER(c_double), POINTER(c_double)]
tenGlyphBqdZoneUv = libteem.tenGlyphBqdZoneUv
tenGlyphBqdZoneUv.restype = c_uint
tenGlyphBqdZoneUv.argtypes = [POINTER(c_double)]
tenGlyphBqdAbcUv = libteem.tenGlyphBqdAbcUv
tenGlyphBqdAbcUv.restype = None
tenGlyphBqdAbcUv.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
tenVerbose = (c_int).in_dll(libteem, 'tenVerbose')
tenRotateSingle_f = libteem.tenRotateSingle_f
tenRotateSingle_f.restype = None
tenRotateSingle_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
tenTensorCheck = libteem.tenTensorCheck
tenTensorCheck.restype = c_int
tenTensorCheck.argtypes = [POINTER(Nrrd), c_int, c_int, c_int]
tenMeasurementFrameReduce = libteem.tenMeasurementFrameReduce
tenMeasurementFrameReduce.restype = c_int
tenMeasurementFrameReduce.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
tenExpand2D = libteem.tenExpand2D
tenExpand2D.restype = c_int
tenExpand2D.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double, c_double]
tenExpand = libteem.tenExpand
tenExpand.restype = c_int
tenExpand.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double, c_double]
tenShrink = libteem.tenShrink
tenShrink.restype = c_int
tenShrink.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
tenEigensolve_f = libteem.tenEigensolve_f
tenEigensolve_f.restype = c_int
tenEigensolve_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float)]
tenEigensolve_d = libteem.tenEigensolve_d
tenEigensolve_d.restype = c_int
tenEigensolve_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
tenMakeSingle_f = libteem.tenMakeSingle_f
tenMakeSingle_f.restype = None
tenMakeSingle_f.argtypes = [POINTER(c_float), c_float, POINTER(c_float), POINTER(c_float)]
tenMakeSingle_d = libteem.tenMakeSingle_d
tenMakeSingle_d.restype = None
tenMakeSingle_d.argtypes = [POINTER(c_double), c_double, POINTER(c_double), POINTER(c_double)]
tenMake = libteem.tenMake
tenMake.restype = c_int
tenMake.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd)]
tenSlice = libteem.tenSlice
tenSlice.restype = c_int
tenSlice.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_uint, c_size_t, c_uint]
tenInvariantGradientsK_d = libteem.tenInvariantGradientsK_d
tenInvariantGradientsK_d.restype = None
tenInvariantGradientsK_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
tenInvariantGradientsR_d = libteem.tenInvariantGradientsR_d
tenInvariantGradientsR_d.restype = None
tenInvariantGradientsR_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
tenRotationTangents_d = libteem.tenRotationTangents_d
tenRotationTangents_d.restype = None
tenRotationTangents_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
tenLogSingle_d = libteem.tenLogSingle_d
tenLogSingle_d.restype = None
tenLogSingle_d.argtypes = [POINTER(c_double), POINTER(c_double)]
tenLogSingle_f = libteem.tenLogSingle_f
tenLogSingle_f.restype = None
tenLogSingle_f.argtypes = [POINTER(c_float), POINTER(c_float)]
tenExpSingle_d = libteem.tenExpSingle_d
tenExpSingle_d.restype = None
tenExpSingle_d.argtypes = [POINTER(c_double), POINTER(c_double)]
tenExpSingle_f = libteem.tenExpSingle_f
tenExpSingle_f.restype = None
tenExpSingle_f.argtypes = [POINTER(c_float), POINTER(c_float)]
tenSqrtSingle_d = libteem.tenSqrtSingle_d
tenSqrtSingle_d.restype = None
tenSqrtSingle_d.argtypes = [POINTER(c_double), POINTER(c_double)]
tenSqrtSingle_f = libteem.tenSqrtSingle_f
tenSqrtSingle_f.restype = None
tenSqrtSingle_f.argtypes = [POINTER(c_float), POINTER(c_float)]
tenPowSingle_d = libteem.tenPowSingle_d
tenPowSingle_d.restype = None
tenPowSingle_d.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
tenPowSingle_f = libteem.tenPowSingle_f
tenPowSingle_f.restype = None
tenPowSingle_f.argtypes = [POINTER(c_float), POINTER(c_float), c_float]
tenInv_f = libteem.tenInv_f
tenInv_f.restype = None
tenInv_f.argtypes = [POINTER(c_float), POINTER(c_float)]
tenInv_d = libteem.tenInv_d
tenInv_d.restype = None
tenInv_d.argtypes = [POINTER(c_double), POINTER(c_double)]
tenDoubleContract_d = libteem.tenDoubleContract_d
tenDoubleContract_d.restype = c_double
tenDoubleContract_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
tenDWMRIModalityKey = (STRING).in_dll(libteem, 'tenDWMRIModalityKey')
tenDWMRIModalityVal = (STRING).in_dll(libteem, 'tenDWMRIModalityVal')
tenDWMRINAVal = (STRING).in_dll(libteem, 'tenDWMRINAVal')
tenDWMRIBValueKey = (STRING).in_dll(libteem, 'tenDWMRIBValueKey')
tenDWMRIGradKeyFmt = (STRING).in_dll(libteem, 'tenDWMRIGradKeyFmt')
tenDWMRIBmatKeyFmt = (STRING).in_dll(libteem, 'tenDWMRIBmatKeyFmt')
tenDWMRINexKeyFmt = (STRING).in_dll(libteem, 'tenDWMRINexKeyFmt')
tenDWMRISkipKeyFmt = (STRING).in_dll(libteem, 'tenDWMRISkipKeyFmt')
tenDWMRIKeyValueParse = libteem.tenDWMRIKeyValueParse
tenDWMRIKeyValueParse.restype = c_int
tenDWMRIKeyValueParse.argtypes = [POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), POINTER(c_double), POINTER(POINTER(c_uint)), POINTER(c_uint), POINTER(Nrrd)]
tenBMatrixCalc = libteem.tenBMatrixCalc
tenBMatrixCalc.restype = c_int
tenBMatrixCalc.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
tenEMatrixCalc = libteem.tenEMatrixCalc
tenEMatrixCalc.restype = c_int
tenEMatrixCalc.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int]
tenEstimateLinearSingle_f = libteem.tenEstimateLinearSingle_f
tenEstimateLinearSingle_f.restype = None
tenEstimateLinearSingle_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_double), POINTER(c_double), c_uint, c_int, c_float, c_float, c_float]
tenEstimateLinearSingle_d = libteem.tenEstimateLinearSingle_d
tenEstimateLinearSingle_d.restype = None
tenEstimateLinearSingle_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_uint, c_int, c_double, c_double, c_double]
tenEstimateLinear3D = libteem.tenEstimateLinear3D
tenEstimateLinear3D.restype = c_int
tenEstimateLinear3D.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), c_uint, POINTER(Nrrd), c_int, c_double, c_double, c_double]
tenEstimateLinear4D = libteem.tenEstimateLinear4D
tenEstimateLinear4D.restype = c_int
tenEstimateLinear4D.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), POINTER(Nrrd), POINTER(Nrrd), c_int, c_double, c_double, c_double]
tenSimulateSingle_f = libteem.tenSimulateSingle_f
tenSimulateSingle_f.restype = None
tenSimulateSingle_f.argtypes = [POINTER(c_float), c_float, POINTER(c_float), POINTER(c_double), c_uint, c_float]
tenSimulate = libteem.tenSimulate
tenSimulate.restype = c_int
tenSimulate.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), c_double]
# --- tenEstimateContext API: stateful tensor-estimation context (new/configure/
# update/fit/nix lifecycle).  Auto-generated ctypes bindings; do not reorder.
tenEstimateContextNew = libteem.tenEstimateContextNew
tenEstimateContextNew.restype = POINTER(tenEstimateContext)
tenEstimateContextNew.argtypes = []
tenEstimateVerboseSet = libteem.tenEstimateVerboseSet
tenEstimateVerboseSet.restype = None
tenEstimateVerboseSet.argtypes = [POINTER(tenEstimateContext), c_int]
tenEstimateNegEvalShiftSet = libteem.tenEstimateNegEvalShiftSet
tenEstimateNegEvalShiftSet.restype = None
tenEstimateNegEvalShiftSet.argtypes = [POINTER(tenEstimateContext), c_int]
tenEstimateMethodSet = libteem.tenEstimateMethodSet
tenEstimateMethodSet.restype = c_int
tenEstimateMethodSet.argtypes = [POINTER(tenEstimateContext), c_int]
tenEstimateSigmaSet = libteem.tenEstimateSigmaSet
tenEstimateSigmaSet.restype = c_int
tenEstimateSigmaSet.argtypes = [POINTER(tenEstimateContext), c_double]
tenEstimateValueMinSet = libteem.tenEstimateValueMinSet
tenEstimateValueMinSet.restype = c_int
tenEstimateValueMinSet.argtypes = [POINTER(tenEstimateContext), c_double]
tenEstimateGradientsSet = libteem.tenEstimateGradientsSet
tenEstimateGradientsSet.restype = c_int
tenEstimateGradientsSet.argtypes = [POINTER(tenEstimateContext), POINTER(Nrrd), c_double, c_int]
tenEstimateBMatricesSet = libteem.tenEstimateBMatricesSet
tenEstimateBMatricesSet.restype = c_int
tenEstimateBMatricesSet.argtypes = [POINTER(tenEstimateContext), POINTER(Nrrd), c_double, c_int]
tenEstimateSkipSet = libteem.tenEstimateSkipSet
tenEstimateSkipSet.restype = c_int
tenEstimateSkipSet.argtypes = [POINTER(tenEstimateContext), c_uint, c_int]
tenEstimateSkipReset = libteem.tenEstimateSkipReset
tenEstimateSkipReset.restype = c_int
tenEstimateSkipReset.argtypes = [POINTER(tenEstimateContext)]
tenEstimateThresholdSet = libteem.tenEstimateThresholdSet
tenEstimateThresholdSet.restype = c_int
tenEstimateThresholdSet.argtypes = [POINTER(tenEstimateContext), c_double, c_double]
# Must be called after configuration, before fitting/simulation calls.
tenEstimateUpdate = libteem.tenEstimateUpdate
tenEstimateUpdate.restype = c_int
tenEstimateUpdate.argtypes = [POINTER(tenEstimateContext)]
tenEstimate1TensorSimulateSingle_f = libteem.tenEstimate1TensorSimulateSingle_f
tenEstimate1TensorSimulateSingle_f.restype = c_int
tenEstimate1TensorSimulateSingle_f.argtypes = [POINTER(tenEstimateContext), POINTER(c_float), c_float, c_float, c_float, POINTER(c_float)]
tenEstimate1TensorSimulateSingle_d = libteem.tenEstimate1TensorSimulateSingle_d
tenEstimate1TensorSimulateSingle_d.restype = c_int
tenEstimate1TensorSimulateSingle_d.argtypes = [POINTER(tenEstimateContext), POINTER(c_double), c_double, c_double, c_double, POINTER(c_double)]
tenEstimate1TensorSimulateVolume = libteem.tenEstimate1TensorSimulateVolume
tenEstimate1TensorSimulateVolume.restype = c_int
tenEstimate1TensorSimulateVolume.argtypes = [POINTER(tenEstimateContext), POINTER(Nrrd), c_double, c_double, POINTER(Nrrd), POINTER(Nrrd), c_int, c_int]
tenEstimate1TensorSingle_f = libteem.tenEstimate1TensorSingle_f
tenEstimate1TensorSingle_f.restype = c_int
tenEstimate1TensorSingle_f.argtypes = [POINTER(tenEstimateContext), POINTER(c_float), POINTER(c_float)]
tenEstimate1TensorSingle_d = libteem.tenEstimate1TensorSingle_d
tenEstimate1TensorSingle_d.restype = c_int
tenEstimate1TensorSingle_d.argtypes = [POINTER(tenEstimateContext), POINTER(c_double), POINTER(c_double)]
tenEstimate1TensorVolume4D = libteem.tenEstimate1TensorVolume4D
tenEstimate1TensorVolume4D.restype = c_int
tenEstimate1TensorVolume4D.argtypes = [POINTER(tenEstimateContext), POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), POINTER(Nrrd), c_int]
# "Nix" is Teem's convention for a destructor that returns NULL.
tenEstimateContextNix = libteem.tenEstimateContextNix
tenEstimateContextNix.restype = POINTER(tenEstimateContext)
tenEstimateContextNix.argtypes = [POINTER(tenEstimateContext)]
# --- Anisotropy metrics and eigenvector-based RGB colormapping.
# Auto-generated ctypes bindings; do not reorder by hand.
tenAnisoEval_f = libteem.tenAnisoEval_f
tenAnisoEval_f.restype = c_float
tenAnisoEval_f.argtypes = [POINTER(c_float), c_int]
tenAnisoEval_d = libteem.tenAnisoEval_d
tenAnisoEval_d.restype = c_double
tenAnisoEval_d.argtypes = [POINTER(c_double), c_int]
tenAnisoTen_f = libteem.tenAnisoTen_f
tenAnisoTen_f.restype = c_float
tenAnisoTen_f.argtypes = [POINTER(c_float), c_int]
tenAnisoTen_d = libteem.tenAnisoTen_d
tenAnisoTen_d.restype = c_double
tenAnisoTen_d.argtypes = [POINTER(c_double), c_int]
tenAnisoPlot = libteem.tenAnisoPlot
tenAnisoPlot.restype = c_int
tenAnisoPlot.argtypes = [POINTER(Nrrd), c_int, c_uint, c_int, c_int, c_int]
tenAnisoVolume = libteem.tenAnisoVolume
tenAnisoVolume.restype = c_int
tenAnisoVolume.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_double]
tenAnisoHistogram = libteem.tenAnisoHistogram
tenAnisoHistogram.restype = c_int
tenAnisoHistogram.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), c_int, c_int, c_uint]
# tenEvecRGBParm: parameter struct controlling eigenvector RGB mapping
# (New/Nix allocate/free; Check validates a parameter set).
tenEvecRGBParmNew = libteem.tenEvecRGBParmNew
tenEvecRGBParmNew.restype = POINTER(tenEvecRGBParm)
tenEvecRGBParmNew.argtypes = []
tenEvecRGBParmNix = libteem.tenEvecRGBParmNix
tenEvecRGBParmNix.restype = POINTER(tenEvecRGBParm)
tenEvecRGBParmNix.argtypes = [POINTER(tenEvecRGBParm)]
tenEvecRGBParmCheck = libteem.tenEvecRGBParmCheck
tenEvecRGBParmCheck.restype = c_int
tenEvecRGBParmCheck.argtypes = [POINTER(tenEvecRGBParm)]
tenEvecRGBSingle_f = libteem.tenEvecRGBSingle_f
tenEvecRGBSingle_f.restype = None
tenEvecRGBSingle_f.argtypes = [POINTER(c_float), c_float, POINTER(c_float), POINTER(c_float), POINTER(tenEvecRGBParm)]
tenEvecRGBSingle_d = libteem.tenEvecRGBSingle_d
tenEvecRGBSingle_d.restype = None
tenEvecRGBSingle_d.argtypes = [POINTER(c_double), c_double, POINTER(c_double), POINTER(c_double), POINTER(tenEvecRGBParm)]
tenEvecRGB = libteem.tenEvecRGB
tenEvecRGB.restype = c_int
tenEvecRGB.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(tenEvecRGBParm)]
tenEvqVolume = libteem.tenEvqVolume
tenEvqVolume.restype = c_int
tenEvqVolume.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_int, c_int, c_int]
tenBMatrixCheck = libteem.tenBMatrixCheck
tenBMatrixCheck.restype = c_int
tenBMatrixCheck.argtypes = [POINTER(Nrrd), c_int, c_uint]
# --- Fiber tractography API (tenFiberSingle / tenFiberContext / tenFiberMulti)
# plus EPI distortion registration.  Auto-generated ctypes bindings.
tenFiberSingleInit = libteem.tenFiberSingleInit
tenFiberSingleInit.restype = None
tenFiberSingleInit.argtypes = [POINTER(tenFiberSingle)]
tenFiberSingleDone = libteem.tenFiberSingleDone
tenFiberSingleDone.restype = None
tenFiberSingleDone.argtypes = [POINTER(tenFiberSingle)]
tenFiberSingleNew = libteem.tenFiberSingleNew
tenFiberSingleNew.restype = POINTER(tenFiberSingle)
tenFiberSingleNew.argtypes = []
tenFiberSingleNix = libteem.tenFiberSingleNix
tenFiberSingleNix.restype = POINTER(tenFiberSingle)
tenFiberSingleNix.argtypes = [POINTER(tenFiberSingle)]
# Context constructors: plain tensor volume vs. raw DWI volume variants.
tenFiberContextNew = libteem.tenFiberContextNew
tenFiberContextNew.restype = POINTER(tenFiberContext)
tenFiberContextNew.argtypes = [POINTER(Nrrd)]
tenFiberContextDwiNew = libteem.tenFiberContextDwiNew
tenFiberContextDwiNew.restype = POINTER(tenFiberContext)
tenFiberContextDwiNew.argtypes = [POINTER(Nrrd), c_double, c_double, c_double, c_int, c_int]
tenFiberVerboseSet = libteem.tenFiberVerboseSet
tenFiberVerboseSet.restype = None
tenFiberVerboseSet.argtypes = [POINTER(tenFiberContext), c_int]
tenFiberTypeSet = libteem.tenFiberTypeSet
tenFiberTypeSet.restype = c_int
tenFiberTypeSet.argtypes = [POINTER(tenFiberContext), c_int]
tenFiberKernelSet = libteem.tenFiberKernelSet
tenFiberKernelSet.restype = c_int
tenFiberKernelSet.argtypes = [POINTER(tenFiberContext), POINTER(NrrdKernel), POINTER(c_double)]
tenFiberProbeItemSet = libteem.tenFiberProbeItemSet
tenFiberProbeItemSet.restype = c_int
tenFiberProbeItemSet.argtypes = [POINTER(tenFiberContext), c_int]
tenFiberIntgSet = libteem.tenFiberIntgSet
tenFiberIntgSet.restype = c_int
tenFiberIntgSet.argtypes = [POINTER(tenFiberContext), c_int]
# Stop-criterion configuration (anisotropy threshold, length, step count, ...).
tenFiberStopSet = libteem.tenFiberStopSet
tenFiberStopSet.restype = c_int
tenFiberStopSet.argtypes = [POINTER(tenFiberContext), c_int]
tenFiberStopAnisoSet = libteem.tenFiberStopAnisoSet
tenFiberStopAnisoSet.restype = c_int
tenFiberStopAnisoSet.argtypes = [POINTER(tenFiberContext), c_int, c_double]
tenFiberStopDoubleSet = libteem.tenFiberStopDoubleSet
tenFiberStopDoubleSet.restype = c_int
tenFiberStopDoubleSet.argtypes = [POINTER(tenFiberContext), c_int, c_double]
tenFiberStopUIntSet = libteem.tenFiberStopUIntSet
tenFiberStopUIntSet.restype = c_int
tenFiberStopUIntSet.argtypes = [POINTER(tenFiberContext), c_int, c_uint]
tenFiberStopOn = libteem.tenFiberStopOn
tenFiberStopOn.restype = None
tenFiberStopOn.argtypes = [POINTER(tenFiberContext), c_int]
tenFiberStopOff = libteem.tenFiberStopOff
tenFiberStopOff.restype = None
tenFiberStopOff.argtypes = [POINTER(tenFiberContext), c_int]
tenFiberStopReset = libteem.tenFiberStopReset
tenFiberStopReset.restype = None
tenFiberStopReset.argtypes = [POINTER(tenFiberContext)]
tenFiberAnisoSpeedSet = libteem.tenFiberAnisoSpeedSet
tenFiberAnisoSpeedSet.restype = c_int
tenFiberAnisoSpeedSet.argtypes = [POINTER(tenFiberContext), c_int, c_double, c_double, c_double]
tenFiberAnisoSpeedReset = libteem.tenFiberAnisoSpeedReset
tenFiberAnisoSpeedReset.restype = c_int
tenFiberAnisoSpeedReset.argtypes = [POINTER(tenFiberContext)]
tenFiberParmSet = libteem.tenFiberParmSet
tenFiberParmSet.restype = c_int
tenFiberParmSet.argtypes = [POINTER(tenFiberContext), c_int, c_double]
tenFiberUpdate = libteem.tenFiberUpdate
tenFiberUpdate.restype = c_int
tenFiberUpdate.argtypes = [POINTER(tenFiberContext)]
tenFiberContextCopy = libteem.tenFiberContextCopy
tenFiberContextCopy.restype = POINTER(tenFiberContext)
tenFiberContextCopy.argtypes = [POINTER(tenFiberContext)]
tenFiberContextNix = libteem.tenFiberContextNix
tenFiberContextNix.restype = POINTER(tenFiberContext)
tenFiberContextNix.argtypes = [POINTER(tenFiberContext)]
# Tracing entry points: single fiber, direction query, and multi-fiber batch.
tenFiberTraceSet = libteem.tenFiberTraceSet
tenFiberTraceSet.restype = c_int
tenFiberTraceSet.argtypes = [POINTER(tenFiberContext), POINTER(Nrrd), POINTER(c_double), c_uint, POINTER(c_uint), POINTER(c_uint), POINTER(c_double)]
tenFiberTrace = libteem.tenFiberTrace
tenFiberTrace.restype = c_int
tenFiberTrace.argtypes = [POINTER(tenFiberContext), POINTER(Nrrd), POINTER(c_double)]
tenFiberDirectionNumber = libteem.tenFiberDirectionNumber
tenFiberDirectionNumber.restype = c_uint
tenFiberDirectionNumber.argtypes = [POINTER(tenFiberContext), POINTER(c_double)]
tenFiberSingleTrace = libteem.tenFiberSingleTrace
tenFiberSingleTrace.restype = c_int
tenFiberSingleTrace.argtypes = [POINTER(tenFiberContext), POINTER(tenFiberSingle), POINTER(c_double), c_uint]
tenFiberMultiNew = libteem.tenFiberMultiNew
tenFiberMultiNew.restype = POINTER(tenFiberMulti)
tenFiberMultiNew.argtypes = []
tenFiberMultiNix = libteem.tenFiberMultiNix
tenFiberMultiNix.restype = POINTER(tenFiberMulti)
tenFiberMultiNix.argtypes = [POINTER(tenFiberMulti)]
tenFiberMultiTrace = libteem.tenFiberMultiTrace
tenFiberMultiTrace.restype = c_int
tenFiberMultiTrace.argtypes = [POINTER(tenFiberContext), POINTER(tenFiberMulti), POINTER(Nrrd)]
tenFiberMultiPolyData = libteem.tenFiberMultiPolyData
tenFiberMultiPolyData.restype = c_int
tenFiberMultiPolyData.argtypes = [POINTER(tenFiberContext), POINTER(limnPolyData), POINTER(tenFiberMulti)]
tenFiberMultiProbeVals = libteem.tenFiberMultiProbeVals
tenFiberMultiProbeVals.restype = c_int
tenFiberMultiProbeVals.argtypes = [POINTER(tenFiberContext), POINTER(Nrrd), POINTER(tenFiberMulti)]
# EPI (echo-planar imaging) distortion registration, 3D and 4D variants.
tenEpiRegister3D = libteem.tenEpiRegister3D
tenEpiRegister3D.restype = c_int
tenEpiRegister3D.argtypes = [POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), c_uint, POINTER(Nrrd), c_int, c_double, c_double, c_double, c_double, c_int, POINTER(NrrdKernel), POINTER(c_double), c_int, c_int]
tenEpiRegister4D = libteem.tenEpiRegister4D
tenEpiRegister4D.restype = c_int
tenEpiRegister4D.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(Nrrd), c_int, c_double, c_double, c_double, c_double, c_int, POINTER(NrrdKernel), POINTER(c_double), c_int, c_int]
# --- DWI experiment specification (tenExperSpec) and parametric signal models
# (tenModel).  Auto-generated ctypes bindings; do not reorder by hand.
tenExperSpecNew = libteem.tenExperSpecNew
tenExperSpecNew.restype = POINTER(tenExperSpec)
tenExperSpecNew.argtypes = []
tenExperSpecGradSingleBValSet = libteem.tenExperSpecGradSingleBValSet
tenExperSpecGradSingleBValSet.restype = c_int
tenExperSpecGradSingleBValSet.argtypes = [POINTER(tenExperSpec), c_int, c_double, POINTER(c_double), c_uint]
tenExperSpecGradBValSet = libteem.tenExperSpecGradBValSet
tenExperSpecGradBValSet.restype = c_int
tenExperSpecGradBValSet.argtypes = [POINTER(tenExperSpec), c_int, POINTER(c_double), POINTER(c_double), c_uint]
tenExperSpecFromKeyValueSet = libteem.tenExperSpecFromKeyValueSet
tenExperSpecFromKeyValueSet.restype = c_int
tenExperSpecFromKeyValueSet.argtypes = [POINTER(tenExperSpec), POINTER(Nrrd)]
tenExperSpecNix = libteem.tenExperSpecNix
tenExperSpecNix.restype = POINTER(tenExperSpec)
tenExperSpecNix.argtypes = [POINTER(tenExperSpec)]
tenExperSpecKnownB0Get = libteem.tenExperSpecKnownB0Get
tenExperSpecKnownB0Get.restype = c_double
tenExperSpecKnownB0Get.argtypes = [POINTER(tenExperSpec), POINTER(c_double)]
tenExperSpecMaxBGet = libteem.tenExperSpecMaxBGet
tenExperSpecMaxBGet.restype = c_double
tenExperSpecMaxBGet.argtypes = [POINTER(tenExperSpec)]
tenDWMRIKeyValueFromExperSpecSet = libteem.tenDWMRIKeyValueFromExperSpecSet
tenDWMRIKeyValueFromExperSpecSet.restype = c_int
tenDWMRIKeyValueFromExperSpecSet.argtypes = [POINTER(Nrrd), POINTER(tenExperSpec)]
tenModelPrefixStr = (STRING).in_dll(libteem, 'tenModelPrefixStr')
tenModelParse = libteem.tenModelParse
tenModelParse.restype = c_int
tenModelParse.argtypes = [POINTER(POINTER(tenModel)), POINTER(c_int), c_int, STRING]
tenModelFromAxisLearnPossible = libteem.tenModelFromAxisLearnPossible
tenModelFromAxisLearnPossible.restype = c_int
tenModelFromAxisLearnPossible.argtypes = [POINTER(NrrdAxisInfo)]
tenModelFromAxisLearn = libteem.tenModelFromAxisLearn
tenModelFromAxisLearn.restype = c_int
tenModelFromAxisLearn.argtypes = [POINTER(POINTER(tenModel)), POINTER(c_int), POINTER(NrrdAxisInfo)]
tenModelSimulate = libteem.tenModelSimulate
tenModelSimulate.restype = c_int
tenModelSimulate.argtypes = [POINTER(Nrrd), c_int, POINTER(tenExperSpec), POINTER(tenModel), POINTER(Nrrd), POINTER(Nrrd), c_int]
# Model fitting: SQE = sum-of-squared-errors fit, NLL = negative log-likelihood fit.
tenModelSqeFit = libteem.tenModelSqeFit
tenModelSqeFit.restype = c_int
tenModelSqeFit.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), POINTER(POINTER(Nrrd)), POINTER(tenModel), POINTER(tenExperSpec), POINTER(Nrrd), c_int, c_int, c_int, c_uint, c_uint, c_uint, c_double, POINTER(airRandMTState), c_int]
tenModelNllFit = libteem.tenModelNllFit
tenModelNllFit.restype = c_int
tenModelNllFit.argtypes = [POINTER(Nrrd), POINTER(POINTER(Nrrd)), POINTER(tenModel), POINTER(tenExperSpec), POINTER(Nrrd), c_int, c_double, c_int]
tenModelConvert = libteem.tenModelConvert
tenModelConvert.restype = c_int
tenModelConvert.argtypes = [POINTER(Nrrd), POINTER(c_int), POINTER(tenModel), POINTER(Nrrd), POINTER(tenModel)]
# Predefined model singletons exported by the library.
tenModelZero = (POINTER(tenModel)).in_dll(libteem, 'tenModelZero')
tenModelB0 = (POINTER(tenModel)).in_dll(libteem, 'tenModelB0')
tenModelBall = (POINTER(tenModel)).in_dll(libteem, 'tenModelBall')
tenModel1Vector2D = (POINTER(tenModel)).in_dll(libteem, 'tenModel1Vector2D')
tenModel1Unit2D = (POINTER(tenModel)).in_dll(libteem, 'tenModel1Unit2D')
tenModel2Unit2D = (POINTER(tenModel)).in_dll(libteem, 'tenModel2Unit2D')
tenModel1Stick = (POINTER(tenModel)).in_dll(libteem, 'tenModel1Stick')
tenModelBall1StickEMD = (POINTER(tenModel)).in_dll(libteem, 'tenModelBall1StickEMD')
tenModelBall1Stick = (POINTER(tenModel)).in_dll(libteem, 'tenModelBall1Stick')
tenModelBall1Cylinder = (POINTER(tenModel)).in_dll(libteem, 'tenModelBall1Cylinder')
tenModel1Cylinder = (POINTER(tenModel)).in_dll(libteem, 'tenModel1Cylinder')
tenModel1Tensor2 = (POINTER(tenModel)).in_dll(libteem, 'tenModel1Tensor2')
# --- Whole-volume tensor transforms (size/anisotropy scaling, eigenvalue
# manipulation, log/exp), b-vector fitting, gage probing kinds, and EM bimodal
# histogram fitting.  Auto-generated ctypes bindings; do not reorder by hand.
tenSizeNormalize = libteem.tenSizeNormalize
tenSizeNormalize.restype = c_int
tenSizeNormalize.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_double), c_double, c_double]
tenSizeScale = libteem.tenSizeScale
tenSizeScale.restype = c_int
tenSizeScale.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double]
tenAnisoScale = libteem.tenAnisoScale
tenAnisoScale.restype = c_int
tenAnisoScale.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double, c_int, c_int]
tenEigenvaluePower = libteem.tenEigenvaluePower
tenEigenvaluePower.restype = c_int
tenEigenvaluePower.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double]
tenEigenvalueClamp = libteem.tenEigenvalueClamp
tenEigenvalueClamp.restype = c_int
tenEigenvalueClamp.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double, c_double]
tenEigenvalueAdd = libteem.tenEigenvalueAdd
tenEigenvalueAdd.restype = c_int
tenEigenvalueAdd.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double]
tenEigenvalueMultiply = libteem.tenEigenvalueMultiply
tenEigenvalueMultiply.restype = c_int
tenEigenvalueMultiply.argtypes = [POINTER(Nrrd), POINTER(Nrrd), c_double]
tenLog = libteem.tenLog
tenLog.restype = c_int
tenLog.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
tenExp = libteem.tenExp
tenExp.restype = c_int
tenExp.argtypes = [POINTER(Nrrd), POINTER(Nrrd)]
tenBVecNonLinearFit = libteem.tenBVecNonLinearFit
tenBVecNonLinearFit.restype = c_int
tenBVecNonLinearFit.argtypes = [POINTER(Nrrd), POINTER(Nrrd), POINTER(c_double), POINTER(c_double), c_int, c_double]
# gage integration: tensor and DWI probing "kinds".
tenGageKind = (POINTER(gageKind)).in_dll(libteem, 'tenGageKind')
tenDwiGage = (POINTER(airEnum)).in_dll(libteem, 'tenDwiGage')
tenDwiGageKindNew = libteem.tenDwiGageKindNew
tenDwiGageKindNew.restype = POINTER(gageKind)
tenDwiGageKindNew.argtypes = []
tenDwiGageKindNix = libteem.tenDwiGageKindNix
tenDwiGageKindNix.restype = POINTER(gageKind)
tenDwiGageKindNix.argtypes = [POINTER(gageKind)]
tenDwiGageKindSet = libteem.tenDwiGageKindSet
tenDwiGageKindSet.restype = c_int
tenDwiGageKindSet.argtypes = [POINTER(gageKind), c_double, c_double, c_double, c_double, POINTER(Nrrd), POINTER(Nrrd), c_int, c_int, c_uint]
tenDwiGageKindCheck = libteem.tenDwiGageKindCheck
tenDwiGageKindCheck.restype = c_int
tenDwiGageKindCheck.argtypes = [POINTER(gageKind)]
# Expectation-maximization fit of a bimodal histogram.
tenEMBimodalParmNew = libteem.tenEMBimodalParmNew
tenEMBimodalParmNew.restype = POINTER(tenEMBimodalParm)
tenEMBimodalParmNew.argtypes = []
tenEMBimodalParmNix = libteem.tenEMBimodalParmNix
tenEMBimodalParmNix.restype = POINTER(tenEMBimodalParm)
tenEMBimodalParmNix.argtypes = [POINTER(tenEMBimodalParm)]
tenEMBimodal = libteem.tenEMBimodal
tenEMBimodal.restype = c_int
tenEMBimodal.argtypes = [POINTER(tenEMBimodalParm), POINTER(Nrrd)]
# --- "tend" command-line tool: one exported unrrduCmd descriptor per
# sub-command, plus the command list and title string.  Auto-generated.
tend_simCmd = (unrrduCmd).in_dll(libteem, 'tend_simCmd')
tend_evqCmd = (unrrduCmd).in_dll(libteem, 'tend_evqCmd')
tend_bmatCmd = (unrrduCmd).in_dll(libteem, 'tend_bmatCmd')
tend_tconvCmd = (unrrduCmd).in_dll(libteem, 'tend_tconvCmd')
tend_evecrgbCmd = (unrrduCmd).in_dll(libteem, 'tend_evecrgbCmd')
tend_pointCmd = (unrrduCmd).in_dll(libteem, 'tend_pointCmd')
tend_expandCmd = (unrrduCmd).in_dll(libteem, 'tend_expandCmd')
tend_ellipseCmd = (unrrduCmd).in_dll(libteem, 'tend_ellipseCmd')
tend_anplotCmd = (unrrduCmd).in_dll(libteem, 'tend_anplotCmd')
tend_evalclampCmd = (unrrduCmd).in_dll(libteem, 'tend_evalclampCmd')
tend_unmfCmd = (unrrduCmd).in_dll(libteem, 'tend_unmfCmd')
tend_msimCmd = (unrrduCmd).in_dll(libteem, 'tend_msimCmd')
tend_evalCmd = (unrrduCmd).in_dll(libteem, 'tend_evalCmd')
tend_evalmultCmd = (unrrduCmd).in_dll(libteem, 'tend_evalmultCmd')
tend_estimCmd = (unrrduCmd).in_dll(libteem, 'tend_estimCmd')
tend_gradsCmd = (unrrduCmd).in_dll(libteem, 'tend_gradsCmd')
tend_mconvCmd = (unrrduCmd).in_dll(libteem, 'tend_mconvCmd')
tend_avgCmd = (unrrduCmd).in_dll(libteem, 'tend_avgCmd')
tend_fiberCmd = (unrrduCmd).in_dll(libteem, 'tend_fiberCmd')
tend_shrinkCmd = (unrrduCmd).in_dll(libteem, 'tend_shrinkCmd')
tend_mfitCmd = (unrrduCmd).in_dll(libteem, 'tend_mfitCmd')
tend_bfitCmd = (unrrduCmd).in_dll(libteem, 'tend_bfitCmd')
tend_helixCmd = (unrrduCmd).in_dll(libteem, 'tend_helixCmd')
tend_anhistCmd = (unrrduCmd).in_dll(libteem, 'tend_anhistCmd')
tend_normCmd = (unrrduCmd).in_dll(libteem, 'tend_normCmd')
tend_anscaleCmd = (unrrduCmd).in_dll(libteem, 'tend_anscaleCmd')
tend_epiregCmd = (unrrduCmd).in_dll(libteem, 'tend_epiregCmd')
tend_anvolCmd = (unrrduCmd).in_dll(libteem, 'tend_anvolCmd')
tend_tripleCmd = (unrrduCmd).in_dll(libteem, 'tend_tripleCmd')
tend_sliceCmd = (unrrduCmd).in_dll(libteem, 'tend_sliceCmd')
tend_evaladdCmd = (unrrduCmd).in_dll(libteem, 'tend_evaladdCmd')
tend_stenCmd = (unrrduCmd).in_dll(libteem, 'tend_stenCmd')
tend_glyphCmd = (unrrduCmd).in_dll(libteem, 'tend_glyphCmd')
tend_aboutCmd = (unrrduCmd).in_dll(libteem, 'tend_aboutCmd')
tend_makeCmd = (unrrduCmd).in_dll(libteem, 'tend_makeCmd')
tend_satinCmd = (unrrduCmd).in_dll(libteem, 'tend_satinCmd')
tend_expCmd = (unrrduCmd).in_dll(libteem, 'tend_expCmd')
tend_evecCmd = (unrrduCmd).in_dll(libteem, 'tend_evecCmd')
tend_logCmd = (unrrduCmd).in_dll(libteem, 'tend_logCmd')
tend_evalpowCmd = (unrrduCmd).in_dll(libteem, 'tend_evalpowCmd')
# NOTE(review): declared as a zero-length array type here (generator quirk);
# the C symbol is a NULL-terminated array, so indexing via this binding is
# limited — confirm before relying on tendCmdList from Python.
tendCmdList = (POINTER(unrrduCmd) * 0).in_dll(libteem, 'tendCmdList')
tendFiberStopCB = (POINTER(hestCB)).in_dll(libteem, 'tendFiberStopCB')
tendTitle = (STRING).in_dll(libteem, 'tendTitle')
class tijk_sym_fun_t(Structure):
    # Function table for symmetric-tensor operations; fields are assigned
    # after the class statement (generated two-step pattern, which also
    # allows self-referential pointer fields elsewhere in this file).
    pass
# Each pair of fields is a double-precision (_d) and single-precision (_f)
# variant of the same operation, stored as C function pointers.
tijk_sym_fun_t._fields_ = [
    ('s_form_d', CFUNCTYPE(c_double, POINTER(c_double), POINTER(c_double))),
    ('s_form_f', CFUNCTYPE(c_float, POINTER(c_float), POINTER(c_float))),
    ('mean_d', CFUNCTYPE(c_double, POINTER(c_double))),
    ('mean_f', CFUNCTYPE(c_float, POINTER(c_float))),
    ('var_d', CFUNCTYPE(c_double, POINTER(c_double))),
    ('var_f', CFUNCTYPE(c_float, POINTER(c_float))),
    ('v_form_d', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(c_double))),
    ('v_form_f', CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), POINTER(c_float))),
    ('m_form_d', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(c_double))),
    ('m_form_f', CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), POINTER(c_float))),
    ('grad_d', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(c_double))),
    ('grad_f', CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), POINTER(c_float))),
    ('hess_d', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(c_double))),
    ('hess_f', CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), POINTER(c_float))),
    ('make_rank1_d', CFUNCTYPE(None, POINTER(c_double), c_double, POINTER(c_double))),
    ('make_rank1_f', CFUNCTYPE(None, POINTER(c_float), c_float, POINTER(c_float))),
    ('make_iso_d', CFUNCTYPE(None, POINTER(c_double), c_double)),
    ('make_iso_f', CFUNCTYPE(None, POINTER(c_float), c_float)),
]
# Alias matching the C typedef name.
tijk_sym_fun = tijk_sym_fun_t
# Field layout of tijk_type_t (class declared earlier in the file): describes
# one tensor representation — its name, tensor order, spatial dimension,
# coefficient count, symmetry bookkeeping tables, and a C function-pointer
# table for scalar product / norm / transform / conversion / approximation.
tijk_type_t._fields_ = [
    ('name', STRING),
    ('order', c_uint),
    ('dim', c_uint),
    ('num', c_uint),
    ('mult', POINTER(c_uint)),
    ('unsym2uniq', POINTER(c_int)),
    ('uniq2unsym', POINTER(c_int)),
    ('uniq_idx', POINTER(c_uint)),
    ('tsp_d', CFUNCTYPE(c_double, POINTER(c_double), POINTER(c_double))),
    ('tsp_f', CFUNCTYPE(c_float, POINTER(c_float), POINTER(c_float))),
    ('norm_d', CFUNCTYPE(c_double, POINTER(c_double))),
    ('norm_f', CFUNCTYPE(c_float, POINTER(c_float))),
    ('trans_d', CFUNCTYPE(None, POINTER(c_double), POINTER(c_double), POINTER(c_double))),
    ('trans_f', CFUNCTYPE(None, POINTER(c_float), POINTER(c_float), POINTER(c_float))),
    # Self-referential fields (POINTER(tijk_type_t)) require this deferred
    # _fields_ assignment rather than an inline class body.
    ('convert_d', CFUNCTYPE(c_int, POINTER(c_double), POINTER(tijk_type_t), POINTER(c_double))),
    ('convert_f', CFUNCTYPE(c_int, POINTER(c_float), POINTER(tijk_type_t), POINTER(c_float))),
    ('approx_d', CFUNCTYPE(c_int, POINTER(c_double), POINTER(tijk_type_t), POINTER(c_double))),
    ('approx_f', CFUNCTYPE(c_int, POINTER(c_float), POINTER(tijk_type_t), POINTER(c_float))),
    ('_convert_from_d', CFUNCTYPE(c_int, POINTER(c_double), POINTER(c_double), POINTER(tijk_type_t))),
    ('_convert_from_f', CFUNCTYPE(c_int, POINTER(c_float), POINTER(c_float), POINTER(tijk_type_t))),
    ('_approx_from_d', CFUNCTYPE(c_int, POINTER(c_double), POINTER(c_double), POINTER(tijk_type_t))),
    ('_approx_from_f', CFUNCTYPE(c_int, POINTER(c_float), POINTER(c_float), POINTER(tijk_type_t))),
    ('sym', POINTER(tijk_sym_fun)),
]
# --- tijk tensor-type singletons.  Naming scheme appears to be
# <order>o<dim>d plus symmetry tag (sym/unsym/asym), e.g. tijk_4o3d_sym.
tijk_2o2d_unsym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_2o2d_unsym')
tijk_2o2d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_2o2d_sym')
tijk_2o2d_asym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_2o2d_asym')
tijk_3o2d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_3o2d_sym')
tijk_4o2d_unsym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_4o2d_unsym')
tijk_4o2d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_4o2d_sym')
tijk_1o3d = (POINTER(tijk_type)).in_dll(libteem, 'tijk_1o3d')
tijk_2o3d_unsym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_2o3d_unsym')
tijk_2o3d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_2o3d_sym')
tijk_2o3d_asym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_2o3d_asym')
tijk_3o3d_unsym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_3o3d_unsym')
tijk_3o3d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_3o3d_sym')
tijk_4o3d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_4o3d_sym')
tijk_6o3d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_6o3d_sym')
tijk_8o3d_sym = (POINTER(tijk_type)).in_dll(libteem, 'tijk_8o3d_sym')
tijkPresent = (c_int).in_dll(libteem, 'tijkPresent')
# Elementwise tensor arithmetic; the trailing tijk_type* selects the
# representation (and hence coefficient count) the operation applies to.
tijk_add_d = libteem.tijk_add_d
tijk_add_d.restype = None
tijk_add_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_add_f = libteem.tijk_add_f
tijk_add_f.restype = None
tijk_add_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_sub_d = libteem.tijk_sub_d
tijk_sub_d.restype = None
tijk_sub_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_sub_f = libteem.tijk_sub_f
tijk_sub_f.restype = None
tijk_sub_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_incr_d = libteem.tijk_incr_d
tijk_incr_d.restype = None
tijk_incr_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_incr_f = libteem.tijk_incr_f
tijk_incr_f.restype = None
tijk_incr_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_negate_d = libteem.tijk_negate_d
tijk_negate_d.restype = None
tijk_negate_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_negate_f = libteem.tijk_negate_f
tijk_negate_f.restype = None
tijk_negate_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_scale_d = libteem.tijk_scale_d
tijk_scale_d.restype = None
tijk_scale_d.argtypes = [POINTER(c_double), c_double, POINTER(c_double), POINTER(tijk_type)]
tijk_scale_f = libteem.tijk_scale_f
tijk_scale_f.restype = None
tijk_scale_f.argtypes = [POINTER(c_float), c_float, POINTER(c_float), POINTER(tijk_type)]
tijk_zero_d = libteem.tijk_zero_d
tijk_zero_d.restype = None
tijk_zero_d.argtypes = [POINTER(c_double), POINTER(tijk_type)]
tijk_zero_f = libteem.tijk_zero_f
tijk_zero_f.restype = None
tijk_zero_f.argtypes = [POINTER(c_float), POINTER(tijk_type)]
tijk_copy_d = libteem.tijk_copy_d
tijk_copy_d.restype = None
tijk_copy_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_copy_f = libteem.tijk_copy_f
tijk_copy_f.restype = None
tijk_copy_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
# Parameters for rank-1 refinement (class declared earlier in the file).
# _pack_ = 4 matches the C struct's 4-byte alignment; changing it would
# desynchronize the Python layout from the library's ABI.
tijk_refine_rank1_parm_t._pack_ = 4
tijk_refine_rank1_parm_t._fields_ = [
    ('eps_start', c_double),
    ('eps_impr', c_double),
    ('beta', c_double),
    ('gamma', c_double),
    ('sigma', c_double),
    ('maxtry', c_uint),
]
# Allocator / deallocator pair for the parameter struct.
tijk_refine_rank1_parm_new = libteem.tijk_refine_rank1_parm_new
tijk_refine_rank1_parm_new.restype = POINTER(tijk_refine_rank1_parm)
tijk_refine_rank1_parm_new.argtypes = []
tijk_refine_rank1_parm_nix = libteem.tijk_refine_rank1_parm_nix
tijk_refine_rank1_parm_nix.restype = POINTER(tijk_refine_rank1_parm)
tijk_refine_rank1_parm_nix.argtypes = [POINTER(tijk_refine_rank1_parm)]
# --- tijk_refine_rankk_parm: parameter struct for rank-k refinement ---
# Declared empty first, then populated via _fields_ below (the two-step
# pattern allows self/forward references in generated bindings).
class tijk_refine_rankk_parm_t(Structure):
    pass
tijk_refine_rankk_parm_t._pack_ = 4  # must match the C build's packing
tijk_refine_rankk_parm_t._fields_ = [
    ('eps_res', c_double),
    ('eps_impr', c_double),
    ('pos', c_char),
    # nested rank-1 refinement parameters used by the rank-k routine
    ('rank1_parm', POINTER(tijk_refine_rank1_parm)),
]
tijk_refine_rankk_parm = tijk_refine_rankk_parm_t  # C-style alias for the struct
tijk_refine_rankk_parm_new = libteem.tijk_refine_rankk_parm_new
tijk_refine_rankk_parm_new.restype = POINTER(tijk_refine_rankk_parm)
tijk_refine_rankk_parm_new.argtypes = []
tijk_refine_rankk_parm_nix = libteem.tijk_refine_rankk_parm_nix
tijk_refine_rankk_parm_nix.restype = POINTER(tijk_refine_rankk_parm)
tijk_refine_rankk_parm_nix.argtypes = [POINTER(tijk_refine_rankk_parm)]
# --- tijk_approx_heur_parm: parameter struct for heuristic approximation ---
class tijk_approx_heur_parm_t(Structure):
    pass
tijk_approx_heur_parm_t._pack_ = 4
tijk_approx_heur_parm_t._fields_ = [
    ('eps_res', c_double),
    ('eps_impr', c_double),
    ('ratios', POINTER(c_double)),
    ('refine_parm', POINTER(tijk_refine_rankk_parm)),
]
tijk_approx_heur_parm = tijk_approx_heur_parm_t  # C-style alias
tijk_approx_heur_parm_new = libteem.tijk_approx_heur_parm_new
tijk_approx_heur_parm_new.restype = POINTER(tijk_approx_heur_parm)
tijk_approx_heur_parm_new.argtypes = []
tijk_approx_heur_parm_nix = libteem.tijk_approx_heur_parm_nix
tijk_approx_heur_parm_nix.restype = POINTER(tijk_approx_heur_parm)
tijk_approx_heur_parm_nix.argtypes = [POINTER(tijk_approx_heur_parm)]
# --- tijk rank-1 initialization / refinement and rank-k / heuristic
# --- approximation routines (2d/3d, double/float variants) ---
# All bindings follow the library's naming scheme: _2d/_3d picks the
# dimension, trailing _d/_f picks double or float precision.
tijk_init_rank1_2d_d = libteem.tijk_init_rank1_2d_d
tijk_init_rank1_2d_d.restype = c_int
tijk_init_rank1_2d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_init_rank1_2d_f = libteem.tijk_init_rank1_2d_f
tijk_init_rank1_2d_f.restype = c_int
tijk_init_rank1_2d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_init_rank1_3d_d = libteem.tijk_init_rank1_3d_d
tijk_init_rank1_3d_d.restype = c_int
tijk_init_rank1_3d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_init_rank1_3d_f = libteem.tijk_init_rank1_3d_f
tijk_init_rank1_3d_f.restype = c_int
tijk_init_rank1_3d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_init_max_2d_d = libteem.tijk_init_max_2d_d
tijk_init_max_2d_d.restype = c_int
tijk_init_max_2d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_init_max_2d_f = libteem.tijk_init_max_2d_f
tijk_init_max_2d_f.restype = c_int
tijk_init_max_2d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_init_max_3d_d = libteem.tijk_init_max_3d_d
tijk_init_max_3d_d.restype = c_int
tijk_init_max_3d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_init_max_3d_f = libteem.tijk_init_max_3d_f
tijk_init_max_3d_f.restype = c_int
tijk_init_max_3d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
# refine_* variants additionally take a tijk_refine_rank1_parm
tijk_refine_rank1_2d_d = libteem.tijk_refine_rank1_2d_d
tijk_refine_rank1_2d_d.restype = c_int
tijk_refine_rank1_2d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
tijk_refine_rank1_2d_f = libteem.tijk_refine_rank1_2d_f
tijk_refine_rank1_2d_f.restype = c_int
tijk_refine_rank1_2d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
tijk_refine_rank1_3d_d = libteem.tijk_refine_rank1_3d_d
tijk_refine_rank1_3d_d.restype = c_int
tijk_refine_rank1_3d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
tijk_refine_rank1_3d_f = libteem.tijk_refine_rank1_3d_f
tijk_refine_rank1_3d_f.restype = c_int
tijk_refine_rank1_3d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
tijk_refine_max_2d_d = libteem.tijk_refine_max_2d_d
tijk_refine_max_2d_d.restype = c_int
tijk_refine_max_2d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
tijk_refine_max_2d_f = libteem.tijk_refine_max_2d_f
tijk_refine_max_2d_f.restype = c_int
tijk_refine_max_2d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
tijk_refine_max_3d_d = libteem.tijk_refine_max_3d_d
tijk_refine_max_3d_d.restype = c_int
tijk_refine_max_3d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
tijk_refine_max_3d_f = libteem.tijk_refine_max_3d_f
tijk_refine_max_3d_f.restype = c_int
tijk_refine_max_3d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), POINTER(tijk_refine_rank1_parm)]
# rank-k refinement: wider signature (extra buffers, a scalar, a rank count)
# plus a tijk_refine_rankk_parm
tijk_refine_rankk_2d_d = libteem.tijk_refine_rankk_2d_d
tijk_refine_rankk_2d_d.restype = c_int
tijk_refine_rankk_2d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
tijk_refine_rankk_2d_f = libteem.tijk_refine_rankk_2d_f
tijk_refine_rankk_2d_f.restype = c_int
tijk_refine_rankk_2d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
tijk_refine_rankk_3d_d = libteem.tijk_refine_rankk_3d_d
tijk_refine_rankk_3d_d.restype = c_int
tijk_refine_rankk_3d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
tijk_refine_rankk_3d_f = libteem.tijk_refine_rankk_3d_f
tijk_refine_rankk_3d_f.restype = c_int
tijk_refine_rankk_3d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), c_float, POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
tijk_approx_rankk_2d_d = libteem.tijk_approx_rankk_2d_d
tijk_approx_rankk_2d_d.restype = c_int
tijk_approx_rankk_2d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
tijk_approx_rankk_2d_f = libteem.tijk_approx_rankk_2d_f
tijk_approx_rankk_2d_f.restype = c_int
tijk_approx_rankk_2d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
tijk_approx_rankk_3d_d = libteem.tijk_approx_rankk_3d_d
tijk_approx_rankk_3d_d.restype = c_int
tijk_approx_rankk_3d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
tijk_approx_rankk_3d_f = libteem.tijk_approx_rankk_3d_f
tijk_approx_rankk_3d_f.restype = c_int
tijk_approx_rankk_3d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), c_uint, POINTER(tijk_refine_rankk_parm)]
# heuristic approximation takes a tijk_approx_heur_parm instead
tijk_approx_heur_2d_d = libteem.tijk_approx_heur_2d_d
tijk_approx_heur_2d_d.restype = c_int
tijk_approx_heur_2d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), c_uint, POINTER(tijk_approx_heur_parm)]
tijk_approx_heur_2d_f = libteem.tijk_approx_heur_2d_f
tijk_approx_heur_2d_f.restype = c_int
tijk_approx_heur_2d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), c_uint, POINTER(tijk_approx_heur_parm)]
tijk_approx_heur_3d_d = libteem.tijk_approx_heur_3d_d
tijk_approx_heur_3d_d.restype = c_int
tijk_approx_heur_3d_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(tijk_type), c_uint, POINTER(tijk_approx_heur_parm)]
tijk_approx_heur_3d_f = libteem.tijk_approx_heur_3d_f
tijk_approx_heur_3d_f.restype = c_int
tijk_approx_heur_3d_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(c_float), POINTER(tijk_type), c_uint, POINTER(tijk_approx_heur_parm)]
# --- tijk "esh" (spherical-harmonics representation) bindings ---
# tijk_esh_len is a C array of unknown extent; the generator uses a
# zero-length array type so it can be re-cast by the caller as needed.
tijk_esh_len = (c_uint * 0).in_dll(libteem, 'tijk_esh_len')
tijk_max_esh_order = (c_uint).in_dll(libteem, 'tijk_max_esh_order')
tijk_eval_esh_basis_d = libteem.tijk_eval_esh_basis_d
tijk_eval_esh_basis_d.restype = c_uint
tijk_eval_esh_basis_d.argtypes = [POINTER(c_double), c_uint, c_double, c_double]
tijk_eval_esh_basis_f = libteem.tijk_eval_esh_basis_f
tijk_eval_esh_basis_f.restype = c_uint
tijk_eval_esh_basis_f.argtypes = [POINTER(c_float), c_uint, c_float, c_float]
tijk_eval_esh_d = libteem.tijk_eval_esh_d
tijk_eval_esh_d.restype = c_double
tijk_eval_esh_d.argtypes = [POINTER(c_double), c_uint, c_double, c_double]
tijk_eval_esh_f = libteem.tijk_eval_esh_f
tijk_eval_esh_f.restype = c_float
tijk_eval_esh_f.argtypes = [POINTER(c_float), c_uint, c_float, c_float]
tijk_esh_sp_d = libteem.tijk_esh_sp_d
tijk_esh_sp_d.restype = c_double
tijk_esh_sp_d.argtypes = [POINTER(c_double), POINTER(c_double), c_uint]
tijk_esh_sp_f = libteem.tijk_esh_sp_f
tijk_esh_sp_f.restype = c_float
tijk_esh_sp_f.argtypes = [POINTER(c_float), POINTER(c_float), c_uint]
# conversions between 3D symmetric tensors and the esh representation
tijk_3d_sym_to_esh_d = libteem.tijk_3d_sym_to_esh_d
tijk_3d_sym_to_esh_d.restype = c_int
tijk_3d_sym_to_esh_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_3d_sym_to_esh_f = libteem.tijk_3d_sym_to_esh_f
tijk_3d_sym_to_esh_f.restype = c_int
tijk_3d_sym_to_esh_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_esh_to_3d_sym_d = libteem.tijk_esh_to_3d_sym_d
tijk_esh_to_3d_sym_d.restype = POINTER(tijk_type)
tijk_esh_to_3d_sym_d.argtypes = [POINTER(c_double), POINTER(c_double), c_uint]
tijk_esh_to_3d_sym_f = libteem.tijk_esh_to_3d_sym_f
tijk_esh_to_3d_sym_f.restype = POINTER(tijk_type)
tijk_esh_to_3d_sym_f.argtypes = [POINTER(c_float), POINTER(c_float), c_uint]
# matrix forms of the same conversions (caller owns interpretation of
# the returned buffer; presumably library-allocated — TODO confirm in
# Teem's tijk headers before freeing)
tijk_3d_sym_to_esh_matrix_d = libteem.tijk_3d_sym_to_esh_matrix_d
tijk_3d_sym_to_esh_matrix_d.restype = POINTER(c_double)
tijk_3d_sym_to_esh_matrix_d.argtypes = [POINTER(tijk_type)]
tijk_3d_sym_to_esh_matrix_f = libteem.tijk_3d_sym_to_esh_matrix_f
tijk_3d_sym_to_esh_matrix_f.restype = POINTER(c_float)
tijk_3d_sym_to_esh_matrix_f.argtypes = [POINTER(tijk_type)]
tijk_esh_to_3d_sym_matrix_d = libteem.tijk_esh_to_3d_sym_matrix_d
tijk_esh_to_3d_sym_matrix_d.restype = POINTER(c_double)
tijk_esh_to_3d_sym_matrix_d.argtypes = [c_uint]
tijk_esh_to_3d_sym_matrix_f = libteem.tijk_esh_to_3d_sym_matrix_f
tijk_esh_to_3d_sym_matrix_f.restype = POINTER(c_float)
tijk_esh_to_3d_sym_matrix_f.argtypes = [c_uint]
# convolution / deconvolution and kernel construction in esh space
tijk_esh_convolve_d = libteem.tijk_esh_convolve_d
tijk_esh_convolve_d.restype = None
tijk_esh_convolve_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_uint]
tijk_esh_convolve_f = libteem.tijk_esh_convolve_f
tijk_esh_convolve_f.restype = None
tijk_esh_convolve_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_uint]
tijk_esh_deconvolve_d = libteem.tijk_esh_deconvolve_d
tijk_esh_deconvolve_d.restype = None
tijk_esh_deconvolve_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_uint]
tijk_esh_deconvolve_f = libteem.tijk_esh_deconvolve_f
tijk_esh_deconvolve_f.restype = None
tijk_esh_deconvolve_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float), c_uint]
tijk_esh_make_kernel_rank1_f = libteem.tijk_esh_make_kernel_rank1_f
tijk_esh_make_kernel_rank1_f.restype = c_int
tijk_esh_make_kernel_rank1_f.argtypes = [POINTER(c_float), POINTER(c_float), c_uint]
tijk_esh_make_kernel_rank1_d = libteem.tijk_esh_make_kernel_rank1_d
tijk_esh_make_kernel_rank1_d.restype = c_int
tijk_esh_make_kernel_rank1_d.argtypes = [POINTER(c_double), POINTER(c_double), c_uint]
tijk_esh_make_kernel_delta_f = libteem.tijk_esh_make_kernel_delta_f
tijk_esh_make_kernel_delta_f.restype = c_int
tijk_esh_make_kernel_delta_f.argtypes = [POINTER(c_float), POINTER(c_float), c_uint]
tijk_esh_make_kernel_delta_d = libteem.tijk_esh_make_kernel_delta_d
tijk_esh_make_kernel_delta_d.restype = c_int
tijk_esh_make_kernel_delta_d.argtypes = [POINTER(c_double), POINTER(c_double), c_uint]
# --- tijk "efs" representation bindings (2D analogue of esh) ---
tijk_max_efs_order = (c_uint).in_dll(libteem, 'tijk_max_efs_order')
tijk_eval_efs_basis_d = libteem.tijk_eval_efs_basis_d
tijk_eval_efs_basis_d.restype = c_uint
tijk_eval_efs_basis_d.argtypes = [POINTER(c_double), c_uint, c_double]
tijk_eval_efs_basis_f = libteem.tijk_eval_efs_basis_f
tijk_eval_efs_basis_f.restype = c_uint
tijk_eval_efs_basis_f.argtypes = [POINTER(c_float), c_uint, c_float]
tijk_eval_efs_d = libteem.tijk_eval_efs_d
tijk_eval_efs_d.restype = c_double
tijk_eval_efs_d.argtypes = [POINTER(c_double), c_uint, c_double]
tijk_eval_efs_f = libteem.tijk_eval_efs_f
tijk_eval_efs_f.restype = c_float
tijk_eval_efs_f.argtypes = [POINTER(c_float), c_uint, c_float]
# conversions between 2D symmetric tensors and the efs representation
tijk_2d_sym_to_efs_d = libteem.tijk_2d_sym_to_efs_d
tijk_2d_sym_to_efs_d.restype = c_int
tijk_2d_sym_to_efs_d.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(tijk_type)]
tijk_2d_sym_to_efs_f = libteem.tijk_2d_sym_to_efs_f
tijk_2d_sym_to_efs_f.restype = c_int
tijk_2d_sym_to_efs_f.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(tijk_type)]
tijk_efs_to_2d_sym_d = libteem.tijk_efs_to_2d_sym_d
tijk_efs_to_2d_sym_d.restype = POINTER(tijk_type)
tijk_efs_to_2d_sym_d.argtypes = [POINTER(c_double), POINTER(c_double), c_uint]
tijk_efs_to_2d_sym_f = libteem.tijk_efs_to_2d_sym_f
tijk_efs_to_2d_sym_f.restype = POINTER(tijk_type)
tijk_efs_to_2d_sym_f.argtypes = [POINTER(c_float), POINTER(c_float), c_uint]
# --- tijk <-> Nrrd axis metadata bindings ---
# tijk_class is an airEnum exported by the library (enum of tijk classes).
tijk_class = (POINTER(airEnum)).in_dll(libteem, 'tijk_class')
# setters that tag a Nrrd axis with a tensor / esh / efs interpretation
tijk_set_axis_tensor = libteem.tijk_set_axis_tensor
tijk_set_axis_tensor.restype = c_int
tijk_set_axis_tensor.argtypes = [POINTER(Nrrd), c_uint, POINTER(tijk_type)]
tijk_set_axis_esh = libteem.tijk_set_axis_esh
tijk_set_axis_esh.restype = c_int
tijk_set_axis_esh.argtypes = [POINTER(Nrrd), c_uint, c_uint]
tijk_set_axis_efs = libteem.tijk_set_axis_efs
tijk_set_axis_efs.restype = c_int
tijk_set_axis_efs.argtypes = [POINTER(Nrrd), c_uint, c_uint]
# struct filled in by tijk_get_axis_type describing an axis' tijk metadata
class tijk_axis_info_t(Structure):
    pass
tijk_axis_info_t._fields_ = [
    ('tclass', c_int),
    ('masked', c_uint),
    ('type', POINTER(tijk_type)),
    ('order', c_uint),
]
tijk_axis_info = tijk_axis_info_t  # C-style alias for the struct
tijk_get_axis_type = libteem.tijk_get_axis_type
tijk_get_axis_type.restype = c_int
tijk_get_axis_type.argtypes = [POINTER(tijk_axis_info), POINTER(Nrrd), c_uint]
# --- unrrdu: the "unu" command-line toolkit bindings ---
# Each unrrdu_*Cmd is a global unrrduCmd struct exported by libteem, one
# per "unu" subcommand (e.g. unrrdu_padCmd backs `unu pad`).  Order here
# follows the generator's output, not any functional grouping.
unrrdu_axinsertCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_axinsertCmd')
unrrdu_2opCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_2opCmd')
unrrdu_axmergeCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_axmergeCmd')
unrrdu_projectCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_projectCmd')
unrrdu_padCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_padCmd')
unrrdu_reshapeCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_reshapeCmd')
unrrdu_ccfindCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_ccfindCmd')
unrrdu_undosCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_undosCmd')
unrrdu_permuteCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_permuteCmd')
unrrdu_cksumCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_cksumCmd')
unrrdu_sliceCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_sliceCmd')
unrrdu_i2wCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_i2wCmd')
unrrdu_envCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_envCmd')
unrrdu_jhistoCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_jhistoCmd')
unrrdu_spliceCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_spliceCmd')
unrrdu_swapCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_swapCmd')
unrrdu_rmapCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_rmapCmd')
unrrdu_insetCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_insetCmd')
unrrdu_shuffleCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_shuffleCmd')
unrrdu_substCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_substCmd')
unrrdu_axdeleteCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_axdeleteCmd')
unrrdu_w2iCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_w2iCmd')
unrrdu_gammaCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_gammaCmd')
unrrdu_ccadjCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_ccadjCmd')
unrrdu_1opCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_1opCmd')
unrrdu_histoCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_histoCmd')
unrrdu_joinCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_joinCmd')
unrrdu_histaxCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_histaxCmd')
unrrdu_lut2Cmd = (unrrduCmd).in_dll(libteem, 'unrrdu_lut2Cmd')
unrrdu_cropCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_cropCmd')
unrrdu_dhistoCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_dhistoCmd')
unrrdu_headCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_headCmd')
unrrdu_axinfoCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_axinfoCmd')
unrrdu_resampleCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_resampleCmd')
unrrdu_imapCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_imapCmd')
unrrdu_ccmergeCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_ccmergeCmd')
unrrdu_lutCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_lutCmd')
unrrdu_aboutCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_aboutCmd')
unrrdu_vidiconCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_vidiconCmd')
unrrdu_cmedianCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_cmedianCmd')
unrrdu_mlutCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_mlutCmd')
unrrdu_quantizeCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_quantizeCmd')
unrrdu_ccsettleCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_ccsettleCmd')
unrrdu_deringCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_deringCmd')
unrrdu_diffCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_diffCmd')
unrrdu_untileCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_untileCmd')
unrrdu_tileCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_tileCmd')
unrrdu_basinfoCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_basinfoCmd')
unrrdu_makeCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_makeCmd')
unrrdu_flipCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_flipCmd')
unrrdu_mrmapCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_mrmapCmd')
unrrdu_heqCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_heqCmd')
unrrdu_fftCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_fftCmd')
unrrdu_distCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_distCmd')
unrrdu_3opCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_3opCmd')
unrrdu_unorientCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_unorientCmd')
unrrdu_acropCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_acropCmd')
unrrdu_sselectCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_sselectCmd')
unrrdu_saveCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_saveCmd')
unrrdu_unquantizeCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_unquantizeCmd')
unrrdu_dataCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_dataCmd')
unrrdu_dnormCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_dnormCmd')
unrrdu_convertCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_convertCmd')
unrrdu_affineCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_affineCmd')
unrrdu_axsplitCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_axsplitCmd')
unrrdu_minmaxCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_minmaxCmd')
unrrdu_diceCmd = (unrrduCmd).in_dll(libteem, 'unrrdu_diceCmd')
# library-level globals and usage helpers
unrrduPresent = (c_int).in_dll(libteem, 'unrrduPresent')
unrrduBiffKey = (STRING).in_dll(libteem, 'unrrduBiffKey')
unrrduDefNumColumns = (c_uint).in_dll(libteem, 'unrrduDefNumColumns')
# NULL-terminated C array of unknown extent, hence the zero-length type
unrrduCmdList = (POINTER(unrrduCmd) * 0).in_dll(libteem, 'unrrduCmdList')
unrrduUsageUnu = libteem.unrrduUsageUnu
unrrduUsageUnu.restype = None
unrrduUsageUnu.argtypes = [STRING, POINTER(hestParm)]
unrrduUsage = libteem.unrrduUsage
unrrduUsage.restype = c_int
unrrduUsage.argtypes = [STRING, POINTER(hestParm), STRING, POINTER(POINTER(unrrduCmd))]
# hest (command-line parsing) callback structs used by unu option parsing
unrrduHestPosCB = (hestCB).in_dll(libteem, 'unrrduHestPosCB')
unrrduHestMaybeTypeCB = (hestCB).in_dll(libteem, 'unrrduHestMaybeTypeCB')
unrrduHestScaleCB = (hestCB).in_dll(libteem, 'unrrduHestScaleCB')
unrrduHestBitsCB = (hestCB).in_dll(libteem, 'unrrduHestBitsCB')
unrrduHestFileCB = (hestCB).in_dll(libteem, 'unrrduHestFileCB')
unrrduHestEncodingCB = (hestCB).in_dll(libteem, 'unrrduHestEncodingCB')
# --- deferred _fields_ completions for forward-declared structs ---
# Thread primitives are left opaque (empty _fields_): Python code only
# ever handles pointers to them, never their contents.
_airThread._fields_ = [
]
_airThreadMutex._fields_ = [
]
_airThreadCond._fields_ = [
]
# NrrdIoState_t: state for reading/writing nrrd files.  Field order and
# types must mirror the C struct exactly; _pack_ is not set here, so the
# default (native) alignment applies.
NrrdIoState_t._fields_ = [
    ('path', STRING),
    ('base', STRING),
    ('line', STRING),
    ('dataFNFormat', STRING),
    ('dataFN', POINTER(STRING)),
    ('headerStringWrite', STRING),
    ('headerStringRead', STRING),
    ('dataFNArr', POINTER(airArray)),
    ('headerFile', POINTER(FILE)),
    ('dataFile', POINTER(FILE)),
    ('dataFileDim', c_uint),
    ('lineLen', c_uint),
    ('charsPerLine', c_uint),
    ('valsPerLine', c_uint),
    ('lineSkip', c_uint),
    ('headerStrlen', c_uint),
    ('headerStrpos', c_uint),
    ('byteSkip', c_long),
    ('dataFNMin', c_int),
    ('dataFNMax', c_int),
    ('dataFNStep', c_int),
    ('dataFNIndex', c_uint),
    ('pos', c_int),
    ('endian', c_int),
    ('seen', c_int * 33),
    ('detachedHeader', c_int),
    ('bareText', c_int),
    ('skipData', c_int),
    ('skipFormatURL', c_int),
    ('keepNrrdDataFileOpen', c_int),
    ('zlibLevel', c_int),
    ('zlibStrategy', c_int),
    ('bzip2BlockSize', c_int),
    ('learningHeaderStrlen', c_int),
    ('oldData', c_void_p),
    ('oldDataSize', c_size_t),
    ('format', POINTER(NrrdFormat)),
    ('encoding', POINTER(NrrdEncoding)),
]
# NrrdResampleAxis: per-axis state for the resampler.
class NrrdResampleAxis(Structure):
    pass
NrrdResampleAxis._pack_ = 4  # must match the C build's packing
NrrdResampleAxis._fields_ = [
    ('kernel', POINTER(NrrdKernel)),
    ('kparm', c_double * 8),
    ('min', c_double),
    ('max', c_double),
    ('samples', c_size_t),
    ('overrideCenter', c_int),
    ('center', c_int),
    ('sizeIn', c_size_t),
    ('sizePerm', c_size_t * 16),
    ('axIdx', c_uint),
    ('passIdx', c_uint),
    ('axisPerm', c_uint * 16),
    ('ratio', c_double),
    ('nrsmp', POINTER(Nrrd)),
    ('nline', POINTER(Nrrd)),
    ('nindex', POINTER(Nrrd)),
    ('nweight', POINTER(Nrrd)),
]
# NrrdResampleContext: whole-resampling state, embedding 17 per-axis slots
# (array sizes like 16/17 come from the C headers' dimension limits —
# TODO confirm against NRRD_DIM_MAX if editing).
NrrdResampleContext._pack_ = 4
NrrdResampleContext._fields_ = [
    ('nin', POINTER(Nrrd)),
    ('verbose', c_int),
    ('boundary', c_int),
    ('typeOut', c_int),
    ('renormalize', c_int),
    ('roundlast', c_int),
    ('clamp', c_int),
    ('defaultCenter', c_int),
    ('nonExistent', c_int),
    ('padValue', c_double),
    ('dim', c_uint),
    ('passNum', c_uint),
    ('topRax', c_uint),
    ('botRax', c_uint),
    ('permute', c_uint * 17),
    ('passAxis', c_uint * 16),
    ('axis', NrrdResampleAxis * 17),
    ('flag', POINTER(c_int)),
    ('time', c_double),
]
# NrrdIter: value iterator over a nrrd, a single value, or raw data;
# 'load' is a C function pointer returning the next double.
NrrdIter._pack_ = 4
NrrdIter._fields_ = [
    ('nrrd', POINTER(Nrrd)),
    ('ownNrrd', POINTER(Nrrd)),
    ('val', c_double),
    ('size', c_size_t),
    ('data', STRING),
    ('left', c_size_t),
    ('load', CFUNCTYPE(c_double, c_void_p)),
]
# NrrdDeringContext: state for the ring-artifact removal filter.
NrrdDeringContext._pack_ = 4
NrrdDeringContext._fields_ = [
    ('verbose', c_int),
    ('linearInterp', c_int),
    ('verticalSeam', c_int),
    ('nin', POINTER(Nrrd)),
    ('center', c_double * 2),
    ('clampPerc', c_double * 2),
    ('radiusScale', c_double),
    ('thetaNum', c_uint),
    ('clampHistoBins', c_uint),
    ('rkernel', POINTER(NrrdKernel)),
    ('rkparm', c_double * 8),
    ('tkernel', POINTER(NrrdKernel)),
    ('tkparm', c_double * 8),
    ('cdataIn', STRING),
    ('cdataOut', STRING),
    ('sliceSize', c_size_t),
    ('clampDo', c_int),
    ('clamp', c_double * 2),
    ('ringMagnitude', c_double),
]
__all__ = ['tenFiberStopUIntSet', 'biffMsgAddf',
'limnCameraPathTrack', 'gageVecLambda2',
'nrrdBoundarySpecParse', 'miteUserNix', 'tenFiberIntgLast',
'tenInterpTypeRThetaPhiLinear', 'ell_4m_post_mul_f',
'nrrdArithGamma', 'ell_Nm_check', 'tijk_refine_max_2d_f',
'pushEnergyCoulomb', 'tijk_class', 'tenEstimate1MethodMLE',
'nrrdHasNonExistOnly', 'nrrdFormatPNG',
'pullInterTypeLast', 'miteBiffKey', 'limnEdgeTypeLast',
'tenGageFAShapeIndex', 'tijk_approx_rankk_3d_f',
'baneMeasrUnknown', 'alanParmF', 'seekTypeMinimalSurface',
'pullIterParmLast', 'airRandMTStateNix',
'nrrdKernelCatmullRomD', 'pullInfoTangent1',
'pullInfoTangent2', 'alanParmK', 'gageContextCopy',
'miteValVdefTdotV', 'nrrdKernelC4Hexic', 'unrrdu_dnormCmd',
'pullEnergyCotan', 'tenEstimateContextNix',
'tenGageFAGeomTens', 'airEndsWith', 'miteUser',
'ell_4m_to_q_d', 'nrrdField_block_size', 'mossSamplerNew',
'gageVecSOmega', 'tenDoubleContract_d', 'pullInfoQuality',
'limnSpaceView', 'limnSplineMinT',
'nrrdResampleNonExistentRenormalize', 'nrrdFieldInfoParse',
'nrrdField_space', 'baneStateHistEqSmart',
'tenGageConfDiffusionAlign', 'gageSclK2',
'hestRespFileComment', 'nrrdApply2DLut', 'pullCountLast',
'tenGageNormGradMag', 'ell_q_exp_f',
'tijk_refine_max_3d_f', 'bane1DOpacInfoFrom2D',
'tijk_refine_max_3d_d', 'tijk_refine_max_2d_d',
'nrrdBoundaryWrap', 'seekLowerInsideSet',
'limnEdgeTypeBackFacet', 'ell_q_exp_d',
'tenInterpTypeLoxR', 'tijk_3o3d_unsym', 'pushBinPointAdd',
'nrrdFlip', 'airFP_SNAN', 'tenDwiGageTensorMLEError',
'nrrdKernelBlackmanD', 'pullInitMethodLast',
'seekContextNew', 'nrrdField_comment',
'limnSplineInfoSize', 'nrrdSimpleResample',
'tenInterpTypeLoxK', 'nrrdTernaryOpIfElse',
'tenGlyphTypePolarPlot', 'limnPolyDataCylinder',
'tendFiberStopCB', 'tenGageTraceDiffusionAlign',
'unrrdu_flipCmd', 'nrrdKeyValueSize', 'gageErrUnknown',
'limnPolyDataNeighborArray', 'nrrdBinaryOpSgnPow',
'nrrdKeyValueGet', 'nrrdKernelCatmullRomSupportDebugDD',
'limnQN16octa', 'nrrdKernelC3QuinticD', 'airTypeFloat',
'pullFlagBinSingle', 'alanParmDeltaX', 'tenGlyphTypeBox',
'limnLightReset', 'ellPresent', 'nrrdField_space_units',
'gageSclShapeTrace', 'seekStrengthSet',
'pullCondEnergyTry', 'tenDwiGage2TensorPeled',
'nrrdKernelBSpline4DDD', 'tenGageTraceHessianEvec0',
'tenGageTraceHessianEvec1', 'tenGageTraceHessianEvec2',
'miteShadeMethodLast', 'airHeap', 'unrrdu_substCmd',
'limnHestPolyDataLMPD', 'baneGkms_hvolCmd',
'nrrdUnaryOpExists', 'nrrdSpace3DRightHandedTime',
'airArrayNix', 'nrrdArithIterBinaryOp', 'airFPClass_d',
'airNoDio_format', 'gageCtxFlagLast', 'dyeConvert',
'ell_q_3v_rotate_d', 'limnQNLast',
'gageParmOrientationFromSpacing',
'nrrdBasicInfoMeasurementFrame', 'limnQN8octa',
'limnPolyDataNix', 'tenInterpTypeWang',
'limnPolyDataOctahedron', 'unrrdu_envCmd',
'nrrdSpace3DLeftHanded', 'nrrdUnaryOpCbrt',
'nrrdAxisInfoLast', 'coilMethodTesting',
'tenFiberDirectionNumber', 'tijk_eval_efs_f',
'coilKind7Tensor', 'baneIncAnswer',
'gageSigmaSamplingUniformTau', 'hooverThreadBegin_t',
'airFastExp', 'tenExperSpecNix', 'pullStatusNixMe',
'tenGageQGradVec', 'tenGageOmegaLaplacian',
'pullTraceMultiNix', 'nrrdOriginStatusNoMin',
'nrrdResampleInputSet', 'tenGlyphType',
'pullInitMethodPointPerVoxel', 'tenFiberSingleTrace',
'tenGageOmegaDiffusionFraction', 'tijk_refine_rank1_3d_f',
'gageVecProjHelGradient', 'miteRangeChar',
'nrrdStateMeasureType', 'tijk_class_last', 'pullInfoLast',
'tenTripleConvert', 'hestElideSingleOtherDefault',
'gageSclHessianTen', 'echoTypeRectangle', 'nrrdUntile2D',
'nrrdSpaceLeftAnteriorSuperior', 'nrrdTypeULLong',
'airInsane_UCSize', 'limnSplineTypeSpecParse',
'limnSplineInfo4Vector', 'tenInterpParmBufferAlloc',
'nrrdResampleRangeSet', 'unrrdu_dhistoCmd',
'meetAirEnumAllCheck', 'tenFiberIntgRK4',
'pushEnergySpring', 'tenGageModeWarp', 'nrrdFormatUnknown',
'gageStackBlurParmScaleSet', 'elfMaximaParmSet',
'tenInterpTwoDiscrete_d', 'tenFiberStopMinNumSteps',
'nrrdKernelSprint', 'gageKindVolumeCheck',
'tenGageNormGradVec', 'pullFlagZeroZ', 'tenGageQHessian',
'airHeapNix', 'tenEstimate2MethodUnknown',
'limnPolyDataCone', 'unrrdu_distCmd', 'hestParmNew',
'tenEstimate2Method', 'nrrdUnaryOpAcos', 'tenModel1Stick',
'tendCmdList', 'airMopAlways', 'gageKindAnswerLength',
'nrrdMeasureSD', 'tenBMatrixCalc', 'nrrdLoad',
'miteVariableParse', 'pullCCSort', 'echoThreadStateNew',
'coilKindTypeLast', 'mossMatTranslateSet',
'nrrdBinaryOpGT', 'tenDefFiberMaxHalfLen',
'tenGlyphBqdEvalUv', 'banePosCalc', 'tijk_3o2d_sym',
'airFloatQNaN', 'limnPolyDataIcoSphere', 'tenEigensolve_f',
'pullEnergyTypeZero', 'tenEigensolve_d',
'meetNrrdKernelAllCheck', 'tenGageQGradMag',
'gageDefStackNormalizeDeriv', 'nrrdArithUnaryOp',
'tenGageCp1HessianEvec2', 'tenGageCp1HessianEvec1',
'tenGageCp1HessianEvec0', 'pullTask_t', 'echoColorSet',
'pullConstraintFailTravel', 'limnPolyDataInfo',
'tenFiberMultiProbeVals', 'miteRayBegin',
'alanParmNumThreads', 'pullFlagSet',
'gageParmDefaultCenter', 'unrrdu_ccmergeCmd',
'miteThreadNew', 'miteStage', 'airDrandMT53_r',
'airMopNever', 'tenFiberTypeEvec1', 'tenFiberTypeEvec0',
'tenFiberTypeEvec2', 'gageVecCurlNorm', 'ell_4m_to_q_f',
'ell_3v_print_d', 'nrrdMeasureHistoMedian',
'ell_3v_print_f', 'pullSysParmEnergyDecreasePopCntlMin',
'unrrdu_ccsettleCmd', 'tenGageCa1HessianEvec',
'tenGageFADiffusionFraction', 'gageStackBlurParm',
'limnPolyDataSpiralSuperquadric', 'seekTypeRidgeSurfaceT',
'nrrdBasicInfoSpaceOrigin', 'airUnescape', 'airEnumPrint',
'nrrdField_endian', 'alanRun', 'pullProcessModeNixing',
'airFPGen_f', 'airFPGen_d', 'airInsane_FltDblFPClass',
'pullInfoSpecNew', 'nrrdBinaryOpIf', 'nrrdAxisInfoSpacing',
'airExists', 'pullInfoLiveThresh', 'gageStackBlurParmNix',
'tenDWMRINexKeyFmt', 'pullInfoInside',
'tijk_approx_heur_3d_f', 'nrrdBinaryOpLTE',
'gageErrStackUnused', 'nrrdZeroSet',
'nrrdSpaceLeftAnteriorSuperiorTime',
'pullEnergyTypeQuartic', 'airHeapMerge',
'nrrdRangePercentileFromStringSet', 'limnPolyDataCopy',
'coilMethodArray', 'gageDeconvolveSeparableKnown',
'coilMethodTypeLast', 'tijk_zero_f', 'nrrdAxisInfoUnknown',
'tijk_zero_d', 'meetHestConstGageKind', 'miteThreadBegin',
'nrrdKernelBSpline2DD', 'nrrdTernaryOpMinSmooth',
'gageShapeItoW', 'miteThreadNix', 'limnPrimitiveLines',
'pullSysParmBeta', 'unrrduCmdList', 'nrrdUnaryOpNerf',
'pushTask', 'gageShapeCopy', 'ell_2m_1d_nullspace_d',
'nrrdSanity', 'nrrdSameSize', 'nrrdUnaryOpTan',
'tijk_copy_f', 'tenAniso_RA', 'tenTripleTypeRThetaZ',
'tenGageRHessian', 'gageKindCheck', 'airNoDio_size',
'seekTypeRidgeSurface', 'pullEnergyZero',
'gagePerVolumeNew', 'alanParmDiffB',
'pullFlagPopCntlEnoughTest', 'alanParmDiffA',
'pushBinDone', 'pullInfoLiveThresh2',
'pullInfoLiveThresh3', 'nrrdAxesSwap', 'gageItemSpec',
'baneMakeHVol', 'nrrdResampleNonExistentLast',
'mossFlagLast', 'nrrdDefaultWriteBareText', 'airShuffle_r',
'limnObjectRender', 'miteValXw',
'nrrdSpacingStatusScalarWithSpace', 'tenDWMRIModalityKey',
'pullFlag', 'gageSclK1', 'miteValXi',
'gageParmStackNormalizeDerivBias', 'alanContext_t',
'nrrdOriginStatusDirection', 'baneMeasrValuePositive',
'baneSigmaCalc', 'seekTypeSet', 'nrrdWrite',
'pullInfoIsovalue', 'nrrdAlloc_va',
'gageItemPackPartHessEval1', 'gageItemPackPartHessEval0',
'gageItemPackPartHessEval2', 'seekDescendToRidge',
'tenFiberParmStepSize',
'pullPointInitializeRandomOrHalton',
'limnObjectSpaceTransform', 'nrrdKeyValueErase',
'limnPolyDataPrimitiveArea', 'nrrdResampleTypeOutSet',
'unrrduHestScaleCB', 'nrrdArithBinaryOp',
'meetHestPullVol', 'nrrdZlibStrategyDefault',
'baneClipUnknown', 'pushOutputGet', 'tenEMBimodalParmNix',
'tenAniso_Cp1', 'nrrdIterNix', 'tenAniso_Cp2',
'airMopSingleOkay', 'tijk_refine_rankk_parm_new',
'nrrdUIInsert', 'gageErrNone', 'tend_shrinkCmd',
'tend_expCmd', 'ell_3m_svd_d', 'limnQN9octa',
'unrrdu_axinsertCmd', 'airBesselIn', 'baneHVolParmNew',
'pullCCMeasure', 'gageDefGenerateErrStr',
'nrrdEnvVarDefaultSpacing', 'nrrdRangePercentileSet',
'tenGradientParmNew', 'tijk_esh_to_3d_sym_f',
'nrrdTypeUChar', 'nrrdAxisInfoThickness',
'pullPropStepConstr', 'tijk_esh_to_3d_sym_d',
'airHeapFrontPeek', 'pushBin_t', 'airTypeLast',
'gageVecMGFrob', 'miteStageOpLast', 'tenDwiGageConfidence',
'ell_cubic_root_last', 'biffGet', 'nrrdMeasureL4',
'nrrdMeasureL2', 'nrrdMeasureL1',
'alanParmConstantFilename', 'miteShadeMethodLitTen',
'nrrdCenterUnknown', 'limnHestCameraOptAdd',
'echoTypeInstance', 'airBesselI1', 'seekSamplesSet',
'nrrdIterValue', 'nrrdKind2DMaskedMatrix',
'pullPhistEnabled', 'airSgnPow', 'nrrdBasicInfoOldMin',
'airMopUnMem', 'gageVecHelicity', 'gageSclHessFrob',
'airThreadNew', 'tenGageOmegaHessianEval2',
'tenGageOmegaHessianEval1', 'tenGageOmegaHessianEval0',
'nrrdAxisInfoLabel', 'nrrdSpacingStatusScalarNoSpace',
'tend_glyphCmd', 'nrrdHistoEq', 'limnSpline_t', 'biffDone',
'tenDWMRIBmatKeyFmt', 'tenGage', 'nrrdBinaryOpExists',
'tenEigenvalueMultiply', 'alanTextureTypeLast',
'tenGageTraceHessianEvec', 'nrrdKernelBSpline3',
'seekDescendToDegCell', 'echoGlobalStateNix', 'limnLook',
'unrrdu_w2iCmd', 'limnLightSet', 'baneGkms_scatCmd',
'pullInfoSpecNix', 'tenModelParmDesc', 'nrrdUnaryOpExp',
'tijk_refine_rank1_parm_new', 'baneRangeNix',
'limnObjectReadOFF', 'pullInfoIsovalueGradient',
'tenGradientParm', 'hooverContextNew', 'elfGlyphPolar',
'limnPolyDataVertexWindingFlip', 'nrrdBoundaryBleed',
'pullPointNew', 'nrrdArithAffine', 'meetHestPullInfo',
'nrrdKernelCos4SupportDebugDDD', 'elfMaximaContext',
'echoJitterUnknown', 'nrrdResampleRenormalizeSet',
'ell_aa_to_q_f', 'ell_aa_to_q_d', 'baneRawScatterplots',
'nrrdKernelBSpline7D', 'limnSplineNrrdCleverFix',
'pullSysParmEnergyIncreasePermit', 'miteValGageKind',
'nrrdEnvVarDefaultWriteEncodingType',
'pullInfoHeightLaplacian', 'echoSuperquadSet',
'nrrdSpaceOriginSet', 'nrrdResampleInfoNew',
'seekItemHessSet', 'pushEnergySpecNix',
'limnPolyDataSquare', 'dyeLUVtoXYZ', 'tenAnisoUnknown',
'pullFlagNixAtVolumeEdgeSpace', 'gageItemPack',
'nrrdKernelCentDiff', 'miteThread', 'tenGageFAHessianEvec',
'limnSplineInfoLast', 'nrrdMeasureLineSlope',
'pullLogAddSet', 'tenFiberKernelSet', 'nrrdTypeIsUnsigned',
'nrrdHistoCheck', 'gageStackBlurParmCheck',
'nrrdHestBoundarySpec', 'pullIterParmSet',
'airNoDio_setfl', 'tend_anhistCmd', 'ell_3m_print_f',
'nrrdKernelBSpline7DDD', 'ell_3m_print_d',
'seekTypeMaximalSurface', 'gagePvlFlagNeedD',
'nrrdAxisInfoCompare', 'nrrdBasicInfoComments',
'airRandInt', 'echoSuperquad', 'nrrdKind3DMaskedSymMatrix',
'limnPolyDataTransform_f', 'tenGlyphTypeCylinder',
'tenEstimate1TensorSimulateSingle_f',
'tenEstimate1TensorSimulateSingle_d', 'nrrdHistoAxis',
'tenDWMRINAVal', 'tenDwiGageTensorWLS',
'nrrdAxisInfoCenter', 'limnPrimitiveNoop',
'tenGageTensorGradMag', 'pullIterParmMax',
'tenGageCp1GradMag', 'nrrdSpaceVecExists',
'echoJittableLast', 'nrrdCenterNode', 'nrrdJoin',
'echoCylinder', 'airUIrandMT_r', 'nrrdDStore',
'mossHestOrigin', 'tenGageFAKappa2', 'nrrdUnaryOpLast',
'tenGageFAKappa1', 'unrrdu_imapCmd', 'tijk_max_esh_order',
'tenDwiGageTensorLLSErrorLog', 'limnPrimitive',
'nrrdKind2DSymMatrix', 'tenAniso_Ca2', 'pullFinish',
'tenAniso_Ca1', 'tenGageModeHessian',
'tijk_approx_heur_3d_d', 'nrrdKernelC5SepticApproxInverse',
'mossMatApply', 'tenInvariantGradientsK_d',
'NrrdResampleInfo', 'gageDefDefaultCenter', 'meetTeemLibs',
'airTypeUInt', 'coilKindScalar', 'tenInterpTypeLinear',
'unrrduCmd', 'limnVertex', 'nrrdCheck', 'pullCCFind',
'limnObjectVertexNumPreSet', 'gageVecMultiGrad',
'coilMethodTypeSelf', 'airBesselI0', 'nrrdKindUnknown',
'ell_cubic_root', 'tenEstimateMethodSet', 'nrrdMeasureSum',
'nrrdResampleNonExistentNoop', 'nrrdSliceSelect',
'nrrdUnaryOpCos', 'airNoDio_arch', 'tijk_4o3d_sym',
'tenInterpPathLength', 'echoMatterGlassFuzzy',
'pullTraceStop', 'meetPullVolNew', 'nrrdRangeSafeSet',
'pullCount', 'tenInterpParmNew', 'airCbrt', 'airTypeEnum',
'nrrdApply1DRegMap', 'tend_mconvCmd', 'tenGageCl2',
'tenGageCl1', 'pullFlagScaleIsTau',
'tenFiberStopMinLength', 'nrrdKernelBlackman',
'echoInstance', 'nrrdMeasureHistoMax', 'limnPolyDataPlane',
'limnSplineSample', 'pullPropIdCC', 'nrrdFormatText',
'tend_evalmultCmd', 'nrrdResampleNrrdSet',
'nrrdEnvVarDefaultWriteValsPerLine',
'pullCondConstraintSatB', 'miteStageOpMax',
'pullCondConstraintSatA', 'airArrayNew', 'nrrdKernelHann',
'meetPullVolCopy', 'ell_q_pow_d', 'nrrdBinaryOpGTE',
'ell_q_pow_f', 'pullCountCC', 'tenFiberAnisoSpeedSet',
'pushFinish', 'tenGageFAGradVecDotEvec0',
'tenDwiGageKindNix', 'nrrdKindPoint', 'pullFlagUnknown',
'limnPolyDataPolygonNumber', 'mossMatRightMultiply',
'tenBiffKey', 'pullTraceStopSpeeding', 'nrrdFormatType',
'gageSigmaSamplingOptimal3DL2L2', 'nrrdDistanceL2Biased',
'nrrdUnaryOpLog', 'limnPolyDataWriteLMPD',
'nrrdMeasureMax', 'nrrdTernaryOpLerp',
'gageItemPackPartUnknown', 'nrrdFInsert',
'limnCameraPathTrackFrom', 'meetGageKindParse',
'hestRespFileFlag', 'baneDefMakeMeasrVol',
'nrrdMeasureMode', 'nrrdDefaultWriteValsPerLine',
'gageOptimSigContext', 'gageSclLaplacian', 'tenGlyphParm',
'nrrdConvert', 'biffMaybeAddf', 'tenGlyphTypeSuperquad',
'nrrdFormatTypeText', 'miteValYw', 'airDioRead',
'unrrdu_minmaxCmd', 'pullEnergyTypeButterworth',
'hooverContext', 'echoMatterLast', 'airHeapFrontUpdate',
'tenEstimateSkipReset', 'miteValYi',
'tenGageOmegaDiffusionAlign', 'nrrdMeasureHistoSD',
'tenEstimateContextNew', 'tenAnisoPlot', 'nrrdIoStateInit',
'nrrdKernelCatmullRom', 'dyeHSVtoRGB',
'nrrdEnvVarStateMeasureHistoType', 'pullVerboseSet',
'nrrdAxisInfoKind', 'airTime', 'limnFace',
'limnPolyDataInfoNorm', 'echoType', 'tenGageQNormal',
'limnPolyDataInfoTex2', 'nrrdHisto', 'baneHVolParmNix',
'mossMatRotateSet', 'tenEvecRGBParm',
'tijk_refine_rankk_parm_nix', 'tenFiberContextNix',
'nrrdHasNonExistFalse', 'tenDWMRISkipKeyFmt',
'miteVariablePrint', 'tenFiberParmUseIndexSpace',
'nrrdKind3Gradient', 'pushEnergyTypeUnknown',
'gageAnswerPointer', 'coilOutputGet', 'nrrdKeyValueClear',
'pushEnergyZero', 'nrrdCenter', 'baneHVolParmAxisSet',
'airBesselI1ExpScaled', 'nrrdKernelC3Quintic',
'pullProcessModeLast', 'gageShapeNix',
'echoJittableNormalA', 'echoJittableNormalB',
'nrrdResampleDefaultCenterSet', 'pullPropForce',
'tenGageSNormal', 'miteShadeSpecNix', 'nrrdTypeShort',
'mossVerbose', 'baneHVolParmClipSet',
'ell_cubic_root_single', 'gageShapeReset',
'tenGageBGradMag', 'tenGlyphBqdZoneEval',
'limnCameraPathTrackUnknown', 'nrrdDeringClampPercSet',
'ell_3m_1d_nullspace_d', 'nrrdGetenvInt',
'nrrdKindIsDomain', 'ell_3mv_mul_d', 'dyeXYZtoLUV',
'ell_3mv_mul_f', 'nrrdType', 'echoThreadStateInit',
'baneHVolParmGKMSInit', 'airExp', 'NrrdResampleAxis',
'tenInterpTypeQuatGeoLoxR', 'hestElideSingleOtherType',
'tenEMBimodalParm', 'tenInterpTypeQuatGeoLoxK',
'echoAABBox', 'NrrdIter', 'gageParmVerbose',
'airThreadMutexNew', 'tenGageTensorGrad',
'limnHestSplineTypeSpec', 'pullPropGet',
'limnSplineTypeUnknown', 'nrrdUnaryOpExpm1',
'tend_unmfCmd', 'tenMeasurementFrameReduce',
'nrrdUnaryOpNegative', 'NrrdIoState', 'airErfc',
'tenGlyphBqdZoneUv', 'airNoDio_dioinfo',
'gageItemPackPartScalar', 'pullTraceMultiRead',
'pushBiffKey', 'biffSetStr', 'nrrdBinaryOpDivide',
'nrrdDeringCenterSet', 'nrrdFprint', 'biffMsg',
'elfBallStickODF_f', 'nrrdMeasureVariance',
'limnObjectEmpty', 'nrrdHistoThresholdOtsu', 'airEnumVal',
'tenExperSpecNew', 'nrrdSpaceLeftPosteriorSuperiorTime',
'airHeapFrontPop', 'airTypeStr', 'tenFiberStopOn',
'hooverErrSample', 'nrrdZlibStrategyHuffman',
'gageStackBlurCheck', 'pullInfoNegativeTangent1',
'pullInfoNegativeTangent2', 'nrrdSprint', 'hooverBiffKey',
'nrrdKernelCompare', 'nrrdTypeMax',
'pullInterTypeUnivariate', 'gageParmLast',
'pullPropUnknown', 'airParseStrF', 'airParseStrD',
'airParseStrE', 'airParseStrB', 'airParseStrC',
'nrrdDistanceL2', 'nrrdEnvVarStateKeyValuePairsPropagate',
'gageSclMeanCurv', 'tend_evalclampCmd',
'tenModel1Cylinder', 'airParseStrI', 'nrrdKeyValueAdd',
'airParseStrS', 'nrrdTernaryOpMin', 'unrrdu_convertCmd',
'limnEdge', 'tenGageEval1', 'tenGageEval0',
'tenGageTraceGradVec', 'dyePresent', 'tenGageAniso',
'tenDwiGageTensor', 'echoRTRender',
'gageItemPackPartNormal', 'ell_3m2sub_eigensolve_d',
'dyeXYZtoRGB', 'alanStopUnknown',
'gageKindTotalAnswerLength', 'miteStageOpAdd',
'dyeSpaceLUV', 'nrrdKernelGaussianD', 'pullInfoTensor',
'airAtod', 'nrrdField_number', 'tenGageModeGradMag',
'tijk_esh_make_kernel_delta_d', 'tenAniso_Conf',
'airHeapNew', 'tijk_incr_f', 'nrrdDeringInputSet',
'nrrdByteSkip', 'nrrdBasicInfoData', 'baneBcptsCheck',
'gageKernelReset', 'nrrdBinaryOpMultiply', 'seekContext',
'nrrdFormatTypeLast', 'dyeColorCopy',
'nrrdEnvVarStateVerboseIO', 'gageDefVerbose',
'tenGageDelNormR1', 'tenGageDelNormR2', 'miteNtxfCheck',
'pullTraceMultiWrite', 'tenAniso_Omega',
'gageStackBlurParmNew', 'pushEnergySpecSet',
'hooverStubThreadBegin', 'unrrdu_1opCmd',
'pullTraceStopUnknown', 'nrrdField_data_file',
'alanParmSaveInterval', 'tenGageFACurvDir1',
'tenGageFACurvDir2', 'hooverErrThreadCreate',
'tenGageCl1HessianEvec2', 'tenGageCl1HessianEvec1',
'tenGageCl1HessianEvec0', 'pullEnergyUnknown',
'limnQN12checker', 'nrrdDLookup', 'ell_q_3v_rotate_f',
'nrrdDInsert', 'pullSourceProp', 'tenModelFromAxisLearn',
'tenGageCa1', 'ell_3m_eigenvalues_d',
'hooverDefVolCentering', 'gageVecHessian',
'gageParmStackNormalizeDeriv', 'limnObject',
'pullIterParm', 'tenFiberStopFraction', 'airToLower',
'nrrd1DIrregAclCheck', 'unrrdu_joinCmd',
'elfKernelStick_f', 'miteValZw', 'airThreadCond',
'tendTitle', 'pullSysParmWall', 'tenMake',
'unrrdu_makeCmd', 'miteValZi', 'ell_cubic',
'tenGageClpmin2', 'nrrdResampleNonExistentWeight',
'nrrdUnaryOpCeil', 'tenGageClpmin1', 'limnObjectPreSet',
'gageShape', 'tijk_copy_d', 'tenRotateSingle_f',
'nrrdCCValid', 'mossMatIdentitySet', 'tenDefFiberIntg',
'limnCameraNix', 'pullTraceMulti', 'nrrdKernelBSpline4DD',
'tenDwiFiberType2Evec0', 'pullStatusNewbie',
'tenMakeSingle_f', 'tenGageCa1HessianEval1',
'tenGageCa1HessianEval0', 'nrrdUILookup',
'tenGageCa1HessianEval2', 'tenEstimate1MethodWLS',
'unrrdu_axsplitCmd', 'nrrdAxisInfoPos', 'hooverErrNone',
'tend_fiberCmd', 'ell_Nm_pseudo_inv', 'nrrdIoStateNix',
'echoObjectAdd', 'pullEnergyQuartic', 'gageVecVector0',
'tenBVecNonLinearFit', 'hooverSample_t',
'tenEstimateNegEvalShiftSet', 'nrrdCCAdjacency',
'mossDefBoundary', 'pullTask', 'airEnumDesc', 'tijk_add_f',
'baneIncNix', 'nrrdWrap_va',
'nrrdStateKeyValueReturnInternalPointers',
'unrrduHestBitsCB', 'limnSplineNew', 'miteValGTdotV',
'pushPtrPtrUnion', 'gageItemPackPart',
'gageStackBlurParmBoundarySpecSet', 'nrrdKindTime',
'meetPresent', 'airArrayLenPreSet', 'nrrdMeasure',
'tenModelSqeFit', 'airParseStrZ', 'airMopAdd',
'alanPresent', 'gageSigmaSampling', 'coilMethodType',
'gageStackBlurParmCopy', 'airEqvAdd', 'ell_3m_inv_d',
'tenGageEval2', 'pullProgressBinModSet',
'nrrdKernelCos4SupportDebugD', 'miteValNormal',
'tijk_eval_efs_basis_f', 'tijk_eval_efs_basis_d',
'limnQN12octa', 'tend_evalpowCmd', 'unrrdu_lutCmd',
'tenDwiGageUnknown', 'nrrdKind3DMatrix', 'tenFiberIntg',
'miteValTi', 'nrrdFormatTypeEPS', 'tijk_negate_f',
'tend_tripleCmd', 'dyeColorParse', 'airIndexClamp',
'nrrdEncodingTypeLast', 'nrrdTypeDefault', 'baneHVolParm',
'pullBin_t', 'tijk_init_max_3d_f', 'nrrdEncodingTypeAscii',
'coil_t', 'tenAniso_FA', 'tenGageFAHessian',
'gageStackBlurGet', 'limnPolyDataReadOFF', 'gageSclValue',
'airEqvSettle', 'echoIsosurface', 'echoMatterPhongSp',
'nrrdField_max', 'seekTypeRidgeSurfaceOP', 'nrrdFLookup',
'mossPresent', 'ell_q_to_3m_f', 'unrrdu_diceCmd',
'pushBinInit', 'hestNoArgsIsNoProblem',
'limnPolyDataInfoLast', 'airHeapFromArray',
'nrrdResampleSamplesSet', 'gageVecHelGradient',
'miteShadeMethodNone', 'unrrdu_ccfindCmd',
'gagePvlFlagUnknown', 'nrrdIterContent',
'tenEstimateLinear3D', 'gageContextNix', 'nrrdRangeCopy',
'pullPositionHistoryGet', 'alan3DSizeSet',
'tenFiberStopLast', 'tenModelConvert', 'tenModelZero',
'nrrdKernelBSpline5D', 'hooverStubSample', 'biffMsgErrNum',
'gagePerVolumeDetach', 'gageScl',
'nrrdResampleNonExistentUnknown', 'airBool',
'tend_makeCmd', 'nrrdBinaryOpSubtract', 'limnSplineNix',
'tenEvecRGB', 'hestGlossary', 'nrrdUILoad',
'pullHestEnergySpec', 'airNull',
'gageStackBlurParmRenormalizeSet', 'nrrdField_keyvalue',
'unrrdu_2opCmd', 'gageAnswerLength', 'airTypeOther',
'nrrdKernelBSpline3DD', 'nrrdIoStateFormatSet',
'pullPropIdtag', 'ell_3v_angle_d', 'unrrdu_i2wCmd',
'ell_3v_angle_f', 'airEnumUnknown', 'nrrdFormatVTK',
'echoRayIntx', 'pullPointScalar', 'nrrdUnaryOpSigmaOfTau',
'nrrdField_space_dimension', 'limnObjectPartAdd',
'tenEvecRGBParmCheck', 'limnSplineNrrdEvaluate',
'tijk_incr_d', 'limnPolyData', 'seekBiffKey',
'nrrdDefaultResampleCheap', 'airOneLine',
'nrrdKernelForwDiff', 'tijk_get_axis_type',
'miteShadeSpecNew', 'hestCB', 'tenEstimate2MethodLast',
'tenAniso_eval2', 'tenAniso_eval0', 'tenAniso_eval1',
'baneClipNix', 'pullTraceStopConstrFail', 'ell_4m_det_f',
'ell_4m_det_d', 'nrrdFFTWPlanRigorLast',
'tenEstimate2MethodPeled', 'gageVecNCurlNormGrad',
'nrrdDefaultSpacing', 'unrrdu_unorientCmd', 'hestColumns',
'alanStopConverged', 'tenFiberTraceSet',
'nrrdApplyMulti1DLut', 'nrrdCCMerge',
'nrrdMeasureHistoMode', 'pullCondLast',
'pullVolumeSingleAdd', 'airThreadCapable',
'gageParmGenerateErrStr', 'pullConstraintFailLast',
'pullPropStuck', 'tenGageOmegaHessianContrTenEvec1',
'biffMaybeAdd', 'tijk_esh_len', 'hooverContextNix',
'nrrdSpatialResample', 'pullPointNix', 'echoSplit',
'dyeColorGetAs', 'tenDefFiberStepSize', 'echoObjectNix',
'pullEnergySpecNew', 'coilContext', 'pushEnergyTypeLast',
'pullSysParmNeighborTrueProb', 'nrrdBiffKey',
'limnPrimitiveQuads', 'limnPolyDataVertexNormals',
'limnLightNix', 'unrrdu_untileCmd', 'gageVecCurl',
'nrrdInset', 'pullInitParm', 'dyeLABtoXYZ',
'gageSigmaSamplingUnknown', 'nrrdBinaryOpMod',
'tenFiberTypeUnknown', 'limnSplineNumPoints', 'hestOptAdd',
'gageSclNProj', 'nrrdTypeMin', 'airStrdup',
'echoRoughSphereNew', 'airThreadMutexLock', 'nrrdInit',
'gageKernelStack', 'tenGradientIdealEdge',
'nrrdGetenvBool', 'nrrdIoStateEncodingGet',
'alanDimensionSet', 'baneOpacCalc',
'limnCameraPathTrackLast', 'tijk_refine_rankk_3d_d',
'baneClip', 'tijk_refine_rankk_3d_f',
'tenTripleCalcSingle_f', 'miteThread_t', 'limnBiffKey',
'tijk_2o3d_unsym', 'alanContextNix',
'pullEnergyTypeCubicWell', 'pullEnergyQuarticWell',
'echoJitterJitter', 'baneMeasrLaplacian',
'limnObjectVertexAdd', 'nrrdBinaryOpLT', 'unrrdu_acropCmd',
'coilKindType', 'pullSysParmProbeProb', 'airStrtokQuoting',
'pullInfoHeightGradient', 'pullPropStability',
'coilMethod', 'gageSclMedian', 'miteQueryAdd',
'gageParmK3Pack', 'limnPolyDataClipMulti', 'airTypeSize',
'pullSysParmGamma', 'airStrcmp', 'airStrlen',
'tenGageCp1HessianEval', 'echoMatterUnknown',
'unrrdu_saveCmd', 'gageSclGradVec', 'tenDwiFiberType',
'tijk_init_rank1_2d_d', 'tijk_init_rank1_2d_f', 'tenLog',
'tenGageCp1HessianEval2', 'tenGageCp1HessianEval0',
'tenGageCp1HessianEval1', 'nrrdSpaceDimension',
'miteDefNormalSide', 'nrrdTernaryOpGTSmooth',
'unrrdu_axmergeCmd', 'nrrdField_labels', 'tijk_type_t',
'pullInfoSpec', 'nrrdFStore', 'biffAdd', 'tijk_scale_f',
'tenGradientDistribute', 'tijk_scale_d', 'nrrdKind',
'nrrdValCompare', 'hestSourceLast', 'gagePvlFlagQuery',
'pullTraceMultiPlotAdd', 'nrrdIterSetValue',
'limnCameraPathMake', 'unrrdu_affineCmd', 'unrrduScaleAdd',
'nrrdHasNonExistUnknown', 'elfBallStickPredict_f',
'miteRenderEnd', 'tijk_esh_convolve_d', 'miteRender',
'nrrdBinaryOpFlippedSgnPow', 'pullPropNeighCovarDet',
'limnObjectWorldHomog', 'tijk_6o3d_sym',
'tenGradientParmNix', 'tenGageEvalHessian',
'nrrdKernelC4HexicApproxInverse', 'nrrdBinaryOpEqual',
'limnQN10checker', 'mitePresent', 'tenFiberStopAnisoSet',
'pullTraceMultiFilterConcaveDown', 'meetHestGageKind',
'tenAnisoTen_d', 'tenPowSingle_d', 'tenAnisoTen_f',
'echoMatterPhongSet', 'echoMatterMetal',
'nrrdTernaryOpMultiply', 'mossImageCheck',
'tenFiberMultiNix', 'nrrdBinaryOpAdd',
'nrrdTernaryOpExists', 'limnObjectCubeAdd',
'nrrdKindVector', 'echoRTParm', 'tijkPresent',
'limnLightAmbientSet', 'gageShapeSet',
'tenDefFiberUseIndexSpace', 'tijk_esh_sp_f',
'nrrdBlind8BitRangeFalse', 'tenGageDetGradVec',
'nrrdMeasureCoV', 'hestOptFree', 'tenGageModeHessianEval',
'gageSclHessDotPeakness', 'echoJitterGrid',
'nrrdIoStateBzip2BlockSize', 'hooverErrRenderBegin',
'hooverErrRayBegin', 'airFPPartsToVal_f', 'hestVerbosity',
'alanParmMaxIteration', 'nrrdDefaultWriteEncodingType',
'limnLight', 'tenAnisoVolume', 'echoMatterGlassKa',
'airHalton', 'echoMatterGlassKd', 'nrrdAxisInfoCopy',
'NrrdRange', 'airMyDio', 'tenBMatrixCheck',
'limnObjectDescribe', 'nrrdBinaryOpRicianRand',
'pushPoint_t', 'nrrdIoStateSet', 'tenFiberContextCopy',
'tend_helixCmd', 'echoTypeIsosurface',
'tenEstimate1MethodUnknown', 'coilKindTypeScalar',
'echoMatterLightPower', 'nrrdSpaceRightAnteriorSuperior',
'gageVecCurlNormGrad', 'seekEvalDiffThreshSet',
'gageVecNormHelicity', 'pullSysParmBinWidthSpace',
'nrrdContentSet_va', 'pullConstraintFail',
'tenFiberProbeItemSet', 'baneClipTopN',
'nrrdEncodingAscii', 'hooverErrRenderEnd',
'echoRectangleSet', 'ell_4v_norm_f',
'nrrdKernelCos4SupportDebug', 'ell_q_avgN_d', 'airStdout',
'airThreadCondNix', 'baneMeasrValueAnywhere',
'alanParmDeltaT', 'airInsane_not', 'tenExp',
'miteValVdefT', 'tenModel_t', 'nrrdField_line_skip',
'tenFiberParmSet', 'pullEnergyBetterCubicWell',
'airTypeChar', 'airRandInt_r', 'nrrdResampleKernelSet',
'tenGageRNormal', 'limnPolyDataReadLMPD',
'tijk_refine_rank1_parm', 'nrrdDefaultWriteCharsPerLine',
'nrrdBoundarySpecCopy', 'nrrdStateDisableContent',
'nrrdKindQuaternion', 'nrrdNonSpatialAxesGet',
'tenExperSpecGradSingleBValSet',
'tenDwiGageTensorErrorLog', 'pullConstraintFailHessZeroA',
'pullConstraintFailHessZeroB', 'airFP_QNAN',
'tijk_axis_info_t', 'nrrdProject', 'baneRangePositive',
'tenGageCa1HessianEval', 'nrrdKindCovariantVector',
'baneClipAnswer', 'ell_4v_print_d', 'ell_4v_print_f',
'nrrdEncodingTypeGzip', 'gageParm', 'tenGageOmegaHessian',
'unrrdu_lut2Cmd', 'alanBiffKey', 'limnWindowNix',
'nrrdEnvVarDefaultCenter', 'unrrdu_3opCmd',
'tijk_esh_convolve_f', 'tenEvqVolume',
'nrrdEncodingTypeRaw', 'ell_aa_to_4m_f',
'nrrdKernelBSpline4D', 'nrrdSpaceDimensionSet',
'tijk_type', 'nrrdFormatEPS', 'unrrduScaleLast',
'gageContext', 'gageCtxFlagKernel', 'tend_evaladdCmd',
'limnObjectDepthSortParts', 'tijk_approx_heur_2d_f',
'pullInfoLen', 'nrrdKindSize', 'pullSourceUnknown',
'limnPrimitiveTriangleStrip', 'airThreadMutexNix',
'tenGradientCheck', 'tenSimulateSingle_f',
'tenGageFATotalCurv', 'tijk_4o2d_sym',
'gageSigmaSamplingUniformSigma', 'pushHestEnergySpec',
'nrrdAlloc_nva', 'miteRayEnd', 'airFloatNegInf',
'nrrdKernelSpecSprint', 'limnVtoQN_f', 'limnVtoQN_d',
'tijk_approx_heur_2d_d', 'miteRangeSP', 'pullStatusLast',
'airThreadCondSignal', 'airEndianBig',
'nrrdUnaryOpReciprocal', 'biffMsgStrSet', 'hestOptCheck',
'hooverErr', 'hooverRayEnd_t', 'echoTriMeshSet',
'nrrdIterSetOwnNrrd', 'limnEnvMapFill', 'unrrdu_aboutCmd',
'tijk_init_max_2d_f', 'dyeSpaceLAB', 'nrrdIoStateNew',
'tenTripleType', 'unrrdu_insetCmd', 'nrrdSample_nva',
'airTypeSize_t', 'tenGradientRandom', 'tenAniso_VF',
'tijk_2o3d_asym', 'seekDataSet', 'unrrdu_histaxCmd',
'tenInterpParm', 'limnPolyDataVertexNormalsNO',
'pullCountConstraintSatisfy', 'tenInterpParmCopy',
'tijk_approx_rankk_2d_f', 'gageBiffKey', 'gageSclCurvDir2',
'tenGageRotTans', 'gageSclCurvDir1',
'nrrdSpaceLeftPosteriorSuperior', 'baneIncLast',
'alanTensorSet', 'nrrdHasNonExistTrue', 'gageProbeSpace',
'baneAxis', 'limnSplineInfo', 'pullEnergyTypeLast',
'nrrdIoStateCharsPerLine', 'NrrdEncoding_t', 'tenGageCa2',
'pullEnergyBspln', 'pullCountForceFromImage',
'ell_4m_pre_mul_f', 'elfMaximaRefineSet',
'tenMakeSingle_d', 'dyeBiffKey', 'miteVal',
'nrrdAxisInfoSpacingSet', 'tenGageDet',
'baneMeasrValueZeroCentered', 'tenAniso_Ct1',
'gageVecNormalized', 'tenAniso_Ct2',
'limnEdgeTypeBackCrease', 'limnPolyDataJoin',
'hooverDefImgCentering', 'hooverErrThreadJoin',
'airPrettySprintSize_t', 'airFree', 'tijk_class_tensor',
'unrrdu_jhistoCmd', 'hestRespFileEnable', 'nrrdSpaceSet',
'pullCountProbe', 'limnPolyDataNeighborList',
'limnSplineEvaluate', 'hooverStubRenderBegin',
'tijk_refine_rank1_3d_d', 'biffSetStrDone',
'pullInfoStrength', 'gageKernel10', 'gageKernel11',
'tenFiberTypeTensorLine', 'airFPFprintf_f',
'airFPFprintf_d', 'limnSpaceUnknown', 'tenAniso_Mode',
'gageQueryItemOn', 'nrrdILoad', 'gageDefTwoDimZeroZ',
'pullPropPosition', 'gageVecVector', 'tenExpSingle_d',
'airMopPrint', 'tenExpSingle_f', 'Nrrd',
'tenInterpDistanceTwo_d', 'ell_4m_to_aa_f',
'ell_4m_to_aa_d', 'tenPowSingle_f', 'alanParmBeta',
'airFP_Last', 'unrrdu_basinfoCmd', 'limnPolyDataColorSet',
'tenGageFAFlowlineCurv', 'echoRTRenderCheck',
'nrrdKernelCatmullRomSupportDebug', 'nrrdAxisInfoMin',
'hestParseOrDie', 'echoJittableMotionA',
'pullInitMethodGivenPos', 'echoJittableMotionB',
'echoInstanceSet', 'gageStructureTensor',
'gageStackBlurParmNeedSpatialBlurSet',
'nrrdOrientationReduce', 'tenGageRGradMag',
'alanTextureTypeUnknown', 'tenGageBGradVec',
'tenDwiGage2TensorQSegAndError', 'ell_aa_to_4m_d',
'tenGageB', 'nrrdMeasureNormalizedL2', 'nrrdPad_nva',
'nrrdKeyValueCopy', 'pullStatusStuck', 'baneRangeNew',
'tenGageS', 'tenGageR', 'tenGageQ', 'tenGageTheta',
'miteShadeSpec', 'nrrdSave', 'gageSclGaussCurv',
'tenEMBimodal', 'limnObjectFaceAdd', 'nrrdUnaryOpFloor',
'gageErrLast', 'tenAniso', 'pushTask_t',
'baneClipPeakRatio', 'nrrdBasicInfoUnknown',
'tenEigenvaluePower', 'gageStackBlurParmInit',
'tenSizeNormalize', 'nrrdKernelBSpline3D',
'baneRangeNegative', 'baneIncStdv', 'tijk_sym_fun',
'gageParmStackNormalizeRecon', 'tenFiberStopDoubleSet',
'gageItemPackPartHessian', 'nrrdKernelBoxSupportDebug',
'nrrdIInsert', 'airInsane_pInfExists', 'baneOpacInfo',
'limnObjectWriteOFF', 'alanParmRandRange',
'tenDwiFiberType1Evec0', 'airLogBesselI0',
'gageDefGradMagCurvMin', 'pushEnergySpecParse',
'nrrdKernelBSpline6D', 'airMop',
'limnPolyDataSuperquadric', 'nrrdTernaryOpUnknown',
'nrrdHistoJoint', 'tend_anvolCmd', 'airInsane_QNaNHiBit',
'nrrdEnvVarStateMeasureType', 'nrrdKind4Color',
'tijk_3d_sym_to_esh_matrix_d', 'nrrdField_dimension',
'pullInfoHeight', 'unrrdu_gammaCmd', 'nrrdBoundaryLast',
'miteStageOpMultiply', 'limnPolyDataNeighborArrayComp',
'airVanDerCorput', 'gageStackBlurParmParse', 'tijk_add_d',
'gageVec', 'nrrdEnvVarDefaultWriteBareTextOld',
'gageStackBlurParmBoundarySet', 'nrrdTypeLast',
'pullTraceStopLength', 'baneHack', 'gageQuerySet',
'tenDwiFiberType12BlendEvec0', 'echoMatterGlassIndex',
'gageStackBlurManage', 'tenSimulate', 'pullPropLast',
'nrrdEnvVarStateDisableContent', 'gageVecDivGradient',
'nrrdKind3DSymMatrix', 'nrrdBasicInfoSpaceDimension',
'tijk_eval_esh_basis_d', 'nrrdAxisInfoIdx', 'alanInit',
'gageStackBlur', 'nrrdEncodingBzip2',
'pullEnergyTypeButterworthParabola', 'nrrd1DIrregMapCheck',
'echoRTParmNix', 'echoRay', 'elfMaximaContextNew',
'echoMatterGlassSet', 'unrrdu_cropCmd',
'tenGageTensorRThetaPhiLinear', 'meetPullVolAddMulti',
'pullPropNeighCovarTrace', 'gageDefOrientationFromSpacing',
'limnCameraAspectSet', 'tenInvariantGradientsR_d',
'meetPullVolLoadMulti', 'nrrdRangeNew', 'baneDefVerbose',
'ell_q_mul_d', 'ell_q_mul_f', 'tenGageTraceHessianEval',
'nrrdEncodingRaw', 'airInsane_NaNExists', 'pullEnergy',
'limnHestPolyDataOFF', 'nrrdQuantize',
'nrrdSpacingStatusDirection', 'tijk_refine_rank1_parm_t',
'tenEstimateSigmaSet', 'tijk_efs_to_2d_sym_f',
'pullBinProcess', 'pullProcessModeAdding', 'tend_simCmd',
'limnEdgeTypeLone', 'unrrdu_heqCmd', 'airStrntok',
'nrrdTernaryOpInOpen', 'nrrdPGM', 'nrrdTernaryOpRician',
'baneFindInclusion', 'gageDefK3Pack', 'echoTypeAABBox',
'nrrdZlibStrategyUnknown', 'tenGageFADiffusionAlign',
'_airThread', 'unrrdu_diffCmd', 'airRician',
'coilKindType7Tensor', 'echoMatterLightSet', 'echoPresent',
'limnPrimitiveTriangles', 'nrrdTernaryOpClamp',
'echoObjectHasMatter', 'ell_3m_to_aa_f', 'ell_3m_to_aa_d',
'airIsNaN', 'alanParmTextureType', 'tenFiberVerboseSet',
'tend_epiregCmd', 'gageVecCurlGradient', 'airMopMem',
'biffMsgStrGet', 'gageDefRenormalize', 'coilKind',
'tenGageCp2', 'pullInfoSpecSprint', 'tenGageCp1',
'limnObjectNew', 'nrrdBasicInfoLast', 'pushPresent',
'tenAniso_Clpmin1', 'tenAniso_Clpmin2', 'echoCube',
'pullTrace', 'echoMatterMetalR0', 'limnEdge_t',
'alanStopLast', 'limnEdgeTypeFrontFacet',
'tenFiberIntgUnknown', 'limnObjectDepthSortFaces',
'tenGageOmegaNormal', 'airStdin',
'tenGageTraceDiffusionFraction', 'airThreadCondBroadcast',
'nrrdBinaryOp', 'tenDwiGage2TensorPeledAndError',
'mossMatShearSet', 'pullInfo', 'nrrdMeasureMin',
'nrrdNuke', 'nrrdTypeIsIntegral', 'airNoDio_disable',
'limnSplineTypeHermite', '_airThreadCond',
'tenEvecRGBSingle_f', 'tenDwiGagePvlData',
'pushContextNix', 'limnSpaceScreen', 'pushRebin',
'airThreadBarrierWait', 'gageItemSpecNew',
'nrrdClampConvert', 'nrrdKernelCos4SupportDebugDD',
'alanStop', 'tenExpand', 'nrrdCCFind',
'gageItemPackSclValue', 'tijk_refine_rankk_parm',
'ell_3m_mul_f', 'tenTripleTypeWheelParm',
'seekVertexStrength', 'hooverRayBegin_t', 'tenSlice',
'airIndexULL', 'airNormalRand_r', 'baneBiffKey',
'miteValView', 'nrrdReshape_va', 'tijk_sym_fun_t',
'nrrdKindComplex', 'echoChannelAverage', 'limnQN13octa',
'baneMeasrTotalCurv', 'ell_3m_inv_f', 'limnQN14octa',
'tenGageCp1GradVec', 'alanContextNew', 'echoTriangleSet',
'gageKernel00', 'pullCountTestStep', 'nrrdEmpty',
'limnSplineTypeTimeWarp', 'hestMinNumArgs',
'gageOptimSigErrorPlot', 'tenTripleTypeEigenvalue',
'nrrdKind4Vector', 'airPresent', 'nrrdArithIterAffine',
'tenGageFARidgeSurfaceAlignment',
'nrrdKernelBSpline5ApproxInverse', 'pushPointNix',
'tenAnisoEval_f', 'tijk_2o2d_sym', 'tenAnisoEval_d',
'tenDWMRIBValueKey', 'tenGageCa1HessianEvec0',
'tenGageCa1HessianEvec1', 'tenGageCa1HessianEvec2',
'nrrdFFTWPlanRigorPatient', 'hooverErrInit', 'tend_avgCmd',
'nrrdUnquantize', 'baneMeasrCopy', 'pullEnergyType',
'tenGageOmegaHessianEval', 'nrrdStateUnknownContent',
'baneIncCopy', 'alanParmMinAverageChange',
'echoTypeTriangle', 'gageKernel22', 'pullPropEnergy',
'tijk_eval_esh_f', 'nrrdEnvVarDefaultWriteBareText',
'limnPolyDataSpiralSphere', 'tenGageModeHessianEvec2',
'tenGageModeHessianEvec1', 'tenGageModeHessianEvec0',
'airFP_POS_INF', 'echoBoundsGet',
'limnObjectPolarSuperquadFancyAdd', 'pullBinsPointAdd',
'baneClipCopy', 'pullSysParmAlpha', 'gageShapeNew',
'pullInterTypeJustR', 'pullIterParmConstraintMax',
'miteValTw', 'mossSamplerUpdate', 'hestGreedySingleString',
'meetAirEnumAll', 'tenFiberStopAniso', 'gageShapeWtoI',
'pullBinsPointMaybeAdd', 'nrrdHestKernelSpec',
'unrrduScaleNothing', 'nrrdMeasureLinf', 'limnPart',
'tijk_eval_esh_d', 'gagePerVolume', 'tenGageCl1GradVec',
'seekUpdate', 'tenGradientJitter', 'banePresent',
'baneRangeCopy', 'tenFiberStop', 'alanParmAlpha',
'gageParmGradMagCurvMin', 'hestSourceUser',
'nrrdEncodingTypeUnknown', 'tenTripleCalc', 'biffGetDone',
'alanTextureTypeGrayScott', 'ell_debug', 'tenGageSHessian',
'limnSplineTypeCubicBezier', 'nrrdUnaryOpRand',
'airArrayLenIncr', 'tenEstimate1TensorSingle_d',
'tenEstimate1TensorSingle_f', 'alanStopNonExist',
'pullTraceMultiNew', 'tenDwiGageAll', 'ell_4mv_mul_f',
'ell_4mv_mul_d', 'tenDefFiberAnisoStopType', 'nrrdSlice',
'tenFiberMultiNew', 'nrrdKernelBlackmanDD',
'airNoDio_test', 'nrrdKernelSpecCopy',
'tenGageOmegaGradVecDotEvec0',
'gageStackBlurParmKernelSet', 'limnDefCameraRightHanded',
'nrrdKernelAQuarticD', 'nrrdDefaultResampleNonExistent',
'nrrdBoundaryWeight', 'tenGlyphParmCheck',
'pullCountDescent', 'baneMeasrFlowlineCurv',
'mossLinearTransform', 'ell_4m_inv_d',
'nrrdSpaceVecScaleAdd2', 'nrrdFClamp',
'coilMethodTypeFinish', 'nrrdBlind8BitRangeLast',
'nrrdField_centers', 'pullPropLen', 'airLog2',
'airThreadBarrier', 'ell_q_inv_d', 'pullSysParmUnknown',
'miteRangeGreen', 'airFP_NEG_DENORM', 'nrrdAxisInfoGet_va',
'pullInterTypeUnknown', 'limnEnvMapCB',
'tenGageOmegaGradVec', 'airFP_NEG_NORM', 'airToUpper',
'tenEigenvalueClamp', 'nrrdApply1DIrregMap',
'airFloatSNaN', 'nrrdKernelC3QuinticDD', 'echoTypeTriMesh',
'nrrdStateGrayscaleImage3D', 'hooverRenderEnd_t',
'tenFiberMulti', 'ell_Nm_tran', 'tijk_esh_sp_d',
'nrrdUnaryOpLog10', 'airStrcpy', 'tenFiberMultiPolyData',
'airHeapUpdate', 'miteValNdotL',
'gageParmKernelIntegralNearZero', 'meetPullVolLeechable',
'nrrdFormatTypePNM', 'airErf', 'baneGkms_pvgCmd',
'pullPresent', 'gageOptimSigContextNew', 'miteValNdotV',
'mossHestTransform', 'ell_3m_det_f', 'ell_3m_det_d',
'ell_q_4v_rotate_d', 'ell_q_4v_rotate_f',
'gagePerVolumeIsAttached', 'pullEnergySpecParse',
'nrrdField_measurement_frame', 'limnCamera', 'nrrdPPM',
'echoTypeList', 'pullInfoGet', 'nrrdMeasureLast',
'nrrdUnaryOpLog1p', 'airInsane_endian',
'tenEMBimodalParmNew', 'pullInfoInsideGradient',
'airTeemReleaseDone', 'pullStatusUnknown',
'tenFiberParmLast', 'nrrdBasicInfoInit', 'nrrdIterSetNrrd',
'pullStart', 'tenModelSimulate', 'tenGageFAHessianEvec2',
'tenGageFAHessianEvec1', 'tenGageFAHessianEvec0',
'ell_4m_post_mul_d', 'echoIntxMaterialColor',
'pullEnergySpring', 'gageSclHessEval',
'airThreadNoopWarning', 'airMopSingleDone', 'limnDevicePS',
'nrrdBasicInfoSpaceUnits', 'nrrdKernelCatmullRomDD',
'tend_mfitCmd', 'elfTenEstimMatrix_f',
'elfTenEstimMatrix_d', 'gageStackProbe',
'nrrdBinaryOpAtan2', 'unrrdu_cmedianCmd', 'tenAniso_Tr',
'biffMove', 'hooverRenderBegin_t', 'nrrdHestNrrd',
'echoSceneNew', 'pullCallbackSet',
'nrrdEnvVarStateAlwaysSetContent',
'tijk_approx_heur_parm_t', 'pushEnergyCotan',
'tijk_eval_efs_d', 'limnWindowNew',
'pullConstraintFailProjGradZeroB',
'pullConstraintFailProjGradZeroA', 'tenAniso_Th',
'ell_3m_rotate_between_d', 'nrrdTypeBlock',
'pullEnergyTypeHepticWell', 'pullCountNixing',
'nrrdSpatialAxesGet', 'seekItemGradientSet',
'tenGageTensor', 'pushContext', 'nrrdSpaceScannerXYZ',
'airBesselInExpScaled', 'coilMethodTypeTesting',
'nrrdEncodingGzip', 'airTeemVersion', 'meetPullInfo',
'unrrduScaleUnknown', 'airThreadMutex', 'unrrduUsage',
'nrrdDistanceL2Signed', 'tenGageTensorQuatGeoLoxR',
'tenGageInvarKGrads', 'tenSizeScale',
'tenGageTensorQuatGeoLoxK', 'biffAddf',
'gageStackPerVolumeAttach', 'biffGetStrlen',
'gageDefCheckIntegrals', 'coilBiffKey',
'nrrdBinaryOpCompare', 'tend_normCmd', 'airNoDio_okay',
'airTypeUnknown', 'tenAnisoHistogram', 'tenGageFA',
'airTeemVersionSprint', 'alanStopNot', 'tenDwiGageJustDWI',
'tenDwiGageTensorLLSError', 'tenDwiGageTensorAllDWIError',
'tenDwiGage', 'nrrdCastClampRound', 'tijk_2d_sym_to_efs_f',
'nrrdBinaryOpFmod', 'nrrdKind3Vector', 'airStrtrans',
'meetPullVolParse', 'nrrdTypeChar', 'nrrdCCRevalue',
'ell_3v_barycentric_spherical_d', 'echoObject',
'nrrdFFTWWisdomWrite', 'pullInitHaltonSet',
'pullIterParmSnap', 'nrrdSpace3DLeftHandedTime',
'ell_biff_key', 'gageSclNormal', 'nrrdDefaultResampleType',
'nrrdDeringContextNew', 'unrrdu_axdeleteCmd',
'nrrdBasicInfoCopy', 'baneInc_t', 'dyeConverter',
'nrrdCrop', 'tenGageFiberDispersion', 'nrrdUnaryOpAbs',
'limnSplineUpdate', 'tenGageCovariance', 'nrrdCompare',
'echoCylinderSet', 'alan2DSizeSet', 'unrrduScaleSubtract',
'hestMultiFlagSep', 'limnObjectPSDraw', 'baneMeasr',
'airInsane_FISize', 'pullTraceStopLast', 'hestParm',
'nrrdKeyValueIndex', 'limnPolyDataCompress', 'seekType',
'nrrdSpacingStatusUnknown', 'limnSplineTypeSpec',
'airEnumStr', 'pushEnergyTypeGauss', 'airDioTest',
'pullEnergyTypeQuarticWell', 'miteValVrefN',
'pushEnergyTypeZero', 'unrrduPresent', 'tenFiberTrace',
'limnEdgeTypeFrontCrease', 'miteSample',
'nrrdGetenvString', 'nrrdBinaryOpNormalRandScaleAdd',
'tenFiberIntgMidpoint', 'meetPullInfoAddMulti',
'limnWindow', 'tend_bfitCmd', 'nrrdField_old_min',
'unrrduScaleExact', 'pullConstraintFailIterMaxed',
'airThreadBarrierNix', 'ell_q_avg4_d',
'coilKindTypeUnknown', 'nrrdTypeFloat', 'airParseStrUI',
'coilContextNew', 'nrrdKernelBSpline1D', 'airEqvMap',
'seekTypeLast', 'gageKind_t',
'gageDefKernelIntegralNearZero', 'nrrdEncodingTypeHex',
'tenGageEvec0', 'tenGageEvec1', 'nrrdIoStateValsPerLine',
'unrrdu_tileCmd', 'pullInterTypeSeparable',
'nrrdKind2DMaskedSymMatrix',
'pullSysParmFracNeighNixedMax', 'baneDefPercHistBins',
'limnPolyDataPrimitiveSort', 'tenGageTraceHessian',
'nrrdField_space_directions', 'baneRangeAnywhere',
'nrrdHasNonExist', 'tenTripleConvertSingle_f',
'tenGageOmegaGradMag', 'airThreadMutexUnlock',
'pullSysParmTheta', 'nrrdMeasureHistoProduct',
'alanContext', 'tenTripleConvertSingle_d', 'nrrdUnaryOpIf',
'tenGageUnknown', 'tijk_approx_heur_parm_new',
'nrrdBasicInfoBlocksize', 'baneClipNew',
'unrrdu_unquantizeCmd', 'miteValUnknown', 'nrrdFormatNRRD',
'pullEnergyTypeUnknown', 'elfPresent', 'tenGageNormNormal',
'nrrdKindRGBColor', 'gageDefStackNormalizeRecon',
'airSingleSscanf', 'airThreadCondWait',
'pullFlagRestrictiveAddToBins', 'pullProcessModeDescent',
'limnSplineTypeSpecNix', 'gageVecMGEval',
'tenFiberIntgSet', 'tenModel1Unit2D', 'pullBin',
'nrrdField_unknown', 'nrrdCCNum',
'pullInitUnequalShapesAllowSet', 'pullPropStepEnergy',
'pullIterParmEnergyIncreasePermitHalfLife', 'tenGageOmega',
'tijk_set_axis_efs', 'pullIterParmAddDescent',
'tijk_set_axis_tensor', 'pullSysParmEnergyDecreaseMin',
'tend_logCmd', 'pullSysParmLast', 'mossSamplerFill',
'nrrdKernelDiscreteGaussianGoodSigmaMax', 'alanParmLast',
'limnObjectNix', 'tenGageDetHessian', 'pullCountAdding',
'limnQN15octa', 'pullEnergyCubic', 'limnSplineMaxT',
'pullPropNeighCovar7Ten', 'gageErrStackIntegral',
'nrrdRangeNewSet', 'miteShadeSpecQueryAdd',
'nrrdEnvVarStateKindNoop', 'tenGageInvarRGradMags',
'nrrdSpaceVecSetNaN', 'limnCameraInit',
'pullConstraintFailUnknown', 'meetPullInfoParse',
'tend_sliceCmd', 'tenFiberTypePureLine', 'nrrdKindScalar',
'tenFiberStopNumSteps', 'airTypeInt',
'pushBinAllNeighborSet', 'tenGlyphTypeSphere',
'tijk_refine_rankk_2d_d', 'unrrdu_cksumCmd',
'baneGkmsHestGthresh', 'tijk_8o3d_sym', 'baneClipLast',
'gagePointReset', 'nrrdKernelC5SepticDD', 'hestOpt',
'nrrdMeasureLineError', 'alanParmWrapAround',
'elfBallStickOptimize_f', 'nrrdKernelBCCubicDD',
'limnOptsPS', 'nrrdSpacingStatusLast',
'tenFiberStopBounds', 'airSrandMT_r', 'pullProp',
'limnSplineInfo2Vector', 'nrrdKernelBSpline7ApproxInverse',
'pullProcessModeNeighLearn', 'nrrdFFTWWisdomRead',
'nrrdKernelSpecNew', 'baneGkms_txfCmd',
'pullFlagEnergyFromStrength', 'nrrdRangeSet',
'limnEdgeTypeContour', 'ell_4m_inv_f',
'nrrdField_space_origin', 'dyeXYZtoLAB',
'nrrdFormatTypeNRRD', 'echoLightPosition', 'tenAnisoScale',
'echoScene_t', 'nrrdResampleClampSet', 'mossMatPrint',
'gageVecGradient2', 'gageVecGradient0', 'gageVecGradient1',
'limnDeviceGL', 'pullTraceNix', 'tenInterpTypeLast',
'tenGageFAHessianEval2', 'gageItemPackPartGradMag',
'tenGageFAHessianEval0', 'tenGageFAHessianEval1',
'nrrdSpaceLast', 'limnPolyDataEdgeHalve',
'nrrdKernelC5SepticDDD', 'nrrdResampleNonExistentSet',
'pushRun', 'airNoDio_ptr', 'airEndian',
'nrrdIoStateBareText', 'ell_Nm_inv',
'tijk_init_rank1_3d_f', 'hestElideSingleEnumType',
'tijk_init_rank1_3d_d', 'ell_q_log_d',
'unrrduHestEncodingCB', 'ell_q_log_f', 'airInsaneErr',
'tenGageCovarianceRGRT', 'mossFlagImage', 'pullVolumeNew',
'coilMethodTypeModifiedCurvatureRings',
'meetNrrdKernelAll', 'gageKernel21', 'gageKernel20',
'airMopOnOkay', 'miteDefRefStep', 'airArrayNuke',
'echoMatterPhong', 'tenGageFARidgeLineAlignment',
'nrrdCropAuto', 'tenGageSGradMag', 'tenGageEvalGrads',
'airSigmaOfTau', 'limnLightNew', 'miteRangeEmissivity',
'pullIterParmUnknown', 'nrrdAxisInfoPosRange',
'gageItemEntry', 'pullSource', 'airTimeOfTau',
'echoJitter', 'seekItemStrengthSet', 'nrrdFormatPNM',
'nrrdReshape_nva', 'pullBiffKey', 'tenGlyphTypeUnknown',
'pushEnergyUnknown', 'pushEnergyTypeSpring',
'ell_cubic_root_unknown', 'tenGageModeHessianEvec',
'nrrdMeasureHistoMin', 'nrrdUnaryOpRoundUp',
'tenFiberTypeLast', 'nrrdILookup',
'gageParmCheckIntegrals', 'tend_evqCmd',
'tenDwiGageTensorNLSError',
'tenDWMRIKeyValueFromExperSpecSet', 'pullEnergyTypeGauss',
'tijk_class_unknown', 'ell_6m_mul_d', 'dyeRGBtoHSL',
'limnPolyDataPrimitiveVertexNumber', 'nrrdField_content',
'nrrdSpaceVecSetZero', 'pullOutputGetFilter',
'gageKernelUnknown', 'nrrdKindSpace', 'nrrdKindRGBAColor',
'gageDefStackUse', 'airHeapInsert', 'pullCountIteration',
'hestUsage', 'nrrdUnaryOpZero', 'dyeRGBtoHSV',
'elfGlyphHOME', 'hooverPresent', 'limnEnvMapCheck',
'nrrdResampleOverrideCenterSet', 'airEnumValCheck',
'pullFlagConstraintBeforeSeedThresh', 'airThread',
'tenGageMode', 'nrrdKernelBSpline6DDD',
'tenGageCp1HessianEvec', 'gageUpdate',
'nrrdSpacingCalculate', 'miteDefRenorm', 'nrrdKernelHannD',
'limnObjectPolarSuperquadAdd', 'nrrdKind3DMaskedMatrix',
'airThreadCondNew', 'tijk_esh_to_3d_sym_matrix_f',
'nrrdStateGetenv', 'tenInterpType', 'gageProbe',
'tenGageCp1Normal', 'airEndianUnknown', 'pullVolume',
'echoIntxColor', 'nrrdMeasureMean', 'miteValWdotD',
'pullInitPointPerVoxelSet', 'tenGlyphGen',
'miteShadeSpecParse', 'airTypeBool', 'unrrdu_deringCmd',
'tenDwiGage2TensorQSeg', 'nrrdStateKeyValuePairsPropagate',
'gageShapeEqual', 'pullFlagLast', 'echoJittableLens',
'tijk_sub_f', 'pullGammaLearn', 'tenInterpParmNix',
'limnObjectPolarSphereAdd', 'tenGageFiberCurving',
'nrrdKernelC5SepticD', 'pullSourceGage',
'nrrdDeringVerticalSeamSet', 'pushPoint',
'nrrdArithTernaryOp', 'nrrdStateMeasureHistoType',
'echoJitterNone', 'nrrdBoundarySpecNew',
'gageHestStackBlurParm', 'limnSplineInfoQuaternion',
'tenGageTraceGradMag', 'NrrdKernel', 'nrrdField_encoding',
'nrrdKernelGaussian', 'biffMovef', 'echoScene',
'limnSplineTypeBC', 'nrrdIoStateZlibStrategy',
'nrrdField_thicknesses', 'baneMeasrGradMag',
'echoTypeLast', 'gageStackPerVolumeNew',
'nrrdArithIterBinaryOpSelect', 'ell_3m2sub_eigenvalues_d',
'nrrdEncodingArray', 'gageKernelSet', 'nrrdKindList',
'limnCamera_t', 'echoSphereSet', 'echoGlobalState',
'pullEnergyGauss', 'tenModel', 'baneIncProcess',
'gageItemSpecNix', 'echoTypeSplit',
'nrrdArithIterTernaryOp', 'tenDwiGageFA',
'tenEstimateGradientsSet', 'nrrdHasNonExistLast',
'miteDefOpacMatters', 'nrrdCommentAdd', 'limnSplineType',
'pullIterParmStuckMax', 'nrrdCCSize', 'baneClipAbsolute',
'echoBiffKey', 'pushContextNew', 'tenGageDetGradMag',
'tenDwiGageKindSet', 'tenGageOmegaHessianEvec0',
'tenGageOmegaHessianEvec1', 'nrrdBoundary',
'nrrdAxesPermute', 'tenFiberParmWPunct',
'limnCameraPathTrackBoth', 'echoMatterMetalSet',
'ell_6ms_eigensolve_d', 'limnSplineTypeLinear',
'tijk_esh_deconvolve_f', 'gageParmCurvNormalSide',
'gageSclHessMode', 'tenGageFAGradMag', 'pullPointNumber',
'pullContextNix', 'limnSplineInfoNormal',
'tenGageFAValleySurfaceStrength', 'airFopen',
'airSprintVecSize_t', 'echoEnvmapLookup',
'unrrdu_quantizeCmd', 'tenAniso_Skew',
'nrrdMeasureHistoMean', 'limnQN16border1',
'nrrdCenterLast', 'tijk_refine_rank1_2d_d',
'tijk_refine_rank1_2d_f', 'nrrdAxisInfoUnits',
'miteRangeKa', 'tenFiberSingleDone', 'miteRangeKd',
'nrrdMeasureMedian', 'nrrdMinMaxExactFind',
'pullEnergyTypeCotan', 'ell_q_div_f', 'ell_q_div_d',
'nrrdPad_va', 'nrrdAxisInfoSize', 'miteRangeKs',
'baneMeasr_t', 'nrrdBinaryOpNotEqual',
'nrrdStateAlwaysSetContent', 'tenGageFAGaussCurv',
'NrrdFormat', 'nrrdKernelCheap',
'pullFlagUseBetaForGammaLearn',
'tenDwiGage2TensorPeledError', 'nrrdBoundarySpecSprint',
'pushEnergy', 'airInsane_nInfExists', 'baneInc',
'nrrdTernaryOpMax', 'tenGageLast', 'gageScl3PFilter4',
'tenGlyphParmNew', 'baneMeasrAnswer', 'dyeColorNix',
'tenFiberTypeZhukov', 'pullInitGivenPosSet',
'echoListSplit3', 'tenDwiGageKindNew', 'gageQueryPrint',
'tenEstimateVerboseSet', 'airSgn', 'nrrdMaybeAlloc_va',
'meetAirEnumAllPrint', 'gageCtxFlagUnknown',
'echoJittableUnknown', 'gageOptimSigContextNix',
'pullPoint', 'tenInterpTypeUnknown', 'nrrdKernelBCCubicD',
'tenGageCl1HessianEvec', 'nrrdBasicInfoDimension',
'unrrdu_undosCmd', 'coilKindArray', 'alanParmHomogAniso',
'limnPolyDataCCFind', 'airTeemReleaseDate',
'limnObjectFaceNormals', 'gageStackBlurParmVerboseSet',
'nrrdKernelTMF_maxC', 'nrrdIoStateDetachedHeader',
'alanStopDiverged', 'tend_expandCmd', 'tenGlyphParmNix',
'tenEstimate2MethodQSegLLS', 'unrrduHestMaybeTypeCB',
'airOneLinify', 'tenEvecRGBSingle_d', 'dyeColorInit',
'gageItemPackPartGradVec', 'pullProcessModeUnknown',
'tenDwiGageKindCheck', 'airMopError', 'coilVolumeCheck',
'echoTriangle', 'limnPolyDataRasterize',
'hestElideMultipleEmptyStringDefault', 'miteValRi',
'echoPos_t', 'hestVarParamStopFlag', 'seekTypeValleyLine',
'pullSysParmStepInitial', 'nrrdUIStore', 'airCRC32',
'tenFiberAnisoSpeedReset', 'coilMethodTypePeronaMalik',
'miteRangeBlue', 'limnPolyDataInfoRGBA', 'miteValRw',
'tenGradientGenerate', 'coilPresent', 'tend_stenCmd',
'tijk_esh_to_3d_sym_matrix_d', 'limnEdgeTypeUnknown',
'airSprintSize_t', 'gageStackBlurParmSigmaSet',
'nrrdBasicInfoOldMax', 'unrrdu_shuffleCmd',
'echoJittableLight', 'tenInterpMulti3D', 'seekIsovalueSet',
'airMopDebug', 'nrrdEncodingType', 'nrrdCRC32',
'baneGkmsCmdList', 'pullCondUnknown', 'limnQN11octa',
'pullEnergyTypeBspln', 'tenDwiGageTensorMLE',
'pullEnergyTypeBetterCubicWell', 'tenFiberType',
'ell_q_to_aa_d', 'gagePoint_t', 'ell_q_to_aa_f',
'unrrdu_padCmd', 'pushEnergyTypeCoulomb', 'airMopOnError',
'nrrdDefaultKernelParm0', 'tend_pointCmd',
'echoTypeUnknown', 'airFPClass_f',
'unrrduScaleAspectRatio', 'tenDwiGageMeanDWIValue',
'tijk_set_axis_esh', 'nrrdGetenvEnum', 'baneRangeLast',
'nrrdUnaryOpSqrt', 'airFP_POS_ZERO', 'nrrdIoStateGet',
'tenModelFromAxisLearnPossible', 'tenGageHessian',
'pullCondEnergyBad', 'nrrdUnaryOpOne', 'gageCtxFlagRadius',
'tenInterpTypeLogLinear', 'ell_3m_mul_d',
'mossFlagUnknown', 'coilContext_t', 'tenTensorCheck',
'gageKindAnswerOffset', 'nrrdNew', 'nrrdEncodingTypeBzip2',
'airMopSingleError', 'nrrdField_sample_units',
'hooverErrRayEnd', 'tend_anplotCmd',
'pullInfoTensorInverse', 'pullPropNeighTanCovar',
'nrrdAxesSplit', 'echoRTParmNew', 'nrrdTernaryOpMaxSmooth',
'tenEstimateContext', 'hestSourceUnknown',
'unrrdu_swapCmd', 'seekItemEigensystemSet', 'airDioMalloc',
'limnPrimitiveLineStrip', 'airThreadJoin',
'pullEnergyTypeCubic', 'pullSysParmConstraintStepMin',
'tenDefFiberWPunct', 'nrrdTypeUShort',
'hooverContextCheck', 'tenFiberStopSet',
'echoMatterPhongKs', 'limnSpaceDevice', 'nrrdUnaryOpAsin',
'biffMsgAdd', 'tenGageInvarRGrads',
'pullFlagConvergenceIgnoresPopCntl', 'echoMatterPhongKd',
'nrrdUnblock', 'pushEnergySpec', 'echoMatterPhongKa',
'tenFiberStopRadius', 'limnPrimitiveUnknown',
'airSinglePrintf', 'airNormalRand', 'nrrdIterNew',
'pullIterParmPopCntlPeriod', 'airNoDio_std',
'tenModel2Unit2D', 'nrrdFFTWPlanRigorUnknown', 'airFloat',
'tenFiberStopConfidence', 'nrrdKind3Normal',
'tenGageModeHessianEval2', 'tenGageModeHessianEval0',
'tenGageModeHessianEval1', 'airThreadNix',
'gageSclTotalCurv', 'gageCtxFlagK3Pack', 'nrrdUnaryOpErf',
'airGaussian', 'elfMaximaContextNix',
'nrrdResampleBoundarySet', 'tenEMatrixCalc',
'tenRotationTangents_d', 'nrrdBasicInfoType',
'coilContextNix', 'tenEigenvalueAdd', 'tenModelBall1Stick',
'hooverErrLast', 'nrrdKernelBSpline4',
'nrrdKernelC4HexicDD', 'nrrdTile2D', 'airIndex',
'tenFiberStopStub', 'limnCameraUpdate',
'nrrdKernelBSpline5DDD', 'dyeSpaceLast', 'airDrandMT_r',
'airMode3_d', 'miteRangeAlpha', 'limnPolyDataSave',
'nrrdStateVerboseIO', 'nrrdDefaultResamplePadValue',
'gageOptimSigCalculate', 'nrrd1DIrregAclGenerate',
'baneRange', 'tenGageModeNormal', 'tenTripleCalcSingle_d',
'ell_3v_perp_d', 'ell_3v_perp_f', 'pullIterParmMin',
'nrrdKind2DMatrix', 'airRandMTStateGlobal', 'tijk_sub_d',
'airLLong', 'tenGageConfidence', 'seekItemScalarSet',
'hooverStubThreadEnd', 'nrrdArrayCompare', 'tenGageEvec',
'tenDwiGageTensorLLS', 'limnSplineParse',
'baneClipPercentile', 'tenEpiRegister4D',
'seekItemNormalSet', 'limnSpaceLast', 'miteRenderBegin',
'airMyEndian', 'pullPropNeighCovar', 'miteRangeRed',
'gageVecVector2', 'gageVecVector1', 'nrrdBinaryOpMin',
'tenGageThetaNormal', 'nrrdKernelC4HexicDDD',
'nrrdSimplePad_nva', 'nrrdSpaceVecNorm',
'pullInitRandomSet', 'airPrimeList', 'tijk_negate_d',
'tenDwiGageTensorWLSErrorLog', 'meetBiffKey',
'echoThreadStateNix', 'airThreadStart', 'tenFiberSingle',
'ell_3m_to_q_d', 'ell_3m_to_q_f', 'airFP_POS_DENORM',
'nrrdAxesInsert', 'airThreadBarrierNew', 'gageSclHessian',
'baneIncNew', 'limnSpline', 'meetPullInfoNix',
'limnDeviceLast', 'airTypeULongInt', 'nrrdSample_va',
'limnPolyDataSmoothHC', 'tenGageOmegaHessianContrTenEvec2',
'nrrdAxisInfoSpaceDirection',
'tenGageOmegaHessianContrTenEvec0', 'tenAniso_Det',
'tijk_class_esh', 'nrrdResampleNonExistent',
'tenGageFA2ndDD', 'elfSingleShellDWI', 'tenGageCl1GradMag',
'tenFiberContextNew', 'tenGageFANormal',
'tenGageBNormal', 'tenEpiRegister3D', 'baneGkmsUsage',
'echoSphere', 'nrrdApply1DSubstitution', 'airEnumFmtDesc',
'seekPresent', 'tenExperSpecFromKeyValueSet',
'echoJittablePixel', 'tenGageTraceHessianEval2',
'ell_3m_eigensolve_d', 'echoIntxLightColor',
'dyeSimpleConvert', 'limnObjectVertexNormals',
'nrrdSpaceVecCopy', 'seekExtract', 'pullRngSeedSet',
'nrrdField_last', 'NrrdBoundarySpec', 'nrrdHestIter',
'alanParmVerbose', 'gageVecImaginaryPart',
'nrrdBasicInfoContent', 'tenEstimateValueMinSet',
'tijk_3d_sym_to_esh_f', 'limnPolyDataInfoTang',
'dyeSpaceRGB', 'mossImageAlloc', 'tenGlyphTypeLast',
'airRandMTStateNew', 'nrrdEnvVarDefaultCenterOld',
'tenExperSpecMaxBGet', 'pullPointNumberFilter',
'tenEstimateUpdate', 'pullInfoIsovalueHessian',
'baneRangeZeroCentered', 'nrrdLineSkip', 'coilStart',
'coilMethodTypeHomogeneous', 'nrrdField_kinds',
'tijk_refine_rankk_parm_t', 'tenExpand2D',
'gageVecJacobian', 'nrrdBlind8BitRangeState',
'tenEstimate1Method', 'tijk_4o2d_unsym',
'pullTraceMultiSizeof', 'tijk_axis_info', 'airFP_NEG_ZERO',
'pushEnergyAll', 'echoMatter', 'pullFlagStartSkipsPoints',
'gageSigmaSamplingLast', 'tenFiberParmUnknown',
'airStderr', 'pushEnergyType', 'unrrdu_histoCmd',
'pullSysParmSeparableGammaLearnRescale',
'unrrduDefNumColumns', 'tenModel1Tensor2',
'airEndianLittle', 'airArrayStructCB', 'limnSpaceWorld',
'seekTypeUnknown', 'seekContour3DTopoHackTriangle',
'nrrdCommentClear', 'limnPolyDataSpiralTubeWrap',
'tenGageRGradVec', 'nrrdStateBlind8BitRange',
'airPtrPtrUnion', 'pullInfoSpecAdd',
'tenDwiFiberTypeUnknown', 'tenGageOmegaHessianEvec',
'seekContextNix', 'tijk_efs_to_2d_sym_d',
'nrrdFFTWPlanRigorExhaustive', 'nrrdTernaryOpAdd',
'nrrdKernelHermiteScaleSpaceFlag', 'ell_cubic_root_triple',
'nrrdKernelSpecNix', 'nrrdDefaultResampleBoundary',
'nrrdAxesDelete', 'airFloatPosInf', 'nrrdKernelSpecSet',
'limnPolyDataInfoUnknown', 'tenGageThetaGradVec',
'gageItemSpecInit', 'tend_aboutCmd', 'gageErr',
'gagePoint', 'pullTraceNew', 'tenGageFAHessianFrob',
'alanParmMaxPixelChange', 'seekVerboseSet', 'airMopper',
'nrrdApply1DLut', 'tenGradientMeasure', 'nrrdKindStub',
'nrrdTypeSize', 'tenDwiGageTensorLLSLikelihood',
'nrrdBlock', 'mossFlagKernel', 'airTypeString',
'nrrdKernelC4HexicD', 'miteStageOpMin',
'nrrdFormatTypeUnknown', 'dyeColorSprintf',
'pullEnergySpecCopy', 'tend_gradsCmd', 'airFlippedSgnPow',
'baneMeasr2ndDD', 'pullCountPoints',
'meetConstGageKindParse', 'nrrdSpacingStatusNone',
'gageVecDivergence', 'gageShape_t', 'alanParmSet',
'limnPolyDataCube', 'limnDefCameraAtRelative',
'ell_cubic_root_three', 'biffMsgNix', 'tenGageCa1GradMag',
'limnPolyDataNew', 'nrrdDefaultResampleRound',
'coilMethodTypeModifiedCurvature', 'airFclose',
'gageSclHessEvec0', 'gageSclHessEvec1', 'gageSclHessEvec2',
'tenGlyphBqdUvEval', 'hestElideSingleNonExistFloatDefault',
'tenFiberStopLength', 'ell_4m_print_f',
'limnSplineInfo3Vector', 'ell_4m_print_d',
'tenDefFiberMaxNumSteps', 'limnLightDiffuseCB',
'nrrdMeasureHistoSum', 'tenGageFARidgeSurfaceStrength',
'nrrdField_axis_mins', 'pullEnergyCubicWell',
'tenFiberUpdate', 'biffMsgStrAlloc', 'tenDwiGageADC',
'pullProbe', 'hestSourceDefault', 'nrrdOriginCalculate',
'tenInterpN_d', 'mossMatInvert', 'seekTypeRidgeLine',
'pullCountEnergyFromPoints', 'coilMethodTypeUnknown',
'gageVecLength', 'pullSysParmRadiusSpace', 'pushContext_t',
'limnSpace', 'nrrdIoStateFormatGet', 'gageStackWtoI',
'gageStackBlurParmDgGoodSigmaMaxSet',
'nrrdKernelBSpline5DD', 'pushPointNew',
'pushEnergySpecNew', 'pullContext_t', 'tenDwiGageKindData',
'nrrdDeringRadiusScaleSet', 'nrrdTypePrintfStr',
'nrrdField_sizes', 'nrrdAxisInfoGet_nva',
'nrrdSimplePad_va', 'tenDwiGageTensorLikelihood',
'tenGageThetaGradMag', 'pullInterEnergySet',
'tijk_2d_sym_to_efs_d', 'nrrdKindLast', 'gageParm_t',
'nrrdIoStateLast', 'tenEvecRGBParmNew', 'limnFace_t',
'banePosCheck', 'nrrdField_axis_maxs', 'tenGageRotTanMags',
'tenGageBHessian', 'airHeapRemove',
'limnObjectPSDrawConcave', 'limnObjectFaceNumPreSet',
'tend_ellipseCmd', 'tenGageCovarianceKGRT',
'biffMsgStrlen', 'NrrdDeringContext',
'mossMatLeftMultiply', 'tenGageOmega2ndDD',
'nrrdDefaultResampleClamp',
'pullEnergyButterworthParabola',
'hestElideSingleEmptyStringDefault', 'tenExperSpec',
'nrrdDeringVerboseSet', 'tenDwiGage2TensorQSegError',
'hestElideMultipleNonExistFloatDefault',
'nrrdBasicInfoSpace', 'gageScl2ndDD',
'limnPolyDataTransform_d', 'airTypeDouble',
'pullEnergyHepticWell', 'tijk_esh_make_kernel_rank1_d',
'tijk_esh_make_kernel_rank1_f', 'baneIncUnknown',
'pullInfoUnknown', 'pullEnergyAll', 'unrrdu_permuteCmd',
'limnDeviceUnknown', 'nrrdTypeDouble',
'tenGageDelNormPhi3', 'tenGageDelNormPhi2',
'tenGageDelNormPhi1', 'airRandMTState', 'NrrdKernelSpec',
'nrrdBinaryOpMax', 'tenGageTensorGradRotE',
'nrrdInvertPerm', 'nrrdTernaryOp', 'nrrdBoundarySpecNix',
'tenGlyphBqdAbcUv', 'nrrdUnaryOpSgn',
'ell_3v_area_spherical_d', 'pullSysParmBackStepScale',
'dyeColorSet', 'baneIncAbsolute', 'nrrdKernelBSpline5',
'nrrdKernelBSpline6', 'nrrdKernelBSpline7',
'nrrdKernelBSpline1', 'nrrdKernelBSpline2',
'gageSclGeomTensTen', 'nrrdMeasureHistoL2',
'limnPolyDataAlloc', 'tenGradientBalance',
'tijk_esh_make_kernel_delta_f', 'gageSclHessEvec',
'elfGlyphKDE', 'nrrdHistoDraw', 'nrrdGetenvDouble',
'nrrdLoadMulti', 'echoMatterTextureSet', 'limnQNtoV_f',
'limnQNtoV_d', 'tenGageTraceHessianEval1',
'tenGageTraceHessianEval0', 'echoSceneNix',
'nrrdZlibStrategyFiltered', 'nrrdTernaryOpLTSmooth',
'elfColorGlyphMaxima', 'pullEnergySpecNix',
'nrrdKindHSVColor', 'elfBallStickParms',
'nrrdFormatTypeVTK', 'pullCondOld',
'alanTextureTypeTuring', 'pullCountForceFromPoints',
'tend_evecrgbCmd', 'pushEnergyTypeCotan',
'echoJitterRandom', 'pullPtrPtrUnion',
'tenGageOmegaHessianEvec2', 'echoIntx', 'gageCtxFlagShape',
'alanParmUnknown', 'limnCameraPathTrackAt',
'limnPolyDataCopyN', 'tijk_3d_sym_to_esh_d',
'tenDWMRIModalityVal', 'pullTraceSet',
'nrrdKernelAQuartic', 'airTauOfTime',
'nrrdSpace3DRightHanded', 'nrrdUnaryOpSin',
'nrrdFFTWPlanRigorMeasure', 'airHeapFind', 'ell_Nm_mul',
'nrrdKindXYZColor', 'echoTypeCylinder',
'nrrdAxisInfoMinMaxSet', 'gageVolumeCheck',
'gageVecDirHelDeriv', 'tijk_2o2d_unsym', 'nrrdStringWrite',
'tenAniso_B', 'limnLightUpdate', 'nrrdOriginStatusOkay',
'tenAniso_Q', 'tenAniso_R', 'tenAniso_S', 'limnQN16simple',
'tijk_max_efs_order', 'dyeColorGet', 'nrrdMeasureLine',
'tenGageFAMeanCurv', 'ell_4m_mul_f', 'gagePresent',
'ell_4m_mul_d', 'limnPresent', 'nrrdUnaryOpTauOfSigma',
'tenDwiGageTensorWLSError', 'miteRangeLast',
'tenTripleTypeLast', 'nrrdSwapEndian',
'gageErrBoundsStack', 'tijk_approx_heur_parm',
'tenEstimate1TensorSimulateVolume', 'unrrdu_resampleCmd',
'limnPolyDataInfoBitFlag', 'hooverStubRayBegin',
'tenGageKind', 'nrrdRangeAxesGet',
'tenGageConfDiffusionFraction', 'nrrdKernelBCCubic',
'pullVolumeStackAdd', 'tenGageCa1Normal', 'airDoneStr',
'pullSysParmSet', 'nrrdResamplePadValueSet',
'nrrdDeringContextNix', 'pullSourceLast',
'echoMatterMetalFuzzy', 'mossMatScaleSet',
'tenGageCp1Hessian', 'unrrdu_sliceCmd', 'gageKindVec',
'echoMatterMetalKd', 'nrrdIoStateSkipData',
'echoMatterMetalKa', 'unrrduScaleMultiply',
'unrrdu_vidiconCmd', 'gageKindScl',
'hestCleverPluralizeOtherY', 'nrrdCheapMedian',
'nrrdKernelDiscreteGaussian', 'limnSplineInfoUnknown',
'tenEstimateLinearSingle_d', 'tenEstimateLinearSingle_f',
'tenModel1Vector2D', 'nrrdSaveMulti', 'baneDefIncLimit',
'tenLogSingle_d', 'tend_satinCmd',
'tijk_3d_sym_to_esh_matrix_f', 'nrrdIoStateZlibLevel',
'gageQueryAdd', 'gageItemPackPartLast',
'nrrdKernelBSpline3ApproxInverse', 'baneMeasrLast',
'tenFiberSingleNix', 'gageVecLast', 'limnQNUnknown',
'nrrdKernelAQuarticDD', 'airIsInf_f', 'tend_anscaleCmd',
'airIsInf_d', 'tenGageTensorGradMagMag',
'tenDWMRIGradKeyFmt', 'alan_t', 'tenGageCl1Hessian',
'limnQN10octa', 'airFP_Unknown',
'nrrdBasicInfoKeyValuePairs', 'tenTripleTypeRThetaPhi',
'gageParmUnknown', 'pushBin', 'miteShadeMethodUnknown',
'nrrdBinaryOpPow', 'elfMaximaFind_d', 'biffMsgNoop',
'baneIncPercentile', 'elfMaximaFind_f',
'nrrdTernaryOpLast', 'tenGageTensorLogEuclidean',
'pullVolumeLookup', 'unrrduScaleDivide', 'airParseStr',
'unrrdu_fftCmd', 'pullInitMethodHalton',
'tenInterpTypeGeoLoxR', 'nrrdKernelZero', 'pullRun',
'tenGageFAGradVec', 'tenInterpTypeGeoLoxK',
'nrrdDefaultCenter', 'limnPolyDataWriteVTK',
'ell_3m_post_mul_f', 'ell_3m_post_mul_d', 'limnPart_t',
'baneDefRenormalize', 'tenDWMRIKeyValueParse',
'airIndexClampULL', 'baneIncRangeRatio',
'baneStateHistEqBins', 'nrrdFLoad', 'echoGlobalStateNew',
'tenEstimateThresholdSet', 'pullFlagPermuteOnRebin',
'baneProbe', 'biffMsgLineLenMax', 'nrrdKernelParse',
'tenSqrtSingle_d', 'limnSplineTypeSpecNew',
'gageVecMGEvec', 'echoRectangle', 'unrrdu_ccadjCmd',
'tijk_approx_rankk_3d_d', 'ell_q_to_4m_f',
'nrrdStateDisallowIntegerNonExist', 'ell_q_to_4m_d',
'biffMsgClear', 'ell_3m_2d_nullspace_d',
'nrrdBinaryOpLast', 'tend_evalCmd', 'coilDefaultRadius',
'nrrdRangeNix', 'dyeSpaceUnknown',
'airRandMTStateGlobalInit', 'nrrdDomainAxesGet',
'echoMatterLightUnit', 'pullInterTypeAdditive',
'tenGageDetNormal', 'nrrdDeringClampHistoBinsSet',
'airDrandMT', 'nrrdUnaryOpLog2', 'pullEnergyTypeSpring',
'airMode3', 'tijk_refine_rank1_parm_nix',
'limnObjectConeAdd', 'NrrdAxisInfo', 'seekDescendToDeg',
'airMopDone', 'tenSqrtSingle_f',
'tenDwiGageTensorNLSLikelihood', 'baneGkms_infoCmd',
'elfESHEstimMatrix_d', 'baneGkmsMeasr', 'nrrdField_min',
'dyeStrToSpace', 'gageVecStrain', 'nrrdShuffle',
'baneGkms_miteCmd', 'mite_t', 'gageDeconvolveSeparable',
'airSetNull', 'pullThreadNumSet', 'airDioInfo', 'airArray',
'nrrdResampleContextNew', 'limnSplineTypeSpec_t',
'echoListAdd', 'tenDefFiberAnisoThresh',
'pullTraceStopBounds', 'tenGageFAHessianEval',
'nrrdResampleExecute', 'nrrdField_spacings', 'dyeColorNew',
'gageParmStackUse', 'nrrdBoundarySpecCompare',
'airNoDio_fpos', 'nrrdOriginStatusNoMaxOrSpacing',
'gageSclHessRidgeness', 'nrrdDClamp', 'unrrdu_reshapeCmd',
'pullInfoHessian', 'limnPolyDataPrimitiveTypes',
'gageOptimSigSet', 'gageStackItoW',
'nrrdEnvVarStateMeasureModeBins', 'tenTripleTypeMoment',
'nrrdKernelBSpline2D', 'nrrdIStore', 'mossSamplerNix',
'tenModelParse', 'miteRangeUnknown',
'tenGageInvarKGradMags', 'biffPresent', 'nrrdTypeInt',
'seekTypeValleySurfaceT', 'limnPolyDataWriteIV',
'nrrdFormatArray', 'nrrdCCMax', 'airBesselI0ExpScaled',
'tenTripleTypeK', 'tenTripleTypeJ', 'coilTask',
'tenTripleTypeR', 'mossDefCenter', 'tenVerbose',
'miteDefOpacNear1', 'nrrdIoStateUnknown',
'pullSysParmRadiusScale', 'nrrdKernelSpecParse',
'gageDefCurvNormalSide', 'tijk_approx_heur_parm_nix',
'tenGageCa1GradVec', 'tijk_approx_rankk_2d_d',
'tenGageDelNormK3', 'tenGageDelNormK2', 'biffMsgMovef',
'gageItemPackPartHessEvec0', 'gageItemPackPartHessEvec1',
'gageItemPackPartHessEvec2', 'limnHestSpline',
'nrrdEnvVarStateBlind8BitRange', 'echoLightColor',
'tenFiberSingleNew', 'dyeRGBtoXYZ', 'tijk_2o2d_asym',
'baneInfoCheck', 'unrrdu_mlutCmd', 'hestParmFree',
'tenFiberIntgEuler', 'baneGkmsHestIncStrategy',
'echoTextureLookup', 'pullOutputGet', 'nrrdGetenvUInt',
'pullVolumeNix', 'nrrdBoundaryMirror', 'nrrdFFTWPlanRigor',
'pullCountPointsStuck', 'pullInfoSpec_t',
'nrrdTernaryOpInClosed', 'tenGageFAHessianEvalMode',
'echoTypeSphere', 'gagePerVolumeAttach', 'pushIterate',
'nrrdField_units', 'unrrduUsageUnu', 'ell_aa_to_3m_f',
'ell_aa_to_3m_d', 'baneMeasrNix', 'pullCondConstraintFail',
'tenAniso_Cs1', 'tenAniso_Cs2', 'airTauOfSigma',
'alanStopMaxIteration', 'miteUserNew', 'nrrdSimpleCrop',
'tenEstimate1MethodNLS', 'gageKind', 'nrrdSanityOrDie',
'seekStrengthUseSet', 'pullContextNew',
'tenDwiGageTensorNLSErrorLog', 'echoList', 'airMopNew',
'nrrdOriginStatusLast', 'tijk_3o3d_sym', 'gageQueryReset',
'gageDefStackNormalizeDerivBias', 'pullInfoSeedPreThresh',
'alanUpdate', 'seekNormalsFindSet', 'pullEnergySpec',
'airRandMTSanity', 'nrrdKernelCatmullRomSupportDebugD',
'tenFiberSingleInit', 'tenModelBall1Cylinder',
'unrrdu_projectCmd', 'biffCheck', 'pullInitMethodRandom',
'pullInfoSeedThresh', 'tenDwiGageTensorMLEErrorLog',
'hooverRender', 'biffMsgMove', 'limnSplineTypeLast',
'tenEstimate1MethodLLS',
'nrrdEnvVarDefaultWriteCharsPerLine', 'tenInterpTwo_d',
'hestParseFree', 'hooverStubRenderEnd', 'pullPoint_t',
'tenGageTrace', 'mossSamplerKernelSet', 'echoRayColor',
'gageSclGradMag', 'nrrdCopy', 'nrrdUnaryOp',
'nrrdKernelTMF', 'pullEnergyButterworth',
'gagePerVolumeNix', 'tenEstimateSkipSet', 'airInsane_dio',
'tenDwiFiberTypeLast', 'gageScl3PFilter_t', 'echoTriMesh',
'tenDwiGageTensorError', 'coilIterate', 'airTypeLongInt',
'nrrdKernelParmSet',
'pullFlagAllowCodimension3Constraints',
'limnPrimitiveLast', 'gageParmRenormalize', 'tenGageNorm',
'nrrdFFT', 'pullFlagNoAdd',
'tenDwiGage2TensorPeledLevmarInfo', 'baneMeasrNew',
'airLogRician', 'tenGageTraceGradVecDotEvec0',
'tenDwiGageTensorWLSLikelihood', 'nrrdSplice',
'nrrdKernelGaussianDD', 'nrrdFFTWEnabled',
'nrrdRangeReset', 'nrrdKind3Color', 'airSrandMT',
'tenGageModeHessianFrob', 'tenGageConfGradVecDotEvec0',
'echoThreadState', 'tenDwiGageTensorNLS', 'baneInputCheck',
'airNoDio_fd', 'pushEnergyGauss', 'tend_tconvCmd',
'nrrdAxisInfoSet_va', 'tenGageModeGradVec',
'nrrdValCompareInv', 'nrrdStringRead', 'unrrdu_rmapCmd',
'nrrdDeringLinearInterpSet', 'limnPolyDataPrimitiveSelect',
'pullPointInitializePerVoxel', 'ell_q_inv_f',
'pullPropNeighDistMean', 'nrrdArithIterTernaryOpSelect',
'nrrdSpace', 'pullContext', 'NrrdResampleContext',
'hestParse', 'limnObjectFaceReverse', 'miteShadeSpecPrint',
'NrrdIoState_t', 'nrrdEncodingUnknown',
'airFPValToParts_f', 'airFPValToParts_d', 'dyeColor',
'tenFiberStopReset', 'pullInterType', 'pullEnergySpecSet',
'gageCtxFlagNeedD', 'limnPolyDataClip',
'alanParmFrameInterval', 'nrrdKernelC5Septic',
'gageCtxFlagNeedK', 'mossSamplerEmpty', 'limnQN16checker',
'tenExperSpecGradBValSet', 'gageZeroNormal',
'nrrdFFTWPlanRigorEstimate', 'gageSclHessValleyness',
'nrrdSpacingStatus', 'tenGageSGradVec', 'meetPullVol',
'airFP_NEG_INF', 'airBesselI1By0',
'nrrdBasicInfoSampleUnits', 'nrrdIoStateEncodingSet',
'nrrdMeasureUnknown', 'ell_3m_pre_mul_d',
'ell_3m_pre_mul_f', 'tijk_esh_deconvolve_d', 'tenPresent',
'unrrduBiffKey', 'coilFinish', 'nrrdResampleRoundSet',
'miteShadeMethodPhong', 'nrrdResampleBoundarySpecSet',
'airSprintPtrdiff_t', 'biffMsgNew', 'airEndianLast',
'nrrdSpaceVecScale', 'ell_q_to_3m_d', 'nrrdDLoad',
'hestInfo', 'miteStageOpUnknown', 'nrrdField',
'echoPtrPtrUnion', 'nrrdEncodingHex',
'nrrdStateMeasureModeBins', 'tijk_init_max_3d_d',
'gagePerVolume_t', 'limnPolyDataPolarSphere',
'tend_bmatCmd', 'tenDwiGageB0', 'nrrdApplyMulti1DRegMap',
'gageSclNPerp', 'baneGKMSHVol', 'echoTypeSuperquad',
'limnPolyDataCubeTriangles', 'nrrdKindNormal',
'miteValLast', 'gageErrStackSearch', 'tenGageTraceNormal',
'nrrdKernelBSpline7DD', 'baneRangeAnswer',
'gageStackBlurParmOneDimSet', 'airFPPartsToVal_d',
'nrrdAxesMerge', 'echoJitterLast', 'airIntPow',
'tenModelBall', 'hooverThreadEnd_t', 'limnQN8checker',
'nrrdAxisInfoSet_nva', 'nrrdEnvVarStateGrayscaleImage3D',
'tenEstimateBMatricesSet', 'airHeapLength', 'tenGageEvec2',
'gageParmSet', 'nrrdRead',
'pullFlagNoPopCntlWithZeroAlpha', 'nrrdElementNumber',
'tijk_eval_esh_basis_f', 'nrrdDeringExecute',
'tenDefFiberKernel', 'echoJitterCompute', 'tijk_class_efs',
'airInsane_AIR_NAN', 'nrrdBinaryOpUnknown',
'nrrdDefaultResampleRenormalize', 'tenEvecRGBParmNix',
'nrrdUnaryOpNormalRand', 'tend_msimCmd',
'seekTypeValleySurface', 'airShuffle', 'dyeSpaceXYZ',
'tenGageCl1HessianEval', 'mossBiffKey', 'tenAniso_Cl1',
'tenAniso_Cl2', 'nrrdFormatTypePNG', 'airULLong',
'gageKernel', 'gageSclGeomTens', 'nrrdKernelBSpline3DDD',
'ell_cubic_root_single_double', 'airEnum',
'nrrdCenterCell', 'hooverErrThreadBegin',
'nrrdUnaryOpUnknown', 'elfCart2Thetaphi_f',
'elfCart2Thetaphi_d', 'baneGkms_opacCmd', 'airNaN',
'limnPolyDataVertexWindingFix', 'gageStackBlurParmSprint',
'miteStageOp', 'dyeSpaceHSV', 'tenEstimate1MethodLast',
'gageShapeBoundingBox', 'limnObjectPartTransform',
'tenGageCl1HessianEval2', 'pullPropNeighInterNum',
'airDisableDio', 'airArrayLenSet', 'gageQuery',
'pullEnergyPlot', 'gageScl3PFilter8',
'tenGageCl1HessianEval0', 'tenFiberContext',
'gageScl3PFilter2', 'nrrdBlind8BitRangeTrue',
'gageScl3PFilter6', 'dyeSpaceHSL',
'nrrdSpaceScannerXYZTime', 'echoTypeCube',
'nrrdCommentCopy', 'airSanity', 'tenFiberContextDwiNew',
'nrrdKernelTMF_maxA', 'dyeSpaceToStr', 'nrrdKindDomain',
'nrrdKernelTMF_maxD', 'unrrduHestPosCB', 'nrrdKernelTent',
'unrrduHestFileCB', 'gageParmReset', 'airDouble',
'mossSamplerImageSet', 'pullInitMethod',
'airInsane_DLSize', 'baneRangeUnknown',
'gageParmTwoDimZeroZ', 'nrrdElementSize', 'limnCameraNew',
'gageVecUnknown', 'nrrdMeasureSkew',
'tenGlyphTypeBetterquad', 'echoCol_t', 'echoListSplit',
'pullPropScale', 'gageKernelLast', 'airStrtok',
'tenGageTraceHessianFrob', 'seekContour3DTopoHackEdge',
'nrrdBlind8BitRangeUnknown', 'limnQNDemo',
'gageScl3PFilterN', 'unrrdu_spliceCmd', 'nrrdAxisInfoMax',
'tijk_1o3d', 'nrrdTypeLLong', 'nrrdKernelBSpline6DD',
'echoMatterGlass', 'unrrdu_sselectCmd',
'pullIterParmCallback', 'pullStatusEdge',
'nrrdIoStateKeepNrrdDataFileOpen', 'nrrdMaybeAlloc_nva',
'nrrdSpaceUnknown', 'tenModelPrefixStr', 'meetPullVolNix',
'_airThreadMutex', 'nrrdWrap_nva', 'nrrdPresent',
'nrrdNix', 'nrrdResampleInfoNix', 'pushBinProcess',
'tenGageFALaplacian', 'nrrdCCSettle', 'gagePvlFlagLast',
'mossSamplerSample', 'airFP_POS_NORM',
'limnPrimitiveTriangleFan', 'nrrdField_type',
'unrrdu_mrmapCmd', 'miteThreadEnd',
'pullInfoHeightHessian', 'gageSclLast',
'tenInterpTypeAffineInvariant', 'airMopSub', 'dyeHSLtoRGB',
'unrrdu_axinfoCmd', 'tenGageCl1Normal',
'tenGageCa1Hessian', 'limnDefCameraOrthographic',
'airDioWrite', 'nrrdStateKindNoop', 'nrrdUnaryOpAtan',
'tenDwiGageTensorMLELikelihood', 'airMopOkay',
'gagePvlFlagVolume', 'NrrdEncoding', 'tenFiberStopOff',
'nrrdBoundaryPad', 'hestPresent',
'tenEstimate1TensorVolume4D', 'nrrdMeasureHistoVariance',
'tenFiberTypeSet', 'baneGkmsHestBEF',
'limnSplineTypeHasImplicitTangents', 'tenTripleTypeXYZ',
'tenAnisoLast', 'nrrdKernelCheck', 'nrrdDescribe',
'limnObjectEdgeAdd', 'gageStackProbeSpace',
'nrrdDeringRadialKernelSet', 'nrrdKernelBox',
'tenModelBall1StickEMD', 'coilContextAllSet',
'gageContext_t', 'nrrdTypeUInt', 'tenShrink',
'tenLogSingle_f', 'limnObjectCylinderAdd',
'gageErrBoundsSpace', 'gageSclShapeIndex',
'pullSysParmOpporStepScale', 'nrrdKernelHannDD',
'seekTypeIsocontour', 'pullConstraintScaleRange',
'echoIntxFuzzify', 'nrrdTypeUnknown',
'nrrdResampleContextNix', 'hooverStubRayEnd',
'limnSplineBCSet', 'limnPolyDataSpiralBetterquadric',
'pullCountUnknown', 'limnQNBins', 'ell_Nm_wght_pseudo_inv',
'limnObjectSquareAdd', 'limnSplineCleverNew',
'nrrdMeasureRootMeanSquare', 'limnQN14checker',
'tend_estimCmd', 'gageContextNew', 'tend_evecCmd',
'unrrdu_dataCmd', 'tenGageEval', 'coilKindType3Color',
'alanParmReact', 'elfESHEstimMatrix_f',
'pullCountEnergyFromImage', 'tenModelB0', 'mossMatFlipSet',
'nrrdZlibStrategyLast', 'unrrdu_headCmd',
'limnSplineInfoScalar', 'nrrdField_old_max',
'hooverErrThreadEnd', 'seekTypeValleySurfaceOP',
'mossSampler', 'gageSclFlowlineCurv',
'tenExperSpecKnownB0Get', 'nrrdMeasureProduct',
'nrrdTernaryOpGaussian', 'pullPointInitializeGivenPos',
'airNoDio_small', 'gageSclUnknown',
'nrrdResampleRangeFullSet', 'pullSysParm',
'nrrdDeringThetaKernelSet', 'limnPolyDataSize',
'airMyQNaNHiBit', 'coilMethodTypeCurvatureFlow',
'pullInitMethodUnknown', 'pushStart',
'nrrdBoundarySpecCheck',
'meetPullVolStackBlurParmFinishMulti',
'tenFiberParmVerbose', 'gageStackBlurParmCompare',
'limnObjectLookAdd', 'tijk_refine_rankk_2d_f',
'tenModelNllFit', 'tenFiberMultiTrace',
'limnEdgeTypeBorder', 'nrrdSpaceOriginGet',
'nrrdBoundaryUnknown', 'tenInv_f', 'tenInv_d',
'baneHVolCheck', 'pullInitLiveThreshUseSet',
'nrrdField_byte_skip', 'tenTripleTypeUnknown',
'nrrdSpaceRightAnteriorSuperiorTime',
'tenGageCl1HessianEval1', 'pullTraceStopStub',
'tenFiberStopUnknown', 'tenDwiGageLast', 'meetPullInfoNew',
'limnLightSwitch', 'echoObjectNew', 'tijk_init_max_2d_d',
'pullTraceMultiAdd', 'nrrdMeasureLineIntercept',
'airNoDioErr', 'nrrdResample_t',
'gageOptimSigErrorPlotSliding', 'nrrdUnaryOpRoundDown',
'coilVerbose', 'pullProcessMode', 'airArrayPointerCB',
'airEnumCheck', 'nrrdOriginStatusUnknown',
'nrrdKind2Vector', 'nrrdEnvVarDefaultKernelParm0',
'nrrdAxisInfoIdxRange', 'echoMatterLight', 'pullCondNew',
'tijk_2o3d_sym', 'gageDeconvolve', 'nrrdKernelSpecCompare',
'gageSclHessEval1', 'gageSclHessEval0', 'gageSclHessEval2',
'nrrdDeringThetaNumSet', 'tenEstimateLinear4D',
'nrrdDefaultGetenv']
# =============================================================
# What follows are all the #define's in Teem, excluding macros,
# and #defines that depend on compile-time tests done by the
# C pre-processor.
# This is created by something akin to grep'ing through the
# public header files, with some extra filters.
# Teem release version, mirrored from teem/air.h.  The three components
# combine into TEEM_VERSION as MAJOR*10000 + MINOR*100 + PATCH, which is
# why MAJOR must be 1 digit and MINOR/PATCH at most 2 digits each.
TEEM_VERSION_MAJOR = 1 # must be 1 digit
TEEM_VERSION_MINOR = 11 # 1 or 2 digits
# NOTE: was "01" in the grep'd header; a leading-zero decimal literal is a
# SyntaxError in Python 3 (and silently octal in Python 2), so use plain 1.
TEEM_VERSION_PATCH = 1 # 1 or 2 digits
TEEM_VERSION = 11101 # must be 5 digits, to facilitate
TEEM_VERSION_STRING = "1.11.1" # cannot be so easily compared
# Mathematical constants from air.h, written to full double precision
# (these parse to exactly math.pi and math.e).
AIR_PI = 3.14159265358979323846
AIR_E = 2.71828182845904523536
# Fixed buffer sizes for air's string utilities; the +1 presumably leaves
# room for the NUL terminator of the C strings they mirror -- TODO confirm
# against air.h.
AIR_STRLEN_SMALL = 128 + 1
AIR_STRLEN_MED = 256 + 1
AIR_STRLEN_LARGE = 512 + 1
AIR_STRLEN_HUGE = 1024 + 1
# Length of the Mersenne Twister state vector (standard MT19937 N).
AIR_RANDMT_N = 624
# Upper bounds of assorted air enums/tables.
AIR_TYPE_MAX = 12
AIR_INSANE_MAX = 11
AIR_PRIME_NUM = 1000
AIR_NODIO_MAX = 12
# C-style boolean values.
AIR_TRUE = 1
AIR_FALSE = 0
# The following are evaluated from the wrapped air library itself (the
# airMy*/airFloat* names are defined earlier in this module).  Judging by
# the names they describe the build machine: byte order, QNaN bit layout,
# and direct-I/O availability -- confirm against teem/air.h.
AIR_ENDIAN = (airMyEndian())
AIR_QNANHIBIT = (airMyQNaNHiBit)
AIR_DIO = (airMyDio)
# Special float values, read as the .f member of air's float/int unions.
# NOTE(review): AIR_NAN and AIR_QNAN are intentionally the same value
# (both come from airFloatQNaN).
AIR_NAN = (airFloatQNaN.f)
AIR_QNAN = (airFloatQNaN.f)
AIR_SNAN = (airFloatSNaN.f)
AIR_POS_INF = (airFloatPosInf.f)
AIR_NEG_INF = (airFloatNegInf.f)
# Per-library constants.  Each Teem library FOO exposes FOO = fooBiffKey
# (apparently the key that library uses with the biff error accumulator --
# the biffMsg*/biffAdd family above -- confirm in teem/biff) plus assorted
# size and enum limits grep'd from its public header.
ALAN = alanBiffKey
ALAN_THREAD_MAX = 256
ALAN_STOP_MAX = 5
BANE = baneBiffKey
BANE_PARM_NUM = 5
COIL = coilBiffKey
COIL_PARMS_NUM = 6
COIL_METHOD_TYPE_MAX = 8
COIL_KIND_TYPE_MAX = 3
DYE = dyeBiffKey
DYE_MAX_SPACE = 6
# echo: inline comments below were carried over (some truncated) from the
# header extraction.
ECHO = echoBiffKey
ECHO_LIST_OBJECT_INCR = 32
ECHO_IMG_CHANNELS = 5
ECHO_EPSILON = 0.00005 # used for adjusting ray positions
ECHO_NEAR0 = 0.004 # used for comparing transparency to zero
ECHO_LEN_SMALL_ENOUGH = 5 # to control splitting for split objects
ECHO_THREAD_MAX = 512 # max number of threads
ECHO_JITTER_NUM = 4
ECHO_JITTABLE_NUM = 7
ECHO_MATTER_MAX = 4
ECHO_MATTER_PARM_NUM = 4
ECHO_TYPE_NUM = 12
# ell: note the different biff-key spelling (ell_biff_key, not ellBiffKey).
ELL = ell_biff_key
ELL_EPS = 1.0e-10
ELL_CUBIC_ROOT_MAX = 4
GAGE = gageBiffKey
GAGE_DERIV_MAX = 2
GAGE_ERR_MAX = 6
GAGE_CTX_FLAG_MAX = 6
GAGE_PVL_FLAG_MAX = 3
GAGE_KERNEL_MAX = 7
GAGE_ITEM_PREREQ_MAXNUM = 8
GAGE_SCL_ITEM_MAX = 36
GAGE_VEC_ITEM_MAX = 31
GAGE_ITEM_PACK_PART_MAX = 11
GAGE_SIGMA_SAMPLING_MAX = 3
GAGE_QUERY_BYTES_NUM = 32
GAGE_ITEM_MAX = ((8*GAGE_QUERY_BYTES_NUM)-1)
GAGE_PERVOLUME_ARR_INCR = 32
GAGE_OPTIMSIG_SIGMA_MAX = 11
GAGE_OPTIMSIG_SAMPLES_MAXNUM = 11
HOOVER = hooverBiffKey
HOOVER_THREAD_MAX = 512
HOOVER_ERR_MAX = 10
LIMN = limnBiffKey
LIMN_LIGHT_NUM = 8
LIMN_SPLINE_Q_AVG_EPS = 0.00001
LIMN_EDGE_TYPE_MAX = 7
LIMN_SPACE_MAX = 4
LIMN_PRIMITIVE_MAX = 7
LIMN_POLY_DATA_INFO_MAX = 4
LIMN_QN_MAX = 16
LIMN_SPLINE_TYPE_MAX = 5
LIMN_SPLINE_INFO_MAX = 6
LIMN_CAMERA_PATH_TRACK_MAX = 3
MEET = meetBiffKey
MITE = miteBiffKey
MITE_RANGE_NUM = 9
MITE_STAGE_OP_MAX = 4
MITE_VAL_ITEM_MAX = 19
MOSS = mossBiffKey
MOSS_FLAG_NUM = 2
NRRD = nrrdBiffKey
NRRD_DIM_MAX = 16 # Max array dimension (nrrd->dim)
NRRD_SPACE_DIM_MAX = 8 # Max dimension of "space" around array
NRRD_EXT_NRRD = ".nrrd"
NRRD_EXT_NHDR = ".nhdr"
NRRD_EXT_PGM = ".pgm"
NRRD_EXT_PPM = ".ppm"
NRRD_EXT_PNG = ".png"
NRRD_EXT_VTK = ".vtk"
NRRD_EXT_TEXT = ".txt"
NRRD_EXT_EPS = ".eps"
NRRD_KERNEL_PARMS_NUM = 8 # max # arguments to a kernel-
NRRD_MINMAX_PERC_SUFF = "%"
NRRD_COMMENT_CHAR = '#'
NRRD_FILENAME_INCR = 32
NRRD_COMMENT_INCR = 16
NRRD_KEYVALUE_INCR = 32
NRRD_LIST_FLAG = "LIST"
NRRD_PNM_COMMENT = "# NRRD>" # this is designed to be robust against
NRRD_PNG_FIELD_KEY = "NRRD" # this is the key used for getting nrrd
NRRD_PNG_COMMENT_KEY = "NRRD#" # this is the key used for getting nrrd
NRRD_UNKNOWN = "???" # how to represent something unknown in
NRRD_NONE = "none" # like NRRD_UNKNOWN, but with an air
NRRD_FORMAT_TYPE_MAX = 6
NRRD_BOUNDARY_MAX = 5
NRRD_TYPE_MAX = 11
NRRD_TYPE_SIZE_MAX = 8 # max(sizeof()) over all scalar types
NRRD_ENCODING_TYPE_MAX = 5
NRRD_ZLIB_STRATEGY_MAX = 3
NRRD_CENTER_MAX = 2
NRRD_KIND_MAX = 31
NRRD_AXIS_INFO_SIZE_BIT = (1<< 1)
NRRD_AXIS_INFO_SPACING_BIT = (1<< 2)
NRRD_AXIS_INFO_THICKNESS_BIT = (1<< 3)
NRRD_AXIS_INFO_MIN_BIT = (1<< 4)
NRRD_AXIS_INFO_MAX_BIT = (1<< 5)
NRRD_AXIS_INFO_SPACEDIRECTION_BIT = (1<< 6)
NRRD_AXIS_INFO_CENTER_BIT = (1<< 7)
NRRD_AXIS_INFO_KIND_BIT = (1<< 8)
NRRD_AXIS_INFO_LABEL_BIT = (1<< 9)
NRRD_AXIS_INFO_UNITS_BIT = (1<<10)
NRRD_AXIS_INFO_MAX = 10
NRRD_AXIS_INFO_NONE = 0
NRRD_BASIC_INFO_DATA_BIT = (1<< 1)
NRRD_BASIC_INFO_TYPE_BIT = (1<< 2)
NRRD_BASIC_INFO_BLOCKSIZE_BIT = (1<< 3)
NRRD_BASIC_INFO_DIMENSION_BIT = (1<< 4)
NRRD_BASIC_INFO_CONTENT_BIT = (1<< 5)
NRRD_BASIC_INFO_SAMPLEUNITS_BIT = (1<< 6)
NRRD_BASIC_INFO_SPACE_BIT = (1<< 7)
NRRD_BASIC_INFO_SPACEDIMENSION_BIT = (1<< 8)
NRRD_BASIC_INFO_SPACEUNITS_BIT = (1<< 9)
NRRD_BASIC_INFO_SPACEORIGIN_BIT = (1<<10)
NRRD_BASIC_INFO_MEASUREMENTFRAME_BIT = (1<<11)
NRRD_BASIC_INFO_OLDMIN_BIT = (1<<12)
NRRD_BASIC_INFO_OLDMAX_BIT = (1<<13)
NRRD_BASIC_INFO_COMMENTS_BIT = (1<<14)
NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT = (1<<15)
NRRD_BASIC_INFO_MAX = 15
NRRD_BASIC_INFO_NONE = 0
NRRD_FIELD_MAX = 32
NRRD_HAS_NON_EXIST_MAX = 3
NRRD_SPACE_MAX = 12
NRRD_SPACING_STATUS_MAX = 4
NRRD_MEASURE_MAX = 30
NRRD_BLIND_8BIT_RANGE_MAX = 3
NRRD_UNARY_OP_MAX = 32
NRRD_BINARY_OP_MAX = 23
NRRD_TERNARY_OP_MAX = 16
NRRD_FFTW_PLAN_RIGOR_MAX = 4
NRRD_RESAMPLE_NON_EXISTENT_MAX = 3
PULL = pullBiffKey
PULL_THREAD_MAXNUM = 512
PULL_VOLUME_MAXNUM = 4
PULL_POINT_NEIGH_INCR = 16
PULL_BIN_MAXNUM = 40000000 # sanity check on max number bins
PULL_PHIST = 0
PULL_HINTER = 0
PULL_TANCOVAR = 1
PULL_INFO_MAX = 23
PULL_PROP_MAX = 17
PULL_STATUS_STUCK_BIT = (1<< 1)
PULL_STATUS_NEWBIE_BIT = (1<< 2)
PULL_STATUS_NIXME_BIT = (1<< 3)
PULL_STATUS_EDGE_BIT = (1<< 4)
PULL_INTER_TYPE_MAX = 4
PULL_ENERGY_TYPE_MAX = 13
PULL_ENERGY_PARM_NUM = 3
PULL_PROCESS_MODE_MAX = 4
PULL_SOURCE_MAX = 2
PULL_COUNT_MAX = 14
PULL_TRACE_STOP_MAX = 5
PULL_INIT_METHOD_MAX = 4
PULL_CONSTRAINT_FAIL_MAX = 6
PUSH = pushBiffKey
PUSH_THREAD_MAXNUM = 512
PUSH_ENERGY_TYPE_MAX = 5
PUSH_ENERGY_PARM_NUM = 3
SEEK = seekBiffKey
SEEK_TYPE_MAX = 11
TEN = tenBiffKey
TEN_ANISO_MAX = 29
TEN_INTERP_TYPE_MAX = 11
TEN_GLYPH_TYPE_MAX = 6
TEN_GAGE_ITEM_MAX = 207
TEN_DWI_GAGE_ITEM_MAX = 35
TEN_ESTIMATE_1_METHOD_MAX = 4
TEN_ESTIMATE_2_METHOD_MAX = 2
TEN_FIBER_TYPE_MAX = 6
TEN_DWI_FIBER_TYPE_MAX = 3
TEN_FIBER_INTG_MAX = 3
TEN_FIBER_STOP_MAX = 10
TEN_FIBER_NUM_STEPS_MAX = 10240
TEN_FIBER_PARM_MAX = 4
TEN_TRIPLE_TYPE_MAX = 9
TEN_MODEL_B0_MAX = 65500 # HEY: fairly arbitrary, but is set to be
TEN_MODEL_DIFF_MAX = 0.006 # in units of mm^2/sec; diffusivity of
TEN_MODEL_PARM_GRAD_EPS = 0.000005 # for gradient calculations
TEN_MODEL_STR_ZERO = "zero"
TEN_MODEL_STR_B0 = "b0"
TEN_MODEL_STR_BALL = "ball"
TEN_MODEL_STR_1VECTOR2D = "1vector2d"
TEN_MODEL_STR_1UNIT2D = "1unit2d"
TEN_MODEL_STR_2UNIT2D = "2unit2d"
TEN_MODEL_STR_1STICK = "1stick"
TEN_MODEL_STR_BALL1STICKEMD = "ball1stickemd"
TEN_MODEL_STR_BALL1STICK = "ball1stick"
TEN_MODEL_STR_BALL1CYLINDER = "ball1cylinder"
TEN_MODEL_STR_1CYLINDER = "1cylinder"
TEN_MODEL_STR_1TENSOR2 = "1tensor2"
TEN_DWI_GAGE_KIND_NAME = "dwi"
TIJK_TYPE_MAX_NUM = 45
TIJK_CLASS_MAX = 3
UNRRDU = unrrduBiffKey
UNRRDU_COLUMNS = 78 # how many chars per line do we allow hest
# =============================================================
# Make sure this shared library will work on this machine.
# Refuse to import if the Teem shared library fails its own runtime
# sanity checks (endianness, qnan bit, type sizes, ...); the failure
# text is retrieved from biff's accumulated error stack for NRRD.
if not nrrdSanity():
    errstr = biffGetDone(NRRD)
    print "**"
    print "** Sorry, there is a problem (described below) with the "
    print "** Teem shared library that prevents its use. This will "
    print "** have to be fixed by recompiling the Teem library for "
    print "** this platform. "
    print "**"
    print "** %s" % errstr
    raise ImportError
# =============================================================
# Its nice to have these FILE*s around for utility use, but they
# aren't available in a platform-independent way in ctypes. These
# air functions were created for this purpose.
stderr = airStderr()
stdout = airStdout()
stdin = airStdin()
|
Slicer/teem
|
python/ctypes/teem.py
|
Python
|
lgpl-2.1
| 448,845
|
[
"VTK"
] |
aaf924e4f6577692285c45b34df8494c41e086836ffaa475ed5a9a9964c46664
|
# -*- coding: utf-8 -*-
import unittest
from pybel import BELGraph
from pybel.dsl import ComplexAbundance, Fragment, Protein
from pybel.dsl.namespaces import hgnc
from pybel_tools.selection.group_nodes import get_mapped_nodes
# Shared test fixtures: two human chemokine proteins, the mouse ortholog
# of CCL2, the CCL2/CCR2 receptor complex, and the FamPlex family node
# that groups the chemokines.
ccl2 = hgnc(name='CCL2')
ccr2 = hgnc(name='CCR2')
ccl2_mgi = Protein('MGI', 'Ccl2')
ccl2_ccr2_complex = ComplexAbundance([ccl2, ccr2])
chemokine_family = Protein('FPLX', 'chemokine protein family')
HGNC = 'hgnc'  # namespace keyword used by the tests below
class TestMapping(unittest.TestCase):
    """Tests for get_mapped_nodes over variants, complexes, and orthologs."""
    def test_variants_mapping(self):
        # a protein variant (fragment) maps back to its reference protein
        graph = BELGraph()
        app = Protein(HGNC, 'APP')
        app_fragment = app.with_variants(Fragment('1_49'))
        graph.add_node_from_data(app_fragment)
        mapped_nodes = get_mapped_nodes(graph, HGNC, {'APP'})
        self.assertEqual(1, len(mapped_nodes))
        self.assertIn(app, mapped_nodes)
        self.assertEqual({app_fragment}, mapped_nodes[app])
    def test_complexes_composites_mapping(self):
        # members of a complex / family map to both the complex node and
        # the family node
        g = BELGraph()
        g.add_is_a(ccl2, chemokine_family)
        g.add_is_a(ccr2, chemokine_family)
        g.add_part_of(ccl2, ccl2_ccr2_complex)
        g.add_part_of(ccr2, ccl2_ccr2_complex)
        # NOTE(review): passes the literal 'HGNC' while test_variants_mapping
        # uses the lower-case HGNC constant -- confirm which casing
        # get_mapped_nodes actually expects
        mapped_nodes = get_mapped_nodes(g, 'HGNC', {ccl2.name, ccr2.name})
        self.assertEqual(2, len(mapped_nodes))
        self.assertIn(ccl2, mapped_nodes)
        self.assertIn(ccr2, mapped_nodes)
        self.assertEqual({ccl2_ccr2_complex, chemokine_family}, mapped_nodes[ccl2])
        self.assertEqual({ccl2_ccr2_complex, chemokine_family}, mapped_nodes[ccr2])
    def test_orthologus_mapping(self):
        # a human gene maps to its mouse ortholog
        g = BELGraph()
        g.add_node_from_data(ccl2)
        g.add_node_from_data(ccl2_mgi)
        g.add_orthology(ccl2, ccl2_mgi)
        mapped_nodes = get_mapped_nodes(g, 'HGNC', {'CCL2'})
        self.assertEqual(1, len(mapped_nodes))
        self.assertIn(ccl2, mapped_nodes)
        self.assertEqual({ccl2_mgi}, mapped_nodes[ccl2])
|
pybel/pybel-tools
|
tests/test_mapping.py
|
Python
|
mit
| 1,918
|
[
"Pybel"
] |
5c8a32fb27ed11e00baad82adb06b7162e7e644b7080f060bb2796fa4728c50a
|
import numpy as np
import cv2
import core.trackers.fhog as fhog
# ffttools
def fftd(img, backwards=False):
    """Forward or inverse DFT of img via cv2.dft.

    img may be shaped (m, n), (m, n, 1) or (m, n, 2); the forward
    transform always produces a packed-complex (m, n, 2) result.
    cv2.dft was measured faster here than the numpy/scipy FFTs.
    """
    if backwards:
        flags = cv2.DFT_INVERSE | cv2.DFT_SCALE
    else:
        flags = cv2.DFT_COMPLEX_OUTPUT
    return cv2.dft(np.float32(img), flags=flags)
def real(img):
    """Real plane (channel 0) of a packed-complex (m, n, 2) image."""
    re_plane = img[:, :, 0]
    return re_plane
def imag(img):
    """Imaginary plane (channel 1) of a packed-complex (m, n, 2) image."""
    im_plane = img[:, :, 1]
    return im_plane
def complexMultiplication(a, b):
    """Element-wise complex product of two (m, n, 2) re/im-packed images."""
    a_re, a_im = a[:, :, 0], a[:, :, 1]
    b_re, b_im = b[:, :, 0], b[:, :, 1]
    res = np.zeros(a.shape, a.dtype)
    res[:, :, 0] = a_re * b_re - a_im * b_im  # real part
    res[:, :, 1] = a_re * b_im + a_im * b_re  # imaginary part
    return res
def complexDivision(a, b):
    """Element-wise complex quotient a / b of (m, n, 2) re/im-packed images.

    Computed as a * conj(b) / |b|^2.
    """
    res = np.zeros(a.shape, a.dtype)
    divisor = 1. / (b[:, :, 0]**2 + b[:, :, 1]**2)
    res[:, :, 0] = (a[:, :, 0] * b[:, :, 0] + a[:, :, 1] * b[:, :, 1]) * divisor
    # imaginary part of a * conj(b) is a1*b0 - a0*b1; the original used
    # '+', a sign error (masked in practice because the divisor passed by
    # train() is nearly real)
    res[:, :, 1] = (a[:, :, 1] * b[:, :, 0] - a[:, :, 0] * b[:, :, 1]) * divisor
    return res
def rearrange(img):
    """Swap the diagonal quadrants of a 2-D array (fftshift-like).

    Odd-sized axes are cropped to even length before the swap and the
    result is written back into an array of the original shape (any
    cropped first row/column is left zero).
    """
    # return np.fft.fftshift(img, axes=(0,1))
    assert(img.ndim == 2)
    # Fix image sizes
    original_shape = img.shape
    xh, yh = img.shape[1] // 2, img.shape[0] // 2
    img = img[img.shape[0] - yh * 2: img.shape[0], img.shape[1] - xh * 2: img.shape[1]]
    img_ = np.zeros(img.shape, img.dtype)
    # Reassignation process
    # -- -- -- --
    # | A | B | | D | C |
    # | --- --- | = | --- --- |
    # | C | D | | B | A |
    # -- -- -- --
    # Switch A and D sections
    img_[0:yh, 0:xh] = img[yh:img.shape[0], xh:img.shape[1]]
    img_[yh:img.shape[0], xh:img.shape[1]] = img[0:yh, 0:xh]
    # Switch C and B sections
    img_[0:yh, xh:img.shape[1]] = img[yh:img.shape[0], 0:xh]
    img_[yh:img.shape[0], 0:xh] = img[0:yh, xh:img.shape[1]]
    # Recovering original shape
    img_org_shape = np.zeros(original_shape, img.dtype)
    img_org_shape[
        original_shape[0] - yh * 2: original_shape[0],
        original_shape[1] - xh * 2: original_shape[1]] = img_
    return img_org_shape
# recttools
def x2(rect):
    """Right edge (x + width) of rect = [x, y, w, h]."""
    x, w = rect[0], rect[2]
    return x + w
def y2(rect):
    """Bottom edge (y + height) of rect = [x, y, w, h]."""
    y, h = rect[1], rect[3]
    return y + h
def limit(rect, limit):
    """Clip rect = [x, y, w, h] in place so it lies inside limit; return rect."""
    # shrink width/height if rect extends past the right/bottom of limit
    if rect[0] + rect[2] > limit[0] + limit[2]:
        rect[2] = limit[0] + limit[2] - rect[0]
    if rect[1] + rect[3] > limit[1] + limit[3]:
        rect[3] = limit[1] + limit[3] - rect[1]
    # pull the left/top edge inside limit, shrinking the size accordingly
    if rect[0] < limit[0]:
        rect[2] -= limit[0] - rect[0]
        rect[0] = limit[0]
    if rect[1] < limit[1]:
        rect[3] -= limit[1] - rect[1]
        rect[1] = limit[1]
    # fully-outside rects degenerate to zero size
    rect[2] = max(rect[2], 0)
    rect[3] = max(rect[3], 0)
    return rect
def getBorder(original, limited):
    """Margins [left, top, right, bottom] lost when original was clipped to limited."""
    res = [
        limited[0] - original[0],
        limited[1] - original[1],
        (original[0] + original[2]) - (limited[0] + limited[2]),
        (original[1] + original[3]) - (limited[1] + limited[3]),
    ]
    # limited must be contained in original, so no margin can be negative
    assert(np.all(np.array(res) >= 0))
    return res
def subwindow(img, window, borderType=cv2.BORDER_CONSTANT):
    """Extract window = [x, y, w, h] from img, padding out-of-image parts.

    The part of the window that falls outside img is synthesized with
    cv2.copyMakeBorder using the given borderType.
    """
    cutWindow = [x for x in window]
    limit(cutWindow, [0, 0, img.shape[1], img.shape[0]])  # modify cutWindow
    assert(cutWindow[2] > 0 and cutWindow[3] > 0)
    border = getBorder(window, cutWindow)
    res = img[cutWindow[1]:cutWindow[1] + cutWindow[3], cutWindow[0]:cutWindow[0] + cutWindow[2]]
    if(border != [0, 0, 0, 0]):
        res = cv2.copyMakeBorder(res, border[1], border[3], border[0], border[2], borderType)
    return res
# KCF tracker
class KCFTracker:
    """Kernelized Correlation Filter (KCF) tracker.

    Follows Henriques et al., "High-Speed Tracking with Kernelized
    Correlation Filters" (TPAMI 2015).  Features are either FHOG maps or
    raw gray-scale pixels; an optional three-scale search handles target
    size changes.  Usage: init(roi, frame) once, then update(frame) per
    frame; update returns the new roi [x, y, width, height].
    """

    def __init__(self, hog=False, fixed_window=True, multiscale=False):
        self.lambdar = 0.0001  # ridge-regression regularization
        self.padding = 2.5  # extra area surrounding the target
        self.output_sigma_factor = 0.125  # bandwidth of the gaussian target
        if hog:  # FHOG features
            # VOT parameter set
            self.interp_factor = 0.012  # linear interpolation factor for adaptation
            self.sigma = 0.6  # gaussian kernel bandwidth
            # TPAMI parameter set: interp_factor = 0.02, sigma = 0.5
            self.cell_size = 4  # HOG cell size
            self._hogfeatures = True
        else:  # raw gray-scale image, i.e. the CSK tracker
            self.interp_factor = 0.075
            self.sigma = 0.2
            self.cell_size = 1
            self._hogfeatures = False
        if multiscale:
            self.template_size = 96  # template size
            self.scale_step = 1.05  # scale step for multi-scale estimation
            # downweight detection scores of other scales for added stability
            self.scale_weight = 0.96
        elif fixed_window:
            self.template_size = 96
            self.scale_step = 1
        else:
            self.template_size = 1
            self.scale_step = 1
        self._tmpl_sz = [0, 0]  # template size [width, height]
        self._roi = [0., 0., 0., 0.]  # tracked region [x, y, width, height]
        self.size_patch = [0, 0, 0]  # feature patch [rows, cols, channels]
        self._scale = 1.  # ratio between padded roi and template
        self._alphaf = None  # learned dual coefficients, freq domain (rows, cols, 2)
        self._prob = None  # desired gaussian response, freq domain (rows, cols, 2)
        # learned feature template; raw: (rows, cols), hog: (channels, rows*cols)
        self._tmpl = None
        # Hanning window matching _tmpl's layout, reduces boundary effects
        self.hann = None

    def subPixelPeak(self, left, center, right):
        """Parabolic refinement of a peak from three neighbouring samples."""
        divisor = 2 * center - right - left  # float
        return (0 if abs(divisor) < 1e-3 else 0.5 * (right - left) / divisor)

    def createHanningMats(self):
        """Build the Hanning window matching the current size_patch."""
        hann2t, hann1t = np.ogrid[0:self.size_patch[0], 0:self.size_patch[1]]
        hann1t = 0.5 * (1 - np.cos(2 * np.pi * hann1t / (self.size_patch[1] - 1)))
        hann2t = 0.5 * (1 - np.cos(2 * np.pi * hann2t / (self.size_patch[0] - 1)))
        hann2d = hann2t * hann1t
        if self._hogfeatures:
            # replicate the flattened 2-D window across all feature channels
            hann1d = hann2d.reshape(self.size_patch[0] * self.size_patch[1])
            self.hann = np.zeros((self.size_patch[2], 1), np.float32) + hann1d
        else:
            self.hann = hann2d
        self.hann = self.hann.astype(np.float32)

    def createGaussianPeak(self, sizey, sizex):
        """Return the DFT of a centred 2-D gaussian regression target."""
        syh, sxh = sizey / 2, sizex / 2
        output_sigma = np.sqrt(sizex * sizey) / self.padding * self.output_sigma_factor
        mult = -0.5 / (output_sigma * output_sigma)
        y, x = np.ogrid[0:sizey, 0:sizex]
        y, x = (y - syh)**2, (x - sxh)**2
        res = np.exp(mult * (y + x))
        return fftd(res)

    def gaussianCorrelation(self, x1, x2):
        """Gaussian-kernel correlation of feature maps x1 and x2."""
        if self._hogfeatures:
            # sum per-channel circular cross-correlations in the freq domain
            c = np.zeros((self.size_patch[0], self.size_patch[1]), np.float32)
            for i in range(self.size_patch[2]):
                x1aux = x1[i, :].reshape((self.size_patch[0], self.size_patch[1]))
                x2aux = x2[i, :].reshape((self.size_patch[0], self.size_patch[1]))
                caux = cv2.mulSpectrums(fftd(x1aux), fftd(x2aux), 0, conjB=True)
                caux = real(fftd(caux, True))
                c += caux
            c = rearrange(c)
        else:
            c = cv2.mulSpectrums(fftd(x1), fftd(x2), 0, conjB=True)  # 'conjB=' is necessary!
            c = fftd(c, True)
            c = real(c)
            c = rearrange(c)
        if x1.ndim == 3 and x2.ndim == 3:
            num = (np.sum(x1[:, :, 0] * x1[:, :, 0]) + np.sum(x2[:, :, 0] * x2[:, :, 0]) - 2.0 * c)
            den = (self.size_patch[0] * self.size_patch[1] * self.size_patch[2])
            d = num / den
        elif x1.ndim == 2 and x2.ndim == 2:
            d = (np.sum(x1 * x1) + np.sum(x2 * x2) -
                 2.0 * c) / (self.size_patch[0] * self.size_patch[1] * self.size_patch[2])
        d = d * (d >= 0)  # clamp tiny negatives caused by rounding
        d = np.exp(-d / (self.sigma * self.sigma))
        return d

    def getFeatures(self, image, inithann, scale_adjust=1.0):
        """Extract the feature patch around the current roi.

        When inithann is truthy this (re)computes the template size,
        scale and Hanning window; scale_adjust != 1 samples the patch at
        a different scale during multi-scale detection.
        """
        extracted_roi = [0, 0, 0, 0]  # [int, int, int, int]
        cx = self._roi[0] + self._roi[2] / 2  # roi centre x
        cy = self._roi[1] + self._roi[3] / 2  # roi centre y
        if inithann:
            padded_w = self._roi[2] * self.padding
            padded_h = self._roi[3] * self.padding
            if self.template_size > 1:
                # shrink the larger padded side to template_size, keep aspect
                if padded_w >= padded_h:
                    self._scale = padded_w / float(self.template_size)
                else:
                    self._scale = padded_h / float(self.template_size)
                self._tmpl_sz[0] = int(padded_w / self._scale)
                self._tmpl_sz[1] = int(padded_h / self._scale)
            else:
                self._tmpl_sz[0] = int(padded_w)
                self._tmpl_sz[1] = int(padded_h)
                self._scale = 1.
            if self._hogfeatures:
                # round down to a multiple of (2 * cell_size), then add one
                # cell of padding per side; '//' keeps the rounding integral
                # (the original '/' yields floats under Python 3, silently
                # defeating the round-down)
                self._tmpl_sz[0] = int(self._tmpl_sz[0]) // (2 * self.cell_size) * \
                    2 * self.cell_size + 2 * self.cell_size
                self._tmpl_sz[1] = int(self._tmpl_sz[1]) // (2 * self.cell_size) * \
                    2 * self.cell_size + 2 * self.cell_size
            else:
                # force even dimensions
                self._tmpl_sz[0] = int(self._tmpl_sz[0]) // 2 * 2
                self._tmpl_sz[1] = int(self._tmpl_sz[1]) // 2 * 2
            self._tmpl_sz[0] = int(self._tmpl_sz[0])
            self._tmpl_sz[1] = int(self._tmpl_sz[1])
        extracted_roi[2] = int(scale_adjust * self._scale * self._tmpl_sz[0])
        extracted_roi[3] = int(scale_adjust * self._scale * self._tmpl_sz[1])
        extracted_roi[0] = int(cx - extracted_roi[2] / 2)
        extracted_roi[1] = int(cy - extracted_roi[3] / 2)
        z = subwindow(image, extracted_roi, cv2.BORDER_REPLICATE)
        if z.shape[1] != self._tmpl_sz[0] or z.shape[0] != self._tmpl_sz[1]:
            z = cv2.resize(z, tuple(self._tmpl_sz))
        if self._hogfeatures:
            mapp = {'sizeX': 0, 'sizeY': 0, 'numFeatures': 0, 'map': 0}
            mapp = fhog.getFeatureMaps(z, self.cell_size, mapp)
            mapp = fhog.normalizeAndTruncate(mapp, 0.2)
            mapp = fhog.PCAFeatureMaps(mapp)
            self.size_patch = list(map(int, [mapp['sizeY'], mapp['sizeX'], mapp['numFeatures']]))
            # layout: (channels, rows * cols)
            FeaturesMap = mapp['map'].reshape(
                (self.size_patch[0] * self.size_patch[1], self.size_patch[2])).T
        else:
            if z.ndim == 3 and z.shape[2] == 3:
                FeaturesMap = cv2.cvtColor(z, cv2.COLOR_BGR2GRAY)
            elif z.ndim == 2:
                FeaturesMap = z  # already single-channel, 0..255
            # normalize pixel values to [-0.5, 0.5]
            FeaturesMap = FeaturesMap.astype(np.float32) / 255.0 - 0.5
            self.size_patch = [z.shape[0], z.shape[1], 1]
        if inithann:
            self.createHanningMats()  # createHanningMats needs size_patch
            FeaturesMap = self.hann * FeaturesMap
        return FeaturesMap

    def detect(self, z, x):
        """Correlate template z with sample x; return (sub-pixel peak, score)."""
        k = self.gaussianCorrelation(x, z)
        res = real(fftd(complexMultiplication(self._alphaf, fftd(k)), True))
        _, pv, _, pi = cv2.minMaxLoc(res)  # pv: peak value, pi: integer peak location
        p = [float(pi[0]), float(pi[1])]
        # refine the peak to sub-pixel accuracy along each axis
        if pi[0] > 0 and pi[0] < res.shape[1] - 1:
            p[0] += self.subPixelPeak(res[pi[1], pi[0] - 1], pv, res[pi[1], pi[0] + 1])
        if pi[1] > 0 and pi[1] < res.shape[0] - 1:
            p[1] += self.subPixelPeak(res[pi[1] - 1, pi[0]], pv, res[pi[1] + 1, pi[0]])
        # express the peak as an offset from the patch centre
        p[0] -= res.shape[1] / 2.
        p[1] -= res.shape[0] / 2.
        return p, pv

    def train(self, x, train_interp_factor):
        """Blend sample x into the template and filter at the given rate."""
        k = self.gaussianCorrelation(x, x)
        alphaf = complexDivision(self._prob, fftd(k) + self.lambdar)
        self._tmpl = (1 - train_interp_factor) * self._tmpl + train_interp_factor * x
        self._alphaf = (1 - train_interp_factor) * self._alphaf + train_interp_factor * alphaf

    def init(self, roi, image):
        """Start tracking the target at roi = [x, y, w, h] within image."""
        self._roi = roi
        assert(roi[2] > 0 and roi[3] > 0)
        self._tmpl = self.getFeatures(image, 1)
        self._prob = self.createGaussianPeak(self.size_patch[0], self.size_patch[1])
        self._alphaf = np.zeros((self.size_patch[0], self.size_patch[1], 2), np.float32)
        self.train(self._tmpl, 1.0)

    def update(self, image):
        """Track the target into the next frame; return the updated roi."""
        # keep at least one pixel of the roi inside the image
        if self._roi[0] + self._roi[2] <= 0:
            self._roi[0] = -self._roi[2] + 1
        if self._roi[1] + self._roi[3] <= 0:
            # clamp with the roi *height* (the original used the width,
            # a copy-paste slip; compare the symmetric clamp further down)
            self._roi[1] = -self._roi[3] + 1
        if self._roi[0] >= image.shape[1] - 1:
            self._roi[0] = image.shape[1] - 2
        if self._roi[1] >= image.shape[0] - 1:
            self._roi[1] = image.shape[0] - 2
        cx = self._roi[0] + self._roi[2] / 2.
        cy = self._roi[1] + self._roi[3] / 2.
        loc, peak_value = self.detect(self._tmpl, self.getFeatures(image, 0, 1.0))
        if self.scale_step != 1:
            # Test at a smaller _scale
            new_loc1, new_peak_value1 = self.detect(
                self._tmpl, self.getFeatures(image, 0, 1.0 / self.scale_step))
            # Test at a bigger _scale
            new_loc2, new_peak_value2 = self.detect(
                self._tmpl, self.getFeatures(image, 0, self.scale_step))
            if(self.scale_weight * new_peak_value1 > peak_value and
                    new_peak_value1 > new_peak_value2):
                loc = new_loc1
                peak_value = new_peak_value1
                self._scale /= self.scale_step
                self._roi[2] /= self.scale_step
                self._roi[3] /= self.scale_step
            elif self.scale_weight * new_peak_value2 > peak_value:
                loc = new_loc2
                peak_value = new_peak_value2
                self._scale *= self.scale_step
                self._roi[2] *= self.scale_step
                self._roi[3] *= self.scale_step
        # convert the peak offset (in cells, at template scale) to pixels
        self._roi[0] = cx - self._roi[2] / 2.0 + loc[0] * self.cell_size * self._scale
        self._roi[1] = cy - self._roi[3] / 2.0 + loc[1] * self.cell_size * self._scale
        # clamp the updated roi back into the image
        if self._roi[0] >= image.shape[1] - 1:
            self._roi[0] = image.shape[1] - 1
        if self._roi[1] >= image.shape[0] - 1:
            self._roi[1] = image.shape[0] - 1
        if self._roi[0] + self._roi[2] <= 0:
            self._roi[0] = -self._roi[2] + 2
        if self._roi[1] + self._roi[3] <= 0:
            self._roi[1] = -self._roi[3] + 2
        assert(self._roi[2] > 0 and self._roi[3] > 0)
        x = self.getFeatures(image, 0, 1.0)
        self.train(x, self.interp_factor)
        return self._roi
|
asmateus/flight-stone
|
fstone/director/core/trackers/kcftracker.py
|
Python
|
mit
| 14,721
|
[
"Gaussian"
] |
30709caf1568ee6bd55bb0273acb81354a553d1bd731a8c2714428e4726c7c57
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
import numpy as np
from MDAnalysis.coordinates.base import (
Timestep,
SingleFrameReaderBase,
ReaderBase
)
from numpy.testing import assert_equal, assert_raises
"""
Isolate the API definitions of Readers independent of implementations
"""
class AmazingMultiFrameReader(ReaderBase):
    """Minimal multi-frame Reader stub: 10 frames of 10 atoms, no file I/O."""
    format = 'AmazingMulti'
    def __init__(self, filename, **kwargs):
        self.filename = filename
        self.n_frames = 10
        self.n_atoms = 10
        self._auxs = {}
        # ts isn't a real timestep, but just an integer
        # whose value represents the frame number (0 based)
        self.ts = Timestep(self.n_atoms)
        self.ts.frame = -1
        self._read_next_timestep()
    def _read_next_timestep(self):
        # advance one frame; IOError past the last frame signals the base
        # class to stop iteration
        self.ts.frame += 1
        if (self.ts.frame + 1) > self.n_frames:
            raise IOError
        else:
            return self.ts
    def _read_frame(self, frame):
        # random access: simply record the requested frame number
        self.ts.frame = frame
        return self.ts
    def _reopen(self):
        # rewind so the next _read_next_timestep() yields frame 0
        self.ts.frame = -1
class AmazingReader(SingleFrameReaderBase):
    """Minimal single-frame Reader stub: one frame of 10 atoms, no file I/O."""
    format = 'Amazing'
    # have to hack this in to get the base class to "work"
    def _read_first_frame(self):
        self.n_atoms = 10
        self.ts = Timestep(self.n_atoms)
        self.ts.frame = 0
class _TestReader(object):
    """Basic API readers"""
    # mixin base; subclasses provide readerclass and n_frames
    def setUp(self):
        self.reader = self.readerclass('test.txt')
        self.ts = self.reader.ts
    def test_required_attributes(self):
        """Test that Reader has the required attributes"""
        for attr in ['filename', 'n_atoms', 'n_frames', 'ts',
                     'units', 'format']:
            assert_equal(hasattr(self.reader, attr), True,
                         "Missing attr: {0}".format(attr))
    def test_iter(self):
        # iterating the reader visits every frame exactly once
        l = [ts for ts in self.reader]
        assert_equal(len(l), self.n_frames)
    def test_close(self):
        sfr = self.readerclass('text.txt')
        ret = sfr.close()
        # Check that method works?
        assert_equal(ret, None)
    def test_rewind(self):
        ret = self.reader.rewind()
        assert_equal(ret, None)
        assert_equal(self.reader.ts.frame, 0)
    def test_context(self):
        # the reader must work as a context manager
        with self.readerclass('text.txt') as sfr:
            l = sfr.ts.frame
        assert_equal(l, 0)
    def test_len(self):
        l = len(self.reader)
        assert_equal(l, self.n_frames)
    def test_raises_StopIteration(self):
        # after consuming the final frame, next() must raise StopIteration
        self.reader[-1]
        assert_raises(StopIteration, next, self.reader)
class _Multi(_TestReader):
    # fixture parameters for the multi-frame reader tests
    n_frames = 10
    n_atoms = 10
    readerclass = AmazingMultiFrameReader
    reference = np.arange(10)  # expected frame numbers, in order
class TestMultiFrameReader(_Multi):
    """Slicing and fancy-indexing behaviour of a multi-frame Reader."""
    def _check_slice(self, start, stop, step):
        """Compare the slice applied to trajectory, to slice of list"""
        res = [ts.frame for ts in self.reader[start:stop:step]]
        ref = self.reference[start:stop:step]
        assert_equal(res, ref)
    def test_slices(self):
        # nose-style generator test: each tuple is one slicing scenario
        for start, stop, step in [
                (None, None, None),  # blank slice
                (None, 5, None),  # set end point
                (2, None, None),  # set start point
                (2, 5, None),  # start & end
                (None, None, 2),  # set skip
                (None, None, -1),  # backwards skip
                (0, 10, 1),
                (0, 10, 2),
                (None, 20, None),  # end beyond real end
                (None, 20, 2),  # with skip
                (0, 5, 2),
                (5, None, -1),
                (None, 5, -1),
                (100, 10, 1),
                (-10, None, 1),
                (100, None, -1),  # beyond real end
                (100, 5, -20),
                (5, 1, 1),  # Stop less than start
                (1, 5, -1),  # Stop less than start
                (-100, None, None),
                (100, None, None),  # Outside of range of trajectory
                (-2, 10, -2)
        ]:
            yield self._check_slice, start, stop, step
    def test_slice_VE_1(self):
        # a zero step is invalid
        def sl():
            return list(self.reader[::0])
        assert_raises(ValueError, sl)
    def test_slice_TE_1(self):
        # float slice indices are invalid
        def sl():
            return list(self.reader[1.2:2.5:0.1])
        assert_raises(TypeError, sl)
    def _check_getitem(self, sl):
        # fancy indexing of the reader must match numpy indexing of reference
        res = [ts.frame for ts in self.reader[sl]]
        sl = np.asarray(sl)
        ref = self.reference[sl]
        assert_equal(res, ref)
    def test_getitem_list_ints(self):
        for sl in (
                [0, 1, 4, 5],
                np.array([0, 1, 4, 5]),
                [5, 1, 6, 2, 7, 3, 8],
                np.array([5, 1, 6, 2, 7, 3, 8]),
                [0, 1, 1, 1, 0, 0, 2, 3, 4],
                np.array([0, 1, 1, 1, 0, 0, 2, 3, 4]),
        ):
            yield self._check_getitem, sl
    def test_list_TE(self):
        # mixed-type index lists are invalid
        def sl():
            return list(self.reader[[0, 'a', 5, 6]])
        assert_raises(TypeError, sl)
    def test_array_TE(self):
        # float index arrays are invalid
        def sl():
            return list(self.reader[np.array([1.2, 3.4, 5.6])])
        assert_raises(TypeError, sl)
    def test_bool_slice(self):
        t = True
        f = False
        for sl in (
                [t, f, t, f, t, f, t, f, t, f],
                [t, t, f, f, t, t, f, t, f, t],
                [t, t, t, t, t, t, t, t, t, t],
                [f, f, f, f, f, f, f, f, f, f],
        ):
            yield self._check_getitem, sl
            # np.bool was only an alias of the builtin bool and was removed
            # in NumPy 1.24; the builtin is the supported, identical spelling
            yield self._check_getitem, np.array(sl, dtype=bool)
class _Single(_TestReader):
    # fixture parameters for the single-frame reader tests
    n_frames = 1
    n_atoms = 10
    readerclass = AmazingReader
class TestSingleFrameReader(_Single):
    """Indexing / iteration behaviour of a single-frame Reader."""
    def test_next(self):
        # a single-frame reader has no next frame
        assert_raises(StopIteration, self.reader.next)
    # Getitem tests
    # only 0 & -1 should work
    # others should get IndexError
    def _check_get_results(self, l):
        # l must contain exactly the one (and only) timestep
        assert_equal(len(l), 1)
        assert_equal(self.ts in l, True)
    def test_getitem(self):
        fr = [self.reader[0]]
        self._check_get_results(fr)
    def test_getitem_2(self):
        fr = [self.reader[-1]]
        self._check_get_results(fr)
    def test_getitem_IE(self):
        assert_raises(IndexError, self.reader.__getitem__, 1)
    def test_getitem_IE_2(self):
        assert_raises(IndexError, self.reader.__getitem__, -2)
    # Slicing should still work!
    def test_slice_1(self):
        l = list(self.reader[::])
        self._check_get_results(l)
    def test_slice_2(self):
        l = list(self.reader[::-1])
        self._check_get_results(l)
    def test_reopen(self):
        self.reader._reopen()
        assert_equal(self.ts.frame, 0)
    def test_rewind(self):
        self.reader.rewind()
        assert_equal(self.ts.frame, 0)
    def test_read_frame(self):
        # random access beyond frame 0 is an IndexError
        assert_raises(IndexError, self.reader._read_frame, 1)
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_reader_api.py
|
Python
|
gpl-2.0
| 7,820
|
[
"MDAnalysis"
] |
f73030904afdb37584bbee1c8a141bb0c54d78a60c039b6386d8bcf918fe902c
|
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""classes checker for Python code
"""
from __future__ import generators
from logilab import astng
from logilab.astng import YES, Instance, are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import PYMETHODS, overrides_a_method
def class_is_abstract(node):
    """return true if the given class node should be considered as an abstract
    class
    """
    # a class is abstract when it *defines* (not merely inherits) at least
    # one abstract method
    return any(method.is_abstract(pass_is_abstract=False)
               for method in node.methods()
               if method.parent.frame() is node)
MSGS = {
'F0202': ('Unable to check methods signature (%s / %s)',
'Used when PyLint has been unable to check methods signature \
compatibility for an unexpected reason. Please report this kind \
if you don\'t make sense of it.'),
'E0202': ('An attribute inherited from %s hide this method',
'Used when a class defines a method which is hidden by an \
instance attribute from an ancestor class.'),
'E0203': ('Access to member %r before its definition line %s',
'Used when an instance member is accessed before it\'s actually\
assigned.'),
'W0201': ('Attribute %r defined outside __init__',
'Used when an instance attribute is defined outside the __init__\
method.'),
'W0212': ('Access to a protected member %s of a client class', # E0214
'Used when a protected member (i.e. class member with a name \
beginning with an underscore) is access outside the class or a \
descendant of the class where it\'s defined.'),
'E0211': ('Method has no argument',
'Used when a method which should have the bound instance as \
first argument has no argument defined.'),
'E0213': ('Method should have "self" as first argument',
'Used when a method has an attribute different the "self" as\
first argument. This is considered as an error since this is\
a so common convention that you shouldn\'t break it!'),
'C0202': ('Class method should have "cls" as first argument', # E0212
'Used when a class method has an attribute different than "cls"\
as first argument, to easily differentiate them from regular \
instance methods.'),
'C0203': ('Metaclass method should have "mcs" as first argument', # E0214
'Used when a metaclass method has an attribute different the \
"mcs" as first argument.'),
'W0211': ('Static method with %r as first argument',
'Used when a static method has "self" or "cls" as first argument.'
),
'R0201': ('Method could be a function',
'Used when a method doesn\'t use its bound instance, and so could\
be written as a function.'
),
'E0221': ('Interface resolved to %s is not a class',
'Used when a class claims to implement an interface which is not \
a class.'),
'E0222': ('Missing method %r from %s interface' ,
'Used when a method declared in an interface is missing from a \
class implementing this interface'),
'W0221': ('Arguments number differs from %s method',
'Used when a method has a different number of arguments than in \
the implemented interface or in an overridden method.'),
'W0222': ('Signature differs from %s method',
'Used when a method signature is different than in the \
implemented interface or in an overridden method.'),
'W0223': ('Method %r is abstract in class %r but is not overridden',
'Used when an abstract method (i.e. raise NotImplementedError) is \
not overridden in concrete class.'
),
'F0220': ('failed to resolve interfaces implemented by %s (%s)', # W0224
'Used when a PyLint as failed to find interfaces implemented by \
a class'),
'W0231': ('__init__ method from base class %r is not called',
'Used when an ancestor class method has an __init__ method \
which is not called by a derived class.'),
'W0232': ('Class has no __init__ method',
'Used when a class has no __init__ method, neither its parent \
classes.'),
'W0233': ('__init__ method from a non direct base class %r is called',
'Used when an __init__ method is called on a class which is not \
in the direct ancestors for the analysed class.'),
}
class ClassChecker(BaseChecker):
    """checks for :
    * methods without self as first argument
    * overridden methods signature
    * access only to existent members via self
    * attributes not defined in the __init__ method
    * supported interfaces implementation
    * unreachable code
    """
    __implements__ = (IASTNGChecker,)
    # configuration section name
    name = 'classes'
    # messages
    msgs = MSGS
    priority = -2
    # configuration options
    options = (('ignore-iface-methods',
                {'default' : (#zope interface
        'isImplementedBy', 'deferred', 'extends', 'names',
        'namesAndDescriptions', 'queryDescriptionFor',  'getBases',
        'getDescriptionFor', 'getDoc', 'getName', 'getTaggedValue',
        'getTaggedValueTags', 'isEqualOrExtendedBy', 'setTaggedValue',
        'isImplementedByInstancesOf',
        # twisted
        'adaptWith',
        # logilab.common interface
        'is_implemented_by'),
                 'type' : 'csv',
                 'metavar' : '<method names>',
                 'help' : 'List of interface methods to ignore, \
separated by a comma. This is used for instance to not check methods defines \
in Zope\'s Interface base class.'}
                ),
               ('defining-attr-methods',
                {'default' : ('__init__', '__new__', 'setUp'),
                 'type' : 'csv',
                 'metavar' : '<method names>',
                 'help' : 'List of method names used to declare (i.e. assign) \
instance attributes.'}
                ),
               )
    def __init__(self, linter=None):
        BaseChecker.__init__(self, linter)
        # stack of {attrname: [access nodes]} dicts, one per nested class
        self._accessed = []
        # stack of first-argument names of the enclosing methods
        self._first_attrs = []
        self._meth_could_be_func = None
    def visit_class(self, node):
        """init visit variable _accessed and check interfaces
        """
        self._accessed.append({})
        self._check_bases_classes(node)
        self._check_interfaces(node)
        # if not an interface, exception, metaclass
        if node.type == 'class':
            try:
                node.local_attr('__init__')
            except astng.NotFoundError:
                self.add_message('W0232', args=node, node=node)
    def leave_class(self, cnode):
        """close a class node:
        check that instance attributes are defined in __init__ and check
        access to existent members
        """
        # checks attributes are defined in an allowed method such as __init__
        defining_methods = self.config.defining_attr_methods
        for attr, nodes in cnode.instance_attrs.items():
            # skip pure deletions/augmented assignments; they can't "define"
            nodes = [n for n in nodes if not
                     isinstance(n.statement(), (astng.Delete, astng.AugAssign))]
            if not nodes:
                continue # error detected by typechecking
            node = nodes[0] # XXX
            frame = node.frame()
            if frame.name not in defining_methods:
                # check attribute is defined in a parent's __init__
                for parent in cnode.instance_attr_ancestors(attr):
                    frame = parent.instance_attrs[attr][0].frame() # XXX
                    if frame.name in defining_methods:
                        # we're done :)
                        break
                else:
                    # check attribute is defined as a class attribute
                    try:
                        cnode.local_attr(attr)
                    except astng.NotFoundError:
                        self.add_message('W0201', args=attr, node=node)
        # check access to existent members on non metaclass classes
        accessed = self._accessed.pop()
        if cnode.type != 'metaclass':
            self._check_accessed_members(cnode, accessed)
    def visit_function(self, node):
        """check method arguments, overriding"""
        # ignore actual functions
        if not node.is_method():
            return
        klass = node.parent.frame()
        self._meth_could_be_func = True
        # check first argument is self if this is actually a method
        self._check_first_arg_for_type(node, klass.type == 'metaclass')
        if node.name == '__init__':
            self._check_init(node)
            return
        # check signature if the method overloads inherited method
        for overridden in klass.local_attr_ancestors(node.name):
            # get astng for the searched method
            try:
                meth_node = overridden[node.name]
            except KeyError:
                # we have found the method but it's not in the local
                # dictionary.
                # This may happen with astng build from living objects
                continue
            if not isinstance(meth_node, astng.Function):
                continue
            self._check_signature(node, meth_node, 'overridden')
            break
        # check if the method overload an attribute
        try:
            overridden = klass.instance_attr(node.name)[0] # XXX
            # we may be unable to get owner class if this is a monkey
            # patched method
            while overridden.parent and not isinstance(overridden, astng.Class):
                overridden = overridden.parent.frame()
            self.add_message('E0202', args=overridden.name, node=node)
        except astng.NotFoundError:
            pass
    def leave_function(self, node):
        """on method node, check if this method couldn't be a function
        ignore class, static and abstract methods, initializer,
        methods overridden from a parent class and any
        kind of method defined in an interface for this warning
        """
        if node.is_method():
            if node.args.args is not None:
                self._first_attrs.pop()
            class_node = node.parent.frame()
            # R0201 only for plain, non-special, non-overriding, concrete
            # methods that never touched their bound instance
            if (self._meth_could_be_func and node.type == 'method'
                and not node.name in PYMETHODS
                and not (node.is_abstract() or
                         overrides_a_method(class_node, node.name))
                and class_node.type != 'interface'):
                self.add_message('R0201', node=node)
    def visit_getattr(self, node):
        """check if the getattr is an access to a class member
        if so, register it. Also check for access to protected
        class member from outside its class (but ignore __special__
        methods)
        """
        attrname = node.attrname
        if self._first_attrs and isinstance(node.expr, astng.Name) and \
               node.expr.name == self._first_attrs[-1]:
            # attribute access through self/cls: record it for later checks
            self._accessed[-1].setdefault(attrname, []).append(node)
        elif attrname[0] == '_' and not attrname == '_' and not (
             attrname.startswith('__') and attrname.endswith('__')):
            # XXX move this in a reusable function
            klass = node.frame()
            while klass is not None and not isinstance(klass, astng.Class):
                if klass.parent is None:
                    klass = None
                else:
                    klass = klass.parent.frame()
            # XXX infer to be more safe and less dirty ??
            # in classes, check we are not getting a parent method
            # through the class object or through super
            callee = node.expr.as_string()
            if klass is None or not (callee == klass.name or
                                     callee in klass.basenames
                                     or (isinstance(node.expr, astng.CallFunc)
                                         and isinstance(node.expr.func, astng.Name)
                                         and node.expr.func.name == 'super')):
                self.add_message('W0212', node=node, args=attrname)
    def visit_name(self, node):
        """check if the name handle an access to a class member
        if so, register it
        """
        if self._first_attrs and (node.name == self._first_attrs[-1] or
                                  not self._first_attrs[-1]):
            self._meth_could_be_func = False
    def _check_accessed_members(self, node, accessed):
        """check that accessed members are defined"""
        # XXX refactor, probably much simpler now that E0201 is in type checker
        for attr, nodes in accessed.items():
            # deactivate "except doesn't do anything", that's expected
            # pylint: disable-msg=W0704
            # is it a class attribute ?
            try:
                node.local_attr(attr)
                # yes, stop here
                continue
            except astng.NotFoundError:
                pass
            # is it an instance attribute of a parent class ?
            try:
                node.instance_attr_ancestors(attr).next()
                # yes, stop here
                continue
            except StopIteration:
                pass
            # is it an instance attribute ?
            try:
                defstmts = node.instance_attr(attr)
            except astng.NotFoundError:
                pass
            else:
                if len(defstmts) == 1:
                    defstmt = defstmts[0]
                    # check that if the node is accessed in the same method as
                    # it's defined, it's accessed after the initial assignment
                    frame = defstmt.frame()
                    lno = defstmt.fromlineno
                    for _node in nodes:
                        if _node.frame() is frame and _node.fromlineno < lno \
                           and not are_exclusive(_node.statement(), defstmt, ('AttributeError', 'Exception', 'BaseException')):
                            self.add_message('E0203', node=_node,
                                             args=(attr, lno))
    def _check_first_arg_for_type(self, node, metaclass=0):
        """check the name of first argument, expect:
        * 'self' for a regular method
        * 'cls' for a class method
        * 'mcs' for a metaclass
        * not one of the above for a static method
        """
        # don't care about functions with unknown argument (builtins)
        if node.args.args is None:
            return
        first_arg = node.args.args and node.argnames()[0]
        self._first_attrs.append(first_arg)
        first = self._first_attrs[-1]
        # static method
        if node.type == 'staticmethod':
            if first_arg in ('self', 'cls', 'mcs'):
                self.add_message('W0211', args=first, node=node)
            self._first_attrs[-1] = None
        # class / regular method with no args
        elif not node.args.args:
            self.add_message('E0211', node=node)
        # metaclass method
        elif metaclass:
            if first != 'mcs':
                self.add_message('C0203', node=node)
        # class method
        elif node.type == 'classmethod':
            if first != 'cls':
                self.add_message('C0202', node=node)
        # regular method without self as argument
        elif first != 'self':
            self.add_message('E0213', node=node)
    def _check_bases_classes(self, node):
        """check that the given class node implements abstract methods from
        base classes
        """
        # check if this class abstract
        if class_is_abstract(node):
            return
        for method in node.methods():
            owner = method.parent.frame()
            if owner is node:
                continue
            # owner is not this class, it must be a parent class
            # check that the ancestor's method is not abstract
            if method.is_abstract(pass_is_abstract=False):
                self.add_message('W0223', node=node,
                                 args=(method.name, owner.name))
    def _check_interfaces(self, node):
        """check that the given class node really implements declared
        interfaces
        """
        e0221_hack = [False]
        def iface_handler(obj):
            """filter interface objects, it should be classes"""
            if not isinstance(obj, astng.Class):
                e0221_hack[0] = True
                self.add_message('E0221', node=node,
                                 args=(obj.as_string(),))
                return False
            return True
        ignore_iface_methods = self.config.ignore_iface_methods
        try:
            for iface in node.interfaces(handler_func=iface_handler):
                for imethod in iface.methods():
                    name = imethod.name
                    if name.startswith('_') or name in ignore_iface_methods:
                        # don't check method beginning with an underscore,
                        # usually belonging to the interface implementation
                        continue
                    # get class method astng
                    try:
                        method = node_method(node, name)
                    except astng.NotFoundError:
                        self.add_message('E0222', args=(name, iface.name),
                                         node=node)
                        continue
                    # ignore inherited methods
                    if method.parent.frame() is not node:
                        continue
                    # check signature
                    self._check_signature(method, imethod,
                                         '%s interface' % iface.name)
        except astng.InferenceError:
            if e0221_hack[0]:
                return
            implements = Instance(node).getattr('__implements__')[0]
            assignment = implements.parent
            assert isinstance(assignment, astng.Assign)
            # assignment.expr can be a Name or a Tuple or whatever.
            # Use as_string() for the message
            # FIXME: in case of multiple interfaces, find which one could not
            # be resolved
            self.add_message('F0220', node=implements,
                             args=(node.name, assignment.value.as_string()))
    def _check_init(self, node):
        """check that the __init__ method call super or ancestors'__init__
        method
        """
        klass_node = node.parent.frame()
        to_call = _ancestors_to_call(klass_node)
        not_called_yet = dict(to_call)
        for stmt in node.nodes_of_class(astng.CallFunc):
            expr = stmt.func
            if not isinstance(expr, astng.Getattr) \
                   or expr.attrname != '__init__':
                continue
            # skip the test if using super
            if isinstance(expr.expr, astng.CallFunc) and \
               isinstance(expr.expr.func, astng.Name) and \
               expr.expr.func.name == 'super':
                return
            try:
                klass = expr.expr.infer().next()
                if klass is YES:
                    continue
                try:
                    del not_called_yet[klass]
                except KeyError:
                    if klass not in to_call:
                        self.add_message('W0233', node=expr, args=klass.name)
            except astng.InferenceError:
                continue
        for klass in not_called_yet.keys():
            if klass.name == 'object':
                continue
            self.add_message('W0231', args=klass.name, node=node)
    def _check_signature(self, method1, refmethod, class_type):
        """check that the signature of the two given methods match
        class_type is in 'class', 'interface'
        """
        if not (isinstance(method1, astng.Function)
                and isinstance(refmethod, astng.Function)):
            self.add_message('F0202', args=(method1, refmethod), node=method1)
            return
        # don't care about functions with unknown argument (builtins)
        if method1.args.args is None or refmethod.args.args is None:
            return
        if len(method1.args.args) != len(refmethod.args.args):
            self.add_message('W0221', args=class_type, node=method1)
        elif len(method1.args.defaults) < len(refmethod.args.defaults):
            self.add_message('W0222', args=class_type, node=method1)
def _ancestors_to_call(klass_node, method='__init__'):
    """Return a dict keyed by the direct base classes of *klass_node* that
    locally define *method*, i.e. the ancestors whose method should/may be
    called from the corresponding method of the analysed class.
    """
    result = {}
    for base in klass_node.ancestors(recurs=False):
        try:
            base.local_attr(method)
        except astng.NotFoundError:
            continue
        result[base] = 1
    return result
def node_method(node, method_name):
    """Return the astng Function named *method_name* on class *node*.

    Raises astng.NotFoundError when the class defines no Function with
    that name locally.
    """
    matches = [child for child in node.local_attr(method_name)
               if isinstance(child, astng.Function)]
    if matches:
        return matches[0]
    raise astng.NotFoundError(method_name)
def register(linter):
    """required method to auto register this checker """
    checker = ClassChecker(linter)
    linter.register_checker(checker)
|
dbbhattacharya/kitsune
|
vendor/packages/pylint/checkers/classes.py
|
Python
|
bsd-3-clause
| 22,469
|
[
"VisIt"
] |
d25c8ab47f31b0e6c27af0a7ebdfe6ba0d34e8d7a5d82590b5580672da074696
|
"""Manage IPython.parallel clusters in the notebook.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from tornado import web
from zmq.eventloop import ioloop
from IPython.config.configurable import LoggingConfigurable
from IPython.config.loader import load_pyconfig_files
from IPython.utils.traitlets import Dict, Instance, CFloat
from IPython.parallel.apps.ipclusterapp import IPClusterStart
from IPython.core.profileapp import list_profiles_in
from IPython.core.profiledir import ProfileDir
from IPython.utils.path import get_ipython_dir
from IPython.utils.sysinfo import num_cpus
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class DummyIPClusterStart(IPClusterStart):
    """Dummy subclass to skip init steps that conflict with global app.
    Instantiating and initializing this class should result in fully configured
    launchers, but no other side effects or state.
    """
    def init_signal(self):
        # no-op: the hosting application already owns signal handling
        pass
    def reinit_logging(self):
        # no-op: keep the hosting application's logging configuration
        pass
class ClusterManager(LoggingConfigurable):
    """Track and control IPython.parallel clusters, one per profile.

    Each entry in ``profiles`` maps a profile name to a dict with keys
    'profile', 'profile_dir', 'status', and, while a cluster is running,
    'controller_launcher', 'engine_set_launcher' and 'n'.
    """
    profiles = Dict()
    # seconds to wait after the controller starts before launching engines
    delay = CFloat(1., config=True,
        help="delay (in s) between starting the controller and the engines")
    loop = Instance('zmq.eventloop.ioloop.IOLoop')
    def _loop_default(self):
        from zmq.eventloop.ioloop import IOLoop
        return IOLoop.instance()
    def build_launchers(self, profile_dir):
        """Return (controller_launcher, engine_set_launcher, default_n)
        configured from the given profile directory."""
        starter = DummyIPClusterStart(log=self.log)
        starter.initialize(['--profile-dir', profile_dir])
        cl = starter.controller_launcher
        esl = starter.engine_launcher
        n = starter.n
        return cl, esl, n
    def get_profile_dir(self, name, path):
        """Return the on-disk location of profile *name* under *path*."""
        p = ProfileDir.find_profile_dir_by_name(path,name=name)
        return p.location
    def update_profiles(self):
        """List all profiles in the ipython_dir and cwd.
        """
        for path in [get_ipython_dir(), os.getcwdu()]:
            for profile in list_profiles_in(path):
                pd = self.get_profile_dir(profile, path)
                if profile not in self.profiles:
                    self.log.debug("Adding cluster profile '%s'" % profile)
                    self.profiles[profile] = {
                        'profile': profile,
                        'profile_dir': pd,
                        'status': 'stopped'
                    }
    def list_profiles(self):
        # refresh from disk, then return one info dict per profile,
        # sorted by profile name
        self.update_profiles()
        result = [self.profile_info(p) for p in sorted(self.profiles.keys())]
        return result
    def check_profile(self, profile):
        # raise 404 for unknown profiles so web handlers can pass it through
        if profile not in self.profiles:
            raise web.HTTPError(404, u'profile not found')
    def profile_info(self, profile):
        """Return a JSON-safe summary dict for *profile* (no launcher objects)."""
        self.check_profile(profile)
        result = {}
        data = self.profiles.get(profile)
        result['profile'] = profile
        result['profile_dir'] = data['profile_dir']
        result['status'] = data['status']
        if 'n' in data:
            result['n'] = data['n']
        return result
    def start_cluster(self, profile, n=None):
        """Start a cluster for a given profile."""
        self.check_profile(profile)
        data = self.profiles[profile]
        if data['status'] == 'running':
            raise web.HTTPError(409, u'cluster already running')
        cl, esl, default_n = self.build_launchers(data['profile_dir'])
        n = n if n is not None else default_n
        def clean_data():
            # drop launcher state and mark the profile stopped
            data.pop('controller_launcher',None)
            data.pop('engine_set_launcher',None)
            data.pop('n',None)
            data['status'] = 'stopped'
        def engines_stopped(r):
            self.log.debug('Engines stopped')
            if cl.running:
                cl.stop()
            clean_data()
        esl.on_stop(engines_stopped)
        def controller_stopped(r):
            self.log.debug('Controller stopped')
            if esl.running:
                esl.stop()
            clean_data()
        cl.on_stop(controller_stopped)
        # start the controller immediately, the engines after `delay` seconds
        dc = ioloop.DelayedCallback(lambda: cl.start(), 0, self.loop)
        dc.start()
        dc = ioloop.DelayedCallback(lambda: esl.start(n), 1000*self.delay, self.loop)
        dc.start()
        self.log.debug('Cluster started')
        data['controller_launcher'] = cl
        data['engine_set_launcher'] = esl
        data['n'] = n
        data['status'] = 'running'
        return self.profile_info(profile)
    def stop_cluster(self, profile):
        """Stop a cluster for a given profile."""
        self.check_profile(profile)
        data = self.profiles[profile]
        if data['status'] == 'stopped':
            raise web.HTTPError(409, u'cluster not running')
        data = self.profiles[profile]
        cl = data['controller_launcher']
        esl = data['engine_set_launcher']
        if cl.running:
            cl.stop()
        if esl.running:
            esl.stop()
        # Return a temp info dict, the real one is updated in the on_stop
        # logic above.
        result = {
            'profile': data['profile'],
            'profile_dir': data['profile_dir'],
            'status': 'stopped'
        }
        return result
    def stop_all_clusters(self):
        # stop_cluster raises 409 for stopped clusters; presumably all
        # profiles here are running when this is called -- TODO confirm
        for p in self.profiles.keys():
            self.stop_cluster(p)
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/clustermanager.py
|
Python
|
lgpl-3.0
| 5,867
|
[
"Brian"
] |
c0adb9e9e6438dc18fdaac3074adaf9583ea917e1df2d10b31814a67f919ca97
|
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides an abtract class for working with speech servers.
A speech server (class SpeechServer) provides the ability to tell the
machine to speak. Each speech server provides a set of known
voices (identified by name) which can be combined with various
attributes to create aural style sheets."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import logging
from . import settings
from . import orca_state
log = logging.getLogger("speech")
from . import debug
from .acss import ACSS
class VoiceFamily(dict):
    """Holds the family description for a voice."""
    NAME = "name"
    GENDER = "gender"
    LOCALE = "locale"
    DIALECT = "dialect"
    MALE = "male"
    FEMALE = "female"
    # Defaults applied to every family before caller-supplied overrides.
    settings = {
        NAME   : None,
        GENDER : None,
        LOCALE : None,
        DIALECT: None,
    }
    def __init__(self, props):
        """Initialize with the default settings, then overlay *props*."""
        super(VoiceFamily, self).__init__()
        self.update(VoiceFamily.settings)
        self.update(props or {})
class SayAllContext:
    # Progress states reported to the SayAll callback handler.
    PROGRESS = 0
    INTERRUPTED = 1
    COMPLETED = 2
    def __init__(self, obj, utterance, startOffset=-1, endOffset=-1):
        """Create a context describing one utterance of a SayAll operation.

        If *obj* has an accessible text specialization, startOffset and
        endOffset (when >= 0) locate the utterance within that text;
        otherwise they are meaningless.

        Arguments:
        -obj: the Accessible being spoken
        -utterance: the actual utterance being spoken
        -startOffset: the start offset of the Accessible's text
        -endOffset: the end offset of the Accessible's text
        """
        self.obj, self.utterance = obj, utterance
        # currentOffset tracks progress and begins at the start offset
        self.startOffset = self.currentOffset = startOffset
        self.endOffset = endOffset
class SpeechServer(object):
    """Provides speech server abstraction."""
    # NOTE: subclasses override most of the pass-bodied methods below; this
    # base class defines the interface and a few shared behaviors.
    def getFactoryName():
        """Returns a localized name describing this factory."""
        pass
    getFactoryName = staticmethod(getFactoryName)
    def getSpeechServers():
        """Gets available speech servers as a list.  The caller
        is responsible for calling the shutdown() method of each
        speech server returned.
        """
        pass
    getSpeechServers = staticmethod(getSpeechServers)
    def getSpeechServer(info):
        """Gets a given SpeechServer based upon the info.
        See SpeechServer.getInfo() for more info.
        """
        pass
    getSpeechServer = staticmethod(getSpeechServer)
    def shutdownActiveServers():
        """Cleans up and shuts down this factory.
        """
        pass
    shutdownActiveServers = staticmethod(shutdownActiveServers)
    def __init__(self):
        pass
    def getInfo(self):
        """Returns [name, id]
        """
        pass
    def getVoiceFamilies(self):
        """Returns a list of VoiceFamily instances representing all
        voice families known by the speech server."""
        pass
    def queueText(self, text="", acss=None):
        """Adds the text to the queue.
        Arguments:
        - text: text to be spoken
        - acss: acss.ACSS instance; if None,
                the default voice settings will be used.
                Otherwise, the acss settings will be
                used to augment/override the default
                voice settings.
        Output is produced by the next call to speak.
        """
        pass
    def queueTone(self, pitch=440, duration=50):
        """Adds a tone to the queue.
        Output is produced by the next call to speak.
        """
        pass
    def queueSilence(self, duration=50):
        """Adds silence to the queue.
        Output is produced by the next call to speak.
        """
        pass
    def speakCharacter(self, character, acss=None):
        """Speaks a single character immediately.
        Arguments:
        - character: text to be spoken
        - acss: acss.ACSS instance; if None,
                the default voice settings will be used.
                Otherwise, the acss settings will be
                used to augment/override the default
                voice settings.
        """
        pass
    def speakKeyEvent(self, event):
        """Speaks a key event immediately.
        Arguments:
        - event: the input_event.KeyboardEvent.
        """
        # uppercase printable keys get the dedicated "uppercase" voice
        if event.isPrintableKey() and event.event_string.isupper():
            voice = ACSS(settings.voices[settings.UPPERCASE_VOICE])
        else:
            voice = ACSS(settings.voices[settings.DEFAULT_VOICE])
        event_string = event.getKeyName()
        # apply the user's pronunciation dictionary when available
        if orca_state.activeScript and orca_state.usePronunciationDictionary:
            event_string = orca_state.activeScript.\
                utilities.adjustForPronunciation(event_string)
        lockingStateString = event.getLockingStateString()
        event_string = "%s %s" % (event_string, lockingStateString)
        # mirror the utterance into both the debug log and the speech log
        logLine = "SPEECH OUTPUT: '" + event_string +"'"
        debug.println(debug.LEVEL_INFO, logLine)
        log.info(logLine)
        self.speak(event_string, acss=voice)
    def speakUtterances(self, utteranceList, acss=None, interrupt=True):
        """Speaks the given list of utterances immediately.
        Arguments:
        - utteranceList: list of strings to be spoken
        - acss:      acss.ACSS instance; if None,
                     the default voice settings will be used.
                     Otherwise, the acss settings will be
                     used to augment/override the default
                     voice settings.
        - interrupt: if True, stop any speech currently in progress.
        """
        pass
    def speak(self, text=None, acss=None, interrupt=True):
        """Speaks all queued text immediately.  If text is not None,
        it is added to the queue before speaking.
        Arguments:
        - text:      optional text to add to the queue before speaking
        - acss:      acss.ACSS instance; if None,
                     the default voice settings will be used.
                     Otherwise, the acss settings will be
                     used to augment/override the default
                     voice settings.
        - interrupt: if True, stops any speech in progress before
                     speaking the text
        """
        pass
    def isSpeaking(self):
        """"Returns True if the system is currently speaking."""
        return False
    def sayAll(self, utteranceIterator, progressCallback):
        """Iterates through the given utteranceIterator, speaking
        each utterance one at a time.  Subclasses may postpone
        getting a new element until the current element has been
        spoken.
        Arguments:
        - utteranceIterator: iterator/generator whose next() function
                             returns a [SayAllContext, acss] tuple
        - progressCallback:  called as speech progress is made - has a
                             signature of (SayAllContext, type), where
                             type is one of PROGRESS, INTERRUPTED, or
                             COMPLETED.
        """
        # base implementation speaks each utterance synchronously and logs it;
        # NOTE(review): progressCallback is not invoked here -- subclasses
        # presumably provide real progress reporting
        for [context, acss] in utteranceIterator:
            logLine = "SPEECH OUTPUT: '" + context.utterance + "'"
            debug.println(debug.LEVEL_INFO, logLine)
            log.info(logLine)
            self.speak(context.utterance, acss)
    def increaseSpeechRate(self, step=5):
        """Increases the speech rate.
        """
        pass
    def decreaseSpeechRate(self, step=5):
        """Decreases the speech rate.
        """
        pass
    def increaseSpeechPitch(self, step=0.5):
        """Increases the speech pitch.
        """
        pass
    def decreaseSpeechPitch(self, step=0.5):
        """Decreases the speech pitch.
        """
        pass
    def updateCapitalizationStyle(self):
        """Updates the capitalization style used by the speech server."""
        pass
    def updatePunctuationLevel(self):
        """Punctuation level changed, inform this speechServer."""
        pass
    def stop(self):
        """Stops ongoing speech and flushes the queue."""
        pass
    def shutdown(self):
        """Shuts down the speech engine."""
        pass
    def reset(self, text=None, acss=None):
        """Resets the speech engine."""
        pass
|
h4ck3rm1k3/orca-sonar
|
src/orca/speechserver.py
|
Python
|
lgpl-2.1
| 9,557
|
[
"ORCA"
] |
aba7e16d55fa465dda7de543f25ea4f9c51a6fd1d962592a2bdf319c848f7e0a
|
from numpy import linspace
from scipy.special import jn
from tvtk.api import tvtk
from mayavi import mlab
from enable.vtk_backend.vtk_window import EnableVTKWindow
from chaco.api import ArrayPlotData, Plot, OverlayPlotContainer
from chaco.tools.api import PanTool, ZoomTool, MoveTool
def main():
    """Embed a Chaco plot inside a Mayavi/VTK scene via EnableVTKWindow."""
    # Create some x-y data series to plot
    x = linspace(-2.0, 10.0, 100)
    pd = ArrayPlotData(index = x)
    for i in range(5):
        # Bessel functions of the first kind, orders 0..4
        pd.set_data("y" + str(i), jn(i,x))
    # Create some line plots of some of the data
    plot = Plot(pd, bgcolor="none", padding=30, border_visible=True,
                overlay_border=True, use_backbuffer=False)
    plot.legend.visible = True
    plot.plot(("index", "y0", "y1", "y2"), name="j_n, n<3", color="auto")
    plot.plot(("index", "y3"), name="j_3", color="auto")
    # interactive pan/zoom tools on the Chaco plot
    plot.tools.append(PanTool(plot))
    zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
    plot.overlays.append(zoom)
    # Create the mlab test mesh and get references to various parts of the
    # VTK pipeline
    f = mlab.figure(size=(600,500))
    m = mlab.test_mesh()
    scene = mlab.gcf().scene
    render_window = scene.render_window
    renderer = scene.renderer
    rwi = scene.interactor
    # fixed-size, movable (right-drag) plot overlay
    plot.resizable = ""
    plot.bounds = [200,200]
    plot.padding = 25
    plot.outer_position = [30,30]
    plot.tools.append(MoveTool(component=plot,drag_button="right"))
    container = OverlayPlotContainer(bgcolor = "transparent",
                    fit_window = True)
    container.add(plot)
    # Create the Enable Window
    window = EnableVTKWindow(rwi, renderer,
            component=container,
            #istyle_class = tvtk.InteractorStyleSwitch,
            #istyle_class = tvtk.InteractorStyle,
            istyle_class = tvtk.InteractorStyleTrackballCamera,
            bgcolor = "transparent",
            # pass unhandled events through to the VTK interactor
            event_passthrough = True,
            )
    mlab.show()
    return window, render_window
if __name__=="__main__":
    main()
|
tommy-u/chaco
|
examples/demo/vtk_example.py
|
Python
|
bsd-3-clause
| 1,995
|
[
"Mayavi",
"VTK"
] |
f161c8a350f99a0063c1f12bbc7594992de9f987160b4b63754ffae63356d70a
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
# Problem dimensions (m x n overdetermined system) and display toggle.
m = 500
n = 250
display = True
# MPI rank/size of this process within the world communicator.
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
def Rectang(height,width):
    """Return a height-by-width distributed matrix with uniform random entries."""
    mat = El.DistMatrix()
    El.Uniform( mat, height, width )
    return mat
# Build a random overdetermined system A x ~= b.
A = Rectang(m,n)
b = El.DistMatrix()
El.Gaussian( b, m, 1 )
if display:
  El.Display( A, "A" )
  El.Display( b, "b" )
# Solve the non-negative least squares problem and time it.
startNNLS = El.mpi.Time()
x = El.NNLS( A, b )
endNNLS = El.mpi.Time()
if worldRank == 0:
  print "NNLS time:", endNNLS-startNNLS, "seconds"
if display:
  El.Display( x, "x" )
# Residual e = b - A x for the NNLS solution.
e = El.DistMatrix()
El.Copy( b, e )
El.Gemv( El.NORMAL, -1., A, x, 1., e )
if display:
  El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
  print "|| A x - b ||_2 =", eTwoNorm
# Compare against the unconstrained least-squares solution.
startLS = El.mpi.Time()
xLS = El.LeastSquares( A, b )
endLS = El.mpi.Time()
if worldRank == 0:
  print "LS time:", endLS-startLS, "seconds"
# Residual for the least-squares solution (reusing e).
El.Copy( b, e )
El.Gemv( El.NORMAL, -1., A, xLS, 1., e )
if display:
  El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
  print "|| A x_{LS} - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
  raw_input('Press Enter to exit')
|
mcopik/Elemental
|
examples/interface/NNLSDense.py
|
Python
|
bsd-3-clause
| 1,404
|
[
"Gaussian"
] |
ac906e92aa67cadd01035d97457bb23344371ccbf3eca29073112e2e69a5e524
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement learning models and parameters."""
import collections
import functools
import operator
# Dependency imports
import gym
from tensor2tensor.layers import common_hparams
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_hparams
def ppo_base_v1():
  """Set of hyperparameters."""
  hparams = common_hparams.basic_params1()
  hparams.learning_rate = 1e-4
  # PPO-specific hyperparameters, registered in one pass.
  ppo_specific = [
      ("init_mean_factor", 0.1),
      ("init_logstd", 0.1),
      ("policy_layers", (100, 100)),
      ("value_layers", (100, 100)),
      ("num_agents", 30),
      ("clipping_coef", 0.2),
      ("gae_gamma", 0.99),
      ("gae_lambda", 0.95),
      ("entropy_loss_coef", 0.01),
      ("value_loss_coef", 1),
      ("optimization_epochs", 15),
      ("epoch_length", 200),
      ("epochs_num", 2000),
      ("eval_every_epochs", 10),
      ("num_eval_agents", 3),
      ("video_during_eval", True),
  ]
  for hp_name, hp_value in ppo_specific:
    hparams.add_hparam(hp_name, hp_value)
  return hparams
@registry.register_hparams
def continuous_action_base():
  """PPO base hparams with a Gaussian network for continuous action spaces."""
  hparams = ppo_base_v1()
  hparams.add_hparam("network", feed_forward_gaussian_fun)
  return hparams
@registry.register_hparams
def discrete_action_base():
  """PPO base hparams with a categorical network for discrete action spaces."""
  hparams = ppo_base_v1()
  hparams.add_hparam("network", feed_forward_categorical_fun)
  return hparams
# Neural networks for actor-critic algorithms
# Bundle returned by every network builder: a policy distribution, the value
# estimates, and a callable applied to sampled actions before execution.
NetworkOutput = collections.namedtuple(
    "NetworkOutput", "policy, value, action_postprocessing")
def feed_forward_gaussian_fun(action_space, config, observations):
  """Feed-forward Gaussian."""
  if not isinstance(action_space, gym.spaces.box.Box):
    raise ValueError("Expecting continuous action space.")
  mean_weights_initializer = tf.contrib.layers.variance_scaling_initializer(
      factor=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
  # Flatten each observation to a vector, keeping the first two dimensions
  # (presumably batch and time -- TODO confirm against the caller).
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope("policy"):
    x = flat_observations
    for size in config.policy_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    # Mean of the action distribution, squashed to (-1, 1) by tanh.
    mean = tf.contrib.layers.fully_connected(
        x, action_space.shape[0], tf.tanh,
        weights_initializer=mean_weights_initializer)
    # Single learned log-std vector, tiled to match the mean's leading dims.
    logstd = tf.get_variable(
        "logstd", mean.shape[2:], tf.float32, logstd_initializer)
    logstd = tf.tile(
        logstd[None, None],
        [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
  with tf.variable_scope("value"):
    x = flat_observations
    for size in config.value_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  # Guard against NaN/Inf propagating through the graph.
  mean = tf.check_numerics(mean, "mean")
  logstd = tf.check_numerics(logstd, "logstd")
  value = tf.check_numerics(value, "value")
  policy = tf.contrib.distributions.MultivariateNormalDiag(mean,
                                                           tf.exp(logstd))
  # Sampled actions are clipped to [-2, 2] before being executed.
  return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
def feed_forward_categorical_fun(action_space, config, observations):
  """Feed-forward categorical.

  MLP policy and value towers over flattened observations, producing a
  Categorical policy over a discrete action space.

  Args:
    action_space: a gym.spaces.Discrete; raises ValueError otherwise.
    config: hparams providing policy_layers and value_layers.
    observations: observation tensor; dims past the first two are flattened.

  Returns:
    A NetworkOutput with identity action post-processing.
  """
  if not isinstance(action_space, gym.spaces.Discrete):
    raise ValueError("Expecting discrete action space.")
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope("policy"):
    x = flat_observations
    for size in config.policy_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                               activation_fn=None)
  with tf.variable_scope("value"):
    x = flat_observations
    for size in config.value_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  policy = tf.contrib.distributions.Categorical(logits=logits)
  return NetworkOutput(policy, value, lambda a: a)
def feed_forward_cnn_small_categorical_fun(action_space, config, observations):
  """Small cnn network with categorical output.

  Two strided conv layers followed by a dense layer; policy and value heads
  share the convolutional trunk (unlike the MLP variants above).

  Args:
    action_space: discrete action space (only action_space.n is used).
    config: unused.
    observations: image-like observation tensor; the first two axes are
      folded together for the convolutions, then restored before the heads.

  Returns:
    A NetworkOutput with identity action post-processing.
  """
  del config
  obs_shape = observations.shape.as_list()
  # Merge the two leading axes so conv2d sees a plain NHWC batch.
  x = tf.reshape(observations, [-1] + obs_shape[2:])
  with tf.variable_scope("policy"):
    # Pixels are assumed to arrive as 0..255 — TODO confirm at call sites.
    x = tf.to_float(x) / 255.0
    x = tf.contrib.layers.conv2d(x, 32, [5, 5], [2, 2],
                                 activation_fn=tf.nn.relu, padding="SAME")
    x = tf.contrib.layers.conv2d(x, 32, [5, 5], [2, 2],
                                 activation_fn=tf.nn.relu, padding="SAME")
    # Restore the original leading axes and flatten the conv features.
    flat_x = tf.reshape(
        x, [tf.shape(observations)[0], tf.shape(observations)[1],
            functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])
    x = tf.contrib.layers.fully_connected(flat_x, 128, tf.nn.relu)
    logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                               activation_fn=None)
    value = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)[..., 0]
  policy = tf.contrib.distributions.Categorical(logits=logits)
  return NetworkOutput(policy, value, lambda a: a)
|
rsepassi/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
Python
|
apache-2.0
| 6,100
|
[
"Gaussian"
] |
f7534f8fb3ec314788d0e6147bb97e3296367b7b626fa54d7a17c113bdaf07bd
|
# encoding: utf-8
import StringIO
import sys
# Python 2 hack: re-expose sys.setdefaultencoding (removed by site.py) so
# implicit str/unicode conversions below don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf8')
import unittest
# Make the sibling test-suite packages importable.
sys.path.append("../")
import HTMLTestRunner
from Order_List import TestCase_Web_Orderlist as web_Orderlist
# from esss import all_test as web_esss
from esss import TestCase_Client_EsssSales as client_esss
from visit import TestCase_Web_Visit as web_visit
from customervisit import test_suite as web_customervisit
from bbs import TestCase_Web_BBS as web_bbs
from bbs import TestCase_Client_BBS as client_bbs
from blog import TestCase_Web_Blog as web_blog
from blog import TestCase_Client_Blog as clent_blog
from bas_pd import TestCase_Web_Baspd as web_baspd
from bas_pd_promotion import TestCase_Web_Promotion as web_promotion
from bas_pd import Testcase_Client_Baspd as client_bas_pd
from base import TestCase_Web_Base as web_base
from std_attendance_bas import TestCase_Web_Stdattendancebas as web_std_attendance_bas
from user_defined import TestCase_Web_userDefined as web_userDefined
from gljsc import TestCase_Web_gljsc as web_gljsc
# ----------------------------------------------------------------------
# ------------------------------------------------------------------------
# This is the main test on HTMLTestRunner
def safe_str(param):
    """Coerce *param* to a plain string for use in failure messages.

    The previous stub returned ``None``, which made the
    ``self.fail(safe_str(...))`` call below report a useless ``None``
    message instead of the formatted pattern text.
    """
    try:
        return str(param)
    except UnicodeEncodeError:
        # Python 2: a unicode value with non-ASCII characters; fall back
        # to an explicit UTF-8 byte string.
        return param.encode('utf-8')
class Test_HTMLTestRunner(unittest.TestCase):
    """Drives the full regression suite through HTMLTestRunner and checks
    that an HTML report is produced."""

    def test0(self):
        # Smoke test: an empty suite should still yield a complete HTML page.
        self.suite = unittest.TestSuite()
        buf = StringIO.StringIO()
        runner = HTMLTestRunner.HTMLTestRunner(buf)
        runner.run(self.suite)
        # didn't blow up? ok.
        self.assert_('</html>' in buf.getvalue())

    def test_main(self):
        # Run HTMLTestRunner. Verify the HTML report.
        # suite of TestCases
        self.suite = unittest.TestSuite()
        # Aggregate every team's web/client suites into one run; commented
        # entries are intentionally disabled.
        self.suite.addTests([
            #orderlist st by renkai
            unittest.defaultTestLoader.loadTestsFromTestCase(web_Orderlist.ST_Order_List),
            unittest.defaultTestLoader.loadTestsFromTestCase(web_Orderlist.Smoke_Web_Order_List),
            unittest.defaultTestLoader.loadTestsFromTestCase(web_visit.ST_Visit),
            unittest.defaultTestLoader.loadTestsFromTestCase(web_customervisit.MyTestCase),
            #bbs by zhangying
            unittest.defaultTestLoader.loadTestsFromTestCase(web_bbs.Smoke_Web_BBS),
            unittest.defaultTestLoader.loadTestsFromTestCase(web_bbs.ST_Web_bbs),
            unittest.defaultTestLoader.loadTestsFromTestCase(client_bbs.Smoke_Client_BBS),
            # blog by zhangying
            unittest.defaultTestLoader.loadTestsFromTestCase(web_blog.Smoke_Web_Blog),
            unittest.defaultTestLoader.loadTestsFromTestCase(clent_blog.Smoke_Client_Blog),
            # unittest.defaultTestLoader.loadTestsFromTestCase(web_blog.ST_blog),
            unittest.defaultTestLoader.loadTestsFromTestCase(web_baspd.Smoke_web_Bas_pd),
            unittest.defaultTestLoader.loadTestsFromTestCase(web_promotion.Smoke_Web_Promotion),
            unittest.defaultTestLoader.loadTestsFromTestCase(client_bas_pd.Smoke_Client_baspd),
            unittest.defaultTestLoader.loadTestsFromTestCase(client_esss.ST_Esss_CarSales),
            # web_base login test case by lulei
            unittest.defaultTestLoader.loadTestsFromTestCase(web_base.ST_Web_base),
            # by chenyizhang
            # unittest.defaultTestLoader.loadTestsFromTestCase(web_std_attendance_bas.Smoke_web_Std_attendance_bas),
            # by zhouhaifeng
            unittest.defaultTestLoader.loadTestsFromTestCase(web_userDefined.ST_UserDefined),
            # by zhanghaochen
            unittest.defaultTestLoader.loadTestsFromTestCase(web_gljsc.ST_gljsc)
            ])
        # Invoke TestRunner
        buf = StringIO.StringIO()
        #runner = unittest.TextTestRunner(buf) #DEBUG: this is the unittest baseline
        runner = HTMLTestRunner.HTMLTestRunner(
                    stream=buf,
                    title='<Waiqin365 Web Api System Test>',
                    description='System Test Report'
                    )
        runner.run(self.suite)
        # Define the expected output sequence. This is imperfect but should
        # give a good sense of the well being of the test.
        # NOTE: EXPECTED is currently a single blank, so _checkoutput matches
        # vacuously; populate it to actually pin the report contents.
        EXPECTED = u""" """
        # check out the output
        byte_output = buf.getvalue()
        # output the main test output for debugging & demo
        print byte_output
        # HTMLTestRunner pumps UTF-8 output
        output = byte_output.decode('utf-8')
        self._checkoutput(output,EXPECTED)

    def _checkoutput(self,output,EXPECTED):
        # Verify that every non-empty line of EXPECTED occurs in `output`,
        # in order; `i` advances so matches cannot overlap or go backwards.
        i = 0
        for lineno, p in enumerate(EXPECTED.splitlines()):
            if not p:
                continue
            j = output.find(p,i)
            if j < 0:
                self.fail(safe_str('Pattern not found lineno %s: "%s"' % (lineno+1,p)))
            i = j + len(p)
##############################################################################
# Executing this module from the command line
##############################################################################
import unittest
if __name__ == "__main__":
    # Default to running the Test_HTMLTestRunner case unless the caller
    # supplied explicit command-line arguments.
    argv = sys.argv if len(sys.argv) > 1 else [
        'ST_Web_HTMLTestRunner.py', 'Test_HTMLTestRunner']
    unittest.main(argv=argv)
    # Testing HTMLTestRunner with HTMLTestRunner would work. But instead
    # we will use standard library's TextTestRunner to reduce the nesting
    # that may confuse people.
    # HTMLTestRunner.main(argv=argv)
|
NJ-zero/Android
|
requests_demo/runner/ST_Web_HTMLTestRunner.py
|
Python
|
mit
| 5,462
|
[
"VisIt"
] |
a079fdac73d5f5067428b848e0cc4a0ca058c0d469ff902a7d55a9184b7e2b6c
|
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains classes that handle layout of displayables on
# the screen.
from renpy.display.render import render, Render
import renpy.display
def scale(num, base):
    """
    Interpret *num* relative to *base* when it is a float.

    A float is treated as a fraction of `base` and multiplied out; any
    other value (typically an int pixel count) is returned unchanged.
    """
    return num * base if isinstance(num, float) else num
class Null(renpy.display.core.Displayable):
    """
    :doc: disp_imagelike
    :name: Null

    A displayable that creates an empty box on the screen. The size
    of the box is controlled by `width` and `height`. This can be used
    when a displayable requires a child, but no child is suitable, or
    as a spacer inside a box.

    ::

        image logo spaced = HBox("logo.png", Null(width=100), "logo.png")
    """

    def __init__(self, width=0, height=0, **properties):
        super(Null, self).__init__(**properties)

        self.width = width
        self.height = height

    def render(self, width, height, st, at):
        # Always render at our configured size, ignoring the space offered
        # by the caller.
        empty = renpy.display.render.Render(self.width, self.height)

        if self.focusable:
            empty.add_focus(self, None, None, None, None, None)

        return empty
class Container(renpy.display.core.Displayable):
    """
    This is the base class for containers that can have one or more
    children.

    @ivar children: A list giving the children that have been added to
    this container, in the order that they were added in.

    @ivar child: The last child added to this container. This is also
    used to access the sole child in containers that can only hold
    one child.

    @ivar offsets: A list giving offsets for each of our children.
    It's expected that render will set this up each time it is called.

    @ivar sizes: A list giving sizes for each of our children. It's
    also expected that render will set this each time it is called.
    """

    # We indirect all list creation through this, so that we can
    # use RevertableLists if we want.
    _list_type = list

    def __init__(self, *args, **properties):
        self.children = self._list_type()
        self.child = None
        self.offsets = self._list_type()

        # Positional arguments become initial children.
        for i in args:
            self.add(i)

        super(Container, self).__init__(**properties)

    def set_style_prefix(self, prefix, root):
        # Propagate the prefix to every child; only the root gets root=True.
        super(Container, self).set_style_prefix(prefix, root)

        for i in self.children:
            i.set_style_prefix(prefix, False)

    def _duplicate(self, args):
        # Deep-duplicates the child tree. The copy is only marked
        # duplicatable again if at least one child remains duplicatable
        # after being made unique.
        if not self._duplicatable:
            return self

        rv = self._copy(args)
        rv.children = [ i._duplicate(args) for i in self.children ]

        if rv.children:
            rv.child = rv.children[-1]

        rv._duplicatable = False

        for i in rv.children:
            i._unique()

            if i._duplicatable:
                rv._duplicatable = True

        return rv

    def _in_current_store(self):
        # Returns self unchanged when no child changed, so unchanged
        # subtrees are shared rather than copied.
        children = [ ]
        changed = False

        for old in self.children:
            new = old._in_current_store()
            changed |= (old is not new)
            children.append(new)

        if not changed:
            return self

        rv = self._copy()
        rv.children = children

        if rv.children:
            rv.child = rv.children[-1]

        return rv

    def add(self, d):
        """
        Adds a child to this container.
        """

        child = renpy.easy.displayable(d)

        self.children.append(child)
        self.child = child
        # Offsets are stale once the child list changes; render rebuilds them.
        self.offsets = self._list_type()

        if child._duplicatable:
            self._duplicatable = True

    def _clear(self):
        self.child = None
        self.children = self._list_type()
        self.offsets = self._list_type()

        renpy.display.render.redraw(self, 0)

    def remove(self, d):
        """
        Removes the first instance of child from this container. May
        not work with all containers.
        """

        # for/else: bail out silently if d is not among the children.
        for i, c in enumerate(self.children):
            if c is d:
                break
        else:
            return

        self.children.pop(i) # W0631
        self.offsets = self._list_type()

        if self.children:
            self.child = self.children[-1]
        else:
            self.child = None

    def update(self):
        """
        This should be called if a child is added to this
        displayable outside of the render function.
        """

        renpy.display.render.invalidate(self)

    def render(self, width, height, st, at):
        # Default layout: stack every child at (0, 0), letting each child's
        # own placement properties position it.
        rv = Render(width, height)
        self.offsets = self._list_type()

        for c in self.children:
            cr = render(c, width, height, st, at)
            offset = c.place(rv, 0, 0, width, height, cr)
            self.offsets.append(offset)

        return rv

    def event(self, ev, x, y, st):
        children = self.children
        offsets = self.offsets

        # In #641, these went out of sync. Since they should resync on a
        # render, ignore the event for a short while rather than crashing.
        if len(offsets) != len(children):
            return None

        # Deliver the event front-to-back (last-added child first).
        for i in xrange(len(offsets) - 1, -1, -1):

            d = children[i]
            xo, yo = offsets[i]

            rv = d.event(ev, x - xo, y - yo, st)
            if rv is not None:
                return rv

        return None

    def visit(self):
        return self.children

    # These interact with the ui functions to allow use as a context
    # manager.

    def __enter__(self):
        renpy.ui.context_enter(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        renpy.ui.context_exit(self)
        return False
def LiveComposite(size, *args, **properties):
    """
    :doc: disp_imagelike

    This creates a new displayable of `size`, by compositing other
    displayables. `size` is a (width, height) tuple.

    The remaining positional arguments are used to place images inside
    the LiveComposite. The remaining positional arguments should come
    in groups of two, with the first member of each group an (x, y)
    tuple, and the second member of a group is a displayable that
    is composited at that position.

    Displayables are composited from back to front.

    ::

        image eileen composite = LiveComposite(
            (300, 600),
            (0, 0), "body.png",
            (0, 0), "clothes.png",
            (50, 50), "expression.png")
    """

    properties.setdefault('style', 'image_placement')

    width, height = size

    rv = Fixed(xmaximum=width, ymaximum=height, xminimum=width, yminimum=height, **properties)

    if len(args) % 2 != 0:
        # Arguments come in (position, displayable) pairs, so an odd count
        # means a pair is incomplete. (The old message said "odd number",
        # which was backwards.)
        raise Exception("LiveComposite requires an even number of arguments.")

    # args[0::2] are the (x, y) positions, args[1::2] the displayables.
    for pos, widget in zip(args[0::2], args[1::2]):
        xpos, ypos = pos
        rv.add(Position(widget, xpos=xpos, xanchor=0, ypos=ypos, yanchor=0))

    return rv
class Position(Container):
    """
    Controls the placement of a displayable on the screen, using
    supplied position properties. This is the non-curried form of
    Position, which should be used when the user has directly created
    the displayable that will be shown on the screen.
    """

    def __init__(self, child, style='image_placement', **properties):
        """
        @param child: The child that is being laid out.

        @param style: The base style of this position.

        @param properties: Position properties that control where the
        child of this widget is placed.
        """

        super(Position, self).__init__(style=style, **properties)
        self.add(child)

    def render(self, width, height, st, at):
        # We take on exactly the size of our child; placement happens in
        # get_placement, not here.
        child_render = render(self.child, width, height, st, at)

        self.offsets = [ (0, 0) ]

        rv = renpy.display.render.Render(child_render.width, child_render.height)
        rv.blit(child_render, (0, 0))

        return rv

    def get_placement(self):
        # Start from the child's own placement, then let any position
        # property set on our style override (or, for offsets, add to) it.
        xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel = self.child.get_placement()

        if xoffset is None:
            xoffset = 0
        if yoffset is None:
            yoffset = 0

        style = self.style

        if style.xpos is not None:
            xpos = style.xpos

        if style.ypos is not None:
            ypos = style.ypos

        if style.xanchor is not None:
            xanchor = style.xanchor

        if style.yanchor is not None:
            yanchor = style.yanchor

        if style.xoffset is not None:
            xoffset += style.xoffset

        if style.yoffset is not None:
            yoffset += style.yoffset

        if (not subpixel) and (style.subpixel is not None):
            subpixel = style.subpixel

        return xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel
class Grid(Container):
    """
    A grid is a widget that evenly allocates space to its children.
    The child widgets should not be greedy, but should instead be
    widgets that only use part of the space available to them.
    """

    def __init__(self, cols, rows, padding=None,
                 transpose=False,
                 style='grid', **properties):
        """
        @param cols: The number of columns in this widget.

        @params rows: The number of rows in this widget.

        @params transpose: True if the grid should be transposed.
        """

        # `padding` is a legacy alias for the spacing style property.
        if padding is not None:
            properties.setdefault('spacing', padding)

        super(Grid, self).__init__(style=style, **properties)

        cols = int(cols)
        rows = int(rows)

        self.cols = cols
        self.rows = rows

        self.transpose = transpose

    def render(self, width, height, st, at):
        # Per-axis spacing falls back to the shared `spacing` property.
        xspacing = self.style.xspacing
        yspacing = self.style.yspacing

        if xspacing is None:
            xspacing = self.style.spacing
        if yspacing is None:
            yspacing = self.style.spacing

        # For convenience and speed.
        cols = self.cols
        rows = self.rows

        # The grid must be exactly full.
        if len(self.children) != cols * rows:
            if len(self.children) < cols * rows:
                raise Exception("Grid not completely full.")
            else:
                raise Exception("Grid overfull.")

        # If transposed, reorder children from column-major to row-major
        # so the placement loop below can stay row-major.
        if self.transpose:
            children = [ ]
            for y in range(rows):
                for x in range(cols):
                    children.append(self.children[y + x * rows])
        else:
            children = self.children

        # Now, start the actual rendering.

        renwidth = width
        renheight = height

        # When filling, each cell gets an even share of the (spacing-less)
        # space. NOTE: this uses "/", which floors for ints on Python 2.
        if self.style.xfill:
            renwidth = (width - (cols - 1) * xspacing) / cols
        if self.style.yfill:
            renheight = (height - (rows - 1) * yspacing) / rows

        renders = [ render(i, renwidth, renheight, st, at) for i in children ]
        sizes = [ i.get_size() for i in renders ]

        # Every cell is sized to the largest child (or the fill share).
        cwidth = 0
        cheight = 0

        for w, h in sizes:
            cwidth = max(cwidth, w)
            cheight = max(cheight, h)

        if self.style.xfill:
            cwidth = renwidth

        if self.style.yfill:
            cheight = renheight

        width = cwidth * cols + xspacing * (cols - 1)
        height = cheight * rows + yspacing * (rows - 1)

        rv = renpy.display.render.Render(width, height)

        offsets = [ ]

        for y in range(0, rows):
            for x in range(0, cols):

                child = children[ x + y * cols ]
                surf = renders[x + y * cols]

                xpos = x * (cwidth + xspacing)
                ypos = y * (cheight + yspacing)

                offset = child.place(rv, xpos, ypos, cwidth, cheight, surf)
                offsets.append(offset)

        # Map offsets back to the original (untransposed) child order, since
        # self.offsets must parallel self.children.
        if self.transpose:
            self.offsets = [ ]
            for x in range(cols):
                for y in range(rows):
                    self.offsets.append(offsets[y * cols + x])
        else:
            self.offsets = offsets

        return rv
class IgnoreLayers(Exception):
    """
    Raised by an event handler to make the enclosing layers ignore the
    event, while still allowing it to reach the underlay.
    """
class MultiBox(Container):
    """
    A container that lays its children out as a horizontal box, a vertical
    box, or a fixed stack, depending on the `layout` argument and the
    box_layout style property. Also serves as the implementation of layers,
    in which case it tracks per-child show/animation times.

    Fix over the original: the over-size error raised in the `yfit` branch
    of render() incorrectly said "Fixed fit width" while reporting the
    height; it now says "Fixed fit height".
    """

    # Set when this box is acting as a named layer.
    layer_name = None

    # True until the first render, at which point per-child times and the
    # layout are latched.
    first = True

    order_reverse = False

    def __init__(self, spacing=None, layout=None, style='default', **properties):

        if spacing is not None:
            properties['spacing'] = spacing

        super(MultiBox, self).__init__(style=style, **properties)

        self._clipping = self.style.clipping

        self.default_layout = layout

        # The start and animation times for children of this
        # box.
        self.start_times = [ ]
        self.anim_times = [ ]

        # A map from layer name to the widget corresponding to
        # that layer.
        self.layers = None

        # The scene list for this widget.
        self.scene_list = None

    def _clear(self):
        super(MultiBox, self)._clear()

        self.start_times = [ ]
        self.anim_times = [ ]
        self.layers = None
        self.scene_list = None

    def _in_current_store(self):
        # Copy-on-change: return self unless some child (or scene-list
        # entry) changed when moved into the current store.

        if self.layer_name is not None:

            if self.scene_list is None:
                return self

            scene_list = [ ]

            changed = False

            for old_sle in self.scene_list:
                new_sle = old_sle.copy()

                d = new_sle.displayable._in_current_store()

                if d is not new_sle.displayable:
                    new_sle.displayable = d
                    changed = True

                scene_list.append(new_sle)

            if not changed:
                return self

            rv = MultiBox(layout=self.default_layout)
            rv.layer_name = self.layer_name
            rv.append_scene_list(scene_list)

        elif self.layers:

            rv = MultiBox(layout=self.default_layout)
            rv.layers = { }

            changed = False

            for layer in renpy.config.layers:

                old_d = self.layers[layer]
                new_d = old_d._in_current_store()

                if new_d is not old_d:
                    changed = True

                rv.add(new_d)
                rv.layers[layer] = new_d

            if not changed:
                return self

        else:
            return super(MultiBox, self)._in_current_store()

        # Carry the cached layout state over to the copy.
        if self.offsets:
            rv.offsets = list(self.offsets)

        if self.start_times:
            rv.start_times = list(self.start_times)

        if self.anim_times:
            rv.anim_times = list(self.anim_times)

        return rv

    def __unicode__(self):
        layout = self.style.box_layout

        if layout is None:
            layout = self.default_layout

        if layout == "fixed":
            return "Fixed"
        elif layout == "horizontal":
            return "HBox"
        elif layout == "vertical":
            return "VBox"
        else:
            return "MultiBox"

    def add(self, widget, start_time=None, anim_time=None): # W0221
        super(MultiBox, self).add(widget)
        self.start_times.append(start_time)
        self.anim_times.append(anim_time)

    def append_scene_list(self, l):
        for sle in l:
            self.add(sle.displayable, sle.show_time, sle.animation_time)

        if self.scene_list is None:
            self.scene_list = [ ]

        self.scene_list.extend(l)

    def render(self, width, height, st, at):
        # Lays out the children according to the latched layout ("fixed",
        # "horizontal" or "vertical"), and fills in self.offsets.

        # Do we need to adjust the child times due to our being a layer?
        if self.layer_name or (self.layers is not None):
            adjust_times = True
        else:
            adjust_times = False

        minx = self.style.xminimum
        if minx is not None:
            width = max(width, scale(minx, width))

        miny = self.style.yminimum
        if miny is not None:
            height = max(height, scale(miny, height))

        if self.first:

            self.first = False

            if adjust_times:

                it = renpy.game.interface.interact_time

                # Children shown with no explicit time default to the
                # interact time.
                self.start_times = [ i or it for i in self.start_times ]
                self.anim_times = [ i or it for i in self.anim_times ]

            layout = self.style.box_layout

            if layout is None:
                layout = self.default_layout

            self.layout = layout # W0201

        else:
            layout = self.layout

        # Handle time adjustment, store the results in csts and cats.
        if adjust_times:
            t = renpy.game.interface.frame_time

            csts = [ t - start for start in self.start_times ]
            cats = [ t - anim for anim in self.anim_times ]

        else:
            csts = [ st ] * len(self.children)
            cats = [ at ] * len(self.children)

        offsets = [ ]

        if layout == "fixed":

            rv = None

            if self.style.order_reverse:
                iterator = zip(reversed(self.children), reversed(csts), reversed(cats))
            else:
                iterator = zip(self.children, csts, cats)

            rv = renpy.display.render.Render(width, height, layer_name=self.layer_name)

            xfit = self.style.xfit
            yfit = self.style.yfit

            fit_first = self.style.fit_first

            if fit_first == "width":
                first_fit_width = True
                first_fit_height = False
            elif fit_first == "height":
                first_fit_width = False
                first_fit_height = True
            elif fit_first:
                first_fit_width = True
                first_fit_height = True
            else:
                first_fit_width = False
                first_fit_height = False

            sizes = [ ]

            for child, cst, cat in iterator:

                surf = render(child, width, height, cst, cat)
                size = surf.get_size()
                sizes.append(size)

                # fit_first: the first child's size becomes the box's size.
                if first_fit_width:
                    width = rv.width = size[0]
                    first_fit_width = False

                if first_fit_height:
                    height = rv.height = size[1]
                    first_fit_height = False

                if surf:
                    offset = child.place(rv, 0, 0, width, height, surf)
                    offsets.append(offset)
                else:
                    offsets.append((0, 0))

            if xfit:

                width = 0

                for o, s in zip(offsets, sizes):
                    width = max(o[0] + s[0], width)

                    if fit_first:
                        break

                rv.width = width

                if width > renpy.config.max_fit_size:
                    raise Exception("Fixed fit width ({}) is too large.".format(width))

            if yfit:

                height = 0

                for o, s in zip(offsets, sizes):
                    height = max(o[1] + s[1], height)

                    if fit_first:
                        break

                rv.height = height

                if height > renpy.config.max_fit_size:
                    # Bug fix: this message used to say "width".
                    raise Exception("Fixed fit height ({}) is too large.".format(height))

            if self.style.order_reverse:
                offsets.reverse()

            self.offsets = offsets

            return rv

        # If we're here, we have a box, either horizontal or vertical. Which is good,
        # as we can share some code between boxes.

        spacing = self.style.spacing
        first_spacing = self.style.first_spacing

        if first_spacing is None:
            first_spacing = spacing

        spacings = [ first_spacing ] + [ spacing ] * (len(self.children) - 1)

        box_wrap = self.style.box_wrap
        xfill = self.style.xfill
        yfill = self.style.yfill
        xminimum = self.style.xminimum
        yminimum = self.style.yminimum

        # The shared height and width of the current line. The line_height must
        # be 0 for a vertical box, and the line_width must be 0 for a horizontal
        # box.
        line_width = 0
        line_height = 0

        # The children to layout.
        children = list(self.children)
        if self.style.box_reverse:
            children.reverse()
            spacings.reverse()

        # a list of (child, x, y, w, h, surf) tuples that are turned into
        # calls to child.place().
        placements = [ ]

        # The maximum x and y.
        maxx = 0
        maxy = 0

        # The minimum size of x and y.
        minx = 0
        miny = 0

        def layout_line(line, xfill, yfill):
            """
            Lays out a single line.

            `line` a list of (child, x, y, surf) tuples.
            `xfill` the amount of space to add in the x direction.
            `yfill` the amount of space to add in the y direction.
            """

            xfill = max(0, xfill)
            yfill = max(0, yfill)

            # Distribute any fill space evenly among the line's children.
            if line:
                xperchild = xfill / len(line)
                yperchild = yfill / len(line)
            else:
                xperchild = 0
                yperchild = 0

            maxxout = maxx
            maxyout = maxy

            for i, (child, x, y, surf) in enumerate(line):
                sw, sh = surf.get_size()

                sw = max(line_width, sw)
                sh = max(line_height, sh)

                x += i * xperchild
                y += i * yperchild

                sw += xperchild
                sh += yperchild

                placements.append((child, x, y, sw, sh, surf))

                maxxout = max(maxxout, x + sw)
                maxyout = max(maxyout, y + sh)

            return maxxout, maxyout

        x = 0
        y = 0

        if layout == "horizontal":

            if yfill:
                miny = height
            else:
                miny = yminimum

            line_height = 0
            line = [ ]
            remwidth = width

            if xfill:
                target_width = width
            else:
                target_width = xminimum

            for d, padding, cst, cat in zip(children, spacings, csts, cats):

                if box_wrap:
                    rw = width
                else:
                    rw = remwidth

                surf = render(d, rw, height - y, cst, cat)
                sw, sh = surf.get_size()

                # Wrap to a new line when the child no longer fits.
                if box_wrap and remwidth - sw - padding < 0 and line:
                    maxx, maxy = layout_line(line, target_width - x, 0)

                    y += line_height
                    x = 0
                    line_height = 0
                    remwidth = width
                    line = [ ]

                line.append((d, x, y, surf))
                line_height = max(line_height, sh)
                x += sw + padding
                remwidth -= (sw + padding)

            maxx, maxy = layout_line(line, target_width - x, 0)

        elif layout == "vertical":

            if xfill:
                minx = width
            else:
                minx = xminimum

            line_width = 0
            line = [ ]
            remheight = height

            if yfill:
                target_height = height
            else:
                target_height = yminimum

            for d, padding, cst, cat in zip(children, spacings, csts, cats):

                if box_wrap:
                    rh = height
                else:
                    rh = remheight

                surf = render(d, width - x, rh, cst, cat)
                sw, sh = surf.get_size()

                # NOTE(review): unlike the horizontal branch, this wrap test
                # does not include "and line"; with an oversize first child
                # it performs a harmless empty-line layout. Left as-is.
                if box_wrap and remheight - sh - padding < 0:
                    maxx, maxy = layout_line(line, 0, target_height - y)

                    x += line_width
                    y = 0
                    line_width = 0
                    remheight = height
                    line = [ ]

                line.append((d, x, y, surf))
                line_width = max(line_width, sw)
                y += sh + padding
                remheight -= (sh + padding)

            maxx, maxy = layout_line(line, 0, target_height - y)

        else:
            raise Exception("Unknown box layout: %r" % layout)

        # Back to the common for vertical and horizontal.

        if not xfill:
            width = max(xminimum, maxx)

        if not yfill:
            height = max(yminimum, maxy)

        rv = renpy.display.render.Render(width, height)

        if self.style.box_reverse ^ self.style.order_reverse:
            placements.reverse()

        for child, x, y, w, h, surf in placements:
            w = max(minx, w)
            h = max(miny, h)

            offset = child.place(rv, x, y, w, h, surf)
            offsets.append(offset)

        if self.style.order_reverse:
            offsets.reverse()

        self.offsets = offsets

        return rv

    def event(self, ev, x, y, st):

        # Py2: zip returns a list, so reverse() is valid here.
        children_offsets = zip(self.children, self.offsets, self.start_times)

        if not self.style.order_reverse:
            children_offsets.reverse()

        try:

            for i, (xo, yo), t in children_offsets:

                # A layer's children get a shown-time relative to their own
                # start time; plain children just get st.
                if t is None:
                    cst = st
                else:
                    cst = renpy.game.interface.event_time - t

                rv = i.event(ev, x - xo, y - yo, cst)
                if rv is not None:
                    return rv

        except IgnoreLayers:
            # Swallow the exception at the outermost (layer-owning) box;
            # re-raise from nested boxes so it propagates that far.
            if self.layers:
                return None
            else:
                raise

        return None
def Fixed(**properties):
    # Convenience factory: a MultiBox using the "fixed" layout, where
    # children are stacked back-to-front and positioned by their own
    # placement properties.
    return MultiBox(layout='fixed', **properties)
class SizeGroup(renpy.object.Object):
    """
    Tracks a group of displayables that should all be rendered at the
    width of the widest member.
    """

    def __init__(self):

        super(SizeGroup, self).__init__()

        self.members = [ ]
        self._width = None
        self.computing_width = False

    def width(self, width, height, st, at):
        """
        Returns (and caches) the width of the widest member. While the
        computation is in progress, nested calls return 0 to break the
        recursion that rendering a member can cause.
        """

        if self._width is not None:
            return self._width

        if self.computing_width:
            return 0

        self.computing_width = True

        widest = 0
        for member in self.members:
            widest = max(widest, member.render(width, height, st, at).width)

        self._width = widest
        self.computing_width = False

        return widest
# Module-level registry mapping a size_group name to its SizeGroup
# instance; populated by Window.per_interact below.
size_groups = dict()
class Window(Container):
    """
    A window that has padding and margins, and can place a background
    behind its child. `child` is the child added to this
    displayable. All other properties are as for the :ref:`Window`
    screen language statement.
    """

    def __init__(self, child=None, style='window', **properties):
        super(Window, self).__init__(style=style, **properties)

        if child is not None:
            self.add(child)

    def visit(self):
        # The background participates in prediction/visiting as well.
        return [ self.style.background ] + self.children

    def get_child(self):
        # A style-supplied child overrides the one added directly.
        return self.style.child or self.child

    def per_interact(self):
        # Register with our size group (creating it on first use) so that
        # render() can equalize widths across group members.
        size_group = self.style.size_group
        if size_group:
            group = size_groups.get(size_group, None)
            if group is None:
                group = size_groups[size_group] = SizeGroup()

            group.members.append(self)

    def predict_one(self):
        pd = renpy.display.predict.displayable
        self.style._predict_window(pd)

    def render(self, width, height, st, at):

        # save some typing.
        style = self.style

        # Float style values are fractions of the available area (see scale).
        xminimum = scale(style.xminimum, width)
        yminimum = scale(style.yminimum, height)
        xmaximum = scale(style.xmaximum, width)
        ymaximum = scale(style.ymaximum, height)

        size_group = self.style.size_group
        if size_group and size_group in size_groups:
            xminimum = max(xminimum, size_groups[size_group].width(width, height, st, at))

        left_margin = scale(style.left_margin, width)
        left_padding = scale(style.left_padding, width)

        right_margin = scale(style.right_margin, width)
        right_padding = scale(style.right_padding, width)

        top_margin = scale(style.top_margin, height)
        top_padding = scale(style.top_padding, height)

        bottom_margin = scale(style.bottom_margin, height)
        bottom_padding = scale(style.bottom_padding, height)

        # c for combined.
        cxmargin = left_margin + right_margin
        cymargin = top_margin + bottom_margin

        cxpadding = left_padding + right_padding
        cypadding = top_padding + bottom_padding

        child = self.get_child()

        # Render the child into the area left after margins and padding.
        surf = render(child,
                      width - cxmargin - cxpadding,
                      height - cymargin - cypadding,
                      st, at)

        sw, sh = surf.get_size()

        # If we don't fill, shrink our size to fit.

        if not style.xfill:
            width = max(cxmargin + cxpadding + sw, xminimum)

        if not style.yfill:
            height = max(cymargin + cypadding + sh, yminimum)

        if renpy.config.enforce_window_max_size:

            if xmaximum is not None:
                width = min(width, xmaximum)

            if ymaximum is not None:
                height = min(height, ymaximum)

        rv = renpy.display.render.Render(width, height)

        # Draw the background. The background should render at exactly the
        # requested size. (That is, be a Frame or a Solid).
        if style.background:
            bw = width - cxmargin
            bh = height - cymargin

            back = render(style.background, bw, bh, st, at)

            style.background.place(rv, left_margin, top_margin, bw, bh, back, main=False)

        offsets = child.place(rv,
                              left_margin + left_padding,
                              top_margin + top_padding,
                              width - cxmargin - cxpadding,
                              height - cymargin - cypadding,
                              surf)

        # Draw the foreground. The background should render at exactly the
        # requested size. (That is, be a Frame or a Solid).
        if style.foreground:
            bw = width - cxmargin
            bh = height - cymargin

            back = render(style.foreground, bw, bh, st, at)

            style.foreground.place(rv, left_margin, top_margin, bw, bh, back, main=False)

        if self.child:
            self.offsets = [ offsets ]

        self.window_size = width, height # W0201

        return rv
def dynamic_displayable_compat(st, at, expr):
    # Backwards-compatibility shim used when DynamicDisplayable is given a
    # string: evaluate it as a Python expression once per interaction, and
    # never request a redraw (None).
    return renpy.python.py_eval(expr), None
class DynamicDisplayable(renpy.display.core.Displayable):
    """
    :doc: disp_dynamic

    A displayable that can change its child based on a Python
    function, over the course of an interaction.

    `function`
        A function that is called with the arguments:

        * The amount of time the displayable has been shown for.
        * The amount of time any displayable with the same tag has been shown for.
        * Any positional or keyword arguments supplied to DynamicDisplayable.

        and should return a (d, redraw) tuple, where:

        * `d` is a displayable to show.
        * `redraw` is the amount of time to wait before calling the
          function again, or None to not call the function again
          before the start of the next interaction.

        `function` is called at the start of every interaction.

    As a special case, `function` may also be a python string that evaluates
    to a displayable. In that case, function is run once per interaction.

    ::

        # Shows a countdown from 5 to 0, updating it every tenth of
        # a second until the time expires.
        init python:

            def show_countdown(st, at):
                if st > 5.0:
                    return Text("0.0"), None
                else:
                    d = Text("{:.1f}".format(5.0 - st))
                    return d, 0.1

        image countdown = DynamicDisplayable(show_countdown)
    """

    # The computed child is transient state; don't save it.
    nosave = [ 'child' ]

    def after_setstate(self):
        self.child = None

    def __init__(self, function, *args, **kwargs):
        super(DynamicDisplayable, self).__init__()

        self.child = None

        # String form: route through the compatibility shim, which treats
        # the string as a Python expression.
        if isinstance(function, basestring):
            args = ( function, )
            kwargs = { }
            function = dynamic_displayable_compat

        self.predict_function = kwargs.pop("_predict_function", None)
        self.function = function
        self.args = args
        self.kwargs = kwargs

    def visit(self):
        return [ ]

    def update(self, st, at):
        # Ask `function` for the current child, and schedule the next
        # redraw if one was requested.
        child, redraw = self.function(st, at, *self.args, **self.kwargs)
        child = renpy.easy.displayable(child)
        child.visit_all(lambda c : c.per_interact())

        self.child = child

        if redraw is not None:
            renpy.display.render.redraw(self, redraw)

    def per_interact(self):
        # Force an update at the start of every interaction.
        renpy.display.render.redraw(self, 0)

    def render(self, w, h, st, at):
        self.update(st, at)

        return renpy.display.render.render(self.child, w, h, st, at)

    def predict_one(self):
        # Best-effort image prediction: failures here must never interrupt
        # the game.
        try:

            if self.predict_function:
                child = self.predict_function(*self.args, **self.kwargs)
            else:
                child, _ = self.function(0, 0, *self.args, **self.kwargs)

            if child is not None:
                renpy.display.predict.displayable(child)

        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing to `except Exception:` looks safe — confirm before changing.
        except:
            pass

    def get_placement(self):
        if not self.child:
            self.update(0, 0)

        return self.child.get_placement()

    def event(self, ev, x, y, st):
        if self.child:
            return self.child.event(ev, x, y, st)
# A cache of compiled conditions used by ConditionSwitch.
cond_cache = { }


def condition_switch_pick(switch):
    """Return the displayable of the first (condition, d) pair in `switch`
    whose condition evaluates true. A None condition is a catch-all
    default. Compiled conditions are memoized in cond_cache."""
    for cond, d in switch:
        # None always matches, acting as the default branch.
        if cond is None:
            return d

        if cond in cond_cache:
            code = cond_cache[cond]
        else:
            code = renpy.python.py_compile(cond, 'eval')
            cond_cache[cond] = code

        if renpy.python.py_eval_bytecode(code):
            return d

    raise Exception("Switch could not choose a displayable.")
def condition_switch_show(st, at, switch):
    # DynamicDisplayable callback: re-pick the branch each interaction,
    # and never ask for a timed redraw.
    chosen = condition_switch_pick(switch)
    return chosen, None
def condition_switch_predict(switch):
    # When linting, every branch should be reported so missing images are
    # caught; otherwise only predict the branch that would be shown now.
    if renpy.game.lint:
        rv = [ ]
        for _cond, d in switch:
            rv.append(d)
        return rv

    return [ condition_switch_pick(switch) ]
def ConditionSwitch(*args, **kwargs):
    """
    :doc: disp_dynamic
    This is a displayable that changes what it is showing based on
    python conditions. The positional argument should be given in
    groups of two, where each group consists of:
    * A string containing a python condition.
    * A displayable to use if the condition is true.
    The first true condition has its displayable shown, at least
    one condition should always be true.
    ::
        image jill = ConditionSwitch(
            "jill_beers > 4", "jill_drunk.png",
            "True", "jill_sober.png")
    """

    kwargs.setdefault('style', 'default')

    switch = [ ]

    if len(args) % 2 != 0:
        raise Exception('ConditionSwitch takes an even number of arguments')

    # Pair up (condition, displayable), pre-compiling each condition into
    # the module-level cond_cache so evaluation at render time is cheap.
    for cond, d in zip(args[0::2], args[1::2]):
        if cond not in cond_cache:
            code = renpy.python.py_compile(cond, 'eval')
            cond_cache[cond] = code

        d = renpy.easy.displayable(d)
        switch.append((cond, d))

    rv = DynamicDisplayable(condition_switch_show,
                            switch,
                            _predict_function=condition_switch_predict)

    # Wrap in a Position so any position properties in kwargs apply.
    return Position(rv, **kwargs)
def ShowingSwitch(*args, **kwargs):
    """
    :doc: disp_dynamic
    This is a displayable that changes what it is showing based on the
    images are showing on the screen. The positional argument should
    be given in groups of two, where each group consists of:
    * A string giving an image name, or None to indicate the default.
    * A displayable to use if the condition is true.
    A default image should be specified.
    One use of ShowingSwitch is to have side images change depending on
    the current emotion of a character. For example::
        define e = Character("Eileen",
            show_side_image=ShowingSwitch(
                "eileen happy", Image("eileen_happy_side.png", xalign=1.0, yalign=1.0),
                "eileen vhappy", Image("eileen_vhappy_side.png", xalign=1.0, yalign=1.0),
                None, Image("eileen_happy_default.png", xalign=1.0, yalign=1.0),
                )
            )
    """

    layer = kwargs.pop('layer', 'master')

    if len(args) % 2 != 0:
        raise Exception('ShowingSwitch takes an even number of positional arguments')

    # Translate each image name into a python condition string that tests
    # whether the image is showing on `layer`, then delegate to
    # ConditionSwitch.
    condargs = [ ]
    for name, d in zip(args[0::2], args[1::2]):
        if name is not None:
            if not isinstance(name, tuple):
                name = tuple(name.split())
            cond = "renpy.showing(%r, layer=%r)" % (name, layer)
        else:
            # None becomes a catch-all default branch.
            cond = None

        condargs.append(cond)
        condargs.append(d)

    return ConditionSwitch(*condargs, **kwargs)
class IgnoresEvents(Container):
    """A container that renders its child normally, but keeps it out of
    the focus system and swallows all events."""

    def __init__(self, child, **properties):
        super(IgnoresEvents, self).__init__(**properties)
        self.add(child)

    def render(self, w, h, st, at):
        cr = renpy.display.render.render(self.child, w, h, st, at)
        cw, ch = cr.get_size()
        rv = renpy.display.render.Render(cw, ch)
        # focus=False prevents the child from receiving focus.
        rv.blit(cr, (0, 0), focus=False)
        return rv

    def get_placement(self):
        return self.child.get_placement()

    # Ignores events.
    def event(self, ev, x, y, st):
        return None
def LiveCrop(rect, child, **properties):
    """
    :doc: disp_imagelike
    This created a displayable by cropping `child` to `rect`, where
    `rect` is an (x, y, width, height) tuple. ::
        image eileen cropped = LiveCrop((0, 0, 300, 300), "eileen happy")
    """

    # Thin wrapper: cropping is implemented by the Transform crop property.
    return renpy.display.motion.Transform(child, crop=rect, **properties)
class Side(Container):
    """Lays out up to nine children at compass positions around a
    center: 'c' plus the edges/corners 't', 'b', 'l', 'r', 'tl', 'tr',
    'bl', 'br'. Measured sizes are cached between renders until
    _clear() or a save is loaded."""

    possible_positions = set([ 'tl', 't', 'tr', 'r', 'br', 'b', 'bl', 'l', 'c'])

    def after_setstate(self):
        # Force a re-measure after loading from a save.
        self.sized = False

    def __init__(self, positions, style='side', **properties):
        super(Side, self).__init__(style=style, **properties)

        # Accept either a list of position names or a space-separated string.
        if isinstance(positions, basestring):
            positions = positions.split()

        seen = set()

        for i in positions:
            if not i in Side.possible_positions:
                raise Exception("Side used with impossible position '%s'." % (i,))
            if i in seen:
                raise Exception("Side used with duplicate position '%s'." % (i,))
            seen.add(i)

        self.positions = tuple(positions)
        self.sized = False

    def add(self, d):
        # Children correspond one-to-one with declared positions.
        if len(self.children) >= len(self.positions):
            raise Exception("Side has been given too many arguments.")

        super(Side, self).add(d)

    def _clear(self):
        super(Side, self)._clear()
        self.sized = False

    def render(self, width, height, st, at):

        # Map each occupied position name to its child and child index.
        pos_d = { }
        pos_i = { }

        for i, (pos, d) in enumerate(zip(self.positions, self.children)):
            pos_d[pos] = d
            pos_i[pos] = i

        # Figure out the size of each widget (and hence where the
        # widget needs to be placed).

        old_width = width
        old_height = height

        if not self.sized:
            self.sized = True

            # Deal with various spacings.
            spacing = self.style.spacing

            def spacer(a, b, c, axis):
                # Reserve `spacing` on a side only if one of its three
                # positions is actually occupied.
                if (a in pos_d) or (b in pos_d) or (c in pos_d):
                    return spacing, axis - spacing
                else:
                    return 0, axis

            self.left_space, width = spacer('tl', 'l', 'bl', width)  # W0201
            self.right_space, width = spacer('tr', 'r', 'br', width)  # W0201
            self.top_space, height = spacer('tl', 't', 'tr', height)  # W0201
            self.bottom_space, height = spacer('bl', 'b', 'br', height)  # W0201

            # The sizes of the various borders.
            left = 0
            right = 0
            top = 0
            bottom = 0
            cwidth = 0
            cheight = 0

            def sizeit(pos, width, height, owidth, oheight):
                # Measure the child at `pos` (if any), growing the running
                # (owidth, oheight) maxima; the throwaway render is killed.
                if pos not in pos_d:
                    return owidth, oheight

                rend = render(pos_d[pos], width, height, st, at)
                rv = max(owidth, rend.width), max(oheight, rend.height)
                rend.kill()
                return rv

            # Center first, then the edges constrained by it, then corners.
            cwidth, cheight = sizeit('c', width, height, 0, 0)
            cwidth, top = sizeit('t', cwidth, height, cwidth, top)
            cwidth, bottom = sizeit('b', cwidth, height, cwidth, bottom)
            left, cheight = sizeit('l', width, cheight, left, cheight)
            right, cheight = sizeit('r', width, cheight, right, cheight)
            left, top = sizeit('tl', left, top, left, top)
            left, bottom = sizeit('bl', left, bottom, left, bottom)
            right, top = sizeit('tr', right, top, right, top)
            right, bottom = sizeit('br', right, bottom, right, bottom)

            # Cache the measured sizes until the next _clear()/load.
            self.cwidth = cwidth  # W0201
            self.cheight = cheight  # W0201
            self.top = top  # W0201
            self.bottom = bottom  # W0201
            self.left = left  # W0201
            self.right = right  # W0201

        else:
            cwidth = self.cwidth
            cheight = self.cheight
            top = self.top
            bottom = self.bottom
            left = self.left
            right = self.right

        # Now, place everything onto the render.

        width = old_width
        height = old_height

        self.offsets = [ None ] * len(self.children)

        lefts = self.left_space
        rights = self.right_space
        tops = self.top_space
        bottoms = self.bottom_space

        if self.style.xfill:
            cwidth = width

        if self.style.yfill:
            cheight = height

        # Never let the center overflow the space left by the borders.
        cwidth = min(cwidth, width - left - lefts - right - rights)
        cheight = min(cheight, height - top - tops - bottom - bottoms)

        rv = renpy.display.render.Render(left + lefts + cwidth + rights + right,
                                         top + tops + cheight + bottoms + bottom)

        def place(pos, x, y, w, h):
            # Render and place the child at `pos` (if any) into rv.
            if pos not in pos_d:
                return

            d = pos_d[pos]
            i = pos_i[pos]
            rend = render(d, w, h, st, at)
            self.offsets[i] = pos_d[pos].place(rv, x, y, w, h, rend)

        # Column/row boundaries of the 3x3 grid.
        col1 = 0
        col2 = left + lefts
        col3 = left + lefts + cwidth + rights

        row1 = 0
        row2 = top + tops
        row3 = top + tops + cheight + bottoms

        place('c', col2, row2, cwidth, cheight)

        place('t', col2, row1, cwidth, top)
        place('r', col3, row2, right, cheight)
        place('b', col2, row3, cwidth, bottom)
        place('l', col1, row2, left, cheight)

        place('tl', col1, row1, left, top)
        place('tr', col3, row1, right, top)
        place('br', col3, row3, right, bottom)
        place('bl', col1, row3, left, bottom)

        return rv
class Alpha(renpy.display.core.Displayable):
    """Fades its child's opacity from `start` to `end` over `time`
    seconds, optionally repeating, using either the shown (st) or
    animation (at) timebase, with an optional time_warp function
    applied to the completion fraction."""

    def __init__(self, start, end, time, child=None, repeat=False, bounce=False,
                 anim_timebase=False, time_warp=None, **properties):
        super(Alpha, self).__init__(**properties)
        self.start = start
        self.end = end
        self.time = time
        self.child = renpy.easy.displayable(child)
        self.repeat = repeat
        # NOTE(review): `bounce` is accepted but never stored or used.
        self.anim_timebase = anim_timebase
        self.time_warp = time_warp

    def visit(self):
        return [ self.child ]

    def render(self, width, height, st, at):
        # Fix: the parameters were misleadingly named (height, width);
        # they are passed through positionally, so this rename does not
        # change behavior.
        if self.anim_timebase:
            t = at
        else:
            t = st

        # Completion fraction in [0, 1]; time == 0 means instantly done.
        if self.time:
            done = min(t / self.time, 1.0)
        else:
            done = 1.0

        if renpy.game.less_updates:
            done = 1.0
        elif self.repeat:
            done = done % 1.0
            renpy.display.render.redraw(self, 0)
        elif done != 1.0:
            # Still animating; keep redrawing every frame.
            renpy.display.render.redraw(self, 0)

        if self.time_warp:
            done = self.time_warp(done)

        alpha = self.start + done * (self.end - self.start)

        rend = renpy.display.render.render(self.child, width, height, st, at)

        w, h = rend.get_size()
        rv = renpy.display.render.Render(w, h)

        rv.blit(rend, (0, 0))
        rv.alpha = alpha

        return rv
class AdjustTimes(Container):
    """Rebases the shown (st) and animation (at) times seen by its child,
    measuring them from start_time/anim_time. A None time is latched to
    the frame time of the first render."""

    def __init__(self, child, start_time, anim_time, **properties):
        super(AdjustTimes, self).__init__(**properties)

        self.start_time = start_time
        self.anim_time = anim_time

        self.add(child)

    def render(self, w, h, st, at):

        # Latch the current frame time on first render.
        if self.start_time is None:
            self.start_time = renpy.game.interface.frame_time

        if self.anim_time is None:
            self.anim_time = renpy.game.interface.frame_time

        # The child sees times relative to the latched bases, not the
        # st/at this displayable was given.
        st = renpy.game.interface.frame_time - self.start_time
        at = renpy.game.interface.frame_time - self.anim_time

        cr = renpy.display.render.render(self.child, w, h, st, at)
        cw, ch = cr.get_size()
        rv = renpy.display.render.Render(cw, ch)
        rv.blit(cr, (0, 0))

        self.offsets = [ (0, 0) ]

        return rv

    def get_placement(self):
        return self.child.get_placement()
class LiveTile(Container):
    """
    :doc: disp_imagelike
    Tiles `child` until it fills the area allocated to this displayable.
    ::
        image bg tile = LiveTile("bg.png")
    """

    def __init__(self, child, style='tile', **properties):
        super(LiveTile, self).__init__(style=style, **properties)

        self.add(child)

    def render(self, width, height, st, at):
        cr = renpy.display.render.render(self.child, width, height, st, at)
        cw, ch = cr.get_size()
        rv = renpy.display.render.Render(width, height)

        width = int(width)
        height = int(height)
        cw = int(cw)
        ch = int(ch)

        # Blit copies of the child row by row, clipping the partial
        # tiles at the right and bottom edges via subsurface.
        for y in range(0, height, ch):
            for x in range(0, width, cw):

                ccw = min(cw, width - x)
                cch = min(ch, height - y)

                if (ccw < cw) or (cch < ch):
                    ccr = cr.subsurface((0, 0, ccw, cch))
                else:
                    ccr = cr

                rv.blit(ccr, (x, y), focus=False)

        return rv
class Flatten(Container):
    """
    :doc: disp_imagelike
    This flattens `child`, which may be made up of multiple textures, into
    a single texture.
    Certain operations, like the alpha transform property, apply to every
    texture making up a displayable, which can yield incorrect results
    when the textures overlap on screen. Flatten creates a single texture
    from multiple textures, which can prevent this problem.
    Flatten is a relatively expensive operation, and so should only be used
    when absolutely required.
    """

    def __init__(self, child, **properties):
        super(Flatten, self).__init__(**properties)

        self.add(child)

    def render(self, width, height, st, at):
        cr = renpy.display.render.render(self.child, width, height, st, at)
        cw, ch = cr.get_size()

        # Collapse the child's render into a single texture.
        tex = cr.render_to_texture(True)

        rv = renpy.display.render.Render(cw, ch)
        rv.blit(tex, (0, 0))

        # Preserve focus information from the original child render.
        rv.depends_on(cr, focus=True)

        rv.reverse = renpy.display.draw.draw_to_virt
        rv.forward = renpy.display.render.IDENTITY

        self.offsets = [ (0, 0) ]

        return rv
class AlphaMask(Container):
    """
    :doc: disp_imagelike
    This displayable takes its colors from `child`, and its alpha channel
    from the multiplication of the alpha channels of `child` and `mask`.
    The result is a displayable that has the same colors as `child`, is
    transparent where either `child` or `mask` is transparent, and is
    opaque where `child` and `mask` are both opaque.
    The `child` and `mask` parameters may be arbitrary displayables. The
    size of the AlphaMask is the size of the overlap between `child` and
    `mask`.
    Note that this takes different arguments from :func:`im.AlphaMask`,
    which uses the mask's color channel.
    """

    def __init__(self, child, mask, **properties):
        super(AlphaMask, self).__init__(**properties)

        self.add(child)
        self.mask = renpy.easy.displayable(mask)
        # Null placeholder and its cached size, rebuilt when the overlap
        # between child and mask changes.
        self.null = None
        self.size = None

    def render(self, width, height, st, at):

        cr = renpy.display.render.render(self.child, width, height, st, at)
        mr = renpy.display.render.render(self.mask, width, height, st, at)

        cw, ch = cr.get_size()
        mw, mh = mr.get_size()

        # The result covers only the overlap of child and mask.
        w = min(cw, mw)
        h = min(ch, mh)
        size = (w, h)

        if self.size != size:
            # Fix: self.size was never assigned, so the Null was being
            # recreated on every render; cache the size as intended.
            self.size = size
            self.null = Null(w, h)

        nr = renpy.display.render.render(self.null, width, height, st, at)

        rv = renpy.display.render.Render(w, h, opaque=False)

        # Use the IMAGEDISSOLVE operation to combine mask and child alphas.
        rv.operation = renpy.display.render.IMAGEDISSOLVE
        rv.operation_alpha = 1.0
        rv.operation_complete = 256.0 / (256.0 + 256.0)
        rv.operation_parameter = 256

        rv.blit(mr, (0, 0), focus=False, main=False)
        rv.blit(nr, (0, 0), focus=False, main=False)
        rv.blit(cr, (0, 0))

        return rv
|
kfcpaladin/sze-the-game
|
renpy/display/layout.py
|
Python
|
mit
| 50,634
|
[
"VisIt"
] |
c26a75e006905ff7053c465c5f14266e9c2405e6b30a27c49035808c4a910cdc
|
"""
Useful functions for polynomial interpolation.
NOTE: Because system of equations if computed using Gaussian elimination, errors
can be quite large.
"""
def coefficients_to_string(coefficients):
    """Format a highest-degree-first coefficient list as a readable
    polynomial, e.g. [1, 0, -2] -> 'x^2 - 2'."""

    terms = []

    # Walk from the constant term upward; `power` is the exponent of x.
    for power, coef in enumerate(reversed(coefficients)):
        if not coef:
            continue

        # Display whole-valued floats as integers (2.0 -> 2).
        if coef == int(coef):
            coef = int(coef)

        term = ''
        if coef == -1 and power != 0:
            term = '-'
        elif coef != 1 or power == 0:
            term = str(coef)

        if power:
            term += 'x'
            if power != 1:
                term += '^' + str(power)

        terms.append(term)

    if not terms:
        return '0'

    # Highest degree first, joining with ' + ' / ' - ' as appropriate.
    terms.reverse()
    out = terms[0]
    for term in terms[1:]:
        if term[0] == '-':
            out += ' - ' + term[1:]
        else:
            out += ' + ' + term

    return out
def points_to_matrix(points):
    """List of points (x, y) is transformed into system of linear equations.

    Returns (matrix, vector) where each matrix row is
    [x^d, x^(d-1), ..., x, 1] for d = len(points) - 1, and vector holds
    the corresponding y values.
    """
    degree = len(points) - 1
    matrix = []
    vector = []

    for x, y in points:
        matrix.append([x ** power for power in range(degree, -1, -1)])
        vector.append(y)

    return matrix, vector
def join_matrices(matrix, vector):
    """Append each vector entry to its matrix row, producing the augmented
    matrix. Mutates `matrix` in place and also returns it."""
    for row, value in zip(matrix, vector):
        row.append(value)
    return matrix
def gaussian_elimination(matrix):
    """Joins given matrix and vector in new matrix and transforms it into
    upper triangular form, using partial pivoting to limit numerical
    error. Mutates `matrix` in place and also returns it."""

    n = len(matrix)

    for col in range(n):
        # Partial pivoting: bring the largest remaining entry in this
        # column onto the diagonal.
        pivot = col
        for row in range(col + 1, n):
            if abs(matrix[row][col]) > abs(matrix[pivot][col]):
                pivot = row
        if pivot != col:
            matrix[col], matrix[pivot] = matrix[pivot], matrix[col]

        # Zero out this column below the diagonal.
        for row in range(col + 1, n):
            factor = - matrix[row][col] / matrix[col][col]
            matrix[row] = [a + factor * b for a, b in zip(matrix[row], matrix[col])]

    return matrix
def solve_system(matrix):
    """Solves system of linear equations for upper triangular matrix.

    Back-substitutes from the last row up, returning the solution vector
    (highest-degree coefficient first). Note: reverses `matrix` in place
    as a side effect.
    """
    matrix.reverse()  # bottom row (single unknown) first, for easy access

    # The last row directly yields the last unknown.
    coefficients = [matrix[0][-1] / matrix[0][-2]]

    for i in range(1, len(matrix)):
        # Subtract the contribution of the already-solved unknowns.
        known = 0
        for j in range(i):
            known += coefficients[j] * matrix[i][-j - 2]
        coefficients.append((matrix[i][-1] - known) / matrix[i][-i - 2])

    coefficients.reverse()
    return coefficients
def coefficient_interpolation(points):
    """
    Input: list of points (x, y) of a length n.
    Finds a polynomial of degree n-1 which goes exactly through these points.
    Returns a list of coefficients, highest degree first; the last
    coefficient is the polynomial's value at p(0).
    """
    matrix, vector = points_to_matrix(points)
    return solve_system(gaussian_elimination(join_matrices(matrix, vector)))
def string_interpolation(points):
    """
    Input: list of points (x, y) of a length n.
    Finds a polynomial of degree n-1 which goes exactly through these points.
    Returns a string representation of that polynomial.
    """
    return coefficients_to_string(coefficient_interpolation(points))
def polynomial_interpolation(points):
    """
    Input: list of points (x, y) of a length n.
    Finds a polynomial of degree n-1 which goes exactly through these points.
    Returns the interpolating polynomial as a callable of one argument.
    """
    # Lowest-degree first, so the index equals the power of x.
    coefs = coefficient_interpolation(points)[::-1]

    def polynomial(x):
        return sum(c * x ** power for power, c in enumerate(coefs))

    return polynomial
|
matejm/curve_fitting
|
src/polynomial_interpolation.py
|
Python
|
mit
| 3,909
|
[
"Gaussian"
] |
8bf579703e7587ed169e1a907ca28cf7a9e2104c26a37139f574807b210afa2e
|
import os
import ctypes
import numpy

# Fix: `os` is used below (os.path.abspath/join) but was never imported.
# The libcint shared library lives in ../../build relative to this file.
_cint = numpy.ctypeslib.load_library('libcint', os.path.abspath(os.path.join(__file__, '../../build')))
#_cint4 = ctypes.cdll.LoadLibrary('libcint.so.4')

from pyscf import gto, lib
# Test molecule: two hydrogens with a deliberately varied custom basis
# (S/P/D/F/g shells, several with multiple contracted functions) so that
# many integral code paths are exercised.  The '#H' atoms inside the
# string are commented out and enlarge the system if enabled.
mol = gto.M(atom='''H 0 0 0;
H .2 .5 .8;
#H 1.9 2.1 .1;
#H 2.0 .3 1.4''',
            basis = {'H': gto.basis.parse('''
H S
1990.8000000 1.0000000
H S
5.0250000 0.2709520 0.2
1.0130000 0.15 0.5573680
H S
80.8000000 0.0210870 -0.0045400 0.0000000
3.3190000 0.3461290 -0.1703520 0.0000000
0.9059000 0.0393780 0.1403820 1.0000000
H P
4.1330000 0.0868660 0.0000000
1.2000000 0.0000000 0.5000000
0.3827000 0.5010080 1.0000000
H D
1.0970000 1.0000000
H D
2.1330000 0.1868660 0.0000000
0.3827000 0.2010080 1.0000000
H F
0.7610000 1.0000000
H F
1.1330000 0.3868660 1.0000000
0.8827000 0.4010080 0.0000000
H g
1.1330000 0.3868660 0.0000000
0.8827000 0.4010080 1.0000000
''')})
def make_cintopt(atm, bas, env, intor):
    """Build a libcint optimizer object for integral `intor`.

    atm/bas/env are pyscf's raw molecule arrays; they are copied into
    C-contiguous arrays of the dtypes libcint expects, and the
    <intor>_optimizer entry point of the shared library fills in an
    opaque pointer, which is returned for later use with the integral
    function.
    """
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = c_atm.shape[0]
    nbas = c_bas.shape[0]

    cintopt = lib.c_null_ptr()
    foptinit = getattr(_cint, intor+'_optimizer')
    foptinit(ctypes.byref(cintopt),
             c_atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),
             c_bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),
             c_env.ctypes.data_as(ctypes.c_void_p))
    return cintopt
def run(intor, comp=1, suffix='_sph', thr=1e-7):
    """Exhaustively test the 2-electron integral `intor`.

    For every shell quartet (i, j, k, l) of the test molecule, the
    integral is computed twice with the libcint shared library — once
    without and once with the pre-built optimizer — and each result is
    compared against pyscf's mol.intor_by_shell reference. Any quartet
    whose difference norm exceeds `thr` is printed.

    comp:   number of components the integral returns.
    suffix: '_sph', '_cart' or '_spinor'; spinor names take no suffix.
    """
    # Fix: the original had a redundant double assignment
    # (intor = intor = ...).
    if suffix == '_spinor':
        intor = 'c%s' % intor
    else:
        intor = 'c%s%s' % (intor, suffix)
    print(intor)
    fn1 = getattr(_cint, intor)
    cintopt = make_cintopt(mol._atm, mol._bas, mol._env, intor)
    # Shared tail arguments for the optimized call.
    args = (mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
            mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
            mol._env.ctypes.data_as(ctypes.c_void_p), cintopt)
    for i in range(mol.nbas):
        for j in range(mol.nbas):
            for k in range(mol.nbas):
                for l in range(mol.nbas):
                    ref = mol.intor_by_shell(intor, [i,j,k,l], comp=comp)
                    buf = numpy.empty_like(ref)
                    # Unoptimized path: null optimizer pointer.
                    fn1(buf.ctypes.data_as(ctypes.c_void_p),
                        (ctypes.c_int*4)(i,j,k,l),
                        mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
                        mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
                        mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr())
                    if numpy.linalg.norm(ref-buf) > thr:
                        print(intor, '| nopt', i, j, k, l, numpy.linalg.norm(ref-buf))
                    # Optimized path: use the pre-built cintopt.
                    fn1(buf.ctypes.data_as(ctypes.c_void_p),
                        (ctypes.c_int*4)(i,j,k,l), *args)
                    if numpy.linalg.norm(ref-buf) > thr:
                        print(intor, '|', i, j, k, l, numpy.linalg.norm(ref-buf))
# Exercise every supported 2-electron integral variant: spherical,
# cartesian, and a long list of spinor integrals.  Some combinations use
# looser thresholds, presumably due to larger accumulated floating-point
# error in those contractions.
run('int2e')
run('int2e', suffix='_cart')
run("int2e_ig1" , 3)
run("int2e_ip1" , 3)
run("int2e_p1vxp1" , 3)
run("int2e_ig1ig2" , 9)
run("int2e_spsp1" , suffix='_spinor')
run("int2e_spsp1spsp2" , suffix='_spinor', thr=1e-6)
run("int2e_srsr1" , suffix='_spinor')
run("int2e_srsr1srsr2" , suffix='_spinor')
run("int2e_cg_sa10sp1" , 3, suffix='_spinor')
run("int2e_cg_sa10sp1spsp2" , 3, suffix='_spinor')
run("int2e_giao_sa10sp1" , 3, suffix='_spinor')
run("int2e_giao_sa10sp1spsp2" , 3, suffix='_spinor')
run("int2e_g1" , 3, suffix='_spinor')
run("int2e_spgsp1" , 3, suffix='_spinor')
run("int2e_g1spsp2" , 3, suffix='_spinor')
run("int2e_spgsp1spsp2" , 3, suffix='_spinor')
#run("int2e_pp1" , suffix='_spinor')
#run("int2e_pp2" , suffix='_spinor')
#run("int2e_pp1pp2" , suffix='_spinor')
run("int2e_spv1" , suffix='_spinor')
run("int2e_vsp1" , suffix='_spinor')
run("int2e_spsp2" , suffix='_spinor')
run("int2e_spv1spv2" , suffix='_spinor', thr=1e-6)
run("int2e_vsp1spv2" , suffix='_spinor', thr=1e-6)
run("int2e_spv1vsp2" , suffix='_spinor', thr=1e-6)
run("int2e_vsp1vsp2" , suffix='_spinor', thr=1e-6)
run("int2e_spv1spsp2" , suffix='_spinor', thr=1e-6)
run("int2e_vsp1spsp2" , suffix='_spinor', thr=1e-6)
run("int2e_ip1" , 3, suffix='_spinor')
run("int2e_ipspsp1" , 3, suffix='_spinor')
run("int2e_ip1spsp2" , 3, suffix='_spinor')
run("int2e_ipspsp1spsp2" , 3, suffix='_spinor', thr=1e-5)
run("int2e_ipsrsr1" , 3, suffix='_spinor')
run("int2e_ip1srsr2" , 3, suffix='_spinor')
run("int2e_ipsrsr1srsr2" , 3, suffix='_spinor')
run("int2e_ssp1ssp2" , suffix='_spinor')
run("int2e_ssp1sps2" , suffix='_spinor')
run("int2e_sps1ssp2" , suffix='_spinor')
run("int2e_sps1sps2" , suffix='_spinor')
run("int2e_cg_ssa10ssp2" , 3, suffix='_spinor')
run("int2e_giao_ssa10ssp2" , 3, suffix='_spinor')
run("int2e_gssp1ssp2" , 3, suffix='_spinor')
run("int2e_gauge_r1_ssp1ssp2" , suffix='_spinor', thr=1e-6)
run("int2e_gauge_r1_ssp1sps2" , suffix='_spinor', thr=1e-6)
run("int2e_gauge_r1_sps1ssp2" , suffix='_spinor', thr=1e-6)
run("int2e_gauge_r1_sps1sps2" , suffix='_spinor', thr=1e-6)
run("int2e_gauge_r2_ssp1ssp2" , suffix='_spinor', thr=1e-6)
run("int2e_gauge_r2_ssp1sps2" , suffix='_spinor', thr=1e-6)
run("int2e_gauge_r2_sps1ssp2" , suffix='_spinor', thr=1e-6)
run("int2e_gauge_r2_sps1sps2" , suffix='_spinor', thr=1e-6)
run("int2e_ipip1" , 9)
run("int2e_ipvip1" , 9)
run("int2e_ip1ip2" , 9)
|
sunqm/libcint
|
testsuite/test_int2e.py
|
Python
|
bsd-2-clause
| 6,949
|
[
"PySCF"
] |
3c28f46661ab566ceda190084e0dd5a0ea30669c514d5a8b953842231114bed2
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import textwrap
import time
import yaml
from jinja2 import BaseLoader, Environment, FileSystemLoader
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
CollectionRequirement,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections
)
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
def _display_header(path, h1, h2, w1=10, w2=7):
    """Print a two-column table header for `path`, underlining each
    column with dashes at least as wide as the header text."""
    underline1 = '-' * max(len(h1), w1)
    underline2 = '-' * max(len(h2), w2)
    display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
        path,
        h1,
        h2,
        underline1,
        underline2,
        cwidth=w1,
        vwidth=w2,
    ))
def _display_role(gr):
    """Print one '- name, version' line for an installed role, using the
    version recorded at install time when available."""
    info = gr.install_info
    version = info.get("version", None) if info else None
    display.display("- %s, %s" % (gr.name, version or "(unknown version)"))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
    """Print one 'fqcn version' table row, padding the columns so rows
    line up with the header printed by _display_header."""
    display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
        fqcn=to_text(collection),
        version=collection.latest_version,
        cwidth=max(cwidth, min_cwidth),  # Make sure the width isn't smaller than the header
        vwidth=max(vwidth, min_vwidth)
    ))
def _get_collection_widths(collections):
    """Return (fqcn_width, version_width): the lengths of the longest
    FQCN and longest version string among `collections`, which may be a
    single requirement object or an iterable of them."""
    if is_iterable(collections):
        fqcn_set = set(to_text(c) for c in collections)
        version_set = set(to_text(c.latest_version) for c in collections)
    else:
        fqcn_set = set([to_text(collections)])
        version_set = set([collections.latest_version])

    fqcn_length = len(max(fqcn_set, key=len))
    version_length = len(max(version_set, key=len))

    return fqcn_length, version_length
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
    def __init__(self, args):
        """Set up the CLI, defaulting the sub-command to 'role' when the
        user invoked the pre-collections command style."""
        # Inject role into sys.argv[1] as a backwards compatibility step
        if len(args) > 1 and args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
            # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
            # Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
            idx = 2 if args[1].startswith('-v') else 1
            args.insert(idx, 'role')

        # Populated later, one GalaxyAPI per configured server.
        self.api_servers = []
        self.galaxy = None
        super(GalaxyCLI, self).__init__(args)
    def init_parser(self):
        '''Create the argument parser for ansible-galaxy: the shared
        option groups, then the role/collection sub-parsers and the
        actions registered on each.'''

        super(GalaxyCLI, self).init_parser(
            desc="Perform various Role and Collection related operations.",
        )

        # Common arguments that apply to more than 1 action
        common = opt_help.argparse.ArgumentParser(add_help=False)
        common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
        common.add_argument('--token', '--api-key', dest='api_key',
                            help='The Ansible Galaxy API key which can be found at '
                                 'https://galaxy.ansible.com/me/preferences. You can also use ansible-galaxy login to '
                                 'retrieve this key or set the token for the GALAXY_SERVER_LIST entry.')
        common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
                            default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
        opt_help.add_verbosity_options(common)

        force = opt_help.argparse.ArgumentParser(add_help=False)
        force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
                           help='Force overwriting an existing role or collection')

        github = opt_help.argparse.ArgumentParser(add_help=False)
        github.add_argument('github_user', help='GitHub username')
        github.add_argument('github_repo', help='GitHub repository')

        offline = opt_help.argparse.ArgumentParser(add_help=False)
        offline.add_argument('--offline', dest='offline', default=False, action='store_true',
                             help="Don't query the galaxy API when creating roles")

        default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
        roles_path = opt_help.argparse.ArgumentParser(add_help=False)
        roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
                                default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
                                help='The path to the directory containing your roles. The default is the first '
                                     'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)

        collections_path = opt_help.argparse.ArgumentParser(add_help=False)
        collections_path.add_argument('-p', '--collection-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
                                      default=C.COLLECTIONS_PATHS, action=opt_help.PrependListAction,
                                      help="One or more directories to search for collections in addition "
                                           "to the default COLLECTIONS_PATHS. Separate multiple paths "
                                           "with '{0}'.".format(os.path.pathsep))

        # Add sub parser for the Galaxy role type (role or collection)
        type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
        type_parser.required = True

        # Add sub parser for the Galaxy collection actions
        collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
        collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
        collection_parser.required = True
        self.add_download_options(collection_parser, parents=[common])
        self.add_init_options(collection_parser, parents=[common, force])
        self.add_build_options(collection_parser, parents=[common, force])
        self.add_publish_options(collection_parser, parents=[common])
        self.add_install_options(collection_parser, parents=[common, force])
        self.add_list_options(collection_parser, parents=[common, collections_path])
        self.add_verify_options(collection_parser, parents=[common, collections_path])

        # Add sub parser for the Galaxy role actions
        role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
        role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
        role_parser.required = True
        self.add_init_options(role_parser, parents=[common, force, offline])
        self.add_remove_options(role_parser, parents=[common, roles_path])
        self.add_delete_options(role_parser, parents=[common, github])
        self.add_list_options(role_parser, parents=[common, roles_path])
        self.add_search_options(role_parser, parents=[common])
        self.add_import_options(role_parser, parents=[common, github])
        self.add_setup_options(role_parser, parents=[common, roles_path])
        self.add_login_options(role_parser, parents=[common])
        self.add_info_options(role_parser, parents=[common, roles_path, offline])
        self.add_install_options(role_parser, parents=[common, force, roles_path])
    def add_download_options(self, parser, parents=None):
        """Register the collection 'download' action and its options."""
        download_parser = parser.add_parser('download', parents=parents,
                                            help='Download collections and their dependencies as a tarball for an '
                                                 'offline install.')
        download_parser.set_defaults(func=self.execute_download)

        download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
        download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                     help="Don't download collection(s) listed as dependencies.")
        download_parser.add_argument('-p', '--download-path', dest='download_path',
                                     default='./collections',
                                     help='The directory to download the collections to.')
        download_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                     help='A file containing a list of collections to be downloaded.')
        download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                     help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
    def add_init_options(self, parser, parents=None):
        """Register the 'init' action. Shared between the role and
        collection sub-commands; the parser's metavar tells which one."""
        galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'

        init_parser = parser.add_parser('init', parents=parents,
                                        help='Initialize new {0} with the base structure of a '
                                             '{0}.'.format(galaxy_type))
        init_parser.set_defaults(func=self.execute_init)

        init_parser.add_argument('--init-path', dest='init_path', default='./',
                                 help='The path in which the skeleton {0} will be created. The default is the '
                                      'current working directory.'.format(galaxy_type))
        init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
                                 default=C.GALAXY_ROLE_SKELETON,
                                 help='The path to a {0} skeleton that the new {0} should be based '
                                      'upon.'.format(galaxy_type))

        obj_name_kwargs = {}
        if galaxy_type == 'collection':
            # Collection names must be a valid namespace.name FQCN.
            obj_name_kwargs['type'] = validate_collection_name
        init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
                                 **obj_name_kwargs)

        if galaxy_type == 'role':
            init_parser.add_argument('--type', dest='role_type', action='store', default='default',
                                     help="Initialize using an alternate role type. Valid types include: 'container', "
                                          "'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
    """Register the 'remove' sub-command, which deletes installed roles."""
    sub = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
    sub.add_argument('args', help='Role(s)', metavar='role', nargs='+')
    sub.set_defaults(func=self.execute_remove)
def add_delete_options(self, parser, parents=None):
    """Register the 'delete' sub-command for removing a role from Galaxy."""
    sub = parser.add_parser('delete', parents=parents,
                            help='Removes the role from Galaxy. It does not remove or alter the actual '
                                 'GitHub repository.')
    sub.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
    """Register the 'list' sub-command for showing installed roles or collections."""
    # Derive the galaxy type from which top-level action parser we were given.
    galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
    sub = parser.add_parser('list', parents=parents,
                            help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
    sub.set_defaults(func=self.execute_list)
    sub.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
def add_search_options(self, parser, parents=None):
    """Register the 'search' sub-command for querying the Galaxy database."""
    sub = parser.add_parser('search', parents=parents,
                            help='Search the Galaxy database by tags, platforms, author and multiple '
                                 'keywords.')
    sub.set_defaults(func=self.execute_search)
    sub.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
    sub.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
    sub.add_argument('--author', dest='author', help='GitHub username')
    sub.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
    """Register the 'import' sub-command for importing a role from GitHub."""
    sub = parser.add_parser('import', parents=parents, help='Import a role')
    sub.set_defaults(func=self.execute_import)
    sub.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                     help="Don't wait for import results.")
    sub.add_argument('--branch', dest='reference',
                     help='The name of a branch to import. Defaults to the repository\'s default branch '
                          '(usually master)')
    sub.add_argument('--role-name', dest='role_name',
                     help='The name the role should have, if different than the repo name')
    sub.add_argument('--status', dest='check_status', action='store_true', default=False,
                     help='Check the status of the most recent import request for given github_'
                          'user/github_repo.')
def add_setup_options(self, parser, parents=None):
    """Register the 'setup' sub-command for Galaxy/source integrations."""
    sub = parser.add_parser('setup', parents=parents,
                            help='Manage the integration between Galaxy and the given source.')
    sub.set_defaults(func=self.execute_setup)
    sub.add_argument('--remove', dest='remove_id', default=None,
                     help='Remove the integration matching the provided ID value. Use --list to see '
                          'ID values.')
    sub.add_argument('--list', dest="setup_list", action='store_true', default=False,
                     help='List all of your integrations.')
    sub.add_argument('source', help='Source')
    sub.add_argument('github_user', help='GitHub username')
    sub.add_argument('github_repo', help='GitHub repository')
    sub.add_argument('secret', help='Secret')
def add_login_options(self, parser, parents=None):
    """Register the 'login' sub-command for authenticating with api.github.com."""
    sub = parser.add_parser('login', parents=parents,
                            help="Login to api.github.com server in order to use ansible-galaxy role sub "
                                 "command such as 'import', 'delete', 'publish', and 'setup'")
    sub.set_defaults(func=self.execute_login)
    sub.add_argument('--github-token', dest='token', default=None,
                     help='Identify with github token rather than username and password.')
def add_info_options(self, parser, parents=None):
    """Register the 'info' sub-command for showing role details."""
    sub = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
    sub.set_defaults(func=self.execute_info)
    sub.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_verify_options(self, parser, parents=None):
    """Register the 'verify' sub-command for checksum-checking installed collections."""
    galaxy_type = 'collection'
    sub = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
                            'found on the server and the installed copy. This does not verify dependencies.')
    sub.set_defaults(func=self.execute_verify)
    sub.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The collection(s) name or '
                     'path/url to a tar.gz collection artifact. This is mutually exclusive with --requirements-file.')
    sub.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                     help='Ignore errors during verification and continue with the next specified collection.')
    sub.add_argument('-r', '--requirements-file', dest='requirements',
                     help='A file containing a list of collections to be verified.')
def add_install_options(self, parser, parents=None):
    """Register the 'install' sub-command for either roles or collections.

    The positional-argument help, the --ignore-errors wording, and the
    type-specific options all differ between the two galaxy types, so the
    parser is assembled conditionally.
    """
    galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
    args_kwargs = {}
    if galaxy_type == 'collection':
        args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
                              'mutually exclusive with --requirements-file.'
        ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
                             'collection. This will not ignore dependency conflict errors.'
    else:
        args_kwargs['help'] = 'Role name, URL or tar file'
        ignore_errors_help = 'Ignore errors and continue with the next specified role.'
    install_parser = parser.add_parser('install', parents=parents,
                                       help='Install {0}(s) from file(s), URL(s) or Ansible '
                                            'Galaxy'.format(galaxy_type))
    install_parser.set_defaults(func=self.execute_install)
    install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
    install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                help=ignore_errors_help)
    # --no-deps and --force-with-deps contradict each other, so argparse
    # enforces that at most one of them is supplied.
    install_exclusive = install_parser.add_mutually_exclusive_group()
    install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                   help="Don't download {0}s listed as dependencies.".format(galaxy_type))
    install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
                                   help="Force overwriting an existing {0} and its "
                                        "dependencies.".format(galaxy_type))
    if galaxy_type == 'collection':
        install_parser.add_argument('-p', '--collections-path', dest='collections_path',
                                    default=C.COLLECTIONS_PATHS[0],
                                    help='The path to the directory containing your collections.')
        install_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                    help='A file containing a list of collections to be installed.')
        install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                    help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
    else:
        install_parser.add_argument('-r', '--role-file', dest='role_file',
                                    help='A file containing a list of roles to be imported.')
        install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                                    default=False,
                                    help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
    """Register the 'build' sub-command, which packages a collection directory
    into a distributable tar.gz artifact.

    :param parser: sub-parsers action object to attach 'build' to.
    :param parents: optional list of shared parent parsers.
    """
    build_parser = parser.add_parser('build', parents=parents,
                                     # typo fix: "can be publish to" -> "can be published to"
                                     help='Build an Ansible collection artifact that can be published to Ansible '
                                          'Galaxy.')
    build_parser.set_defaults(func=self.execute_build)
    # The ('.',) default means "build the collection in the current directory".
    build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
                              help='Path to the collection(s) directory to build. This should be the directory '
                                   'that contains the galaxy.yml file. The default is the current working '
                                   'directory.')
    build_parser.add_argument('--output-path', dest='output_path', default='./',
                              help='The path in which the collection is built to. The default is the current '
                                   'working directory.')
def add_publish_options(self, parser, parents=None):
    """Register the 'publish' sub-command for uploading a built collection artifact."""
    sub = parser.add_parser('publish', parents=parents,
                            help='Publish a collection artifact to Ansible Galaxy.')
    sub.set_defaults(func=self.execute_publish)
    sub.add_argument('args', metavar='collection_path',
                     help='The path to the collection tarball to publish.')
    sub.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                     help="Don't wait for import validation results.")
    sub.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
                     help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
    """Finish base-class option processing, then sync the global display verbosity."""
    processed = super(GalaxyCLI, self).post_process_args(options)
    display.verbosity = processed.verbosity
    return processed
def run(self):
    """CLI entry point after parsing: build the Galaxy context, resolve the
    ordered list of Galaxy API servers from config and command-line args, then
    dispatch to the selected sub-command's ``func``."""
    super(GalaxyCLI, self).run()
    self.galaxy = Galaxy()
    def server_config_def(section, key, required):
        # One config definition for a per-server setting, resolvable from the
        # [galaxy_server.<section>] ini section or an ANSIBLE_GALAXY_SERVER_* env var.
        return {
            'description': 'The %s of the %s Galaxy server' % (key, section),
            'ini': [
                {
                    'section': 'galaxy_server.%s' % section,
                    'key': key,
                }
            ],
            'env': [
                {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
            ],
            'required': required,
        }
    # (setting name, required) pairs each configured server entry may define.
    server_def = [('url', True), ('username', False), ('password', False), ('token', False),
                  ('auth_url', False)]
    config_servers = []
    # Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
    server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
    for server_key in server_list:
        # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
        # section [galaxy_server.<server>] for the values url, username, password, and token.
        config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
        defs = AnsibleLoader(yaml.safe_dump(config_dict)).get_single_data()
        C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
        server_options = C.config.get_plugin_options('galaxy_server', server_key)
        # auth_url is used to create the token, but not directly by GalaxyAPI, so
        # it doesn't need to be passed as kwarg to GalaxyApi
        auth_url = server_options.pop('auth_url', None)
        token_val = server_options['token'] or NoTokenSentinel
        username = server_options['username']
        # default case if no auth info is provided.
        server_options['token'] = None
        if username:
            # Username/password basic auth takes precedence over any token value.
            server_options['token'] = BasicAuthToken(username,
                                                     server_options['password'])
        else:
            if token_val:
                if auth_url:
                    # Keycloak-backed server: exchange the token via auth_url.
                    server_options['token'] = KeycloakToken(access_token=token_val,
                                                            auth_url=auth_url,
                                                            validate_certs=not context.CLIARGS['ignore_certs'])
                else:
                    # The galaxy v1 / github / django / 'Token'
                    server_options['token'] = GalaxyToken(token=token_val)
        config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options))
    cmd_server = context.CLIARGS['api_server']
    cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
    if cmd_server:
        # Cmd args take precedence over the config entry but first check if the arg was a name and use that config
        # entry, otherwise create a new API entry for the server specified.
        config_server = next((s for s in config_servers if s.name == cmd_server), None)
        if config_server:
            self.api_servers.append(config_server)
        else:
            self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token))
    else:
        self.api_servers = config_servers
    # Default to C.GALAXY_SERVER if no servers were defined
    if len(self.api_servers) == 0:
        self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token))
    context.CLIARGS['func']()
@property
def api(self):
    """The first configured Galaxy API server — the default one used by
    single-server sub-commands."""
    servers = self.api_servers
    return servers[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True):
    """
    Parses an Ansible requirement.yml file and returns all the roles and/or collections defined in it. There are 2
    requirements file format:
        # v1 (roles only)
        - src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
          name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
          version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
          include: Path to additional requirements.yml files.
        # v2 (roles and collections)
        ---
        roles:
        # Same as v1 format just under the roles key
        collections:
        - namespace.collection
        - name: namespace.collection
          version: version identifier, multiple identifiers are separated by ','
          source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
    :param requirements_file: The path to the requirements file.
    :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
    :return: a dict containing roles and collections found in the requirements file.
    """
    requirements = {
        'roles': [],
        'collections': [],
    }
    b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
    if not os.path.exists(b_requirements_file):
        raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
    display.vvv("Reading requirement file at '%s'" % requirements_file)
    with open(b_requirements_file, 'rb') as req_obj:
        try:
            file_requirements = yaml.safe_load(req_obj)
        except YAMLError as err:
            raise AnsibleError(
                "Failed to parse the requirements yml at '%s' with the following error:\n%s"
                % (to_native(requirements_file), to_native(err)))
    if file_requirements is None:
        raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
    def parse_role_req(requirement):
        # A role entry is either an inline role spec or an 'include' that points
        # at another file whose entries are all role specs.
        if "include" not in requirement:
            role = RoleRequirement.role_yaml_parse(requirement)
            display.vvv("found role %s in yaml file" % to_text(role))
            if "name" not in role and "src" not in role:
                raise AnsibleError("Must specify name or src for role")
            return [GalaxyRole(self.galaxy, self.api, **role)]
        else:
            b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
            if not os.path.isfile(b_include_path):
                raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
                                   % (to_native(b_include_path), to_native(requirements_file)))
            with open(b_include_path, 'rb') as f_include:
                try:
                    return [GalaxyRole(self.galaxy, self.api, **r) for r in
                            (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
                except Exception as e:
                    raise AnsibleError("Unable to load data from include requirements file: %s %s"
                                       % (to_native(requirements_file), to_native(e)))
    if isinstance(file_requirements, list):
        # Older format that contains only roles
        if not allow_old_format:
            raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
                               "a list of collections to install")
        for role_req in file_requirements:
            requirements['roles'] += parse_role_req(role_req)
    else:
        # Newer format with a collections and/or roles key
        extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
        if extra_keys:
            raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
                               "file. Found: %s" % (to_native(", ".join(extra_keys))))
        for role_req in file_requirements.get('roles', []):
            requirements['roles'] += parse_role_req(role_req)
        for collection_req in file_requirements.get('collections', []):
            if isinstance(collection_req, dict):
                req_name = collection_req.get('name', None)
                if req_name is None:
                    raise AnsibleError("Collections requirement entry should contain the key name.")
                req_version = collection_req.get('version', '*')
                req_source = collection_req.get('source', None)
                if req_source:
                    # Try and match up the requirement source with our list of Galaxy API servers defined in the
                    # config, otherwise create a server with that URL without any auth.
                    req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]),
                                      GalaxyAPI(self.galaxy, "explicit_requirement_%s" % req_name, req_source))
                requirements['collections'].append((req_name, req_version, req_source))
            else:
                # A plain string means "any version (*) from the default servers".
                requirements['collections'].append((collection_req, '*', None))
    return requirements
@staticmethod
def exit_without_ignore(rc=1):
    """Abort processing by raising unless --ignore-errors was specified."""
    if context.CLIARGS['ignore_errors']:
        return
    raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
    """Render a role-info dict as an indented, human-readable text blob."""
    lines = [u"", u"Role: %s" % to_text(role_info['name'])]
    lines.append(u"\tdescription: %s" % role_info.get('description', ''))
    for key in sorted(role_info):
        # Internal bookkeeping keys are never shown.
        if key in GalaxyCLI.SKIP_INFO_KEYS:
            continue
        value = role_info[key]
        if isinstance(value, dict):
            lines.append(u"\t%s:" % (key))
            for sub_key in sorted(value):
                if sub_key in GalaxyCLI.SKIP_INFO_KEYS:
                    continue
                lines.append(u"\t\t%s: %s" % (sub_key, value[sub_key]))
        else:
            lines.append(u"\t%s: %s" % (key, value))
    return u'\n'.join(lines)
@staticmethod
def _resolve_path(path):
    """Expand environment variables and '~', then return the absolute path."""
    expanded = os.path.expandvars(path)
    expanded = os.path.expanduser(expanded)
    return os.path.abspath(expanded)
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
    """Render the default collection skeleton's galaxy.yml.j2 template.

    The template's fields are built dynamically from the documented collection
    metadata (split into required vs optional), filling in any values supplied
    via ``inject_data`` and defaulting the rest by declared type.
    """
    with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
        meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
    galaxy_meta = get_collections_galaxy_meta_info()
    required_config = []
    optional_config = []
    for meta_entry in galaxy_meta:
        config_list = required_config if meta_entry.get('required', False) else optional_config
        value = inject_data.get(meta_entry['key'], None)
        if not value:
            # No caller-supplied value: default by the entry's documented type.
            meta_type = meta_entry.get('type', 'str')
            if meta_type == 'str':
                value = ''
            elif meta_type == 'list':
                value = []
            elif meta_type == 'dict':
                value = {}
        meta_entry['value'] = value
        config_list.append(meta_entry)
    # Doc markup: L(text, url) links and C(constant) markers are converted to plain text.
    link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
    const_pattern = re.compile(r"C\(([^)]+)\)")
    def comment_ify(v):
        # Turn a documentation string (or list of strings) into a wrapped '# ' comment block.
        if isinstance(v, list):
            v = ". ".join([l.rstrip('.') for l in v])
        v = link_pattern.sub(r"\1 <\2>", v)
        v = const_pattern.sub(r"'\1'", v)
        return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
    def to_yaml(v):
        return yaml.safe_dump(v, default_flow_style=False).rstrip()
    env = Environment(loader=BaseLoader)
    env.filters['comment_ify'] = comment_ify
    env.filters['to_yaml'] = to_yaml
    template = env.from_string(meta_template)
    meta_value = template.render({'required_config': required_config, 'optional_config': optional_config})
    return meta_value
def _require_one_of_collections_requirements(self, collections, requirements_file):
    """Build a list of (name, version, source) collection requirements from
    either positional collection args or a requirements file; exactly one of
    the two must be supplied."""
    if collections and requirements_file:
        raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
    if not collections and not requirements_file:
        raise AnsibleError("You must specify a collection name or a requirements file.")
    if requirements_file:
        resolved = GalaxyCLI._resolve_path(requirements_file)
        return self._parse_requirements_file(resolved, allow_old_format=False)['collections']
    requirements = []
    for collection_input in collections:
        requirement = None
        is_file = os.path.isfile(to_bytes(collection_input, errors='surrogate_or_strict'))
        is_url = urlparse(collection_input).scheme.lower() in ['http', 'https']
        if is_file or is_url:
            # A file path or URL to a collection artifact is used verbatim as the name.
            name = collection_input
        else:
            # 'namespace.collection[:version]' — split off an optional version spec.
            name, dummy, requirement = collection_input.partition(':')
        requirements.append((name, requirement or '*', None))
    return requirements
############################
# execute actions
############################
def execute_role(self):
    """
    Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
    as listed below.
    """
    # Intentionally a no-op: this exists so the documentation build has an
    # entry for the 'role' action; real behavior lives in the execute_* methods.
def execute_collection(self):
    """
    Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
    listed below.
    """
    # Intentionally a no-op: this exists so the documentation build has an
    # entry for the 'collection' action; real behavior lives in the execute_* methods.
def execute_build(self):
    """
    Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
    By default, this command builds from the current working directory. You can optionally pass in the
    collection input path (where the ``galaxy.yml`` file is).
    """
    force = context.CLIARGS['force']
    dest = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    # The output location must be a (possibly freshly created) directory.
    if os.path.isfile(b_dest):
        raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(dest))
    if not os.path.exists(b_dest):
        os.makedirs(b_dest)
    for path in context.CLIARGS['args']:
        build_collection(GalaxyCLI._resolve_path(path), dest, force)
def execute_download(self):
    """Download collections (and optionally their dependencies) as tarballs for offline installation."""
    cli_args = context.CLIARGS
    requirements_file = cli_args['requirements']
    if requirements_file:
        requirements_file = GalaxyCLI._resolve_path(requirements_file)
    requirements = self._require_one_of_collections_requirements(cli_args['args'], requirements_file)
    download_path = GalaxyCLI._resolve_path(cli_args['download_path'])
    b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
    if not os.path.exists(b_download_path):
        os.makedirs(b_download_path)
    download_collections(requirements, download_path, self.api_servers, (not cli_args['ignore_certs']),
                         cli_args['no_deps'], cli_args['allow_pre_release'])
    return 0
def execute_init(self):
    """
    Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
    Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
    """
    galaxy_type = context.CLIARGS['type']
    init_path = context.CLIARGS['init_path']
    force = context.CLIARGS['force']
    obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
    obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
    # Values injected into the skeleton's Jinja templates.
    inject_data = dict(
        description='your {0} description'.format(galaxy_type),
        ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
    )
    if galaxy_type == 'role':
        inject_data.update(dict(
            author='your name',
            company='your company (optional)',
            license='license (GPL-2.0-or-later, MIT, etc)',
            role_name=obj_name,
            role_type=context.CLIARGS['role_type'],
            issue_tracker_url='http://example.com/issue/tracker',
            repository_url='http://example.com/repository',
            documentation_url='http://docs.example.com',
            homepage_url='http://example.com',
            # x.y of the running Ansible. The previous string slice
            # (ansible_version[:3]) truncated two-digit minor versions,
            # e.g. '2.10.1' -> '2.1'; splitting on the dots is correct.
            min_ansible_version='.'.join(ansible_version.split('.')[:2]),
        ))
        obj_path = os.path.join(init_path, obj_name)
    elif galaxy_type == 'collection':
        namespace, collection_name = obj_name.split('.', 1)
        inject_data.update(dict(
            namespace=namespace,
            collection_name=collection_name,
            version='1.0.0',
            readme='README.md',
            authors=['your name <example@domain.com>'],
            license=['GPL-2.0-or-later'],
            repository='http://example.com/repository',
            documentation='http://docs.example.com',
            homepage='http://example.com',
            issues='http://example.com/issue/tracker',
            build_ignore=[],
        ))
        obj_path = os.path.join(init_path, namespace, collection_name)
    b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
    if os.path.exists(b_obj_path):
        if os.path.isfile(obj_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
        elif not force:
            raise AnsibleError("- the directory %s already exists. "
                               "You can use --force to re-initialize this directory,\n"
                               "however it will reset any main.yml files that may have\n"
                               "been modified there already." % to_native(obj_path))
    if obj_skeleton is not None:
        own_skeleton = False
        skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
    else:
        own_skeleton = True
        obj_skeleton = self.galaxy.default_role_skeleton_path
        skeleton_ignore_expressions = ['^.*/.git_keep$']
    obj_skeleton = os.path.expanduser(obj_skeleton)
    skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
    if not os.path.exists(obj_skeleton):
        raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
            to_native(obj_skeleton), galaxy_type)
        )
    template_env = Environment(loader=FileSystemLoader(obj_skeleton))
    # create role directory
    if not os.path.exists(b_obj_path):
        os.makedirs(b_obj_path)
    for root, dirs, files in os.walk(obj_skeleton, topdown=True):
        rel_root = os.path.relpath(root, obj_skeleton)
        rel_dirs = rel_root.split(os.sep)
        rel_root_dir = rel_dirs[0]
        if galaxy_type == 'collection':
            # A collection can contain templates in playbooks/*/templates and roles/*/templates
            in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
        else:
            in_templates_dir = rel_root_dir == 'templates'
        # Prune ignored directories in-place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
        for f in files:
            filename, ext = os.path.splitext(f)
            if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                continue
            if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
                # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
                # dynamically which requires special options to be set.
                # The templated data's keys must match the key name but the inject data contains collection_name
                # instead of name. We just make a copy and change the key back to name for this file.
                template_data = inject_data.copy()
                template_data['name'] = template_data.pop('collection_name')
                meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
                b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
                with open(b_dest_file, 'wb') as galaxy_obj:
                    galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
            elif ext == ".j2" and not in_templates_dir:
                # Render *.j2 skeleton files (but leave role/playbook template dirs untouched).
                src_template = os.path.join(rel_root, f)
                dest_file = os.path.join(obj_path, rel_root, filename)
                template_env.get_template(src_template).stream(inject_data).dump(dest_file, encoding='utf-8')
            else:
                f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
                shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
        for d in dirs:
            b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
            if not os.path.exists(b_dir_path):
                os.makedirs(b_dir_path)
    display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
    """
    prints out detailed information about an installed role as well as info available from the galaxy API.
    """
    roles_path = context.CLIARGS['roles_path']
    data = ''
    for role in context.CLIARGS['args']:
        role_info = {'path': roles_path}
        gr = GalaxyRole(self.galaxy, self.api, role)
        # Locally-installed metadata, if the role is installed.
        install_info = gr.install_info
        if install_info:
            if 'version' in install_info:
                # Rename so it does not clash with the 'version' key from remote/meta data.
                install_info['installed_version'] = install_info['version']
                del install_info['version']
            role_info.update(install_info)
        # Server-side info, skipped when --offline was given.
        remote_data = False
        if not context.CLIARGS['offline']:
            remote_data = self.api.lookup_role_by_name(role, False)
        if remote_data:
            role_info.update(remote_data)
        if gr.metadata:
            role_info.update(gr.metadata)
        req = RoleRequirement()
        role_spec = req.role_yaml_parse({'role': role})
        if role_spec:
            role_info.update(role_spec)
        data = self._display_role_info(role_info)
        # FIXME: This is broken in both 1.9 and 2.0 as
        # _display_role_info() always returns something
        if not data:
            data = u"\n- the role %s was not found" % role
    self.pager(data)
def execute_verify(self):
    """Verify installed collections against the checksums published on the server."""
    cli_args = context.CLIARGS
    requirements = self._require_one_of_collections_requirements(cli_args['args'], cli_args['requirements'])
    resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(p)) for p in cli_args['collections_path']]
    verify_collections(requirements, resolved_paths, self.api_servers, (not cli_args['ignore_certs']),
                       cli_args['ignore_errors'], allow_pre_release=True)
    return 0
def execute_install(self):
    """
    Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``).
    You can pass in a list (roles or collections) or use the file
    option listed below (these are mutually exclusive). If you pass in a list, it
    can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
    """
    if context.CLIARGS['type'] == 'collection':
        # --- collection install path ---
        collections = context.CLIARGS['args']
        force = context.CLIARGS['force']
        output_path = context.CLIARGS['collections_path']
        ignore_certs = context.CLIARGS['ignore_certs']
        ignore_errors = context.CLIARGS['ignore_errors']
        requirements_file = context.CLIARGS['requirements']
        no_deps = context.CLIARGS['no_deps']
        force_deps = context.CLIARGS['force_with_deps']
        if collections and requirements_file:
            raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
        elif not collections and not requirements_file:
            raise AnsibleError("You must specify a collection name or a requirements file.")
        if requirements_file:
            requirements_file = GalaxyCLI._resolve_path(requirements_file)
        requirements = self._require_one_of_collections_requirements(collections, requirements_file)
        output_path = GalaxyCLI._resolve_path(output_path)
        collections_path = C.COLLECTIONS_PATHS
        # Warn when installing somewhere Ansible is not configured to search.
        if len([p for p in collections_path if p.startswith(output_path)]) == 0:
            display.warning("The specified collections path '%s' is not part of the configured Ansible "
                            "collections paths '%s'. The installed collection won't be picked up in an Ansible "
                            "run." % (to_text(output_path), to_text(":".join(collections_path))))
        output_path = validate_collection_path(output_path)
        b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
        if not os.path.exists(b_output_path):
            os.makedirs(b_output_path)
        install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors,
                            no_deps, force, force_deps, context.CLIARGS['allow_pre_release'])
        return 0
    # --- role install path ---
    role_file = context.CLIARGS['role_file']
    if not context.CLIARGS['args'] and role_file is None:
        # the user needs to specify one of either --role-file or specify a single user/role name
        raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
    no_deps = context.CLIARGS['no_deps']
    force_deps = context.CLIARGS['force_with_deps']
    force = context.CLIARGS['force'] or force_deps
    # Work queue of roles to install; dependency processing may append to it.
    roles_left = []
    if role_file:
        if not (role_file.endswith('.yaml') or role_file.endswith('.yml')):
            raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
        roles_left = self._parse_requirements_file(role_file)['roles']
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        for rname in context.CLIARGS['args']:
            role = RoleRequirement.role_yaml_parse(rname.strip())
            roles_left.append(GalaxyRole(self.galaxy, self.api, **role))
    for role in roles_left:
        # only process roles in roles files when names matches if given
        if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
            display.vvv('Skipping role %s' % role.name)
            continue
        display.vvv('Processing role %s ' % role.name)
        # query the galaxy API for the role data
        if role.install_info is not None:
            if role.install_info['version'] != role.version or force:
                if force:
                    display.display('- changing role %s from %s to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    role.remove()
                else:
                    display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    continue
            else:
                if not force:
                    display.display('- %s is already installed, skipping.' % str(role))
                    continue
        try:
            installed = role.install()
        except AnsibleError as e:
            display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
            self.exit_without_ignore()
            continue
        # install dependencies, if we want them
        if not no_deps and installed:
            if not role.metadata:
                display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
            else:
                role_dependencies = role.metadata.get('dependencies') or []
                for dep in role_dependencies:
                    display.debug('Installing dep %s' % dep)
                    dep_req = RoleRequirement()
                    dep_info = dep_req.role_yaml_parse(dep)
                    dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
                    if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                        # we know we can skip this, as it's not going to
                        # be found on galaxy.ansible.com
                        continue
                    if dep_role.install_info is None:
                        # Dependency not installed yet: queue it unless already pending.
                        if dep_role not in roles_left:
                            display.display('- adding dependency: %s' % to_text(dep_role))
                            roles_left.append(dep_role)
                        else:
                            display.display('- dependency %s already pending installation.' % dep_role.name)
                    else:
                        if dep_role.install_info['version'] != dep_role.version:
                            # Installed at a different version: only replace with --force-with-deps.
                            if force_deps:
                                display.display('- changing dependant role %s from %s to %s' %
                                                (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
                                dep_role.remove()
                                roles_left.append(dep_role)
                            else:
                                display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
                                                (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
                        else:
                            if force_deps:
                                roles_left.append(dep_role)
                            else:
                                display.display('- dependency %s is already installed, skipping.' % dep_role.name)
        if not installed:
            display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()
    return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
List installed collections or roles
"""
if context.CLIARGS['type'] == 'role':
self.execute_list_role()
elif context.CLIARGS['type'] == 'collection':
self.execute_list_collection()
def execute_list_role(self):
"""
List all roles installed on the local system or a specific role
"""
path_found = False
role_found = False
warnings = []
roles_search_paths = context.CLIARGS['roles_path']
role_name = context.CLIARGS['role']
for path in roles_search_paths:
role_path = GalaxyCLI._resolve_path(path)
if os.path.isdir(path):
path_found = True
else:
warnings.append("- the configured path {0} does not exist.".format(path))
continue
if role_name:
# show the requested role, if it exists
gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
if os.path.isdir(gr.path):
role_found = True
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
break
warnings.append("- the role %s was not found" % role_name)
else:
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
if not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
# Do not warn if the role was found in any of the search paths
if role_found and role_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
def execute_list_collection(self):
"""
List all collections installed on the local system
"""
collections_search_paths = set(context.CLIARGS['collections_path'])
collection_name = context.CLIARGS['collection']
default_collections_path = C.config.get_configuration_definition('COLLECTIONS_PATHS').get('default')
warnings = []
path_found = False
collection_found = False
for path in collections_search_paths:
collection_path = GalaxyCLI._resolve_path(path)
if not os.path.exists(path):
if path in default_collections_path:
# don't warn for missing default paths
continue
warnings.append("- the configured path {0} does not exist.".format(collection_path))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
path_found = True
if collection_name:
# list a specific collection
validate_collection_name(collection_name)
namespace, collection = collection_name.split('.')
collection_path = validate_collection_path(collection_path)
b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
if not os.path.exists(b_collection_path):
warnings.append("- unable to find {0} in collection paths".format(collection_name))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
collection_found = True
collection = CollectionRequirement.from_path(b_collection_path, False)
fqcn_width, version_width = _get_collection_widths(collection)
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
_display_collection(collection, fqcn_width, version_width)
else:
# list all collections
collection_path = validate_collection_path(path)
if os.path.isdir(collection_path):
display.vvv("Searching {0} for collections".format(collection_path))
collections = find_existing_collections(collection_path)
else:
# There was no 'ansible_collections/' directory in the path, so there
# or no collections here.
display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
continue
if not collections:
display.vvv("No collections found at {0}".format(collection_path))
continue
# Display header
fqcn_width, version_width = _get_collection_widths(collections)
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
# Sort collections by the namespace and name
collections.sort(key=to_text)
for collection in collections:
_display_collection(collection, fqcn_width, version_width)
# Do not warn if the specific collection was found in any of the search paths
if collection_found and collection_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
    def execute_login(self):
        """
        Verify the user's identity via Github and retrieve an auth token
        from Ansible Galaxy.
        """
        # Authenticate with github and retrieve a token
        if context.CLIARGS['token'] is None:
            if C.GALAXY_TOKEN:
                github_token = C.GALAXY_TOKEN
            else:
                # Interactive path: create a temporary Github token.
                login = GalaxyLogin(self.galaxy)
                github_token = login.create_github_token()
        else:
            github_token = context.CLIARGS['token']

        galaxy_response = self.api.authenticate(github_token)

        if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
            # Remove the token we created. ``login`` is only bound on the
            # interactive path, which is exactly the path this condition
            # selects (no --token and no GALAXY_TOKEN), so this is safe.
            login.remove_github_token()

        # Store the Galaxy token
        token = GalaxyToken()
        token.set(galaxy_response['token'])

        display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
        return 0
    def execute_import(self):
        """ used to import a role into Ansible Galaxy """

        # Map Galaxy task message levels to display colors.
        colors = {
            'INFO': 'normal',
            'WARNING': C.COLOR_WARN,
            'ERROR': C.COLOR_ERROR,
            'SUCCESS': C.COLOR_OK,
            'FAILED': C.COLOR_ERROR,
        }

        github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
        github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')

        if context.CLIARGS['check_status']:
            # Only query the status of an existing import.
            task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
        else:
            # Submit an import request
            task = self.api.create_import_task(github_user, github_repo,
                                               reference=context.CLIARGS['reference'],
                                               role_name=context.CLIARGS['role_name'])

            if len(task) > 1:
                # found multiple roles associated with github_user/github_repo
                display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
                                color='yellow')
                display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
                for t in task:
                    display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
                display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                                color=C.COLOR_CHANGED)
                return 0
            # found a single role as expected
            display.display("Successfully submitted import request %d" % task[0]['id'])
            if not context.CLIARGS['wait']:
                display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
                display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))

        if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
            # Get the status of the import
            msg_list = []  # message ids already shown, to avoid repeats
            finished = False
            while not finished:
                # Poll the task every 10s until it reaches a terminal state.
                task = self.api.get_import_task(task_id=task[0]['id'])
                for msg in task[0]['summary_fields']['task_messages']:
                    if msg['id'] not in msg_list:
                        display.display(msg['message_text'], color=colors[msg['message_type']])
                        msg_list.append(msg['id'])
                if task[0]['state'] in ['SUCCESS', 'FAILED']:
                    finished = True
                else:
                    time.sleep(10)

        return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
    def execute_delete(self):
        """ Delete a role from Ansible Galaxy. """
        github_user = context.CLIARGS['github_user']
        github_repo = context.CLIARGS['github_repo']
        resp = self.api.delete_role(github_user, github_repo)

        # NOTE(review): the listing below only appears when *more than one*
        # role was deleted; a single deletion prints just the status line.
        # Confirm that skipping the single-role listing is intentional.
        if len(resp['deleted_roles']) > 1:
            display.display("Deleted the following roles:")
            display.display("ID User Name")
            display.display("------ --------------- ----------")
            for role in resp['deleted_roles']:
                display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))

        display.display(resp['status'])

        return True
|
apollo13/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 71,608
|
[
"Galaxy"
] |
1d4b5a4d0c5f71c94be4b73a2fe3d3c94f121dbc8dced3a1e4699df925ea5616
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def AddBending(system, kb):
    """Read the soft-object angle table and attach IBM tribend bonds.

    Currently only works for ONE SINGLE soft object. For every line of
    ``tables/softAngles`` (after the leading count) a tribend interaction
    with stiffness ``kb`` is registered and bound to the first particle id.
    """
    import espressomd.interactions

    with open("tables/softAngles", "r") as angle_file:
        num_angles = int(angle_file.readline())
        print(f"Found {num_angles}")

        for _ in range(num_angles):
            # Each record holds the four particle ids of one bending element.
            tokens = angle_file.readline().split()
            pid1, pid2, pid3, pid4 = (int(t) for t in tokens[:4])

            bond = espressomd.interactions.IBM_Tribend(
                ind1=pid1, ind2=pid2, ind3=pid3, ind4=pid4, kb=kb, refShape="Initial")
            system.bonded_inter.add(bond)
            system.part.by_id(pid1).add_bond((bond, pid2, pid3, pid4))
|
pkreissl/espresso
|
samples/immersed_boundary/addBending.py
|
Python
|
gpl-3.0
| 1,450
|
[
"ESPResSo"
] |
c0dc770efa7b939d8f677efa16e4c7db10768a27a6b3c998838d2cb03800edaa
|
#!/usr/bin/env python
#/************************************************************
#*
#* Licensed to the Apache Software Foundation (ASF) under one
#* or more contributor license agreements. See the NOTICE file
#* distributed with this work for additional information
#* regarding copyright ownership. The ASF licenses this file
#* to you under the Apache License, Version 2.0 (the
#* "License"); you may not use this file except in compliance
#* with the License. You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing,
#* software distributed under the License is distributed on an
#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#* KIND, either express or implied. See the License for the
#* specific language governing permissions and limitations
#* under the License.
#*
#*************************************************************/
import sys, os
# Make the repository root importable when run from the tool directory.
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
from singa.model import *
from examples.datasets import mnist

# Index of the RBM being trained; presumably the 4th layer of a stacked
# RBM pre-training pipeline (earlier checkpoints are loaded via nb_rbm).
rbmid = 4
# batchsize: minibatch size; shape: flattened 28x28 image; std_value:
# pixel normalisation divisor.
pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
X_train, X_test, workspace = mnist.load_data(
    workspace = 'examples/rbm/rbm'+str(rbmid),
    nb_rbm = rbmid,
    checkpoint_steps = 6000,
    **pvalues)

# Energy-based model; command-line args are forwarded to singa.
m = Energy('rbm'+str(rbmid), sys.argv)

# Hidden-layer widths; the model samples with a gaussian at this stage.
out_dim = [1000, 500, 250, 30]
m.add(RBM(out_dim, sampling='gaussian', w_std=0.1, b_wd=0))

sgd = SGD(lr=0.001, decay=0.0002, momentum=0.8)
topo = Cluster(workspace)
m.compile(optimizer=sgd, cluster=topo)
# Train with contrastive divergence ('cd').
m.fit(X_train, alg='cd', nb_epoch=6000)
#result = m.evaluate(X_test, test_steps=100, test_freq=500)
|
cac2003/incubator-singa
|
tool/python/examples/mnist_rbm4.py
|
Python
|
apache-2.0
| 1,708
|
[
"Gaussian"
] |
ecd3783cbf72b1b6ece0bc7fa69261ffe8dc2dd2aff70469a61a59495ff7ea17
|
# spud - keep track of photos
# Copyright (C) 2008-2013 Brian May
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
import base64
import mimetypes
import os
import shutil
import pytz
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.db import transaction
from django.db.models import Max
from rest_framework import exceptions
from rest_framework import fields as f
from rest_framework import serializers
from rest_framework.utils import html
from . import media, models
class BinaryField(serializers.Field):
    """Serializer field that round-trips raw bytes as base64 text."""

    def to_internal_value(self, data):
        # Incoming value is a base64 string; decode it into raw bytes.
        encoded = data.encode('ASCII')
        return base64.decodebytes(encoded)

    def to_representation(self, value):
        # NOTE: encodebytes() returns bytes (with embedded newlines),
        # mirroring what the original implementation produced.
        return base64.encodebytes(value)
class CharField(f.CharField):
    """CharField variant that maps empty HTML input to None instead of ''."""
    default_empty_html = None
class ListSerializer(serializers.ListSerializer):
    """List serializer that forwards the current request to its child."""

    def set_request(self, request):
        # Propagate only into serializers that understand set_request().
        child = self.child
        if isinstance(child, (ModelSerializer, ListSerializer)):
            child.set_request(request)
class ModelSerializer(serializers.ModelSerializer):
    """Model serializer that forwards the current request to nested fields."""

    def set_request(self, request):
        # Recurse into every nested serializer that supports set_request().
        for nested in self.fields.values():
            if isinstance(nested, (ModelSerializer, ListSerializer)):
                nested.set_request(request)
class PhotoFileSerializer(ModelSerializer):
    """Serializer for one stored file (thumbnail/original) of a photo."""
    # Absolute URL computed by the model's get_url().
    url = f.URLField(source="get_url")

    class Meta:
        model = models.photo_file
        fields = ['id', 'url', 'size_key', 'width', 'height', 'mime_type', 'is_video', 'photo']
class PhotoFileListSerializer(ListSerializer):
    """Serializes photo files grouped into a dict keyed by size_key."""
    child = PhotoFileSerializer()

    def to_representation(self, value):
        grouped = {}
        render = self.child.to_representation
        for photo_file in value:
            # Collect every file under its size bucket ("orig", "thumb", ...).
            grouped.setdefault(photo_file.size_key, []).append(render(photo_file))
        return grouped
class PhotoTitleField(CharField):
    """Title field that falls back to the photo's file name when empty."""

    def get_attribute(self, obj):
        title = super(PhotoTitleField, self).get_attribute(obj)
        # Empty/None title -> use the object's name attribute instead.
        return title or obj.name
class NestedPhotoPlaceSerializer(ModelSerializer):
    """Minimal place representation embedded inside a photo."""
    class Meta:
        model = models.place
        fields = [
            'id', 'title',
        ]
        list_serializer_class = ListSerializer
class NestedPhotoSerializer(ModelSerializer):
    """Compact photo representation for embedding in other serializers."""
    # Title falls back to the photo's name when unset.
    title = PhotoTitleField(required=False, allow_null=True)
    place = NestedPhotoPlaceSerializer(read_only=True)
    # Thumbnails grouped by size key via the model's get_thumbs().
    thumbs = PhotoFileListSerializer(
        source="get_thumbs", read_only=True)

    class Meta:
        model = models.photo
        fields = [
            'id', 'title', 'description', 'datetime', 'utc_offset', 'place',
            'action', 'thumbs',
        ]
        list_serializer_class = ListSerializer
class UserSerializer(ModelSerializer):
    """Serializer for Django auth users."""
    class Meta:
        model = User
        fields = [
            'id', 'username', 'first_name', 'last_name', 'email', 'groups'
        ]
        list_serializer_class = ListSerializer
class GroupSerializer(ModelSerializer):
    """Serializer for Django auth groups."""
    class Meta:
        model = Group
        fields = ['id', 'name']
        list_serializer_class = ListSerializer
class NestedAlbumSerializer(ModelSerializer):
    """Compact album representation for embedding (e.g. in ascendant lists)."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.album
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class AlbumSerializer(ModelSerializer):
    """Full album serializer; hides revision fields from non-staff users."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedAlbumSerializer(
        source="list_ascendants", many=True, read_only=True)

    def set_request(self, request):
        super(AlbumSerializer, self).set_request(request)
        # Non-staff users never see when/where the album was revised.
        if not request.user.is_staff:
            del self.fields['revised']
            del self.fields['revised_utc_offset']

    class Meta:
        model = models.album
        list_serializer_class = ListSerializer
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants', 'title',
            'description', 'sort_name', 'sort_order',
            'revised', 'revised_utc_offset', 'parent',
        ]
class NestedCategorySerializer(ModelSerializer):
    """Compact category representation for embedding."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.category
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class CategorySerializer(ModelSerializer):
    """Full category serializer, including the ascendant chain."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedCategorySerializer(
        source="list_ascendants", many=True, read_only=True)

    class Meta:
        model = models.category
        list_serializer_class = ListSerializer
        # FIX: removed duplicate trailing 'cover_photo' entry; DRF collapses
        # duplicates into one field, so the output is unchanged.
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants', 'title',
            'description', 'sort_name', 'sort_order', 'parent',
        ]
class NestedPlaceSerializer(ModelSerializer):
    """Compact place representation for embedding."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.place
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class PlaceSerializer(ModelSerializer):
    """Full place serializer; hides street addresses from non-staff users."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedPlaceSerializer(
        source="list_ascendants", many=True, read_only=True)

    def set_request(self, request):
        super(PlaceSerializer, self).set_request(request)
        # Street-level address details are staff-only.
        if not request.user.is_staff:
            del self.fields['address']
            del self.fields['address2']

    class Meta:
        model = models.place
        list_serializer_class = ListSerializer
        # FIX: removed duplicate 'cover_photo' entry near the end; DRF
        # collapses duplicates into one field, so the output is unchanged.
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants', 'title',
            'address', 'address2', 'city', 'state', 'postcode', 'country',
            'url', 'urldesc', 'notes', 'parent', 'description',
        ]
class PersonTitleField(CharField):
    """Read-only title rendered from the person object's string form."""

    def get_attribute(self, obj):
        # Hand the whole object to to_representation().
        return obj

    def to_representation(self, value):
        return "{0}".format(value)
class NestedPersonSerializer(ModelSerializer):
    """Compact person representation for embedding (parents, spouses, ...)."""
    title = PersonTitleField(read_only=True)
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        # BUG FIX: this serializer represents *person* objects (it is used
        # for mother/father/spouse/etc.), but the model was set to
        # models.photo — an apparent copy-paste from NestedPhotoSerializer.
        model = models.person
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class PersonSerializer(ModelSerializer):
    """Full person serializer with family relations.

    Each writable relation comes in a read-only nested form plus a
    writable ``*_pk`` companion. Most personal details are stripped for
    non-staff users in set_request().
    """
    title = PersonTitleField(read_only=True)
    cover_photo = NestedPhotoSerializer(read_only=True)
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    home = PlaceSerializer(read_only=True)
    home_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.place.objects.all(), source="home",
        required=False, allow_null=True)
    work = PlaceSerializer(read_only=True)
    work_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.place.objects.all(), source="work",
        required=False, allow_null=True)
    mother = NestedPersonSerializer(read_only=True)
    mother_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="mother",
        required=False, allow_null=True)
    father = NestedPersonSerializer(read_only=True)
    father_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="father",
        required=False, allow_null=True)
    spouse = NestedPersonSerializer(read_only=True)
    spouse_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="spouse",
        required=False, allow_null=True)
    # Derived (read-only) family relations, computed on the model.
    spouses = NestedPersonSerializer(many=True, read_only=True)
    grandparents = NestedPersonSerializer(many=True, read_only=True)
    uncles_aunts = NestedPersonSerializer(many=True, read_only=True)
    parents = NestedPersonSerializer(many=True, read_only=True)
    siblings = NestedPersonSerializer(many=True, read_only=True)
    cousins = NestedPersonSerializer(many=True, read_only=True)
    children = NestedPersonSerializer(many=True, read_only=True)
    nephews_nieces = NestedPersonSerializer(many=True, read_only=True)
    grandchildren = NestedPersonSerializer(many=True, read_only=True)
    ascendants = NestedPersonSerializer(
        source="list_ascendants", many=True, read_only=True)

    def set_request(self, request):
        super(PersonSerializer, self).set_request(request)
        # Non-staff users only get the basic name/photo fields; everything
        # personal (dates, relations, contact details) is removed.
        if not request.user.is_staff:
            del self.fields['sex']
            del self.fields['dob']
            del self.fields['dod']
            del self.fields['home']
            del self.fields['home_pk']
            del self.fields['work']
            del self.fields['work_pk']
            del self.fields['father']
            del self.fields['father_pk']
            del self.fields['mother']
            del self.fields['mother_pk']
            del self.fields['spouse']
            del self.fields['spouse_pk']
            del self.fields['spouses']
            del self.fields['grandparents']
            del self.fields['uncles_aunts']
            del self.fields['parents']
            del self.fields['siblings']
            del self.fields['cousins']
            del self.fields['children']
            del self.fields['nephews_nieces']
            del self.fields['grandchildren']
            del self.fields['notes']
            del self.fields['email']
            del self.fields['ascendants']

    class Meta:
        model = models.person
        list_serializer_class = ListSerializer
        fields = [
            'id', 'title', 'description',
            'cover_photo', 'cover_photo_pk',
            'home', 'home_pk',
            'work', 'work_pk',
            'mother', 'mother_pk',
            'father', 'father_pk',
            'spouse', 'spouse_pk', 'spouses',
            'grandparents',
            'uncles_aunts',
            'parents',
            'siblings',
            'cousins',
            'children',
            'nephews_nieces',
            'grandchildren',
            'ascendants',
            'first_name', 'last_name', 'middle_name',
            'called', 'sex', 'dob', 'dod', 'notes', 'email',
            'cover_photo'
        ]
class PersonListSerializer(ListSerializer):
    """Read-only list of persons reached through a photo_person join table."""
    child = PersonSerializer()

    def get_value(self, dictionary):
        # HTML form input can carry multiple values under one name.
        if html.is_html_input(dictionary):
            return dictionary.getlist(self.field_name)
        return dictionary.get(self.field_name, None)

    def to_internal_value(self, data):
        # Writing goes through PersonPkListSerializer instead.
        raise NotImplementedError()

    def to_representation(self, value):
        render = self.child.to_representation
        return [render(link.person) for link in value.all()]
class PersonPkListSerializer(ListSerializer):
    """Writable list of person PKs backed by the photo_person join table."""
    child = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all())

    def get_value(self, dictionary):
        # HTML form input can carry multiple values under one name.
        if html.is_html_input(dictionary):
            return dictionary.getlist(self.field_name)
        return dictionary.get(self.field_name, None)

    def to_internal_value(self, data):
        entries = []
        for position, raw_pk in enumerate(data, start=1):
            # Each value must be an integer naming an existing person.
            try:
                person_pk = int(raw_pk)
            except ValueError:
                raise exceptions.ValidationError(
                    "Person '%s' is not integer." % raw_pk)

            try:
                models.person.objects.get(pk=person_pk)
            except models.person.DoesNotExist:
                raise exceptions.ValidationError(
                    "Person '%s' does not exist." % person_pk)

            entries.append({
                'person_id': person_pk,
                'position': position,
            })
        return entries

    def to_representation(self, value):
        return [link.person_id for link in value.all()]
class NestedFeedbackSerializer(ModelSerializer):
    """Feedback representation embedded in ascendant lists."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.feedback
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'rating', 'comment',
            'user_name', 'user_email', 'user_url',
            'submit_datetime', 'utc_offset',
            'ip_address', 'is_public', 'is_removed',
            'user',
        ]
        list_serializer_class = ListSerializer
class FeedbackSerializer(ModelSerializer):
    """Full feedback serializer; submission time fields are server-set."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable PK companion of the read-only nested cover_photo.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedFeedbackSerializer(
        source="list_ascendants", many=True, read_only=True)

    class Meta:
        model = models.feedback
        list_serializer_class = ListSerializer
        extra_kwargs = {
            # Server assigns these on creation; clients may not set them.
            'submit_datetime': {'read_only': True},
            'utc_offset': {'read_only': True},
        }
        # FIX: removed duplicate 'cover_photo' entry; DRF collapses
        # duplicates into one field, so the output is unchanged.
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants',
            'rating', 'comment', 'user_name', 'user_email', 'user_url',
            'submit_datetime', 'utc_offset', 'ip_address', 'is_public',
            'is_removed', 'parent', 'user'
        ]
class PhotoRelationSerializer(ModelSerializer):
    """Serializer for a described link between two photos."""
    class Meta:
        model = models.photo_relation
        list_serializer_class = ListSerializer
        fields = [
            'id',
            'photo_1', 'photo_1_pk', 'desc_1',
            'photo_2', 'photo_2_pk', 'desc_2',
        ]

    # Read-only nested photos plus writable *_pk companions.
    photo_1 = NestedPhotoSerializer(read_only=True)
    photo_1_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="photo_1",
        allow_null=True,
        style={'base_template': 'input.html'})
    photo_2 = NestedPhotoSerializer(read_only=True)
    photo_2_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="photo_2",
        allow_null=True,
        style={'base_template': 'input.html'})
# Timezone used for interpreting datetimes, from Django's TIME_ZONE setting.
default_timezone = pytz.timezone(settings.TIME_ZONE)
class PhotoListSerializer(ListSerializer):
    """Photo list serializer that can annotate relation descriptions.

    When ``related_photo_pk`` is present in the serializer context, each
    serialized photo gains a ``relation`` key holding the description from
    the photo_relation row linking it to that photo, looked up in both
    directions.
    """

    def to_representation(self, data):
        # iterable = data.all() if isinstance(data, models.Manager) else data
        iterable = data

        results = []
        for photo in iterable.all():
            result = self.child.to_representation(photo)

            if 'related_photo_pk' in self.context:
                related_photo_pk = self.context['related_photo_pk']

                # The relation may have this photo on either side; take the
                # description from whichever row matches (second lookup wins
                # if both exist).
                try:
                    pr = photo.relations_2.get(photo_1__id=related_photo_pk)
                    result['relation'] = pr.desc_2
                except models.photo_relation.DoesNotExist:
                    pass

                try:
                    pr = photo.relations_1.get(photo_2__id=related_photo_pk)
                    result['relation'] = pr.desc_1
                except models.photo_relation.DoesNotExist:
                    pass

            results.append(result)
        return results
class CreatePhotoSerializer(ModelSerializer):
orig_url = f.URLField(source="get_orig_url", read_only=True)
title = PhotoTitleField(required=False, allow_null=True)
albums_pk = serializers.PrimaryKeyRelatedField(
queryset=models.album.objects.all(), source="albums",
many=True, required=False,
style={'base_template': 'input.html'})
categorys_pk = serializers.PrimaryKeyRelatedField(
queryset=models.category.objects.all(), source="categorys",
many=True, required=False,
style={'base_template': 'input.html'})
place_pk = serializers.PrimaryKeyRelatedField(
queryset=models.place.objects.all(), source="place",
required=False, allow_null=True,
style={'base_template': 'input.html'})
persons_pk = PersonPkListSerializer(
source="photo_person_set", required=False, allow_null=True)
photographer_pk = serializers.PrimaryKeyRelatedField(
queryset=models.person.objects.all(), source="photographer",
required=False, allow_null=True,
style={'base_template': 'input.html'})
sha256_hash = BinaryField(write_only=True)
def validate(self, attrs):
if 'photo' not in self.initial_data:
raise exceptions.ValidationError('Photo was not supplied.')
file_obj = self.initial_data['photo']
if settings.IMAGE_PATH is None:
raise exceptions.PermissionDenied(
'This site does not support uploads.')
# if file_obj.size > options["maxfilesize"]:
# raise exceptions.ValidationError('Maximum file size exceeded.')
try:
m = media.get_media(file_obj.name, file_obj)
except media.UnknownMediaType:
raise exceptions.ValidationError('File type not supported.')
width, height = m.get_size()
photo_dir = models.photo.build_photo_dir(attrs['datetime'], attrs['utc_offset'])
new_name = file_obj.name
sha256_hash = m.get_sha256_hash()
mime_type, _ = mimetypes.guess_type(new_name)
is_video = m.is_video()
size_key = "orig"
if attrs['sha256_hash'] != sha256_hash:
raise exceptions.ValidationError(
"File received with incorrect sha256 hash")
del attrs['sha256_hash']
dups = models.photo_file.get_conflicts(dir, new_name, size_key, sha256_hash)
if dups.count() > 0:
raise exceptions.ValidationError(
'File already exists in db at %s.'
% ",".join([str(d.id) for d in dups]))
new_dir = models.photo_file.build_dir(is_video, size_key, photo_dir)
models.photo_file.check_filename_free(new_dir, new_name)
pf = {
'size_key': size_key,
'width': width,
'height': height,
'mime_type': mime_type,
'dir': new_dir,
'name': new_name,
'is_video': is_video,
'sha256_hash': sha256_hash,
'num_bytes': file_obj.size,
}
attrs['photo_file_set'] = [pf]
attrs['name'] = new_name
return attrs
def create(self, validated_attrs):
    """Copy the uploaded file into IMAGE_PATH and create the photo row.

    The destination path comes from the photo_file entry prepared by
    validate().  On any failure after the copy the file is removed again
    so the filesystem does not accumulate orphans.
    """
    if 'photo' not in self.initial_data:
        raise exceptions.ValidationError('Photo file not supplied')
    file_obj = self.initial_data['photo']
    validated_attrs['action'] = 'R'
    # validate() guarantees exactly one entry: the original-size file.
    pf = validated_attrs['photo_file_set'][0]
    dir = pf['dir']
    name = pf['name']
    dst = os.path.join(settings.IMAGE_PATH, dir, name)
    # Go ahead and do stuff
    print("importing to %s" % dst)
    # Force predictable permissions on the copied file / created dirs.
    umask = os.umask(0o022)
    try:
        if not os.path.lexists(os.path.dirname(dst)):
            os.makedirs(os.path.dirname(dst), 0o755)
        with open(dst, "wb") as dst_file_obj:
            # rewind: validate() already read the stream to hash it
            file_obj.seek(0)
            shutil.copyfileobj(file_obj, dst_file_obj)
    finally:
        os.umask(umask)
    try:
        m = media.get_media(dst)
        exif = m.get_normalized_exif()
        # NOTE(review): assumes get_normalized_exif() never yields a
        # 'datetime' key that would clash with the validated attrs —
        # TODO confirm against the media module.
        assert 'datetime' not in exif
        # EXIF values are defaults; validated attrs win on conflict.
        exif.update(validated_attrs)
        validated_attrs = exif
        with transaction.atomic():
            m2m_attrs = self._pop_m2m_attrs(validated_attrs)
            print(validated_attrs)
            instance = models.photo.objects.create(**validated_attrs)
            self._process_m2m(instance, m2m_attrs)
        print("imported %s/%s as %d" % (dir, name, instance.pk))
        return instance
    except Exception:
        # Roll back the filesystem copy; the DB rolls back via atomic().
        print("deleting failed import %s" % dst)
        os.remove(dst)
        raise
def _pop_m2m_attrs(self, validated_attrs):
return {
'albums': validated_attrs.pop("albums", None),
'categorys': validated_attrs.pop("categorys", None),
'persons': validated_attrs.pop("photo_person_set", None),
'photo_file_set': validated_attrs.pop("photo_file_set", []),
}
def _process_m2m(self, instance, m2m_attrs):
    """Create the many-to-many rows (albums, categorys, persons, files)
    for a freshly created photo *instance*.

    Unlike the update-side serializer there is nothing to reconcile:
    every entry is simply created.
    """
    albums = m2m_attrs["albums"]
    categorys = m2m_attrs["categorys"]
    persons = m2m_attrs["persons"]
    photo_file_set = m2m_attrs["photo_file_set"]
    # debug output left in place
    print("albums", albums)
    print("categorys", categorys)
    print("persons", persons)
    print("photo_file_set", photo_file_set)
    if albums is not None:
        for value in albums:
            models.photo_album.objects.create(
                photo=instance, album=value)
            del value
    if categorys is not None:
        for value in categorys:
            models.photo_category.objects.create(
                photo=instance, category=value)
            del value
    if persons is not None:
        for person in persons:
            # person is a dict with person_id and position keys
            models.photo_person.objects.create(
                photo=instance, **person)
            del person
    for pf in photo_file_set:
        instance.photo_file_set.create(**pf)
    return instance
class Meta:
    """DRF configuration for the upload serializer."""
    model = models.photo
    list_serializer_class = PhotoListSerializer
    # BUGFIX: 'title' appeared twice in this list; the duplicate entry
    # was removed (the field set is otherwise unchanged).
    fields = [
        'id', 'orig_url', 'sha256_hash', 'title',
        'albums_pk', 'categorys_pk', 'persons_pk',
        'place_pk', 'photographer_pk',
        'view', 'rating',
        'description', 'utc_offset', 'datetime', 'camera_make',
        'camera_model', 'flash_used', 'focal_length', 'exposure',
        'compression', 'aperture', 'level', 'iso_equiv', 'metering_mode',
        'focus_dist', 'ccd_width', 'comment',
        'photographer',
        'relations'
    ]
class PhotoSerializer(ModelSerializer):
    """Full read/write serializer for photos.

    Each m2m relation is exposed three ways: a nested read-only
    representation, a writable ``*_pk`` list that *replaces* the relation,
    and write-only ``add_*_pk`` / ``rem_*_pk`` lists for incremental
    edits.  ``update()`` reconciles all of these explicitly.
    """
    orig_url = f.URLField(source="get_orig_url", read_only=True)
    title = PhotoTitleField(required=False, allow_null=True)

    albums = AlbumSerializer(many=True, read_only=True)
    albums_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.album.objects.all(), source="albums",
        many=True, required=False,
        style={'base_template': 'input.html'})
    add_albums_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.album.objects.all(), write_only=True,
        many=True, required=False,
        style={'base_template': 'input.html'})
    rem_albums_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.album.objects.all(), write_only=True,
        many=True, required=False,
        style={'base_template': 'input.html'})

    categorys = CategorySerializer(many=True, read_only=True)
    categorys_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.category.objects.all(), source="categorys",
        many=True, required=False,
        style={'base_template': 'input.html'})
    add_categorys_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.category.objects.all(), write_only=True,
        many=True, required=False,
        style={'base_template': 'input.html'})
    rem_categorys_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.category.objects.all(), write_only=True,
        many=True, required=False,
        style={'base_template': 'input.html'})

    place = PlaceSerializer(read_only=True)
    place_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.place.objects.all(), source="place",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    persons = PersonListSerializer(
        child=NestedPersonSerializer(),
        source="photo_person_set", read_only=True)
    persons_pk = PersonPkListSerializer(
        source="photo_person_set", required=False, allow_null=True)
    add_persons_pk = PersonPkListSerializer(
        required=False, write_only=True, allow_null=True)
    rem_persons_pk = PersonPkListSerializer(
        required=False, write_only=True, allow_null=True)

    photographer = NestedPersonSerializer(read_only=True)
    photographer_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="photographer",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    feedbacks = FeedbackSerializer(many=True, read_only=True)
    thumbs = PhotoFileListSerializer(
        source="get_thumbs", read_only=True)
    videos = PhotoFileListSerializer(
        source="get_videos", read_only=True)

    def update(self, instance, validated_attrs):
        """Apply scalar field updates, then reconcile the m2m relations."""
        m2m_attrs = self._pop_m2m_attrs(validated_attrs)
        for attr, value in validated_attrs.items():
            setattr(instance, attr, value)
        instance.save()
        self._process_m2m(instance, m2m_attrs)
        # we need to get new object to ensure m2m attributes not cached
        instance = models.photo.objects.get(pk=instance.pk)
        return instance

    def _pop_m2m_attrs(self, validated_attrs):
        """Remove all m2m-related entries from *validated_attrs* (mutating
        it) and return them keyed by their processing names."""
        return {
            'albums': validated_attrs.pop("albums", None),
            'add_albums': validated_attrs.pop("add_albums_pk", None),
            'rem_albums': validated_attrs.pop("rem_albums_pk", None),
            'categorys': validated_attrs.pop("categorys", None),
            'add_categorys': validated_attrs.pop("add_categorys_pk", None),
            'rem_categorys': validated_attrs.pop("rem_categorys_pk", None),
            'persons': validated_attrs.pop("photo_person_set", None),
            'add_persons': validated_attrs.pop("add_persons_pk", None),
            'rem_persons': validated_attrs.pop("rem_persons_pk", None),
        }

    def _process_m2m(self, instance, m2m_attrs):
        """Reconcile m2m rows for *instance*.

        A non-None "replace" list (albums/categorys/persons) becomes the
        new complete relation; rem_* entries are then removed and add_*
        entries added idempotently.
        """
        albums = m2m_attrs["albums"]
        add_albums = m2m_attrs["add_albums"]
        rem_albums = m2m_attrs["rem_albums"]
        categorys = m2m_attrs["categorys"]
        add_categorys = m2m_attrs["add_categorys"]
        rem_categorys = m2m_attrs["rem_categorys"]
        persons = m2m_attrs["persons"]
        add_persons = m2m_attrs["add_persons"]
        rem_persons = m2m_attrs["rem_persons"]
        # debug output left in place
        print("albums", albums, add_albums, rem_albums)
        print("categorys", categorys, add_categorys, rem_categorys)
        print("persons", persons, add_persons, rem_persons)
        if albums is not None:
            # Keep existing rows that survive; delete the rest; add new.
            pa_list = list(instance.photo_album_set.all())
            for pa in pa_list:
                if pa.album in albums:
                    albums.remove(pa.album)
                else:
                    pa.delete()
                del pa
            for value in albums:
                models.photo_album.objects.create(
                    photo=instance, album=value)
                del value
            del pa_list
        if rem_albums is not None:
            for album in rem_albums:
                models.photo_album.objects.filter(
                    photo=instance, album=album).delete()
        if add_albums is not None:
            for album in add_albums:
                models.photo_album.objects.get_or_create(
                    photo=instance, album=album)
        if categorys is not None:
            pc_list = list(instance.photo_category_set.all())
            for pc in pc_list:
                if pc.category in categorys:
                    categorys.remove(pc.category)
                else:
                    pc.delete()
                del pc
            for value in categorys:
                models.photo_category.objects.create(
                    photo=instance, category=value)
                del value
            del pc_list
        if rem_categorys is not None:
            for category in rem_categorys:
                models.photo_category.objects.filter(
                    photo=instance, category=category).delete()
        if add_categorys is not None:
            for category in add_categorys:
                models.photo_category.objects.get_or_create(
                    photo=instance, category=category)
        if persons is not None:
            # Persons carry a position; match on (position, person_id).
            pp_list = list(instance.photo_person_set.all())
            for pp in pp_list:
                found = None
                for index, person in enumerate(persons):
                    if pp.position == person['position'] and \
                            pp.person_id == person['person_id']:
                        found = index
                if found is not None:
                    del persons[found]
                else:
                    pp.delete()
            for person in persons:
                models.photo_person.objects.create(
                    photo=instance, **person)
                del person
            del pp_list
        if rem_persons is not None:
            for person in rem_persons:
                person_id = person['person_id']
                models.photo_person.objects.filter(
                    photo=instance, person_id=person_id).delete()
        if add_persons is not None:
            for person in add_persons:
                # Append after the current highest position.
                result = models.photo_person.objects\
                    .filter(photo=instance)\
                    .aggregate(Max('position'))
                position_max = result['position__max'] or 0
                person_id = person['person_id']
                position = position_max + 1
                models.photo_person.objects.get_or_create(
                    photo=instance, person_id=person_id,
                    defaults={'position': position})
        return instance

    def set_request(self, request):
        """Hide the original-file URL from non-staff users."""
        super(PhotoSerializer, self).set_request(request)
        if not request.user.is_staff:
            del self.fields['orig_url']

    class Meta:
        model = models.photo
        extra_kwargs = {
            'name': {'read_only': True},
            'timestamp': {'read_only': True},
            'action': {'required': False},
        }
        list_serializer_class = PhotoListSerializer
        # BUGFIX: duplicate 'title' and 'photographer' entries removed
        # from this list (the field set is otherwise unchanged).
        fields = [
            'id', 'orig_url', 'title',
            'albums', 'albums_pk', 'add_albums_pk', 'rem_albums_pk',
            'categorys',
            'categorys_pk', 'add_categorys_pk', 'rem_categorys_pk',
            'place', 'place_pk',
            'persons', 'persons_pk',
            'add_persons_pk', 'rem_persons_pk',
            'photographer', 'photographer_pk',
            'feedbacks', 'thumbs', 'videos',
            'name', 'view', 'rating',
            'description', 'utc_offset', 'datetime', 'camera_make',
            'camera_model', 'flash_used', 'focal_length', 'exposure',
            'compression', 'aperture', 'level', 'iso_equiv', 'metering_mode',
            'focus_dist', 'ccd_width', 'comment', 'action', 'timestamp',
            'relations'
        ]
|
brianmay/spud
|
spud/serializers.py
|
Python
|
gpl-3.0
| 33,434
|
[
"Brian"
] |
ac9f60bbe4d9296e90d9aa055670b84e06f18699bd465bc1fe2d926aa722c5d8
|
class Node(object):
    """A single trie node: one character, a link to its parent, a dict of
    child nodes, and an optional stored value (tracked via ``isset``)."""

    def __init__(self, parent, char, value):
        self._parent = parent
        self._char = char
        self._children = {}
        self._value = value
        # A value passed to the constructor does NOT mark the node as set;
        # only assignment through the ``value`` property does.
        self._isset = False

    def add_child(self, ch, value):
        """Return the child for *ch*, creating it with *value* if absent."""
        try:
            return self._children[ch]
        except KeyError:
            child = Node(parent=self, char=ch, value=value)
            self._children[ch] = child
            return child

    def remove_child(self, char):
        """Delete the child for *char*; a missing child is ignored."""
        self._children.pop(char, None)

    def is_leaf(self):
        """True when this node has no children."""
        return not self._children

    def has_children(self):
        """True when this node has at least one child."""
        return bool(self._children)

    @property
    def parent(self):
        return self._parent

    @property
    def char(self):
        return self._char

    @property
    def children(self):
        return self._children

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # Assigning a value marks the node as explicitly set.
        self._isset = True
        self._value = value

    @property
    def isset(self):
        return self._isset

    def clear(self):
        """Drop the stored value and mark the node as not set."""
        self._value = None
        self._isset = False

    def __repr__(self):
        details = (self.__class__, str(self._parent), self._char,
                   self._children.keys(), self._value)
        return "%s(parent: %s, char: \"%s\", children: %s, value: %r)" % details

    def __str__(self):
        return "%s -> %s" % (self._char, self._children.keys())
class Trie(object):
    """Trie structure, allowing strings to be inserted and associated with a
    value.

    NOTE: written for Python 2 (``basestring``, ``dict.iteritems``).
    """

    def __init__(self):
        self._root = Node(parent = None, char = None, value = None)
        self._length = 0

    def get_node(self, s):
        """Get node corresponding to the given string.

        None is returned if node does not exist.
        """
        node = self._root
        for ch in s:
            if not ch in node.children:
                return None
            else:
                node = node.children[ch]
        return node

    def _add(self, s):
        """Add string to the trie, returning its terminal node."""
        if not isinstance(s, basestring):
            raise TypeError("String is not (derived from) basestring.")
        node = self._root
        for ch in s:
            node = node.add_child(ch, value = None)
        return node

    def has_node(self, key):
        """Return True if a prefix exists in the trie.

        Note, this will also return true for nodes not associated with a
        particular value (i.e. a string corresponding to prefix has not
        explicitly been inserted into the trie).
        """
        node = self._root
        for ch in key:
            if not ch in node.children:
                return False
            else:
                node = node.children[ch]
        return True

    def __len__(self):
        # Number of strings explicitly inserted, not number of nodes.
        return self._length

    def setdefault(self, key, defval = None):
        """dict.setdefault semantics: insert *defval* if *key* is absent."""
        if key in self:
            return self[key]
        else:
            self[key] = defval
            return defval

    def __setitem__(self, key, value):
        """Insert a string into the trie and associate it with the given value
        (can be anything, including None).

        A TypeError will be raised if the string is not derived from
        basestring.
        """
        node = self._add(key)
        if not node.isset:
            self._length += 1
        node.value = value

    def __getitem__(self, key):
        """Get value associated with the given string from the trie.

        Note, None is considered a value. An exception will be thrown if string
        is not associated with a value.
        """
        if not isinstance(key, basestring):
            raise TypeError("key is not a string")
        node = self._root
        len_key = len(key)
        if len_key == 0 and self._root.isset:
            return self._root.value
        i = 0
        for i, ch in enumerate(key):
            if not ch in node.children:
                raise KeyError("String does not exist.")
            else:
                node = node.children[ch]
        if i+1 != len_key or not node.isset:
            raise KeyError("String does not exist.")
        return node.value

    def get(self, key, default = None):
        """Return self[key], or *default* when key is missing or not a
        string."""
        try:
            return self[key]
        # BUGFIX: was a bare ``except:`` — narrowed to the exceptions
        # __getitem__ actually raises, preserving documented behaviour.
        except (KeyError, TypeError):
            return default

    def __contains__(self, key):
        try:
            self[key]
            return True
        except KeyError:
            return False

    def has_key(self, key):
        return key in self

    def __delitem__(self, key):
        """Remove all nodes that lead up to and only to the given string.

        If the string itself is a prefix of another string, then the string
        will be given a value of None (and no nodes will be removed).
        """
        node = self.get_node(key)
        if node is None or not node.isset:
            # BUGFIX: the format expression previously referenced an
            # undefined name ``s`` (NameError); the parameter is ``key``.
            raise ValueError("String (\"%s\") does not exist."%key)
        node.clear()
        while node.is_leaf() and not node.isset and not node.parent is None:
            node.parent.remove_child(node.char)
            node = node.parent
        self._length -= 1

    def get_nearest_variants(self, s, maxhd = None):
        """Find string or strings most similar to s, but not s itself.

        Returns generator, yielding tuples of Hamming distance, string,
        and value corresponding to found string.

        :maxhd: maximum Hamming distance the function will consider for finding
        nearest variant to s.
        """
        stack = [(self._root, "", 0, 0)]
        next_stack = []
        cur_maxhd = 1
        done = False
        while stack:
            node, alt_s, pos, hd = stack.pop()
            if pos == len(s) and hd > 0 and node.isset:
                done = True
                yield hd, alt_s, node.value
                continue
            # BUGFIX/robustness: only expand children while characters of
            # s remain; the original indexed s[pos] unconditionally and
            # raised IndexError when s was a proper prefix of a stored
            # string.
            if pos < len(s):
                for ch, next_node in node.children.iteritems():
                    if ch == s[pos]:
                        stack.append((next_node, alt_s + ch, pos + 1, hd))
                    elif hd < cur_maxhd:
                        stack.append((next_node, alt_s + ch, pos + 1, hd + 1))
                    elif not done:
                        # This next_node is too far, do not visit it unless nothing
                        # is found at current maxhd.
                        next_stack.append((next_node, alt_s + ch, pos + 1, hd + 1))
            if not stack and not done:
                if cur_maxhd == maxhd:
                    return
                cur_maxhd += 1
                stack = next_stack
                next_stack = []

    def neighbors(self, s, maxhd):
        """Search for all strings within the given Hamming distance of the
        given string.

        Returns generator, yielding tuples of Hamming distance, string,
        and value corresponding to found string.
        """
        if maxhd < 1:
            raise ValueError("maxhd < 1")
        if self.get_node(s) is None:
            raise ValueError("String (%s) does not exist."%s)
        stack = [(self._root, "", 0, 0)]
        while stack:
            node, alt_s, pos, hd = stack.pop()
            if pos == len(s) and node.isset and hd > 0:
                yield hd, alt_s, node.value
                continue
            # BUGFIX/robustness: guard against indexing past the end of s
            # (same proper-prefix IndexError as get_nearest_variants).
            if pos == len(s):
                continue
            for ch, next_node in node.children.iteritems():
                if ch == s[pos]:
                    stack.append((next_node, alt_s + ch, pos + 1, hd))
                elif hd < maxhd:
                    stack.append((next_node, alt_s + ch, pos + 1, hd + 1))

    def pairs(self, keylen, maxhd):
        """Generator function to iterate all pairs of strings of given length
        that are within a certain Hamming distance of each other.

        Yields tuples with the following format:
        (Hamming distance, string1, value1, string2, value2)
        """
        if keylen < 1:
            raise ValueError("keylen < 1")
        if maxhd < 1:
            raise ValueError("maxhd < 1")
        # Traverse the tree in search of strings of length keylen.
        targets = {}
        stack = [(self._root, "", 0)]
        while stack:
            node, s, depth = stack.pop()
            if depth == keylen and node.isset:
                targets[s] = node
            for ch, next_node in node.children.iteritems():
                stack.append((next_node, s + ch, depth + 1))
        explored = set()
        for s, node_s in targets.iteritems():
            # Marking s explored prevents yielding (s, s) and duplicates.
            explored.add(s)
            stack = [(self._root, "", 0, 0)]
            while stack:
                node, alt_s, pos, hd = stack.pop()
                if pos == keylen:
                    if alt_s in targets:
                        yield hd, s, node_s.value, \
                            alt_s, targets[alt_s].value
                    # No need to go deeper into the tree because only equal
                    # length strings are considered.
                    continue
                n_explored = 0
                for ch, next_node in node.children.iteritems():
                    next_alt_s = alt_s + ch
                    if next_alt_s in explored:
                        # all pairs within maxhd for len(s) prefixes have been
                        # found already for the next_alt_s branch in the tree.
                        n_explored += 1
                        continue
                    if ch == s[pos]:
                        stack.append((next_node, next_alt_s, pos + 1, hd))
                    elif hd < maxhd:
                        stack.append((next_node, next_alt_s, pos + 1, hd + 1))
                if n_explored == len(node.children):
                    # Closing off current branch of the tree for current
                    # sequence length.
                    explored.add(alt_s)
                    continue

    def pairs_ext(self, maxhd, pairfunc = lambda node: node.isset):
        """Generator function to iterate all pairs of prefixes that are within
        a certain Hamming distance of each other.

        Yields tuples with the following format:
        (Hamming distance, prefix1, value1, prefix2, value2)
        """
        # Traverse the tree in search of pairfunc nodes.
        pairfunc_nodes = {}
        stack = [(self._root, "")]
        while stack:
            node, s = stack.pop()
            if pairfunc(node):
                pairfunc_nodes[s] = node
            for ch, next_node in node.children.iteritems():
                stack.append((next_node, s + ch))
        explored = set()
        for s, node_s in pairfunc_nodes.iteritems():
            len_s = len(s)
            # Explored entries are keyed by (length, prefix) because
            # prefixes of several lengths are considered here.
            explored.add((len_s, s))
            stack = [(self._root, "", 0, 0)]
            while stack:
                node, alt_s, pos, hd = stack.pop()
                if pos == len_s:
                    if pairfunc(node):
                        yield hd, s, node_s.value, \
                            alt_s, pairfunc_nodes[alt_s].value
                    # No need to go deeper into the tree because only equal
                    # length prefixes are considered.
                    continue
                n_explored = 0
                for ch, next_node in node.children.iteritems():
                    next_alt_s = alt_s + ch
                    if (len_s, next_alt_s) in explored:
                        # all pairs within maxhd for len(s) prefixes have been
                        # found already for the next_alt_s branch in the tree.
                        n_explored += 1
                        continue
                    if ch == s[pos]:
                        stack.append((next_node, next_alt_s, pos + 1, hd))
                    elif hd < maxhd:
                        stack.append((next_node, next_alt_s, pos + 1, hd + 1))
                if n_explored == len(node.children):
                    # Closing off current branch of the tree for current
                    # sequence length.
                    explored.add((len_s, alt_s))
                    continue
|
uubram/RTCR
|
rtcr/trie/trie.py
|
Python
|
gpl-3.0
| 12,106
|
[
"VisIt"
] |
f7b436586efc9261c0987c5bd61fab64e21e907172d5f29c5a7ff5bb91d05fe0
|
from sympy import (meijerg, I, S, integrate, Integral, oo, gamma, cosh,
hyperexpand, exp, simplify, sqrt, pi, erf, erfc, sin, cos,
exp_polar, polygamma, hyper, log, expand_func)
from sympy.integrals.meijerint import (_rewrite_single, _rewrite1,
meijerint_indefinite, _inflate_g, _create_lookup_table,
meijerint_definite, meijerint_inversion)
from sympy.utilities import default_sort_key
from sympy.utilities.pytest import slow
from sympy.utilities.randtest import (verify_numerically,
random_complex_number as randcplx)
from sympy.core.compatibility import range
from sympy.abc import x, y, a, b, c, d, s, t, z
def test_rewrite_single():
    """_rewrite_single extracts (coefficient, power-of-x) arguments, rejects
    non-monomial arguments, and rewrites products numerically correctly."""
    def t(expr, c, m):
        e = _rewrite_single(meijerg([a], [b], [c], [d], expr), x)
        assert e is not None
        assert isinstance(e[0][0][2], meijerg)
        assert e[0][0][2].argument.as_coeff_mul(x) == (c, (m,))

    def tn(expr):
        # These arguments cannot be rewritten at all.
        assert _rewrite_single(meijerg([a], [b], [c], [d], expr), x) is None
    t(x, 1, x)
    t(x**2, 1, x**2)
    t(x**2 + y*x**2, y + 1, x**2)
    tn(x**2 + x)
    tn(x**y)

    def u(expr, x):
        # Numerical check: the sum of rewritten terms equals the original.
        from sympy import Add, exp, exp_polar
        r = _rewrite_single(expr, x)
        e = Add(*[res[0]*res[2] for res in r[0]]).replace(
            exp_polar, exp)  # XXX Hack?
        assert verify_numerically(e, expr, x)
    u(exp(-x)*sin(x), x)

    # The following has stopped working because hyperexpand changed slightly.
    # It is probably not worth fixing
    #u(exp(-x)*sin(x)*cos(x), x)

    # This one cannot be done numerically, since it comes out as a g-function
    # of argument 4*pi
    # NOTE This also tests a bug in inverse mellin transform (which used to
    #      turn exp(4*pi*I*t) into a factor of exp(4*pi*I)**t instead of
    #      exp_polar).
    #u(exp(x)*sin(x), x)
    assert _rewrite_single(exp(x)*sin(x), x) == \
        ([(-sqrt(2)/(2*sqrt(pi)), 0,
           meijerg(((-S(1)/2, 0, S(1)/4, S(1)/2, S(3)/4), (1,)),
                   ((), (-S(1)/2, 0)), 64*exp_polar(-4*I*pi)/x**4))], True)
def test_rewrite1():
    """_rewrite1 splits off the constant factor and power of x."""
    assert _rewrite1(x**3*meijerg([a], [b], [c], [d], x**2 + y*x**2)*5, x) == \
        (5, x**3, [(1, 0, meijerg([a], [b], [c], [d], x**2*(y + 1)))], True)
def test_meijerint_indefinite_numerically():
    """The derivative of meijerint_indefinite(g) matches g numerically at
    random complex parameter values."""
    def t(fac, arg):
        g = meijerg([a], [b], [c], [d], arg)*fac
        subs = {a: randcplx()/10, b: randcplx()/10 + I,
                c: randcplx(), d: randcplx()}
        integral = meijerint_indefinite(g, x)
        assert integral is not None
        assert verify_numerically(g.subs(subs), integral.diff(x).subs(subs), x)
    t(1, x)
    t(2, x)
    t(1, 2*x)
    t(1, x**2)
    t(5, x**S('3/2'))
    t(x**3, x)
    t(3*x**S('3/2'), 4*x**S('7/3'))
def test_meijerint_definite():
    """Degenerate (zero-width) integration ranges give a zero integral."""
    v, b = meijerint_definite(x, x, 0, 0)
    assert v.is_zero and b is True
    v, b = meijerint_definite(x, x, oo, oo)
    assert v.is_zero and b is True
def test_inflate():
    """_inflate_g(m, n) produces a product numerically equal to the
    original G-function."""
    subs = {a: randcplx()/10, b: randcplx()/10 + I, c: randcplx(),
            d: randcplx(), y: randcplx()/10}

    def t(a, b, arg, n):
        from sympy import Mul
        m1 = meijerg(a, b, arg)
        m2 = Mul(*_inflate_g(m1, n))
        # NOTE: (the random number)**9 must still be on the principal sheet.
        # Thus make b&d small to create random numbers of small imaginary part.
        return verify_numerically(m1.subs(subs), m2.subs(subs), x, b=0.1, d=-0.1)
    assert t([[a], [b]], [[c], [d]], x, 3)
    assert t([[a, y], [b]], [[c], [d]], x, 3)
    assert t([[a], [b]], [[c, y], [d]], 2*x**3, 3)
def test_recursive():
    """Products of Gaussians are handled by the recursive meijerg
    integrator."""
    from sympy import symbols, refine
    a, b, c = symbols('a b c', positive=True)
    r = exp(-(x - a)**2)*exp(-(x - b)**2)
    e = integrate(r, (x, 0, oo), meijerg=True)
    assert simplify(e.expand()) == (
        sqrt(2)*sqrt(pi)*(
            (erf(sqrt(2)*(a + b)/2) + 1)*exp(-a**2/2 + a*b - b**2/2))/4)
    e = integrate(exp(-(x - a)**2)*exp(-(x - b)**2)*exp(c*x), (x, 0, oo), meijerg=True)
    assert simplify(e) == (
        sqrt(2)*sqrt(pi)*(erf(sqrt(2)*(2*a + 2*b + c)/4) + 1)*exp(-a**2 - b**2
        + (2*a + 2*b + c)**2/8)/4)
    assert simplify(integrate(exp(-(x - a - b - c)**2), (x, 0, oo), meijerg=True)) == \
        sqrt(pi)/2*(1 + erf(a + b + c))
    assert simplify(refine(integrate(exp(-(x + a + b + c)**2), (x, 0, oo), meijerg=True))) == \
        sqrt(pi)/2*(1 - erf(a + b + c))
@slow
def test_meijerint():
    """Broad regression suite for definite/indefinite meijerg integration:
    conditions, limit substitutions, infinite ranges, and several fixed
    bugs."""
    from sympy import symbols, expand, arg
    s, t, mu = symbols('s t mu', real=True)
    assert integrate(meijerg([], [], [0], [], s*t)
                     *meijerg([], [], [mu/2], [-mu/2], t**2/4),
                     (t, 0, oo)).is_Piecewise
    s = symbols('s', positive=True)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo)) == \
        gamma(s + 1)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo),
                     meijerg=True) == gamma(s + 1)
    assert isinstance(integrate(x**s*meijerg([[], []], [[0], []], x),
                                (x, 0, oo), meijerg=False),
                      Integral)

    assert meijerint_indefinite(exp(x), x) == exp(x)

    # TODO what simplifications should be done automatically?

    # This tests "extra case" for antecedents_1.
    a, b = symbols('a b', positive=True)
    assert simplify(meijerint_definite(x**a, x, 0, b)[0]) == \
        b**(a + 1)/(a + 1)

    # This tests various conditions and expansions:
    # BUGFIX: this comparison's result was silently discarded — it must be
    # asserted to actually test anything.
    assert meijerint_definite((x + 1)**3*exp(-x), x, 0, oo) == (16, True)

    # Again, how about simplifications?
    sigma, mu = symbols('sigma mu', positive=True)
    i, c = meijerint_definite(exp(-((x - mu)/(2*sigma))**2), x, 0, oo)
    assert simplify(i) == sqrt(pi)*sigma*(2 - erfc(mu/(2*sigma)))
    assert c == True

    i, _ = meijerint_definite(exp(-mu*x)*exp(sigma*x), x, 0, oo)
    # TODO it would be nice to test the condition
    assert simplify(i) == 1/(mu - sigma)

    # Test substitutions to change limits
    assert meijerint_definite(exp(x), x, -oo, 2) == (exp(2), True)

    # Note: causes a NaN in _check_antecedents
    assert expand(meijerint_definite(exp(x), x, 0, I)[0]) == exp(I) - 1
    assert expand(meijerint_definite(exp(-x), x, 0, x)[0]) == \
        1 - exp(-exp(I*arg(x))*abs(x))

    # Test -oo to oo
    assert meijerint_definite(exp(-x**2), x, -oo, oo) == (sqrt(pi), True)
    assert meijerint_definite(exp(-abs(x)), x, -oo, oo) == (2, True)
    assert meijerint_definite(exp(-(2*x - 3)**2), x, -oo, oo) == \
        (sqrt(pi)/2, True)
    assert meijerint_definite(exp(-abs(2*x - 3)), x, -oo, oo) == (1, True)
    assert meijerint_definite(exp(-((x - mu)/sigma)**2/2)/sqrt(2*pi*sigma**2),
                              x, -oo, oo) == (1, True)

    # Test one of the extra conditions for 2 g-functinos
    assert meijerint_definite(exp(-x)*sin(x), x, 0, oo) == (S(1)/2, True)

    # Test a bug
    def res(n):
        return (1/(1 + x**2)).diff(x, n).subs(x, 1)*(-1)**n
    for n in range(6):
        assert integrate(exp(-x)*sin(x)*x**n, (x, 0, oo), meijerg=True) == \
            res(n)

    # This used to test trigexpand... now it is done by linear substitution
    assert simplify(integrate(exp(-x)*sin(x + a), (x, 0, oo), meijerg=True)
                    ) == sqrt(2)*sin(a + pi/4)/2

    # Test the condition 14 from prudnikov.
    # (This is besselj*besselj in disguise, to stop the product from being
    # recognised in the tables.)
    a, b, s = symbols('a b s')
    from sympy import And, re
    assert meijerint_definite(meijerg([], [], [a/2], [-a/2], x/4)
                              *meijerg([], [], [b/2], [-b/2], x/4)*x**(s - 1), x, 0, oo) == \
        (4*2**(2*s - 2)*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
         /(gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
           *gamma(a/2 + b/2 - s + 1)),
         And(0 < -2*re(4*s) + 8, 0 < re(a/2 + b/2 + s), re(2*s) < 1))

    # test a bug
    assert integrate(sin(x**a)*sin(x**b), (x, 0, oo), meijerg=True) == \
        Integral(sin(x**a)*sin(x**b), (x, 0, oo))

    # test better hyperexpand
    assert integrate(exp(-x**2)*log(x), (x, 0, oo), meijerg=True) == \
        (sqrt(pi)*polygamma(0, S(1)/2)/4).expand()

    # Test hyperexpand bug.
    from sympy import lowergamma
    n = symbols('n', integer=True)
    assert simplify(integrate(exp(-x)*x**n, x, meijerg=True)) == \
        lowergamma(n + 1, x)

    # Test a bug with argument 1/x
    alpha = symbols('alpha', positive=True)
    assert meijerint_definite((2 - x)**alpha*sin(alpha/x), x, 0, 2) == \
        (sqrt(pi)*alpha*gamma(alpha + 1)*meijerg(((), (alpha/2 + S(1)/2,
         alpha/2 + 1)), ((0, 0, S(1)/2), (-S(1)/2,)), alpha**S(2)/16)/4, True)

    # test a bug related to 3016
    a, s = symbols('a s', positive=True)
    assert simplify(integrate(x**s*exp(-a*x**2), (x, -oo, oo))) == \
        a**(-s/2 - S(1)/2)*((-1)**s + 1)*gamma(s/2 + S(1)/2)/2
def test_bessel():
    """Bessel-function integrals evaluated via the meijerg tables,
    including the Rosenheinrich indefinite-integral identities."""
    from sympy import besselj, besseli
    assert simplify(integrate(besselj(a, z)*besselj(b, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == \
        2*sin(pi*(a/2 - b/2))/(pi*(a - b)*(a + b))
    assert simplify(integrate(besselj(a, z)*besselj(a, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == 1/(2*a)

    # TODO more orthogonality integrals

    assert simplify(integrate(sin(z*x)*(x**2 - 1)**(-(y + S(1)/2)),
                              (x, 1, oo), meijerg=True, conds='none')
                    *2/((z/2)**y*sqrt(pi)*gamma(S(1)/2 - y))) == \
        besselj(y, z)

    # Werner Rosenheinrich
    # SOME INDEFINITE INTEGRALS OF BESSEL FUNCTIONS
    assert integrate(x*besselj(0, x), x, meijerg=True) == x*besselj(1, x)
    assert integrate(x*besseli(0, x), x, meijerg=True) == x*besseli(1, x)
    # TODO can do higher powers, but come out as high order ... should they be
    #      reduced to order 0, 1?
    assert integrate(besselj(1, x), x, meijerg=True) == -besselj(0, x)
    assert integrate(besselj(1, x)**2/x, x, meijerg=True) == \
        -(besselj(0, x)**2 + besselj(1, x)**2)/2
    # TODO more besseli when tables are extended or recursive mellin works
    assert integrate(besselj(0, x)**2/x**2, x, meijerg=True) == \
        -2*x*besselj(0, x)**2 - 2*x*besselj(1, x)**2 \
        + 2*besselj(0, x)*besselj(1, x) - besselj(0, x)**2/x
    assert integrate(besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        -besselj(0, x)**2/2
    assert integrate(x**2*besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        x**2*besselj(1, x)**2/2
    assert integrate(besselj(0, x)*besselj(1, x)/x, x, meijerg=True) == \
        (x*besselj(0, x)**2 + x*besselj(1, x)**2 -
            besselj(0, x)*besselj(1, x))
    # TODO how does besselj(0, a*x)*besselj(0, b*x) work?
    # TODO how does besselj(0, x)**2*besselj(1, x)**2 work?
    # TODO sin(x)*besselj(0, x) etc come out a mess
    # TODO can x*log(x)*besselj(0, x) be done?
    # TODO how does besselj(1, x)*besselj(0, x+a) work?
    # TODO more indefinite integrals when struve functions etc are implemented

    # test a substitution
    assert integrate(besselj(1, x**2)*x, x, meijerg=True) == \
        -besselj(0, x**2)/2
def test_inversion():
    """Inverse Laplace transforms via meijerint_inversion, including
    antecedent checks that must return None."""
    from sympy import piecewise_fold, besselj, sqrt, sin, cos, Heaviside

    def inv(f):
        return piecewise_fold(meijerint_inversion(f, s, t))
    assert inv(1/(s**2 + 1)) == sin(t)*Heaviside(t)
    assert inv(s/(s**2 + 1)) == cos(t)*Heaviside(t)
    assert inv(exp(-s)/s) == Heaviside(t - 1)
    assert inv(1/sqrt(1 + s**2)) == besselj(0, t)*Heaviside(t)

    # Test some antcedents checking.
    assert meijerint_inversion(sqrt(s)/sqrt(1 + s**2), s, t) is None
    assert inv(exp(s**2)) is None
    assert meijerint_inversion(exp(-s**2), s, t) is None
@slow
def test_lookup_table():
    """Every entry of the meijerint lookup table expands via hyperexpand
    and is numerically consistent with its formula at random points."""
    from random import uniform, randrange
    from sympy import Add
    from sympy.integrals.meijerint import z as z_dummy
    table = {}
    _create_lookup_table(table)
    for _, l in sorted(table.items()):
        for formula, terms, cond, hint in sorted(l, key=default_sort_key):
            subs = {}
            for a in list(formula.free_symbols) + [z_dummy]:
                if hasattr(a, 'properties') and a.properties:
                    # these Wilds match positive integers
                    subs[a] = randrange(1, 10)
                else:
                    subs[a] = uniform(1.5, 2.0)
            if not isinstance(terms, list):
                terms = terms(subs)

            # First test that hyperexpand can do this.
            expanded = [hyperexpand(g) for (_, g) in terms]
            assert all(x.is_Piecewise or not x.has(meijerg) for x in expanded)

            # Now test that the meijer g-function is indeed as advertised.
            expanded = Add(*[f*x for (f, x) in terms])
            a, b = formula.n(subs=subs), expanded.n(subs=subs)
            # Relative tolerance for large magnitudes, absolute for small.
            r = min(abs(a), abs(b))
            if r < 1:
                assert abs(a - b).n() <= 1e-10
            else:
                assert (abs(a - b)/r).n() <= 1e-10
def test_branch_bug():
    """Regression: branch handling in the antiderivative of erf(x**3)."""
    from sympy import powdenest, lowergamma
    # TODO combsimp cannot prove that the factor is unity
    assert powdenest(integrate(erf(x**3), x, meijerg=True).diff(x),
                     polar=True) == 2*erf(x**3)*gamma(S(2)/3)/3/gamma(S(5)/3)
    assert integrate(erf(x**3), x, meijerg=True) == \
        2*x*erf(x**3)*gamma(S(2)/3)/(3*gamma(S(5)/3)) \
        - 2*gamma(S(2)/3)*lowergamma(S(2)/3, x**6)/(3*sqrt(pi)*gamma(S(5)/3))
def test_linear_subs():
    """Linear substitutions of the integration variable are handled."""
    from sympy import besselj
    assert integrate(sin(x - 1), x, meijerg=True) == -cos(1 - x)
    assert integrate(besselj(1, x - 1), x, meijerg=True) == -besselj(0, 1 - x)
@slow
def test_probability():
# various integrals from probability theory
from sympy.abc import x, y
from sympy import symbols, Symbol, Abs, expand_mul, combsimp, powsimp, sin
mu1, mu2 = symbols('mu1 mu2', real=True, nonzero=True, finite=True)
sigma1, sigma2 = symbols('sigma1 sigma2', real=True, nonzero=True,
finite=True, positive=True)
rate = Symbol('lambda', real=True, positive=True, finite=True)
def normal(x, mu, sigma):
return 1/sqrt(2*pi*sigma**2)*exp(-(x - mu)**2/2/sigma**2)
def exponential(x, rate):
return rate*exp(-rate*x)
assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \
mu1
assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**2 + sigma1**2
assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**3 + 3*mu1*sigma1**2
assert integrate(normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1
assert integrate(y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2
assert integrate(x*y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1*mu2
assert integrate((x + y + 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2
assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
-1 + mu1 + mu2
i = integrate(x**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True)
assert not i.has(Abs)
assert simplify(i) == mu1**2 + sigma1**2
assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
sigma2**2 + mu2**2
assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1
assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \
1/rate
assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) == \
2/rate**2
def E(expr):
res1 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(x, 0, oo), (y, -oo, oo), meijerg=True)
res2 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(y, -oo, oo), (x, 0, oo), meijerg=True)
assert expand_mul(res1) == expand_mul(res2)
return res1
assert E(1) == 1
assert E(x*y) == mu1/rate
assert E(x*y**2) == mu1**2/rate + sigma1**2/rate
ans = sigma1**2 + 1/rate**2
assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans
assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans
assert simplify(E((x + y)**2) - E(x + y)**2) == ans
# Beta' distribution
alpha, beta = symbols('alpha beta', positive=True)
betadist = x**(alpha - 1)*(1 + x)**(-alpha - beta)*gamma(alpha + beta) \
/gamma(alpha)/gamma(beta)
assert integrate(betadist, (x, 0, oo), meijerg=True) == 1
i = integrate(x*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert (combsimp(i[0]), i[1]) == (alpha/(beta - 1), 1 < beta)
j = integrate(x**2*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert j[1] == (1 < beta - 1)
assert combsimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \
/(beta - 2)/(beta - 1)**2
# Beta distribution
# NOTE: this is evaluated using antiderivatives. It also tests that
# meijerint_indefinite returns the simplest possible answer.
a, b = symbols('a b', positive=True)
betadist = x**(a - 1)*(-x + 1)**(b - 1)*gamma(a + b)/(gamma(a)*gamma(b))
assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1
assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \
a/(a + b)
assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \
a*(a + 1)/(a + b)/(a + b + 1)
assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \
gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y)
# Chi distribution
k = Symbol('k', integer=True, positive=True)
chi = 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \
sqrt(2)*gamma((k + 1)/2)/gamma(k/2)
assert simplify(integrate(x**2*chi, (x, 0, oo), meijerg=True)) == k
# Chi^2 distribution
chisquared = 2**(-k/2)/gamma(k/2)*x**(k/2 - 1)*exp(-x/2)
assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chisquared, (x, 0, oo), meijerg=True)) == k
assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \
k*(k + 2)
assert combsimp(integrate(((x - k)/sqrt(2*k))**3*chisquared, (x, 0, oo),
meijerg=True)) == 2*sqrt(2)/sqrt(k)
# Dagum distribution
a, b, p = symbols('a b p', positive=True)
# XXX (x/b)**a does not work
dagum = a*p/x*(x/b)**(a*p)/(1 + x**a/b**a)**(p + 1)
assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1
# XXX conditions are a mess
arg = x*dagum
assert simplify(integrate(arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b*gamma(1 - 1/a)*gamma(p + 1 + 1/a)/(
(a*p + 1)*gamma(p))
assert simplify(integrate(x*arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b**2*gamma(1 - 2/a)*gamma(p + 1 + 2/a)/(
(a*p + 2)*gamma(p))
# F-distribution
d1, d2 = symbols('d1 d2', positive=True)
f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1 + d2))/x \
/gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2)
assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1
# TODO conditions are a mess
assert simplify(integrate(x*f, (x, 0, oo), meijerg=True, conds='none')
) == d2/(d2 - 2)
assert simplify(integrate(x**2*f, (x, 0, oo), meijerg=True, conds='none')
) == d2**2*(d1 + 2)/d1/(d2 - 4)/(d2 - 2)
# TODO gamma, rayleigh
# inverse gaussian
lamda, mu = symbols('lamda mu', positive=True)
dist = sqrt(lamda/2/pi)*x**(-S(3)/2)*exp(-lamda*(x - mu)**2/x/2/mu**2)
mysimp = lambda expr: simplify(expr.rewrite(exp))
assert mysimp(integrate(dist, (x, 0, oo))) == 1
assert mysimp(integrate(x*dist, (x, 0, oo))) == mu
assert mysimp(integrate((x - mu)**2*dist, (x, 0, oo))) == mu**3/lamda
assert mysimp(integrate((x - mu)**3*dist, (x, 0, oo))) == 3*mu**5/lamda**2
# Levi
c = Symbol('c', positive=True)
assert integrate(sqrt(c/2/pi)*exp(-c/2/(x - mu))/(x - mu)**S('3/2'),
(x, mu, oo)) == 1
# higher moments oo
# log-logistic
distn = (beta/alpha)*x**(beta - 1)/alpha**(beta - 1)/ \
(1 + x**beta/alpha**beta)**2
assert simplify(integrate(distn, (x, 0, oo))) == 1
# NOTE the conditions are a mess, but correctly state beta > 1
assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \
pi*alpha/beta/sin(pi/beta)
# (similar comment for conditions applies)
assert simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \
pi*alpha**y*y/beta/sin(pi*y/beta)
# weibull
k = Symbol('k', positive=True)
n = Symbol('n', positive=True)
distn = k/lamda*(x/lamda)**(k - 1)*exp(-(x/lamda)**k)
assert simplify(integrate(distn, (x, 0, oo))) == 1
assert simplify(integrate(x**n*distn, (x, 0, oo))) == \
lamda**n*gamma(1 + n/k)
# rice distribution
from sympy import besseli
nu, sigma = symbols('nu sigma', positive=True)
rice = x/sigma**2*exp(-(x**2 + nu**2)/2/sigma**2)*besseli(0, x*nu/sigma**2)
assert integrate(rice, (x, 0, oo), meijerg=True) == 1
# can someone verify higher moments?
# Laplace distribution
mu = Symbol('mu', real=True)
b = Symbol('b', positive=True)
laplace = exp(-abs(x - mu)/b)/2/b
assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1
assert integrate(x*laplace, (x, -oo, oo), meijerg=True) == mu
assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \
2*b**2 + mu**2
# TODO are there other distributions supported on (-oo, oo) that we can do?
# misc tests
k = Symbol('k', positive=True)
assert combsimp(expand_mul(integrate(log(x)*x**(k - 1)*exp(-x)/gamma(k),
(x, 0, oo)))) == polygamma(0, k)
def test_expint():
    """Exercise exponential-integral results (expint, Si/Ci, Shi/Chi)."""
    from sympy import (expint, unpolarify, Symbol, Ci, Si, Shi, Chi,
                       sin, cos, sinh, cosh, Ei)

    # Laplace-type integrals of exp(-z*x)/x**y over (1, oo) reduce to expint.
    assert simplify(unpolarify(integrate(exp(-z*x)/x**y, (x, 1, oo),
                    meijerg=True, conds='none'
                    ).rewrite(expint).expand(func=True))) == expint(y, z)
    assert integrate(exp(-z*x)/x, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == expint(1, z)
    assert integrate(exp(-z*x)/x**2, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(2, z).rewrite(Ei).rewrite(expint)
    assert integrate(exp(-z*x)/x**3, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(3, z).rewrite(Ei).rewrite(expint).expand()

    # Trigonometric integrals over (t, oo) and (0, z).
    t = Symbol('t', positive=True)
    assert integrate(-cos(x)/x, (x, t, oo), meijerg=True).expand() == Ci(t)
    assert integrate(-sin(x)/x, (x, t, oo), meijerg=True).expand() == \
        Si(t) - pi/2
    assert integrate(sin(x)/x, (x, 0, z), meijerg=True) == Si(z)
    assert integrate(sinh(x)/x, (x, 0, z), meijerg=True) == Shi(z)

    # Indefinite integrals rewritten through expint.
    assert integrate(exp(-x)/x, x, meijerg=True).expand().rewrite(expint) == \
        I*pi - expint(1, x)
    assert integrate(exp(-x)/x**2, x, meijerg=True).rewrite(expint).expand() \
        == expint(1, x) - exp(-x)/x - I*pi

    # On the polar domain Ci/Chi show up directly.
    u = Symbol('u', polar=True)
    assert integrate(cos(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
        == Ci(u)
    assert integrate(cosh(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
        == Chi(u)

    # Antiderivatives of the special functions themselves.
    assert integrate(expint(1, x), x, meijerg=True
                     ).rewrite(expint).expand() == x*expint(1, x) - exp(-x)
    assert integrate(expint(2, x), x, meijerg=True
                     ).rewrite(expint).expand() == \
        -x**2*expint(1, x)/2 + x*exp(-x)/2 - exp(-x)/2
    assert simplify(unpolarify(integrate(expint(y, x), x,
                    meijerg=True).rewrite(expint).expand(func=True))) == \
        -expint(y + 1, x)
    assert integrate(Si(x), x, meijerg=True) == x*Si(x) + cos(x)
    assert integrate(Ci(u), u, meijerg=True).expand() == u*Ci(u) - sin(u)
    assert integrate(Shi(x), x, meijerg=True) == x*Shi(x) - cosh(x)
    assert integrate(Chi(u), u, meijerg=True).expand() == u*Chi(u) - sinh(u)

    # Two definite "cross" integrals with known closed forms.
    assert integrate(Si(x)*exp(-x), (x, 0, oo), meijerg=True) == pi/4
    assert integrate(expint(1, x)*sin(x), (x, 0, oo), meijerg=True) == log(2)/2
def test_messy():
    """Transforms and integrals whose closed forms are awkward but known."""
    from sympy import (laplace_transform, Si, Shi, Chi, atan, Piecewise,
                       acoth, E1, besselj, acosh, asin, And, re,
                       fourier_transform, sqrt)

    assert laplace_transform(Si(x), x, s) == ((-atan(s) + pi/2)/s, 0, True)
    assert laplace_transform(Shi(x), x, s) == (acoth(s)/s, 1, True)

    # where should the logs be simplified?
    assert laplace_transform(Chi(x), x, s) == \
        ((log(s**(-2)) - log((s**2 - 1)/s**2))/(2*s), 1, True)

    # TODO maybe simplify the inequalities?
    assert laplace_transform(besselj(a, x), x, s)[1:] == \
        (0, And(S(0) < re(a/2) + S(1)/2, S(0) < re(a/2) + 1))

    # NOTE s < 0 can be done, but argument reduction is not good enough yet
    assert fourier_transform(besselj(1, x)/x, x, s, noconds=False) == \
        (Piecewise((0, 4*abs(pi**2*s**2) > 1),
                   (2*sqrt(-4*pi**2*s**2 + 1), True)), s > 0)
    # TODO FT(besselj(0,x)) - conditions are messy (but for acceptable reasons)
    #      - folding could be better

    assert integrate(E1(x)*besselj(0, x), (x, 0, oo), meijerg=True) == \
        log(1 + sqrt(2))
    assert integrate(E1(x)*besselj(1, x), (x, 0, oo), meijerg=True) == \
        log(S(1)/2 + sqrt(2)/2)

    assert integrate(1/x/sqrt(1 - x**2), x, meijerg=True) == \
        Piecewise((-acosh(1/x), 1 < abs(x**(-2))), (I*asin(1/x), True))
def test_issue_6122():
    # Complex Gaussian (Fresnel-type) integral over the whole real line.
    expected = -I*sqrt(pi)*exp(I*pi/4)
    assert integrate(exp(-I*x**2), (x, -oo, oo), meijerg=True) == expected
def test_issue_6252():
    """The antiderivative must come out free of unevaluated hyper()."""
    expr = 1/x/(a + b*x)**(S(1)/3)
    anti = integrate(expr, x, meijerg=True)
    # BUG FIX: the check has to apply to the computed antiderivative, not to
    # the integrand -- `expr` trivially contains no hyper(), so the original
    # assertion was vacuous and `anti` was never inspected.
    assert not anti.has(hyper)
    # XXX the expression is a mess, but actually upon differentiation and
    # putting in numerical values seems to work...
def test_issue_6348():
    # Fourier-type integral of a Lorentzian over the whole real line.
    result = integrate(exp(I*x)/(1 + x**2), (x, -oo, oo))
    assert result.simplify().rewrite(exp) == pi*exp(-1)
def test_fresnel():
    """Antiderivatives of sin/cos(pi*x**2/2) are the Fresnel integrals."""
    from sympy import fresnels, fresnelc
    assert expand_func(integrate(sin(pi*x**2/2), x)) == fresnels(x)
    assert expand_func(integrate(cos(pi*x**2/2), x)) == fresnelc(x)
def test_issue_6860():
    # x**x**x has no Meijer-G antiderivative; the routine must give up
    # cleanly by returning None rather than raising.
    assert meijerint_indefinite(x**x**x, x) is None
def test_issue_8368():
    # cosh(x)*exp(-t*x) integrates to a rational combination of exponentials.
    expected = ((-t - 1)*exp(x) + (-t + 1)*exp(-x))*exp(-t*x)/2/(t**2 - 1)
    assert meijerint_indefinite(cosh(x)*exp(-x*t), x) == expected
|
Shaswat27/sympy
|
sympy/integrals/tests/test_meijerint.py
|
Python
|
bsd-3-clause
| 27,490
|
[
"Gaussian"
] |
6c4b20f70647ccecdba784e9fff3cadeddb0678dc5e68704f9915c68a46e2465
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# URL routes: static pages first, then admin, user management / auth,
# and finally media files served straight from MEDIA_ROOT.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),

    # User management
    url(r'^users/', include("gin.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
|
adhoc434/growninnyc
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,273
|
[
"VisIt"
] |
d980ca505a51da62d2f8c6af2061db80d986e8c92c398e19ca4e4e2450132b32
|
#*************************************************************************
# Copyright (C) 2015 by Arash Bakhtiari
# You may not use this file except in compliance with the License.
# You obtain a copy of the License in the LICENSE file.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*************************************************************************
#*************************************************************************
# In order to run this script, you should do the following in advance:
#
# 1- Make sure you have VisIt package on your machine; for LRZ Linux cluster,
# one should load the VisIt module:
#
# >>> module load visit
#
# 2- run the script on the machine by invoking visit
#
# >>> visit -cli -nowin -s vis.py -i<vtk-files-dir>
#
#
# IMPORTANT NOTE: make sure you are using the proper system on which Xlib is
# accessible by VisIt; this means you need to run the code on special nodes;
# namely Render Nodes. For instace, for linux cluster in LRZ one should should
# use the following command on the remote visualization nodes:
#
# >>> rvglrun visit -cli -nowin -s vis.py -i<vtk-files-dir>
#
# For more information, please refer to the LRZ user manual web-page:
#
# https://www.lrz.de/services/v2c_en/remote_visualisation_en/super_muc_users_en/
#*************************************************************************
############################################################################
# IMPORT SYSTEM LIBRARIES
############################################################################
import time
import sys
import os
############################################################################
# IMPORT LOCAL LIBRARIES
############################################################################
from visit import *
from vis_plot_utils import *
from vis_plot_slice import *
from vis_plot_porous import *
from vis_plot_taylor_green import *
from vis_plot_two_vortex_tube import *
############################################################################
# INPUT ARGUMENTS
############################################################################
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest='input_dir', action='store')
# parse_known_args() tolerates the extra flags VisIt's CLI passes through.
args, unknown = parser.parse_known_args()
############################################################################
# SET THE TIME STRING
############################################################################
# Timestamp used to keep every run's rendered images in their own directory.
TIMESTR = time.strftime("%Y%m%d-%H%M%S")
############################################################################
# DATABASES
############################################################################
VTK_DIR = args.input_dir
IMAGE_DIR = VTK_DIR+"/images-"+TIMESTR
os.makedirs(IMAGE_DIR)
# The trailing " database" suffix tells VisIt to treat the glob as a time
# series rather than as a single file.
CON_VTK_FILES = VTK_DIR+"/"+"conc_T*_P.pvtu database"
CON_VTK_FILES1_0 = VTK_DIR+"/"+"conc01_T0000_P.pvtu"
CON_VTK_FILES2_0 = VTK_DIR+"/"+"conc02_T0000_P.pvtu"
CON_VTK_FILES3_0 = VTK_DIR+"/"+"conc03_T0000_P.pvtu"
CON_VTK_FILES1 = VTK_DIR+"/"+"conc01_T*_P.pvtu database"
CON_VTK_FILES2 = VTK_DIR+"/"+"conc02_T*_P.pvtu database"
CON_VTK_FILES3 = VTK_DIR+"/"+"conc03_T*_P.pvtu database"
RHO_VTK_FILES = VTK_DIR+"/"+"stokes_rho_0_.pvtu"
# NOTE(review): VEL_VTK_FILES is assigned twice; the single-file value on the
# next line is dead and only the time-series value after it takes effect --
# confirm whether the single-file variant is still needed anywhere.
VEL_VTK_FILES = VTK_DIR+"/"+"stokes_vel_0_.pvtu"
VEL_VTK_FILES = VTK_DIR+"/"+"vel_T*_P.pvtu database"
VOR_VTK_FILES = VTK_DIR+"/"+"vort_T*_P.pvtu database"
## uncomment for taylor-green
#CON_VTK_FILES = VTK_DIR+"/"+"conc_T*_P.pvtu database"
#VEL_VTK_FILES = VTK_DIR+"/"+"velocity_T0000_P.pvtu"
############################################################################
# VISUALIZATION SCENARIOS
############################################################################
def vis_slice(vtk_files, output_dir):
    """Render a slice plot of the given database and save all frames."""
    OpenDatabase(vtk_files)
    draw_slice()
    save_images(output_dir)
def vis_porous(rho_vtk_files, vel_vtk_files, conc_vtk_files, output_dir):
    """Porous-media scene: geometry, velocity and one concentration field."""
    # Porous geometry derived from the density field, with a cutaway.
    OpenDatabase(rho_vtk_files, 0)
    draw_porous_media_IV()
    cut_porous_media()
    # Velocity field overlay.
    OpenDatabase(vel_vtk_files, 1)
    ActivateDatabase(vel_vtk_files)
    draw_porous_velocity()
    # Concentration field overlay.
    OpenDatabase(conc_vtk_files, 2)
    ActivateDatabase(conc_vtk_files)
    draw_concentration_field()
    set_view()
    save_images(output_dir)
def vis_porous_three_spheres(rho_vtk_files, vel_vtk_files, conc_vtk_files1, conc_vtk_files2, conc_vtk_files3, output_dir):
    """Porous-media scene with three concentration fields (blue/green/yellow)."""
    # Porous geometry derived from the density field, with a cutaway.
    OpenDatabase(rho_vtk_files, 0)
    draw_porous_media_IV()
    cut_porous_media()
    # Velocity field overlay.
    OpenDatabase(vel_vtk_files, 1)
    ActivateDatabase(vel_vtk_files)
    draw_porous_velocity()
    # One overlay per concentration field, each with its own color map.
    OpenDatabase(conc_vtk_files1, 2)
    ActivateDatabase(conc_vtk_files1)
    draw_three_concentration_fields(2, 'b')
    OpenDatabase(conc_vtk_files2, 3)
    ActivateDatabase(conc_vtk_files2)
    draw_three_concentration_fields(3, 'g')
    OpenDatabase(conc_vtk_files3, 4)
    ActivateDatabase(conc_vtk_files3)
    draw_three_concentration_fields(4, 'y')
    set_view()
    save_images(output_dir)
def vis_porous_three_spheres_initial_camera_rotation(rho_vtk_files, vel_vtk_files, conc_vtk_files1, conc_vtk_files2, conc_vtk_files3, output_dir):
    """Three-sphere porous scene rendered twice (original + translated copy),
    followed by a camera rotation and a translation animation.

    The scene is duplicated so one copy can be translated, then frames are
    saved while the view changes and while the copy moves.
    """
    # Two copies of the porous geometry; the second one is translated.
    OpenDatabase(rho_vtk_files, 0)
    SetActivePlots(0)
    draw_porous_media_IV()
    cut_porous_media()
    SetActivePlots(1)
    draw_porous_media_IV()
    cut_porous_media(1)
    translate_porous()
    # Two copies of the velocity field, matching the two geometry copies.
    OpenDatabase(vel_vtk_files, 0)
    ActivateDatabase(vel_vtk_files)
    draw_porous_velocity(2, 0)
    OpenDatabase(vel_vtk_files, 0)
    ActivateDatabase(vel_vtk_files)
    draw_porous_velocity(3, 1)
    SetActivePlots(3)
    translate_porous()
    # BUG FIX: the first concentration overlay must come from
    # conc_vtk_files1; the original opened conc_vtk_files2 here and then
    # activated the never-opened conc_vtk_files1 database.
    OpenDatabase(conc_vtk_files1, 0)
    ActivateDatabase(conc_vtk_files1)
    draw_three_concentration_fields(4, 'b')
    OpenDatabase(conc_vtk_files2, 0)
    ActivateDatabase(conc_vtk_files2)
    draw_three_concentration_fields(5, 'g')
    OpenDatabase(conc_vtk_files3, 0)
    ActivateDatabase(conc_vtk_files3)
    draw_three_concentration_fields(6, 'y')
    # Rotate the camera and save frames, then animate the translated copy.
    change_view_and_save(output_dir)
    ToggleLockViewMode()
    ToggleMaintainViewMode()
    translate_and_save(output_dir, 1, 3)
def vis_taylor_green(vel_vtk_files, conc_vtk_files, output_dir):
    """Taylor-Green scene: velocity field only (concentration is disabled)."""
    OpenDatabase(vel_vtk_files, 0)
    draw_taylor_green_velocity(1, 0)
    # OpenDatabase(conc_vtk_files, 0)
    # draw_taylor_green_concentration_field(1)
    set_view(8*pi/12)
    save_images(output_dir)
def vis_two_vortex_tubes(vor_vtk_files, output_dir):
    """Render the vorticity field of the two-vortex-tube test case."""
    OpenDatabase(vor_vtk_files, 0)
    draw_two_vortex_vorticity(1, 0)
    # draw_taylor_green_velocity(1,0)
    # set_view(8*pi/12)
    save_images(output_dir)
############################################################################
# MAIN
############################################################################
if __name__ == '__main__':
    ########################################################################
    # PLOTS
    ########################################################################
    # Exactly one scenario is enabled at a time; the alternatives are kept
    # commented out so they can be re-enabled by hand.
    # vis_slice(CON_VTK_FILES, IMAGE_DIR)
    # vis_porous(RHO_VTK_FILES, VEL_VTK_FILES, CON_VTK_FILES, IMAGE_DIR)
    # vis_porous_three_spheres(RHO_VTK_FILES, VEL_VTK_FILES, CON_VTK_FILES1, CON_VTK_FILES2, CON_VTK_FILES3, IMAGE_DIR)
    # vis_porous_three_spheres_initial_camera_rotation(RHO_VTK_FILES, VEL_VTK_FILES, CON_VTK_FILES1_0, CON_VTK_FILES2_0, CON_VTK_FILES3_0, IMAGE_DIR)
    # vis_taylor_green(VEL_VTK_FILES, CON_VTK_FILES, IMAGE_DIR)
    vis_taylor_green(VOR_VTK_FILES, CON_VTK_FILES, IMAGE_DIR)
    # vis_two_vortex_tubes(VOR_VTK_FILES, IMAGE_DIR)
    sys.exit()
|
arashb/tbslas
|
scripts/vis.py
|
Python
|
bsd-3-clause
| 7,860
|
[
"VTK",
"VisIt"
] |
6670eb8feba665f073df35645617d5ffb0922985630fe550a5f646e9e1dd09ff
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import numpy as np
import torch
import pyro
import pyro.distributions as dist
from pyro.contrib.examples.bart import load_bart_od
from pyro.contrib.forecast import ForecastingModel, backtest
from pyro.ops.tensor_utils import periodic_cumsum, periodic_repeat
logging.getLogger("pyro").setLevel(logging.DEBUG)
logging.getLogger("pyro").handlers[0].setLevel(logging.DEBUG)
def preprocess(args):
    """
    Extract a tensor of (arrivals,departures) to Embarcadero station.
    """
    print("Loading data")
    dataset = load_bart_od()
    # The full dataset holds origin->destination ridership counts among all
    # ~50 stations; this example keeps only the aggregate traffic touching a
    # single station, Embarcadero ("EMBR").
    station = dataset["stations"].index("EMBR")
    arrivals = dataset["counts"][:, :, station].sum(-1)
    departures = dataset["counts"][:, station, :].sum(-1)
    data = torch.stack([arrivals, departures], dim=-1)
    # No covariates in this example: a zero-width (T, 0) tensor stands in.
    covariates = torch.zeros(len(data), 0)
    return data, covariates
# We define a model by subclassing the ForecastingModel class and implementing
# a single .model() method.
# A forecasting model is defined by subclassing ForecastingModel and
# implementing the single .model() method.
class Model(ForecastingModel):
    """Bivariate (arrivals, departures) forecaster with weekly seasonality."""

    def model(self, zero_data, covariates):
        # zero_data only conveys the shape/dtype of the data to generate; the
        # generative model must not depend on its values. covariates are
        # empty in this example (see preprocess()).
        period = 24 * 7  # one week of hourly observations
        duration, dim = zero_data.shape[-2:]
        assert dim == 2  # Data is bivariate: (arrivals, departures).

        # --- global parameters ---
        noise_scale = pyro.sample(
            "noise_scale", dist.LogNormal(torch.full((dim,), -3.0), 1.0).to_event(1)
        )
        assert noise_scale.shape[-1:] == (dim,)
        trans_timescale = pyro.sample(
            "trans_timescale", dist.LogNormal(torch.zeros(dim), 1).to_event(1)
        )
        assert trans_timescale.shape[-1:] == (dim,)
        trans_loc = pyro.sample("trans_loc", dist.Cauchy(0, 1 / period))
        trans_loc = trans_loc.unsqueeze(-1).expand(trans_loc.shape + (dim,))
        assert trans_loc.shape[-1:] == (dim,)
        trans_scale = pyro.sample(
            "trans_scale", dist.LogNormal(torch.zeros(dim), 0.1).to_event(1)
        )
        trans_corr = pyro.sample("trans_corr", dist.LKJCholesky(dim, torch.ones(())))
        trans_scale_tril = trans_scale.unsqueeze(-1) * trans_corr
        assert trans_scale_tril.shape[-2:] == (dim, dim)
        obs_scale = pyro.sample(
            "obs_scale", dist.LogNormal(torch.zeros(dim), 0.1).to_event(1)
        )
        obs_corr = pyro.sample("obs_corr", dist.LKJCholesky(dim, torch.ones(())))
        obs_scale_tril = obs_scale.unsqueeze(-1) * obs_corr
        assert obs_scale_tril.shape[-2:] == (dim, dim)

        # The initial seasonality is sampled in a plate along dim=-1 (the
        # same dim as time_plate) so it can be tiled with periodic_repeat().
        with pyro.plate("season_plate", period, dim=-1):
            season_init = pyro.sample(
                "season_init", dist.Normal(torch.zeros(dim), 1).to_event(1)
            )
            assert season_init.shape[-2:] == (period, dim)

        # Independent noise at each time step drives slow seasonal drift.
        with self.time_plate:
            season_noise = pyro.sample(
                "season_noise", dist.Normal(0, noise_scale).to_event(1)
            )
            assert season_noise.shape[-2:] == (duration, dim)

        # Prediction = an exactly repeated seasonal part plus cumulative
        # per-season drift; two deterministic linear maps turn the diagonal
        # Normal noise into nontrivial Gaussian-process samples.
        prediction = periodic_repeat(season_init, duration, dim=-2) + periodic_cumsum(
            season_noise, period, dim=-2
        )
        assert prediction.shape[-2:] == (duration, dim)

        # Joint noise model: a GaussianHMM whose .rsample()/.log_prob() are
        # parallelized over time, so the whole model is time-parallel.
        init_dist = dist.Normal(torch.zeros(dim), 100).to_event(1)
        trans_mat = trans_timescale.neg().exp().diag_embed()
        trans_dist = dist.MultivariateNormal(trans_loc, scale_tril=trans_scale_tril)
        obs_mat = torch.eye(dim)
        obs_dist = dist.MultivariateNormal(torch.zeros(dim), scale_tril=obs_scale_tril)
        noise_model = dist.GaussianHMM(
            init_dist, trans_mat, trans_dist, obs_mat, obs_dist, duration=duration
        )
        assert noise_model.event_shape == (duration, dim)

        # Register the noise model and prediction with the framework.
        self.predict(noise_model, prediction)
def main(args):
    """Train/evaluate over sliding windows and print aggregate metrics."""
    data, covariates = preprocess(args)

    # Model positive counts by log1p-transforming into real-valued data.
    # Evaluation is meant to happen back in the count domain: truth via
    # expm1(), predictions via Poisson sampling of expm1(pred).
    data = data.log1p()

    def transform(pred, truth):
        pred = torch.poisson(pred.clamp(min=1e-4).expm1())
        truth = truth.expm1()
        return pred, truth

    # NOTE(review): transform is defined but never passed to backtest() --
    # confirm whether backtest(..., transform=transform) was intended.

    # backtest() automatically trains and evaluates the model on
    # different windows of data.
    forecaster_options = {
        "num_steps": args.num_steps,
        "learning_rate": args.learning_rate,
        "log_every": args.log_every,
        "dct_gradients": args.dct,
    }
    metrics = backtest(
        data,
        covariates,
        Model,
        train_window=args.train_window,
        test_window=args.test_window,
        stride=args.stride,
        num_samples=args.num_samples,
        forecaster_options=forecaster_options,
    )

    # Report mean +- std of each metric across the backtest windows.
    for name in ["mae", "rmse", "crps"]:
        values = [m[name] for m in metrics]
        mean = np.mean(values)
        std = np.std(values)
        print("{} = {:0.3g} +- {:0.3g}".format(name, mean, std))
    return metrics
if __name__ == "__main__":
    assert pyro.__version__.startswith("1.7.0")
    # Command-line configuration for the backtest windows and the optimizer.
    parser = argparse.ArgumentParser(description="Bart Ridership Forecasting Example")
    parser.add_argument("--train-window", default=2160, type=int)
    parser.add_argument("--test-window", default=336, type=int)
    parser.add_argument("--stride", default=168, type=int)
    parser.add_argument("-n", "--num-steps", default=501, type=int)
    parser.add_argument("-lr", "--learning-rate", default=0.05, type=float)
    parser.add_argument("--dct", action="store_true")
    parser.add_argument("--num-samples", default=100, type=int)
    parser.add_argument("--log-every", default=50, type=int)
    # NOTE(review): --seed is parsed but never used -- confirm whether
    # pyro.set_rng_seed(args.seed) was intended.
    parser.add_argument("--seed", default=1234567890, type=int)
    args = parser.parse_args()
    main(args)
|
uber/pyro
|
examples/contrib/forecast/bart.py
|
Python
|
apache-2.0
| 7,475
|
[
"Gaussian"
] |
54b510c874eed8dfa3921aa3e0632681bd7c9784937514358b59d25ccbaea128
|
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import urllib
import cgi
from invenio.config import \
CFG_CERN_SITE, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_URL, \
CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS, \
CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS, \
CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS, \
CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SITE_RECORD
from invenio.access_control_config import CFG_EXTERNAL_AUTH_USING_SSO, \
CFG_EXTERNAL_AUTH_LOGOUT_SSO
from invenio.urlutils import make_canonical_urlargd, create_url, create_html_link
from invenio.htmlutils import escape_html, nmtoken_from_string
from invenio.messages import gettext_set_language, language_list_long
from invenio.websession_config import CFG_WEBSESSION_GROUP_JOIN_POLICY
class Template:
def tmpl_back_form(self, ln, message, url, link):
    """Render a minimal "message plus go-back link" page.

    :param ln: interface language code (kept for API symmetry)
    :param message: text shown before the link
    :param url: target of the back link
    :param link: anchor text of the back link
    :return: HTML snippet as a string
    """
    return """
<table>
<tr>
<td align="left">%(message)s
<a href="%(url)s">%(link)s</a></td>
</tr>
</table>
""" % {
        'message': message,
        'url': url,
        'link': link,
        'ln': ln,
    }
def tmpl_external_setting(self, ln, key, value):
    """Render one table row showing an external-account setting."""
    # Translation context is loaded for consistency with sibling templates,
    # even though this particular snippet contains no translatable text.
    _ = gettext_set_language(ln)
    return """
<tr>
<td align="right"><strong>%s:</strong></td>
<td><i>%s</i></td>
</tr>""" % (key, value)
def tmpl_external_user_settings(self, ln, html_settings):
    """Wrap pre-rendered external-account setting rows in their section.

    :param ln: interface language code
    :param html_settings: already-rendered <tr> rows (see
        tmpl_external_setting)
    :return: HTML snippet as a string
    """
    _ = gettext_set_language(ln)
    values = {
        'external_user_settings': _('External account settings'),
        'html_settings': html_settings,
        'consult_external_groups': _('You can consult the list of your external groups directly in the %(x_url_open)sgroups page%(x_url_close)s.') % {
            'x_url_open': '<a href="../yourgroups/display?ln=%s#external_groups">' % ln,
            'x_url_close': '</a>'
        },
        'external_user_groups': _('External user groups'),
    }
    return """
<p><big><strong class="headline">%(external_user_settings)s</strong></big></p>
<table>
%(html_settings)s
</table>
<p><big><strong class="headline">%(external_user_groups)s</strong></big></p>
<p>%(consult_external_groups)s</p>
""" % values
def tmpl_user_preferences(self, ln, email, email_disabled, password_disabled, nickname):
"""
Displays a form for the user to change his email/password.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'email' *string* - The email of the user
- 'email_disabled' *boolean* - If the user has the right to edit his email
- 'password_disabled' *boolean* - If the user has the right to edit his password
- 'nickname' *string* - The nickname of the user (empty string if user does not have it)
"""
# load the right message language
_ = gettext_set_language(ln)
# First form: nickname (settable once) and email address.
out = """
<p><big><strong class="headline">%(edit_params)s</strong></big></p>
<form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_logins_settings">
<p>%(change_user)s</p>
<table>
<tr><td align="right" valign="top"><strong>
<label for="nickname">%(nickname_label)s:</label></strong><br />
<small class="important">(%(mandatory)s)</small>
</td><td valign="top">
%(nickname_prefix)s%(nickname)s%(nickname_suffix)s<br />
<small><span class="quicknote">%(note)s:</span>
%(fixed_nickname_note)s
</small>
</td>
</tr>
<tr><td align="right"><strong>
<label for="email">%(new_email)s:</label></strong><br />
<small class="important">(%(mandatory)s)</small>
</td><td>
<input type="text" size="25" name="email" id="email" %(email_disabled)s value="%(email)s" /><br />
<small><span class="quicknote">%(example)s:</span>
<span class="example">john.doe@example.com</span>
</small>
</td>
</tr>
<tr><td></td><td align="left">
<code class="blocknote"><input class="formbutton" type="submit" value="%(set_values)s" /></code>
</td></tr>
</table>
<input type="hidden" name="action" value="edit" />
</form>
""" % {
'change_user' : _("If you want to change your email or set for the first time your nickname, please set new values in the form below."),
'edit_params' : _("Edit login credentials"),
'nickname_label' : _("Nickname"),
'nickname' : nickname,
'nickname_prefix' : nickname=='' and '<input type="text" size="25" name="nickname" id="nickname" value=""' or '',
'nickname_suffix' : nickname=='' and '" /><br /><small><span class="quicknote">'+_("Example")+':</span><span class="example">johnd</span></small>' or '',
'new_email' : _("New email address"),
'mandatory' : _("mandatory"),
'example' : _("Example"),
'note' : _("Note"),
'set_values' : _("Set new values"),
'email' : email,
'email_disabled' : email_disabled and "readonly" or "",
'sitesecureurl': CFG_SITE_SECURE_URL,
'fixed_nickname_note' : _('Since this is considered as a signature for comments and reviews, once set it can not be changed.')
}
# Second form: password change -- only when passwords are locally managed
# (not SSO) and the user is allowed to change theirs.
if not password_disabled and not CFG_EXTERNAL_AUTH_USING_SSO:
out += """
<form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_password">
<p>%(change_pass)s</p>
<table>
<tr>
<td align="right"><strong><label for="old_password">%(old_password)s:</label></strong><br />
</td><td align="left">
<input type="password" size="25" name="old_password" id="old_password" %(password_disabled)s /><br />
<small><span class="quicknote">%(note)s:</span>
%(old_password_note)s
</small>
</td>
</tr>
<tr>
<td align="right"><strong><label for="new_password">%(new_password)s:</label></strong><br />
</td><td align="left">
<input type="password" size="25" name="password" id="new_password" %(password_disabled)s /><br />
<small><span class="quicknote">%(note)s:</span>
%(password_note)s
</small>
</td>
</tr>
<tr>
<td align="right"><strong><label for="new_password2">%(retype_password)s:</label></strong></td>
<td align="left">
<input type="password" size="25" name="password2" id="new_password2" %(password_disabled)s value="" />
</td>
</tr>
<tr><td></td><td align="left">
<code class="blocknote"><input class="formbutton" type="submit" value="%(set_values)s" /></code>
</td></tr>
</table>
<input type="hidden" name="action" value="edit" />
</form>
""" % {
'change_pass' : _("If you want to change your password, please enter the old one and set the new value in the form below."),
'mandatory' : _("mandatory"),
'old_password' : _("Old password"),
'new_password' : _("New password"),
'optional' : _("optional"),
'note' : _("Note"),
'password_note' : _("The password phrase may contain punctuation, spaces, etc."),
'old_password_note' : _("You must fill the old password in order to set a new one."),
'retype_password' : _("Retype password"),
'set_values' : _("Set new password"),
'password_disabled' : password_disabled and "disabled" or "",
'sitesecureurl': CFG_SITE_SECURE_URL,
}
# CERN site without SSO: point lightweight-account users at the external
# password-reset page, carrying their email and a return URL.
elif not CFG_EXTERNAL_AUTH_USING_SSO and CFG_CERN_SITE:
out += "<p>" + _("""If you are using a lightweight CERN account you can
%(x_url_open)sreset the password%(x_url_close)s.""") % \
{'x_url_open' : \
'<a href="http://cern.ch/LightweightRegistration/ResetPassword.aspx%s">' \
% (make_canonical_urlargd({'email': email, 'returnurl' : CFG_SITE_SECURE_URL + '/youraccount/edit' + make_canonical_urlargd({'lang' : ln}, {})}, {})), 'x_url_close' : '</a>'} + "</p>"
# CERN site with SSO: passwords are managed by the CERN account system.
elif CFG_EXTERNAL_AUTH_USING_SSO and CFG_CERN_SITE:
out += "<p>" + _("""You can change or reset your CERN account password by means of the %(x_url_open)sCERN account system%(x_url_close)s.""") % \
{'x_url_open' : '<a href="https://cern.ch/login/password.aspx">', 'x_url_close' : '</a>'} + "</p>"
return out
def tmpl_user_bibcatalog_auth(self, bibcatalog_username="", bibcatalog_password="", ln=CFG_SITE_LANG):
"""template for setting username and pw for bibcatalog backend"""
_ = gettext_set_language(ln)
out = """
<form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_bibcatalog_settings">
<p><big><strong class="headline">%(edit_bibcatalog_settings)s</strong></big></p>
<table>
<tr>
<td> %(username)s: <input type="text" size="25" name="bibcatalog_username" value="%(bibcatalog_username)s" id="bibcatuid"></td>
<td> %(password)s: <input type="password" size="25" name="bibcatalog_password" value="%(bibcatalog_password)s" id="bibcatpw"></td>
</tr>
<tr>
<td><input class="formbutton" type="submit" value="%(update_settings)s" /></td>
</tr>
</table>
""" % {
'sitesecureurl' : CFG_SITE_SECURE_URL,
'bibcatalog_username' : bibcatalog_username,
'bibcatalog_password' : bibcatalog_password,
'edit_bibcatalog_settings' : _("Edit cataloging interface settings"),
'username' : _("Username"),
'password' : _("Password"),
'update_settings' : _('Update settings')
}
return out
    def tmpl_user_lang_edit(self, ln, preferred_lang):
        """Display a form to select the preferred language of the web
        interface.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'preferred_lang' *string* - short code of the currently preferred language
        """
        _ = gettext_set_language(ln)
        out = """
             <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_lang_settings">
               <p><big><strong class="headline">%(edit_lang_settings)s</strong></big></p>
               <table>
                 <tr><td align="right"><select name="lang" id="lang">
        """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'edit_lang_settings' : _("Edit language-related settings"),
        }
        # one <option> per installed language; pre-select the user's choice
        for short_ln, long_ln in language_list_long():
            out += """<option %(selected)s value="%(short_ln)s">%(long_ln)s</option>""" % {
                     'selected' : preferred_lang == short_ln and 'selected="selected"' or '',
                     'short_ln' : short_ln,
                     'long_ln' : escape_html(long_ln)
                   }
        out += """</select></td><td valign="top"><strong><label for="lang">%(select_lang)s</label></strong></td></tr>
                 <tr><td></td><td><input class="formbutton" type="submit" value="%(update_settings)s" /></td></tr>
               </table></form>""" % {
                 'select_lang' : _('Select desired language of the web interface.'),
                 'update_settings' : _('Update settings')
               }
        return out
    def tmpl_user_websearch_edit(self, ln, current = 10, show_latestbox = True, show_helpbox = True):
        """Display a form to edit search-related settings.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'current' *int* - currently selected number of search results per page

          - 'show_latestbox' *boolean* - whether the "latest additions" box is enabled

          - 'show_helpbox' *boolean* - whether collection help boxes are enabled
        """
        _ = gettext_set_language(ln)
        out = """
             <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_websearch_settings">
               <p><big><strong class="headline">%(edit_websearch_settings)s</strong></big></p>
               <table>
                 <tr><td align="right"><input type="checkbox" %(checked_latestbox)s value="1" name="latestbox" id="latestbox"/></td>
                     <td valign="top"><b><label for="latestbox">%(show_latestbox)s</label></b></td></tr>
                 <tr><td align="right"><input type="checkbox" %(checked_helpbox)s value="1" name="helpbox" id="helpbox"/></td>
                     <td valign="top"><b><label for="helpbox">%(show_helpbox)s</label></b></td></tr>
                 <tr><td align="right"><select name="group_records" id="group_records">
        """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'edit_websearch_settings' : _("Edit search-related settings"),
          'show_latestbox' : _("Show the latest additions box"),
          'checked_latestbox' : show_latestbox and 'checked="checked"' or '',
          'show_helpbox' : _("Show collection help boxes"),
          'checked_helpbox' : show_helpbox and 'checked="checked"' or '',
        }
        # propose the allowed page sizes, capped by the site-wide maximum
        for i in 10, 25, 50, 100, 250, 500:
            if i <= CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS:
                out += """<option %(selected)s>%(i)s</option>
                """ % {
                  'selected' : current == i and 'selected="selected"' or '',
                  'i' : i
                }
        out += """</select></td><td valign="top"><strong><label for="group_records">%(select_group_records)s</label></strong></td></tr>
                 <tr><td></td><td><input class="formbutton" type="submit" value="%(update_settings)s" /></td></tr>
               </table>
              </form>""" % {
                 'update_settings' : _("Update settings"),
                 'select_group_records' : _("Number of search results per page"),
              }
        return out
    def tmpl_user_external_auth(self, ln, methods, current, method_disabled):
        """
          Displays a form for the user to change his authentication method.

          Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'methods' *array* - The methods of authentication

          - 'method_disabled' *boolean* - If the user has the right to change this

          - 'current' *string* - The currently selected method
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """
             <form method="post" action="%(sitesecureurl)s/youraccount/change">
               <big><strong class="headline">%(edit_method)s</strong></big>
               <p>%(explain_method)s:</p>
               <table>
                 <tr><td valign="top"><b>%(select_method)s:</b></td><td>
        """ % {
          'edit_method' : _("Edit login method"),
          'explain_method' : _("Please select which login method you would like to use to authenticate yourself"),
          'select_method' : _("Select method"),
          'sitesecureurl': CFG_SITE_SECURE_URL,
        }
        # one radio button per method; the current one is pre-checked, and
        # all buttons are disabled when the user may not change the method
        for system in methods:
            out += """<input type="radio" name="login_method" value="%(system)s" id="%(id)s" %(disabled)s %(selected)s /><label for="%(id)s">%(system)s</label><br />""" % {
                     'system' : system,
                     'disabled' : method_disabled and 'disabled="disabled"' or "",
                     'selected' : current == system and 'checked="checked"' or "",
                     'id' : nmtoken_from_string(system),
                   }
        out += """  </td></tr>
                   <tr><td> </td>
                     <td><input class="formbutton" type="submit" value="%(select_method)s" /></td></tr></table>
                    </form>""" % {
                     'select_method' : _("Select method"),
                   }
        return out
    def tmpl_lost_password_form(self, ln):
        """
          Displays a form for the user to ask for his password sent by email.

          Parameters:

          - 'ln' *string* - The language to display the interface in
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = "<p>" + _("If you have lost the password for your %(sitename)s %(x_fmt_open)sinternal account%(x_fmt_close)s, then please enter your email address in the following form in order to have a password reset link emailed to you.") % {'x_fmt_open' : '<em>', 'x_fmt_close' : '</em>', 'sitename' : CFG_SITE_NAME_INTL[ln]} + "</p>"
        out += """
          <blockquote>
          <form  method="post" action="../youraccount/send_email">
          <table>
                <tr>
              <td align="right"><strong><label for="p_email">%(email)s:</label></strong></td>
              <td><input type="text" size="25" name="p_email" id="p_email" value="" />
                  <input type="hidden" name="ln" value="%(ln)s" />
                  <input type="hidden" name="action" value="lost" />
              </td>
            </tr>
            <tr><td> </td>
              <td><code class="blocknote"><input class="formbutton" type="submit" value="%(send)s" /></code></td>
            </tr>
          </table>
          </form>
          </blockquote>
        """ % {
              'ln': ln,
              'email' : _("Email address"),
              'send' : _("Send password reset link"),
            }
        # CERN users recover their password through the central CERN
        # authentication system rather than the local reset facility
        if CFG_CERN_SITE:
            out += "<p>" + _("If you have been using the %(x_fmt_open)sCERN login system%(x_fmt_close)s, then you can recover your password through the %(x_url_open)sCERN authentication system%(x_url_close)s.") % {'x_fmt_open' : '<em>', 'x_fmt_close' : '</em>', 'x_url_open' : '<a href="https://cern.ch/lightweightregistration/ResetPassword.aspx%s">' \
                % make_canonical_urlargd({'lf': 'auth', 'returnURL' : CFG_SITE_SECURE_URL + '/youraccount/login?ln='+ln}, {}), 'x_url_close' : '</a>'} + " "
        else:
            out += "<p>" + _("Note that if you have been using an external login system, then we cannot do anything and you have to ask there.") + " "
        out += _("Alternatively, you can ask %s to change your login system from external to internal.") % ("""<a href="mailto:%(email)s">%(email)s</a>""" % { 'email' : CFG_SITE_SUPPORT_EMAIL }) + "</p>"
        return out
    def tmpl_account_info(self, ln, uid, guest, CFG_CERN_SITE):
        """
          Displays the account information

          Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'uid' *string* - The user id

          - 'guest' *boolean* - If the user is guest

          - 'CFG_CERN_SITE' *boolean* - If the site is a CERN site
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """<p>%(account_offer)s</p>
              <blockquote>
              <dl>
              """ % {
                'account_offer' : _("%s offers you the possibility to personalize the interface, to set up your own personal library of documents, or to set up an automatic alert query that would run periodically and would notify you of search results by email.") % CFG_SITE_NAME_INTL[ln],
              }
        # the settings section only makes sense for authenticated users
        if not guest:
            out += """
                  <dt>
                  <a href="./edit?ln=%(ln)s">%(your_settings)s</a>
                  </dt>
                  <dd>%(change_account)s</dd>""" % {
                    'ln' : ln,
                    'your_settings' : _("Your Settings"),
                    'change_account' : _("Set or change your account email address or password. Specify your preferences about the look and feel of the interface.")
                  }
        out += """
              <dt><a href="../youralerts/display?ln=%(ln)s">%(your_searches)s</a></dt>
              <dd>%(search_explain)s</dd>""" % {
                'ln' : ln,
                'your_searches' : _("Your Searches"),
                'search_explain' : _("View all the searches you performed during the last 30 days."),
              }
        out += """
              <dt><a href="../yourbaskets/display?ln=%(ln)s">%(your_baskets)s</a></dt>
              <dd>%(basket_explain)s""" % {
                'ln' : ln,
                'your_baskets' : _("Your Baskets"),
                'basket_explain' : _("With baskets you can define specific collections of items, store interesting records you want to access later or share with others."),
              }
        # guests' baskets/alerts only live for the current session: warn them
        if guest and CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
            out += self.tmpl_warning_guest_user(ln = ln, type = "baskets")
        out += """</dd>
              <dt><a href="../youralerts/list?ln=%(ln)s">%(your_alerts)s</a></dt>
              <dd>%(explain_alerts)s""" % {
                'ln' : ln,
                'your_alerts' : _("Your Alerts"),
                'explain_alerts' : _("Subscribe to a search which will be run periodically by our service. The result can be sent to you via Email or stored in one of your baskets."),
              }
        if guest and CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
            out += self.tmpl_warning_guest_user(type="alerts", ln = ln)
        out += "</dd>"
        if CFG_CERN_SITE:
            # NOTE(review): this emits a second "</dd>" right after the one
            # above -- looks like a leftover; confirm before relying on the
            # generated markup being strictly valid.
            out += """</dd>
                  <dt><a href="%(CFG_SITE_SECURE_URL)s/yourloans/display?ln=%(ln)s">%(your_loans)s</a></dt>
                  <dd>%(explain_loans)s</dd>""" % {
                    'your_loans' : _("Your Loans"),
                    'explain_loans' : _("Check out book you have on loan, submit borrowing requests, etc. Requires CERN ID."),
                    'ln': ln,
                    'CFG_SITE_SECURE_URL': CFG_SITE_SECURE_URL
                  }
        out += """
              </dl>
              </blockquote>"""
        return out
def tmpl_warning_guest_user(self, ln, type):
"""
Displays a warning message about the specified type
Parameters:
- 'ln' *string* - The language to display the interface in
- 'type' *string* - The type of data that will get lost in case of guest account (for the moment: 'alerts' or 'baskets')
"""
# load the right message language
_ = gettext_set_language(ln)
if (type=='baskets'):
msg = _("You are logged in as a guest user, so your baskets will disappear at the end of the current session.") + ' '
elif (type=='alerts'):
msg = _("You are logged in as a guest user, so your alerts will disappear at the end of the current session.") + ' '
msg += _("If you wish you can %(x_url_open)slogin or register here%(x_url_close)s.") % {'x_url_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/login?ln=' + ln + '">',
'x_url_close': '</a>'}
return """<table class="errorbox" summary="">
<tr>
<th class="errorboxheader">%s</th>
</tr>
</table>""" % msg
    def tmpl_account_body(self, ln, user):
        """
          Displays the body of the actions of the user

          Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'user' *string* - The username (nickname or email)
        """
        # load the right message language
        _ = gettext_set_language(ln)
        # greeting line with logout and account-settings links
        out = _("You are logged in as %(x_user)s. You may want to a) %(x_url1_open)slogout%(x_url1_close)s; b) edit your %(x_url2_open)saccount settings%(x_url2_close)s.") %\
            {'x_user': user,
             'x_url1_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/logout?ln=' + ln + '">',
             'x_url1_close': '</a>',
             'x_url2_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/edit?ln=' + ln + '">',
             'x_url2_close': '</a>',
             }
        return out + "<br /><br />"
def tmpl_account_template(self, title, body, ln, url):
"""
Displays a block of the your account page
Parameters:
- 'ln' *string* - The language to display the interface in
- 'title' *string* - The title of the block
- 'body' *string* - The body of the block
- 'url' *string* - The URL to go to the proper section
"""
out ="""
<table class="youraccountbox" width="90%%" summary="" >
<tr>
<th class="youraccountheader"><a href="%s">%s</a></th>
</tr>
<tr>
<td class="youraccountbody">%s</td>
</tr>
</table>""" % (url, title, body)
return out
    def tmpl_account_page(self, ln, warnings, warning_list, accBody, baskets, alerts, searches, messages, loans, groups, submissions, approvals, tickets, administrative):
        """
          Displays the your account page

          Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'warnings' *string* - "1" if the warning list should be displayed

          - 'warning_list' *array* - warnings shown at the top of the page

          - 'accBody' *string* - The body of the heading block

          - 'baskets' *string* - The body of the baskets block

          - 'alerts' *string* - The body of the alerts block

          - 'searches' *string* - The body of the searches block

          - 'messages' *string* - The body of the messages block

          - 'loans' *string* - The body of the loans block

          - 'groups' *string* - The body of the groups block

          - 'submissions' *string* - The body of the submission block

          - 'approvals' *string* - The body of the approvals block

          - 'tickets' *boolean* - whether the tickets section should be shown

          - 'administrative' *string* - The body of the administrative block
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = ""
        if warnings == "1":
            out += self.tmpl_general_warnings(warning_list)
        out += self.tmpl_account_template(_("Your Account"), accBody, ln, '/youraccount/edit?ln=%s' % ln)
        # each section below is rendered only when the caller supplied
        # content for it (empty string / False suppresses the box)
        if messages:
            out += self.tmpl_account_template(_("Your Messages"), messages, ln, '/yourmessages/display?ln=%s' % ln)
        if loans:
            out += self.tmpl_account_template(_("Your Loans"), loans, ln, '/yourloans/display?ln=%s' % ln)
        if baskets:
            out += self.tmpl_account_template(_("Your Baskets"), baskets, ln, '/yourbaskets/display?ln=%s' % ln)
        if alerts:
            out += self.tmpl_account_template(_("Your Alert Searches"), alerts, ln, '/youralerts/list?ln=%s' % ln)
        if searches:
            out += self.tmpl_account_template(_("Your Searches"), searches, ln, '/youralerts/display?ln=%s' % ln)
        if groups:
            groups_description = _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s you are administering or are a member of.")
            groups_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourgroups/display?ln=' + ln + '">',
                                   'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Groups"), groups_description, ln, '/yourgroups/display?ln=%s' % ln)
        if submissions:
            submission_description = _("You can consult the list of %(x_url_open)syour submissions%(x_url_close)s and inquire about their status.")
            submission_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yoursubmissions.py?ln=' + ln + '">',
                                       'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Submissions"), submission_description, ln, '/yoursubmissions.py?ln=%s' % ln)
        if approvals:
            approval_description = _("You can consult the list of %(x_url_open)syour approvals%(x_url_close)s with the documents you approved or refereed.")
            approval_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourapprovals.py?ln=' + ln + '">',
                                     'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Approvals"), approval_description, ln, '/yourapprovals.py?ln=%s' % ln)
        #check if this user might have tickets
        if tickets:
            ticket_description = _("You can consult the list of %(x_url_open)syour tickets%(x_url_close)s.")
            ticket_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourtickets?ln=' + ln + '">',
                                   'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Tickets"), ticket_description, ln, '/yourtickets?ln=%s' % ln)
        if administrative:
            out += self.tmpl_account_template(_("Your Administrative Activities"), administrative, ln, '/admin')
        return out
    def tmpl_account_emailMessage(self, ln, msg):
        """
          Displays a message followed by a link back to the lost-password form.

          Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'msg' *string* - Explicative message on top of the form.
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out =""
        # NOTE(review): the literal <body> tag looks out of place in a page
        # fragment -- presumably the surrounding page machinery tolerates it;
        # confirm before reusing this template elsewhere.
        out +="""
        <body>
           %(msg)s <a href="../youraccount/lost?ln=%(ln)s">%(try_again)s</a>
              </body>
              """ % {
                'ln' : ln,
                'msg' : msg,
                'try_again' : _("Try again")
              }
        return out
    def tmpl_account_reset_password_email_body(self, email, reset_key, ip_address, ln=CFG_SITE_LANG):
        """
        The body of the email that sends lost internal account
        passwords to users.

        Parameters:

          - 'email' *string* - the account address the reset was requested for

          - 'reset_key' *string* - mail-cookie key embedded in the reset URL

          - 'ip_address' *string* - IP the request came from (shown to the recipient)

          - 'ln' *string* - the language of the email
        """
        _ = gettext_set_language(ln)
        out = """
%(intro)s
%(intro2)s
<%(link)s>
%(outro)s
%(outro2)s""" % {
            'intro': _("Somebody (possibly you) coming from %(x_ip_address)s "
                "has asked\nfor a password reset at %(x_sitename)s\nfor "
                "the account \"%(x_email)s\"." % {
                    'x_sitename' :CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
                    'x_email' : email,
                    'x_ip_address' : ip_address,
                }
            ),
            'intro2' : _("If you want to reset the password for this account, please go to:"),
            'link' : "%s/youraccount/access%s" %
                (CFG_SITE_SECURE_URL, make_canonical_urlargd({
                    'ln' : ln,
                    'mailcookie' : reset_key
                }, {})),
            'outro' : _("in order to confirm the validity of this request."),
            'outro2' : _("Please note that this URL will remain valid for about %(days)s days only.") % {'days': CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS},
        }
        return out
    def tmpl_account_address_activation_email_body(self, email, address_activation_key, ip_address, ln=CFG_SITE_LANG):
        """
        The body of the email that sends email address activation cookie
        passwords to users.

        Parameters:

          - 'email' *string* - the address being registered

          - 'address_activation_key' *string* - mail-cookie key embedded in the activation URL

          - 'ip_address' *string* - IP the registration request came from

          - 'ln' *string* - the language of the email
        """
        _ = gettext_set_language(ln)
        out = """
%(intro)s
%(intro2)s
<%(link)s>
%(outro)s
%(outro2)s""" % {
            'intro': _("Somebody (possibly you) coming from %(x_ip_address)s "
                "has asked\nto register a new account at %(x_sitename)s\nfor the "
                "email address \"%(x_email)s\"." % {
                    'x_sitename' :CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
                    'x_email' : email,
                    'x_ip_address' : ip_address,
                }
            ),
            'intro2' : _("If you want to complete this account registration, please go to:"),
            'link' : "%s/youraccount/access%s" %
                (CFG_SITE_SECURE_URL, make_canonical_urlargd({
                    'ln' : ln,
                    'mailcookie' : address_activation_key
                }, {})),
            'outro' : _("in order to confirm the validity of this request."),
            'outro2' : _("Please note that this URL will remain valid for about %(days)s days only.") % {'days' : CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS},
        }
        return out
def tmpl_account_emailSent(self, ln, email):
"""
Displays a confirmation message for an email sent
Parameters:
- 'ln' *string* - The language to display the interface in
- 'email' *string* - The email to which the message has been sent
"""
# load the right message language
_ = gettext_set_language(ln)
out =""
out += _("Okay, a password reset link has been emailed to %s.") % email
return out
def tmpl_account_delete(self, ln):
"""
Displays a confirmation message about deleting the account
Parameters:
- 'ln' *string* - The language to display the interface in
"""
# load the right message language
_ = gettext_set_language(ln)
out = "<p>" + _("""Deleting your account""") + '</p>'
return out
    def tmpl_account_logout(self, ln):
        """
          Displays a confirmation message about logging out

          Parameters:

          - 'ln' *string* - The language to display the interface in
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = _("You are no longer recognized by our system.") + ' '
        # with SSO the user is only logged out locally, so also offer the
        # central SSO logout link
        if CFG_EXTERNAL_AUTH_USING_SSO and CFG_EXTERNAL_AUTH_LOGOUT_SSO:
            out += _("""You are still recognized by the centralized
                %(x_fmt_open)sSSO%(x_fmt_close)s system. You can
                %(x_url_open)slogout from SSO%(x_url_close)s, too.""") % \
                {'x_fmt_open' : '<strong>', 'x_fmt_close' : '</strong>',
                 'x_url_open' : '<a href="%s">' % CFG_EXTERNAL_AUTH_LOGOUT_SSO,
                 'x_url_close' : '</a>'}
            out += '<br />'
        out += _("If you wish you can %(x_url_open)slogin here%(x_url_close)s.") % \
            {'x_url_open': '<a href="./login?ln=' + ln + '">',
             'x_url_close': '</a>'}
        return out
def tmpl_login_form(self, ln, referer, internal, register_available, methods, selected_method, msg=None):
"""
Displays a login form
Parameters:
- 'ln' *string* - The language to display the interface in
- 'referer' *string* - The referer URL - will be redirected upon after login
- 'internal' *boolean* - If we are producing an internal authentication
- 'register_available' *boolean* - If users can register freely in the system
- 'methods' *array* - The available authentication methods
- 'selected_method' *string* - The default authentication method
- 'msg' *string* - The message to print before the form, if needed
"""
# load the right message language
_ = gettext_set_language(ln)
if msg is "":
out = "<p>%(please_login)s</p>" % {
'please_login' : cgi.escape(_("If you already have an account, please login using the form below."))
}
if CFG_CERN_SITE:
out += "<p>" + _("If you don't own a CERN account yet, you can register a %(x_url_open)snew CERN lightweight account%(x_url_close)s.") % {'x_url_open' : '<a href="https://www.cern.ch/lightweightregistration/RegisterAccount.aspx">', 'x_url_close' : '</a>'} + "</p>"
else:
if register_available:
out += "<p>"+_("If you don't own an account yet, please %(x_url_open)sregister%(x_url_close)s an internal account.") %\
{'x_url_open': '<a href="../youraccount/register?ln=' + ln + '">',
'x_url_close': '</a>'} + "</p>"
else:
# users cannot register accounts, so advise them
# how to get one, or be silent about register
# facility if account level is more than 4:
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 5:
out += "<p>" + _("If you don't own an account yet, please contact %s.") % ('<a href="mailto:%s">%s</a>' % (cgi.escape(CFG_SITE_SUPPORT_EMAIL, True), cgi.escape(CFG_SITE_SUPPORT_EMAIL))) + "</p>"
else:
out = "<p>%s</p>" % msg
out += """<form method="post" action="../youraccount/login">
<table>
"""
if len(methods) > 1:
# more than one method, must make a select
login_select = """<select name="login_method" id="login_method">"""
for method in methods:
login_select += """<option value="%(method)s" %(selected)s>%(method)s</option>""" % {
'method' : cgi.escape(method, True),
'selected' : (method == selected_method and 'selected="selected"' or "")
}
login_select += "</select>"
out += """
<tr>
<td align="right"><strong><label for="login_method">%(login_title)s</label></strong></td>
<td>%(login_select)s</td>
</tr>""" % {
'login_title' : cgi.escape(_("Login method:")),
'login_select' : cgi.escape(login_select),
}
else:
# only one login method available
out += """<input type="hidden" name="login_method" value="%s" />""" % cgi.escape(methods[0], True)
out += """<tr>
<td align="right">
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="referer" value="%(referer)s" />
<strong><label for="p_un">%(username)s:</label></strong>
</td>
<td><input type="text" size="25" name="p_un" id="p_un" value="" /></td>
</tr>
<tr>
<td align="right"><strong><label for="p_pw">%(password)s:</label></strong></td>
<td align="left"><input type="password" size="25" name="p_pw" id="p_pw" value="" /></td>
</tr>
<tr>
<td></td>
<td align="left"><input type="checkbox" name="remember_me" id="remember_me"/><em><label for="remember_me">%(remember_me)s</label></em></td>
<tr>
<td></td>
<td align="center" colspan="3"><code class="blocknote"><input class="formbutton" type="submit" name="action" value="%(login)s" /></code>""" % {
'ln': cgi.escape(ln, True),
'referer' : cgi.escape(referer, True),
'username' : cgi.escape(_("Username")),
'password' : cgi.escape(_("Password")),
'remember_me' : cgi.escape(_("Remember login on this computer.")),
'login' : cgi.escape(_("login")),
}
if internal:
out += """ (<a href="./lost?ln=%(ln)s">%(lost_pass)s</a>)""" % {
'ln' : cgi.escape(ln, True),
'lost_pass' : cgi.escape(_("Lost your password?"))
}
out += """</td>
</tr>
</table></form>"""
out += """<p><strong>%(note)s:</strong> %(note_text)s</p>""" % {
'note' : cgi.escape(_("Note")),
'note_text': cgi.escape(_("You can use your nickname or your email address to login."))}
return out
def tmpl_lost_your_password_teaser(self, ln=CFG_SITE_LANG):
"""Displays a short sentence to attract user to the fact that
maybe he lost his password. Used by the registration page.
"""
_ = gettext_set_language(ln)
out = ""
out += """<a href="./lost?ln=%(ln)s">%(maybe_lost_pass)s</a>""" % {
'ln' : ln,
'maybe_lost_pass': ("Maybe you have lost your password?")
}
return out
def tmpl_reset_password_form(self, ln, email, reset_key, msg=''):
"""Display a form to reset the password."""
_ = gettext_set_language(ln)
out = ""
out = "<p>%s</p>" % _("Your request is valid. Please set the new "
"desired password in the following form.")
if msg:
out += """<p class='warning'>%s</p>""" % msg
out += """
<form method="post" action="../youraccount/resetpassword?ln=%(ln)s">
<input type="hidden" name="k" value="%(reset_key)s" />
<input type="hidden" name="e" value="%(email)s" />
<input type="hidden" name="reset" value="1" />
<table>
<tr><td align="right"><strong>%(set_password_for)s</strong>:</td><td><em>%(email)s</em></td></tr>
<tr><td align="right"><strong><label for="password">%(type_new_password)s:</label></strong></td>
<td><input type="password" name="password" id="password" value="123" /></td></tr>
<tr><td align="right"><strong><label for="password2">%(type_it_again)s:</label></strong></td>
<td><input type="password" name="password2" id="password2" value="" /></td></tr>
<tr><td align="center" colspan="2">
<input class="formbutton" type="submit" name="action" value="%(set_new_password)s" />
</td></tr>
</table>
</form>""" % {
'ln' : ln,
'reset_key' : reset_key,
'email' : email,
'set_password_for' : _('Set a new password for'),
'type_new_password' : _('Type the new password'),
'type_it_again' : _('Type again the new password'),
'set_new_password' : _('Set the new password')
}
return out
    def tmpl_register_page(self, ln, referer, level):
        """
          Displays a login form

          Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'referer' *string* - The referer URL - will be redirected upon after login

          - 'level' *int* - Login level (0 - all access, 1 - accounts activated, 2+ - no self-registration)
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = ""
        # self-registration form is only shown for levels 0 and 1
        if level <= 1:
            out += _("Please enter your email address and desired nickname and password:")
            if level == 1:
                out += _("It will not be possible to use the account before it has been verified and activated.")
            out += """
              <form method="post" action="../youraccount/register">
              <input type="hidden" name="referer" value="%(referer)s" />
              <input type="hidden" name="ln" value="%(ln)s" />
              <table>
                <tr>
                 <td align="right"><strong><label for="p_email">%(email_address)s:</label></strong><br /><small class="important">(%(mandatory)s)</small></td>
                 <td><input type="text" size="25" name="p_email" id="p_email" value="" /><br />
                     <small><span class="quicknote">%(example)s:</span>
                     <span class="example">john.doe@example.com</span></small>
                 </td>
                 <td></td>
                </tr>
                <tr>
                 <td align="right"><strong><label for="p_nickname">%(nickname)s:</label></strong><br /><small class="important">(%(mandatory)s)</small></td>
                 <td><input type="text" size="25" name="p_nickname" id="p_nickname" value="" /><br />
                     <small><span class="quicknote">%(example)s:</span>
                     <span class="example">johnd</span></small>
                 </td>
                 <td></td>
                </tr>
                <tr>
                 <td align="right"><strong><label for="p_pw">%(password)s:</label></strong><br /><small class="quicknote">(%(optional)s)</small></td>
                 <td align="left"><input type="password" size="25" name="p_pw" id="p_pw" value="" /><br />
                    <small><span class="quicknote">%(note)s:</span> %(password_contain)s</small>
                 </td>
                 <td></td>
                </tr>
                <tr>
                 <td align="right"><strong><label for="p_pw2">%(retype)s:</label></strong></td>
                 <td align="left"><input type="password" size="25" name="p_pw2" id="p_pw2" value="" /></td>
                 <td></td>
                </tr>
                <tr>
                  <td></td>
                  <td align="left" colspan="3"><code class="blocknote"><input class="formbutton" type="submit" name="action" value="%(register)s" /></code></td>
                </tr>
              </table>
              </form>
              <p><strong>%(note)s:</strong> %(explain_acc)s""" % {
                'referer' : cgi.escape(referer),
                'ln' : cgi.escape(ln),
                'email_address' : _("Email address"),
                'nickname' : _("Nickname"),
                'password' : _("Password"),
                'mandatory' : _("mandatory"),
                'optional' : _("optional"),
                'example' : _("Example"),
                'note' : _("Note"),
                'password_contain' : _("The password phrase may contain punctuation, spaces, etc."),
                'retype' : _("Retype Password"),
                'register' : _("register"),
                'explain_acc' : _("Please do not use valuable passwords such as your Unix, AFS or NICE passwords with this service. Your email address will stay strictly confidential and will not be disclosed to any third party. It will be used to identify you for personal services of %s. For example, you may set up an automatic alert search that will look for new preprints and will notify you daily of new arrivals by email.") % CFG_SITE_NAME,
              }
        else:
            # level >=2, so users cannot register accounts
            out += "<p>" + _("It is not possible to create an account yourself. Contact %s if you want an account.") % ('<a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL)) + "</p>"
        return out
def tmpl_account_adminactivities(self, ln, uid, guest, roles, activities):
"""
Displays the admin activities block for this user
Parameters:
- 'ln' *string* - The language to display the interface in
- 'uid' *string* - The used id
- 'guest' *boolean* - If the user is guest
- 'roles' *array* - The current user roles
- 'activities' *array* - The user allowed activities
"""
# load the right message language
_ = gettext_set_language(ln)
out = ""
# guest condition
if guest:
return _("You seem to be a guest user. You have to %(x_url_open)slogin%(x_url_close)s first.") % \
{'x_url_open': '<a href="../youraccount/login?ln=' + ln + '">',
'x_url_close': '<a/>'}
# no rights condition
if not roles:
return "<p>" + _("You are not authorized to access administrative functions.") + "</p>"
# displaying form
out += "<p>" + _("You are enabled to the following roles: %(x_role)s.") % {'x_role': ('<em>' + ", ".join(roles) + "</em>")} + '</p>'
if activities:
# print proposed links:
activities.sort(lambda x, y: cmp(x.lower(), y.lower()))
tmp_out = ''
for action in activities:
if action == "runbibedit":
tmp_out += """<br /> <a href="%s/%s/edit/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Record Editor"))
if action == "runbibeditmulti":
tmp_out += """<br /> <a href="%s/%s/multiedit/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Multi-Record Editor"))
if action == "runbibcirculation":
tmp_out += """<br /> <a href="%s/admin/bibcirculation/bibcirculationadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run BibCirculation"))
if action == "runbibmerge":
tmp_out += """<br /> <a href="%s/%s/merge/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Record Merger"))
if action == "runbibswordclient":
tmp_out += """<br /> <a href="%s/%s/bibsword/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run BibSword Client"))
if action == "runbatchuploader":
tmp_out += """<br /> <a href="%s/batchuploader/metadata?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run Batch Uploader"))
if action == "cfgbibformat":
tmp_out += """<br /> <a href="%s/admin/bibformat/bibformatadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibFormat"))
tmp_out += """<br /> <a href="%s/kb?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibKnowledge"))
if action == "cfgoaiharvest":
tmp_out += """<br /> <a href="%s/admin/bibharvest/oaiharvestadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure OAI Harvest"))
if action == "cfgoairepository":
tmp_out += """<br /> <a href="%s/admin/bibharvest/oairepositoryadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure OAI Repository"))
if action == "cfgbibindex":
tmp_out += """<br /> <a href="%s/admin/bibindex/bibindexadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibIndex"))
if action == "cfgbibrank":
tmp_out += """<br /> <a href="%s/admin/bibrank/bibrankadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibRank"))
if action == "cfgwebaccess":
tmp_out += """<br /> <a href="%s/admin/webaccess/webaccessadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebAccess"))
if action == "cfgwebcomment":
tmp_out += """<br /> <a href="%s/admin/webcomment/webcommentadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebComment"))
if action == "cfgwebjournal":
tmp_out += """<br /> <a href="%s/admin/webjournal/webjournaladmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebJournal"))
if action == "cfgwebsearch":
tmp_out += """<br /> <a href="%s/admin/websearch/websearchadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebSearch"))
if action == "cfgwebsubmit":
tmp_out += """<br /> <a href="%s/admin/websubmit/websubmitadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebSubmit"))
if action == "runbibdocfile":
tmp_out += """<br /> <a href="%s/submit/managedocfiles?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run Document File Manager"))
if action == "cfgbibsort":
tmp_out += """<br /> <a href="%s/admin/bibsort/bibsortadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibSort"))
if tmp_out:
out += _("Here are some interesting web admin links for you:") + tmp_out
out += "<br />" + _("For more admin-level activities, see the complete %(x_url_open)sAdmin Area%(x_url_close)s.") %\
{'x_url_open': '<a href="' + CFG_SITE_URL + '/help/admin?ln=' + ln + '">',
'x_url_close': '</a>'}
return out
    def tmpl_create_userinfobox(self, ln, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats):
        """
        Displays the user block (avatar icon plus login/logout links).

        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'url_referer' *string* - URL of the page being displayed
        - 'guest' *boolean* - If the user is guest
        - 'username' *string* - The username (nickname or email)
        - 'submitter' *boolean* - If the user is submitter
        - 'referee' *boolean* - If the user is referee
        - 'admin' *boolean* - If the user is admin
        - 'usebaskets' *boolean* - If baskets are enabled for the user
        - 'usemessages' *boolean* - If messages are enabled for the user
        - 'usealerts' *boolean* - If alerts are enabled for the user
        - 'usegroups' *boolean* - If groups are enabled for the user
        - 'useloans' *boolean* - If loans are enabled for the user
        - 'usestats' *boolean* - If stats are enabled for the user
        @return: HTML snippet for the user info box
        @note: with the update of CSS classes (cds.cds ->
            invenio.css), the variables useloans etc are not used in
            this function, since they are in the menus. But we keep
            them in the function signature for backwards
            compatibility.
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """<img src="%s/img/user-icon-1-20x20.gif" border="0" alt=""/> """ % CFG_SITE_URL
        # Guests only get a login link (carrying the referer so they come
        # back to the current page after logging in).
        if guest:
            out += """%(guest_msg)s ::
            <a class="userinfo" href="%(sitesecureurl)s/youraccount/login?ln=%(ln)s%(referer)s">%(login)s</a>""" % {
                      'sitesecureurl': CFG_SITE_SECURE_URL,
                      'ln' : ln,
                      'guest_msg' : _("guest"),
                      'referer' : url_referer and ('&referer=%s' % urllib.quote(url_referer)) or '',
                      'login' : _('login')
                      }
        # Logged-in users get a link to their account page and a logout link.
        else:
            out += """
            <a class="userinfo" href="%(sitesecureurl)s/youraccount/display?ln=%(ln)s">%(username)s</a> :: """ % {
                     'sitesecureurl' : CFG_SITE_SECURE_URL,
                     'ln' : ln,
                     'username' : username
                   }
            out += """<a class="userinfo" href="%(sitesecureurl)s/youraccount/logout?ln=%(ln)s">%(logout)s</a>""" % {
                     'sitesecureurl' : CFG_SITE_SECURE_URL,
                     'ln' : ln,
                     'logout' : _("logout"),
                   }
        return out
    def tmpl_create_useractivities_menu(self, ln, selected, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats):
        """
        Returns the main navigation menu with actions based on user's
        priviledges
        @param ln: The language to display the interface in
        @type ln: string
        @param selected: If the menu is currently selected
        @type selected: boolean
        @param url_referer: URL of the page being displayed
        @type url_referer: string
        @param guest: If the user is guest
        @type guest: string
        @param username: The username (nickname or email)
        @type username: string
        @param submitter: If the user is submitter
        @type submitter: boolean
        @param referee: If the user is referee
        @type referee: boolean
        @param admin: If the user is admin
        @type admin: boolean
        @param usebaskets: If baskets are enabled for the user
        @type usebaskets: boolean
        @param usemessages: If messages are enabled for the user
        @type usemessages: boolean
        @param usealerts: If alerts are enabled for the user
        @type usealerts: boolean
        @param usegroups: If groups are enabled for the user
        @type usegroups: boolean
        @param useloans: If loans are enabled for the user
        @type useloans: boolean
        @param usestats: If stats are enabled for the user
        @type usestats: boolean
        @return: html menu of the user activities
        @rtype: string
        """
        # load the right message language
        _ = gettext_set_language(ln)
        # Menu header; "on"/"selected" CSS classes highlight the menu when
        # the current page belongs to it.
        out = '''<div class="hassubmenu%(on)s">
  <a hreflang="en" class="header%(selected)s" href="%(CFG_SITE_SECURE_URL)s/youraccount/display?ln=%(ln)s">%(personalize)s</a>
  <ul class="subsubmenu">''' % {
              'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
              'ln' : ln,
              'personalize': _("Personalize"),
              'on': selected and " on" or '',
              'selected': selected and "selected" or ''
              }
        # Each entry below is appended only when the corresponding feature
        # is enabled for (or role is held by) the current user.
        if not guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/youraccount/display?ln=%(ln)s">%(account)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'account' : _('Your account')
                }
        if usealerts or guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/youralerts/list?ln=%(ln)s">%(alerts)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'alerts' : _('Your alerts')
                }
        if referee:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourapprovals.py?ln=%(ln)s">%(approvals)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'approvals' : _('Your approvals')
                }
        if usebaskets or guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourbaskets/display?ln=%(ln)s">%(baskets)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'baskets' : _('Your baskets')
                }
        if usegroups:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourgroups/display?ln=%(ln)s">%(groups)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'groups' : _('Your groups')
                }
        if useloans:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourloans/display?ln=%(ln)s">%(loans)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'loans' : _('Your loans')
                }
        if usemessages:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourmessages/display?ln=%(ln)s">%(messages)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'messages' : _('Your messages')
                }
        if submitter:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yoursubmissions.py?ln=%(ln)s">%(submissions)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'submissions' : _('Your submissions')
                }
        if usealerts or guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/youralerts/display?ln=%(ln)s">%(searches)s</a></li>' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'searches' : _('Your searches')
                }
        out += '</ul></div>'
        return out
    def tmpl_create_adminactivities_menu(self, ln, selected, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats, activities):
        """
        Returns the main navigation menu with actions based on user's
        priviledges
        @param ln: The language to display the interface in
        @type ln: string
        @param selected: If the menu is currently selected
        @type selected: boolean
        @param url_referer: URL of the page being displayed
        @type url_referer: string
        @param guest: If the user is guest
        @type guest: string
        @param username: The username (nickname or email)
        @type username: string
        @param submitter: If the user is submitter
        @type submitter: boolean
        @param referee: If the user is referee
        @type referee: boolean
        @param admin: If the user is admin
        @type admin: boolean
        @param usebaskets: If baskets are enabled for the user
        @type usebaskets: boolean
        @param usemessages: If messages are enabled for the user
        @type usemessages: boolean
        @param usealerts: If alerts are enabled for the user
        @type usealerts: boolean
        @param usegroups: If groups are enabled for the user
        @type usegroups: boolean
        @param useloans: If loans are enabled for the user
        @type useloans: boolean
        @param usestats: If stats are enabled for the user
        @type usestats: boolean
        @param activities: dictionary of admin activities (name -> URL)
        @rtype activities: dict
        @return: html menu of the user activities; empty string when the
            user has no admin activities at all
        @rtype: string
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = ''
        # The whole menu is emitted only if the user has at least one
        # admin activity.
        if activities:
            out += '''<div class="hassubmenu%(on)s">
  <a hreflang="en" class="header%(selected)s" href="%(CFG_SITE_SECURE_URL)s/youraccount/youradminactivities?ln=%(ln)s">%(admin)s</a>
  <ul class="subsubmenu">''' % {
                  'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                  'ln' : ln,
                  'admin': _("Administration"),
                  'on': selected and " on" or '',
                  'selected': selected and "selected" or ''
                  }
            # Python 2 dict iteration; entries are listed alphabetically.
            for name in sorted(activities.iterkeys()):
                url = activities[name]
                out += '<li><a href="%(url)s">%(name)s</a></li>' % {
                       'url': url,
                       'name': name
                     }
            if usestats:
                out += """<li><a href="%(CFG_SITE_URL)s/stats/?ln=%(ln)s">%(stats)s</a></li>""" % {
                       'CFG_SITE_URL' : CFG_SITE_URL,
                       'ln' : ln,
                       'stats' : _("Statistics"),
                     }
            out += '</ul></div>'
        return out
def tmpl_warning(self, warnings, ln=CFG_SITE_LANG):
"""
Display len(warnings) warning fields
@param infos: list of strings
@param ln=language
@return: html output
"""
if not((type(warnings) is list) or (type(warnings) is tuple)):
warnings = [warnings]
warningbox = ""
if warnings != []:
warningbox = "<div class=\"warningbox\">\n <b>Warning:</b>\n"
for warning in warnings:
lines = warning.split("\n")
warningbox += " <p>"
for line in lines[0:-1]:
warningbox += line + " <br />\n"
warningbox += lines[-1] + " </p>"
warningbox += "</div><br />\n"
return warningbox
def tmpl_error(self, error, ln=CFG_SITE_LANG):
"""
Display error
@param error: string
@param ln=language
@return: html output
"""
_ = gettext_set_language(ln)
errorbox = ""
if error != "":
errorbox = "<div class=\"errorbox\">\n <b>Error:</b>\n"
errorbox += " <p>"
errorbox += error + " </p>"
errorbox += "</div><br />\n"
return errorbox
def tmpl_display_all_groups(self,
infos,
admin_group_html,
member_group_html,
external_group_html = None,
warnings=[],
ln=CFG_SITE_LANG):
"""
Displays the 3 tables of groups: admin, member and external
Parameters:
- 'ln' *string* - The language to display the interface in
- 'admin_group_html' *string* - HTML code for displaying all the groups
the user is the administrator of
- 'member_group_html' *string* - HTML code for displaying all the groups
the user is member of
- 'external_group_html' *string* - HTML code for displaying all the
external groups the user is member of
"""
_ = gettext_set_language(ln)
group_text = self.tmpl_infobox(infos)
group_text += self.tmpl_warning(warnings)
if external_group_html:
group_text += """
<table>
<tr>
<td>%s</td>
</tr>
<tr>
<td><br />%s</td>
</tr>
<tr>
<td><br /><a name='external_groups'></a>%s</td>
</tr>
</table>""" %(admin_group_html, member_group_html, external_group_html)
else:
group_text += """
<table>
<tr>
<td>%s</td>
</tr>
<tr>
<td><br />%s</td>
</tr>
</table>""" %(admin_group_html, member_group_html)
return group_text
    def tmpl_display_admin_groups(self, groups, ln=CFG_SITE_LANG):
        """
        Display the groups the user is admin of.

        Parameters:

        - 'ln' *string* - The language to display the interface in
        - 'groups' *list* - All the group the user is admin of, as
            (grpID, name, description) tuples
        - 'infos' *list* - Display infos on top of admin group table
        @return: HTML table of the administered groups, with per-row
            edit/members action icons and a "Create new group" button
        """
        _ = gettext_set_language(ln)
        # Template for the small action icons (edit group / edit members)
        # rendered in the two right-most columns of each row.
        img_link = """
        <a href="%(siteurl)s/yourgroups/%(action)s?grpID=%(grpID)s&amp;ln=%(ln)s">
        <img src="%(siteurl)s/img/%(img)s" alt="%(text)s" style="border:0" width="25"
        height="25" /><br /><small>%(text)s</small>
        </a>"""

        out = self.tmpl_group_table_title(img="/img/group_admin.png",
                                          text=_("You are an administrator of the following groups:") )

        out += """
<table class="mailbox">
  <thead class="mailboxheader">
    <tr class="inboxheader">
      <td>%s</td>
      <td>%s</td>
      <td style="width: 20px;" >&nbsp;</td>
      <td style="width: 20px;">&nbsp;</td>
    </tr>
  </thead>
  <tfoot>
    <tr style="height:0px;">
      <td></td>
      <td></td>
      <td></td>
      <td></td>
    </tr>
  </tfoot>
  <tbody class="mailboxbody">""" %(_("Group"), _("Description"))
        if len(groups) == 0:
            out += """
    <tr class="mailboxrecord" style="height: 100px;">
      <td colspan="4" style="text-align: center;">
        <small>%s</small>
      </td>
    </tr>""" %(_("You are not an administrator of any groups."),)
        for group_data in groups:
            (grpID, name, description) = group_data
            edit_link = img_link % {'siteurl' : CFG_SITE_URL,
                                    'grpID' : grpID,
                                    'ln': ln,
                                    'img':"webbasket_create_small.png",
                                    'text':_("Edit group"),
                                    'action':"edit"
                                    }
            # NOTE(review): the %s placeholder is filled with '' here, so
            # the label renders as "Edit  members" — looks like a legacy
            # i18n workaround; confirm before changing the msgid.
            members_link = img_link % {'siteurl' : CFG_SITE_URL,
                                       'grpID' : grpID,
                                       'ln': ln,
                                       'img':"webbasket_usergroup.png",
                                       'text':_("Edit %s members") % '',
                                       'action':"members"
                                       }
            # Group name/description come from user input: escape them.
            out += """
    <tr class="mailboxrecord">
      <td>%s</td>
      <td>%s</td>
      <td style="text-align: center;" >%s</td>
      <td style="text-align: center;" >%s</td>
    </tr>""" % (cgi.escape(name), cgi.escape(description), edit_link, members_link)
        out += """
    <tr class="mailboxfooter">
      <td colspan="2">
        <form name="newGroup" action="create?ln=%(ln)s" method="post">
          <input type="submit" name="create_group" value="%(write_label)s" class="formbutton" />
        </form>
      </td>
      <td>&nbsp;</td>
      <td>&nbsp;</td>
      <td>&nbsp;</td>
     </tr>
  </tbody>
</table>""" % {'ln': ln,
               'write_label': _("Create new group"),
               }
        return out
def tmpl_display_member_groups(self, groups, ln=CFG_SITE_LANG):
"""
Display the groups the user is member of.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'groups' *list* - All the group the user is member of
"""
_ = gettext_set_language(ln)
group_text = self.tmpl_group_table_title(img="/img/webbasket_us.png", text=_("You are a member of the following groups:"))
group_text += """
<table class="mailbox">
<thead class="mailboxheader">
<tr class="inboxheader">
<td>%s</td>
<td>%s</td>
</tr>
</thead>
<tfoot>
<tr style="height:0px;">
<td></td>
<td></td>
</tr>
</tfoot>
<tbody class="mailboxbody">""" % (_("Group"), _("Description"))
if len(groups) == 0:
group_text += """
<tr class="mailboxrecord" style="height: 100px;">
<td colspan="2" style="text-align: center;">
<small>%s</small>
</td>
</tr>""" %(_("You are not a member of any groups."),)
for group_data in groups:
(id, name, description) = group_data
group_text += """
<tr class="mailboxrecord">
<td>%s</td>
<td>%s</td>
</tr>""" % (cgi.escape(name), cgi.escape(description))
group_text += """
<tr class="mailboxfooter">
<td>
<form name="newGroup" action="join?ln=%(ln)s" method="post">
<input type="submit" name="join_group" value="%(join_label)s" class="formbutton" />
</form>
</td>
<td>
<form name="newGroup" action="leave?ln=%(ln)s" method="post">
<input type="submit" name="leave" value="%(leave_label)s" class="formbutton" />
</form>
</td>
</tr>
</tbody>
</table>
""" % {'ln': ln,
'join_label': _("Join new group"),
'leave_label':_("Leave group")
}
return group_text
def tmpl_display_external_groups(self, groups, ln=CFG_SITE_LANG):
"""
Display the external groups the user is member of.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'groups' *list* - All the group the user is member of
"""
_ = gettext_set_language(ln)
group_text = self.tmpl_group_table_title(img="/img/webbasket_us.png", text=_("You are a member of the following external groups:"))
group_text += """
<table class="mailbox">
<thead class="mailboxheader">
<tr class="inboxheader">
<td>%s</td>
<td>%s</td>
</tr>
</thead>
<tfoot>
<tr style="height:0px;">
<td></td>
<td></td>
</tr>
</tfoot>
<tbody class="mailboxbody">""" % (_("Group"), _("Description"))
if len(groups) == 0:
group_text += """
<tr class="mailboxrecord" style="height: 100px;">
<td colspan="2" style="text-align: center;">
<small>%s</small>
</td>
</tr>""" %(_("You are not a member of any external groups."),)
for group_data in groups:
(id, name, description) = group_data
group_text += """
<tr class="mailboxrecord">
<td>%s</td>
<td>%s</td>
</tr>""" % (cgi.escape(name), cgi.escape(description))
group_text += """
</tbody>
</table>
"""
return group_text
def tmpl_display_input_group_info(self,
group_name,
group_description,
join_policy,
act_type="create",
grpID=None,
warnings=[],
ln=CFG_SITE_LANG):
"""
Display group data when creating or updating a group:
Name, description, join_policy.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'group_name' *string* - name of the group
- 'group_description' *string* - description of the group
- 'join_policy' *string* - join policy
- 'act_type' *string* - info about action : create or edit(update)
- 'grpID' *int* - ID of the group(not None in case of group editing)
- 'warnings' *list* - Display warning if values are not correct
"""
_ = gettext_set_language(ln)
#default
hidden_id =""
form_name = "create_group"
action = CFG_SITE_URL + '/yourgroups/create'
button_label = _("Create new group")
button_name = "create_button"
label = _("Create new group")
delete_text = ""
if act_type == "update":
form_name = "update_group"
action = CFG_SITE_URL + '/yourgroups/edit'
button_label = _("Update group")
button_name = "update"
label = _('Edit group %s') % cgi.escape(group_name)
delete_text = """<input type="submit" value="%s" class="formbutton" name="%s" />"""
delete_text %= (_("Delete group"),"delete")
if grpID is not None:
hidden_id = """<input type="hidden" name="grpID" value="%s" />"""
hidden_id %= grpID
out = self.tmpl_warning(warnings)
out += """
<form name="%(form_name)s" action="%(action)s" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<div style="padding:10px;">
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(logo)s" alt="%(label)s" />
</td>
<td class="bsktitle">
<b>%(label)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td><label for="group_name">%(name_label)s</label></td>
<td>
<input type="text" name="group_name" id="group_name" value="%(group_name)s" />
</td>
</tr>
<tr>
<td><label for="group_description">%(description_label)s</label></td>
<td>
<input type="text" name="group_description" id="group_description" value="%(group_description)s" />
</td>
</tr>
<tr>
<td>%(join_policy_label)s</td>
<td>
%(join_policy)s
</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
%(hidden_id)s
<table>
<tr>
<td>
<input type="submit" value="%(button_label)s" class="formbutton" name="%(button_name)s" />
</td>
<td>
%(delete_text)s
</td>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</div>
</form>
"""
out %= {'action' : action,
'logo': CFG_SITE_URL + '/img/webbasket_create.png',
'label': label,
'form_name' : form_name,
'name_label': _("Group name:"),
'delete_text': delete_text,
'description_label': _("Group description:"),
'join_policy_label': _("Group join policy:"),
'group_name': cgi.escape(group_name, 1),
'group_description': cgi.escape(group_description, 1),
'button_label': button_label,
'button_name':button_name,
'cancel_label':_("Cancel"),
'hidden_id':hidden_id,
'ln': ln,
'join_policy' :self.__create_join_policy_selection_menu("join_policy",
join_policy,
ln)
}
return out
def tmpl_display_input_join_group(self,
group_list,
group_name,
group_from_search,
search,
warnings=[],
ln=CFG_SITE_LANG):
"""
Display the groups the user can join.
He can use default select list or the search box
Parameters:
- 'ln' *string* - The language to display the interface in
- 'group_list' *list* - All the group the user can join
- 'group_name' *string* - Name of the group the user is looking for
- 'group_from search' *list* - List of the group the user can join matching group_name
- 'search' *int* - User is looking for group using group_name
- 'warnings' *list* - Display warning if two group are selected
"""
_ = gettext_set_language(ln)
out = self.tmpl_warning(warnings)
search_content = ""
if search:
search_content = """<tr><td> </td><td>"""
if group_from_search != []:
search_content += self.__create_select_menu('grpID', group_from_search, _("Please select:"))
else:
search_content += _("No matching group")
search_content += """</td><td> </td></tr>"""
out += """
<form name="join_group" action="%(action)s" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<div style="padding:10px;">
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(logo)s" alt="%(label)s" />
</td>
<td class="bsktitle">
<b>%(label)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td>%(list_label)s</td>
<td>
%(group_list)s
</td>
<td>
</td>
</tr>
<tr>
<td><br /><label for="group_name">%(label2)s</label></td>
<td><br /><input type="text" name="group_name" id="group_name" value="%(group_name)s" /></td>
<td><br />
<input type="submit" name="find_button" value="%(find_label)s" class="nonsubmitbutton" />
</td>
</tr>
%(search_content)s
</table>
</td>
</tr>
</tbody>
</table>
<table>
<tr>
<td>
<input type="submit" name="join_button" value="%(label)s" class="formbutton" />
</td>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</div>
</form>
"""
out %= {'action' : CFG_SITE_URL + '/yourgroups/join',
'logo': CFG_SITE_URL + '/img/webbasket_create.png',
'label': _("Join group"),
'group_name': cgi.escape(group_name, 1),
'label2':_("or find it") + ': ',
'list_label':_("Choose group:"),
'ln': ln,
'find_label': _("Find group"),
'cancel_label':_("Cancel"),
'group_list' :self.__create_select_menu("grpID",group_list, _("Please select:")),
'search_content' : search_content
}
return out
def tmpl_display_manage_member(self,
grpID,
group_name,
members,
pending_members,
infos=[],
warnings=[],
ln=CFG_SITE_LANG):
"""Display current members and waiting members of a group.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'grpID *int* - ID of the group
- 'group_name' *string* - Name of the group
- 'members' *list* - List of the current members
- 'pending_members' *list* - List of the waiting members
- 'infos' *tuple of 2 lists* - Message to inform user about his last action
- 'warnings' *list* - Display warning if two group are selected
"""
_ = gettext_set_language(ln)
out = self.tmpl_warning(warnings)
out += self.tmpl_infobox(infos)
out += """
<form name="member" action="%(action)s" method="post">
<p>%(title)s</p>
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s"/>
<table>
<tr>
<td>
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(imgurl)s/webbasket_usergroup.png" alt="%(img_alt_header1)s" />
</td>
<td class="bsktitle">
%(header1)s<br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
%(member_text)s
</tr>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(imgurl)s/webbasket_usergroup_gray.png" alt="%(img_alt_header2)s" />
</td>
<td class="bsktitle">
%(header2)s<br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
%(pending_text)s
</tr>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<table class="bskbasket" style="width: 400px">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(imgurl)s/iconpen.gif" alt="%(img_alt_header3)s" />
</td>
<td class="bsktitle">
<b>%(header3)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td colspan="2" style="padding: 0 5 10 5;">%(invite_text)s</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</form>
"""
if members :
member_list = self.__create_select_menu("member_id", members, _("Please select:"))
member_text = """
<td style="padding: 0 5 10 5;">%s</td>
<td style="padding: 0 5 10 5;">
<input type="submit" name="remove_member" value="%s" class="nonsubmitbutton"/>
</td>""" % (member_list,_("Remove member"))
else :
member_text = """<td style="padding: 0 5 10 5;" colspan="2">%s</td>""" % _("No members.")
if pending_members :
pending_list = self.__create_select_menu("pending_member_id", pending_members, _("Please select:"))
pending_text = """
<td style="padding: 0 5 10 5;">%s</td>
<td style="padding: 0 5 10 5;">
<input type="submit" name="add_member" value="%s" class="nonsubmitbutton"/>
</td>
<td style="padding: 0 5 10 5;">
<input type="submit" name="reject_member" value="%s" class="nonsubmitbutton"/>
</td>""" % (pending_list,_("Accept member"), _("Reject member"))
else :
pending_text = """<td style="padding: 0 5 10 5;" colspan="2">%s</td>""" % _("No members awaiting approval.")
header1 = self.tmpl_group_table_title(text=_("Current members"))
header2 = self.tmpl_group_table_title(text=_("Members awaiting approval"))
header3 = _("Invite new members")
write_a_message_url = create_url(
"%s/yourmessages/write" % CFG_SITE_URL,
{
'ln' : ln,
'msg_subject' : _('Invitation to join "%s" group' % escape_html(group_name)),
'msg_body' : _("""\
Hello:
I think you might be interested in joining the group "%(x_name)s".
You can join by clicking here: %(x_url)s.
Best regards.
""") % {'x_name': group_name,
'x_url': create_html_link("%s/yourgroups/join" % CFG_SITE_URL, { 'grpID' : grpID,
'join_button' : "1",
},
link_label=group_name, escape_urlargd=True, escape_linkattrd=True)}})
link_open = '<a href="%s">' % escape_html(write_a_message_url)
invite_text = _("If you want to invite new members to join your group, please use the %(x_url_open)sweb message%(x_url_close)s system.") % \
{'x_url_open': link_open,
'x_url_close': '</a>'}
action = CFG_SITE_URL + '/yourgroups/members?ln=' + ln
out %= {'title':_('Group: %s') % escape_html(group_name),
'member_text' : member_text,
'pending_text' :pending_text,
'action':action,
'grpID':grpID,
'header1': header1,
'header2': header2,
'header3': header3,
'img_alt_header1': _("Current members"),
'img_alt_header2': _("Members awaiting approval"),
'img_alt_header3': _("Invite new members"),
'invite_text': invite_text,
'imgurl': CFG_SITE_URL + '/img',
'cancel_label':_("Cancel"),
'ln':ln
}
return out
def tmpl_display_input_leave_group(self,
groups,
warnings=[],
ln=CFG_SITE_LANG):
"""Display groups the user can leave.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'groups' *list* - List of groups the user is currently member of
- 'warnings' *list* - Display warning if no group is selected
"""
_ = gettext_set_language(ln)
out = self.tmpl_warning(warnings)
out += """
<form name="leave" action="%(action)s" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<div style="padding:10px;">
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(logo)s" alt="%(label)s" />
</td>
<td class="bsktitle">
<b>%(label)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td>%(list_label)s</td>
<td>
%(groups)s
</td>
<td>
</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
<table>
<tr>
<td>
%(submit)s
</td>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</div>
</form>
"""
if groups:
groups = self.__create_select_menu("grpID", groups, _("Please select:"))
list_label = _("Group list")
submit = """<input type="submit" name="leave_button" value="%s" class="formbutton"/>""" % _("Leave group")
else :
groups = _("You are not member of any group.")
list_label = ""
submit = ""
action = CFG_SITE_URL + '/yourgroups/leave?ln=%s'
action %= (ln)
out %= {'groups' : groups,
'list_label' : list_label,
'action':action,
'logo': CFG_SITE_URL + '/img/webbasket_create.png',
'label' : _("Leave group"),
'cancel_label':_("Cancel"),
'ln' :ln,
'submit' : submit
}
return out
def tmpl_confirm_delete(self, grpID, ln=CFG_SITE_LANG):
"""
display a confirm message when deleting a group
@param grpID *int* - ID of the group
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
action = CFG_SITE_URL + '/yourgroups/edit'
out = """
<form name="delete_group" action="%(action)s" method="post">
<table class="confirmoperation">
<tr>
<td colspan="2" class="confirmmessage">
%(message)s
</td>
</tr>
<tr>
<td>
<input type="hidden" name="confirmed" value="1" />
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" name="delete" value="%(yes_label)s" class="formbutton" />
</td>
<td>
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" value="%(no_label)s" class="formbutton" />
</td>
</tr>
</table>
</form>"""% {'message': _("Are you sure you want to delete this group?"),
'ln':ln,
'yes_label': _("Yes"),
'no_label': _("No"),
'grpID':grpID,
'action': action
}
return out
def tmpl_confirm_leave(self, uid, grpID, ln=CFG_SITE_LANG):
"""
display a confirm message
@param grpID *int* - ID of the group
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
action = CFG_SITE_URL + '/yourgroups/leave'
out = """
<form name="leave_group" action="%(action)s" method="post">
<table class="confirmoperation">
<tr>
<td colspan="2" class="confirmmessage">
%(message)s
</td>
</tr>
<tr>
<td>
<input type="hidden" name="confirmed" value="1" />
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" name="leave_button" value="%(yes_label)s" class="formbutton" />
</td>
<td>
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" value="%(no_label)s" class="formbutton" />
</td>
</tr>
</table>
</form>"""% {'message': _("Are you sure you want to leave this group?"),
'ln':ln,
'yes_label': _("Yes"),
'no_label': _("No"),
'grpID':grpID,
'action': action
}
return out
def __create_join_policy_selection_menu(self, name, current_join_policy, ln=CFG_SITE_LANG):
"""Private function. create a drop down menu for selection of join policy
@param current_join_policy: join policy as defined in CFG_WEBSESSION_GROUP_JOIN_POLICY
@param ln: language
"""
_ = gettext_set_language(ln)
elements = [(CFG_WEBSESSION_GROUP_JOIN_POLICY['VISIBLEOPEN'],
_("Visible and open for new members")),
(CFG_WEBSESSION_GROUP_JOIN_POLICY['VISIBLEMAIL'],
_("Visible but new members need approval"))
]
select_text = _("Please select:")
return self.__create_select_menu(name, elements, select_text, selected_key=current_join_policy)
def __create_select_menu(self, name, elements, select_text, multiple=0, selected_key=None):
""" private function, returns a popup menu
@param name: name of HTML control
@param elements: list of (key, value)
"""
if multiple :
out = """
<select name="%s" multiple="multiple" style="width:100%%">"""% (name)
else :
out = """<select name="%s" style="width:100%%">""" % name
out += '<option value="-1">%s</option>' % (select_text)
for (key, label) in elements:
selected = ''
if key == selected_key:
selected = ' selected="selected"'
out += '<option value="%s"%s>%s</option>'% (key, selected, label)
out += '</select>'
return out
def tmpl_infobox(self, infos, ln=CFG_SITE_LANG):
"""Display len(infos) information fields
@param infos: list of strings
@param ln=language
@return: html output
"""
_ = gettext_set_language(ln)
if not((type(infos) is list) or (type(infos) is tuple)):
infos = [infos]
infobox = ""
for info in infos:
infobox += '<div><span class="info">'
lines = info.split("\n")
for line in lines[0:-1]:
infobox += line + "<br />\n"
infobox += lines[-1] + "</span></div>\n"
return infobox
def tmpl_navtrail(self, ln=CFG_SITE_LANG, title=""):
"""
display the navtrail, e.g.:
Your account > Your group > title
@param title: the last part of the navtrail. Is not a link
@param ln: language
return html formatted navtrail
"""
_ = gettext_set_language(ln)
nav_h1 = '<a class="navtrail" href="%s/youraccount/display">%s</a>'
nav_h2 = ""
if (title != ""):
nav_h2 = ' > <a class="navtrail" href="%s/yourgroups/display">%s</a>'
nav_h2 = nav_h2 % (CFG_SITE_URL, _("Your Groups"))
return nav_h1 % (CFG_SITE_URL, _("Your Account")) + nav_h2
def tmpl_group_table_title(self, img="", text="", ln=CFG_SITE_LANG):
"""
display the title of a table:
- 'img' *string* - img path
- 'text' *string* - title
- 'ln' *string* - The language to display the interface in
"""
out = "<div>"
if img:
out += """
<img src="%s" alt="" />
""" % (CFG_SITE_URL + img)
out += """
<b>%s</b>
</div>""" % text
return out
def tmpl_admin_msg(self, group_name, grpID, ln=CFG_SITE_LANG):
"""
return message content for joining group
- 'group_name' *string* - name of the group
- 'grpID' *int* - ID of the group
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
subject = _("Group %s: New membership request") % group_name
url = CFG_SITE_URL + "/yourgroups/members?grpID=%s&ln=%s"
url %= (grpID, ln)
# FIXME: which user? We should show his nickname.
body = (_("A user wants to join the group %s.") % group_name) + '<br />'
body += _("Please %(x_url_open)saccept or reject%(x_url_close)s this user's request.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_member_msg(self,
group_name,
accepted=0,
ln=CFG_SITE_LANG):
"""
return message content when new member is accepted/rejected
- 'group_name' *string* - name of the group
- 'accepted' *int* - 1 if new membership has been accepted, 0 if it has been rejected
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
if accepted:
subject = _("Group %s: Join request has been accepted") % (group_name)
body = _("Your request for joining group %s has been accepted.") % (group_name)
else:
subject = _("Group %s: Join request has been rejected") % (group_name)
body = _("Your request for joining group %s has been rejected.") % (group_name)
url = CFG_SITE_URL + "/yourgroups/display?ln=" + ln
body += '<br />'
body += _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_delete_msg(self,
group_name,
ln=CFG_SITE_LANG):
"""
return message content when new member is accepted/rejected
- 'group_name' *string* - name of the group
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
subject = _("Group %s has been deleted") % group_name
url = CFG_SITE_URL + "/yourgroups/display?ln=" + ln
body = _("Group %s has been deleted by its administrator.") % group_name
body += '<br />'
body += _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
    def tmpl_group_info(self, nb_admin_groups=0, nb_member_groups=0, nb_total_groups=0, ln=CFG_SITE_LANG):
        """
        Display a one-line summary of the user's group counts
        (used by myaccount.py).

        @param nb_admin_groups: number of groups the user is admin of
        @param nb_member_groups: number of groups the user is member of
        @param nb_total_groups: number of groups the user belongs to
        @param ln: language
        @return: html output.
        """
        _ = gettext_set_language(ln)
        # The whole sentence passes through gettext before substitution so
        # translators are free to reorder the named placeholders.
        out = _("You can consult the list of %(x_url_open)s%(x_nb_total)i groups%(x_url_close)s you are subscribed to (%(x_nb_member)i) or administering (%(x_nb_admin)i).")
        out %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourgroups/display?ln=' + ln + '">',
                'x_nb_total': nb_total_groups,
                'x_url_close': '</a>',
                'x_nb_admin': nb_admin_groups,
                'x_nb_member': nb_member_groups}
        return out
    def tmpl_general_warnings(self, warning_list, ln=CFG_SITE_LANG):
        """
        Display information to the admin user about possible
        security problems in the system.

        @param warning_list: list of known warning identifiers; each one
            recognised here appends a red paragraph to the output
        @param ln: language
        @return: html output (empty string when nothing applies)
        """
        message = ""
        _ = gettext_set_language(ln)
        #Try and connect to the mysql database with the default invenio password
        if "warning_mysql_password_equal_to_invenio_password" in warning_list:
            message += "<p><font color=red>"
            message += _("Warning: The password set for MySQL root user is the same as the default Invenio password. For security purposes, you may want to change the password.")
            message += "</font></p>"
        #Try and connect to the invenio database with the default invenio password
        if "warning_invenio_password_equal_to_default" in warning_list:
            message += "<p><font color=red>"
            message += _("Warning: The password set for the Invenio MySQL user is the same as the shipped default. For security purposes, you may want to change the password.")
            message += "</font></p>"
        #Check if the admin password is empty
        if "warning_empty_admin_password" in warning_list:
            message += "<p><font color=red>"
            message += _("Warning: The password set for the Invenio admin user is currently empty. For security purposes, it is strongly recommended that you add a password.")
            message += "</font></p>"
        #Check if the admin email has been changed from the default
        if "warning_site_support_email_equal_to_default" in warning_list:
            message += "<p><font color=red>"
            message += _("Warning: The email address set for support email is currently set to info@invenio-software.org. It is recommended that you change this to your own address.")
            message += "</font></p>"
        #Check for a new release
        if "note_new_release_available" in warning_list:
            message += "<p><font color=red>"
            message += _("A newer version of Invenio is available for download. You may want to visit ")
            message += "<a href=\"http://invenio-software.org/wiki/Installation/Download\">http://invenio-software.org/wiki/Installation/Download</a>"
            message += "</font></p>"
        #Error downloading release notes
        if "error_cannot_download_release_notes" in warning_list:
            message += "<p><font color=red>"
            message += _("Cannot download or parse release notes from http://invenio-software.org/repo/invenio/tree/RELEASE-NOTES")
            message += "</font></p>"
        return message
|
cul-it/Invenio
|
modules/websession/lib/websession_templates.py
|
Python
|
gpl-2.0
| 103,402
|
[
"VisIt"
] |
f6903356098f5d872c02a64c407739c4ef00e3896fc91c217fc0bf93f01ddc94
|
"""Dummy setup.py file solely for the purposes of getting an on-the-fly
computed version number into the conda recipe.
"""
import sys
from distutils.core import setup
def version_func():
    """Compute the long version string by shelling out to psi4/versioner.py."""
    import subprocess
    command = 'python psi4/versioner.py --formatonly --format={versionlong}'
    proc = subprocess.Popen(command.split(), shell=False, stdout=subprocess.PIPE)
    out, _err = proc.communicate()
    # communicate() yields bytes on Python 3; the file must stay py2-compatible,
    # hence the explicit version gate instead of an unconditional decode.
    if sys.version_info >= (3, 0):
        out = out.decode('utf-8')
    return out.strip()
# Version is the only dynamic field: the rest of the metadata lives in the
# conda recipe; the string is computed on the fly by psi4/versioner.py.
setup(
    version=version_func(),
)
|
andysim/psi4
|
conda/_conda_vers.py
|
Python
|
gpl-2.0
| 566
|
[
"Psi4"
] |
909176ae2b424a23e92a25b036e0740edc0f29eadd4bba6b9aedb18055fd0732
|
# region gplv3preamble
# The Medical Simulation Markup Language (MSML) - Simplifying the biomechanical modeling workflow
#
# MSML has been developed in the framework of 'SFB TRR 125 Cognition-Guided Surgery'
#
# If you use this software in academic work, please cite the paper:
# S. Suwelack, M. Stoll, S. Schalck, N.Schoch, R. Dillmann, R. Bendl, V. Heuveline and S. Speidel,
# The Medical Simulation Markup Language (MSML) - Simplifying the biomechanical modeling workflow,
# Medicine Meets Virtual Reality (MMVR) 2014
#
# Copyright (C) 2013-2014 see Authors.txt
#
# If you have any questions please feel free to contact us at suwelack@kit.edu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# endregion
"""
MSMLFile to XML
"""
__author__ = 'Alexander Weigl <uiduw@student.kit.edu>'
from msml.exporter.visitor import *
def Sub(root, tagname, **kwargs):
    """Append a child element `tagname` to `root`.

    Keyword arguments become XML attributes; ``None`` values are dropped,
    classes render as their ``__name__``, everything else via ``str``.
    """
    def as_text(value):
        if isinstance(value, type):
            return value.__name__
        return str(value)
    attribs = {key: as_text(value)
               for key, value in kwargs.items()
               if value is not None}
    return etree.SubElement(root, tagname, attribs)
def object_element(parent, tag, attributes):
    """Serialize an attribute mapping into a <tag> child of *parent*.

    ``Reference`` values render as ``${task.slot}``, ``Constant`` values as
    their payload; the internal ``__tag__`` key is dropped.
    """
    def render(value):
        if isinstance(value, Reference):
            return "${%s.%s}" % (value.task, value.slot)
        if isinstance(value, Constant):
            return value.value
        return value
    attribs = {}
    for key, value in attributes.items():
        if key == "__tag__":
            continue
        attribs[str(key)] = render(value)
    return Sub(parent, tag, **attribs)
class XmlBuilder(VisitorExporterFramework, Visitor):
    """Visitor that serializes an in-memory MSMLFile back into an XML tree.

    Each ``*_begin`` hook creates and returns the container element for the
    node being entered (the visitor framework hands it down to nested hooks
    as the ``_parent``-style arguments); the ``*_element`` hooks append leaf
    elements to that container. Call :meth:`to_xml` for the root ``<msml>``.
    """
    def __init__(self, msml_file):
        VisitorExporterFramework.__init__(self, msml_file, None)
        Visitor.__init__(self, self)
        # The framework dispatches back into this very object.
        self.visitor = self
    def gather_inputs(self):
        # Exporter-framework hook; nothing to gather for plain serialization.
        pass
    def gather_output(self):
        # Exporter-framework hook; nothing to gather for plain serialization.
        pass
    def to_xml(self):
        # Run the traversal; returns the root element built by msml_begin.
        return self.visit()
    def __object_element(self, parent, element):
        # Serialize a generic ObjectElement: tag name plus attribute dict.
        assert isinstance(element, ObjectElement)
        return object_element(parent, element.tag, element.attributes)
    def write_export_file(self, msml_file_path, product):
        # Not applicable: this exporter builds a DOM, it does not write files.
        pass
    def scene_begin(self, _msml, scene):
        return Sub(_msml, "scene")
    def object_sets_begin(self, _msml, _scene, _object, sets):
        return Sub(_object, "sets")
    def object_sets_elements_begin(self, _msml, _scene, _object, _object_sets, elements):
        return Sub(_object_sets, "elements")
    def object_sets_nodes_begin(self, _msml, _scene, _object, _object_sets, nodes):
        return Sub(_object_sets, 'nodes')
    def object_sets_surfaces_begin(self, _msml, _scene, _object, _object_sets, surfaces):
        return Sub(_object_sets, "surfaces")
    def object_sets_surfaces_element(self, _msml, _scene, _object, _object_sets, _surfaces, surface):
        return self.__object_element(_object_sets, surface)
    def object_sets_nodes_element(self, _msml, _scene, _object, _object_sets, _nodes, node):
        return self.__object_element(_object_sets, node)
    def object_sets_elements_element(self, _msml, _scene, _object, _object_sets, _elements, element):
        return self.__object_element(_object_sets, element)
    def object_output_begin(self, _msml, _scene, _object, outputs):
        return Sub(_object, "output")
    def object_output_element(self, _msml, _scene, _object, _output, output):
        return self.__object_element(_output, output)
    def object_mesh(self, _msml, _scene, _object, mesh):
        # The mesh's type string doubles as the XML tag name.
        assert isinstance(mesh, Mesh)
        return Sub(_object, mesh.type, id=mesh.id, mesh=mesh.mesh)
    def object_material_region_begin(self, _msml, _scene, _object, _material, region):
        assert isinstance(region, MaterialRegion)
        return Sub(_material, "region",
                   id=region.id)
    def object_material_region_element(self, _msml, _scene, _object, _material, _region, element):
        return self.__object_element(_region, element)
    def object_material_begin(self, _msml, _scene, _object, materials):
        return Sub(_object, "material")
    def object_constraints_begin(self, _msml, _scene, _object, constraints):
        return Sub(_object, "constraints")
    def object_constraint_element(self, _msml, _scene, _object, _constraints, _constraint, element):
        return self.__object_element(_constraint, element)
    def object_constraint_begin(self, _msml, _scene, _object, _constraints, constraint):
        assert isinstance(constraint, ObjectConstraints)
        return Sub(_constraints, "constraint", name=constraint.name, forStep=constraint.for_step)
    def object_begin(self, _msml, _scene, object):
        return Sub(_scene, "object", id=object.id)
    def msml_begin(self, msml_file):
        # Root of the whole document.
        return etree.Element("msml")
        # TODO Add namespace attributes
        # xmlns:msml="http://sfb125.de/msml"
        # xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        # xsi:schemaLocation="http://sfb125.de/msml
    def environment_solver(self, _msml, _environment, solver):
        return Sub(_environment, "solver",
                   dampingRayleighRatioMass=solver.dampingRayleighRatioMass,
                   preconditioner=solver.preconditioner,
                   dampingRayleighRatioStiffness=solver.dampingRayleighRatioStiffness,
                   linearSolver=solver.linearSolver,
                   timeIntegration=solver.timeIntegration,
                   processingUnit=solver.processingUnit)
    def environment_simulation_begin(self, _msml, _environment, simulation):
        return Sub(_environment, "simulation")
    def environment_simulation_element(self, _msml, _environment, _simulation, step):
        return Sub(_simulation, "step", dt=step.dt, name=step.name,
                   iterations=step.iterations)
    def environment_begin(self, _msml, env):
        return Sub(_msml, "environment")
    def variables_begin(self, _msml, variables):
        return Sub(_msml, "variables")
    def variables_element(self, _msml, _variables, variable):
        assert isinstance(variable, MSMLVariable)
        # Auto-generated variables ("_gen*") are internal; omit them from
        # the exported document.
        if variable.name.startswith("_gen"):
            return None
        return Sub(_variables, "var", format=variable.physical_type, name=variable.name, value=variable.value,
                   type=variable.logical_type)
    def workflow_begin(self, _msml, workflow):
        return Sub(_msml, "workflow")
    def workflow_element(self, _msml, _workflow, task):
        # A task's operator name becomes the tag; its id joins the attributes.
        a = dict(task.attributes)
        a['id'] = task.id
        return object_element(_workflow, task.name, a)
__all__ = ['to_xml', 'save_xml']
def to_xml(msml_file):
    """translate the given `msml_file` into a XML dom tree.
    :param msml.model.MSMLFile msml_file:
        a MSMLFile object
    :rtype: lxml.etree._Element
    :returns: root element of etree dom
    """
    return XmlBuilder(msml_file).to_xml()
import codecs
def save_xml(filename, xml):
    """saves the given `xml` element into the given `filename`
    :param str filename: the file written to
    :param Element xml: the element to be written.
    :returns: None
    """
    r = etree.ElementTree(xml)
    with codecs.open(filename, 'w', 'utf-8') as fp:
        # NOTE(review): `pretty_print` is an lxml-only keyword, and lxml's
        # write() emits bytes unless encoding='unicode' is passed — writing
        # through a codecs text wrapper may therefore fail. Verify against
        # the etree implementation star-imported via msml.exporter.visitor.
        r.write(fp, pretty_print=True)
|
CognitionGuidedSurgery/msml
|
src/msml/io/writer.py
|
Python
|
gpl-3.0
| 7,774
|
[
"VisIt"
] |
8b0153d57d67e72e02bbae6d17c5c58306ef37c3796b00e0435014e6cba9f5e9
|
# $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the Crippen clogp and MR calculators
"""
from __future__ import print_function
import unittest, sys, os
import io
import numpy
from rdkit import RDConfig
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem import Crippen
def feq(n1, n2, tol=1e-5):
  """Return True when *n1* and *n2* agree to within *tol* (absolute)."""
  delta = n1 - n2
  return -tol <= delta <= tol
class TestCase(unittest.TestCase):
  """Regression tests for the Crippen clogP and MR calculators."""
  def setUp(self):
    # Reference data shipped with the RDKit source tree: a CSV of
    # (SMILES, clogP, MR) triples plus pickled per-atom contributions.
    self.fName = os.path.join(RDConfig.RDCodeDir, 'Chem/test_data', 'Crippen.csv')
    self.detailName = os.path.join(RDConfig.RDCodeDir, 'Chem/test_data',
                                   'Crippen_contribs_regress.pkl')
    self.detailName2 = os.path.join(RDConfig.RDCodeDir, 'Chem/test_data',
                                    'Crippen_contribs_regress.2.pkl')
  def _readData(self):
    # Parse the reference CSV into parallel lists self.smis / self.clogs /
    # self.mrs, skipping comment lines starting with '#'.
    smis = []
    clogs = []
    mrs = []
    with open(self.fName, 'r') as f:
      lines = f.readlines()
    for line in lines:
      if len(line) and line[0] != '#':
        splitL = line.split(',')
        if len(splitL) == 3:
          smi, clog, mr = splitL
          smis.append(smi)
          clogs.append(float(clog))
          mrs.append(float(mr))
    self.smis = smis
    self.clogs = clogs
    self.mrs = mrs
  def testLogP(self):
    # Recompute logP and MR for every reference molecule and compare
    # against the stored values.
    self._readData()
    nMols = len(self.smis)
    #outF = file(self.fName,'w')
    for i in range(nMols):
      smi = self.smis[i]
      mol = Chem.MolFromSmiles(smi)
      # NOTE(review): the `if 1:` is a legacy toggle — the `else` branch
      # (regenerating the CSV) is dead code and references an undefined
      # `outF`; it would NameError if ever re-enabled as-is.
      if 1:
        clog = self.clogs[i]
        tmp = Crippen.MolLogP(mol)
        self.assertTrue(feq(clog, tmp), 'bad logp for %s: %4.4f != %4.4f' % (smi, clog, tmp))
        mr = self.mrs[i]
        tmp = Crippen.MolMR(mol)
        self.assertTrue(feq(mr, tmp), 'bad MR for %s: %4.4f != %4.4f' % (smi, mr, tmp))
      else:
        clog = Crippen.MolLogP(mol)
        mr = Crippen.MolMR(mol)
        print('%s,%.4f,%.4f' % (smi, clog, mr), file=outF)
  def testRepeat(self):
    # Same as testLogP, but each descriptor is computed twice to make sure
    # repeated calls on the same molecule give identical results.
    self._readData()
    nMols = len(self.smis)
    for i in range(nMols):
      smi = self.smis[i]
      mol = Chem.MolFromSmiles(smi)
      clog = self.clogs[i]
      tmp = Crippen.MolLogP(mol)
      tmp = Crippen.MolLogP(mol)
      self.assertTrue(feq(clog, tmp), 'bad logp fooutF,r %s: %4.4f != %4.4f' % (smi, clog, tmp))
      mr = self.mrs[i]
      tmp = Crippen.MolMR(mol)
      tmp = Crippen.MolMR(mol)
      self.assertTrue(feq(mr, tmp), 'bad MR for %s: %4.4f != %4.4f' % (smi, mr, tmp))
  def _writeDetailFile(self, inF, outF):
    # Regenerate the pickled per-atom contribution file (maintenance helper,
    # only invoked from the disabled `if 0:` branches below).
    while 1:
      try:
        smi, refContribs = cPickle.load(inF)
      except EOFError:
        break
      else:
        mol = Chem.MolFromSmiles(smi)
        if mol:
          mol = Chem.AddHs(mol, 1)
          smi2 = Chem.MolToSmiles(mol)
          contribs = Crippen._GetAtomContribs(mol)
          cPickle.dump((smi, contribs), outF)
        else:
          print('Problems with SMILES:', smi)
  def _doDetailFile(self, inF, nFailsAllowed=1):
    # Compare freshly-computed per-atom contributions against the pickled
    # reference, tolerating up to nFailsAllowed-1 mismatching molecules.
    done = 0
    verbose = 0
    nFails = 0
    while not done:
      if verbose:
        print('---------------')
      try:
        smi, refContribs = cPickle.load(inF)
      except EOFError:
        done = 1
      else:
        refContribs = [x[0] for x in refContribs]
        refOrder = numpy.argsort(refContribs)
        mol = Chem.MolFromSmiles(smi)
        if mol:
          mol = Chem.AddHs(mol, 1)
          smi2 = Chem.MolToSmiles(mol)
          contribs = Crippen._GetAtomContribs(mol)
          contribs = [x[0] for x in contribs]
          #
          # we're comparing to the old results using the oelib code.
          # Since we have some disagreements with them as to what is
          # aromatic and what isn't, we may have different numbers of
          # Hs. For the sake of comparison, just pop those off our
          # new results.
          #
          while len(contribs) > len(refContribs):
            del contribs[-1]
          order = numpy.argsort(contribs)
          # Compare the *sorted* contribution values so that atom ordering
          # differences between implementations do not matter.
          for i in range(len(refContribs)):
            refL = refContribs[refOrder[i]]
            l = contribs[order[i]]
            if not feq(refL, l):
              print('%s (%s): %d %6.5f != %6.5f' % (smi, smi2, order[i], refL, l))
              Crippen._GetAtomContribs(mol, force=1)
              print('-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
              nFails += 1
              break
        else:
          print('Problems with SMILES:', smi)
    self.assertTrue(nFails < nFailsAllowed)
  def testDetails(self):
    # Normalize line endings so the same pickle loads on all platforms.
    Crippen._Init()
    with open(self.detailName, 'r') as inTF:
      buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
      inTF.close()
    with io.BytesIO(buf) as inF:
      if 0:
        outF = open('tmp.pkl', 'wb+')
        self._writeDetailFile(inF, outF)
      self._doDetailFile(inF)
  def testDetails2(self):
    # Second reference pickle, same procedure as testDetails.
    Crippen._Init()
    with open(self.detailName2, 'r') as inTF:
      buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
      inTF.close()
    with io.BytesIO(buf) as inF:
      if 0:
        outF = open('tmp.pkl', 'wb+')
        self._writeDetailFile(inF, outF)
      self._doDetailFile(inF)
  def testIssue80(self):
    # Regression: calling Lipinski.NHOHCount must not change MolLogP.
    from rdkit.Chem import Lipinski
    m = Chem.MolFromSmiles('CCOC')
    ref = Crippen.MolLogP(m)
    Lipinski.NHOHCount(m)
    probe = Crippen.MolLogP(m)
    self.assertTrue(probe == ref)
  def testIssue1749494(self):
    # Regression: dummy atoms ([*]) must not crash the logP calculation.
    m1 = Chem.MolFromSmiles('[*]CC')
    v = Crippen.MolLogP(m1)
    self.assertTrue(feq(v, 0.9739))
# Run the regression suite when executed directly.
if __name__ == '__main__':
  unittest.main()
|
jandom/rdkit
|
rdkit/Chem/UnitTestCrippen.py
|
Python
|
bsd-3-clause
| 5,710
|
[
"RDKit"
] |
415ccccea81cf7c3dc8831a50cdbcdf1b08a007c785dcd8bd8ce4279f2ddd6cb
|
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Script to find the space where to insert the solutes
"""
import sys
import MDAnalysis
import MDAnalysis.lib.distances as mddist
import numpy as np
# NOTE: Python 2 script (bare print statements).
# Load the structure file given on the command line.
u = MDAnalysis.Universe(sys.argv[1])
# PO4 head-group beads in this resid window define the surface of interest;
# presumably the liposome's outer lipids — confirm against the topology.
lipids = u.select_atoms("name PO4 and resid 9108:10688")
# Centre of geometry of the selection, shaped (1, 3) for distance_array.
com = np.asarray([lipids.center_of_geometry()])
# Mean head-group distance from that centre = effective liposome radius.
radius = mddist.distance_array(com,lipids.positions,None).mean()
# Emit the two insertion regions: outside a sphere with a +10 margin
# (units are those of the coordinate file, presumably Angstrom), or
# anywhere inside the periodic box.
print "outside sphere %.3f %.3f %.3f %.3f"%(com[0,0], com[0,1], com[0,2], radius+10)
print "inside box 0.0 0.0 0.0 %.3f %.3f %.3f"%tuple(u.dimensions[:3])
|
SGenheden/Scripts
|
Projects/Liposome/get_solutespace.py
|
Python
|
mit
| 570
|
[
"MDAnalysis"
] |
b369a4efe3dab727a98e5d6b533965c2f3444ab3164fc9c247c37b625cee86f0
|
from aces.materials.POSCAR import structure as Material
class structure(Material):
    """MoN2 (alpha phase) material description for ACES.

    POSCAR cells are embedded as string literals (hexagonal lattice,
    one Mo and two N sites).
    """
    def getPOSCAR(self):
        # Delegates straight to the relaxed cell below.
        return self.getMinimized()
        # NOTE(review): everything from here on is unreachable dead code —
        # an earlier, unrelaxed cell apparently kept for reference.
        return """Mo N
1.0
2.98 0 0
1.49 2.5807557 0
0 0 25
Mo N
1 2
Direct
0.1666666666666643 0.6666666666666643 0.5000000000000000
0.8333333333333357 0.3333333333333357 0.456
0.8333333333333357 0.3333333333333357 0.544
"""
    def csetup(self):
        from ase.dft.kpoints import ibz_points
        #self.bandpoints=ibz_points['hexagonal']
        import numpy as np
        # Eight k-points on a circle of radius 0.5 in the k_x/k_y plane.
        x=0.5*np.cos(np.arange(8)/8.0*2.0*np.pi)
        y=0.5*np.sin(np.arange(8)/8.0*2.0*np.pi)
        self.bandpath=['Gamma']
        for i in range(8):
            # Clamp components larger than 0.2 to +/-0.5 (zone boundary).
            if(np.abs(x[i])>0.2):x[i]/=np.abs(x[i])*2.0
            if(np.abs(y[i])>0.2):y[i]/=np.abs(y[i])*2.0
            self.bandpoints['X'+str(i)]=[x[i],y[i],0.0]
            self.bandpath.append('X'+str(i))
        # Close the path back at Gamma.
        self.bandpath.append('Gamma')
    def getMinimized(self):
        # Relaxed cell (written out by OVITO after minimization).
        return """POSCAR file written by OVITO
1.0
2.9916000366 0.0000000000 0.0000000000
1.4957998991 2.5908014232 0.0000000000
0.0000000000 0.0000000000 25.0000000000
Mo N
1 2
Direct
0.000000000 0.000000000 0.500000000
0.666666687 0.666666687 0.455509961
0.666666687 0.666666687 0.544490039
"""
|
vanceeasleaf/aces
|
aces/materials/MoN2_alpha.py
|
Python
|
gpl-2.0
| 1,375
|
[
"ASE",
"OVITO"
] |
bb75611b6031ab22886856eaf62e5e32e8f1fb14a4b558c122453f8121705885
|
#!/usr/bin/env python
#
# This example demonstrates how to use multiple renderers within a
# render window. It is a variation of the Cone.py example. Please
# refer to that example for additional documentation.
#
import vtk
import time
#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
cone = vtk.vtkConeSource()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )
#
# In this example we terminate the pipeline with a mapper process object.
# (Intermediate filters such as vtkShrinkPolyData could be inserted in
# between the source and the mapper.) We create an instance of
# vtkPolyDataMapper to map the polygonal data into graphics primitives. We
# connect the output of the cone source to the input of this mapper.
#
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
#
# Create an actor to represent the cone. The actor orchestrates rendering of
# the mapper's graphics primitives. An actor also refers to properties via a
# vtkProperty instance, and includes an internal transformation matrix. We
# set this actor's mapper to be coneMapper which we created above.
#
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
#
# Create two renderers and assign actors to them. A renderer renders into a
# viewport within the vtkRenderWindow. It is part or all of a window on the
# screen and it is responsible for drawing the actors it has. We also set
# the background color here. In this example we are adding the same actor
# to two different renderers; it is okay to add different actors to
# different renderers as well.
#
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.SetBackground(0.1, 0.2, 0.4)
ren1.SetViewport(0.0, 0.0, 0.5, 1.0)
ren2 = vtk.vtkRenderer()
ren2.AddActor(coneActor)
ren2.SetBackground(0.1, 0.2, 0.4)
ren2.SetViewport(0.5, 0.0, 1.0, 1.0)
#
# Finally we create the render window which will show up on the screen.
# We add our two renderers into the render window using AddRenderer. We also
# set the size to be 600 pixels by 300.
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.AddRenderer( ren2 )
renWin.SetSize(600, 300)
#
# Make one camera view 90 degrees from the other.
#
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(90)
#
# Now we loop over 360 degrees and render the cone each time.
# (Animation loop left commented out so the example can run headless.)
#
# for i in range(0,360):
#     time.sleep(0.03)
#     renWin.Render()
#     ren1.GetActiveCamera().Azimuth( 1 )
#     ren2.GetActiveCamera().Azimuth( 1 )
|
CMUSV-VisTrails/WorkflowRecommendation
|
examples/vtk_examples/Tutorial/Step3/Cone3.py
|
Python
|
bsd-3-clause
| 2,703
|
[
"VTK"
] |
ef391e50be51a01d633a45b99d2148ddcf261f97ab11815ebf098ef663f4392b
|
# Copyright (C) 2012,2013 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Check whether all features used in the code are defined
#
# NOTE: Python 2 script (bare print statements).
import sys, os, re, fileinput
# Make the shared featuredefs helper (in ../../config) importable.
sys.path.append(os.path.join(sys.path[0], '..', '..', 'config'))
import featuredefs
if len(sys.argv) < 3:
    print "Usage: %s DEFFILE [FILE...]" % sys.argv[0]
    exit(2)
print "Checking for completeness of features in test configurations..."
# All features declared in the definitions file given as first argument.
fdefs = featuredefs.defs(sys.argv[1])
featurefound = set()
# Collect every feature #define'd across the remaining (config) files.
featurere = re.compile('^#define (\w+)')
for line in fileinput.input(sys.argv[2:]):
    res = featurere.match(line)
    if res is not None:
        feature = res.group(1)
        featurefound.add(feature)
# Features never enabled by any test config, excluding those explicitly
# marked as not requiring test coverage.
unused = fdefs.features.difference(featurefound)
unused = unused.difference(fdefs.notestfeatures)
if len(unused) > 0:
    for feature in unused:
        print "check_myconfig_complete: %s is not used" % feature
else:
    print "check_myconfig_complete: All features are used!"
|
roehm/espresso_cpp
|
config/check_myconfig_complete.py
|
Python
|
gpl-3.0
| 1,592
|
[
"ESPResSo"
] |
2d4fa5fc37710716de5352d869cc38ad1f344e3486686ebb386a7981c2d23f11
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# This is moose.server.
# It accepts simulation request on a specified TCP port (default 31417).
# It simulates the given file (usually a archive file e.g., tar.bz2) and sends
# back artefacts generated by simulation (mostly images); and streams data from
# moose.Tables back to client.
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2019, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys
import re
import os
import time
import math
import shutil
import socket
import signal
import tarfile
import tempfile
import threading
import subprocess
import logging
logger_ = logging.getLogger('moose.server')
__all__ = [ 'serve' ]
# Global variable to stop all running threads.
stop_all_ = False
sock_ = None
stop_streamer_ = {}
# Use prefixL_ bytes to encode the size of stream. One can probably use just one
# byte to do. Lets go with the inefficient one for now.
prefixL_ = 9
# Matplotlib text for running simulation. It make sures at each figure is saved
# to individual png files.
matplotlibText = """
print( '>>>> saving all figues')
import matplotlib.pyplot as plt
def multipage(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
fig.savefig(pp, format='pdf')
pp.close()
def saveall(prefix='results', figs=None):
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for i, fig in enumerate(figs):
outfile = '%s.%d.png' % (prefix, i)
fig.savefig(outfile)
print( '>>>> %s saved.' % outfile )
plt.close()
try:
saveall()
except Exception as e:
print( '>>>> Error in saving: %s' % e )
quit(0)
"""
def execute(cmd):
    """Run *cmd* and yield its stdout line by line as the process runs.

    :param cmd: the command, as a list of arguments.
    :raises subprocess.CalledProcessError: when the command exits non-zero
        (raised only after all output has been yielded).
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    for line in iter(proc.stdout.readline, ""):
        yield line
    proc.stdout.close()
    rc = proc.wait()
    if rc:
        raise subprocess.CalledProcessError(rc, cmd)
def find_files(dirname, ext=None, name_contains=None, text_regex_search=None):
    """Recursively collect files under *dirname* matching all given filters.

    :param ext: keep only files whose final dot-suffix equals this string.
    :param name_contains: keep only files whose basename contains this substring.
    :param text_regex_search: keep only files whose contents match this regex.
    :returns: list of matching file paths.
    """
    matches = []
    for dirpath, _subdirs, filenames in os.walk(dirname):
        for fname in filenames:
            fpath = os.path.join(dirpath, fname)
            if ext is not None and fname.split('.')[-1] != ext:
                continue
            if name_contains and name_contains not in os.path.basename(fname):
                continue
            if text_regex_search:
                with open(fpath, 'r') as fh:
                    if re.search(text_regex_search, fh.read()) is None:
                        continue
            matches.append(fpath)
    return matches
def prefix_data_with_size(data):
    """Frame *data* by prepending its length as a zero-padded, prefixL_-digit
    ASCII integer (the wire format expected by recv_input on the client side).

    BUGFIX: the previous implementation derived the digit count from
    math.log10(len(data)), which raises ValueError for an empty payload;
    formatting the length first handles every size up to 10**prefixL_ - 1.
    """
    global prefixL_
    digits = b'%d' % len(data)
    prefix = b'0' * (prefixL_ - len(digits)) + digits
    assert len(prefix) == prefixL_
    return b'%s%s' % (prefix, data)
# Signal handler: lets SIGINT/SIGTERM tear the whole server down.
def signal_handler(signum, frame):
    """Flag all threads to stop, close the listening socket and exit."""
    global stop_all_
    global sock_
    logger_.info( "User terminated all processes." )
    stop_all_ = True
    # sock_.shutdown( socket.SHUT_RDWR )
    sock_.close()
    # Give worker threads a moment to notice stop_all_ before exiting.
    time.sleep(1)
    quit(1)
def split_data(data):
    """Split a framed message into (size-prefix, payload).

    The first ``prefixL_`` bytes carry the zero-padded size (whitespace
    stripped); the remainder is returned untouched.
    """
    global prefixL_
    head = data[:prefixL_]
    payload = data[prefixL_:]
    return head.strip(), payload
def send_msg(msg, conn, prefix='LOG'):
    """Tag *msg* with '<PREFIX>', frame it with its size and send it on *conn*.

    :param msg: payload; blank messages are silently dropped.
    :param conn: connected socket.
    :param prefix: message class tag ('LOG', 'TAB', 'TAR', 'EOS', ...).
    :returns: False when the message was blank and skipped, None otherwise.
    """
    if not msg.strip():
        return False
    if prefix != 'TAB':
        logger_.debug(msg)
    else:
        # Table payloads can be large/binary; log only their size.
        logger_.debug( 'Sending msg with size %d' % len(msg))
    # NOTE(review): callers pass both str and bytes here; on Python 3,
    # '%s' % b'...' embeds the bytes repr and prefix_data_with_size mixes
    # bytes with str — confirm the intended interpreter/encoding story.
    msg = '<%s>%s' % (prefix, msg)
    conn.sendall(prefix_data_with_size(msg))
def run(cmd, conn, cwd=None):
    """Execute *cmd* (optionally inside *cwd*), relaying each stdout line
    to the client over *conn*.

    Failures are reported to the client rather than raised.

    BUGFIX: the working-directory restore now sits in a ``finally`` clause;
    previously an interrupt (e.g. KeyboardInterrupt, which the broad
    ``except Exception`` does not catch) could leave the whole process
    stranded in the simulation directory.
    """
    logger_.info( "Executing %s" % cmd )
    oldCWD = os.getcwd()
    if cwd is not None:
        os.chdir(cwd)
    try:
        for line in execute(cmd.split()):
            if line:
                send_msg(line, conn)
    except Exception as e:
        send_msg("Simulation failed: %s" % e, conn)
    finally:
        os.chdir(oldCWD)
def recv_input(conn, size=1024):
    """Read one length-prefixed message from *conn* and return its payload.

    Wire format: the first ``prefixL_`` bytes are an ASCII, zero-padded
    integer giving the payload size; exactly that many payload bytes follow.

    :param conn: connected socket.
    :param size: unused; kept for backward compatibility.
    :returns: the payload as bytes.
    """
    # The first prefixL_ bytes always tell how much to read next. Make sure
    # the submitting client frames its messages accordingly.
    d = conn.recv(prefixL_, socket.MSG_WAITALL)
    # NOTE(review): if the first recv comes back short, this loop discards
    # it and reads a *fresh* prefix instead of the remaining bytes; with
    # MSG_WAITALL a short read should only occur on error/EOF — confirm.
    while len(d) < prefixL_:
        try:
            d = conn.recv(prefixL_, socket.MSG_WAITALL)
        except Exception:
            logger_.error("MSG FORMAT: %d bytes are size of msg."%prefixL_)
            continue
    d, data = int(d), b''
    # Keep reading until the announced number of payload bytes has arrived.
    while len(data) < d:
        data += conn.recv(d-len(data), socket.MSG_WAITALL)
    return data
def writeTarfile( data ):
    """Persist *data* (bytes of a tar.bz2 archive) into a fresh temp directory.

    :returns: path of the written file, or None when the bytes do not form
        a valid tar archive. The temp directory is left for the caller /
        simulation step to use and is not cleaned up here.
    """
    tfile = os.path.join(tempfile.mkdtemp(), 'data.tar.bz2')
    with open(tfile, 'wb' ) as f:
        logger_.info( "Writing %d bytes to %s" % (len(data), tfile))
        f.write(data)
    # Sleep for some time so that file can be written to disk.
    time.sleep(0.1)
    if not tarfile.is_tarfile(tfile):
        logger_.warning( 'Not a valid tar file: %s' % tfile)
        return None
    return tfile
def suffixMatplotlibStmt(filename):
    """Write a '<filename>.1.py' copy of *filename* with the module-level
    matplotlib save-all epilogue (``matplotlibText``) appended, and return
    the new path.
    """
    outfile = '%s.1.py' % filename
    with open(filename, 'r') as src:
        original = src.read()
    with open(outfile, 'w') as dst:
        dst.write(original)
        dst.write('\n')
        dst.write(matplotlibText)
    return outfile
def streamer_client(socketPath, conn):
    """Relay moose table data from a unix socket to the TCP client.

    Runs in its own thread: waits for the simulation to create *socketPath*,
    connects to it, and forwards every chunk to *conn* tagged 'TAB' until
    this thread's flag in ``stop_streamer_`` is set.
    """
    # Connect to running socket server.
    global stop_streamer_
    stop = False
    logger_.debug( "Trying to connect to server at : %s" % socketPath )
    # The simulation creates the socket lazily; poll until it appears,
    # bailing out early if we are asked to stop.
    while not os.path.exists( socketPath ):
        #print( 'socket %s is not available yet.' % socketPath )
        time.sleep(0.1)
        stop = stop_streamer_[threading.currentThread().name]
        if stop:
            return
    stClient = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        stClient.connect(socketPath)
    except socket.error as e:
        logger_.warning('Could not connect: %s' % e)
        return
    # send streaming data back to client. The streamer sends fixed size
    # messages of 1024/2048 bytes each (see the c++ implementation).
    logger_.info( "Socket Streamer is connected with server." )
    # Short timeout so the stop flag is re-checked frequently.
    stClient.settimeout(0.05)
    send_msg( b'Now streaming table data.', conn, 'TAB')
    while not stop:
        stop = stop_streamer_[threading.currentThread().name]
        data = b''
        try:
            data = stClient.recv(1024)
            if len(data.strip()) > 0:
                send_msg(data, conn, 'TAB')
        except socket.timeout:
            continue
    stClient.close()
    if os.path.isfile(socketPath):
        os.unlink(socketPath)
def run_file(filename, conn, cwd=None):
    """Execute one user script with the table streamer attached.

    Publishes the streamer socket path via MOOSE_STREAMER_ADDRESS, starts a
    relay thread, then runs the (matplotlib-suffixed) script with this
    server's interpreter, forwarding output to the client over *conn*.
    """
    # set environment variable so that socket streamer can start.
    global stop_streamer_
    socketPath = os.path.join(tempfile.mkdtemp(), 'SOCK_TABLE_STREAMER')
    os.environ['MOOSE_STREAMER_ADDRESS'] = socketPath
    streamerThread = threading.Thread(target=streamer_client
            , args=(socketPath, conn,))
    stop_streamer_[streamerThread.name] = False
    streamerThread.daemon = True
    streamerThread.start()
    # Append the figure-saving epilogue before running.
    filename = suffixMatplotlibStmt(filename)
    run( "%s %s" % (sys.executable, filename), conn, cwd)
    # Ask the relay thread to stop and give it a second to wind down.
    stop_streamer_[streamerThread.name] = True
    streamerThread.join( timeout = 1)
    if streamerThread.is_alive():
        logger_.error( "The socket streamer client is still running...")
def extract_files(tfile, to):
    """Extract every member of tar archive *tfile* into directory *to*.

    :returns: the list of member names as recorded in the archive.
    """
    userFiles = []
    with tarfile.open(tfile, 'r' ) as f:
        userFiles = f.getnames( )
        try:
            f.extractall( to )
        except Exception as e:
            logger_.warning( e)
    # now check if all files have been extracted properly
    # NOTE(review): member names are relative to *to*; this existence check
    # only works because the caller (simulate) chdir()s into the extraction
    # directory first — confirm before reusing elsewhere.
    for f in userFiles:
        if not os.path.exists(f):
            logger_.error( "File %s could not be extracted." % f )
    return userFiles
def prepareMatplotlib(cwd):
    """Drop a minimal matplotlibrc into *cwd* that forces interactive mode,
    so user scripts don't block on plt.show().
    """
    rcpath = os.path.join(cwd, 'matplotlibrc')
    with open(rcpath, 'w') as rc:
        rc.write('interactive : True')
def send_bz2(conn, data):
    """Ship a bz2 tar archive to the client, tagged with the TAR prefix."""
    send_msg(data, conn, 'TAR')
def sendResults(tdir, conn, notTheseFiles):
    """Bundle PNG artefacts under *tdir* into a tar.bz2 and send it to the client.

    :param notTheseFiles: currently unused — despite the comment below,
        *every* .png under tdir is shipped, not only files created after the
        simulation. TODO confirm intent.
    """
    # Only send new files.
    resdir = tempfile.mkdtemp()
    resfile = os.path.join(resdir, 'results.tar.bz2')
    with tarfile.open( resfile, 'w|bz2') as tf:
        for f in find_files(tdir, ext='png'):
            logger_.info( "Adding file %s" % f )
            # Flatten paths: the client receives bare file names.
            tf.add(f, os.path.basename(f))
            time.sleep(0.01)
    # now send the tar file back to client
    with open(resfile, 'rb' ) as f:
        data = f.read()
        logger_.info( 'Total bytes to send to client: %d' % len(data))
        send_bz2(conn, data)
    shutil.rmtree(resdir)
def find_files_to_run(files):
    """Pick which of the received *files* should be executed.

    Any file whose basename contains '__main' is run. Failing that, a lone
    file is run as-is; otherwise files defining ``main(`` and apparently
    calling it are selected.
    """
    mains = [f for f in files if '__main' in os.path.basename(f)]
    if mains:
        return mains
    # Else guess.
    if len(files) == 1:
        return files
    candidates = []
    for path in files:
        with open(path, 'r') as fh:
            txt = fh.read()
        if re.search(r'def\s+main\(', txt) and re.search(r'^\s+main\(\S+?\)', txt):
            candidates.append(path)
    return candidates
def simulate(tfile, conn):
    """Simulate a given tar file.

    Extracts the archive next to itself, locates the entry-point files and
    runs each one.  Returns ``(status, message)``; status 0 means success.
    """
    tdir = os.path.dirname(tfile)
    os.chdir(tdir)
    userFiles = extract_files(tfile, tdir)
    # Now simulate.
    toRun = find_files_to_run(userFiles)
    if len(toRun) < 1:
        # BUG FIX: the caller unpacks two values (`res, msg = simulate(...)`);
        # returning a bare int made that unpacking raise TypeError.
        return 1, 'No runnable file found in the archive.'
    prepareMatplotlib(tdir)
    status, msg = 0, ''
    for _file in toRun:
        try:
            run_file(_file, conn, tdir)
        except Exception as e:
            msg += str(e)
            status = 1
    return status, msg
def savePayload(conn):
    """Receive the client's payload and persist it as a tarball.

    Returns ``(path, nbytes)``; `path` may be None when nothing usable was
    received.
    """
    payload = recv_input(conn)
    return writeTarfile(payload), len(payload)
def handle_client(conn, ip, port):
    """Serve one client session: receive a tarball, simulate it, and stream
    the results back.  Every path through the loop terminates the session.
    """
    logger_.info( "Serving request from %s:%s" % (ip, port) )
    while True:
        tarfileName, nBytes = savePayload(conn)
        if tarfileName is None:
            # BUG FIX: previously this only cleared the loop flag and then
            # fell through to os.path.isfile(None), raising TypeError.
            logger_.warning( "Could not recieve data." )
            break
        if not os.path.isfile(tarfileName):
            send_msg("[ERROR] %s is not a valid tarfile. Retry"%tarfileName, conn)
            break
        # list of files before the simulation (used to identify new results).
        notthesefiles = find_files(os.path.dirname(tarfileName))
        res, msg = simulate( tarfileName, conn )
        if 0 != res:
            send_msg( "Failed to run simulation: %s" % msg, conn)
        time.sleep(0.1)
        # Send results after DONE is sent; EOS marks end of streaming.
        send_msg('All done', conn, 'EOS')
        sendResults(os.path.dirname(tarfileName), conn, notthesefiles)
        break
def start_server( host, port, max_requests = 10 ):
    """Bind a TCP socket on (host, port) and serve clients until the
    module-level `stop_all_` flag is set.  Each accepted connection is
    handled on its own daemon-less thread by handle_client().
    """
    global stop_all_
    global sock_
    sock_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR lets the server restart without waiting for TIME_WAIT.
    sock_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sock_.bind( (host, port))
        logger_.info( "Server created %s:%s" %(host,port) )
    except Exception as e:
        logger_.error( "Failed to bind: %s" % e)
        quit(1)
    # listen upto 10 of requests
    sock_.listen(max_requests)
    while True:
        if stop_all_:
            break
        # Time out accept() periodically so the stop_all_ flag is polled.
        sock_.settimeout(10)
        try:
            conn, (ip, port) = sock_.accept()
        except socket.timeout as e:
            continue
        sock_.settimeout(0.0)
        t = threading.Thread(target=handle_client, args=(conn, ip, port))
        t.start()
    sock_.close()
def serve(host, port):
    """Thin convenience wrapper around start_server()."""
    return start_server(host, port)
def main(args):
    """Install a SIGINT handler and run the server until interrupted."""
    global stop_all_
    # Install a signal handler so Ctrl-C shuts the server down cleanly.
    signal.signal(signal.SIGINT, signal_handler)
    serve(args.host, args.port)
if __name__ == '__main__':
    import argparse
    # Argument parser.  add_help=False frees the conventional '-h' flag so
    # it can be reused as the short option for --host below; --help is
    # re-registered explicitly.
    description = '''Run MOOSE server.'''
    parser = argparse.ArgumentParser(description=description, add_help=False)
    parser.add_argument( '--help', action='help', help='Show this msg and exit')
    parser.add_argument('--host', '-h'
            , required = False, default = socket.gethostbyname(socket.gethostname())
            , help = 'Server Name'
            )
    parser.add_argument('--port', '-p'
            , required = False, default = 31417, type=int
            , help = 'Port number'
            )
    # Plain namespace object to collect parsed options.
    class Args: pass
    args = Args()
    parser.parse_args(namespace=args)
    try:
        main(args)
    except KeyboardInterrupt as e:
        # Signal worker threads to stop before exiting.
        stop_all_ = True
        quit(1)
|
BhallaLab/moose-core
|
python/moose/server.py
|
Python
|
gpl-3.0
| 12,862
|
[
"MOOSE"
] |
089938d61f3154a44665aa2e0f1e113bfc349a3231d7e1997d8446797edcf9b6
|
import sys,os,glob, inspect
import pylab as pl
from numpy import *
from scipy import optimize
import pickle
import time
import copy
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]) + "/templates")
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from templutils import *
filters=sys.argv[1:]
start=0
def mygauss(x,p):
    """Gaussian light-curve model with an added linear decay term.

    Evaluates p[0]*exp(-(x-p[3])**2/p[1]**2) + p[4] + p[8]*x.  When the
    GAUSSPLOT flag is enabled the intermediate curve is drawn via pylab.
    Uses the module-level `start` as a plot alpha value.
    """
    global start
    start = 0.1
#    start+=0.05
    PENALTY=0
    GAUSSPLOT=False#True
    g=p[0]*exp(-(x-p[3])**2/p[1]**2)+p[4]
    # add linear decay after 50 days from peak f(x)=0.02*x+1
    f=p[8]*((x))
    g+=f
    if GAUSSPLOT:
        print "plotting gaussian, ",start
        pl.plot(x,g,'r-', alpha=start)
        pl.draw()
#        pl.ylim(3.5,-1.0)
    return g
'''
if PENALTY:
#if p[0]<0:
# print "penalty 3!!"
# g=g*abs(p[0])
# print max(g)
if min(g)<9.8:
g=g-(9.5-min(g))
print "penalty 4!!"
print 9.8-min(g)
else :
print "here", min(g)
#add second gaussian
if 'BOSr' in filter or 'BOSi' in filter or 'UKIRTH' in filter or 'UKIRTJ' in filter or 'UKIRTK' in filter or 'WIRCH' in filter or 'WIRCJ' in filter or 'WIRCK' in filter or 'FTNi' in filter or 'FTNr' in filter:
g=g+p[5]*exp(-(x-p[6])**2/p[7]**2)
if PENALTY:
if p[5]<0:
print "penalty 2!!"
g=g*abs(p[5])
if abs(p[6]-50)>20.0:
print "penalty 1!!"
g=g*abs(p[6]-50)
'''
def exprise(x,g,p):
    """Multiply curve `g` by an exponential rise factor.

    Computes (g+1) * (exp(-x/p[10])/exp(p[9]/p[10]) + 1) - 1, i.e. an
    exponential rise controlled by timescale p[10] and offset p[9],
    applied around the Gaussian model.  `g` is deep-copied so the caller's
    array is not modified.  Increments the module-level `start` counter.
    """
    global start
    EXPPLOT=False
    start+=10
#    g[where(x<-2)]*=x[where(x<-2)]**(p[11]*2)
    tmp=p[9]
#    pl.plot (x,g,'r-',linewidth=3)
    newg=copy.deepcopy(g)
    newg=newg+1
#    newg[where(x<-tmp)]
    newg*=((exp(-x/p[10])/exp(tmp/p[10]))+1)#[where(x<-tmp)]
    print "newg: ",p[10],p[9]
    #(p[9]*exp((abs(x+tmp)**p[11])/p[10]**p[11]))[where(x<-tmp)]
#    pl.plot (x,g,'y-')
#    pl.draw()
#    print p[8],p[9]
#    plot (x,p[11]*exp(-(x)/p[2])/min(exp(-(x)/p[2]))+1,'r',alpha=start)
#    print "now the rise is plotted ",min(g),max(g), p[11]
    newg=newg-1
    if EXPPLOT:
        pl.plot (x,newg,'y-',alpha = 0.3)#,linewidth=3)
#        pl.ylim(3.5,-0.5)
        pl.draw()
#    pl.show()
#    time.sleep(10)
#    ylim(11,9)
    return newg
#errfunc = lambda p, x, y, err: (y - mygauss(x,p,filter))/ err
if __name__=='__main__':
    # Fit the empirical light-curve model (Gaussian + linear decay, then an
    # exponential rise) to the median template curves of the V and R bands.
    # Two nested least-squares passes are run twice per band; final
    # parameters are plotted and pickled under templates/.
    pl.ion()
    # Residual functions for the two fitting stages.
    errfunc = lambda p, x, y,myfilter: (y - mygauss(x,p))
    errfuncrise = lambda p, x, g, y: (y - exprise(x,g,p))
    #errfuncrise = lambda p, x, y,filter: (y - mygauss2(x,p,filter))
    #ion()
    template=Mytempclass()
    template.loadtemplatefile()
    pl.figure()
    # Overview figure: median template with 1-sigma band for both filters.
    for b in 'V','R':
        pl.plot(template.template[b].x, template.template[b].median, 'b-')
        pl.fill_between(template.template[b].x,template.template[b].median-template.template[b].std,template.template[b].median+template.template[b].std, alpha=0.1, color='#0000ff')
        pl.ylim(3.5,-1)
        pl.xlim(-10,50)
        pl.draw()
    q=[0.03,30,12]
    # Hand-tuned initial guess for the 12 model parameters.
    pinit=zeros(12,float)
    pinit[ 0 ]= -2.23712689035
    pinit[ 1 ]= 23.771414014
    pinit[ 2 ]= 30.0
    pinit[ 3 ]= 0.44643855512
    pinit[ 4 ]= 2.3090049932
    pinit[ 5 ]= -2.0
    pinit[ 6 ]= 40.0
    pinit[ 7 ]= 20.0
    pinit[ 8 ]= 0.01
    pinit[ 9 ]= 5.49366
    pinit[ 10 ]= 3.5207
    pinit[ 11 ]= 0.01
    newx=arange(-10,150)
    err = ones(len(template.template[b].x),float)#+20.0
    #err[where(abs(template.template[b].x-10)==min(abs(template.template[b].x-10)))]=1.0
    for b in ['V','R']:
        # Each band is fit twice; the second pass starts from the first
        # pass's best-fit parameters.
        for repeat in [0,1]:
            pl.figure()
            pl.plot(template.template[b].x, template.template[b].median, 'b-')
            pl.fill_between(template.template[b].x,template.template[b].median-template.template[b].std,template.template[b].median+template.template[b].std, alpha=0.1, color='#0000ff')
            pl.ylim(3.5,-1)
            pl.xlim(-10,50)
            #pl.show()
            pl.draw()
            for i,p in enumerate(pinit):
                print "repeat: ",repeat," pinit[",i,"]=",p
            myfilter=b
            #plot initial guess
            pl.plot(newx,mygauss(newx,pinit)*(exp(-(newx-0)/2.0)/max(exp(-(newx-0)/2.0))+1), 'k--')
            #    pl.draw()
            # Stage 1: fit the Gaussian+decay model to epochs > minx only.
            minx=0
            out = optimize.leastsq(errfunc, pinit,args=(template.template[b].x[where(template.template[b].x>minx)],template.template[b].median[where(template.template[b].x>minx)],myfilter),full_output=1)#,maxfev=50)#, err[where(sn[0]>25)]), full_output=1)
            pfinal=out[0]
            covar=out[1]
            for i,p in enumerate(pfinal):
                print "renew pinit[",i,"]=",p
            #        pl.plot(template.template[b].x, template.template[b].median)
            #        pl.fill_between(template.template[b].x,template.template[b].median-template.template[b].std,template.template[b].median+template.template[b].std, alpha=0.1)
            pl.plot(newx,mygauss(newx,pfinal),'c-')
            pl.draw()
            pinit=pfinal
            start=0
            print "now for the rise"
            # Stage 2: fit the exponential-rise factor on the full epoch range.
            out = optimize.leastsq(errfuncrise, pinit,args=(template.template[b].x,mygauss(template.template[b].x,pfinal), template.template[b].median)
                                   ,full_output=1)#, err[where(sn[0]>-5)]), full_output=1)
            #    time.sleep(20)
            #    show()
            pfinal=out[0]
            print pfinal
            for i,p in enumerate(pfinal):
                print "repeat ", repeat," renew pinit[",i,"]=",p
            #    covar=out[1]
            pl.plot(template.template[b].x,exprise(template.template[b].x,mygauss(template.template[b].x,pfinal),pfinal),'y-',linewidth=2)
            pl.draw()
            pinit=pfinal
        pl.ylabel(myfilter)
        pl.xlabel("epoch")
        # Persist the figure and the fitted parameters for this band.
        pl.savefig("templates/empiricalmodel_"+b+".png")
        pickle.dump(pfinal,open("templates/empiricalmodel_"+b+".pkl", "wb"))
    #    time.sleep(3)
    #savefig("empiricalmodel_"+filter+".png")
    #    time.sleep(3)
    #show()
#savefig("allempiricalmodels.png")
|
fedhere/SESNCfAlib
|
fitgauss2sntemplate.py
|
Python
|
mit
| 6,119
|
[
"Gaussian"
] |
3425e0f15822f86f5df453dff2d4ce6ae8c51d91e8e047318503020fd4dd178e
|
# cell_test_util.py ---
#
# Filename: cell_test_util.py
# Description: Utility functions for testing single cells
# Author:
# Maintainer:
# Created: Mon Oct 15 15:03:09 2012 (+0530)
# Version:
# Last-Updated: Sun Jun 25 16:04:13 2017 (-0400)
# By: subha
# Update #: 309
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# Code:
from datetime import datetime
import time
import os
import sys
import uuid
import unittest
import numpy as np
from matplotlib import pyplot as plt
import pylab
import moose
from moose import utils as mutils
import config
import cells
import testutils
from testutils import compare_cell_dump, setup_clocks, assign_clocks, step_run
def setup_current_step_model(model_container,
                             data_container,
                             celltype,
                             pulsearray):
    """Setup a single cell simulation with a pulse-generator stimulus.

    model_container: element to hold the model

    data_container: element to hold recorded tables

    celltype: str - name of a cell class defined in the `cells` module

    pulsearray: nx3 array - with row[i] = (delay[i], width[i],
    level[i]) of current injection.

    Returns a dict with the created cell, the stimulus PulseGen and the
    recording tables for soma Vm, presynaptic Vm and injected current.
    """
    classname = 'cells.%s' % (celltype)
    print('mc=', model_container, 'dc=', data_container, 'ct=', celltype, 'pa=', pulsearray, 'classname=', classname)
    # Resolve the cell class by attribute lookup instead of eval(): same
    # result for valid names, without the arbitrary-code-execution hazard.
    cell_class = getattr(cells, celltype)
    cell = cell_class('%s/%s' % (model_container.path, celltype))
    # One pulse per row of pulsearray: (delay, width, level).
    pulsegen = moose.PulseGen('%s/pulse' % (model_container.path))
    pulsegen.count = len(pulsearray)
    for ii in range(len(pulsearray)):
        pulsegen.delay[ii] = pulsearray[ii][0]
        pulsegen.width[ii] = pulsearray[ii][1]
        pulsegen.level[ii] = pulsearray[ii][2]
    moose.connect(pulsegen, 'output', cell.soma, 'injectMsg')
    # Record Vm at the presynaptic compartment and soma, plus the stimulus.
    presyn_vm = moose.Table('%s/presynVm' % (data_container.path))
    soma_vm = moose.Table('%s/somaVm' % (data_container.path))
    moose.connect(presyn_vm, 'requestOut', cell.presynaptic, 'getVm')
    moose.connect(soma_vm, 'requestOut', cell.soma, 'getVm')
    pulse_table = moose.Table('%s/injectCurrent' % (data_container.path))
    moose.connect(pulse_table, 'requestOut', pulsegen, 'getOutputValue')
    return {'cell': cell,
            'stimulus': pulsegen,
            'presynVm': presyn_vm,
            'somaVm': soma_vm,
            'injectionCurrent': pulse_table, }
class SingleCellCurrentStepTest(unittest.TestCase):
    """Base class for simulating a single cell with step current
    injection.

    Subclasses are expected to set `celltype` (a class name from the
    `cells` module) before setUp() runs.  The default stimulus is a
    single 1 nA pulse of 100 ms starting at 100 ms.
    """
    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        # Default stimulus: one pulse, then a second "pulse" delayed
        # effectively to infinity (1e9 s) so it never fires.
        self.pulse_array = [[100e-3, 100e-3, 1e-9],
                            [1e9, 0, 0]]
        self.solver = config.simulationSettings.method
        self.simdt = None
        self.plotdt = None
        self.tseries = []
    def setUp(self):
        """Create a uniquely-named container tree and build the model."""
        self.test_id = uuid.uuid4().int
        self.test_container = moose.Neutral('test%d' % (self.test_id))
        self.model_container = moose.Neutral('%s/model' % (self.test_container.path))
        self.data_container = moose.Neutral('%s/data' % (self.test_container.path))
        params = setup_current_step_model(
            self.model_container,
            self.data_container,
            self.celltype,
            self.pulse_array)
        self.cell = params['cell']
        # Log channel reversal potentials and Ca-pool time constants for
        # debugging model setup.
        for ch in moose.wildcardFind(self.cell.soma.path + '/##[ISA=ChanBase]'):
            config.logger.debug('%s Ek = %g' % (ch.path, ch.Ek))
        for ch in moose.wildcardFind(self.cell.soma.path + '/##[ISA=CaConc]'):
            config.logger.debug('%s tau = %g' % (ch.path, ch.tau))
        self.somaVmTab = params['somaVm']
        self.presynVmTab = params['presynVm']
        self.injectionTab = params['injectionCurrent']
        self.pulsegen = params['stimulus']
        # setup_clocks(self.simdt, self.plotdt)
        # assign_clocks(self.model_container, self.data_container, self.solver)
    def tweak_stimulus(self, pulsearray):
        """Update the pulsegen for this model with new (delay, width,
        level) values specified in `pulsearray` list."""
        for ii in range(len(pulsearray)):
            self.pulsegen.delay[ii] = pulsearray[ii][0]
            self.pulsegen.width[ii] = pulsearray[ii][1]
            self.pulsegen.level[ii] = pulsearray[ii][2]
    def schedule(self, simdt, plotdt, solver):
        """Record the integration/sampling steps and, for 'hsolve',
        attach an HSolve object to the cell before assigning clocks."""
        config.logger.info('Scheduling: simdt=%g, plotdt=%g, solver=%s' % (simdt, plotdt, solver))
        self.simdt = simdt
        self.plotdt = plotdt
        self.solver = solver
        if self.solver == 'hsolve':
            self.hsolve = moose.HSolve('%s/solver' % (self.cell.path))
            self.hsolve.dt = simdt
            self.hsolve.target = self.cell.path
        mutils.setDefaultDt(elecdt=simdt, plotdt2=plotdt)
        mutils.assignDefaultTicks(modelRoot=self.model_container.path,
                                  dataRoot=self.data_container.path,
                                  solver=self.solver)
    def runsim(self, simtime, stepsize=0.1, pulsearray=None):
        """Run the simulation for `simtime`. Save the data at the
        end."""
        config.logger.info('running: simtime=%g, stepsize=%g, pulsearray=%s' % (simtime, stepsize, str(pulsearray)))
        self.simtime = simtime
        if pulsearray is not None:
            self.tweak_stimulus(pulsearray)
        for ii in range(self.pulsegen.count):
            config.logger.info('pulse[%d]: delay=%g, width=%g, level=%g' % (ii, self.pulsegen.delay[ii], self.pulsegen.width[ii], self.pulsegen.level[ii]))
        config.logger.info('Start reinit')
        self.schedule(self.simdt, self.plotdt, self.solver)
        moose.reinit()
        config.logger.info('Finished reinit')
        ts = datetime.now()
        mutils.stepRun(simtime, simtime/10.0, verbose=True)
        # The sleep is required to get all threads to end
        while moose.isRunning():
            time.sleep(0.1)
        te = datetime.now()
        td = te - ts
        config.logger.info('Simulation time of %g s at simdt=%g with solver %s: %g s' % \
                               (simtime, self.simdt, self.solver,
                                td.seconds + td.microseconds * 1e-6))
    def savedata(self):
        # Now save the data: one two-column (time, value) text file per
        # recorded table, named after celltype/table/solver.
        for table_id in self.data_container.children:
            ts = np.linspace(0, self.simtime, len(table_id[0].vector))
            data = np.vstack((ts, table_id[0].vector))
            fname = os.path.join(config.data_dir,
                                 '%s_%s_%s_%s.dat' % (self.celltype,
                                                      table_id[0].name,
                                                      self.solver,
                                                      config.filename_suffix))
            np.savetxt(fname, np.transpose(data))
            config.logger.info('Saved %s in %s' % (table_id[0].name, fname))
    def plot_vm(self):
        """Plot Vm for presynaptic compartment and soma - along with
        the same in NEURON simulation if possible."""
        pylab.subplot(211)
        pylab.title('Soma Vm')
        self.tseries = np.linspace(0, self.simtime, len(self.somaVmTab.vector))
        pylab.plot(self.tseries*1e3, self.somaVmTab.vector * 1e3,
                   label='Vm (mV) - moose')
        pylab.plot(self.tseries*1e3, self.injectionTab.vector * 1e9,
                   label='Stimulus (nA)')
        try:
            # Overlay reference NEURON trace when the data file exists.
            nrn_data = np.loadtxt('../nrn/data/%s_soma_Vm.dat' % \
                                      (self.celltype))
            nrn_indices = np.nonzero(nrn_data[:, 0] <= self.tseries[-1]*1e3)[0]
            pylab.plot(nrn_data[nrn_indices,0], nrn_data[nrn_indices,1],
                       label='Vm (mV) - neuron')
        except IOError:
            print('No neuron data found.')
        pylab.legend()
        pylab.subplot(212)
        pylab.title('Presynaptic Vm')
        pylab.plot(self.tseries*1e3, self.presynVmTab.vector * 1e3,
                   label='Vm (mV) - moose')
        pylab.plot(self.tseries*1e3, self.injectionTab.vector * 1e9,
                   label='Stimulus (nA)')
        try:
            fname = os.path.join(config.mydir, '..', 'nrn', 'data',
                                 '%s_presynaptic_Vm.dat' % (self.celltype))
            nrn_data = np.loadtxt( fname)
            nrn_indices = np.nonzero(nrn_data[:, 0] <=
                                     self.tseries[-1]*1e3)[0]
            pylab.plot(nrn_data[nrn_indices,0],
                       nrn_data[nrn_indices,1], label='Vm (mV) - neuron')
        except IOError:
            print('No neuron data found.')
        pylab.legend()
        pylab.show()
#
# cell_test_util.py ends here
|
BhallaLab/moose-examples
|
traub_2005/py/cell_test_util.py
|
Python
|
gpl-2.0
| 8,995
|
[
"MOOSE",
"NEURON"
] |
5c9c4618e5358f5567a468bebc0c06a28359f387b09dd7387935fdce220f7aa3
|
#######################################################################
#
# Copyright 2009-2010 by Ullrich Koethe
#
# This file is part of the VIGRA computer vision library.
# The VIGRA Website is
# http://hci.iwr.uni-heidelberg.de/vigra/
# Please direct questions, bug reports, and contributions to
# ullrich.koethe@iwr.uni-heidelberg.de or
# vigra@informatik.uni-hamburg.de
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#######################################################################
import sys, os, time, math
from numbers import Number
from multiprocessing import cpu_count
try:
import pylab
except Exception, e:
pass
_vigra_path = os.path.abspath(os.path.dirname(__file__))
_vigra_doc_path = _vigra_path + '/doc/vigranumpy/index.html'
if sys.platform.startswith('win'):
# On Windows, add subdirectory 'dlls' to the PATH in order to find
# the DLLs vigranumpy depends upon. Since this directory appears
# at the end of PATH, already installed DLLs are always preferred.
_vigra_dll_path = _vigra_path + '/dlls'
if os.path.exists(_vigra_dll_path):
os.putenv('PATH', os.getenv('PATH') + os.pathsep + _vigra_dll_path)
def _fallbackModule(moduleName, message):
    '''This function installs a fallback module with the given 'moduleName'.
    All function calls into this module raise an ImportError with the
    given 'message' that hopefully tells the user why the real module
    was not available.
    '''
    import sys
    # Subclass the concrete module type so the fallback behaves like a
    # real module object in sys.modules.
    moduleClass = vigranumpycore.__class__
    class FallbackModule(moduleClass):
        def __init__(self, name):
            moduleClass.__init__(self, name)
            self.__name__ = name
        def __getattr__(self, name):
            # Dunder lookups are delegated normally; anything else turns a
            # missing attribute into an ImportError carrying the reason.
            if name.startswith('__'):
                return moduleClass.__getattribute__(self, name)
            try:
                return moduleClass.__getattribute__(self, name)
            except AttributeError:
                raise ImportError("""%s.%s: %s""" % (self.__name__, name, self.__doc__))
    module = FallbackModule(moduleName)
    sys.modules[moduleName] = module
    # The failure explanation is stored in __doc__ and echoed by __getattr__.
    module.__doc__ = """Import of module '%s' failed.\n%s""" % (moduleName, message)
if not os.path.exists(_vigra_doc_path):
_vigra_doc_path = "http://hci.iwr.uni-heidelberg.de/vigra/doc/vigranumpy/index.html"
__doc__ = '''VIGRA Computer Vision Library
HTML documentation is available in
%s
Help on individual functions can be obtained via their doc strings
as usual.
The following sub-modules group related functionality:
* arraytypes (VigraArray and axistags, automatically imported into 'vigra')
* ufunc (improved array arithmetic, automatically used by VigraArray)
* impex (image and array I/O)
* colors (color space transformations)
* filters (spatial filtering, e.g. smoothing)
* sampling (image and array re-sampling and interpolation)
* fourier (Fourier transform and Fourier domain filters)
* analysis (image analysis and segmentation)
* learning (machine learning and classification)
* noise (noise estimation and normalization)
* geometry (geometric algorithms, e.g. convex hull)
* histogram (histograms and channel representation)
* graphs (grid graphs / graphs / graph algorithms)
* utilities (priority queues)
''' % _vigra_doc_path
from __version__ import version
import vigranumpycore
import arraytypes
import impex
import sampling
import filters
import analysis
import learning
import colors
import noise
import geometry
import optimization
import histogram
import graphs
import utilities
import blockwise
sampling.ImagePyramid = arraytypes.ImagePyramid
class Timer:
    """Context manager that measures wall-clock time of its body.

    The elapsed time is available as `self.interval` after the block
    exits; when `verbose` is true, start and duration are printed.
    """
    def __init__(self, name, verbose=True):
        self.name = name
        self.verbose = verbose
    def __enter__(self):
        if self.verbose:
            print self.name, "..."
        self.start = time.time()
        return self
    def __exit__(self, *args):
        self.end = time.time()
        self.interval = self.end - self.start
        if self.verbose :
            print "... took ", self.interval, "sec"
try:
import fourier
except Exception, e:
_fallbackModule('vigra.fourier',
'''
%s
Make sure that the fftw3 libraries are found during compilation and import.
They may be downloaded at http://www.fftw.org/.''' % str(e))
import fourier
# import most frequently used functions
from arraytypes import *
standardArrayType = arraytypes.VigraArray
defaultAxistags = arraytypes.VigraArray.defaultAxistags
from vigranumpycore import ChunkedArrayFull, ChunkedArrayLazy, ChunkedArrayCompressed, ChunkedArrayTmpFile, Compression
try:
from vigranumpycore import ChunkedArrayHDF5, HDF5Mode
except:
pass
from impex import readImage, readVolume
def readHDF5(filenameOrGroup, pathInFile, order=None):
    '''Read an array from an HDF5 file.

       'filenameOrGroup' can contain a filename or a group object
       referring to an already open HDF5 file. 'pathInFile' is the name
       of the dataset to be read, including intermediate groups. If the
       first argument is a group object, the path is relative to this
       group, otherwise it is relative to the file's root group.

       If the dataset has an attribute 'axistags', the returned array
       will have type :class:`~vigra.VigraArray` and will be transposed
       into the given 'order' ('vigra.VigraArray.defaultOrder'
       will be used if no order is given).  Otherwise, the returned
       array is a plain 'numpy.ndarray'. In this case, order='F' will
       return the array transposed into Fortran order.

       Requirements: the 'h5py' module must be installed.
    '''
    import h5py
    # Only close the file below if we opened it ourselves.
    if isinstance(filenameOrGroup, h5py.highlevel.Group):
        file = None
        group = filenameOrGroup
    else:
        file = h5py.File(filenameOrGroup, 'r')
        group = file['/']
    try:
        dataset = group[pathInFile]
        if not isinstance(dataset, h5py.highlevel.Dataset):
            raise IOError("readHDF5(): '%s' is not a dataset" % pathInFile)
        # NOTE(review): Dataset.value and the h5py.highlevel namespace are
        # deprecated in modern h5py (use dataset[()] and h5py.Dataset) --
        # confirm the supported h5py version before upgrading.
        data = dataset.value
        axistags = dataset.attrs.get('axistags', None)
        if axistags is not None:
            # Stored axistags: wrap as VigraArray and restore axis order.
            data = data.view(arraytypes.VigraArray)
            data.axistags = arraytypes.AxisTags.fromJSON(axistags)
            if order is None:
                order = arraytypes.VigraArray.defaultOrder
            data = data.transposeToOrder(order)
        else:
            # Plain ndarray: only 'F' (transpose), 'C', 'A' or None are legal.
            if order == 'F':
                data = data.transpose()
            elif order not in [None, 'C', 'A']:
                raise IOError("readHDF5(): unsupported order '%s'" % order)
    finally:
        if file is not None:
            file.close()
    return data
def writeHDF5(data, filenameOrGroup, pathInFile, compression=None, chunks=None):
    '''Write an array to an HDF5 file.

       'filenameOrGroup' can contain a filename or a group object
       referring to an already open HDF5 file. 'pathInFile' is the name of the
       dataset to be written, including intermediate groups. If the first
       argument is a group object, the path is relative to this group,
       otherwise it is relative to the file's root group. If the dataset already
       exists, it will be replaced without warning.

       If 'data' has an attribute 'axistags', the array is transposed to
       numpy order before writing. Moreover, the axistags will be
       stored along with the data in an attribute 'axistags'.

       'compression' can be set to 'gzip', 'szip' or 'lzf'
       gzip (standard compression),
       szip (available if HDF5 is compiled with szip. Faster compression, limited types),
       lzf (very fast compression, all types).
       The 'lzf' compression filter is many times faster than 'gzip'
       at the cost of a lower compresion ratio.

       Chunking is disabled by default. When 'chunks' is set to True
       chunking is enabled and a chunk shape is determined automatically.
       Alternatively a chunk shape can be specified explicitly by passing
       a tuple of the desired shape.

       Requirements: the 'h5py' module must be installed.
    '''
    import h5py
    # Only close the file below if we opened it ourselves.
    if isinstance(filenameOrGroup, h5py.highlevel.Group):
        file = None
        group = filenameOrGroup
    else:
        file = h5py.File(filenameOrGroup)
        group = file['/']
    try:
        # Create intermediate groups along the path as needed.
        levels = pathInFile.split('/')
        for groupname in levels[:-1]:
            if groupname == '':
                continue
            g = group.get(groupname, default=None)
            if g is None:
                group = group.create_group(groupname)
            elif not isinstance(g, h5py.highlevel.Group):
                raise IOError("writeHDF5(): invalid path '%s'" % pathInFile)
            else:
                group = g
        # An existing dataset at the target path is silently replaced.
        dataset = group.get(levels[-1], default=None)
        if dataset is not None:
            if isinstance(dataset, h5py.highlevel.Dataset):
                del group[levels[-1]]
            else:
                raise IOError("writeHDF5(): cannot replace '%s' because it is not a dataset" % pathInFile)
        # VigraArrays are stored in numpy axis order; plain arrays pass through.
        try:
            data = data.transposeToNumpyOrder()
        except:
            pass
        dataset = group.create_dataset(levels[-1], data=data, compression=compression, chunks=chunks)
        if hasattr(data, 'axistags'):
            dataset.attrs['axistags'] = data.axistags.toJSON()
    finally:
        if file is not None:
            file.close()
impex.readHDF5 = readHDF5
readHDF5.__module__ = 'vigra.impex'
impex.writeHDF5 = writeHDF5
writeHDF5.__module__ = 'vigra.impex'
from filters import convolve, gaussianSmoothing
from sampling import resize
def gaussianDerivative(array, sigma, orders, out=None, window_size=0.0):
    '''
    Convolve 'array' with a Gaussian derivative kernel of the given 'orders'.
    'orders' must contain a list of integers >= 0 for each non-channel axis.
    Each channel of the array is treated independently. If 'sigma' is a single
    value, the kernel size is equal in each dimension. If 'sigma' is a tuple
    or list of values of appropriate length, a different size is used for each axis.

    'window_size' specifies the ratio between the filter scale and the size of
    the filter window. Use values around 2.0 to speed-up the computation for the
    price of increased cut-off error, and values >= 4.0 for very accurate results.
    The window size is automatically determined for the default value 0.0.
    '''
    # One derivative order is required per non-channel axis.
    if hasattr(array, 'dropChannelAxis'):
        if array.dropChannelAxis().ndim != len(orders):
            raise RuntimeError("gaussianDerivative(): len(orders) doesn't match array dimension.")
    else:
        # BUG FIX: this branch previously raised when the lengths *matched*
        # (inverted condition); it must mirror the check above and raise on
        # mismatch only.
        if array.ndim != len(orders):
            raise RuntimeError("gaussianDerivative(): len(orders) doesn't match array dimension.")
    # A scalar sigma is broadcast to every axis.
    try:
        len(sigma)
    except:
        sigma = [sigma]*len(orders)
    # Separable convolution: one 1D derivative kernel per axis.
    kernels = tuple([filters.gaussianDerivativeKernel(s, o, window_size=window_size) \
                     for s, o in zip(sigma, orders)])
    return filters.convolve(array, kernels, out)
filters.gaussianDerivative = gaussianDerivative
gaussianDerivative.__module__ = 'vigra.filters'
# import enums
CLOCKWISE = sampling.RotationDirection.CLOCKWISE
COUNTER_CLOCKWISE = sampling.RotationDirection.COUNTER_CLOCKWISE
UPSIDE_DOWN = sampling.RotationDirection.UPSIDE_DOWN
CompleteGrow = analysis.SRGType.CompleteGrow
KeepContours = analysis.SRGType.KeepContours
StopAtThreshold = analysis.SRGType.StopAtThreshold
_selfdict = globals()
def searchfor(searchstring):
    '''Scan all vigra modules to find classes and functions containing
       'searchstring' in their name.
    '''
    # _selfdict is this module's globals(); every attribute of every
    # imported submodule is matched case-insensitively and printed as
    # "module.name".
    for attr in _selfdict.keys():
        contents = dir(_selfdict[attr])
        for cont in contents:
            if ( cont.upper().find(searchstring.upper()) ) >= 0:
                print attr+"."+cont
# FIXME: use axistags here
def imshow(image, show=True, **kwargs):
    '''Display a scalar or RGB image by means of matplotlib.
       If the image does not have one or three channels, an exception is raised.
       The image will be automatically scaled to the range 0...255 when its dtype
       is not already 'uint8' and neither 'cmap' nor 'norm' are specified in kwargs
    '''
    import matplotlib.pylab

    # Arrays without axistags are handed to matplotlib unchanged.
    if not hasattr(image, 'axistags'):
        plot = matplotlib.pyplot.imshow(image, **kwargs)
        if show:
            matplotlib.pylab.show()
        return plot

    image = image.transposeToNumpyOrder()
    if image.channels == 1:
        image = image.dropChannelAxis().view(numpy.ndarray)
        # BUG FIX: the old cmap/norm handling left 'norm' unbound when only
        # 'cmap' was passed (NameError) and also forwarded both explicitly
        # *and* through **kwargs (duplicate-keyword TypeError).  Default
        # them only when the caller supplied neither, as documented.
        if 'cmap' not in kwargs and 'norm' not in kwargs:
            kwargs['cmap'] = matplotlib.cm.gray
            kwargs['norm'] = matplotlib.cm.colors.Normalize()
        plot = matplotlib.pyplot.imshow(image, **kwargs)
        if show:
            matplotlib.pylab.show()
        return plot
    elif image.channels == 3:
        # RGB images are rescaled to uint8 range before display.
        if image.dtype != numpy.uint8:
            out = image.__class__(image.shape, dtype=numpy.uint8, axistags=image.axistags)
            image = colors.linearRangeMapping(image, newRange=(0.0, 255.0), out=out)
        plot = matplotlib.pyplot.imshow(image.view(numpy.ndarray), **kwargs)
        if show:
            matplotlib.pylab.show()
        return plot
    else:
        raise RuntimeError("vigra.imshow(): Image must have 1 or 3 channels.")
def multiImshow(images,shape, show=True):
    """Show several images in one figure laid out on a (rows, cols) grid.

    `images` maps a title to a (data, itype) pair; only entries with
    itype == 'img' are displayed.  `shape` gives the subplot grid as
    (rows, cols).
    """
    nImg = len(images)
    f = pylab.figure()
    s = tuple(shape)
    for c,iname in enumerate(images.keys()):
        data,itype = images[iname]
        if itype == 'img':
            ax1 = f.add_subplot(s[0],s[1],c+1)
            imshow(data,show=False)
            ax1.set_title(iname)
            pylab.axis('off')
    if show :
        pylab.show()
def segShow(img,labels,edgeColor=(0,0,0),alpha=0.3,show=False,returnImg=False,r=0):
    """Overlay segmentation boundaries of `labels` on `img`.

    Boundaries are extracted via the crack-edge image of the labeling and
    blended into the (normalized) image with the given `edgeColor` and
    `alpha`.  `r` > 0 thickens the edges by disc dilation of radius r.
    When `returnImg` is set, the composed image is returned instead of
    being displayed.
    """
    img = numpy.squeeze(img)
    # Grayscale input is replicated to three channels for blending.
    if img.ndim ==2:
        img = numpy.concatenate( [ img[:,:,None]]*3 ,axis=2).astype(numpy.float32)
        img = taggedView(img, 'xyc')
    labels = numpy.squeeze(labels)
    crackedEdges = analysis.regionImageToCrackEdgeImage(labels+1).squeeze()
    #print "cracked shape",crackedEdges.shape
    # Invert the crack-edge image: 1 on edges, 0 elsewhere.
    whereEdge = numpy.where(crackedEdges==0)
    whereNoEdge = numpy.where(crackedEdges!=0)
    crackedEdges[whereEdge] = 1
    crackedEdges[whereNoEdge] = 0
    if r>0 :
        res = filters.discDilation(crackedEdges.astype(numpy.uint8),int(r) )
        whereEdge = numpy.where(res==1)
    # Resize the image to the crack-edge grid and normalize to [0, 1].
    imgToDisplay = resize(img,numpy.squeeze(crackedEdges).shape)
    imgToDisplay-=imgToDisplay.min()
    imgToDisplay/=imgToDisplay.max()
    for c in range(3):
        ic = imgToDisplay[:,:,c]
        ic[whereEdge]=(1.0-alpha)*edgeColor[c] + alpha*ic[whereEdge]
    if returnImg:
        return imgToDisplay
    return imshow(imgToDisplay,show=show)
def nestedSegShow(img,labels,edgeColors=None,scale=1,show=False,returnImg=False):
    """Overlay a stack of nested segmentations on `img`.

    `labels` is a 3D array whose last axis indexes the segmentation
    levels.  Each level's boundaries are blended into the image with its
    own (R, G, B[, alpha]) color from `edgeColors`; when `edgeColors` is
    None, a black-to-red ramp across the levels is generated.  `scale`
    magnifies the label grid; `returnImg` returns the composed image
    instead of displaying it.
    """
    shape=(labels.shape[0]*scale,labels.shape[1]*scale)
    if scale!=1:
        # BUG FIX: this called vigra.resize(), but the name 'vigra' is never
        # bound inside this module -- use the resize() imported above
        # (previously a NameError whenever scale != 1).
        img=resize(img,shape)

    assert numpy.squeeze(labels).ndim==3
    nSegs = labels.shape[2]

    # Default palette: interpolate from translucent black to red.
    if edgeColors is None :
        edgeColors=numpy.ones([nSegs,4])
        a =numpy.array([0,0,0.0,0.6],dtype=numpy.float32)
        b =numpy.array([1,0,0,0.4],dtype=numpy.float32)
        for s in range(nSegs):
            f=float(s)/float(nSegs-1)
            edgeColors[s,:]=f*b + (1.0-f)*a

    # Work on the crack-edge grid (2*n - 1 per axis), normalized to [0, 1].
    tShape=(img.shape[0]*2-1,img.shape[1]*2-1)
    imgToDisplay = resize(img,tShape)
    imgToDisplay-=imgToDisplay.min()
    imgToDisplay/=imgToDisplay.max()
    imgIn = imgToDisplay.copy()

    for si in range(nSegs):
        l = labels[:,:,si].copy()
        if scale!=1:
            l=resize(l.astype(numpy.float32),shape,order=0).astype(numpy.uint32)
        crackedEdges = analysis.regionImageToCrackEdgeImage(l)
        whereEdge = numpy.where(crackedEdges==0)
        if len(edgeColors[si])<4:
            alpha = 0.0
        else:
            alpha = edgeColors[si,3]
        # Blend this level's edge color against the *original* image so
        # deeper levels do not accumulate on top of each other.
        for c in range(3):
            icI = imgIn[:,:,c]
            ic = imgToDisplay[:,:,c]
            ic[whereEdge]=(1.0-alpha) * edgeColors[si,c] + alpha*icI[whereEdge]

    if returnImg:
        return imgToDisplay
    return imshow(imgToDisplay,show=show)
def show():
    '''Bring up the matplotlib display for all pending plot commands.'''
    import matplotlib.pylab as _pylab
    _pylab.show()
# auto-generate code for additional Kernel generators:
def _genKernelFactories(name):
    """For every Kernel1D/Kernel2D init* method, generate and register a
    factory function on the `filters` module (e.g. initGaussian ->
    filters.gaussianKernel) that creates, initializes and returns a kernel.
    """
    for oldName in dir(eval('filters.'+name)):
        if not oldName.startswith('init'):
            continue
        #remove init from beginning and start with lower case character
        newPrefix = oldName[4].lower() + oldName[5:]
        if newPrefix == "explicitly":
            newPrefix = "explict"
        newName = newPrefix + 'Kernel'
        if name == 'Kernel2D':
            newName += '2D'
        # Source template executed below; the factory forwards *args/**kw
        # to the corresponding init method and inherits its docstring.
        code = '''def %(newName)s(*args, **kw):
      k = filters.%(name)s()
      k.%(oldName)s(*args, **kw)
      return k
%(newName)s.__doc__ = filters.%(name)s.%(oldName)s.__doc__
filters.%(newName)s=%(newName)s
''' % {'oldName': oldName, 'newName': newName, 'name': name}
        exec code
_genKernelFactories('Kernel1D')
_genKernelFactories('Kernel2D')
del _genKernelFactories
# define watershedsUnionFind()
def _genWatershedsUnionFind():
    """Define analysis.watershedsUnionFind as a convenience wrapper around
    analysis.watersheds with method='UnionFind'."""
    def watershedsUnionFind(image, neighborhood=None, out = None):
        '''Compute watersheds of an image using the union find algorithm.
           If 'neighborhood' is 'None', it defaults to 8-neighborhood for 2D inputs
           and 6-neighborhood for 3D inputs.

           Calls :func:`watersheds` with parameters::\n\n
                watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
        '''
        if neighborhood is None:
            neighborhood = 8 if image.spatialDimensions == 2 else 6
        return analysis.watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
    watershedsUnionFind.__module__ = 'vigra.analysis'
    analysis.watershedsUnionFind = watershedsUnionFind
_genWatershedsUnionFind()
del _genWatershedsUnionFind
# define watershedsReoptimization)
def _genWatershedsReoptimization():
    """Define analysis.watershedsReoptimization: re-run watersheds from
    seeds obtained by shrinking an existing labeling."""
    def watershedsReoptimization(labels,edgeIndicator,shrinkN,out=None,visu=False):
        """Shrink each region of `labels` by `shrinkN` to obtain seeds,
        optionally visualize them, then grow watersheds on `edgeIndicator`.
        """
        # do unseeding

        #if visu :
        #    import matplotlib,numpy
        #    import pylab
        #    # A random colormap for matplotlib
        #    cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
        #    pylab.imshow ( numpy.swapaxes(labels,0,1) , cmap = cmap)
        #    pylab.show()

        seeds=analysis.segToSeeds(labels,long(shrinkN))

        if visu :
            import matplotlib,numpy
            import pylab
            # A random colormap for matplotlib
            cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
            pylab.imshow ( numpy.swapaxes(seeds,0,1) , cmap = cmap)
            pylab.show()

        #if seeds.ndim==2:
        #    seeds=analysis.labelImageWithBackground(seeds)
        #elif seeds.ndim==3:
        #    seeds=analysis.labelVolumeWithBackground(seeds)
        #else :
        #    raise RuntimeError("only implemented for 2d and 3d")

        if visu :
            import matplotlib,numpy
            import pylab
            # A random colormap for matplotlib
            cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
            pylab.imshow ( numpy.swapaxes(seeds,0,1) , cmap = cmap)
            pylab.show()

        return analysis.watersheds(edgeIndicator,seeds=seeds,out=out)
    watershedsReoptimization.__module__ = 'vigra.analysis'
    analysis.watershedsReoptimization = watershedsReoptimization
_genWatershedsReoptimization()
del _genWatershedsReoptimization
# define tensor convenience functions
def _genTensorConvenienceFunctions():
    # Publish eigenvalue shortcut functions on the vigra.filters module.

    def hessianOfGaussianEigenvalues(image, scale, out=None,
                                     sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
        '''Compute the eigenvalues of the Hessian of Gaussian at the given scale
           for a scalar image or volume.

           Calls :func:`hessianOfGaussian` and :func:`tensorEigenvalues`.
        '''
        hess = filters.hessianOfGaussian(image, scale,
                                         sigma_d=sigma_d, step_size=step_size,
                                         window_size=window_size, roi=roi)
        return filters.tensorEigenvalues(hess, out=out)

    def structureTensorEigenvalues(image, innerScale, outerScale, out=None,
                                   sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
        '''Compute the eigenvalues of the structure tensor at the given scales
           for a scalar or multi-channel image or volume.

           Calls :func:`structureTensor` and :func:`tensorEigenvalues`.
        '''
        tensor = filters.structureTensor(image, innerScale, outerScale,
                                         sigma_d=sigma_d, step_size=step_size,
                                         window_size=window_size, roi=roi)
        return filters.tensorEigenvalues(tensor, out=out)

    hessianOfGaussianEigenvalues.__module__ = 'vigra.filters'
    structureTensorEigenvalues.__module__ = 'vigra.filters'
    filters.hessianOfGaussianEigenvalues = hessianOfGaussianEigenvalues
    filters.structureTensorEigenvalues = structureTensorEigenvalues

_genTensorConvenienceFunctions()
del _genTensorConvenienceFunctions
# define distance transform convenience function
def _genDistanceTransformFunctions():
    def distanceTransform(array, background=True, norm=2, pixel_pitch=None, out=None):
        """Dimension-dispatching distance transform.

        Squeezes singleton axes and calls :func:`distanceTransform2D` or
        :func:`distanceTransform3D` depending on the remaining dimensionality.

        Keyword Arguments:

            - background : compute distances from background pixels (default: True)
            - norm : norm of the distance, e.g. 2 for Euclidean (default: 2)
            - pixel_pitch : anisotropic pixel size; only forwarded in the 2D case
            - out : preallocated result; only forwarded in the 2D case

        Raises RuntimeError if the squeezed array is neither 2D nor 3D.
        """
        ndim = array.squeeze().ndim
        if ndim == 2:
            return filters.distanceTransform2D(array, background=background, norm=norm,
                                               pixel_pitch=pixel_pitch, out=out)
        elif ndim == 3:
            # BUG FIX: this branch used to hard-code norm=2, silently ignoring
            # the caller's 'norm' argument; it is forwarded now.
            # NOTE(review): 'pixel_pitch' and 'out' are still not forwarded to
            # the 3D implementation -- confirm whether distanceTransform3D
            # supports them before forwarding.
            return filters.distanceTransform3D(array.astype('float32'),
                                               background=background, norm=norm)
        else:
            raise RuntimeError("distanceTransform is only implemented for 2D and 3D arrays")
    distanceTransform.__module__ = 'vigra.filters'
    filters.distanceTransform = distanceTransform

_genDistanceTransformFunctions()
del _genDistanceTransformFunctions
# define feature convenience functions
def _genFeaturConvenienceFunctions():
    # Publish feature-introspection helpers on the vigra.analysis module and
    # graft a read-only dict interface onto the accumulator classes.

    def supportedFeatures(array):
        '''Return a list of feature names that are available for the given array. These feature
           names are the valid inputs to a call of :func:`extractFeatures`. E.g., to compute
           just the first two features in the list, use::

                f = vigra.analysis.supportedFeatures(array)
                print "Computing features:", f[:2]
                r = vigra.analysis.extractFeatures(array, features=f[:2])
        '''
        # extractFeatures(..., None) builds an accumulator without computing
        # anything; it can then be asked which features it would support
        return analysis.extractFeatures(array, None).supportedFeatures()

    supportedFeatures.__module__ = 'vigra.analysis'
    analysis.supportedFeatures = supportedFeatures

    def supportedRegionFeatures(array, labels):
        '''Return a list of feature names that are available for the given array and label array.
           These feature names are the valid inputs to a call of
           :func:`extractRegionFeatures`. E.g., to compute just the first two features in the
           list, use::

                f = vigra.analysis.supportedRegionFeatures(array, labels)
                print "Computing features:", f[:2]
                r = vigra.analysis.extractRegionFeatures(array, labels, features=f[:2])
        '''
        return analysis.extractRegionFeatures(array, labels, None).supportedFeatures()

    supportedRegionFeatures.__module__ = 'vigra.analysis'
    analysis.supportedRegionFeatures = supportedRegionFeatures

    def supportedConvexHullFeatures(labels):
        '''Return a list of Convex Hull feature names that are available for the given 2D label array.
           These Convex Hull feature names are the valid inputs to a call of
           :func:`extractConvexHullFeatures`. E.g., to compute just the first two features in the
           list, use::

                f = vigra.analysis.supportedConvexHullFeatures(labels)
                print "Computing Convex Hull features:", f[:2]
                r = vigra.analysis.extractConvexHullFeatures(labels, features=f[:2])
        '''
        # an unsupported input makes the extraction raise; report that as
        # "no features available"
        # NOTE(review): the bare 'except' also hides unrelated errors
        try:
            return analysis.extractConvexHullFeatures(labels, list_features_only=True)
        except:
            return []

    supportedConvexHullFeatures.__module__ = 'vigra.analysis'
    analysis.supportedConvexHullFeatures = supportedConvexHullFeatures

    def supportedSkeletonFeatures(labels):
        '''Return a list of Skeleton feature names that are available for the given 2D label array.
           These Skeleton feature names are the valid inputs to a call of
           :func:`extractSkeletonFeatures`. E.g., to compute just the first two features in the
           list, use::

                f = vigra.analysis.supportedSkeletonFeatures(labels)
                print "Computing Skeleton features:", f[:2]
                r = vigra.analysis.extractSkeletonFeatures(labels, features=f[:2])
        '''
        # same best-effort pattern as supportedConvexHullFeatures above
        try:
            return analysis.extractSkeletonFeatures(labels, list_features_only=True)
        except:
            return []

    supportedSkeletonFeatures.__module__ = 'vigra.analysis'
    analysis.supportedSkeletonFeatures = supportedSkeletonFeatures

    # implement the read-only part of the 'dict' API in FeatureAccumulator and RegionFeatureAccumulator
    # (the accumulator classes already provide keys(), __getitem__ and
    #  isActive(); everything below is derived from those)
    def __len__(self):
        return len(self.keys())
    def __iter__(self):
        return self.keys().__iter__()
    def has_key(self, key):
        # isActive() raises for unknown keys; map that to False
        try:
            return self.isActive(key)
        except:
            return False
    def values(self):
        return [self[k] for k in self.keys()]
    def items(self):
        return [(k, self[k]) for k in self.keys()]
    def iterkeys(self):
        return self.keys().__iter__()
    def itervalues(self):
        for k in self.keys():
            yield self[k]
    def iteritems(self):
        for k in self.keys():
            yield (k, self[k])

    # attach the helpers defined above to both accumulator classes;
    # eval(k) looks each function up by name in this local scope
    for k in ['__len__', '__iter__', 'has_key', 'values', 'items', 'iterkeys', 'itervalues', 'iteritems']:
        setattr(analysis.FeatureAccumulator, k, eval(k))
        setattr(analysis.RegionFeatureAccumulator, k, eval(k))

_genFeaturConvenienceFunctions()
del _genFeaturConvenienceFunctions
# Convenience alias: expose graphs.MetricType at this package level.
MetricType = graphs.MetricType
# define grid graph convenience functions
# and extend grid graph classes
def _genGridGraphConvenienceFunctions():
    def gridGraph(shape,directNeighborhood=True):
        '''Return a grid graph with certain shape.

            Parameters:

                - shape -- shape of the image
                - directNeighborhood -- use 4 (True) or 8 (False) neighborhood (default: True)

            Returns:

                - grid graph

            use::

                >>> # 4-connected
                >>> g = vigra.graps.gridGraph(shape=[10,20])
                >>> g.nodeNum
                200
                >>> # 8-connected
                >>> g = vigra.graps.gridGraph(shape=[10,20],directNeighborhood=False)
        '''
        # dispatch on dimensionality: 2D and 3D grid graphs are separate
        # C++-exported classes
        if(len(shape)==2):
            return graphs.GridGraphUndirected2d(shape,directNeighborhood)
        elif(len(shape)==3):
            return graphs.GridGraphUndirected3d(shape,directNeighborhood)
        else:
            raise RuntimeError("GridGraph is only implemented for 2d and 3d grids")
    gridGraph.__module__ = 'vigra.graphs'
    graphs.gridGraph = gridGraph

    # extend grid graph via meta classes
    # Python 2 '__metaclass__' injection trick: subclassing gridGraphInjector
    # copies the subclass' attributes onto the concrete base class 'cls'
    # (the exported grid graph type), so the methods of moreGridGraph below
    # become methods of GridGraphUndirected2d/3d themselves.
    for cls in [graphs.GridGraphUndirected2d, graphs.GridGraphUndirected3d] :
        metaCls = cls.__class__
        class gridGraphInjector(object):
            class __metaclass__(metaCls):
                def __init__(self, name, bases, dict):
                    # copy every attribute of the new class onto each base
                    # that is not the injector/type itself
                    for b in bases:
                        if type(b) not in (self, type):
                            for k,v in dict.items():
                                setattr(b,k,v)
                    return type.__init__(self, name, bases, dict)

        ##inject some methods in the point foo
        class moreGridGraph(gridGraphInjector, cls):
            @property
            def shape(self):
                """ shape of grid graph"""
                return self.intrinsicNodeMapShape()

            def nodeSize(self):
                """ node map filled with 1.0"""
                size = graphs.graphMap(self,item='node',dtype=numpy.float32)
                size[:]=1
                return size

            def edgeLengths(self):
                """ edge map filled with 1.0"""
                size = graphs.graphMap(self,item='edge',dtype=numpy.float32)
                size[:]=1
                return size

            def mergeGraph(self):
                # pick the merge graph type matching the dimensionality
                if len(self.shape)==2:
                    mg = graphs.GridGraphUndirected2dMergeGraph(self)
                else:
                    mg = graphs.GridGraphUndirected3dMergeGraph(self)
                return mg

    def isGridGraph(obj):
        """ check if obj is a 2d or 3d grid graph"""
        return isinstance(obj,(graphs.GridGraphUndirected2d , graphs.GridGraphUndirected3d))
    def isGridGraph2d(obj):
        """ check if obj is a 2d grid graph"""
        return isinstance(obj,graphs.GridGraphUndirected2d)
    isGridGraph.__module__ = 'vigra.graphs'
    graphs.isGridGraph = isGridGraph
    isGridGraph2d.__module__ = 'vigra.graphs'
    graphs.isGridGraph2d = isGridGraph2d

_genGridGraphConvenienceFunctions()
del _genGridGraphConvenienceFunctions
def _genGraphConvenienceFunctions():
    # Build convenience functions/classes and publish them on vigra.graphs.

    def listGraph(nodes=0,edges=0):
        ''' Return an empty directed graph

            Parameters :

                - nodes : number of nodes to reserve
                - edges : number of edges to reserve

            Returns :

                - graph
        '''
        return graphs.AdjacencyListGraph(nodes,edges)
    listGraph.__module__ = 'vigra.graphs'
    graphs.listGraph = listGraph

    def intrinsicGraphMapShape(graph,item):
        """ Intrinsic shape of node/edge/arc-map for a given graph.

            Node, edge and arc maps are stored in numpy arrays by default.
            The intrinsic shape must not be confused with the number
            of nodes/edges/arcs. The intrinsic shape is used to
            allocate a numpy array which can store data for nodes/arcs/edges
            of a given graph.

            Parameters:

                - graph : input graph to get the shape for

                - item  : item must be ``'node'`` , ``'edge'`` or ``'arc'``

            Returns:

                - shape as tuple
        """
        if item=='edge':
            return graph.intrinsicEdgeMapShape()
        elif item=='node':
            return graph.intrinsicNodeMapShape()
        elif item=='arc':
            return graph.intrinsicArcMapShape()
        else :
            raise RuntimeError("%s is not valid,must be 'edge','node' or 'arc' "%item)
    intrinsicGraphMapShape.__module__ = 'vigra.graphs'
    graphs.intrinsicGraphMapShape = intrinsicGraphMapShape

    def graphMap(graph,item,dtype=numpy.float32,channels=1,addChannelDim=False):
        """ Return a graph map for a given graph item (``'node'`` , ``'edge'`` or ``'arc'``).

            Parameters:

                - graph : graph to get a graph map for

                - item : ``'node'`` , ``'edge'`` or ``'arc'``

                - dtype : desired dtype

                - channels : number of channels (default: 1)

                - addChannelDim -- add an explicit channelDim :(default: False)
                  only useful if channels == 1

            Returns:

                - graphmap as numpy.ndarray / VigraArray
        """
        s = intrinsicGraphMapShape(graph,item)
        intrDim = len(s)
        # only 1- to 4-dimensional intrinsic shapes have an axistag string
        if intrDim < 1 or intrDim > 4:
            raise RuntimeError("graphs with intrisic dimension >4 are not supported")
        tags = ('x', 'xy', 'xyz', 'xyzt')[intrDim-1]
        if channels==1 and addChannelDim==False:
            a = numpy.zeros(shape=s, dtype=dtype)
            return taggedView(a, tags)
        else:
            # append an explicit channel axis
            a = numpy.zeros(shape=s+(channels,), dtype=dtype)
            return taggedView(a, tags+'c')

    # NOTE: a byte-identical copy-paste duplicate 'graphMap2' used to be
    # defined here; it was local to this function, never exported, and has
    # been removed.

    graphMap.__module__ = 'vigra.graphs'
    graphs.graphMap = graphMap

    def mergeGraph(graph):
        """ get a merge graph from input graph.

            A merge graph might be useful for hierarchical clustering
        """
        mg = graphs.__mergeGraph(graph)
        return mg
    mergeGraph.__module__ = 'vigra.graphs'
    graphs.mergeGraph = mergeGraph

    # sentinel for invalid nodes/edges/arcs
    INVALID = graphs.Invalid()
    graphs.INVALID = INVALID

    class ShortestPathPathDijkstra(object):
        # Thin OO wrapper around graphs._shortestPathDijkstra that remembers
        # the last source/target so path()/distance() can default to them.
        def __init__(self,graph):
            """ shortest path computer

                Keyword Arguments:

                    - graph : input graph
            """
            self.pathFinder = graphs._shortestPathDijkstra(graph)
            self.graph=graph
            self.source = None
            self.target = None
        def run(self,weights,source,target=None):
            """ run shortest path search

                Keyword Arguments:

                    - weights : edge weights encoding distance from two adjacent nodes

                    - source : source node

                    - target : target node (default: None)
                        If target node is None, the shortest path
                        to all nodes!=source is computed
            """
            self.source = source
            self.target = target
            if target is None:
                self.pathFinder.run(weights,source)
            else:
                self.pathFinder.run(weights,source,target)
            return self
        def runIgnoreLargeWeights(self,weights,source,val):
            """ run shortest path search, nodes with all edge weights larger than val will be ignored

                Keyword Arguments:

                    - weights : edge weights encoding distance from two adjacent nodes

                    - source : source node

                    - val : upper bound
            """
            self.source = source
            self.target = None
            self.pathFinder.runIgnoreLargeWeights(weights,source,val)
            return self
        def path(self,target=None,pathType='coordinates'):
            """ get the shortest path from source to target

                Keyword Arguments:

                    - target : target node (default: None)
                        If target node is None, the target specified
                        by 'run' is used.

                    - pathType : 'coordinates' or 'ids' path (default: 'coordinates')
            """
            if target is None:
                assert self.target is not None
                target=self.target
            if pathType=='coordinates':
                return self.pathFinder.nodeCoordinatePath(target)
            elif pathType == 'ids':
                return self.pathFinder.nodeIdPath(target)
            # NOTE(review): any other pathType silently returns None --
            # confirm whether this should raise instead
        def distance(self,target=None):
            """ get distance from source to target

                Keyword Arguments:

                    - target : target node (default: None)
                        If target node is None, the target specified
                        by 'run' is used.
            """
            if target is None:
                assert self.target is not None
                target=self.target
            return self.pathFinder.distance(target)
        def distances(self,out=None):
            """ return the full distance map"""
            return self.pathFinder.distances(out)
        def predecessors(self,out=None):
            """ return the full predecessors map"""
            return self.pathFinder.predecessors(out)
    ShortestPathPathDijkstra.__module__ = 'vigra.graphs'
    graphs.ShortestPathPathDijkstra = ShortestPathPathDijkstra

_genGraphConvenienceFunctions()
del _genGraphConvenienceFunctions
def _genRegionAdjacencyGraphConvenienceFunctions():
class RegionAdjacencyGraph(graphs.AdjacencyListGraph):
def __init__(self,graph=None ,labels=None ,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
""" Region adjacency graph
Keyword Arguments :
- graph : the base graph, the region adjacency graph should be based on
- labels : label map for the graph
- ignoreLabel : ignore a label in the labels map (default: None)
- reserveEdges : reserve a certain number of Edges
Attributes:
- labels : labels passed in constructor
- ignoreLabel : ignoreLabel passed in constructor
- baseGraphLabels : labels passed in constructor
(fixme,dublicated attribute (see labels) )
- baseGraph : baseGraph is the graph passed in constructor
- affiliatedEdges : for each edge in the region adjacency graph,
a vector of edges of the baseGraph is stored in affiliatedEdges
"""
if(graph is not None and labels is not None):
super(RegionAdjacencyGraph,self).__init__(long(labels.max()+1),long(reserveEdges))
if ignoreLabel is None and isDense is not None and isDense == True:
if ignoreLabel is None:
ignoreLabel=-1
self.labels = labels
self.ignoreLabel = ignoreLabel
self.baseGraphLabels = labels
self.baseGraph = graph
if maxLabel is None:
maxLabel = int(numpy.max(labels))
# set up rag
self.affiliatedEdges = graphs._regionAdjacencyGraphFast(graph,labels,self,maxLabel,int(reserveEdges))
else:
if ignoreLabel is None:
ignoreLabel=-1
self.labels = labels
self.ignoreLabel = ignoreLabel
self.baseGraphLabels = labels
self.baseGraph = graph
# set up rag
self.affiliatedEdges = graphs._regionAdjacencyGraph(graph,labels,self,self.ignoreLabel)
else :
super(RegionAdjacencyGraph,self).__init__(0,0)
def mergeGraph(self):
return graphs.AdjacencyListGraphMergeGraph(self)
def accumulateSeeds(self, seeds, out=None):
graph = self.baseGraph
labels = self.labels
return graphs._pyAccNodeSeeds(self, graph, labels, seeds, out)
def accumulateEdgeFeatures(self,edgeFeatures,acc='mean',out=None):
""" accumulate edge features from base graphs edges features
Keyword Argument:
- edgeFeatures : edge features of baseGraph
- acc : used accumulator (default: 'mean')
Currently only 'mean' and 'sum' are implemented
- out : preallocated edge map
Returns :
accumulated edge features
"""
graph = self.baseGraph
affiliatedEdges = self.affiliatedEdges
if isinstance(edgeFeatures, (graphs.ImplicitMEanEdgeMap_2d_float_float, graphs.ImplicitMEanEdgeMap_3d_float_float)):
if graphs.isGridGraph(graph)==False:
raise RuntimeError("implicit edge maps are only implemented for grid graphs")
return graphs._ragEdgeFeatures(self, graph, affiliatedEdges, edgeFeatures,acc, out)
else:
if self.edgeNum == 0:
raise RuntimeError("self.edgeNum == 0 => cannot accumulate edge features")
if acc == 'mean':
weights = self.baseGraph.edgeLengths()
#print "Weights",weights
else:
weights = graphs.graphMap(self.baseGraph,'edge',dtype=numpy.float32)
weights[:] = 1
if graphs.isGridGraph2d(graph) and edgeFeatures.ndim == 4 :
return graphs._ragEdgeFeaturesMb(self,graph,affiliatedEdges,edgeFeatures,weights,acc,out)
else:
return graphs._ragEdgeFeatures(self,graph,affiliatedEdges,edgeFeatures,weights,acc,out)
def accumulateNodeFeatures(self,nodeFeatures,acc='mean',out=None):
""" accumulate edge features from base graphs edges features
Keyword Argument:
- nodeFeatures : node features of baseGraph
- acc : used accumulator (default: 'mean')
Currently only 'mean' and 'sum' are implemented
- out : preallocated node map (default: None)
Returns :
accumulated node features
"""
if self.edgeNum == 0 :
raise RuntimeError("self.edgeNum == 0 => cannot accumulate edge features")
graph = self.baseGraph
labels = self.baseGraphLabels
ignoreLabel = self.ignoreLabel
if acc == 'mean':
#print "get node size..."
weights = self.baseGraph.nodeSize()
#print "weights == ", weights
else :
weights = graphs.graphMap(self.baseGraph,'node',dtype=numpy.float32)
weights[:]=1
return graphs._ragNodeFeatures(self,graph,labels,nodeFeatures,weights,acc,ignoreLabel,out)
def projectNodeFeatureToBaseGraph(self,features,out=None):
""" project node features from this graph, to the base graph of this graph.
Keyword Arguments:
- features : node feautres for this graph
- out : preallocated node map of baseGraph (default: None)
Returns :
projected node features of base graph
"""
out=graphs._ragProjectNodeFeaturesToBaseGraph(
rag=self,
baseGraph=self.baseGraph,
baseGraphLabels=numpy.squeeze(self.baseGraphLabels),
ragNodeFeatures=features,
ignoreLabel=self.ignoreLabel,
out=out
)
#print "out",out.shape,out.dtype
return out
def projectLabelsBack(self,steps,labels=None,_current=0):
""" project labels from current graph to baseGraph and repeat this recursively
Keyword Arguments:
- steps : how often should the labels be projected back
- labels : labels for the current graph (default: None)
If labels is None, each node gets its own label
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
if steps == current :
return labels
else :
labels = self.projectLabelsToBaseGraph(labels)
return self.baseGraph.projectLabelsBack(steps,labels,_current+1)
def projectLabelsToBaseGraph(self,labels=None):
""" project node labels from this graph, to the base graph of this graph.
Keyword Arguments:
- labels : node labels for this graph (default: None)
If labels is None, each node gets its own label
- out : preallocated node map of baseGraph (default: None)
Returns :
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
return self.projectNodeFeatureToBaseGraph(features=labels)
def projectBaseGraphGt(self, baseGraphGt, gt=None, gtQuality=None):
bggt = numpy.require(baseGraphGt,dtype=numpy.uint32)
gt, gtQuality = graphs._ragProjectGroundTruth(rag=self, graph=self.baseGraph,
labels=self.baseGraphLabels, gt=bggt,
ragGt=gt, ragGtQuality=gtQuality)
return gt, gtQuality
def edgeUVCoordinates(self, edgeId):
try :
ei = int(edgeId)
except:
ei = edgeId.id
affEdges = self.affiliatedEdges
uvCoords = affEdges.getUVCoordinates(self.baseGraph, ei)
dim = uvCoords.shape[1]/2
uCoords = uvCoords[:,0:dim]
vCoords = uvCoords[:,dim:2*dim]
return (uCoords,vCoords)
def edgeTopologicalCoordinates(self, edgeId):
uc,vc = self.edgeUVCoordinates(edgeId)
return uc+vc
def edgeCoordinates(self, edgeId):
uc,vc = self.edgeUVCoordinates(edgeId)
return (uc+vc)/2.0
RegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.RegionAdjacencyGraph = RegionAdjacencyGraph
class GridRegionAdjacencyGraph(graphs.RegionAdjacencyGraph):
def __init__(self,graph=None,labels=None,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
""" Grid Region adjacency graph
A region adjaceny graph,where the base graph should be
a grid graph or a GridRegionAdjacencyGraph.
Keyword Arguments :
- graph : the base graph, the region adjacency graph should be based on
- labels : label map for the graph
- ignoreLabel : ignore a label in the labels map (default: None)
- reserveEdges : reserve a certain number of Edges
Attributes :
- labels : labels passed in constructor
- ignoreLabel : ignoreLabel passed in constructor
- baseGraphLabels : labels passed in constructor
(fixme,dublicated attribute (see labels) )
- baseGraph : baseGraph is the graph passed in constructor
- affiliatedEdges : for each edge in the region adjacency graph,
a vector of edges of the baseGraph is stored in affiliatedEdges
- shape : shape of the grid graph which is a base graph in the
complete graph chain.
"""
if graph is not None and labels is not None:
if not (graphs.isGridGraph(graph) or isinstance(graph,GridRegionAdjacencyGraph)):
raise RuntimeError("graph must be a GridGraph or a GridRegionAdjacencyGraph")
super(GridRegionAdjacencyGraph, self).__init__(graph, labels, ignoreLabel, reserveEdges, maxLabel, isDense)
else:
super(GridRegionAdjacencyGraph, self).__init__()
@property
def shape(self):
""" shape of the underlying grid graph"""
return self.baseGraph.shape
def projectLabelsToGridGraph(self,labels=None):
"""project labels of this graph to the underlying grid graph.
Keyword Arguments :
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
Returns :
grid graph labeling
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
if graphs.isGridGraph(self.baseGraph):
return self.projectLabelsToBaseGraph(labels)
else :
labels = self.projectLabelsToBaseGraph(labels)
return self.baseGraph.projectLabelsToGridGraph(labels)
def projectNodeFeaturesToGridGraph(self,features):
""" project features of this graph to the underlying grid graph.
Therefore project the features to an image.
Keyword Arguments :
- features : nodeFeatures of the current graph
Returns :
grid graph labeling
"""
if graphs.isGridGraph(self.baseGraph):
return self.projectNodeFeatureToBaseGraph(features)
else :
features = self.projectNodeFeatureToBaseGraph(features)
return self.baseGraph.projectNodeFeaturesToGridGraph(features)
def showNested(self,img,labels=None,returnImg=False):
""" show the complet graph chain / hierarchy given an RGB image
Keyword Arguments:
- img : RGB image
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
"""
ll=[]
if labels is not None:
ll.append( self.projectLabelsToGridGraph(labels) )
ll.append( self.projectLabelsToGridGraph() )
g=self.baseGraph
while graphs.isGridGraph(g)==False:
ll.append( g.projectLabelsToGridGraph() )
g=g.baseGraph
ll.reverse()
gridLabels = [l[...,numpy.newaxis] for l in ll ]
gridLabels = numpy.concatenate(gridLabels,axis=2)
return nestedSegShow(img,gridLabels,returnImg=returnImg)
def show(self,img,labels=None,edgeColor=(0,0,0),alpha=0.3,returnImg=False):
""" show the graph given an RGB image
Keyword Arguments:
- img : RGB image
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
- edgeColor : RGB tuple of edge color (default: (0,0,0) ).
Do not use values bigger than 1 in edgeColor.
- alpha : make edges semi transparent (default: 0.3).
0 means no transparency,1 means full transparency.
"""
pLabels = self.projectLabelsToGridGraph(labels)
return segShow(img,numpy.squeeze(pLabels),edgeColor=edgeColor,alpha=alpha,returnImg=returnImg)
def showEdgeFeature(self, img, edgeFeature, cmap='jet', returnImg=False, labelMode=False):
import matplotlib
assert graphs.isGridGraph(self.baseGraph)
imgOut = img.copy().squeeze()
if imgOut.ndim == 2:
imgOut = numpy.concatenate([imgOut[:,:,None]]*3,axis=2)
imgOut = taggedView(imgOut,'xyc')
imgOut-=imgOut.min()
imgOut/=imgOut.max()
if not labelMode:
edgeFeatureShow = edgeFeature.copy()
mi = edgeFeatureShow.min()
ma = edgeFeatureShow.max()
cm = matplotlib.cm.ScalarMappable(cmap=cmap)
rgb = cm.to_rgba(edgeFeatureShow)[:,0:3]
print rgb.shape
if(ma > mi):
edgeFeatureShow -=mi
edgeFeatureShow /= edgeFeatureShow.max()
else:
edgeFeatureShow[:] = 1
for e in self.edgeIter():
u,v = self.edgeUVCoordinates(e.id)
if not labelMode:
showVal = rgb[e.id,:]
else:
if edgeFeature[e.id] == 0:
showVal=[0,0,1]
elif edgeFeature[e.id] == 1:
showVal=[0,1,0]
elif edgeFeature[e.id] == -1:
showVal=[1,0,0]
imgOut[u[:,0],u[:,1],:] = showVal
imgOut[v[:,0],v[:,1],:] = showVal
#print u.shape
if returnImg:
return imgOut
imshow(imgOut)
def nodeSize(self):
""" get the geometric size of the nodes """
if graphs.isGridGraph(self.baseGraph):
return graphs._ragNodeSize(self, self.baseGraph, self.labels, self.ignoreLabel)
else:
baseNodeSizes = self.baseGraph.nodeSize()
return self.accumulateNodeFeatures(baseNodeSizes,acc='sum')
def edgeLengths(self):
""" get the geometric length of the edges"""
if graphs.isGridGraph(self.baseGraph):
return graphs._ragEdgeSize(self,self.affiliatedEdges)
else:
baseNodeSizes = self.baseGraph.edgeLengths()
return self.accumulateEdgeFeatures(baseNodeSizes,acc='sum')
def writeHDF5(self, filename, dset):
if(graphs.isGridGraph(self.baseGraph)):
sGraph = self.serialize()
sAffEdges = graphs._serialzieGridGraphAffiliatedEdges(self.baseGraph, self, self.affiliatedEdges )
sLabels = self.labels
writeHDF5(numpy.array([self.ignoreLabel]), filename, dset+'/ignore_label')
writeHDF5(sLabels, filename, dset+'/labels')
writeHDF5(sGraph, filename, dset+'/graph')
writeHDF5(sAffEdges, filename, dset+'/affiliated_edges')
else:
raise RuntimeError("only RAGs of Grid graph can be serialized")
#def readHdf5(self, filename, dset):
# labels = readHdf5(filename, dset+'/labels')
# shape = labels.shape
# self.baseGraph = graphs.gridGraph(shape)
GridRegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.GridRegionAdjacencyGraph = GridRegionAdjacencyGraph
class TinyEdgeLabelGui(object):
def __init__(self, rag, img, edgeLabels = None, labelMode=True):
if labelMode and isinstance(edgeLabels, numpy.ndarray):
assert set(numpy.unique(edgeLabels)).issubset({-1, 0, 1}), 'if labelMode is true only label values of [-1, 0, 1] are permitted'
self.press = None
self.rag = rag
self.img = img
self.edgeLabels = edgeLabels
self.dim = len(img.shape)
self.zOffset = 0
self.edgeRag2dToRag = None
self.edgeRagToRag2d = None
if self.dim == 3:
self.zOffset = self.img.shape[2]/2
self.visuImg = numpy.array(img, dtype=numpy.float32)
self.visuImg -= self.visuImg.min()
self.visuImg /= self.visuImg.max()
self.rag2d = None
self.visuImg2d = None
self.labelMode = labelMode
if self.edgeLabels is None :
self.edgeLabels = numpy.zeros(self.rag.edgeNum, dtype=numpy.float32)
self.edgeLabels2d = None
self.slice2d()
self.implot = None
self.currentLabel = 1
self.brushSize = 1
def startGui(self):
from functools import partial
import pylab as plt
from matplotlib.widgets import Slider, Button, RadioButtons
ax = plt.gca()
fig = plt.gcf()
imgWithEdges =self.rag2d.showEdgeFeature(self.visuImg2d, self.edgeLabels2d, returnImg=True, labelMode=self.labelMode)
self.implot = ax.imshow(numpy.swapaxes(imgWithEdges,0,1))
ff = partial(self.onclick, self)
cid = fig.canvas.mpl_connect('button_press_event', self.onclick)
fig.canvas.mpl_connect('key_press_event', self.press_event)
fig.canvas.mpl_connect('scroll_event', self.scroll)
fig.canvas.mpl_connect('motion_notify_event', self.on_motion)
fig.canvas.mpl_connect('button_release_event', self.on_release)
if self.labelMode:
axcolor = 'lightgoldenrodyellow'
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
self.slideBrush = Slider(axamp, 'brush-size', 1, 20.0, valinit=2)
self.slideBrush.on_changed(self.updateBrushSize)
plt.show()
def updateBrushSize(self, val):
self.brushSize = int(val+0.5)
def press_event(self, event):
sys.stdout.flush()
if event.key=='0' or event.key=='3':
self.currentLabel = 0
if event.key=='1':
self.currentLabel = 1
if event.key=='2':
self.currentLabel = -1
def slice2d(self):
if self.dim==3:
labels = self.rag.labels[:,:,self.zOffset].squeeze()
gg = graphs.gridGraph(labels.shape)
self.rag2d = graphs.regionAdjacencyGraph(gg, labels)
# update edges 2d:
self.edgeLabels2d = numpy.zeros(self.rag2d.edgeNum, dtype=numpy.float32)
# update edge correlation
self.edgeIdRag2dToRag = dict()
self.edgeIdRagToRag2d = dict()
for edge in self.rag2d.edgeIter():
edge3d = self.rag.findEdge(edge.u, edge.v)
self.edgeIdRag2dToRag[edge.id] = edge3d.id
self.edgeIdRagToRag2d[edge3d.id] = edge.id
self.visuImg2d = self.visuImg[:,:,self.zOffset]
# update edge 2d status:
for i in numpy.arange(self.edgeLabels2d.shape[0]):
self.edgeLabels2d[i] = self.edgeLabels[self.edgeIdRag2dToRag[i]]
elif self.dim==2:
self.rag2d = self.rag
self.visuImg2d = self.visuImg
self.edgeIdRag2dToRag = dict()
for edge in self.rag.edgeIter():
self.edgeIdRag2dToRag[edge.id] = edge.id
self.edgeIdRagToRag2d = self.edgeIdRag2dToRag
self.edgeLabels2d = self.edgeLabels
else:
print 'warning: bad dimension!'
def scroll(self, event):
import pylab as plt
if self.dim==3:
if event.button == 'up':
self.zOffset += 1
else:
self.zOffset -= 1
self.zOffset = self.zOffset % self.visuImg.shape[2]
self.slice2d()
imgWithEdges = self.rag2d.showEdgeFeature(self.visuImg2d, self.edgeLabels2d,returnImg=True, labelMode=self.labelMode)
self.implot.set_data(numpy.swapaxes(imgWithEdges,0,1))
plt.draw()
def on_motion(self, event):
if self.press is None:
return
print event.xdata, event.ydata
self.handle_click(event)
def on_release(self, event):
self.press = None
def onclick(self, event):
self.press = event.xdata, event.ydata
print event.xdata, event.ydata
try:
self.handle_click(event)
except:
pass
def handle_click(self, event):
    """Label the RAG edge(s) under the cursor position of *event*.

    Button mapping: left -> label 1, middle -> label 0, right -> label -1.
    All base-graph pixels within ``self.brushSize`` of the click whose
    label differs from the clicked pixel's label identify boundary edges;
    in labelMode those edges receive the current label and the overlay is
    redrawn.
    """
    import pylab as plt
    if event.button == 1:
        self.currentLabel = 1
    if event.button == 2:
        self.currentLabel = 0
    if event.button == 3:
        self.currentLabel = -1

    img = self.img
    rag = self.rag2d
    labels = rag.baseGraphLabels
    shape = img.shape
    # event coordinates are None when the click happens outside the axes
    if event.xdata is not None and event.ydata is not None:
        xRaw, yRaw = event.xdata, event.ydata
        if xRaw >= 0.0 and yRaw >= 0.0 and xRaw < img.shape[0] and yRaw < img.shape[1]:
            x, y = long(math.floor(event.xdata)), long(math.floor(event.ydata))
            l = labels[x, y]
            others = []
            bs = self.brushSize
            for xo in range(-1 * bs, bs + 1):
                for yo in range(-1 * bs, bs + 1):
                    xx = x + xo
                    yy = y + yo
                    # was ``xo is not 0 or yo is not 0`` -- identity tests
                    # on ints are unreliable; compare by value instead
                    if xo != 0 or yo != 0:
                        # was ``yy < shape[0]``; the y bound must use
                        # shape[1], otherwise non-square images break
                        if xx >= 0 and xx < shape[0] and \
                           yy >= 0 and yy < shape[1]:
                            otherLabel = labels[xx, yy]
                            if l != otherLabel:
                                edge = rag.findEdge(long(l), long(otherLabel))
                                others.append((xx, yy, edge))
            if self.labelMode:
                for other in others:
                    eid = other[2].id
                    oldLabel = self.edgeLabels[self.edgeIdRag2dToRag[eid]]
                    if self.currentLabel == oldLabel:
                        newLabel = oldLabel
                    else:
                        newLabel = self.currentLabel
                    self.edgeLabels[self.edgeIdRag2dToRag[eid]] = newLabel
                    self.edgeLabels2d[eid] = newLabel
                imgWithEdges = rag.showEdgeFeature(self.visuImg2d, self.edgeLabels2d, returnImg=True, labelMode=self.labelMode)
                self.implot.set_data(numpy.swapaxes(imgWithEdges, 0, 1))
                plt.draw()
# Export the interactive edge-labeling GUI under the vigra.graphs namespace.
TinyEdgeLabelGui.__module__ = 'vigra.graphs'
graphs.TinyEdgeLabelGui = TinyEdgeLabelGui
def loadGridRagHDF5(filename , dset):
    """Restore a GridRegionAdjacencyGraph that was serialized to HDF5.

    Reads the label volume, the graph serialization, the affiliated edges
    and the ignore label from the group *dset* of the HDF5 file *filename*
    and rebuilds the rag together with its base grid graph.
    """
    labels = readHDF5(filename, dset + '/labels')
    gridGraph = graphs.gridGraph(labels.shape)

    # rebuild the rag itself from its serialization
    gridRag = GridRegionAdjacencyGraph()
    gridRag.deserialize(readHDF5(filename, dset + '/graph'))

    # rebuild the grid-graph edges affiliated with each rag edge
    affEdgeSerialization = readHDF5(filename, dset + '/affiliated_edges')
    affiliatedEdges = graphs._deserialzieGridGraphAffiliatedEdges(
        gridGraph, gridRag, affEdgeSerialization)
    ignoreLabel = readHDF5(filename, dset + '/ignore_label')

    gridRag.affiliatedEdges = affiliatedEdges
    gridRag.labels = taggedView(labels, "xyz")
    gridRag.ignoreLabel = int(ignoreLabel[0])
    gridRag.baseGraphLabels = taggedView(labels, "xyz")
    gridRag.baseGraph = gridGraph
    return gridRag
loadGridRagHDF5.__module__ = 'vigra.graphs'
graphs.loadGridRagHDF5 = loadGridRagHDF5
def regionAdjacencyGraph(graph,labels,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
    """ Return a region adjacency graph for a labeled graph.

        Parameters:

            - graph -- input graph
            - labels -- node-map with a label for each node
            - ignoreLabel -- label to ignore (default: None)
            - reserveEdges -- reserve a certain number of edges (default: 0)

        Returns:

            - rag -- instance of RegionAdjacencyGraph or GridRegionAdjacencyGraph

        If graph is a GridGraph or a GridRegionAdjacencyGraph, a GridRegionAdjacencyGraph
        will be returned.
        Otherwise a RegionAdjacencyGraph will be returned.
    """
    # Grid-based graphs keep their link to the pixel grid, so they get the
    # specialised GridRegionAdjacencyGraph subclass.
    if isinstance(graph , graphs.GridRegionAdjacencyGraph) or graphs.isGridGraph(graph):
        return GridRegionAdjacencyGraph(graph=graph, labels=labels, ignoreLabel=ignoreLabel,
            reserveEdges=reserveEdges, maxLabel=maxLabel, isDense=isDense)
    else:
        return RegionAdjacencyGraph(graph=graph, labels=labels, ignoreLabel=ignoreLabel,
            reserveEdges=reserveEdges, maxLabel=maxLabel, isDense=isDense)
regionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.regionAdjacencyGraph = regionAdjacencyGraph
def gridRegionAdjacencyGraph(labels,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
    """Build a grid graph and its region adjacency graph from a labeling.

    Convenience wrapper around 'graphs.gridGraph' and
    'graphs.regionAdjacencyGraph'.

    Keyword Arguments:

        - labels : label image
        - ignoreLabel : label to ignore (default: None)
        - reserveEdges : reserve a number of edges (default: 0)

    Returns the pair (gridGraph, rag).
    """
    baseGraph = graphs.gridGraph(numpy.squeeze(labels).shape)
    rag = graphs.regionAdjacencyGraph(
        graph=baseGraph, labels=labels, ignoreLabel=ignoreLabel,
        reserveEdges=reserveEdges, maxLabel=maxLabel, isDense=isDense)
    return baseGraph, rag
gridRegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.gridRegionAdjacencyGraph = gridRegionAdjacencyGraph
# Install the RAG convenience functions, then drop the generator helper so
# it does not leak into the module namespace.
_genRegionAdjacencyGraphConvenienceFunctions()
del _genRegionAdjacencyGraphConvenienceFunctions
def _genGraphSegmentationFunctions():

    def getNodeSizes(graph):
        """ get size of nodes:

            This function will try to call 'graph.nodeSize()'.
            If this fails, a node map filled with 1.0 will be returned.

        Keyword Arguments:

            - graph : input graph
        """
        try:
            return graph.nodeSize()
        except Exception:
            # graph type provides no node sizes -> uniform map of ones
            # (narrowed from a bare ``except`` so KeyboardInterrupt and
            # SystemExit are not swallowed)
            size = graphs.graphMap(graph, 'node', dtype=numpy.float32)
            size[:] = 1
            return size
    getNodeSizes.__module__ = 'vigra.graphs'
    graphs.getNodeSizes = getNodeSizes
def getEdgeLengths(graph):
""" get lengths/sizes of edges:
This functions will try to call 'graph.edgeLength()' .
If this fails, an edge map filled with 1.0 will be
returned
Keyword Arguments:
- graph : input graph
"""
try:
return graph.edgeLengths()
except:
size = graphs.graphMap(graph,'edge',dtype=numpy.float32)
size[:]=1
return size
getEdgeLengths.__module__ = 'vigra.graphs'
graphs.getEdgeLengths = getEdgeLengths
def felzenszwalbSegmentation(graph,edgeWeights,nodeSizes=None,k=1.0,nodeNumStop=None,out=None):
""" felzenszwalbs segmentation method
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weights / indicators
- nodeSizes : size of each node (default: None)
If nodeSizes is None, 'getNodeSizes' will be called
- k : free parameter in felzenszwalbs algorithms (default : 1.0)
(todo: write better docu)
- nodeNumStop : stop the agglomeration at a given nodeNum (default :None)
If nodeNumStop is None, the resulting number of nodes does depends on k.
- backgroundBias : backgroundBias (default : None)
"""
if nodeNumStop is None :
nodeNumStop=-1
if nodeSizes is None :
nodeSizes=graphs.getNodeSizes(graph)
return graphs._felzenszwalbSegmentation(graph=graph,edgeWeights=edgeWeights,nodeSizes=nodeSizes,
k=k,nodeNumStop=nodeNumStop,out=out)
felzenszwalbSegmentation.__module__ = 'vigra.graphs'
graphs.felzenszwalbSegmentation = felzenszwalbSegmentation
def edgeWeightedWatersheds(graph,edgeWeights,seeds,backgroundLabel=None,backgroundBias=None,out=None):
""" edge weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- edgeWeights : evaluation weights
- seeds : node map with seeds .
For at least one node, seeds must be nonzero
- backgroundLabel : a specific backgroundLabel (default : None)
- backgroundBias : backgroundBias (default : None)
"""
if backgroundLabel is None and backgroundBias is None:
return graphs._edgeWeightedWatershedsSegmentation(graph=graph,edgeWeights=edgeWeights,seeds=seeds,
out=out)
else :
if backgroundLabel is None or backgroundBias is None:
raise RuntimeError("if backgroundLabel or backgroundBias is not None, the other must also be not None")
return graphs._carvingSegmentation(graph=graph,edgeWeights=edgeWeights,seeds=seeds,
backgroundLabel=backgroundLabel,backgroundBias=backgroundBias,out=out)
edgeWeightedWatersheds.__module__ = 'vigra.graphs'
graphs.edgeWeightedWatersheds = edgeWeightedWatersheds
def nodeWeightedWatershedsSeeds(graph,nodeWeights,out=None):
""" generate watersheds seeds
Keyword Arguments :
- graph : input graph
- nodeWeights : node height map
- out : seed map
"""
return graphs._nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights,out=out)
nodeWeightedWatershedsSeeds.__module__ = 'vigra.graphs'
graphs.nodeWeightedWatershedsSeeds = nodeWeightedWatershedsSeeds
def geoDt(graph, edgeWeights, nodeWeights, mask, out=None):
""" node weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weight map
- nodeWeights : node weight map
- mask : mask where to calculate the distance from
"""
return graphs._geoDt(graph=graph, edgeWeights=edgeWeights, nodeWeights=nodeWeights, mask=mask, out=out)
geoDt.__module__ = 'vigra.graphs'
graphs.geoDt = geoDt
def shortestPathSegmentation(graph, edgeWeights, nodeWeights, seeds=None, out=None):
""" node weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weight map
- nodeWeights : node weight map
- seeds : node map with seeds (default: None)
If seeds are None, 'nodeWeightedWatershedsSeeds' will be called
"""
if seeds is None:
seeds = graphs.nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights)
return graphs._shortestPathSegmentation(graph=graph, edgeWeights=edgeWeights, nodeWeights=nodeWeights,
seeds=seeds, out=out)
shortestPathSegmentation.__module__ = 'vigra.graphs'
graphs.shortestPathSegmentation = shortestPathSegmentation
def nodeWeightedWatersheds(graph,nodeWeights,seeds=None,method='regionGrowing',out=None):
""" node weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- nodeWeights : node height map / evaluation weights
- seeds : node map with seeds (default: None)
If seeds are None, 'nodeWeightedWatershedsSeeds' will be called
"""
if seeds is None:
seeds = graphs.nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights)
if method!='regionGrowing':
raise RuntimeError("currently only 'regionGrowing' is supported")
return graphs._nodeWeightedWatershedsSegmentation(graph=graph,nodeWeights=nodeWeights,seeds=seeds,method=method,out=out)
nodeWeightedWatersheds.__module__ = 'vigra.graphs'
graphs.nodeWeightedWatersheds = nodeWeightedWatersheds
def seededSegmentation(graph, nodeMap=None, edgeMap=None, seeds=None, alg='ws',out=None,**kwargs):
"""
alg:
- 'ws' watershed
- 'sp' shortest path
- 'crf' crf/mrf method
- 'hc' hierarchical-clustering method
"""
if alg == 'ws':
# "default" node weighted watershed
if nodeMap is not None and edgeMap is None:
seg = graphs.nodeWeightedWatersheds(graph=graph,
nodeWeights=nodeMap,
seeds=seeds,out=out)
# edge weighted watershed
elif nodeMap is None and edgeMap is not None:
seg = graphs.edgeWeightedWatersheds(graph=graph,
edgeWeights=edgeMap,
seeds=seeds,out=out)
# hybrid (not yet implemented)
elif nodeMap is not None and edgeMap is not None:
raise RuntimeError("Not Yet Implemented")
else :
# error
raise RuntimeError("error")
elif alg == 'sp':
# "default" shortest path
if nodeMap is None and edgeMap is None:
raise RuntimeError("Not Yet Implemented")
elif nodeMap is not None or edgeMap is not None:
if nodeMap is None:
nodeMap = graphs.graphMap(graph,'node',dtype='float32')
nodeMap[:] = 0
if edgeMap is None:
edgeMap = graphs.graphMap(graph,'edge',dtype='float32')
edgeMap[:] = 0
seg = graphs.shortestPathSegmentation(graph=graph,
edgeWeights=edgeMap,
nodeWeights=nodeMap,
seeds=seeds,out=out)
else :
# error
raise RuntimeError("error")
elif alg == 'crf':
raise RuntimeError("Not Yet Implemented")
return seg
seededSegmentation.__module__ = 'vigra.graphs'
graphs.seededSegmentation = seededSegmentation
    def wsDtSegmentation(pmap, pmin, minMembraneSize, minSegmentSize, sigmaMinima, sigmaWeights, cleanCloseSeeds=True):
        """A probability map 'pmap' is provided and thresholded using pmin.
        This results in a mask. Every connected component which has fewer pixel
        than 'minMembraneSize' is deleted from the mask. The mask is used to
        calculate the signed distance transformation.

        From this distance transformation the segmentation is computed using
        a seeded watershed algorithm. The seeds are placed on the local maxima
        of the distanceTrafo after smoothing with 'sigmaMinima'.

        The weights of the watershed are defined by the inverse of the signed
        distance transform smoothed with 'sigmaWeights'.

        'minSegmentSize' determines how small the smallest segment in the final
        segmentation is allowed to be. If there are smaller ones the corresponding
        seeds are deleted and the watershed is done again.

        If 'cleanCloseSeeds' is True, multiple seed points that are clearly in the
        same neuron will be merged with a heuristik that ensures that no seeds of
        two different neurons are merged.
        """

        def cdist(xy1, xy2):
            # pairwise euclidean distances between two point sets
            # influenced by: http://stackoverflow.com/a/1871630
            d = numpy.zeros((xy1.shape[1], xy1.shape[0], xy1.shape[0]))
            for i in numpy.arange(xy1.shape[1]):
                # one slice per coordinate: squared per-axis differences
                d[i,:,:] = numpy.square(numpy.subtract.outer(xy1[:,i], xy2[:,i]))
            d = numpy.sum(d, axis=0)
            return numpy.sqrt(d)

        def findBestSeedCloserThanMembrane(seeds, distances, distanceTrafo, membraneDistance):
            """ finds the best seed of the given seeds, that is the seed with the highest value distance transformation."""
            closeSeeds = distances <= membraneDistance
            # NOTE(review): the result of the next call is discarded --
            # dead code left over from a refactoring?
            numpy.zeros_like(closeSeeds)
            # iterate over all close seeds
            maximumDistance = -numpy.inf
            mostCentralSeed = None
            for seed in seeds[closeSeeds]:
                if distanceTrafo[seed[0], seed[1], seed[2]] > maximumDistance:
                    maximumDistance = distanceTrafo[seed[0], seed[1], seed[2]]
                    mostCentralSeed = seed
            # NOTE(review): returns None when seeds[closeSeeds] is empty;
            # the caller then fails on tuple(None) -- confirm that every
            # seed is always within membraneDistance of itself.
            return mostCentralSeed

        def nonMaximumSuppressionSeeds(seeds, distanceTrafo):
            """ removes all seeds that have a neigbour that is closer than the the next membrane

            seeds is a list of all seeds, distanceTrafo is array-like
            return is a list of all seeds that are relevant.

            works only for 3d
            """
            seedsCleaned = set()
            # calculate the distances from each seed to the next seeds.
            distances = cdist(seeds, seeds)
            for i in numpy.arange(len(seeds)):
                membraneDistance = distanceTrafo[seeds[i,0], seeds[i,1], seeds[i,2]]
                bestAlternative = findBestSeedCloserThanMembrane(seeds, distances[i,:], distanceTrafo, membraneDistance)
                seedsCleaned.add(tuple(bestAlternative))
            return numpy.array(list(seedsCleaned))

        def volumeToListOfPoints(seedsVolume, threshold=0.):
            # (n, 3) array of coordinates where the volume exceeds threshold
            return numpy.array(numpy.where(seedsVolume > threshold)).transpose()

        # get the thresholded pmap
        binary = numpy.zeros_like(pmap, dtype=numpy.uint32)
        binary[pmap >= pmin] = 1

        # delete small CCs
        labeled = analysis.labelVolumeWithBackground(binary)
        analysis.sizeFilterSegInplace(labeled, int(numpy.max(labeled)), int(minMembraneSize), checkAtBorder=True)

        # use cleaned binary image as mask
        mask = numpy.zeros_like(binary, dtype = numpy.float32)
        mask[labeled > 0] = 1.

        # perform signed dt on mask: distance inside minus distance outside
        dt = filters.distanceTransform3D(mask)
        dtInv = filters.distanceTransform3D(mask, background=False)
        dtInv[dtInv>0] -= 1
        dtSigned = dt.max() - dt + dtInv

        # separate smoothing scales for seed detection and watershed weights
        dtSignedSmoothMinima = filters.gaussianSmoothing(dtSigned, sigmaMinima)
        dtSignedSmoothWeights = filters.gaussianSmoothing(dtSigned, sigmaWeights)

        seedsVolume = analysis.localMinima3D(dtSignedSmoothMinima, neighborhood=26, allowAtBorder=True)

        if cleanCloseSeeds:
            # merge seeds that are closer to each other than to a membrane
            seeds = nonMaximumSuppressionSeeds(volumeToListOfPoints(seedsVolume), dt)
            seedsVolume = numpy.zeros_like(pmap, dtype=numpy.uint32)
            seedsVolume[seeds.T.tolist()] = 1

        seedsLabeled = analysis.labelVolumeWithBackground(seedsVolume)
        segmentation = analysis.watershedsNew(dtSignedSmoothWeights, seeds = seedsLabeled, neighborhood=26)[0]

        # drop too-small segments and flood the freed space in a second pass
        analysis.sizeFilterSegInplace(segmentation, int(numpy.max(segmentation)), int(minSegmentSize), checkAtBorder=True)
        segmentation = analysis.watershedsNew(dtSignedSmoothWeights, seeds = segmentation, neighborhood=26)[0]

        return segmentation
    wsDtSegmentation.__module__ = 'vigra.analysis'
    analysis.wsDtSegmentation = wsDtSegmentation
def agglomerativeClustering(graph,edgeWeights=None,edgeLengths=None,nodeFeatures=None,nodeSizes=None,
nodeLabels=None,nodeNumStop=None,beta=0.5,metric='l1',wardness=1.0,out=None):
""" agglomerative hierarchicalClustering
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weights / indicators (default : None)
- edgeLengths : length / weight of each edge (default : None)
Since we do weighted mean agglomeration, a length/weight
is needed for each edge to merge 2 edges w.r.t. weighted mean.
If no edgeLengths is given, 'getEdgeLengths' is called.
- nodeFeatures : a feature vector for each node (default: None)
A feature vector as RGB values,or a histogram for each node.
Within the agglomeration, an additional edge weight will be
computed from the "difference" between the features of two adjacent nodes.
The metric specified in the keyword 'metric' is used to compute this
difference
- nodeSizes : size / weight of each node (default : None)
Since we do weighted mean agglomeration, a size / weight
is needed for each node to merge 2 edges w.r.t. weighted mean.
If no nodeSizes is given, 'getNodeSizes' is called.
- nodeNumStop : stop the agglomeration at a given nodeNum (default : graph.nodeNum/2)
- beta : weight between edgeWeights and nodeFeatures based edgeWeights (default:0.5) :
0.0 means only edgeWeights (from keyword edge weights) and 1.0 means only edgeWeights
from nodeFeatures differences
- metric : metric used to compute node feature difference (default : 'l1')
- wardness : 0 means do not apply wards critrion, 1.0 means fully apply wards critrion (default : 1.0)
- out : preallocated nodeMap for the resulting labeling (default : None)
Returns:
A node labele map encoding the segmentation
"""
assert edgeWeights is not None or nodeFeatures is not None
print "prepare "
if nodeNumStop is None:
nodeNumStop = max(graph.nodeNum/2,min(graph.nodeNum,2))
if edgeLengths is None :
print "get edge length"
edgeLengths = graphs.getEdgeLengths(graph)
if nodeSizes is None:
print "get node size"
nodeSizes = graphs.getNodeSizes(graph)
if edgeWeights is None :
print "get wegihts length"
edgeWeights = graphs.graphMap(graph,'edge')
edgeWeights[:]=0
if nodeFeatures is None :
print "get node feat"
nodeFeatures = graphs.graphMap(graph,'node',addChannelDim=True)
nodeFeatures[:]=0
if nodeLabels is None:
nodeLabels = graphs.graphMap(graph,'node',dtype='uint32')
#import sys
#print "graph refcout", sys.getrefcount(graph)
mg = graphs.mergeGraph(graph)
#print "graph refcout", sys.getrefcount(graph)
#mg = []
#del mg
#import gc
#gc.collect()
#print "graph refcout", sys.getrefcount(graph)
#sys.exit(0)
clusterOp = graphs.minEdgeWeightNodeDist(mg,edgeWeights=edgeWeights,edgeLengths=edgeLengths,
nodeFeatures=nodeFeatures,nodeSizes=nodeSizes,
nodeLabels=nodeLabels,
beta=float(beta),metric=metric,wardness=wardness)
hc = graphs.hierarchicalClustering(clusterOp, nodeNumStopCond=nodeNumStop,
buildMergeTreeEncoding=False)
hc.cluster()
labels = hc.resultLabels(out=out)
#del hc
#del clusterOp
#del mg
return labels
agglomerativeClustering.__module__ = 'vigra.graphs'
graphs.agglomerativeClustering = agglomerativeClustering
def minEdgeWeightNodeDist(mergeGraph,edgeWeights=None,edgeLengths=None,nodeFeatures=None,nodeSizes=None,
nodeLabels=None,outWeight=None,
beta=0.5,metric='squaredNorm',wardness=1.0, gamma=10000000.0):
graph=mergeGraph.graph()
assert edgeWeights is not None or nodeFeatures is not None
if edgeLengths is None :
edgeLengths = graphs.getEdgeLengths(graph,addChannelDim=True)
if nodeSizes is None:
nodeSizes = graphs.getNodeSizes(graph,addChannelDim=True)
if edgeWeights is None :
edgeWeights = graphs.graphMap(graph,'edge',addChannelDim=True)
edgeWeights[:]=0
if nodeFeatures is None :
nodeFeatures = graphs.graphMap(graph,'node',addChannelDim=True)
nodeFeatures[:]=0
if outWeight is None:
outWeight=graphs.graphMap(graph,item='edge',dtype=numpy.float32)
if nodeLabels is None :
nodeLabels = graphs.graphMap(graph,'node',dtype='uint32')
nodeLabels[:]=0
if metric=='squaredNorm':
nd=graphs.MetricType.squaredNorm
elif metric=='norm':
nd=graphs.MetricType.norm
elif metric=='chiSquared':
nd=graphs.MetricType.chiSquared
elif metric in ('l1','manhattan'):
nd=graphs.MetricType.manhattan
elif isinstance(metric,graphs.MetricType):
nd=metric
else :
raise RuntimeError("'%s' is not a supported distance type"%str(metric))
# call unsave c++ function and make it sav
print "nodeLabels ",nodeLabels.shape, nodeLabels.dtype
op = graphs.__minEdgeWeightNodeDistOperator(mergeGraph,edgeWeights,edgeLengths,nodeFeatures,nodeSizes,outWeight,nodeLabels,
float(beta),nd,float(wardness),float(gamma))
op.__base_object__=mergeGraph
op.__outWeightArray__=outWeight
op.edgeLengths=edgeLengths
op.nodeSizes=nodeSizes
op.edgeWeights=edgeWeights
op.nodeFeatures=nodeFeatures
return op
minEdgeWeightNodeDist.__module__ = 'vigra.graphs'
graphs.minEdgeWeightNodeDist = minEdgeWeightNodeDist
def pythonClusterOperator(mergeGraph,operator,useMergeNodeCallback=True,useMergeEdgesCallback=True,useEraseEdgeCallback=True):
#call unsave function and make it save
op = graphs.__pythonClusterOperator(mergeGraph,operator,useMergeNodeCallback,useMergeEdgesCallback,useEraseEdgeCallback)
#op.__dict__['__base_object__']=mergeGraph
#op.__base_object__=mergeGraph
return op
pythonClusterOperator.__module__ = 'vigra.graphs'
graphs.pythonClusterOperator = pythonClusterOperator
def hierarchicalClustering(clusterOperator,nodeNumStopCond,buildMergeTreeEncoding=True):
# call unsave c++ function and make it save
hc = graphs.__hierarchicalClustering(clusterOperator,long(nodeNumStopCond),bool(buildMergeTreeEncoding))
#hc.__dict__['__base_object__']=clusterOperator
hc.__base_object__ = clusterOperator
return hc
hierarchicalClustering.__module__ = 'vigra.graphs'
graphs.hierarchicalClustering = hierarchicalClustering
# Install the segmentation helpers, then drop the generator function so it
# does not leak into the module namespace.
_genGraphSegmentationFunctions()
del _genGraphSegmentationFunctions
def _genHistogram():

    def gaussianHistogram(image,minVals,maxVals,bins=30,
                          sigma=3.0,sigmaBin=2.0,out=None):
        """Per-pixel histogram, smoothed spatially (sigma) and along the
        bin axis (sigmaBin), normalized so each pixel's bins sum to 1.

        The value range per channel is given by minVals/maxVals.
        """
        spatialDim = image.ndim - 1
        out = histogram.gaussianHistogram_(image=image, minVals=minVals, maxVals=maxVals,
                                           bins=bins, sigma=sigma, sigmaBin=sigmaBin,
                                           out=out)
        # collapse channel+bin axes into one trailing histogram axis
        out = out.reshape(image.shape[0:spatialDim] + (-1,))
        # Normalize along the trailing bin axis.  ``...`` handles any
        # spatial dimension, replacing the old hard-coded 2D/3D/4D branches.
        out /= numpy.sum(out, axis=spatialDim)[..., numpy.newaxis]
        return out
    gaussianHistogram.__module__ = 'vigra.histogram'
    histogram.gaussianHistogram = gaussianHistogram
def gaussianRankOrder(image, minVal=None, maxVal=None,
bins=20, sigmas=None, ranks=[0.1,0.25,0.5,0.75,0.9],
out=None):
image = numpy.require(image.squeeze(),dtype='float32')
nDim = image.ndim
if sigmas is None:
sigmas = (2.0,)*nDim + (float(bins)/10.0,)
ranks = numpy.require(ranks,dtype='float32')
sigmas = numpy.require(sigmas,dtype='float32')
assert len(sigmas) == image.ndim + 1
if minVal is None :
minVal = image.min()
if maxVal is None :
maxVal = image.max()
#print "image",image.shape,image.dtype
#print "ranks",ranks.shape,ranks.dtype
#print "sigmas",sigmas
return histogram._gaussianRankOrder(image=image,
minVal=float(minVal),
maxVal=float(maxVal),
bins=int(bins),
sigmas=sigmas,ranks=ranks,
out=out)
gaussianRankOrder.__module__ = 'vigra.histogram'
histogram.gaussianRankOrder = gaussianRankOrder
# Install the histogram helpers, then drop the generator function.
_genHistogram()
del _genHistogram
def _genGraphSmoothingFunctions():

    def recursiveGraphSmoothing( graph,nodeFeatures,edgeIndicator,gamma,
                                 edgeThreshold,scale=1.0,iterations=1,out=None):
        """ recursive graph smoothing to smooth node features.

        Each node feature is mixed with the features of its neighbor nodes.
        The mixing strength per edge is::

            edgeIndicator > edgeThreshold ? 0 : exp(-1.0*gamma*edgeIndicator)*scale

        so the filter is edge preserving.

        Keyword Arguments :

            - graph : input graph
            - nodeFeatures : node features which should be smoothed
            - edgeIndicator : edge indicator
            - gamma : scale edgeIndicator by gamma before taking the negative exponent
            - scale : how much a node is mixed with its neighbours per iteration
            - iterations : how often the smoothing is applied recursively

        Returns :
            smoothed nodeFeatures
        """
        return graphs._recursiveGraphSmoothing(graph=graph, nodeFeatures=nodeFeatures,
                                               edgeIndicator=edgeIndicator,
                                               gamma=gamma, edgeThreshold=edgeThreshold,
                                               scale=scale, iterations=iterations, out=out)
    recursiveGraphSmoothing.__module__ = 'vigra.graphs'
    graphs.recursiveGraphSmoothing = recursiveGraphSmoothing

_genGraphSmoothingFunctions()
del _genGraphSmoothingFunctions
def _genGraphMiscFunctions():

    def nodeFeaturesToEdgeWeights(graph,nodeFeatures,metric='l1',out=None):
        """ compute an edge indicator from node features.

        Keyword Arguments :

            - graph : input graph
            - nodeFeatures : node map with a feature vector for each node
            - metric : metric / distance used to turn the two node feature
              vectors of an edge into an edge weight

        Returns :
            edge indicator
        """
        return graphs._nodeFeatureDistToEdgeWeight(graph=graph, nodeFeatures=nodeFeatures,
                                                   metric=metric, out=out)
    nodeFeaturesToEdgeWeights.__module__ = 'vigra.graphs'
    graphs.nodeFeaturesToEdgeWeights = nodeFeaturesToEdgeWeights

_genGraphMiscFunctions()
del _genGraphMiscFunctions
def _genBlockwiseFunctions():

    def makeTuple(val, ndim):
        """Broadcast a scalar to an ndim-tuple, or validate a given sequence."""
        if isinstance(val, Number):
            return (float(val),) * ndim
        tvals = tuple(val)
        if len(tvals) != ndim:
            raise RuntimeError("sigma/innerScale/outerScale must be as long as ndim, or must be a scalar")
        return tvals

    def getConvolutionOptionsClass(ndim):
        """Return the BlockwiseConvolutionOptions class for 2 <= ndim <= 5."""
        assert ndim >= 2 and ndim <= 5
        optionClasses = {
            2: blockwise.BlockwiseConvolutionOptions2D,
            3: blockwise.BlockwiseConvolutionOptions3D,
            4: blockwise.BlockwiseConvolutionOptions4D,
            5: blockwise.BlockwiseConvolutionOptions5D,
        }
        return optionClasses[ndim]

    def convolutionOptions(blockShape, sigma=None, innerScale=None, outerScale=None, numThreads=cpu_count()):
        """Assemble a blockwise convolution options object.

        blockShape determines the dimensionality; sigma/innerScale/outerScale
        may each be a scalar (broadcast to all axes) or an ndim sequence.
        """
        ndim = len(blockShape)
        options = getConvolutionOptionsClass(ndim)()
        options.blockShape = blockShape
        options.numThreads = numThreads
        if sigma is not None:
            options.stdDev = makeTuple(sigma, ndim)
        if innerScale is not None:
            options.innerScale = makeTuple(innerScale, ndim)
        if outerScale is not None:
            options.outerScale = makeTuple(outerScale, ndim)
        return options
    convolutionOptions.__module__ = 'vigra.blockwise'
    blockwise.convolutionOptions = convolutionOptions
    blockwise.convOpts = convolutionOptions

    def _makeFilterWrapper(name):
        """Create the public wrapper for the C++ function blockwise._<name>."""
        impl = getattr(blockwise, '_' + name)
        def wrapper(image, options, out=None):
            out = impl(image, options, out)
            return out
        wrapper.__name__ = name
        wrapper.__module__ = 'vigra.blockwise'
        return wrapper

    # The six filter wrappers are identical except for the wrapped C++
    # function; generate them instead of repeating the boilerplate.
    for _name in ('gaussianSmooth', 'gaussianGradient', 'gaussianGradientMagnitude',
                  'hessianOfGaussianEigenvalues', 'hessianOfGaussianFirstEigenvalue',
                  'hessianOfGaussianLastEigenvalue'):
        setattr(blockwise, _name, _makeFilterWrapper(_name))

_genBlockwiseFunctions()
del _genBlockwiseFunctions
def loadBSDGt(filename):
    """Load all ground-truth segmentations from a BSD .mat file.

    Returns a uint32 array of shape (x, y, nGt) with one label image per
    annotator, axes swapped from MATLAB's (row, col) order to (x, y).
    """
    import scipy.io as sio
    matContents = sio.loadmat(filename)
    gts = []
    # one ground-truth struct per human annotator
    for entry in matContents['groundTruth'][0]:
        gt = entry[0]['Segmentation'][0]
        # MATLAB stores (row, col); swap to the (x, y) convention
        gt = numpy.swapaxes(gt, 0, 1).astype(numpy.uint32)
        gts.append(gt[:, :, None])
    # (debug prints of min/max and shape removed)
    return numpy.concatenate(gts, axis=2)
def pmapSeeds(pmap):
    # NOTE(review): unimplemented placeholder -- intentionally does nothing.
    pass
|
timoMa/vigra
|
vigranumpy/lib/__init__.py
|
Python
|
mit
| 98,721
|
[
"Gaussian",
"NEURON"
] |
8b79e620f28fdcf96b8be77dd5a28360c0e12b29f3c26881aac1c3cfa3e2599f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sessions2trash.py
Run this script in a web2py environment shell e.g. python web2py.py -S app
If models are loaded (-M option) auth.settings.expiration is assumed
for sessions without an expiration. If models are not loaded, sessions older
than 60 minutes are removed. Use the --expiration option to override these
values.
Typical usage:
# Delete expired sessions every 5 minutes
nohup python web2py.py -S app -M -R scripts/sessions2trash.py &
# Delete sessions older than 60 minutes regardless of expiration,
# with verbose output, then exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 3600 -f -v
# Delete all sessions regardless of expiry and exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 0
"""
from __future__ import with_statement
from gluon.storage import Storage
from optparse import OptionParser
import cPickle
import datetime
import os
import stat
import time
# Fallback session lifetime (in minutes) used when models are not loaded
# and no --expiration option is given.
EXPIRATION_MINUTES = 60
# Default pause between cleanup passes (in minutes).
SLEEP_MINUTES = 5
VERSION = 0.3
class SessionSet(object):
    """Class representing a set of sessions"""

    def __init__(self, expiration, force, verbose):
        self.expiration = expiration   # max session age in seconds
        self.force = force             # ignore per-session auth expiration
        self.verbose = verbose         # 0: quiet, 1: one line/session, 2: details

    def get(self):
        """Get session files/records."""
        raise NotImplementedError

    def trash(self):
        """Trash expired sessions."""
        now = datetime.datetime.now()
        for item in self.get():
            status = 'OK'
            last_visit = item.last_visit_default()
            try:
                session = item.get()
                if session.auth:
                    # prefer the session's own expiration unless --force
                    if session.auth.expiration and not self.force:
                        self.expiration = session.auth.expiration
                    if session.auth.last_visit:
                        last_visit = session.auth.last_visit
            except Exception:
                # Unreadable/corrupt session data: keep the defaults
                # computed above.  (Narrowed from a bare ``except``.)
                pass
            age = 0
            if last_visit:
                age = total_seconds(now - last_visit)
            # expiration == 0 means "trash everything"
            if age > self.expiration or not self.expiration:
                item.delete()
                status = 'trashed'
            # prints use function form throughout for consistency (the
            # final branch already did)
            if self.verbose > 1:
                print('key: %s' % str(item))
                print('expiration: %s seconds' % self.expiration)
                print('last visit: %s' % str(last_visit))
                print('age: %s seconds' % age)
                print('status: %s' % status)
                print('')
            elif self.verbose > 0:
                print('%s %s' % (str(item), status))
class SessionSetDb(SessionSet):
    """Class representing a set of sessions stored in database"""

    def __init__(self, expiration, force, verbose):
        SessionSet.__init__(self, expiration, force, verbose)

    def get(self):
        """Return list of SessionDb instances for existing sessions."""
        from gluon import current
        # response._dbtable_and_field is the tuple (record_id_name, table,
        # record_id, unique_key); only the table is used here.
        # (Removed the unused local ``tablename``.)
        (record_id_name, table, record_id, unique_key) = \
            current.response._dbtable_and_field
        return [SessionDb(row) for row in table._db(table.id > 0).select()]
class SessionSetFiles(SessionSet):
    """Class representing a set of sessions stored in flat files"""

    def __init__(self, expiration, force, verbose):
        SessionSet.__init__(self, expiration, force, verbose)

    def get(self):
        """Return list of SessionFile instances for existing sessions."""
        # ``request`` is the web2py global of the app this script runs in
        session_dir = os.path.join(request.folder, 'sessions')
        return [SessionFile(os.path.join(session_dir, name))
                for name in os.listdir(session_dir)]
class SessionDb(object):
    """Class representing a single session stored in database"""

    def __init__(self, row):
        self.row = row   # DAL Row of the web2py_session_* table

    def delete(self):
        """Delete this session record and commit immediately."""
        from gluon import current
        (record_id_name, table, record_id, unique_key) = \
            current.response._dbtable_and_field
        self.row.delete_record()
        table._db.commit()

    def get(self):
        """Return the unpickled session data as a Storage object."""
        session = Storage()
        session.update(cPickle.loads(self.row.session_data))
        return session

    def last_visit_default(self):
        """Return the record's modification time, or None if unparsable."""
        if isinstance(self.row.modified_datetime, datetime.datetime):
            return self.row.modified_datetime
        try:
            return datetime.datetime.strptime(self.row.modified_datetime, '%Y-%m-%d %H:%M:%S.%f')
        except (TypeError, ValueError):
            # strptime raises ValueError on a bad format and TypeError on a
            # non-string; anything else should propagate.  (Narrowed from a
            # bare ``except``; print made function-form.)
            print('failed to retrieve last modified time (value: %s)'
                  % self.row.modified_datetime)

    def __str__(self):
        return self.row.unique_key
class SessionFile(object):
    """Class representing a single session stored as a flat file"""

    def __init__(self, filename):
        self.filename = filename

    def delete(self):
        """Remove the session file from disk."""
        os.unlink(self.filename)

    def get(self):
        """Return the unpickled session data as a Storage object."""
        with open(self.filename, 'rb+') as handle:
            data = cPickle.load(handle)
        session = Storage()
        session.update(data)
        return session

    def last_visit_default(self):
        """Return the file's modification time as a datetime."""
        mtime = os.stat(self.filename)[stat.ST_MTIME]
        return datetime.datetime.fromtimestamp(mtime)

    def __str__(self):
        return self.filename
def total_seconds(delta):
    """
    Adapted from Python 2.7's timedelta.total_seconds() method.

    Args:
        delta: datetime.timedelta instance.
    """
    # express the whole delta in microseconds, then scale back to seconds
    microseconds = (delta.days * 24 * 3600 + delta.seconds) * 10 ** 6 \
        + delta.microseconds
    return microseconds / 10 ** 6
def main():
    """Main processing.

    Parses command-line options, then repeatedly trashes expired sessions
    from both the DB-backed and file-backed stores until interrupted (or
    once, with -o).
    """
    usage = '%prog [options]' + '\nVersion: %s' % VERSION
    parser = OptionParser(usage=usage)
    parser.add_option('-f', '--force',
                      action='store_true', dest='force', default=False,
                      help=('Ignore session expiration. '
                            'Force expiry based on -x option or auth.settings.expiration.')
                      )
    parser.add_option('-o', '--once',
                      action='store_true', dest='once', default=False,
                      help='Delete sessions, then exit.',
                      )
    parser.add_option('-s', '--sleep',
                      dest='sleep', default=SLEEP_MINUTES * 60, type="int",
                      help='Number of seconds to sleep between executions. Default 300.',
                      )
    parser.add_option('-v', '--verbose',
                      default=0, action='count',
                      help="print verbose output, a second -v increases verbosity")
    parser.add_option('-x', '--expiration',
                      dest='expiration', default=None, type="int",
                      help='Expiration value for sessions without expiration (in seconds)',
                      )
    (options, unused_args) = parser.parse_args()

    # Expiration precedence: -x flag, then auth.settings.expiration (which
    # may not exist in every app -- hence the bare except), then default.
    expiration = options.expiration
    if expiration is None:
        try:
            expiration = auth.settings.expiration
        except:
            expiration = EXPIRATION_MINUTES * 60

    set_db = SessionSetDb(expiration, options.force, options.verbose)
    set_files = SessionSetFiles(expiration, options.force, options.verbose)
    # Trash both stores each cycle; loop forever unless -o was given.
    while True:
        set_db.trash()
        set_files.trash()

        if options.once:
            break
        else:
            if options.verbose:
                print 'Sleeping %s seconds' % (options.sleep)
            time.sleep(options.sleep)
# Script entry point: runs immediately, as this file is executed inside the
# web2py shell environment (e.g. ``python web2py.py -S app -M -R ...``).
main()
|
jefftc/changlab
|
web2py/scripts/sessions2trash.py
|
Python
|
mit
| 7,366
|
[
"VisIt"
] |
6730852dce77c5cc870d3bee51385de902a111ee5c0d40666e7eba641c00d7c1
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipeline, the top-level Beam object.
A pipeline holds a DAG of data transforms. Conceptually the nodes of the DAG
are transforms (:class:`~apache_beam.transforms.ptransform.PTransform` objects)
and the edges are values (mostly :class:`~apache_beam.pvalue.PCollection`
objects). The transforms take as inputs one or more PValues and output one or
more :class:`~apache_beam.pvalue.PValue` s.
The pipeline offers functionality to traverse the graph. The actual operation
to be executed for each node visited is specified through a runner object.
Typical usage::
# Create a pipeline object using a local runner for execution.
with beam.Pipeline('DirectRunner') as p:
# Add to the pipeline a "Create" transform. When executed this
# transform will produce a PCollection object with the specified values.
pcoll = p | 'Create' >> beam.Create([1, 2, 3])
# Another transform could be applied to pcoll, e.g., writing to a text file.
# For other transforms, refer to transforms/ directory.
pcoll | 'Write' >> beam.io.WriteToText('./output')
# run() will execute the DAG stored in the pipeline. The execution of the
# nodes visited is done using the specified local runner.
"""
from __future__ import absolute_import
import abc
import collections
import logging
import os
import shutil
import tempfile
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options_validator import PipelineOptionsValidator
from apache_beam.pvalue import PCollection
from apache_beam.runners import PipelineRunner
from apache_beam.runners import create_runner
from apache_beam.transforms import ptransform
from apache_beam.typehints import TypeCheckError
from apache_beam.typehints import typehints
from apache_beam.utils import urns
from apache_beam.utils.annotations import deprecated
__all__ = ['Pipeline', 'PTransformOverride']
class Pipeline(object):
  """A pipeline object that manages a DAG of
  :class:`~apache_beam.pvalue.PValue` s and their
  :class:`~apache_beam.transforms.ptransform.PTransform` s.

  Conceptually the :class:`~apache_beam.pvalue.PValue` s are the DAG's nodes and
  the :class:`~apache_beam.transforms.ptransform.PTransform` s computing
  the :class:`~apache_beam.pvalue.PValue` s are the edges.

  All the transforms applied to the pipeline must have distinct full labels.
  If same transform instance needs to be applied then the right shift operator
  should be used to designate new names
  (e.g. ``input | "label" >> my_tranform``).
  """

  def __init__(self, runner=None, options=None, argv=None):
    """Initialize a pipeline object.

    Args:
      runner (~apache_beam.runners.runner.PipelineRunner): An object of
        type :class:`~apache_beam.runners.runner.PipelineRunner` that will be
        used to execute the pipeline. For registered runners, the runner name
        can be specified, otherwise a runner object must be supplied.
      options (~apache_beam.options.pipeline_options.PipelineOptions):
        A configured
        :class:`~apache_beam.options.pipeline_options.PipelineOptions` object
        containing arguments that should be used for running the Beam job.
      argv (List[str]): a list of arguments (such as :data:`sys.argv`)
        to be used for building a
        :class:`~apache_beam.options.pipeline_options.PipelineOptions` object.
        This will only be used if argument **options** is :data:`None`.

    Raises:
      ~exceptions.ValueError: if either the runner or options argument is not
        of the expected type.
    """
    # Options precedence: explicit PipelineOptions, then raw argv list,
    # then empty defaults.
    if options is not None:
      if isinstance(options, PipelineOptions):
        self._options = options
      else:
        raise ValueError(
            'Parameter options, if specified, must be of type PipelineOptions. '
            'Received : %r', options)
    elif argv is not None:
      if isinstance(argv, list):
        self._options = PipelineOptions(argv)
      else:
        raise ValueError(
            'Parameter argv, if specified, must be a list. Received : %r', argv)
    else:
      self._options = PipelineOptions([])

    if runner is None:
      runner = self._options.view_as(StandardOptions).runner
      if runner is None:
        runner = StandardOptions.DEFAULT_RUNNER
        logging.info(('Missing pipeline option (runner). Executing pipeline '
                      'using the default runner: %s.'), runner)

    if isinstance(runner, str):
      runner = create_runner(runner)
    elif not isinstance(runner, PipelineRunner):
      raise TypeError('Runner must be a PipelineRunner object or the '
                      'name of a registered runner.')

    # Validate pipeline options
    errors = PipelineOptionsValidator(self._options, runner).validate()
    if errors:
      raise ValueError(
          'Pipeline has validations errors: \n' + '\n'.join(errors))

    # Default runner to be used.
    self.runner = runner
    # Stack of transforms generated by nested apply() calls. The stack will
    # contain a root node as an enclosing (parent) node for top transforms.
    self.transforms_stack = [AppliedPTransform(None, None, '', None)]
    # Set of transform labels (full labels) applied to the pipeline.
    # If a transform is applied and the full label is already in the set
    # then the transform will have to be cloned with a new label.
    self.applied_labels = set()

  @property
  @deprecated(since='First stable release',
              extra_message='References to <pipeline>.options'
              ' will not be supported')
  def options(self):
    # Deprecated accessor for the pipeline's PipelineOptions.
    return self._options

  def _current_transform(self):
    """Returns the transform currently on the top of the stack."""
    return self.transforms_stack[-1]

  def _root_transform(self):
    """Returns the root transform of the transform stack."""
    return self.transforms_stack[0]

  def _remove_labels_recursively(self, applied_transform):
    # Forget the labels of all descendants so they can be re-registered when
    # a replacement transform is expanded under the same full label.
    for part in applied_transform.parts:
      if part.full_label in self.applied_labels:
        self.applied_labels.remove(part.full_label)
      if part.parts:
        for part2 in part.parts:
          self._remove_labels_recursively(part2)

  def _replace(self, override):
    # Applies a single PTransformOverride: first swap matching transforms
    # in place, then rewire inputs/outputs to the replacement PCollections.
    assert isinstance(override, PTransformOverride)
    matcher = override.get_matcher()

    # Maps original output PCollections to their replacements, and per-node
    # pending input/output rewires (applied after visiting completes).
    output_map = {}
    output_replacements = {}
    input_replacements = {}

    class TransformUpdater(PipelineVisitor): # pylint: disable=used-before-assignment
      """"A visitor that replaces the matching PTransforms."""

      def __init__(self, pipeline):
        self.pipeline = pipeline

      def _replace_if_needed(self, transform_node):
        if matcher(transform_node):
          replacement_transform = override.get_replacement_transform(
              transform_node.transform)
          inputs = transform_node.inputs
          # TODO: Support replacing PTransforms with multiple inputs.
          if len(inputs) > 1:
            raise NotImplementedError(
                'PTransform overriding is only supported for PTransforms that '
                'have a single input. Tried to replace input of '
                'AppliedPTransform %r that has %d inputs',
                transform_node, len(inputs))
          transform_node.transform = replacement_transform
          self.pipeline.transforms_stack.append(transform_node)

          # Keeping the same label for the replaced node but recursively
          # removing labels of child transforms since they will be replaced
          # during the expand below.
          self.pipeline._remove_labels_recursively(transform_node)

          new_output = replacement_transform.expand(inputs[0])
          if new_output.producer is None:
            # When current transform is a primitive, we set the producer here.
            new_output.producer = transform_node

          # We only support replacing transforms with a single output with
          # another transform that produces a single output.
          # TODO: Support replacing PTransforms with multiple outputs.
          if (len(transform_node.outputs) > 1 or
              not isinstance(transform_node.outputs[None], PCollection) or
              not isinstance(new_output, PCollection)):
            raise NotImplementedError(
                'PTransform overriding is only supported for PTransforms that '
                'have a single output. Tried to replace output of '
                'AppliedPTransform %r with %r.'
                , transform_node, new_output)

          # Recording updated outputs. This cannot be done in the same visitor
          # since if we dynamically update output type here, we'll run into
          # errors when visiting child nodes.
          output_map[transform_node.outputs[None]] = new_output

          self.pipeline.transforms_stack.pop()

      def enter_composite_transform(self, transform_node):
        self._replace_if_needed(transform_node)

      def visit_transform(self, transform_node):
        self._replace_if_needed(transform_node)

    self.visit(TransformUpdater(self))

    # Adjusting inputs and outputs
    class InputOutputUpdater(PipelineVisitor): # pylint: disable=used-before-assignment
      """"A visitor that records input and output values to be replaced.

      Input and output values that should be updated are recorded in maps
      input_replacements and output_replacements respectively.

      We cannot update input and output values while visiting since that results
      in validation errors.
      """

      def __init__(self, pipeline):
        self.pipeline = pipeline

      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        if (None in transform_node.outputs and
            transform_node.outputs[None] in output_map):
          output_replacements[transform_node] = (
              output_map[transform_node.outputs[None]])

        replace_input = False
        for input in transform_node.inputs:
          if input in output_map:
            replace_input = True
            break

        if replace_input:
          new_input = [
              input if not input in output_map else output_map[input]
              for input in transform_node.inputs]
          input_replacements[transform_node] = new_input

    self.visit(InputOutputUpdater(self))

    # Apply the recorded rewires only after traversal has finished.
    for transform in output_replacements:
      transform.replace_output(output_replacements[transform])

    for transform in input_replacements:
      transform.inputs = input_replacements[transform]

  def _check_replacement(self, override):
    # Asserts that no transform matching the override remains in the DAG.
    matcher = override.get_matcher()

    class ReplacementValidator(PipelineVisitor):
      def visit_transform(self, transform_node):
        if matcher(transform_node):
          raise RuntimeError('Transform node %r was not replaced as expected.',
                             transform_node)

    self.visit(ReplacementValidator())

  def replace_all(self, replacements):
    """ Dynamically replaces PTransforms in the currently populated hierarchy.

    Currently this only works for replacements where input and output types
    are exactly the same.

    TODO: Update this to also work for transform overrides where input and
    output types are different.

    Args:
      replacements (List[~apache_beam.pipeline.PTransformOverride]): a list of
        :class:`~apache_beam.pipeline.PTransformOverride` objects.
    """
    for override in replacements:
      assert isinstance(override, PTransformOverride)
      self._replace(override)

    # Checking if the PTransforms have been successfully replaced. This will
    # result in a failure if a PTransform that was replaced in a given override
    # gets re-added in a subsequent override. This is not allowed and ordering
    # of PTransformOverride objects in 'replacements' is important.
    for override in replacements:
      self._check_replacement(override)

  def run(self, test_runner_api=True):
    """Runs the pipeline. Returns whatever our runner returns after running."""
    # When possible, invoke a round trip through the runner API.
    if test_runner_api and self._verify_runner_api_compatible():
      return Pipeline.from_runner_api(
          self.to_runner_api(), self.runner, self._options).run(False)

    if self._options.view_as(SetupOptions).save_main_session:
      # If this option is chosen, verify we can pickle the main session early.
      tmpdir = tempfile.mkdtemp()
      try:
        pickler.dump_session(os.path.join(tmpdir, 'main_session.pickle'))
      finally:
        shutil.rmtree(tmpdir)
    return self.runner.run_pipeline(self)

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    # Run the pipeline on a clean context-manager exit; propagate exceptions
    # otherwise by doing nothing.
    if not exc_type:
      self.run().wait_until_finish()

  def visit(self, visitor):
    """Visits depth-first every node of a pipeline's DAG.

    Runner-internal implementation detail; no backwards-compatibility guarantees

    Args:
      visitor (~apache_beam.pipeline.PipelineVisitor):
        :class:`~apache_beam.pipeline.PipelineVisitor` object whose callbacks
        will be called for each node visited. See
        :class:`~apache_beam.pipeline.PipelineVisitor` comments.

    Raises:
      ~exceptions.TypeError: if node is specified and is not a
        :class:`~apache_beam.pvalue.PValue`.
      ~apache_beam.error.PipelineError: if node is specified and does not
        belong to this pipeline instance.
    """
    visited = set()
    self._root_transform().visit(visitor, self, visited)

  def apply(self, transform, pvalueish=None, label=None):
    """Applies a custom transform using the pvalueish specified.

    Args:
      transform (~apache_beam.transforms.ptransform.PTransform): the
        :class:`~apache_beam.transforms.ptransform.PTransform` to apply.
      pvalueish (~apache_beam.pvalue.PCollection): the input for the
        :class:`~apache_beam.transforms.ptransform.PTransform` (typically a
        :class:`~apache_beam.pvalue.PCollection`).
      label (str): label of the
        :class:`~apache_beam.transforms.ptransform.PTransform`.

    Raises:
      ~exceptions.TypeError: if the transform object extracted from the
        argument list is not a
        :class:`~apache_beam.transforms.ptransform.PTransform`.
      ~exceptions.RuntimeError: if the transform object was already applied to
        this pipeline and needs to be cloned in order to apply again.
    """
    if isinstance(transform, ptransform._NamedPTransform):
      return self.apply(transform.transform, pvalueish,
                        label or transform.label)

    if not isinstance(transform, ptransform.PTransform):
      raise TypeError("Expected a PTransform object, got %s" % transform)

    if label:
      # Fix self.label as it is inspected by some PTransform operations
      # (e.g. to produce error messages for type hint violations).
      try:
        old_label, transform.label = transform.label, label
        return self.apply(transform, pvalueish)
      finally:
        transform.label = old_label

    full_label = '/'.join([self._current_transform().full_label,
                           label or transform.label]).lstrip('/')
    if full_label in self.applied_labels:
      raise RuntimeError(
          'Transform "%s" does not have a stable unique label. '
          'This will prevent updating of pipelines. '
          'To apply a transform with a specified label write '
          'pvalue | "label" >> transform'
          % full_label)
    self.applied_labels.add(full_label)

    pvalueish, inputs = transform._extract_input_pvalues(pvalueish)
    try:
      inputs = tuple(inputs)
      for leaf_input in inputs:
        if not isinstance(leaf_input, pvalue.PValue):
          raise TypeError
    except TypeError:
      raise NotImplementedError(
          'Unable to extract PValue inputs from %s; either %s does not accept '
          'inputs of this format, or it does not properly override '
          '_extract_input_pvalues' % (pvalueish, transform))

    current = AppliedPTransform(
        self._current_transform(), transform, full_label, inputs)
    self._current_transform().add_part(current)
    self.transforms_stack.append(current)

    type_options = self._options.view_as(TypeOptions)
    if type_options.pipeline_type_check:
      transform.type_check_inputs(pvalueish)

    pvalueish_result = self.runner.apply(transform, pvalueish)

    if type_options is not None and type_options.pipeline_type_check:
      transform.type_check_outputs(pvalueish_result)

    for result in ptransform.get_nested_pvalues(pvalueish_result):
      assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple))

      # Make sure we set the producer only for a leaf node in the transform DAG.
      # This way we preserve the last transform of a composite transform as
      # being the real producer of the result.
      if result.producer is None:
        result.producer = current

      # TODO(robertwb): Multi-input, multi-output inference.
      # TODO(robertwb): Ideally we'd do intersection here.
      if (type_options is not None and type_options.pipeline_type_check
          and isinstance(result, pvalue.PCollection)
          and not result.element_type):
        input_element_type = (
            inputs[0].element_type
            if len(inputs) == 1
            else typehints.Any)
        type_hints = transform.get_type_hints()
        declared_output_type = type_hints.simple_output_type(transform.label)
        if declared_output_type:
          input_types = type_hints.input_types
          if input_types and input_types[0]:
            declared_input_type = input_types[0][0]
            result.element_type = typehints.bind_type_variables(
                declared_output_type,
                typehints.match_type_variables(declared_input_type,
                                               input_element_type))
          else:
            result.element_type = declared_output_type
        else:
          result.element_type = transform.infer_output_type(input_element_type)

      assert isinstance(result.producer.inputs, tuple)
      current.add_output(result)

    if (type_options is not None and
        type_options.type_check_strictness == 'ALL_REQUIRED' and
        transform.get_type_hints().output_types is None):
      ptransform_name = '%s(%s)' % (transform.__class__.__name__, full_label)
      raise TypeCheckError('Pipeline type checking is enabled, however no '
                           'output type-hint was found for the '
                           'PTransform %s' % ptransform_name)

    current.update_input_refcounts()
    self.transforms_stack.pop()
    return pvalueish_result

  def __reduce__(self):
    # Some transforms contain a reference to their enclosing pipeline,
    # which in turn reference all other transforms (resulting in quadratic
    # time/space to pickle each transform individually). As we don't
    # require pickled pipelines to be executable, break the chain here.
    return str, ('Pickled pipeline stub.',)

  def _verify_runner_api_compatible(self):
    # Returns True iff this pipeline survives a runner-API round trip:
    # all transforms picklable and no PDone values in the DAG.
    if self._options.view_as(TypeOptions).runtime_type_check:
      # This option is incompatible with the runner API as it requires
      # the runner to inspect non-serialized hints on the transform
      # itself.
      return False

    class Visitor(PipelineVisitor):  # pylint: disable=used-before-assignment
      ok = True  # Really a nonlocal.

      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        try:
          # Transforms must be picklable.
          pickler.loads(pickler.dumps(transform_node.transform,
                                      enable_trace=False),
                        enable_trace=False)
        except Exception:
          Visitor.ok = False

      def visit_value(self, value, _):
        if isinstance(value, pvalue.PDone):
          Visitor.ok = False

    self.visit(Visitor())
    return Visitor.ok

  def to_runner_api(self, return_context=False):
    """For internal use only; no backwards-compatibility guarantees."""
    from apache_beam.runners import pipeline_context
    from apache_beam.portability.api import beam_runner_api_pb2
    context = pipeline_context.PipelineContext()
    # Mutates context; placing inline would force dependence on
    # argument evaluation order.
    root_transform_id = context.transforms.get_id(self._root_transform())
    proto = beam_runner_api_pb2.Pipeline(
        root_transform_ids=[root_transform_id],
        components=context.to_runner_api())
    if return_context:
      return proto, context
    else:
      return proto

  @staticmethod
  def from_runner_api(proto, runner, options, return_context=False):
    """For internal use only; no backwards-compatibility guarantees."""
    p = Pipeline(runner=runner, options=options)
    from apache_beam.runners import pipeline_context
    context = pipeline_context.PipelineContext(proto.components)
    root_transform_id, = proto.root_transform_ids
    p.transforms_stack = [
        context.transforms.get_by_id(root_transform_id)]
    # TODO(robertwb): These are only needed to continue construction. Omit?
    p.applied_labels = set([
        t.unique_name for t in proto.components.transforms.values()])
    for id in proto.components.pcollections:
      pcollection = context.pcollections.get_by_id(id)
      pcollection.pipeline = p
      if not pcollection.producer:
        raise ValueError('No producer for %s' % id)

    # Inject PBegin input where necessary.
    from apache_beam.io.iobase import Read
    from apache_beam.transforms.core import Create
    has_pbegin = [Read, Create]
    for id in proto.components.transforms:
      transform = context.transforms.get_by_id(id)
      if not transform.inputs and transform.transform.__class__ in has_pbegin:
        transform.inputs = (pvalue.PBegin(p),)
    if return_context:
      return p, context
    else:
      return p
class PipelineVisitor(object):
  """For internal use only; no backwards-compatibility guarantees.

  Visitor base class for depth-first traversal of a pipeline's transform DAG
  (used internally by Pipeline for bookkeeping). Subclasses override any of
  the callbacks below; every default implementation is a no-op.
  """

  def visit_value(self, value, producer_node):
    """Called for each PValue (typically a PCollection) in the DAG.

    Args:
      value: the PValue being visited.
      producer_node: the AppliedPTransform whose transform produced ``value``.
    """
    pass

  def visit_transform(self, transform_node):
    """Called for each primitive (leaf) transform node in the DAG."""
    pass

  def enter_composite_transform(self, transform_node):
    """Called before traversing the parts of a composite transform node."""
    pass

  def leave_composite_transform(self, transform_node):
    """Called after traversing the parts of a composite transform node."""
    pass
class AppliedPTransform(object):
  """For internal use only; no backwards-compatibility guarantees.

  A transform node representing an instance of applying a PTransform
  (used internally by Pipeline for bookeeping purposes).
  """

  def __init__(self, parent, transform, full_label, inputs):
    # Enclosing AppliedPTransform (None for the root node).
    self.parent = parent
    # The PTransform instance being applied (None for the root node).
    self.transform = transform
    # Note that we want the PipelineVisitor classes to use the full_label,
    # inputs, side_inputs, and outputs fields from this instance instead of the
    # ones of the PTransform instance associated with it. Doing this permits
    # reusing PTransform instances in different contexts (apply() calls) without
    # any interference. This is particularly useful for composite transforms.
    self.full_label = full_label
    self.inputs = inputs or ()
    self.side_inputs = () if transform is None else tuple(transform.side_inputs)
    self.outputs = {}
    self.parts = []

    # Per tag refcount dictionary for PValues for which this node is a
    # root producer.
    self.refcounts = collections.defaultdict(int)

  def __repr__(self):
    return "%s(%s, %s)" % (self.__class__.__name__, self.full_label,
                           type(self.transform).__name__)

  def update_input_refcounts(self):
    """Increment refcounts for all transforms providing inputs."""

    def real_producer(pv):
      # Walk down to the last (leaf) part, which is the actual producer.
      real = pv.producer
      while real.parts:
        real = real.parts[-1]
      return real

    # Only leaf (primitive) nodes count as consumers of their inputs.
    if not self.is_composite():
      for main_input in self.inputs:
        if not isinstance(main_input, pvalue.PBegin):
          real_producer(main_input).refcounts[main_input.tag] += 1

      for side_input in self.side_inputs:
        real_producer(side_input.pvalue).refcounts[side_input.pvalue.tag] += 1

  def replace_output(self, output, tag=None):
    """Replaces the output defined by the given tag with the given output.

    Args:
      output: replacement output
      tag: tag of the output to be replaced.
    """
    if isinstance(output, pvalue.DoOutputsTuple):
      self.replace_output(output[output._main_tag])
    elif isinstance(output, pvalue.PValue):
      self.outputs[tag] = output
    else:
      raise TypeError("Unexpected output type: %s" % output)

  def add_output(self, output, tag=None):
    """Registers an output PValue under the given tag."""
    if isinstance(output, pvalue.DoOutputsTuple):
      self.add_output(output[output._main_tag])
    elif isinstance(output, pvalue.PValue):
      # TODO(BEAM-1833): Require tags when calling this method.
      if tag is None and None in self.outputs:
        tag = len(self.outputs)
      assert tag not in self.outputs
      self.outputs[tag] = output
    else:
      raise TypeError("Unexpected output type: %s" % output)

  def add_part(self, part):
    """Appends a child AppliedPTransform, making this node composite."""
    assert isinstance(part, AppliedPTransform)
    self.parts.append(part)

  def is_composite(self):
    """Returns whether this is a composite transform.

    A composite transform has parts (inner transforms) or isn't the
    producer for any of its outputs. (An example of a transform that
    is not a producer is one that returns its inputs instead.)
    """
    return bool(self.parts) or all(
        pval.producer is not self for pval in self.outputs.values())

  def visit(self, visitor, pipeline, visited):
    """Visits all nodes reachable from the current node."""

    for pval in self.inputs:
      if pval not in visited and not isinstance(pval, pvalue.PBegin):
        assert pval.producer is not None
        pval.producer.visit(visitor, pipeline, visited)
        # The value should be visited now since we visit outputs too.
        assert pval in visited, pval

    # Visit side inputs.
    for pval in self.side_inputs:
      if isinstance(pval, pvalue.AsSideInput) and pval.pvalue not in visited:
        pval = pval.pvalue  # Unpack marker-object-wrapped pvalue.
        assert pval.producer is not None
        pval.producer.visit(visitor, pipeline, visited)
        # The value should be visited now since we visit outputs too.
        assert pval in visited
        # TODO(silviuc): Is there a way to signal that we are visiting a side
        # value? The issue is that the same PValue can be reachable through
        # multiple paths and therefore it is not guaranteed that the value
        # will be visited as a side value.

    # Visit a composite or primitive transform.
    if self.is_composite():
      visitor.enter_composite_transform(self)
      for part in self.parts:
        part.visit(visitor, pipeline, visited)
      visitor.leave_composite_transform(self)
    else:
      visitor.visit_transform(self)

    # Visit the outputs (one or more). It is essential to mark as visited the
    # tagged PCollections of the DoOutputsTuple object. A tagged PCollection is
    # connected directly with its producer (a multi-output ParDo), but the
    # output of such a transform is the containing DoOutputsTuple, not the
    # PCollection inside it. Without the code below a tagged PCollection will
    # not be marked as visited while visiting its producer.
    for pval in self.outputs.values():
      if isinstance(pval, pvalue.DoOutputsTuple):
        pvals = (v for v in pval)
      else:
        pvals = (pval,)
      for v in pvals:
        if v not in visited:
          visited.add(v)
          visitor.visit_value(v, self)

  def named_inputs(self):
    """Returns a dict of this node's PCollection inputs keyed by name."""
    # TODO(BEAM-1833): Push names up into the sdk construction.
    main_inputs = {str(ix): input
                   for ix, input in enumerate(self.inputs)
                   if isinstance(input, pvalue.PCollection)}
    # Side-input keys get a 'side' prefix; is_side_input() below relies on it.
    side_inputs = {'side%s' % ix: si.pvalue
                   for ix, si in enumerate(self.side_inputs)}
    return dict(main_inputs, **side_inputs)

  def named_outputs(self):
    """Returns a dict of this node's PCollection outputs keyed by tag."""
    return {str(tag): output for tag, output in self.outputs.items()
            if isinstance(output, pvalue.PCollection)}

  def to_runner_api(self, context):
    """Serializes this node to a runner-API PTransform proto."""
    from apache_beam.portability.api import beam_runner_api_pb2

    def transform_to_runner_api(transform, context):
      # The root node has no transform; serialize it as an empty spec.
      if transform is None:
        return None
      else:
        return transform.to_runner_api(context)
    return beam_runner_api_pb2.PTransform(
        unique_name=self.full_label,
        spec=transform_to_runner_api(self.transform, context),
        subtransforms=[context.transforms.get_id(part, label=part.full_label)
                       for part in self.parts],
        inputs={tag: context.pcollections.get_id(pc)
                for tag, pc in self.named_inputs().items()},
        outputs={str(tag): context.pcollections.get_id(out)
                 for tag, out in self.named_outputs().items()},
        # TODO(BEAM-115): display_data
        display_data=None)

  @staticmethod
  def from_runner_api(proto, context):
    """Reconstructs an AppliedPTransform from a runner-API proto."""
    def is_side_input(tag):
      # As per named_inputs() above.
      return tag.startswith('side')
    main_inputs = [context.pcollections.get_by_id(id)
                   for tag, id in proto.inputs.items()
                   if not is_side_input(tag)]
    # Ordering is important here.
    indexed_side_inputs = [(int(tag[4:]), context.pcollections.get_by_id(id))
                           for tag, id in proto.inputs.items()
                           if is_side_input(tag)]
    side_inputs = [si for _, si in sorted(indexed_side_inputs)]
    result = AppliedPTransform(
        parent=None,
        transform=ptransform.PTransform.from_runner_api(proto.spec, context),
        full_label=proto.unique_name,
        inputs=main_inputs)
    if result.transform and result.transform.side_inputs:
      for si, pcoll in zip(result.transform.side_inputs, side_inputs):
        si.pvalue = pcoll
      result.side_inputs = tuple(result.transform.side_inputs)
    result.parts = [
        context.transforms.get_by_id(id) for id in proto.subtransforms]
    result.outputs = {
        None if tag == 'None' else tag: context.pcollections.get_by_id(id)
        for tag, id in proto.outputs.items()}
    # This annotation is expected by some runners.
    if proto.spec.urn == urns.PARDO_TRANSFORM:
      result.transform.output_tags = set(proto.outputs.keys()).difference(
          {'None'})
    if not result.parts:
      # Leaf node: it is the producer of its own (non-pass-through) outputs.
      for tag, pcoll_id in proto.outputs.items():
        if pcoll_id not in proto.inputs.values():
          pc = context.pcollections.get_by_id(pcoll_id)
          pc.producer = result
          pc.tag = None if tag == 'None' else tag
    result.update_input_refcounts()
    return result
class PTransformOverride(object):
  """For internal use only; no backwards-compatibility guarantees.

  Pairs a matcher with a replacement for overriding PTransforms.

  TODO: Update this to support cases where input and/or output types are
  different.
  """
  # NOTE(review): py2-style metaclass declaration; under Python 3 this line
  # has no effect, so @abc.abstractmethod is not enforced there.
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def get_matcher(self):
    """Return the matcher used to select transforms for this override.

    Returns:
      a callable that takes an AppliedPTransform as a parameter and returns a
      boolean as a result.
    """
    raise NotImplementedError

  @abc.abstractmethod
  def get_replacement_transform(self, ptransform):
    """Return the runner-specific replacement for a given PTransform.

    Args:
      ptransform: PTransform to be replaced.

    Returns:
      A PTransform that will be the replacement for the PTransform given as an
      argument.
    """
    # Returns a PTransformReplacement
    raise NotImplementedError
|
jbonofre/beam
|
sdks/python/apache_beam/pipeline.py
|
Python
|
apache-2.0
| 33,443
|
[
"VisIt"
] |
6aae54837eb836dce713ef984e3fa163335fc38c44fff051c83da35f4c710a4e
|
"""Ewald summation solver for 3D periodic cells.
"""
import numpy as np
from pydft.geometry import get_cell
def Eq(basis, n, cell=None, sigma=None):
    """Calculates the approximate total energy due to the nuclei using
    the Arias quick-and-dirty method.

    Args:
        basis (str): one of the *modules* in `pydft.bases` that
          implements the necessary operators.
        n (numpy.ndarray): density sampled at each of the points in
          real-space.
        cell: cell specification forwarded to :func:`get_cell`; `None`
          selects the globally configured cell.
        sigma: Gaussian width(s) for the self-energy correction; a single
          value or one per nucleus. `None` uses unit widths.
    """
    from importlib import import_module
    bmod = import_module("pydft.bases.{}".format(basis))
    geom = get_cell(cell)
    from pydft.solvers import poisson

    # Numerical Hartree-like term from the Poisson-solved potential.
    phi_n = poisson.phi(basis, n, geom)
    e_num = 0.5*np.real(np.dot(bmod.J(phi_n), bmod.O(bmod.J(n))))

    # Analytic self-interaction correction for each nucleus.
    if sigma is None:
        e_self = np.sum(geom.Z**2/(2*np.sqrt(np.pi)))
    else:
        if not isinstance(sigma, list):
            sigma = [sigma]*len(geom.Z)
        e_self = np.sum(geom.Z**2/(2*np.sqrt(np.pi))*(1./np.array(sigma)))

    return e_num - e_self
def E(cell=None, alpha=None, R=None, accuracy=1e-2):
    """Returns the total energy due to the nucleii
    electrostatic potential.

    Args:
        alpha (float): width parameter for the `erf` windowing
          function.
        R (float): maximum extent in real-space to consider for the short-ranged
          sum; defaults to one lattice parameter.
        accuracy (float): desired accuracy for the sum.
    """
    o = get_cell(cell)
    from itertools import product
    #First, construct a matrix of all the points likely to be within
    #the error function window.
    #Exclude the zero point in the list, since it is just the regular
    #point (no lattice vector summation).
    # NOTE(review): when `alpha` is supplied explicitly, R and K are never
    # assigned here, yet both are used below -- confirm callers always pass
    # alpha=None, otherwise this raises a NameError/UnboundLocalError.
    if alpha is None:
        if R is None:
            R = 0.65*o.vol**(1./3)
        p = np.abs(np.log(accuracy))
        K = 2*p/R
        alpha = K/np.sqrt(p)/2
    nmax=int(np.ceil(np.abs(R/np.linalg.norm(np.dot(o.R, [1,1,1])))))+1
    ni = list(range(nmax)) + list(range(-nmax+1, 0))
    # Drop the first entry (0,0,0): the central cell is handled separately.
    npts = np.array(list(product(ni, repeat=3)))[1:]
    n = np.dot(o.R, npts.T).T
    kmax = int(np.ceil(np.abs(K/np.linalg.norm(np.dot(o.K, [1,1,1])))*2*np.pi))+1
    ki = list(range(kmax)) + list(range(-kmax+1, 0))
    kpts = np.array(list(product(ki, repeat=3)))[1:]
    k = np.dot(o.K, kpts.T).T
    #First, we calculate the short-ranged contributions for the sum
    #that converges quickly in real space.
    Fs = 0.
    from scipy.special import erfc
    for i in range(o.X.shape[0]):
        for j in range(o.X.shape[0]):
            rij = o.X[i,:] - o.X[j,:]
            if i != j:
                #Handle the atom in the central cell explicitly if it is
                #not on the point we are actually looking.
                absr = np.linalg.norm(rij)
                Fs += o.Z[i]*o.Z[j]*erfc(alpha*absr)/absr
            # Periodic images: i == j contributes too, since n + rij != 0
            # for every non-zero lattice vector n.
            nr = np.linalg.norm(n + rij, axis=1)
            Fs += o.Z[i]*o.Z[j]*np.sum(erfc(alpha*nr)/nr)
    #Next, compute the long range sum. The Fourier transform using the
    #Gaussian charge trick (erf window) has already been calculated,
    #so we can just use it directly.
    Fl = 0.
    k2 = np.linalg.norm(k, axis=1)
    absk = np.exp(-(np.pi*k2/alpha)**2)
    for i in range(o.X.shape[0]):
        for j in range(o.X.shape[0]):
            rij = o.X[i,:] - o.X[j,:]
            ekr = np.exp(2*np.pi*1j*np.dot(k, rij))
            Fl += o.Z[i]*o.Z[j]*np.sum(absk*ekr/k2**2)
    #Round off any random complex pieces that showed up.
    coeff = 1./(2*np.pi*o.vol)
    return Fs/2. + np.real(Fl)*coeff - alpha/(2*np.sqrt(np.pi))*np.dot(o.Z, o.Z)
|
rosenbrockc/dft
|
pydft/solvers/ewald.py
|
Python
|
mit
| 3,603
|
[
"Gaussian"
] |
9196ba9a9bdf04c8989de42ce8d16c0aeea26bbd2f4ae423e492f08fa7eec83d
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.model import SuiteVisitor
class Merger(SuiteVisitor):
    """Visitor that merges a re-executed result into the original result.

    Suites are matched by name while walking both trees in parallel; a test
    present in both trees is replaced by its re-executed version, and suites
    or tests found only in the merged output are appended with a message
    explaining their origin.
    """

    def __init__(self, result):
        self.root = result.suite
        self.current = None

    def merge(self, merged):
        merged.suite.visit(self)

    def start_suite(self, suite):
        try:
            self.current = self._find_suite(self.current, suite.name)
        except IndexError:
            # No matching suite in the original tree: adopt this one as-is
            # and stop visiting its children.
            suite.message = self._create_add_message(suite, test=False)
            self.current.suites.append(suite)
            return False

    def _find_suite(self, parent, name):
        if not parent:
            match = self._find_root(name)
        else:
            match = self._find(parent.suites, name)
        # Timing of a merged suite is no longer meaningful.
        match.starttime = match.endtime = None
        return match

    def _find_root(self, name):
        if self.root.name != name:
            raise DataError("Cannot merge outputs containing different root "
                            "suites. Original suite is '%s' and merged is '%s'."
                            % (self.root.name, name))
        return self.root

    def _find(self, items, name):
        for candidate in items:
            if candidate.name == name:
                return candidate
        raise IndexError

    def end_suite(self, suite):
        self.current = self.current.parent

    def visit_test(self, test):
        try:
            previous = self._find(self.current.tests, test.name)
        except IndexError:
            test.message = self._create_add_message(test)
            self.current.tests.append(test)
            return
        test.message = self._create_merge_message(test, previous)
        self.current.tests[self.current.tests.index(previous)] = test

    def _create_add_message(self, item, test=True):
        prefix = '%s added from merged output.' % ('Test' if test else 'Suite')
        if item.message:
            return '\n'.join([prefix, '- - -', item.message])
        return prefix

    def _create_merge_message(self, new, old):
        parts = ['Re-executed test has been merged.',
                 '- - -',
                 'New status: %s' % new.status,
                 'New message: %s' % new.message,
                 '- - -',
                 'Old status: %s' % old.status,
                 'Old message: %s' % old.message]
        return '\n'.join(parts)
|
eric-stanley/robotframework
|
src/robot/result/merger.py
|
Python
|
apache-2.0
| 2,998
|
[
"VisIt"
] |
30d67b14b46ec784616bab6324838b3bf10ac970a0cc410f8614acfb852bd96a
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.plugins.custom.lib.customtab` module contains the settings tab
for the Custom Slides plugin, which is inserted into the configuration dialog.
"""
from PyQt4 import QtCore, QtGui
from openlp.core.lib import SettingsTab, Settings, translate
class CustomTab(SettingsTab):
    """
    CustomTab is the Custom settings tab in the settings dialog.
    """
    def __init__(self, parent, title, visible_title, icon_path):
        SettingsTab.__init__(self, parent, title, visible_title, icon_path)

    def setupUi(self):
        """
        Build the tab's widgets: a "custom mode" group box in the left column
        holding two check boxes, with their state-change signals wired to the
        handlers below.
        """
        self.setObjectName(u'CustomTab')
        SettingsTab.setupUi(self)
        self.customModeGroupBox = QtGui.QGroupBox(self.leftColumn)
        self.customModeGroupBox.setObjectName(u'customModeGroupBox')
        self.customModeLayout = QtGui.QFormLayout(self.customModeGroupBox)
        self.customModeLayout.setObjectName(u'customModeLayout')
        self.displayFooterCheckBox = QtGui.QCheckBox(self.customModeGroupBox)
        self.displayFooterCheckBox.setObjectName(u'displayFooterCheckBox')
        self.customModeLayout.addRow(self.displayFooterCheckBox)
        self.add_from_service_checkbox = QtGui.QCheckBox(self.customModeGroupBox)
        self.add_from_service_checkbox.setObjectName(u'add_from_service_checkbox')
        self.customModeLayout.addRow(self.add_from_service_checkbox)
        self.leftLayout.addWidget(self.customModeGroupBox)
        self.leftLayout.addStretch()
        self.rightLayout.addStretch()
        # Old-style (string-based) PyQt4 signal connections.
        QtCore.QObject.connect(self.displayFooterCheckBox, QtCore.SIGNAL(u'stateChanged(int)'),
            self.onDisplayFooterCheckBoxChanged)
        QtCore.QObject.connect(self.add_from_service_checkbox, QtCore.SIGNAL(u'stateChanged(int)'),
            self.on_add_from_service_check_box_changed)

    def retranslateUi(self):
        """Set the user-visible (translated) strings on the tab's widgets."""
        self.customModeGroupBox.setTitle(translate('CustomPlugin.CustomTab', 'Custom Display'))
        self.displayFooterCheckBox.setText(translate('CustomPlugin.CustomTab', 'Display footer'))
        self.add_from_service_checkbox.setText(translate('CustomPlugin.CustomTab',
            'Import missing custom slides from service files'))

    def onDisplayFooterCheckBoxChanged(self, check_state):
        """
        Toggle the setting for displaying the footer.
        """
        self.displayFooter = False
        # we have a set value convert to True/False
        if check_state == QtCore.Qt.Checked:
            self.displayFooter = True

    def on_add_from_service_check_box_changed(self, check_state):
        # Remember whether missing custom slides should be imported from
        # service files; persisted by save().
        self.update_load = (check_state == QtCore.Qt.Checked)

    def load(self):
        """Read this tab's values from the persisted settings section."""
        settings = Settings()
        settings.beginGroup(self.settingsSection)
        self.displayFooter = settings.value(u'display footer')
        self.update_load = settings.value(u'add custom from service')
        self.displayFooterCheckBox.setChecked(self.displayFooter)
        self.add_from_service_checkbox.setChecked(self.update_load)
        settings.endGroup()

    def save(self):
        """Write this tab's values back to the persisted settings section."""
        settings = Settings()
        settings.beginGroup(self.settingsSection)
        settings.setValue(u'display footer', self.displayFooter)
        settings.setValue(u'add custom from service', self.update_load)
        settings.endGroup()
|
marmyshev/transitions
|
openlp/plugins/custom/lib/customtab.py
|
Python
|
gpl-2.0
| 5,354
|
[
"Brian"
] |
cb507a3536f539ad143c2f5349b0a0e93f783cf90aae264ba447d54e80263c3c
|
"""
Point quadtree
The class PointQuadTree is currently just a wrapper around
some of the functions developed in the GIS Algorithms book.
Contact:
Ningchuan Xiao
The Ohio State University
Columbus, OH
"""
__author__ = "Ningchuan Xiao <ncxiao@gmail.com>"
__all__ = ['pointquadtree']
INF = float('inf')
from cgl.point import Point
from cgl.kdtree import update_neighbors
class PQuadTreeNode():
    """A single node of a point quadtree: one stored point plus links to the
    four quadrant children (nw/ne/se/sw), each of which may be None."""

    def __init__(self, point, nw=None, ne=None, se=None, sw=None):
        self.point = point
        self.nw = nw
        self.ne = ne
        self.se = se
        self.sw = sw

    def __repr__(self):
        return str(self.point)

    def is_leaf(self):
        """True when the node has no children in any quadrant."""
        return all(child is None
                   for child in (self.nw, self.ne, self.se, self.sw))
class pointquadtree():
    """Thin object wrapper around the module-level point-quadtree routines."""

    def __init__(self, points):
        self.root = PQuadTreeNode(point=points[0])
        for extra in points[1:]:
            self.insert_pqtree(extra)

    def insert_pqtree(self, p):
        # Find the would-be parent, then hang a new leaf off the quadrant
        # of that parent in which p falls.
        parent = search_pqtree(self.root, p, False)
        west = p.x < parent.point.x
        south = p.y < parent.point.y
        quadrant = ('s' if south else 'n') + ('w' if west else 'e')
        setattr(parent, quadrant, PQuadTreeNode(point=p))

    def search_pqtree(self, p, is_find_only=True):
        return search_pqtree(self.root, p, is_find_only)

    def range_query(self, p, r):
        return range_query(self.root, p, r)

    def nearest_neighbor_query(self, p, n=1):
        return pq_nearest_neighbor_query(self.root, p, n)
def search_pqtree(q, p, is_find_only):
    """Descend from node ``q`` looking for point ``p``.

    With ``is_find_only`` true, return the node holding ``p`` (or None when
    absent).  With it false, return the node that would become the parent of
    ``p`` on insertion (None when ``p`` is already stored).
    """
    if q is None:
        return None
    if q.point == p:
        return q if is_find_only else None
    # Quadrant encoding matches pqcompare: east adds 1, north adds 2,
    # indexing into (sw, se, nw, ne).
    east = 1 if p.x >= q.point.x else 0
    north = 1 if p.y >= q.point.y else 0
    child = (q.sw, q.se, q.nw, q.ne)[east + 2*north]
    if child is None and not is_find_only:
        return q
    return search_pqtree(child, p, is_find_only)
def range_query(t, p, r):
    """
    Circular range query

    Collects every point in the subtree rooted at ``t`` lying within
    distance ``r`` of the query point ``p``.
    """
    def rquery(t, p, r, found):
        # Recursive worker: prunes subtrees whose quadrants cannot intersect
        # the bounding square [p.x-r, p.x+r] x [p.y-r, p.y+r] of the circle.
        if t is None:
            return
        x, y = t.point.x, t.point.y
        xmin, xmax = p.x-r, p.x+r
        ymin, ymax = p.y-r, p.y+r
        if x<xmin and y<ymin:
            # Node is south-west of the window: only its NE quadrant can hit.
            rquery(t.ne, p, r, found)
            return
        elif x<xmin and y>ymax:
            # Node is north-west of the window: only its SE quadrant can hit.
            rquery(t.se, p, r, found)
            return
        elif x>xmax and y>ymax:
            # Node is north-east of the window: only its SW quadrant can hit.
            rquery(t.sw, p, r, found)
            return
        elif x>xmax and y<ymin:
            # Node is south-east of the window: only its NW quadrant can hit.
            rquery(t.nw, p, r, found)
            return
        else:
            # Node straddles the window in at least one axis.
            if x < xmin:
                rquery(t.ne, p, r, found) # right points only
                rquery(t.se, p, r, found)
                return
            if y < ymin:
                rquery(t.ne, p, r, found) # above points only
                rquery(t.nw, p, r, found)
                return
            if x > xmax:
                rquery(t.nw, p, r, found) # left points only
                rquery(t.sw, p, r, found)
                return
            if y > ymax:
                rquery(t.se, p, r, found) # below points only
                rquery(t.sw, p, r, found)
                return
            # Node lies inside the window: test the exact circular distance
            # and recurse into all four quadrants.
            if p.distance(t.point) <= r:
                found.append(t.point)
            rquery(t.nw, p, r, found)
            rquery(t.ne, p, r, found)
            rquery(t.se, p, r, found)
            rquery(t.sw, p, r, found)
            return
    found = []
    if t is not None:
        rquery(t, p, r, found)
    return found
# returns the quad of t where p is located
# 0-NW, 1-NE, 2-SE, 3-SW
def pqcompare(t, p):
    """Return the quadrant index of node ``t`` containing point ``p``:
    0 = NW, 1 = NE, 2 = SE, 3 = SW (>= comparisons count as east/north)."""
    east = p.x >= t.point.x
    north = p.y >= t.point.y
    if north:
        return 1 if east else 0
    return 2 if east else 3
def pq_nnquery(t, p, n, found, pqmaxdist=INF):
    """Recursive helper: gather candidate nearest neighbors of ``p`` from the
    subtree rooted at ``t`` into ``found`` via ``update_neighbors``.

    NOTE(review): the rebinding of the local ``pqmaxdist`` after a recursive
    call is not visible to the caller (floats are passed by value), so
    sibling recursions may prune with a stale bound -- TODO confirm the
    intended behavior against cgl.kdtree.update_neighbors.
    """
    if t is None:
        return
    if t.is_leaf():
        pqmaxdist = update_neighbors(t.point, p, found, n)
        return
    # Descend first into the quadrant that contains p.
    quad_index = pqcompare(t, p)
    quads = [t.nw, t.ne, t.se, t.sw]
    pq_nnquery(quads[quad_index], p, n, found, pqmaxdist)
    pqmaxdist = update_neighbors(t.point, p, found, n)
    # check if the circle of pqmaxdist overlap with other quads
    for i in range(4):
        if i != quad_index:
            if abs(t.point.x-p.x) < pqmaxdist or abs(t.point.y-p.y) < pqmaxdist:
                pq_nnquery(quads[i], p, n, found, pqmaxdist)
    return
def pq_nearest_neighbor_query(t, p, n=1):
    """Return (at most) the ``n`` stored points nearest to ``p`` in the
    quadtree rooted at ``t``."""
    best = []
    pq_nnquery(t, p, n, best)
    return best[:n]
|
compgeog/cgl
|
cgl/pointquadtree.py
|
Python
|
gpl-3.0
| 4,627
|
[
"COLUMBUS"
] |
066eb0b26a1d71b28db7a23126b3d48a90b6f508ca23d503c31810e9a3922b4e
|
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'filter_dict', 'band_dict', 'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk']
class BadCoefficients(UserWarning):
    # Warning category used by this module for suspect filter coefficients.
    pass
# Deliberately shadow the builtin ``abs`` with numpy's elementwise
# ``absolute`` so the array expressions below work unchanged.
abs = absolute
def findfreqs(num, den, N):
    """
    Find an array of frequencies for computing the response of a filter.

    Parameters
    ----------
    num, den : array_like, 1-D
        Numerator and denominator polynomial coefficients of the transfer
        function, highest degree first.
    N : int
        The length of the array to be computed.

    Returns
    -------
    w : (N,) ndarray
        A 1-D array of frequencies, logarithmically spaced.

    Examples
    --------
    Nine frequencies spanning the "interesting part" of the response of
    H(s) = s / (s^2 + 8s + 25):

    >>> findfreqs([1, 0], [1, 8, 25], N=9)
    array([  1.00000000e-02,   3.16227766e-02,   1.00000000e-01,
             3.16227766e-01,   1.00000000e+00,   3.16227766e+00,
             1.00000000e+01,   3.16227766e+01,   1.00000000e+02])
    """
    poles = atleast_1d(roots(den)) + 0j
    zeros_ = atleast_1d(roots(num)) + 0j
    if len(poles) == 0:
        poles = atleast_1d(-1000) + 0j
    # Keep upper-half-plane poles and (sane-magnitude) upper-half-plane zeros.
    features = np.concatenate(
        (np.compress(poles.imag >= 0, poles, axis=-1),
         np.compress((abs(zeros_) < 1e5) & (zeros_.imag >= 0), zeros_,
                     axis=-1)))
    at_origin = abs(features) < 1e-10
    upper = np.around(np.log10(np.max(3 * abs(features.real + at_origin) +
                                      1.5 * features.imag)) + 0.5)
    lower = np.around(np.log10(0.1 * np.min(abs(real(features + at_origin)) +
                                            2 * features.imag)) - 0.5)
    return logspace(lower, upper, N)
def freqs(b, a, worN=None, plot=None):
    """
    Compute frequency response of analog filter.

    Evaluates H(w) = B(jw)/A(jw) where `b` and `a` hold the numerator and
    denominator polynomial coefficients (highest degree first).

    Parameters
    ----------
    b : ndarray
        Numerator of a linear filter.
    a : ndarray
        Denominator of a linear filter.
    worN : {None, int}, optional
        If None, compute at 200 frequencies around the interesting parts of
        the response curve (determined by pole-zero locations).  If a single
        integer, compute at that many frequencies.  Otherwise, compute the
        response at the angular frequencies (e.g. rad/s) given in `worN`.
    plot : callable, optional
        A callable taking two arguments; when given, the computed `w` and `h`
        are passed to it (useful for plotting inside `freqs`).

    Returns
    -------
    w : ndarray
        The angular frequencies at which h was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.

    Notes
    -----
    Passing Matplotlib's ``plot`` directly as the callable plots only the
    real part of the complex response; use ``lambda w, h: plot(w, abs(h))``
    to see the magnitude.
    """
    if worN is None:
        w = findfreqs(b, a, 200)
    elif isinstance(worN, int):
        w = findfreqs(b, a, worN)
    else:
        w = worN
    w = atleast_1d(w)
    jw = 1j * w
    h = polyval(b, jw) / polyval(a, jw)
    if plot is not None:
        plot(w, h)
    return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
    """
    Compute the frequency response of a digital filter.

    Evaluates H(e^jw) = B(e^jw)/A(e^jw) in negative powers of z, where `b`
    and `a` hold the numerator and denominator coefficients.

    Parameters
    ----------
    b : ndarray
        numerator of a linear filter
    a : ndarray
        denominator of a linear filter
    worN : {None, int, array_like}, optional
        If None (default), compute at 512 frequencies equally spaced around
        the unit circle.  If a single integer, compute at that many
        frequencies.  If array_like, compute the response at the given
        frequencies (in radians/sample).
    whole : bool, optional
        By default frequencies run from 0 to the Nyquist frequency, pi
        radians/sample (upper half of the unit circle).  When `whole` is
        True they run from 0 to 2*pi radians/sample.
    plot : callable
        A callable taking two arguments; when given, the computed `w` and
        `h` are passed to it (useful for plotting inside `freqz`).

    Returns
    -------
    w : ndarray
        The normalized frequencies at which h was computed, in
        radians/sample.
    h : ndarray
        The frequency response.

    Notes
    -----
    Passing Matplotlib's ``plot`` directly as the callable plots only the
    real part of the complex response; use ``lambda w, h: plot(w, abs(h))``
    to see the magnitude.
    """
    b, a = map(atleast_1d, (b, a))
    lastpoint = 2 * pi if whole else pi
    if worN is None or isinstance(worN, int):
        count = 512 if worN is None else worN
        w = np.linspace(0, lastpoint, count, endpoint=False)
    else:
        w = worN
    w = atleast_1d(w)
    zm1 = exp(-1j * w)
    h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
    if plot is not None:
        plot(w, h)
    return w, h
def _cplxreal(z, tol=None):
    """
    Split into complex and real parts, combining conjugate pairs.

    The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
    elements. Every complex element must be part of a complex-conjugate pair,
    which are combined into a single number (with positive imaginary part) in
    the output. Two complex numbers are considered a conjugate pair if their
    real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.

    Parameters
    ----------
    z : array_like
        Vector of complex numbers to be sorted and split
    tol : float, optional
        Relative tolerance for testing realness and conjugate equality.
        Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
        float64)

    Returns
    -------
    zc : ndarray
        Complex elements of `z`, with each pair represented by a single value
        having positive imaginary part, sorted first by real part, and then
        by magnitude of imaginary part. The pairs are averaged when combined
        to reduce error.
    zr : ndarray
        Real elements of `z` (those having imaginary part less than
        `tol` times their magnitude), sorted by value.

    Raises
    ------
    ValueError
        If there are any complex numbers in `z` for which a conjugate
        cannot be found.

    See Also
    --------
    _cplxpair

    Examples
    --------
    >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
    >>> zc, zr = _cplxreal(a)
    >>> print(zc)
    [ 1.+1.j  2.+1.j  2.+1.j  2.+2.j]
    >>> print(zr)
    [ 1.  3.  4.]
    """
    z = atleast_1d(z)
    if z.size == 0:
        return z, z
    elif z.ndim != 1:
        raise ValueError('_cplxreal only accepts 1D input')
    if tol is None:
        # Get tolerance from dtype of input
        tol = 100 * np.finfo((1.0 * z).dtype).eps
    # Sort by real part, magnitude of imaginary part (speed up further sorting)
    z = z[np.lexsort((abs(z.imag), z.real))]
    # Split reals from conjugate pairs
    real_indices = abs(z.imag) <= tol * abs(z)
    zr = z[real_indices].real
    if len(zr) == len(z):
        # Input is entirely real
        return array([]), zr
    # Split positive and negative halves of conjugates
    z = z[~real_indices]
    zp = z[z.imag > 0]
    zn = z[z.imag < 0]
    if len(zp) != len(zn):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')
    # Find runs of (approximately) the same real part.  After the lexsort
    # above, candidates for the same pair are adjacent within zp / zn.
    same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
    diffs = numpy.diff(concatenate(([0], same_real, [0])))
    run_starts = numpy.where(diffs > 0)[0]
    run_stops = numpy.where(diffs < 0)[0]
    # Sort each run by their imaginary parts (in place, so zp/zn stay aligned)
    for i in range(len(run_starts)):
        start = run_starts[i]
        stop = run_stops[i] + 1
        for chunk in (zp[start:stop], zn[start:stop]):
            chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
    # Check that negatives match positives
    if any(abs(zp - zn.conj()) > tol * abs(zn)):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')
    # Average out numerical inaccuracy in real vs imag parts of pairs
    zc = (zp + zn.conj()) / 2
    return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
    """Return zero, pole, gain (z, p, k) representation from a numerator,
    denominator representation of a linear filter.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients, highest degree first.
    a : array_like
        Denominator polynomial coefficients, highest degree first.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    If some values of `b` are too close to 0, they are removed by
    `normalize`; a BadCoefficients warning is emitted in that case.

    Coefficients are interpreted as positive, descending powers of the
    transfer-function variable ("controls" convention).  For equal orders
    `M == N` this coincides with the negative-powers DSP convention;
    otherwise convert to positive powers before calling.
    """
    b, a = normalize(b, a)
    # Make the denominator monic, then factor the gain out of the numerator.
    leading = a[0]
    b = (b + 0.0) / leading
    a = (a + 0.0) / leading
    k = b[0]
    b /= b[0]
    return roots(b), roots(a), k
def _real_if_conjugate_roots(coeffs, rts):
    """Drop the imaginary part of ``coeffs`` when the given roots are closed
    under conjugation, i.e. the polynomial is genuinely real.  (Copied logic
    from numpy.poly so we do not depend on a specific numpy version.)"""
    if not issubclass(coeffs.dtype.type, numpy.complexfloating):
        return coeffs
    rts = numpy.asarray(rts, complex)
    pos = numpy.compress(rts.imag > 0, rts)
    neg = numpy.conjugate(numpy.compress(rts.imag < 0, rts))
    if len(pos) == len(neg) and numpy.all(
            numpy.sort_complex(neg) == numpy.sort_complex(pos)):
        return coeffs.real.copy()
    return coeffs


def zpk2tf(z, p, k):
    """
    Return polynomial transfer function representation from zeros and poles

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.
    """
    z = atleast_1d(z)
    k = atleast_1d(k)
    if len(z.shape) > 1:
        # 2-D z: one polynomial per row, each scaled by the matching gain.
        proto = poly(z[0])
        b = zeros((z.shape[0], z.shape[1] + 1), proto.dtype.char)
        gains = [k[0]] * z.shape[0] if len(k) == 1 else k
        for row in range(z.shape[0]):
            b[row] = gains[row] * poly(z[row])
    else:
        b = k * poly(z)
    a = atleast_1d(poly(p))
    b = _real_if_conjugate_roots(b, z)
    a = _real_if_conjugate_roots(a, p)
    return b, a
def tf2sos(b, a, pairing='nearest'):
    """
    Return second-order sections from transfer function representation

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    pairing : {'nearest', 'keep_odd'}, optional
        Pole/zero pairing strategy; see `zpk2sos`.

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    zpk2sos, sosfilt

    Notes
    -----
    TF -> SOS goes through ZPK form, so it usually does not improve
    numerical precision; prefer designing in ZPK and converting with
    `zpk2sos` directly.

    .. versionadded:: 0.16.0
    """
    z, p, k = tf2zpk(b, a)
    return zpk2sos(z, p, k, pairing=pairing)
def sos2tf(sos):
    """
    Return a single transfer function from a series of second-order sections

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.

    Notes
    -----
    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    b, a = [1.], [1.]
    # The cascade's transfer function is the product of the biquads:
    # multiply numerators together and denominators together.
    for biquad in sos:
        b = np.polymul(b, biquad[:3])
        a = np.polymul(a, biquad[3:])
    return b, a
def sos2zpk(sos):
    """
    Return zeros, poles, and gain of a series of second-order sections

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    z = np.empty(sos.shape[0] * 2, np.complex128)
    p = np.empty(sos.shape[0] * 2, np.complex128)
    k = 1.
    # Each biquad contributes its zeros/poles into a 2-wide slice of the
    # output, and its gain multiplies the overall gain.
    for idx, section in enumerate(sos):
        sec_z, sec_p, sec_k = tf2zpk(section[:3], section[3:])
        z[2 * idx:2 * idx + 2] = sec_z
        p[2 * idx:2 * idx + 2] = sec_p
        k *= sec_k
    return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
    """
    Return second-order sections from zeros, poles, and gain of a system

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.
    pairing : {'nearest', 'keep_odd'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        See Notes below.

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    sosfilt

    Notes
    -----
    The algorithm used to convert ZPK to SOS format is designed to
    minimize errors due to numerical precision issues. The pairing
    algorithm attempts to minimize the peak gain of each biquadratic
    section. This is done by pairing poles with the nearest zeros, starting
    with the poles closest to the unit circle.

    *Algorithms*

    The current algorithms are designed specifically for use with digital
    filters. Although they can operate on analog filters, the results may
    be sub-optimal.

    The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
    algorithms are mostly shared. The ``nearest`` algorithm attempts to
    minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
    the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:

    As a pre-processing step, add poles or zeros to the origin as
    necessary to obtain the same number of poles and zeros for pairing.
    If ``pairing == 'nearest'`` and there are an odd number of poles,
    add an additional pole and a zero at the origin.

    The following steps are then iterated over until no more poles or
    zeros remain:

    1. Take the (next remaining) pole (complex or real) closest to the
       unit circle to begin a new filter section.

    2. If the pole is real and there are no other remaining real poles [#]_,
       add the closest real zero to the section and leave it as a first
       order section. Note that after this step we are guaranteed to be
       left with an even number of real poles, complex poles, real zeros,
       and complex zeros for subsequent pairing iterations.

    3. Else:

        1. If the pole is complex and the zero is the only remaining real
           zero*, then pair the pole with the *next* closest zero
           (guaranteed to be complex). This is necessary to ensure that
           there will be a real zero remaining to eventually create a
           first-order section (thus keeping the odd order).

        2. Else pair the pole with the closest remaining zero (complex or
           real).

        3. Proceed to complete the second-order section by adding another
           pole and zero to the current pole and zero in the section:

            1. If the current pole and zero are both complex, add their
               conjugates.

            2. Else if the pole is complex and the zero is real, add the
               conjugate pole and the next closest real zero.

            3. Else if the pole is real and the zero is complex, add the
               conjugate zero and the real pole closest to those zeros.

            4. Else (we must have a real pole and real zero) add the next
               real pole closest to the unit circle, and then add the real
               zero closest to that pole.

    .. [#] This conditional can only be met for specific odd-order inputs
       with the ``pairing == 'keep_odd'`` method.

    .. versionadded:: 0.16.0

    Examples
    --------
    Design a 6th order low-pass elliptic digital filter for a system with a
    sampling rate of 8000 Hz that has a pass-band corner frequency of
    1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
    the attenuation in the stop-band should be at least 90 dB.

    In the following call to `signal.ellip`, we could use ``output='sos'``,
    but for this example, we'll use ``output='zpk'``, and then convert to SOS
    format with `zpk2sos`:

    >>> from scipy import signal
    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')

    Now convert to SOS format.

    >>> sos = signal.zpk2sos(z, p, k)

    The coefficients of the numerators of the sections:

    >>> sos[:, :3]
    array([[ 0.0014154 ,  0.00248707,  0.0014154 ],
           [ 1.        ,  0.72965193,  1.        ],
           [ 1.        ,  0.17594966,  1.        ]])

    The symmetry in the coefficients occurs because all the zeros are on the
    unit circle.

    The coefficients of the denominators of the sections:

    >>> sos[:, 3:]
    array([[ 1.        , -1.32543251,  0.46989499],
           [ 1.        , -1.26117915,  0.6262586 ],
           [ 1.        , -1.25707217,  0.86199667]])

    The next example shows the effect of the `pairing` option.  We have a
    system with three poles and three zeros, so the SOS array will have
    shape (2, 6). The means there is, in effect, an extra pole and an extra
    zero at the origin in the SOS representation.

    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])

    With ``pairing='nearest'`` (the default), we obtain

    >>> signal.zpk2sos(z1, p1, 1)
    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])

    The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles
    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
    {0.8+0.1j, 0.8-0.1j}.  Note that the extra pole and zero at the origin
    have been assigned to different sections.

    With ``pairing='keep_odd'``, we obtain:

    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])

    The extra pole and zero at the origin are in the same section.
    The first section is, in effect, a first-order section.
    """
    # TODO in the near future:
    # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
    # 2. Make `decimate` use `sosfilt` instead of `lfilter`.
    # 3. Make sosfilt automatically simplify sections to first order
    # when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
    # 4. Further optimizations of the section ordering / pole-zero pairing.
    # See the wiki for other potential issues.
    valid_pairings = ['nearest', 'keep_odd']
    if pairing not in valid_pairings:
        raise ValueError('pairing must be one of %s, not %s'
                         % (valid_pairings, pairing))
    if len(z) == len(p) == 0:
        # Gain-only system: emit a single pass-through section scaled by k.
        return array([[k, 0., 0., 1., 0., 0.]])
    # ensure we have the same number of poles and zeros, and make copies
    p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
    z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
    n_sections = (max(len(p), len(z)) + 1) // 2
    sos = zeros((n_sections, 6))
    if len(p) % 2 == 1 and pairing == 'nearest':
        # Odd order with 'nearest' pairing: pad with a pole and a zero at
        # the origin so every section can be a full second-order section.
        p = np.concatenate((p, [0.]))
        z = np.concatenate((z, [0.]))
    assert len(p) == len(z)
    # Ensure we have complex conjugate pairs
    # (note that _cplxreal only gives us one element of each complex pair):
    z = np.concatenate(_cplxreal(z))
    p = np.concatenate(_cplxreal(p))
    p_sos = np.zeros((n_sections, 2), np.complex128)
    z_sos = np.zeros_like(p_sos)
    for si in range(n_sections):
        # Select the next "worst" pole
        # ("worst" = closest to the unit circle, i.e. highest peak gain).
        p1_idx = np.argmin(np.abs(1 - np.abs(p)))
        p1 = p[p1_idx]
        p = np.delete(p, p1_idx)
        # Pair that pole with a zero
        if np.isreal(p1) and np.isreal(p).sum() == 0:
            # Special case to set a first-order section
            z1_idx = _nearest_real_complex_idx(z, p1, 'real')
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            p2 = z2 = 0
        else:
            if not np.isreal(p1) and np.isreal(z).sum() == 1:
                # Special case to ensure we choose a complex zero to pair
                # with so later (setting up a first-order section)
                z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
                assert not np.isreal(z[z1_idx])
            else:
                # Pair the pole with the closest zero (real or complex)
                z1_idx = np.argmin(np.abs(p1 - z))
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            # Now that we have p1 and z1, figure out what p2 and z2 need to be
            if not np.isreal(p1):
                if not np.isreal(z1):  # complex pole, complex zero
                    p2 = p1.conj()
                    z2 = z1.conj()
                else:  # complex pole, real zero
                    p2 = p1.conj()
                    z2_idx = _nearest_real_complex_idx(z, p1, 'real')
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
            else:
                if not np.isreal(z1):  # real pole, complex zero
                    z2 = z1.conj()
                    p2_idx = _nearest_real_complex_idx(p, z1, 'real')
                    p2 = p[p2_idx]
                    assert np.isreal(p2)
                else:  # real pole, real zero
                    # pick the next "worst" pole to use
                    idx = np.where(np.isreal(p))[0]
                    assert len(idx) > 0
                    p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
                    p2 = p[p2_idx]
                    # find a real zero to match the added pole
                    assert np.isreal(p2)
                    z2_idx = _nearest_real_complex_idx(z, p2, 'real')
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
                p = np.delete(p, p2_idx)
        p_sos[si] = [p1, p2]
        z_sos[si] = [z1, z2]
    assert len(p) == len(z) == 0  # we've consumed all poles and zeros
    del p, z
    # Construct the system, reversing order so the "worst" are last
    p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
    z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
    gains = np.ones(n_sections)
    gains[0] = k
    for si in range(n_sections):
        x = zpk2tf(z_sos[si], p_sos[si], gains[si])
        sos[si] = np.concatenate(x)
    return sos
def normalize(b, a):
    """Normalize polynomial representation of a transfer function.

    Divides numerator and denominator by ``a[0]`` so the denominator is
    monic.  If leading values of `b` are too close to 0, they are removed;
    in that case, a BadCoefficients warning is emitted.
    """
    b, a = map(atleast_1d, (b, a))
    if a.ndim != 1:
        raise ValueError("Denominator polynomial must be rank-1 array.")
    if b.ndim > 2:
        raise ValueError("Numerator polynomial must be rank-1 or"
                         " rank-2 array.")
    if b.ndim == 1:
        # Promote to 2-D so single- and multi-output numerators share code.
        b = asarray([b], b.dtype.char)
    # Drop leading zeros from the denominator (always keep one coefficient).
    while a[0] == 0.0 and len(a) > 1:
        a = a[1:]
    num = b * (1.0) / a[0]
    den = a * (1.0) / a[0]
    if allclose(0, num[:, 0], atol=1e-14):
        warnings.warn("Badly conditioned filter coefficients (numerator): the "
                      "results may be meaningless", BadCoefficients)
    # Trim near-zero leading numerator coefficients, keeping at least one.
    while allclose(0, num[:, 0], atol=1e-14) and (num.shape[-1] > 1):
        num = num[:, 1:]
    if num.shape[0] == 1:
        # Collapse the helper 2-D view back to 1-D for the common case.
        num = num[0]
    return num, den
def lp2lp(b, a, wo=1.0):
    """
    Transform a lowpass filter prototype to a different cutoff frequency.

    Return an analog low-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency,
    in transfer function ('ba') representation.
    """
    a, b = map(atleast_1d, (a, b))
    try:
        wo = float(wo)
    except TypeError:
        # Allow a length-1 sequence for wo.
        wo = float(wo[0])
    den_len = len(a)
    num_len = len(b)
    longest = max((den_len, num_len))
    # Powers of wo aligned with the coefficients, highest order first.
    pwo = pow(wo, numpy.arange(longest - 1, -1, -1))
    start1 = max((num_len - den_len, 0))
    start2 = max((den_len - num_len, 0))
    # The substitution s -> s/wo scales each coefficient by a power of wo.
    b = b * pwo[start1] / pwo[start2:]
    a = a * pwo[start1] / pwo[start1:]
    return normalize(b, a)
def lp2hp(b, a, wo=1.0):
    """
    Transform a lowpass filter prototype to a highpass filter.

    Return an analog high-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency,
    in transfer function ('ba') representation.
    """
    a, b = map(atleast_1d, (a, b))
    try:
        wo = float(wo)
    except TypeError:
        # Allow a length-1 sequence for wo.
        wo = float(wo[0])
    den_len = len(a)
    num_len = len(b)
    longest = max((den_len, num_len))
    if wo != 1:
        pwo = pow(wo, numpy.arange(longest))
    else:
        # wo == 1 needs no scaling; keep the input dtype.
        pwo = numpy.ones(longest, b.dtype.char)
    # The substitution s -> wo/s reverses coefficient order and scales by
    # powers of wo; the shorter polynomial is zero-padded to the longer
    # one's length.
    if den_len >= num_len:
        outa = a[::-1] * pwo
        outb = resize(b, (den_len,))
        outb[num_len:] = 0.0
        outb[:num_len] = b[::-1] * pwo[:num_len]
    else:
        outb = b[::-1] * pwo
        outa = resize(a, (num_len,))
        outa[den_len:] = 0.0
        outa[:den_len] = a[::-1] * pwo[:den_len]
    return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.
    """
    a, b = map(atleast_1d, (a, b))
    deg_a = len(a) - 1
    deg_b = len(b) - 1
    artype = mintypecode((a, b))
    m = max([deg_b, deg_a])
    # Output polynomial degrees after the order-doubling substitution.
    num_deg = deg_b + m
    den_deg = deg_a + m
    bprime = numpy.zeros(num_deg + 1, artype)
    aprime = numpy.zeros(den_deg + 1, artype)
    wosq = wo * wo
    # Expand the substitution s -> (s**2 + wo**2) / (bw * s) term by term:
    # each input coefficient spreads over several output coefficients via
    # the binomial expansion of (s**2 + wo**2)**i.
    for j in range(num_deg + 1):
        acc = 0.0
        for i in range(0, deg_b + 1):
            for k in range(0, i + 1):
                if m - i + 2 * k == j:
                    acc += comb(i, k) * b[deg_b - i] * (wosq) ** (i - k) / bw ** i
        bprime[num_deg - j] = acc
    for j in range(den_deg + 1):
        acc = 0.0
        for i in range(0, deg_a + 1):
            for k in range(0, i + 1):
                if m - i + 2 * k == j:
                    acc += comb(i, k) * a[deg_a - i] * (wosq) ** (i - k) / bw ** i
        aprime[den_deg - j] = acc
    return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.
    """
    a, b = map(atleast_1d, (a, b))
    deg_a = len(a) - 1
    deg_b = len(b) - 1
    artype = mintypecode((a, b))
    m = max([deg_b, deg_a])
    # Both output polynomials have degree 2*m after the substitution.
    out_deg = m + m
    bprime = numpy.zeros(out_deg + 1, artype)
    aprime = numpy.zeros(out_deg + 1, artype)
    wosq = wo * wo
    # Expand the substitution s -> (bw * s) / (s**2 + wo**2) term by term:
    # each input coefficient spreads over several output coefficients via
    # the binomial expansion of (s**2 + wo**2)**(m - i).
    for j in range(out_deg + 1):
        acc = 0.0
        for i in range(0, deg_b + 1):
            for k in range(0, m - i + 1):
                if i + 2 * k == j:
                    acc += (comb(m - i, k) * b[deg_b - i] *
                            (wosq) ** (m - i - k) * bw ** i)
        bprime[out_deg - j] = acc
    for j in range(out_deg + 1):
        acc = 0.0
        for i in range(0, deg_a + 1):
            for k in range(0, m - i + 1):
                if i + 2 * k == j:
                    acc += (comb(m - i, k) * a[deg_a - i] *
                            (wosq) ** (m - i - k) * bw ** i)
        aprime[out_deg - j] = acc
    return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
    """Return a digital filter from an analog one using a bilinear transform.

    The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
    """
    fs = float(fs)
    a, b = map(atleast_1d, (a, b))
    deg_a = len(a) - 1
    deg_b = len(b) - 1
    artype = float
    m = max([deg_b, deg_a])
    # Both transformed polynomials have degree m.
    bprime = numpy.zeros(m + 1, artype)
    aprime = numpy.zeros(m + 1, artype)
    # Substituting s -> 2*fs*(z-1)/(z+1) and clearing denominators expands
    # each coefficient through the binomial products below.
    for j in range(m + 1):
        acc = 0.0
        for i in range(deg_b + 1):
            for k in range(i + 1):
                for l in range(m - i + 1):
                    if k + l == j:
                        acc += (comb(i, k) * comb(m - i, l) * b[deg_b - i] *
                                pow(2 * fs, i) * (-1) ** k)
        bprime[j] = real(acc)
    for j in range(m + 1):
        acc = 0.0
        for i in range(deg_a + 1):
            for k in range(i + 1):
                for l in range(m - i + 1):
                    if k + l == j:
                        acc += (comb(i, k) * comb(m - i, l) * a[deg_a - i] *
                                pow(2 * fs, i) * (-1) ** k)
        aprime[j] = real(acc)
    return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
    """Complete IIR digital and analog filter design.

    Given passband and stopband frequencies and gains, construct an analog
    or digital IIR filter of minimum order for a given basic type.  Return
    the output in numerator/denominator ('ba'), pole-zero ('zpk') or
    second-order sections ('sos') form.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters,
        these are normalized from 0 to 1, where 1 is the Nyquist frequency,
        pi radians/sample (`wp` and `ws` are thus in half-cycles / sample).
        For example:

            - Lowpass:   wp = 0.2,        ws = 0.3
            - Highpass:  wp = 0.3,        ws = 0.2
            - Bandpass:  wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop:  wp = [0.1, 0.6], ws = [0.2, 0.5]

        For analog filters, `wp` and `ws` are angular frequencies
        (e.g. rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design: 'butter', 'cheby1', 'cheby2',
        'ellip' (default), or 'bessel'.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output=='sos'``.

    See Also
    --------
    butter, cheby1, cheby2, ellip, bessel
    buttord, cheb1ord, cheb2ord, ellipord
    iirfilter

    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.
    """
    try:
        # Index 1 of each filter_dict entry holds the order-selection
        # function (e.g. buttord); not every family provides one.
        ordfunc = filter_dict[ftype][1]
    except KeyError:
        raise ValueError("Invalid IIR filter type: %s" % ftype)
    except IndexError:
        raise ValueError(("%s does not have order selection. Use "
                          "iirfilter function.") % ftype)
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # One edge frequency means low/highpass, two mean bandpass/bandstop;
    # the ordering of the first edges picks between each pair.
    band_kind = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        band_kind += 1
    btype = {1: 'lowpass', 2: 'highpass',
             3: 'bandstop', 4: 'bandpass'}[band_kind]
    N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
    return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
                     ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
              ftype='butter', output='ba'):
    """
    IIR digital and analog filter design given order and critical points.

    Design an Nth order digital or analog filter and return the filter
    coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
        Nyquist frequency, pi radians/sample. (`Wn` is thus in
        half-cycles / sample.)
        For analog filters, `Wn` is an angular frequency (e.g. rad/s).
    rp : float, optional
        For Chebyshev and elliptic filters, provides the maximum ripple
        in the passband. (dB)
    rs : float, optional
        For Chebyshev and elliptic filters, provides the minimum attenuation
        in the stop band. (dB)
    btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
        The type of filter. Default is 'bandpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:

            - Butterworth   : 'butter'
            - Chebyshev I   : 'cheby1'
            - Chebyshev II  : 'cheby2'
            - Cauer/elliptic: 'ellip'
            - Bessel/Thomson: 'bessel'

    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output=='sos'``.

    See Also
    --------
    butter : Filter design using order and critical points
    cheby1, cheby2, ellip, bessel
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord, ellipord
    iirdesign : General filter design using passband and stopband spec

    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.

    Examples
    --------
    Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
    response:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
    ...                         analog=True, ftype='cheby2')
    >>> w, h = signal.freqs(b, a, 1000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.semilogx(w, 20 * np.log10(abs(h)))
    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
    >>> ax.set_xlabel('Frequency [radians / second]')
    >>> ax.set_ylabel('Amplitude [dB]')
    >>> ax.axis((10, 1000, -100, 10))
    >>> ax.grid(which='both', axis='both')
    >>> plt.show()
    """
    # Normalize the selector strings so lookups below are case-insensitive.
    ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
    Wn = asarray(Wn)
    try:
        btype = band_dict[btype]
    except KeyError:
        raise ValueError("'%s' is an invalid bandtype for filter." % btype)
    try:
        # Index 0 of each filter_dict entry is the analog prototype
        # function (e.g. buttap) for that filter family.
        typefunc = filter_dict[ftype][0]
    except KeyError:
        raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
    if output not in ['ba', 'zpk', 'sos']:
        raise ValueError("'%s' is not a valid output form." % output)
    if rp is not None and rp < 0:
        raise ValueError("passband ripple (rp) must be positive")
    if rs is not None and rs < 0:
        raise ValueError("stopband attenuation (rs) must be positive")
    # Get analog lowpass prototype
    # (each family's prototype takes a different set of ripple parameters,
    # so validate that the required ones were supplied).
    if typefunc in [buttap, besselap]:
        z, p, k = typefunc(N)
    elif typefunc == cheb1ap:
        if rp is None:
            raise ValueError("passband ripple (rp) must be provided to "
                             "design a Chebyshev I filter.")
        z, p, k = typefunc(N, rp)
    elif typefunc == cheb2ap:
        if rs is None:
            raise ValueError("stopband attenuation (rs) must be provided to "
                             "design an Chebyshev II filter.")
        z, p, k = typefunc(N, rs)
    elif typefunc == ellipap:
        if rs is None or rp is None:
            raise ValueError("Both rp and rs must be provided to design an "
                             "elliptic filter.")
        z, p, k = typefunc(N, rp, rs)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
    # Pre-warp frequencies for digital filter design
    # (compensates for the frequency warping of the bilinear transform).
    if not analog:
        if numpy.any(Wn < 0) or numpy.any(Wn > 1):
            raise ValueError("Digital filter critical frequencies "
                             "must be 0 <= Wn <= 1")
        fs = 2.0
        warped = 2 * fs * tan(pi * Wn / fs)
    else:
        warped = Wn
    # transform to lowpass, bandpass, highpass, or bandstop
    if btype in ('lowpass', 'highpass'):
        if numpy.size(Wn) != 1:
            raise ValueError('Must specify a single critical frequency Wn')
        if btype == 'lowpass':
            z, p, k = _zpklp2lp(z, p, k, wo=warped)
        elif btype == 'highpass':
            z, p, k = _zpklp2hp(z, p, k, wo=warped)
    elif btype in ('bandpass', 'bandstop'):
        try:
            # Band transforms are parameterized by bandwidth and the
            # geometric mean of the two edges.
            bw = warped[1] - warped[0]
            wo = sqrt(warped[0] * warped[1])
        except IndexError:
            raise ValueError('Wn must specify start and stop frequencies')
        if btype == 'bandpass':
            z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
        elif btype == 'bandstop':
            z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
    # Find discrete equivalent if necessary
    if not analog:
        z, p, k = _zpkbilinear(z, p, k, fs=fs)
    # Transform to proper out type (pole-zero, state-space, numer-denom)
    if output == 'zpk':
        return z, p, k
    elif output == 'ba':
        return zpk2tf(z, p, k)
    elif output == 'sos':
        return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
    """Return a digital filter from an analog one using a bilinear transform.

    Transform a set of poles and zeros from the analog s-plane to the
    digital z-plane using Tustin's method, which substitutes
    ``(z-1) / (z+1)`` for ``s``, maintaining the shape of the frequency
    response.

    Parameters
    ----------
    z : ndarray
        Zeros of the analog IIR filter transfer function.
    p : ndarray
        Poles of the analog IIR filter transfer function.
    k : float
        System gain of the analog IIR filter transfer function.
    fs : float
        Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
        done in this function.

    Returns
    -------
    z : ndarray
        Zeros of the transformed digital filter transfer function.
    p : ndarray
        Poles of the transformed digital filter transfer function.
    k : float
        System gain of the transformed digital filter.
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    degree = _relative_degree(z, p)
    double_fs = 2 * fs
    # Map each s-plane root to the z-plane via s = 2*fs*(z-1)/(z+1).
    z_z = (double_fs + z) / (double_fs - z)
    p_z = (double_fs + p) / (double_fs - p)
    # Zeros that sat at infinity land at z = -1 (the Nyquist frequency).
    z_z = append(z_z, -ones(degree))
    # Compensate for the gain change introduced by the mapping.
    k_z = k * real(prod(double_fs - z) / prod(double_fs - p))
    return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
    """Transform a lowpass filter prototype to a different frequency.

    Return an analog low-pass filter with cutoff frequency `wo` from an
    analog low-pass filter prototype with unity cutoff frequency, using
    zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : ndarray
        Zeros of the analog IIR filter transfer function.
    p : ndarray
        Poles of the analog IIR filter transfer function.
    k : float
        System gain of the analog IIR filter transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g. rad/s).
        Defaults to no change.

    Returns
    -------
    z : ndarray
        Zeros of the transformed low-pass filter transfer function.
    p : ndarray
        Poles of the transformed low-pass filter transfer function.
    k : float
        System gain of the transformed low-pass filter.

    Notes
    -----
    Derived from the s-plane substitution ``s -> s / wo``.
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    wo = float(wo)  # Avoid np.int wraparound
    degree = _relative_degree(z, p)
    # Radial scaling moves the cutoff from 1 rad/s to wo.
    scaled_z = wo * z
    scaled_p = wo * p
    # Each scaled pole lowers the gain by wo and each scaled zero raises
    # it; wo**degree cancels the net change so the passband gain is kept.
    scaled_k = k * wo**degree
    return scaled_z, scaled_p, scaled_k
def _zpklp2hp(z, p, k, wo=1.0):
    """Transform a lowpass filter prototype to a highpass filter.

    Return an analog high-pass filter with cutoff frequency `wo` from an
    analog low-pass filter prototype with unity cutoff frequency, using
    zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : ndarray
        Zeros of the analog IIR filter transfer function.
    p : ndarray
        Poles of the analog IIR filter transfer function.
    k : float
        System gain of the analog IIR filter transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g. rad/s).
        Defaults to no change.

    Returns
    -------
    z : ndarray
        Zeros of the transformed high-pass filter transfer function.
    p : ndarray
        Poles of the transformed high-pass filter transfer function.
    k : float
        System gain of the transformed high-pass filter.

    Notes
    -----
    Derived from the s-plane substitution ``s -> wo / s``, which keeps the
    lowpass and highpass responses symmetric on a logarithmic scale.
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    wo = float(wo)
    degree = _relative_degree(z, p)
    # The substitution inverts every root radially, converting LPF to HPF
    # while shifting the cutoff frequency to wo.
    inv_z = wo / z
    inv_p = wo / p
    # Zeros that sat at infinity in the lowpass prototype move to the origin.
    inv_z = append(inv_z, zeros(degree))
    # Cancel out the gain change caused by the inversion.
    inv_k = k * real(prod(-z) / prod(-p))
    return inv_z, inv_p, inv_k
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
    """Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, using zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : ndarray
        Zeros of the analog IIR filter transfer function.
    p : ndarray
        Poles of the analog IIR filter transfer function.
    k : float
        System gain of the analog IIR filter transfer function.
    wo : float
        Desired passband center, as angular frequency (e.g. rad/s).
        Defaults to no change.
    bw : float
        Desired passband width, as angular frequency (e.g. rad/s).
        Defaults to 1.

    Returns
    -------
    z : ndarray
        Zeros of the transformed band-pass filter transfer function.
    p : ndarray
        Poles of the transformed band-pass filter transfer function.
    k : float
        System gain of the transformed band-pass filter.

    Notes
    -----
    This is the "wideband" transformation
    ``s -> (s**2 + wo**2) / (s * bw)``, producing a passband with geometric
    (log frequency) symmetry about `wo`.
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    wo = float(wo)
    bw = float(bw)
    degree = _relative_degree(z, p)
    # Scale the prototype roots to the desired bandwidth.
    scaled_z = z * bw/2
    scaled_p = p * bw/2
    # Complex dtype required: the square roots below may act on negative
    # numbers and must not produce NaN.
    scaled_z = scaled_z.astype(complex)
    scaled_p = scaled_p.astype(complex)
    # Duplicate each root into a pair shifted to +wo and -wo.
    z_shift = sqrt(scaled_z**2 - wo**2)
    p_shift = sqrt(scaled_p**2 - wo**2)
    bp_z = concatenate((scaled_z + z_shift, scaled_z - z_shift))
    bp_p = concatenate((scaled_p + p_shift, scaled_p - p_shift))
    # Move degree zeros to the origin, leaving degree zeros at infinity.
    bp_z = append(bp_z, zeros(degree))
    # Cancel out the gain change from the frequency scaling.
    bp_k = k * bw**degree
    return bp_z, bp_p, bp_k
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
    """Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    stopband width `bw` from an analog low-pass filter prototype with
    unity cutoff frequency, using zeros, poles, and gain ('zpk')
    representation.

    Parameters
    ----------
    z : ndarray
        Zeros of the analog IIR filter transfer function.
    p : ndarray
        Poles of the analog IIR filter transfer function.
    k : float
        System gain of the analog IIR filter transfer function.
    wo : float
        Desired stopband center, as angular frequency (e.g. rad/s).
        Defaults to no change.
    bw : float
        Desired stopband width, as angular frequency (e.g. rad/s).
        Defaults to 1.

    Returns
    -------
    z : ndarray
        Zeros of the transformed band-stop filter transfer function.
    p : ndarray
        Poles of the transformed band-stop filter transfer function.
    k : float
        System gain of the transformed band-stop filter.

    Notes
    -----
    This is the "wideband" transformation
    ``s -> (s * bw) / (s**2 + wo**2)``, producing a stopband with geometric
    (log frequency) symmetry about `wo`.
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    wo = float(wo)
    bw = float(bw)
    degree = _relative_degree(z, p)
    # First invert the prototype into a highpass with the desired bandwidth.
    hp_z = (bw/2) / z
    hp_p = (bw/2) / p
    # Complex dtype required: the square roots below may act on negative
    # numbers and must not produce NaN.
    hp_z = hp_z.astype(complex)
    hp_p = hp_p.astype(complex)
    # Duplicate each root into a pair shifted to +wo and -wo.
    z_shift = sqrt(hp_z**2 - wo**2)
    p_shift = sqrt(hp_p**2 - wo**2)
    bs_z = concatenate((hp_z + z_shift, hp_z - z_shift))
    bs_p = concatenate((hp_p + p_shift, hp_p - p_shift))
    # Zeros that were at infinity land at the stopband center, +/- j*wo.
    bs_z = append(bs_z, +1j*wo * ones(degree))
    bs_z = append(bs_z, -1j*wo * ones(degree))
    # Cancel out the gain change caused by the inversion.
    bs_k = k * real(prod(-z) / prod(-p))
    return bs_z, bs_p, bs_k
def butter(N, Wn, btype='low', analog=False, output='ba'):
    """
    Butterworth digital and analog filter design.

    Design an Nth order digital or analog Butterworth filter and return
    the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For a Butterworth filter, this is the point at which the gain
        drops to 1/sqrt(2) that of the passband (the "-3 dB point").
        For digital filters, `Wn` is normalized from 0 to 1, where 1 is
        the Nyquist frequency, pi radians/sample (`Wn` is thus in
        half-cycles / sample).  For analog filters, `Wn` is an angular
        frequency (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output=='sos'``.

    See Also
    --------
    buttord

    Notes
    -----
    The Butterworth filter has maximally flat frequency response in the
    passband.

    The ``'sos'`` output parameter was added in 0.16.0.
    """
    # All the work happens in the generic IIR designer with the
    # Butterworth analog prototype selected.
    return iirfilter(N, Wn, ftype='butter', btype=btype,
                     analog=analog, output=output)
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
    """Design a Chebyshev type I IIR filter (digital or analog).

    Thin convenience wrapper around `iirfilter` with ``ftype='cheby1'``.

    Parameters
    ----------
    N : int
        Filter order.
    rp : float
        Maximum ripple below unity gain allowed in the passband, given
        in decibels as a positive number.
    Wn : array_like
        Critical frequency or frequencies (scalar, or length-2 sequence
        for band filters): the point in the transition band where the
        gain first drops below ``-rp``.  Digital designs use
        Nyquist-normalized values from 0 to 1 (half-cycles/sample);
        analog designs use angular frequencies (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        Band type of the filter; defaults to 'lowpass'.
    analog : bool, optional
        When True design an analog filter; otherwise (default) digital.
    output : {'ba', 'zpk', 'sos'}, optional
        Coefficient format: numerator/denominator ('ba', default),
        zeros/poles/gain ('zpk'), or second-order sections ('sos',
        added in 0.16.0).

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator and denominator polynomials (``output='ba'``).
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain (``output='zpk'``).
    sos : ndarray
        Second-order sections (``output='sos'``).

    See Also
    --------
    cheb1ord

    Notes
    -----
    Type I Chebyshev filters maximize the rate of cutoff at the expense
    of passband ripple and extra ringing in the step response.  They
    roll off faster than type II filters (`cheby2`), which put their
    ripple in the stopband instead.  The equiripple passband has N
    maxima/minima, so odd-order filters have unity DC gain while
    even-order filters have a DC gain of -rp dB.
    """
    # All of the real design work happens in the generic IIR machinery.
    return iirfilter(N, Wn, rp=rp, ftype='cheby1', btype=btype,
                     analog=analog, output=output)
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
    """Design a Chebyshev type II IIR filter (digital or analog).

    Thin convenience wrapper around `iirfilter` with ``ftype='cheby2'``.

    Parameters
    ----------
    N : int
        Filter order.
    rs : float
        Minimum attenuation required in the stopband, given in decibels
        as a positive number.
    Wn : array_like
        Critical frequency or frequencies (scalar, or length-2 sequence
        for band filters): the point in the transition band where the
        gain first reaches ``-rs``.  Digital designs use
        Nyquist-normalized values from 0 to 1 (half-cycles/sample);
        analog designs use angular frequencies (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        Band type of the filter; defaults to 'lowpass'.
    analog : bool, optional
        When True design an analog filter; otherwise (default) digital.
    output : {'ba', 'zpk', 'sos'}, optional
        Coefficient format: numerator/denominator ('ba', default),
        zeros/poles/gain ('zpk'), or second-order sections ('sos',
        added in 0.16.0).

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator and denominator polynomials (``output='ba'``).
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain (``output='zpk'``).
    sos : ndarray
        Second-order sections (``output='sos'``).

    See Also
    --------
    cheb2ord

    Notes
    -----
    Type II Chebyshev filters maximize the rate of cutoff at the expense
    of stopband ripple and extra ringing in the step response.  Unlike
    type I filters (`cheby1`) they keep the passband ripple-free, but do
    not roll off as fast.
    """
    # Delegate to the generic IIR design routine.
    return iirfilter(N, Wn, rs=rs, ftype='cheby2', btype=btype,
                     analog=analog, output=output)
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
    """Design an elliptic (Cauer) IIR filter (digital or analog).

    Thin convenience wrapper around `iirfilter` with ``ftype='elliptic'``.

    Parameters
    ----------
    N : int
        Filter order.
    rp : float
        Maximum ripple below unity gain allowed in the passband (dB,
        positive).
    rs : float
        Minimum attenuation required in the stopband (dB, positive).
    Wn : array_like
        Critical frequency or frequencies (scalar, or length-2 sequence
        for band filters): the point in the transition band where the
        gain first drops below ``-rp``.  Digital designs use
        Nyquist-normalized values from 0 to 1 (half-cycles/sample);
        analog designs use angular frequencies (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        Band type of the filter; defaults to 'lowpass'.
    analog : bool, optional
        When True design an analog filter; otherwise (default) digital.
    output : {'ba', 'zpk', 'sos'}, optional
        Coefficient format: numerator/denominator ('ba', default),
        zeros/poles/gain ('zpk'), or second-order sections ('sos',
        added in 0.16.0).

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator and denominator polynomials (``output='ba'``).
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain (``output='zpk'``).
    sos : ndarray
        Second-order sections (``output='sos'``).

    See Also
    --------
    ellipord

    Notes
    -----
    Also called Cauer or Zolotarev filters, elliptic designs maximize
    the rate of transition between passband and stopband at the expense
    of ripple in both bands and extra ringing in the step response.
    As `rp` approaches 0 the design tends to a Chebyshev type II filter
    (`cheby2`); as `rs` approaches 0 it tends to a type I (`cheby1`);
    as both approach 0 it tends to a Butterworth filter (`butter`).
    The equiripple passband has N maxima/minima, so odd-order filters
    have unity DC gain and even-order filters have -rp dB DC gain.
    """
    # Delegate to the generic IIR design routine.
    return iirfilter(N, Wn, rp=rp, rs=rs, ftype='elliptic', btype=btype,
                     analog=analog, output=output)
def bessel(N, Wn, btype='low', analog=False, output='ba'):
    """Design a Bessel/Thomson IIR filter (digital or analog).

    Thin convenience wrapper around `iirfilter` with ``ftype='bessel'``.

    Parameters
    ----------
    N : int
        Filter order.
    Wn : array_like
        Critical frequency or frequencies (scalar, or length-2 sequence
        for band filters), defined as the point where the asymptotes of
        the response match those of a Butterworth filter of the same
        order.  Digital designs use Nyquist-normalized values from 0 to
        1 (half-cycles/sample); analog designs use angular frequencies
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        Band type of the filter; defaults to 'lowpass'.
    analog : bool, optional
        When True design an analog filter; otherwise (default) digital.
    output : {'ba', 'zpk', 'sos'}, optional
        Coefficient format: numerator/denominator ('ba', default),
        zeros/poles/gain ('zpk'), or second-order sections ('sos',
        added in 0.16.0).

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator and denominator polynomials (``output='ba'``).
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain (``output='zpk'``).
    sos : ndarray
        Second-order sections (``output='sos'``).

    Notes
    -----
    The analog Bessel (Thomson) filter has maximally flat group delay
    and maximally linear phase, with very little ringing in the step
    response; as the order increases it approaches a Gaussian filter.
    The digital version produced here uses the bilinear transform, which
    does not preserve the analog phase response, so it is only
    approximately correct below about fs/4.  For a given `Wn` the
    lowpass and highpass designs are phase-matched.
    """
    # Delegate to the generic IIR design routine.
    return iirfilter(N, Wn, ftype='bessel', btype=btype,
                     analog=analog, output=output)
def maxflat():
    # Unimplemented placeholder (presumably for maximally-flat filter
    # design -- TODO confirm intent); calling it is a no-op returning None.
    pass
def yulewalk():
    # Unimplemented placeholder (presumably for Yule-Walker IIR design --
    # TODO confirm intent); calling it is a no-op returning None.
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
    """
    Band Stop Objective Function for order minimization.

    Returns the non-integer order for an analog band stop filter.

    Parameters
    ----------
    wp : scalar
        Edge of passband `passb`.
    ind : int, {0, 1}
        Index specifying which `passb` edge to vary (0 or 1).
    passb : ndarray
        Two element sequence of fixed passband edges.
    stopb : ndarray
        Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in stopband in dB.
    type : {'butter', 'cheby', 'ellip'}
        Type of filter.

    Returns
    -------
    n : scalar
        Filter order (possibly non-integer).
    """
    # NOTE: the parameter name `type` shadows the builtin but is kept for
    # backward compatibility with keyword callers.
    # Work on a copy so the caller's passband edges are not mutated while
    # the optimizer (fminbound in *ord functions) varies one of them.
    passbC = passb.copy()
    passbC[ind] = wp
    # Band-stop selectivity for the trial edges; the smaller magnitude of
    # the two band-edge values governs the required order.
    nat = (stopb * (passbC[0] - passbC[1]) /
           (stopb ** 2 - passbC[0] * passbC[1]))
    nat = min(abs(nat))

    if type == 'butter':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
    elif type == 'cheby':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
    elif type == 'ellip':
        GSTOP = 10 ** (0.1 * gstop)
        GPASS = 10 ** (0.1 * gpass)
        arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
        arg0 = 1.0 / nat
        # Complete elliptic integrals of the selectivity and discrimination
        # factors give the elliptic order formula.
        d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
        d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
        n = (d0[0] * d1[1] / (d0[1] * d1[0]))
    else:
        raise ValueError("Incorrect type: %s" % type)
    return n
def buttord(wp, ws, gpass, gstop, analog=False):
    """Butterworth filter order selection.

    Return the order of the lowest order digital or analog Butterworth
    filter that loses no more than `gpass` dB in the passband and has at
    least `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are normalized from 0 to 1, where 1 is
        the Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus
        in half-cycles / sample.)  For example:

            - Lowpass:   wp = 0.2,          ws = 0.3
            - Highpass:  wp = 0.3,          ws = 0.2
            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]

        For analog filters, `wp` and `ws` are angular frequencies
        (e.g. rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.

    Returns
    -------
    ord : int
        The lowest order for a Butterworth filter which meets specs.
    wn : ndarray or float
        The Butterworth natural frequency (i.e. the "3dB frequency").
        Should be used with `butter` to give filter results.

    See Also
    --------
    butter : Filter design using order and critical points
    cheb1ord, cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Classify the band type from the edge geometry:
    #   1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass
    filter_type = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        filter_type += 1

    # Pre-warp frequencies for digital filter design (bilinear transform).
    if not analog:
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)
    else:
        passb = wp * 1.0
        stopb = ws * 1.0

    if filter_type == 1:            # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:          # stop
        # The passband edges are free within the spec; optimize each one
        # toward the stopband to minimize the resulting order.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop,
                                       'butter'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop,
                                       'butter'),
                                 disp=0)
        passb[1] = wp1
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:          # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))

    nat = min(abs(nat))

    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    # `order` rather than `ord` to avoid shadowing the builtin.
    order = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) /
                     (2 * log10(nat))))

    # Find the Butterworth natural frequency WN (or the "3dB" frequency")
    # to give exactly gpass at passb.
    try:
        W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * order))
    except ZeroDivisionError:
        W0 = 1.0
        # A warning (not a print) so library users can filter/capture it.
        import warnings
        warnings.warn("Order is zero...check input parameters.",
                      RuntimeWarning, stacklevel=2)

    # Now convert this frequency back from lowpass prototype
    # to the original analog filter.
    if filter_type == 1:            # low
        WN = W0 * passb
    elif filter_type == 2:          # high
        WN = passb / W0
    elif filter_type == 3:          # stop
        WN = numpy.zeros(2, float)
        discr = sqrt((passb[1] - passb[0]) ** 2 +
                     4 * W0 ** 2 * passb[0] * passb[1])
        WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
        WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
        WN = numpy.sort(abs(WN))
    elif filter_type == 4:          # pass
        W0 = numpy.array([-W0, W0], float)
        WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
              sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
                   passb[0] * passb[1]))
        WN = numpy.sort(abs(WN))
    else:
        # Defensive: filter_type is 1-4 by construction above.
        raise ValueError("Bad type: %s" % filter_type)

    if not analog:
        # Undo the bilinear pre-warp for digital designs.
        wn = (2.0 / pi) * arctan(WN)
    else:
        wn = WN

    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
    """Chebyshev type I filter order selection.

    Find the minimum order of a digital or analog Chebyshev type I
    filter that loses at most `gpass` dB in the passband and achieves at
    least `gstop` dB of stopband attenuation.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  Digital designs use
        Nyquist-normalized values in (0, 1) (half-cycles/sample); analog
        designs use angular frequencies (e.g. rad/s).  Scalars request
        lowpass (``wp < ws``) or highpass (``wp > ws``) filters;
        length-2 sequences request bandpass (``wp`` inside ``ws``) or
        bandstop (``ws`` inside ``wp``) filters.
    gpass : float
        Maximum passband loss (dB).
    gstop : float
        Minimum stopband attenuation (dB).
    analog : bool, optional
        When True an analog design is assumed; default is digital.

    Returns
    -------
    ord : int
        Smallest order meeting the specifications.
    wn : ndarray or float
        Natural frequency ("3dB frequency") to pass on to `cheby1`.

    See Also
    --------
    cheby1, buttord, cheb2ord, ellipord, iirfilter, iirdesign
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass
    band = 2 * (len(wp) - 1) + (1 if wp[0] < ws[0] else 2)

    if analog:
        passb = wp * 1.0
        stopb = ws * 1.0
    else:
        # Bilinear pre-warp for digital specs.
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)

    if band == 1:
        nat = stopb / passb
    elif band == 2:
        nat = passb / stopb
    elif band == 3:
        # Optimize each free passband edge toward the stopband so the
        # required order is minimized.
        passb[0] = optimize.fminbound(band_stop_obj, passb[0],
                                      stopb[0] - 1e-12,
                                      args=(0, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        passb[1] = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12,
                                      passb[1],
                                      args=(1, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif band == 4:
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))

    g_stop = 10 ** (0.1 * abs(gstop))
    g_pass = 10 ** (0.1 * abs(gpass))
    order = int(ceil(arccosh(sqrt((g_stop - 1.0) / (g_pass - 1.0))) /
                     arccosh(nat)))

    # For type I designs the natural frequencies are just the passband
    # edges (un-warped again for digital filters).
    wn = passb if analog else (2.0 / pi) * arctan(passb)
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
    """Chebyshev type II filter order selection.

    Find the minimum order of a digital or analog Chebyshev type II
    filter that loses at most `gpass` dB in the passband and achieves at
    least `gstop` dB of stopband attenuation.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  Digital designs use
        Nyquist-normalized values in (0, 1) (half-cycles/sample); analog
        designs use angular frequencies (e.g. rad/s).  Scalars request
        lowpass (``wp < ws``) or highpass (``wp > ws``) filters;
        length-2 sequences request bandpass (``wp`` inside ``ws``) or
        bandstop (``ws`` inside ``wp``) filters.
    gpass : float
        Maximum passband loss (dB).
    gstop : float
        Minimum stopband attenuation (dB).
    analog : bool, optional
        When True an analog design is assumed; default is digital.

    Returns
    -------
    ord : int
        Smallest order meeting the specifications.
    wn : ndarray or float
        Natural frequency ("3dB frequency") to pass on to `cheby2`.

    See Also
    --------
    cheby2, buttord, cheb1ord, ellipord, iirfilter, iirdesign
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass
    band = 2 * (len(wp) - 1) + (1 if wp[0] < ws[0] else 2)

    if analog:
        passb = wp * 1.0
        stopb = ws * 1.0
    else:
        # Bilinear pre-warp for digital specs.
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)

    if band == 1:
        nat = stopb / passb
    elif band == 2:
        nat = passb / stopb
    elif band == 3:
        # Optimize each free passband edge toward the stopband so the
        # required order is minimized.
        passb[0] = optimize.fminbound(band_stop_obj, passb[0],
                                      stopb[0] - 1e-12,
                                      args=(0, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        passb[1] = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12,
                                      passb[1],
                                      args=(1, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif band == 4:
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))

    g_stop = 10 ** (0.1 * abs(gstop))
    g_pass = 10 ** (0.1 * abs(gpass))
    order = int(ceil(arccosh(sqrt((g_stop - 1.0) / (g_pass - 1.0))) /
                     arccosh(nat)))

    # Find the frequency where the analog prototype response is exactly
    # -gpass dB, then map back from the lowpass prototype to the
    # requested band.
    new_freq = cosh(1.0 / order * arccosh(sqrt((g_stop - 1.0) /
                                               (g_pass - 1.0))))
    new_freq = 1.0 / new_freq

    if band == 1:
        nat = passb / new_freq
    elif band == 2:
        nat = passb * new_freq
    elif band == 3:
        nat = numpy.zeros(2, float)
        nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
                  sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
                       passb[1] * passb[0]))
        nat[1] = passb[1] * passb[0] / nat[0]
    elif band == 4:
        nat = numpy.zeros(2, float)
        nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
                  sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
                       passb[1] * passb[0]))
        nat[1] = passb[0] * passb[1] / nat[0]

    wn = nat if analog else (2.0 / pi) * arctan(nat)
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
    """Elliptic (Cauer) filter order selection.

    Find the minimum order of a digital or analog elliptic filter that
    loses at most `gpass` dB in the passband and achieves at least
    `gstop` dB of stopband attenuation.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  Digital designs use
        Nyquist-normalized values in (0, 1) (half-cycles/sample); analog
        designs use angular frequencies (e.g. rad/s).  Scalars request
        lowpass (``wp < ws``) or highpass (``wp > ws``) filters;
        length-2 sequences request bandpass (``wp`` inside ``ws``) or
        bandstop (``ws`` inside ``wp``) filters.
    gpass : float
        Maximum passband loss (dB).
    gstop : float
        Minimum stopband attenuation (dB).
    analog : bool, optional
        When True an analog design is assumed; default is digital.

    Returns
    -------
    ord : int
        Smallest order meeting the specifications.
    wn : ndarray or float
        Natural frequency ("3dB frequency") to pass on to `ellip`.

    See Also
    --------
    ellip, buttord, cheb1ord, cheb2ord, iirfilter, iirdesign
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass
    band = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        band += 1

    if analog:
        passb = wp * 1.0
        stopb = ws * 1.0
    else:
        # Bilinear pre-warp for digital specs.
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)

    if band == 1:
        nat = stopb / passb
    elif band == 2:
        nat = passb / stopb
    elif band == 3:
        # Optimize each free passband edge toward the stopband so the
        # required order is minimized.
        passb[0] = optimize.fminbound(band_stop_obj, passb[0],
                                      stopb[0] - 1e-12,
                                      args=(0, passb, stopb, gpass, gstop,
                                            'ellip'),
                                      disp=0)
        passb[1] = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12,
                                      passb[1],
                                      args=(1, passb, stopb, gpass, gstop,
                                            'ellip'),
                                      disp=0)
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif band == 4:
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))

    # Elliptic order formula via ratios of complete elliptic integrals of
    # the selectivity (arg0) and discrimination (arg1) factors.
    g_stop = 10 ** (0.1 * gstop)
    g_pass = 10 ** (0.1 * gpass)
    arg1 = sqrt((g_pass - 1.0) / (g_stop - 1.0))
    arg0 = 1.0 / nat
    d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
    d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
    order = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))

    # The natural frequencies are the passband edges (un-warped again for
    # digital filters).
    wn = passb if analog else arctan(passb) * 2.0 / pi
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def buttap(N):
    """Return (z, p, k) of an analog Butterworth lowpass prototype.

    The prototype of order `N` is normalized to an angular cutoff
    frequency of 1 rad/s: it has no zeros, unit gain, and `N` poles
    spaced uniformly on the left half of the unit circle in the s-plane.
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    # Odd integers -N+1, -N+3, ..., N-1; the middle value 0 (for odd N)
    # guarantees one exactly-real pole.
    indices = numpy.arange(-N + 1, N, 2)
    poles = -numpy.exp(1j * pi * indices / (2 * N))
    return numpy.array([]), poles, 1
def cheb1ap(N, rp):
    """Return (z, p, k) of a Chebyshev type I analog lowpass prototype.

    The order-`N` prototype has `rp` decibels of ripple in the passband.
    Its angular cutoff frequency is normalized to 1 rad/s, defined as
    the point at which the gain first drops below ``-rp``.
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    if N == 0:
        # Degenerate zero-order case; avoids a divide-by-zero below.
        # Even-order type I filters have a DC gain of -rp dB.
        return numpy.array([]), numpy.array([]), 10**(-rp/20)

    # Ripple factor (epsilon) and the derived ellipse parameter.
    eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
    mu = 1.0 / N * arcsinh(1 / eps)

    # Poles lie on an ellipse in the left half of the s-plane: take the
    # Butterworth angles and push them onto the ellipse via sinh.
    angles = pi * numpy.arange(-N + 1, N, 2) / (2 * N)
    poles = -sinh(mu + 1j * angles)

    gain = numpy.prod(-poles, axis=0).real
    if N % 2 == 0:
        # Even orders ripple about unity gain; rescale accordingly.
        gain = gain / sqrt((1 + eps * eps))
    return numpy.array([]), poles, gain
def cheb2ap(N, rs):
    """
    Return (z,p,k) for Nth order Chebyshev type II analog lowpass filter.

    The returned filter prototype has `rs` decibels of ripple in the stopband.

    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first reaches ``-rs``.
    """
    # NOTE: the original docstring mislabeled this as "type I"; this is
    # the inverse-Chebyshev (type II) prototype with stopband ripple.
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        return numpy.array([]), numpy.array([]), 1

    # Ripple factor (epsilon)
    de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
    mu = arcsinh(1.0 / de) / N

    if N % 2:
        # Odd order: drop the middle index, which would place a zero at
        # infinity rather than on the imaginary axis.
        m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
                               numpy.arange(2, N, 2)))
    else:
        m = numpy.arange(-N+1, N, 2)
    z = -conjugate(1j / sin(m * pi / (2.0 * N)))

    # Poles around the unit circle like Butterworth
    p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
    # Warp into Chebyshev II
    p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
    p = 1.0 / p

    # Normalize the DC gain to unity.
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    return z, p, k
# Tolerance on the order of double-precision machine epsilon, used below
# (e.g. in _kratio) to keep elliptic-integral parameters away from the
# degenerate endpoints 0 and 1.
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
    # Search objective for ellipap's elliptic parameter m:
    # |K(m)/K(1-m) - k_ratio|, with the degenerate endpoints guarded.
    m = min(max(float(m), 0.0), 1.0)
    if EPSILON < abs(m) and (abs(m) + EPSILON) < 1:
        kk = special.ellipk([m, 1 - m])
        residual = kk[0] / kk[1] - k_ratio
    elif abs(m) > EPSILON:
        # m numerically at 1 (K(1-m) diverges).
        residual = -k_ratio
    else:
        # m numerically at 0: return a huge penalty value.
        residual = 1e20
    return abs(residual)
def ellipap(N, rp, rs):
    """Return (z,p,k) of Nth order elliptic analog lowpass filter.
    The filter is a normalized prototype that has `rp` decibels of ripple
    in the passband and a stopband `rs` decibels down.
    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first drops below ``-rp``.
    Raises
    ------
    ValueError
        If `N` is not a nonnegative integer, or if no elliptic filter can
        meet the given `rp`/`rs` specifications.
    References
    ----------
    Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
    and 12.
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        # Even order filters have DC gain of -rp dB
        return numpy.array([]), numpy.array([]), 10**(-rp/20)
    elif N == 1:
        # First order: a single real pole and no finite zeros.
        p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
        k = -p
        z = []
        return asarray(z), asarray(p), k
    # Passband ripple factor and the ratio of pass/stop ripple factors.
    eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
    ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
    ck1p = numpy.sqrt(1 - ck1 * ck1)
    if ck1p == 1:
        raise ValueError("Cannot design a filter with given rp and rs"
                         " specifications.")
    # Complete elliptic integrals at the modulus and its complement.
    val = special.ellipk([ck1 * ck1, ck1p * ck1p])
    if abs(1 - ck1p * ck1p) < EPSILON:
        krat = 0
    else:
        krat = N * val[0] / val[1]
    # Solve the degree equation K(m)/K(1-m) = krat for the modulus m; fall
    # back to a bounded search when the unconstrained fmin leaves [0, 1].
    m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
                      disp=0)
    if m < 0 or m > 1:
        m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
                               maxiter=250, disp=0)
    capk = special.ellipk(m)
    j = numpy.arange(1 - N % 2, N, 2)
    jj = len(j)
    # Zeros are placed on the imaginary axis via the Jacobi sn function; the
    # compress drops the sn == 0 sample (odd orders: zero at infinity).
    [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
    snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
    z = 1.0 / (sqrt(m) * snew)
    z = 1j * z
    z = numpy.concatenate((z, conjugate(z)))
    # Find v0 from the passband ripple, then evaluate the elliptic functions
    # at v0 on the complementary parameter to locate the poles.
    r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
                      maxfun=250, maxiter=250, disp=0)
    v0 = capk * r / (N * val[0])
    [sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
    p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
    if N % 2:
        # Odd order: keep the purely real pole once and mirror only the
        # genuinely complex poles into conjugate pairs.
        newp = numpy.compress(abs(p.imag) > EPSILON *
                              numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
                                                   axis=0).real),
                              p, axis=-1)
        p = numpy.concatenate((p, conjugate(newp)))
    else:
        p = numpy.concatenate((p, conjugate(p)))
    # Gain normalization; even orders are additionally scaled so the DC gain
    # sits at -rp dB (consistent with the N == 0 branch above).
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    if N % 2 == 0:
        k = k / numpy.sqrt((1 + eps * eps))
    return z, p, k
def besselap(N):
    """Return (z,p,k) for analog prototype of an Nth order Bessel filter.
    The filter is normalized such that the filter asymptotes are the same as
    a Butterworth filter of the same order with an angular (e.g. rad/s)
    cutoff frequency of 1.
    Parameters
    ----------
    N : int
        The order of the Bessel filter to return zeros, poles and gain for.
        Values in the range 0-25 are supported.
    Returns
    -------
    z : ndarray
        Zeros. Is always an empty array.
    p : ndarray
        Poles.
    k : scalar
        Gain. Always 1.
    Raises
    ------
    ValueError
        If `N` is outside the supported range 0-25.
    """
    # Bessel polynomial roots have no closed form; the pole locations below
    # are precomputed high-precision constants, one branch per order.  Tiny
    # imaginary parts are written as large mantissas times 1e-27 to preserve
    # all significant digits.
    z = []
    k = 1
    if N == 0:
        p = []
    elif N == 1:
        p = [-1]
    elif N == 2:
        p = [-.8660254037844386467637229 + .4999999999999999999999996j,
             -.8660254037844386467637229 - .4999999999999999999999996j]
    elif N == 3:
        p = [-.9416000265332067855971980,
             -.7456403858480766441810907 - .7113666249728352680992154j,
             -.7456403858480766441810907 + .7113666249728352680992154j]
    elif N == 4:
        p = [-.6572111716718829545787781 - .8301614350048733772399715j,
             -.6572111716718829545787788 + .8301614350048733772399715j,
             -.9047587967882449459642637 - .2709187330038746636700923j,
             -.9047587967882449459642624 + .2709187330038746636700926j]
    elif N == 5:
        p = [-.9264420773877602247196260,
             -.8515536193688395541722677 - .4427174639443327209850002j,
             -.8515536193688395541722677 + .4427174639443327209850002j,
             -.5905759446119191779319432 - .9072067564574549539291747j,
             -.5905759446119191779319432 + .9072067564574549539291747j]
    elif N == 6:
        p = [-.9093906830472271808050953 - .1856964396793046769246397j,
             -.9093906830472271808050953 + .1856964396793046769246397j,
             -.7996541858328288520243325 - .5621717346937317988594118j,
             -.7996541858328288520243325 + .5621717346937317988594118j,
             -.5385526816693109683073792 - .9616876881954277199245657j,
             -.5385526816693109683073792 + .9616876881954277199245657j]
    elif N == 7:
        p = [-.9194871556490290014311619,
             -.8800029341523374639772340 - .3216652762307739398381830j,
             -.8800029341523374639772340 + .3216652762307739398381830j,
             -.7527355434093214462291616 - .6504696305522550699212995j,
             -.7527355434093214462291616 + .6504696305522550699212995j,
             -.4966917256672316755024763 - 1.002508508454420401230220j,
             -.4966917256672316755024763 + 1.002508508454420401230220j]
    elif N == 8:
        p = [-.9096831546652910216327629 - .1412437976671422927888150j,
             -.9096831546652910216327629 + .1412437976671422927888150j,
             -.8473250802359334320103023 - .4259017538272934994996429j,
             -.8473250802359334320103023 + .4259017538272934994996429j,
             -.7111381808485399250796172 - .7186517314108401705762571j,
             -.7111381808485399250796172 + .7186517314108401705762571j,
             -.4621740412532122027072175 - 1.034388681126901058116589j,
             -.4621740412532122027072175 + 1.034388681126901058116589j]
    elif N == 9:
        p = [-.9154957797499037686769223,
             -.8911217017079759323183848 - .2526580934582164192308115j,
             -.8911217017079759323183848 + .2526580934582164192308115j,
             -.8148021112269012975514135 - .5085815689631499483745341j,
             -.8148021112269012975514135 + .5085815689631499483745341j,
             -.6743622686854761980403401 - .7730546212691183706919682j,
             -.6743622686854761980403401 + .7730546212691183706919682j,
             -.4331415561553618854685942 - 1.060073670135929666774323j,
             -.4331415561553618854685942 + 1.060073670135929666774323j]
    elif N == 10:
        p = [-.9091347320900502436826431 - .1139583137335511169927714j,
             -.9091347320900502436826431 + .1139583137335511169927714j,
             -.8688459641284764527921864 - .3430008233766309973110589j,
             -.8688459641284764527921864 + .3430008233766309973110589j,
             -.7837694413101441082655890 - .5759147538499947070009852j,
             -.7837694413101441082655890 + .5759147538499947070009852j,
             -.6417513866988316136190854 - .8175836167191017226233947j,
             -.6417513866988316136190854 + .8175836167191017226233947j,
             -.4083220732868861566219785 - 1.081274842819124562037210j,
             -.4083220732868861566219785 + 1.081274842819124562037210j]
    elif N == 11:
        p = [-.9129067244518981934637318,
             -.8963656705721166099815744 - .2080480375071031919692341j,
             -.8963656705721166099815744 + .2080480375071031919692341j,
             -.8453044014712962954184557 - .4178696917801248292797448j,
             -.8453044014712962954184557 + .4178696917801248292797448j,
             -.7546938934722303128102142 - .6319150050721846494520941j,
             -.7546938934722303128102142 + .6319150050721846494520941j,
             -.6126871554915194054182909 - .8547813893314764631518509j,
             -.6126871554915194054182909 + .8547813893314764631518509j,
             -.3868149510055090879155425 - 1.099117466763120928733632j,
             -.3868149510055090879155425 + 1.099117466763120928733632j]
    elif N == 12:
        p = [-.9084478234140682638817772 - 95506365213450398415258360.0e-27j,
             -.9084478234140682638817772 + 95506365213450398415258360.0e-27j,
             -.8802534342016826507901575 - .2871779503524226723615457j,
             -.8802534342016826507901575 + .2871779503524226723615457j,
             -.8217296939939077285792834 - .4810212115100676440620548j,
             -.8217296939939077285792834 + .4810212115100676440620548j,
             -.7276681615395159454547013 - .6792961178764694160048987j,
             -.7276681615395159454547013 + .6792961178764694160048987j,
             -.5866369321861477207528215 - .8863772751320727026622149j,
             -.5866369321861477207528215 + .8863772751320727026622149j,
             -.3679640085526312839425808 - 1.114373575641546257595657j,
             -.3679640085526312839425808 + 1.114373575641546257595657j]
    elif N == 13:
        p = [-.9110914665984182781070663,
             -.8991314665475196220910718 - .1768342956161043620980863j,
             -.8991314665475196220910718 + .1768342956161043620980863j,
             -.8625094198260548711573628 - .3547413731172988997754038j,
             -.8625094198260548711573628 + .3547413731172988997754038j,
             -.7987460692470972510394686 - .5350752120696801938272504j,
             -.7987460692470972510394686 + .5350752120696801938272504j,
             -.7026234675721275653944062 - .7199611890171304131266374j,
             -.7026234675721275653944062 + .7199611890171304131266374j,
             -.5631559842430199266325818 - .9135900338325109684927731j,
             -.5631559842430199266325818 + .9135900338325109684927731j,
             -.3512792323389821669401925 - 1.127591548317705678613239j,
             -.3512792323389821669401925 + 1.127591548317705678613239j]
    elif N == 14:
        p = [-.9077932138396487614720659 - 82196399419401501888968130.0e-27j,
             -.9077932138396487614720659 + 82196399419401501888968130.0e-27j,
             -.8869506674916445312089167 - .2470079178765333183201435j,
             -.8869506674916445312089167 + .2470079178765333183201435j,
             -.8441199160909851197897667 - .4131653825102692595237260j,
             -.8441199160909851197897667 + .4131653825102692595237260j,
             -.7766591387063623897344648 - .5819170677377608590492434j,
             -.7766591387063623897344648 + .5819170677377608590492434j,
             -.6794256425119233117869491 - .7552857305042033418417492j,
             -.6794256425119233117869491 + .7552857305042033418417492j,
             -.5418766775112297376541293 - .9373043683516919569183099j,
             -.5418766775112297376541293 + .9373043683516919569183099j,
             -.3363868224902037330610040 - 1.139172297839859991370924j,
             -.3363868224902037330610040 + 1.139172297839859991370924j]
    elif N == 15:
        p = [-.9097482363849064167228581,
             -.9006981694176978324932918 - .1537681197278439351298882j,
             -.9006981694176978324932918 + .1537681197278439351298882j,
             -.8731264620834984978337843 - .3082352470564267657715883j,
             -.8731264620834984978337843 + .3082352470564267657715883j,
             -.8256631452587146506294553 - .4642348752734325631275134j,
             -.8256631452587146506294553 + .4642348752734325631275134j,
             -.7556027168970728127850416 - .6229396358758267198938604j,
             -.7556027168970728127850416 + .6229396358758267198938604j,
             -.6579196593110998676999362 - .7862895503722515897065645j,
             -.6579196593110998676999362 + .7862895503722515897065645j,
             -.5224954069658330616875186 - .9581787261092526478889345j,
             -.5224954069658330616875186 + .9581787261092526478889345j,
             -.3229963059766444287113517 - 1.149416154583629539665297j,
             -.3229963059766444287113517 + 1.149416154583629539665297j]
    elif N == 16:
        p = [-.9072099595087001356491337 - 72142113041117326028823950.0e-27j,
             -.9072099595087001356491337 + 72142113041117326028823950.0e-27j,
             -.8911723070323647674780132 - .2167089659900576449410059j,
             -.8911723070323647674780132 + .2167089659900576449410059j,
             -.8584264231521330481755780 - .3621697271802065647661080j,
             -.8584264231521330481755780 + .3621697271802065647661080j,
             -.8074790293236003885306146 - .5092933751171800179676218j,
             -.8074790293236003885306146 + .5092933751171800179676218j,
             -.7356166304713115980927279 - .6591950877860393745845254j,
             -.7356166304713115980927279 + .6591950877860393745845254j,
             -.6379502514039066715773828 - .8137453537108761895522580j,
             -.6379502514039066715773828 + .8137453537108761895522580j,
             -.5047606444424766743309967 - .9767137477799090692947061j,
             -.5047606444424766743309967 + .9767137477799090692947061j,
             -.3108782755645387813283867 - 1.158552841199330479412225j,
             -.3108782755645387813283867 + 1.158552841199330479412225j]
    elif N == 17:
        p = [-.9087141161336397432860029,
             -.9016273850787285964692844 - .1360267995173024591237303j,
             -.9016273850787285964692844 + .1360267995173024591237303j,
             -.8801100704438627158492165 - .2725347156478803885651973j,
             -.8801100704438627158492165 + .2725347156478803885651973j,
             -.8433414495836129204455491 - .4100759282910021624185986j,
             -.8433414495836129204455491 + .4100759282910021624185986j,
             -.7897644147799708220288138 - .5493724405281088674296232j,
             -.7897644147799708220288138 + .5493724405281088674296232j,
             -.7166893842372349049842743 - .6914936286393609433305754j,
             -.7166893842372349049842743 + .6914936286393609433305754j,
             -.6193710717342144521602448 - .8382497252826992979368621j,
             -.6193710717342144521602448 + .8382497252826992979368621j,
             -.4884629337672704194973683 - .9932971956316781632345466j,
             -.4884629337672704194973683 + .9932971956316781632345466j,
             -.2998489459990082015466971 - 1.166761272925668786676672j,
             -.2998489459990082015466971 + 1.166761272925668786676672j]
    elif N == 18:
        p = [-.9067004324162775554189031 - 64279241063930693839360680.0e-27j,
             -.9067004324162775554189031 + 64279241063930693839360680.0e-27j,
             -.8939764278132455733032155 - .1930374640894758606940586j,
             -.8939764278132455733032155 + .1930374640894758606940586j,
             -.8681095503628830078317207 - .3224204925163257604931634j,
             -.8681095503628830078317207 + .3224204925163257604931634j,
             -.8281885016242836608829018 - .4529385697815916950149364j,
             -.8281885016242836608829018 + .4529385697815916950149364j,
             -.7726285030739558780127746 - .5852778162086640620016316j,
             -.7726285030739558780127746 + .5852778162086640620016316j,
             -.6987821445005273020051878 - .7204696509726630531663123j,
             -.6987821445005273020051878 + .7204696509726630531663123j,
             -.6020482668090644386627299 - .8602708961893664447167418j,
             -.6020482668090644386627299 + .8602708961893664447167418j,
             -.4734268069916151511140032 - 1.008234300314801077034158j,
             -.4734268069916151511140032 + 1.008234300314801077034158j,
             -.2897592029880489845789953 - 1.174183010600059128532230j,
             -.2897592029880489845789953 + 1.174183010600059128532230j]
    elif N == 19:
        p = [-.9078934217899404528985092,
             -.9021937639390660668922536 - .1219568381872026517578164j,
             -.9021937639390660668922536 + .1219568381872026517578164j,
             -.8849290585034385274001112 - .2442590757549818229026280j,
             -.8849290585034385274001112 + .2442590757549818229026280j,
             -.8555768765618421591093993 - .3672925896399872304734923j,
             -.8555768765618421591093993 + .3672925896399872304734923j,
             -.8131725551578197705476160 - .4915365035562459055630005j,
             -.8131725551578197705476160 + .4915365035562459055630005j,
             -.7561260971541629355231897 - .6176483917970178919174173j,
             -.7561260971541629355231897 + .6176483917970178919174173j,
             -.6818424412912442033411634 - .7466272357947761283262338j,
             -.6818424412912442033411634 + .7466272357947761283262338j,
             -.5858613321217832644813602 - .8801817131014566284786759j,
             -.5858613321217832644813602 + .8801817131014566284786759j,
             -.4595043449730988600785456 - 1.021768776912671221830298j,
             -.4595043449730988600785456 + 1.021768776912671221830298j,
             -.2804866851439370027628724 - 1.180931628453291873626003j,
             -.2804866851439370027628724 + 1.180931628453291873626003j]
    elif N == 20:
        p = [-.9062570115576771146523497 - 57961780277849516990208850.0e-27j,
             -.9062570115576771146523497 + 57961780277849516990208850.0e-27j,
             -.8959150941925768608568248 - .1740317175918705058595844j,
             -.8959150941925768608568248 + .1740317175918705058595844j,
             -.8749560316673332850673214 - .2905559296567908031706902j,
             -.8749560316673332850673214 + .2905559296567908031706902j,
             -.8427907479956670633544106 - .4078917326291934082132821j,
             -.8427907479956670633544106 + .4078917326291934082132821j,
             -.7984251191290606875799876 - .5264942388817132427317659j,
             -.7984251191290606875799876 + .5264942388817132427317659j,
             -.7402780309646768991232610 - .6469975237605228320268752j,
             -.7402780309646768991232610 + .6469975237605228320268752j,
             -.6658120544829934193890626 - .7703721701100763015154510j,
             -.6658120544829934193890626 + .7703721701100763015154510j,
             -.5707026806915714094398061 - .8982829066468255593407161j,
             -.5707026806915714094398061 + .8982829066468255593407161j,
             -.4465700698205149555701841 - 1.034097702560842962315411j,
             -.4465700698205149555701841 + 1.034097702560842962315411j,
             -.2719299580251652601727704 - 1.187099379810885886139638j,
             -.2719299580251652601727704 + 1.187099379810885886139638j]
    elif N == 21:
        p = [-.9072262653142957028884077,
             -.9025428073192696303995083 - .1105252572789856480992275j,
             -.9025428073192696303995083 + .1105252572789856480992275j,
             -.8883808106664449854431605 - .2213069215084350419975358j,
             -.8883808106664449854431605 + .2213069215084350419975358j,
             -.8643915813643204553970169 - .3326258512522187083009453j,
             -.8643915813643204553970169 + .3326258512522187083009453j,
             -.8299435470674444100273463 - .4448177739407956609694059j,
             -.8299435470674444100273463 + .4448177739407956609694059j,
             -.7840287980408341576100581 - .5583186348022854707564856j,
             -.7840287980408341576100581 + .5583186348022854707564856j,
             -.7250839687106612822281339 - .6737426063024382240549898j,
             -.7250839687106612822281339 + .6737426063024382240549898j,
             -.6506315378609463397807996 - .7920349342629491368548074j,
             -.6506315378609463397807996 + .7920349342629491368548074j,
             -.5564766488918562465935297 - .9148198405846724121600860j,
             -.5564766488918562465935297 + .9148198405846724121600860j,
             -.4345168906815271799687308 - 1.045382255856986531461592j,
             -.4345168906815271799687308 + 1.045382255856986531461592j,
             -.2640041595834031147954813 - 1.192762031948052470183960j,
             -.2640041595834031147954813 + 1.192762031948052470183960j]
    elif N == 22:
        p = [-.9058702269930872551848625 - 52774908289999045189007100.0e-27j,
             -.9058702269930872551848625 + 52774908289999045189007100.0e-27j,
             -.8972983138153530955952835 - .1584351912289865608659759j,
             -.8972983138153530955952835 + .1584351912289865608659759j,
             -.8799661455640176154025352 - .2644363039201535049656450j,
             -.8799661455640176154025352 + .2644363039201535049656450j,
             -.8534754036851687233084587 - .3710389319482319823405321j,
             -.8534754036851687233084587 + .3710389319482319823405321j,
             -.8171682088462720394344996 - .4785619492202780899653575j,
             -.8171682088462720394344996 + .4785619492202780899653575j,
             -.7700332930556816872932937 - .5874255426351153211965601j,
             -.7700332930556816872932937 + .5874255426351153211965601j,
             -.7105305456418785989070935 - .6982266265924524000098548j,
             -.7105305456418785989070935 + .6982266265924524000098548j,
             -.6362427683267827226840153 - .8118875040246347267248508j,
             -.6362427683267827226840153 + .8118875040246347267248508j,
             -.5430983056306302779658129 - .9299947824439872998916657j,
             -.5430983056306302779658129 + .9299947824439872998916657j,
             -.4232528745642628461715044 - 1.055755605227545931204656j,
             -.4232528745642628461715044 + 1.055755605227545931204656j,
             -.2566376987939318038016012 - 1.197982433555213008346532j,
             -.2566376987939318038016012 + 1.197982433555213008346532j]
    elif N == 23:
        p = [-.9066732476324988168207439,
             -.9027564979912504609412993 - .1010534335314045013252480j,
             -.9027564979912504609412993 + .1010534335314045013252480j,
             -.8909283242471251458653994 - .2023024699381223418195228j,
             -.8909283242471251458653994 + .2023024699381223418195228j,
             -.8709469395587416239596874 - .3039581993950041588888925j,
             -.8709469395587416239596874 + .3039581993950041588888925j,
             -.8423805948021127057054288 - .4062657948237602726779246j,
             -.8423805948021127057054288 + .4062657948237602726779246j,
             -.8045561642053176205623187 - .5095305912227258268309528j,
             -.8045561642053176205623187 + .5095305912227258268309528j,
             -.7564660146829880581478138 - .6141594859476032127216463j,
             -.7564660146829880581478138 + .6141594859476032127216463j,
             -.6965966033912705387505040 - .7207341374753046970247055j,
             -.6965966033912705387505040 + .7207341374753046970247055j,
             -.6225903228771341778273152 - .8301558302812980678845563j,
             -.6225903228771341778273152 + .8301558302812980678845563j,
             -.5304922463810191698502226 - .9439760364018300083750242j,
             -.5304922463810191698502226 + .9439760364018300083750242j,
             -.4126986617510148836149955 - 1.065328794475513585531053j,
             -.4126986617510148836149955 + 1.065328794475513585531053j,
             -.2497697202208956030229911 - 1.202813187870697831365338j,
             -.2497697202208956030229911 + 1.202813187870697831365338j]
    elif N == 24:
        p = [-.9055312363372773709269407 - 48440066540478700874836350.0e-27j,
             -.9055312363372773709269407 + 48440066540478700874836350.0e-27j,
             -.8983105104397872954053307 - .1454056133873610120105857j,
             -.8983105104397872954053307 + .1454056133873610120105857j,
             -.8837358034555706623131950 - .2426335234401383076544239j,
             -.8837358034555706623131950 + .2426335234401383076544239j,
             -.8615278304016353651120610 - .3403202112618624773397257j,
             -.8615278304016353651120610 + .3403202112618624773397257j,
             -.8312326466813240652679563 - .4386985933597305434577492j,
             -.8312326466813240652679563 + .4386985933597305434577492j,
             -.7921695462343492518845446 - .5380628490968016700338001j,
             -.7921695462343492518845446 + .5380628490968016700338001j,
             -.7433392285088529449175873 - .6388084216222567930378296j,
             -.7433392285088529449175873 + .6388084216222567930378296j,
             -.6832565803536521302816011 - .7415032695091650806797753j,
             -.6832565803536521302816011 + .7415032695091650806797753j,
             -.6096221567378335562589532 - .8470292433077202380020454j,
             -.6096221567378335562589532 + .8470292433077202380020454j,
             -.5185914574820317343536707 - .9569048385259054576937721j,
             -.5185914574820317343536707 + .9569048385259054576937721j,
             -.4027853855197518014786978 - 1.074195196518674765143729j,
             -.4027853855197518014786978 + 1.074195196518674765143729j,
             -.2433481337524869675825448 - 1.207298683731972524975429j,
             -.2433481337524869675825448 + 1.207298683731972524975429j]
    elif N == 25:
        p = [-.9062073871811708652496104,
             -.9028833390228020537142561 - 93077131185102967450643820.0e-27j,
             -.9028833390228020537142561 + 93077131185102967450643820.0e-27j,
             -.8928551459883548836774529 - .1863068969804300712287138j,
             -.8928551459883548836774529 + .1863068969804300712287138j,
             -.8759497989677857803656239 - .2798521321771408719327250j,
             -.8759497989677857803656239 + .2798521321771408719327250j,
             -.8518616886554019782346493 - .3738977875907595009446142j,
             -.8518616886554019782346493 + .3738977875907595009446142j,
             -.8201226043936880253962552 - .4686668574656966589020580j,
             -.8201226043936880253962552 + .4686668574656966589020580j,
             -.7800496278186497225905443 - .5644441210349710332887354j,
             -.7800496278186497225905443 + .5644441210349710332887354j,
             -.7306549271849967721596735 - .6616149647357748681460822j,
             -.7306549271849967721596735 + .6616149647357748681460822j,
             -.6704827128029559528610523 - .7607348858167839877987008j,
             -.6704827128029559528610523 + .7607348858167839877987008j,
             -.5972898661335557242320528 - .8626676330388028512598538j,
             -.5972898661335557242320528 + .8626676330388028512598538j,
             -.5073362861078468845461362 - .9689006305344868494672405j,
             -.5073362861078468845461362 + .9689006305344868494672405j,
             -.3934529878191079606023847 - 1.082433927173831581956863j,
             -.3934529878191079606023847 + 1.082433927173831581956863j,
             -.2373280669322028974199184 - 1.211476658382565356579418j,
             -.2373280669322028974199184 + 1.211476658382565356579418j]
    else:
        raise ValueError("Bessel Filter not supported for order %s" % N)
    return asarray(z), asarray(p), k
# Map user-facing filter-design names to [analog prototype function,
# minimum-order selection function] pairs; note 'bessel' has no
# order-selection helper, so its list holds only the prototype.
filter_dict = {'butter': [buttap, buttord],
               'butterworth': [buttap, buttord],
               'cauer': [ellipap, ellipord],
               'elliptic': [ellipap, ellipord],
               'ellip': [ellipap, ellipord],
               'bessel': [besselap],
               'cheby1': [cheb1ap, cheb1ord],
               'chebyshev1': [cheb1ap, cheb1ord],
               'chebyshevi': [cheb1ap, cheb1ord],
               'cheby2': [cheb2ap, cheb2ord],
               'chebyshev2': [cheb2ap, cheb2ord],
               'chebyshevii': [cheb2ap, cheb2ord],
               }

# Map band-type aliases (abbreviations and synonyms) to the four canonical
# band-type strings.
band_dict = {'band': 'bandpass',
             'bandpass': 'bandpass',
             'pass': 'bandpass',
             'bp': 'bandpass',
             'bs': 'bandstop',
             'bandstop': 'bandstop',
             'bands': 'bandstop',
             'stop': 'bandstop',
             'l': 'lowpass',
             'low': 'lowpass',
             'lowpass': 'lowpass',
             'lp': 'lowpass',
             'high': 'highpass',
             'highpass': 'highpass',
             'h': 'highpass',
             'hp': 'highpass',
             }
|
vhaasteren/scipy
|
scipy/signal/filter_design.py
|
Python
|
bsd-3-clause
| 124,671
|
[
"Gaussian"
] |
9d38c8ee5dfdf83d6733e302170c3fe3e3ab16d4c7a3b558d92cc59ce6da5f7a
|
# coding: utf-8
# # Tidal currents from ADCIRC u,v tidal constituent netcdf file
# In[3]:
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import iris
import warnings
import pytz
from datetime import datetime
from pandas import date_range
from matplotlib.dates import date2num
from utide import _ut_constants_fname
from utide.utilities import loadbunch
from utide.harmonics import FUV
# In[4]:
# NOTE(review): the next four cells each rebind `ncfile` and the bounding box
# (wl/el/sl/nl = west/east/south/north limits in degrees); only the LAST
# assignment (the NY south-shore dataset below) takes effect.
ncfile = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
          'vdatum_gulf_of_maine/adcirc54_38_orig.nc')
wl = -70.7234; el = -70.4532; sl = 41.4258; nl = 41.5643  # Vineyard sound 2.
# In[5]:
ncfile = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
          'FLsab_adcirc54.nc')
print(ncfile)
wl = -85.25; el = -84.75; sl = 29.58; nl = 29.83   # Apalachicola Bay
# In[6]:
ncfile = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
          'DEdelches01_adcirc54.nc')
print(ncfile)
wl = -74.537378; el = -74.0315462; sl = 39.354624; nl = 39.704567   # South Bay, NY
# In[7]:
ncfile = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
          'NYsndbght02_adcirc54.nc')
print(ncfile)
sl = 40.5457896; wl = -73.664; nl = 40.6990759; el = -73.3376574
# In[8]:
# Load every cube in the remote ADCIRC constituent file (warnings from the
# non-CF metadata are silenced).
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    cubes = iris.load_raw(ncfile)
print(cubes)
# In[9]:
# Conversion factors from m/s, and the constituents to synthesize.
units = dict({'knots': 1.9438, 'm/s': 1.0})
consts = ['STEADY', 'M2', 'S2', 'N2', 'K1', 'O1', 'P1', 'M4', 'M6']
# In[10]:
# Hourly UTC time base for the animation frames.
start = datetime.strptime('18-Sep-2015 05:00',
                          '%d-%b-%Y %H:%M').replace(tzinfo=pytz.utc)
stop = datetime.strptime('19-Sep-2015 05:00',  # '18-Sep-2015 18:00'
                         '%d-%b-%Y %H:%M').replace(tzinfo=pytz.utc)
dt = 1.0  # Hours.  NOTE(review): `dt` appears unused; the 1-hour step is
          # hard-coded in the date_range frequency below.
glocals = date_range(start, stop, freq='1H').to_pydatetime()
ntimes = len(glocals)
# In[11]:
def parse_string(name):
    """Join an array of byte fields into a single whitespace-trimmed string."""
    return ''.join(part.decode().strip() for part in name.tolist())
# In[12]:
# Decode the constituent names stored as fixed-width byte arrays.
names = []
data = cubes.extract_strict('Tide Constituent').data
# In[13]:
for name in data:
    names.append(parse_string(name))
# In[14]:
#from scipy.spatial import Delaunay
# Full-mesh node coordinates, depths and constituent frequencies.
depth = cubes.extract_strict('depth').data
latf = cubes.extract_strict('latitude').data
lonf = cubes.extract_strict('longitude').data
frequency = cubes.extract_strict('Tide Frequency').data
# Not sure why this is not working.
# trif = cubes.extract_strict('Horizontal Element Incidence List').data
#trif = Delaunay(zip(lonf, latf)).vertices
# In[15]:
# Find indices in box.
import numpy as np
inbox = np.logical_and(np.logical_and(lonf >= wl,
                                      lonf <= el),
                       np.logical_and(latf >= sl,
                                      latf <= nl))
lon = lonf[inbox]
lat = latf[inbox]
# In[16]:
lon.shape
# In[17]:
# UTide's table of known constituents (names, frequencies, ...).
con_info = loadbunch(_ut_constants_fname)['const']
# In[18]:
# Find the indices of the tidal constituents.
k = 0
ind_nc, ind_ttide = [], []
const_name = [e.strip() for e in con_info['name'].tolist()]
for name in consts:
    try:
        if name == 'STEADY':
            # The mean flow is called 'Z0' in the UTide tables.
            indx = const_name.index('Z0')
        else:
            indx = const_name.index(name)
        k += 1
        ind_ttide.append(indx)
        ind_nc.append(names.index(name))
    except ValueError:
        pass  # `const` not found.
# In[19]:
ua = cubes.extract_strict('Eastward Water Velocity Amplitude')
up = cubes.extract_strict('Eastward Water Velocity Phase')
va = cubes.extract_strict('Northward Water Velocity Amplitude')
vp = cubes.extract_strict('Northward Water Velocity Phase')
# In[33]:
ua.coord_system
# In[21]:
# Subset amplitude/phase to the in-box nodes and requested constituents.
uamp = ua.data[0, inbox, :][:, ind_nc]
vamp = va.data[0, inbox, :][:, ind_nc]
upha = up.data[0, inbox, :][:, ind_nc]
vpha = vp.data[0, inbox, :][:, ind_nc]
# In[22]:
ind_nc
# In[23]:
freq_nc = frequency[ind_nc]
# In[24]:
print(uamp.shape)
print(freq_nc.shape)
# In[25]:
freq_ttide = con_info['freq'][ind_ttide]
# In[26]:
t_tide_names = np.array(const_name)[ind_ttide]
# In[27]:
omega_ttide = 2*np.pi * freq_ttide  # Convert from radians/s to radians/hour.
# NOTE(review): assumes the file stores frequency in rad/s; verify units in
# the source dataset before trusting the 3600 factor.
omega = freq_nc * 3600
rllat = 55  # Reference latitude for 3rd order satellites (degrees) (55 is fine always)
# In[28]:
# Convert to Matlab datenum.
# (Soon UTide will take python datetime objects.)
jd_start = date2num(start) + 366.1667
# In[29]:
# NB: I am not a 100% sure if this is identical to what we had with t_tide.
# ngflgs -> [NodsatLint NodsatNone GwchLint GwchNone]
# NOTE(review): `lat=55` is hard-coded here instead of using `rllat` above.
v, u, f = FUV(t=np.array([jd_start]), tref=np.array([0]),
              lind=np.array([ind_ttide]),
              lat=55, ngflgs=[0, 0, 0, 0])
# In[30]:
# Convert phase in radians.
v, u, f = map(np.squeeze, (v, u, f))
v = v * 2 * np.pi
u = u * 2 * np.pi
# Elapsed time of each frame in hours since the first frame.
thours = np.array([d.total_seconds() for d in
                   (glocals - glocals[0])]) / 60 / 60.
# In[62]:
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
tiler = cimgt.OSM()
fig, ax = plt.subplots(figsize=(9, 9),
                       subplot_kw=dict(projection=ccrs.PlateCarree()))
#ax.coastlines(resolution='10m')
ax.set_extent([wl, el, sl, nl])
ax.add_image(tiler, 10)
# Synthesize the velocity field for the first frame (k = 0):
# amplitude * cos(astronomical phase + omega*t - constituent phase).
k=0
U = (f * uamp * np.cos(v + thours[k] * omega + u - upha * np.pi/180)).sum(axis=1)
V = (f * vamp * np.cos(v + thours[k] * omega + u - vpha * np.pi/180)).sum(axis=1)
w = units['knots'] * (U + 1j * V)
wf = np.NaN * np.ones_like(lonf, dtype=w.dtype)
wf[inbox] = w
# FIXME: Cannot use masked arrays and tricontour!
# wf = ma.masked_invalid(wf)
# cs = ax.tricontour(lonf, latf, trif, np.abs(wf).filled(fill_value=0))
# fig.colorbar(cs)
#ut, vt = tiler.crs.transform_vectors(ccrs.PlateCarree(), lon,lat,U,V)
# Draw a random subsample of the in-box nodes to keep the quiver readable.
subsample = 3
ind = list(range(len(lon)))
np.random.shuffle(ind)
Nvec = int(len(ind) / subsample)
idv = ind[:Nvec]
q = plt.quiver(lon[idv], lat[idv], U[idv], V[idv], scale=10, transform=ccrs.PlateCarree())
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                  linewidth=2, color='gray', alpha=0.5, linestyle='--')
#plt.axis([wl, el, sl, nl])  # Vineyard sound 2.
#q.set_title('{}'.format(glocals[k]))
from JSAnimation import IPython_display
from matplotlib.animation import FuncAnimation
def update_figure(k):
    """FuncAnimation callback: redraw the tidal-current quiver for frame `k`.

    Reads the module-level constituent arrays (`uamp`, `upha`, ...) and the
    shared `ax`; `k` indexes the `thours`/`glocals` time base.
    """
    ax.cla()
    # Synthesize the velocity field at hour thours[k]: sum over constituents
    # of amplitude * cos(astronomical phase + omega*t - phase-lag), with the
    # stored phases converted from degrees to radians.
    U = (f * uamp * np.cos(v + thours[k] * omega + u - upha * np.pi/180)).sum(axis=1)
    V = (f * vamp * np.cos(v + thours[k] * omega + u - vpha * np.pi/180)).sum(axis=1)
    w = units['knots'] * (U + 1j * V)
    wf = np.NaN * np.ones_like(lonf, dtype=w.dtype)
    wf[inbox] = w
    # FIXME: Cannot use masked arrays and tricontour!
    # wf = ma.masked_invalid(wf)
    # cs = ax.tricontour(lonf, latf, trif, np.abs(wf).filled(fill_value=0))
    # fig.colorbar(cs)
    q = ax.quiver(lon, lat, U, V, scale=40)
    # BUG FIX: the original called ax.axis(bbox), but `bbox` is never defined
    # anywhere in this script (NameError on the first rendered frame); use
    # the map extent variables, matching the commented plt.axis call above.
    ax.axis([wl, el, sl, nl])
    ax.set_title('{}'.format(glocals[k]))
fig, ax = plt.subplots(figsize=(7, 5))
# NOTE(review): the FuncAnimation return value is discarded.  In a notebook
# the cell's last expression is displayed, but as a plain script the
# animation object can be garbage-collected before it ever renders —
# consider binding it, e.g. `anim = FuncAnimation(...)`.
FuncAnimation(fig, update_figure, interval=100, frames=ntimes)
# In[ ]:
|
rsignell-usgs/tri_tide_movie
|
python/tri_tide_movie.py
|
Python
|
cc0-1.0
| 6,977
|
[
"NetCDF"
] |
6b9490b3e125b8e86251ce3048c84c19af3ba8728e21c707bac925f5c17b24a8
|
'''
Created on Jan 9, 2017
This module contains meta data and access functions for normals and monthly historical time-series data from the
Canadian Forest Service (Natural Resources Canada, NRCan)
@author: Andre R. Erler, GPL v3
'''
# external imports
import numpy as np
import numpy.ma as ma
import os
# internal imports
from geodata.base import Variable, Axis
from geodata.gdal import GridDefinition, addGDALtoVar
from datasets.common import getRootFolder, loadObservations, transformMonthly, addLengthAndNamesOfMonth, monthlyTransform, addLandMask
from geodata.misc import DatasetError, VariableError, AxisError
from utils.nctools import writeNetCDF
from datasets.misc import loadXRDataset
## NRCan Meta-data
dataset_name = 'NRCan'
root_folder = getRootFolder(dataset_name=dataset_name) # get dataset root folder based on environment variables
# NRCan grid definitions
# make GridDefinition instances
# N.B.: geotransform = (x_origin, dx, x_rotation, y_origin, y_rotation, dy); size = (nx, ny)
# grid families: NA = North America, CA = Canada only, SON = Southern Ontario;
# the number is the inverse resolution in degrees (12 -> 1/12 deg ~ 10 km, 60 -> 1/60 deg ~ 2 km)
geotransform_NA12 = (-168.0, 1./12., 0.0, 25.0, 0.0, 1./12.); size_NA12 = (1392, 720) # (x,y) map size of NRCan grid
NRCan_NA12_grid = GridDefinition(name=dataset_name, projection=None, geotransform=geotransform_NA12, size=size_NA12)
geotransform_NA60 = (-168.0, 1./60., 0.0, 25.0, 0.0, 1./60.); size_NA60 = (6960, 3600) # (x,y) map size of NRCan grid
NRCan_NA60_grid = GridDefinition(name=dataset_name, projection=None, geotransform=geotransform_NA60, size=size_NA60)
geotransform_CA12 = (-141.0, 1./12., 0.0, 41.0, 0.0, 1./12.); size_CA12 = (1068, 510) # (x,y) map size of NRCan grid
NRCan_CA12_grid = GridDefinition(name=dataset_name, projection=None, geotransform=geotransform_CA12, size=size_CA12)
geotransform_CA24 = (-141.0, 1./24., 0.0, 41.0, 0.0, 1./24.); size_CA24 = (2136, 1008) # (x,y) map size of NRCan grid
NRCan_CA24_grid = GridDefinition(name=dataset_name, projection=None, geotransform=geotransform_CA24, size=size_CA24)
geotransform_SON60 = (-85.0, 1./60., 0.0, 41.0, 0.0, 1./60.); size_SON60 = (660, 360) # (x,y) map size of NRCan grid
NRCan_SON60_grid = GridDefinition(name=dataset_name, projection=None, geotransform=geotransform_SON60, size=size_SON60)
# names of all pre-defined native NRCan grids (used by checkGridRes to recognize grid strings)
NRCan_grids = ['NA12','NA60','CA12','CA24','SON60']
# default grid (NA12)
NRCan_grid = NRCan_NA12_grid; geotransform = geotransform_NA12; size = size_NA12
# variable attributes and names (only applied to original time-series!)
# maps source variable keys to standardized GeoPy names and (SI) units
varatts = dict(Tmax = dict(name='Tmax', units='K'), # 2m maximum temperature
               Tmin = dict(name='Tmin', units='K'), # 2m minimum temperature
               precip = dict(name='precip', units='kg/m^2/s'), # total precipitation
               pet = dict(name='pet', units='kg/m^2/s'), # potential evapo-transpiration
               liqprec = dict(name='liqprec', units='kg/m^2/s'), # liquid precipitation (rain)
               snowh = dict(name='snowh', units='m'), # snow depth
               SWD = dict(name='SWDNB', units='W/m^2', scalefactor=30.4e6), # solar radiation, corrected (MJ/day->J/month)
               SWDNB = dict(name='SWDNB', units='W/m^2'), # solar radiation
               # diagnostic variables (computed from the fields above; see loadASCII_TS)
               T2 = dict(name='T2', units='K'), # 2m average temperature
               solprec = dict(name='solprec', units='kg/m^2/s'), # solid precipitation (snowfall)
               snow = dict(name='snow', units='kg/m^2'), # snow water equivalent
               snwmlt = dict(name='snwmlt', units='kg/m^2/s'), # snow melt (rate)
               snow_acc = dict(name='snow_acc', units='kg/m^2/s'), # rate of change of snowpack - in lieu of actual snowmelt
               liqwatflx = dict(name='liqwatflx', units='kg/m^2/s'), # liquid water forcing (rate)
               landmask = dict(name='landmask', units='N/A'), # the land mask...
               # axes (don't have their own file; listed in axes)
               time = dict(name='time', units='month',), # time coordinate
               # N.B.: the time-series time offset has to be chosen such that 1979 begins with the origin (time=0)
               lon = dict(name='lon', units='deg E'), # geographic longitude field
               lat = dict(name='lat', units='deg N')) # geographic latitude field
# N.B.: shallow copy -- the inner attribute dicts are shared with varatts
tsvaratts = varatts.copy()
# list of variables to load
varlist = list(varatts.keys()) # also includes coordinate fields
# variable and file lists settings
nofile = ('T2','solprec','lat','lon','time') # variables that don't have their own files
## Functions to load different types of NRCan datasets
def checkGridRes(grid, resolution, snow_density=None, period=None, lclim=False):
    ''' Validate the grid/resolution selection and fold native NRCan grid names into the
        resolution string; optionally append a (validated) snow density class.
        Returns the (possibly modified) grid and resolution. '''
    # a native NRCan grid name passed as 'grid' really selects a resolution
    if grid is not None and grid.upper() in NRCan_grids:
        grid, resolution = None, grid.lower()
    if resolution is None:
        resolution = 'na12'  # default resolution
    if not isinstance(resolution, str):
        raise TypeError(resolution)
    # a time period implies a climatology
    lclim = lclim or (period is not None)
    # make sure the requested resolution exists for the requested dataset type
    # (LTM_grids / TS_grids are module-level lists -- presumably defined later in this module)
    if lclim:
        if resolution not in LTM_grids and resolution.upper() not in LTM_grids:
            raise DatasetError("Selected resolution '{:s}' is not available for long-term means!".format(resolution))
    elif resolution not in TS_grids and resolution.upper() not in TS_grids:
        raise DatasetError("Selected resolution '{:s}' is not available for historical time-series!".format(resolution))
    # a snow density class is appended to the resolution string (after validation)
    if snow_density:
        getSnowDensity(snow_class=snow_density, lraise=True)  # raises on an invalid class
        resolution = snow_density if resolution is None else resolution + '_' + snow_density
    return grid, resolution
# pre-processed climatology and timeseries files (varatts etc. should not be necessary)
clim_period = (1970,2000) # default time period for long-term means
#clim_period = (1980,2010) # default time period for long-term means
avgfolder = root_folder + 'nrcanavg/'
avgfile = 'nrcan{0:s}_clim{1:s}.nc' # the filename needs to be extended by %('_'+resolution,'_'+period)
tsfile = 'nrcan{0:s}_monthly.nc' # extend with grid type only
# daily data
daily_folder = root_folder + dataset_name.lower()+'_daily/'
netcdf_filename = dataset_name.lower()+'_{RES:s}_{VAR:s}_daily.nc' # extend with resolution and variable name
netcdf_dtype = np.dtype('<f4') # little-endian 32-bit float
netcdf_settings = dict(chunksizes=(8,256,256)) # NetCDF-4 chunk sizes (presumably time,y,x -- confirm)
def loadNRCan_Daily(varname=None, varlist=None, grid=None, resolution=None, shape=None, station=None,
                    resampling=None, varatts=None, varmap=None, lgeoref=True, geoargs=None,
                    chunks=True, multi_chunks=None, lxarray=True, lgeospatial=True, **kwargs):
    ''' Load daily NRCan data from NetCDF-4 files as an xarray Dataset with projection info attached. '''
    if not (lxarray and lgeospatial):
        raise NotImplementedError("Only loading via geospatial.xarray_tools is currently implemented.")
    # pick a default resolution based on the grid family (Southern Ontario vs. Canada-wide)
    if resolution is None:
        resolution = 'SON60' if (grid and grid[:3] in ('son', 'snw')) else 'CA12'
    # delegate to the generic xarray loader with NRCan-specific defaults
    return loadXRDataset(varname=varname, varlist=varlist, dataset='NRCan', grid=grid, resolution=resolution,
                         shape=shape, station=station, default_varlist=res_varlists.get(resolution, None),
                         resampling=resampling, varatts=varatts, varmap=varmap, lgeoref=lgeoref,
                         geoargs=geoargs, chunks=chunks, multi_chunks=multi_chunks, **kwargs)
# function to load these files...
def loadNRCan(name=dataset_name, title=dataset_name, resolution=None, period=clim_period, grid=None, varlist=None,
              snow_density=None, varatts=None, folder=avgfolder, filelist=None, lautoregrid=False, filemode='r'):
    ''' Get the pre-processed monthly NRCan climatology as a DatasetNetCDF. '''
    grid, resolution = checkGridRes(grid, resolution, snow_density=snow_density, period=period, lclim=True)
    # delegate to the generic observation loader with NRCan-specific settings
    return loadObservations(name=name, title=title, folder=folder, projection=None, resolution=resolution,
                            period=period, grid=grid, varlist=varlist, varatts=varatts, filepattern=avgfile,
                            griddef=NRCan_NA12_grid, filelist=filelist, lautoregrid=lautoregrid,
                            mode='climatology', filemode=filemode)
# function to load Time-series (monthly)
def loadNRCan_TS(name=dataset_name, title=dataset_name, grid=None, resolution=None, varlist=None, varatts=None,
                 snow_density=None, folder=avgfolder, filelist=None, lautoregrid=False, filemode='r'):
    ''' Get the pre-processed monthly NRCan time-series as a DatasetNetCDF. '''
    grid, resolution = checkGridRes(grid, resolution, snow_density=snow_density, period=None, lclim=False)
    # delegate to the generic observation loader with NRCan-specific settings
    # NOTE(review): lautoregrid is hard-coded to False here, ignoring the parameter -- confirm intent
    return loadObservations(name=name, title=title, folder=folder, projection=None, period=None, grid=grid,
                            varlist=varlist, varatts=varatts, filepattern=tsfile, filelist=filelist,
                            resolution=resolution, lautoregrid=False, mode='time-series', filemode=filemode)
# function to load station climatologies
def loadNRCan_Stn(name=dataset_name, title=dataset_name, period=clim_period, station=None, resolution=None, varlist=None,
                  snow_density=None, varatts=None, folder=avgfolder, filelist=None):
    ''' Get the pre-processed monthly NRCan climatology as a DatasetNetCDF at station locations. '''
    # grid selection is not applicable to station data; only the resolution string is kept
    _, resolution = checkGridRes(None, resolution, snow_density=snow_density, period=period, lclim=True)
    # delegate to the generic observation loader with NRCan-specific settings
    return loadObservations(name=name, title=title, folder=folder, projection=None, period=period,
                            station=station, varlist=varlist, varatts=varatts, filepattern=avgfile,
                            filelist=filelist, resolution=resolution, lautoregrid=False, mode='climatology')
# function to load station time-series
def loadNRCan_StnTS(name=dataset_name, title=dataset_name, station=None, resolution=None, varlist=None, varatts=None,
                    snow_density=None, folder=avgfolder, filelist=None):
    ''' Get the pre-processed monthly NRCan time-series as a DatasetNetCDF at station locations. '''
    # grid selection is not applicable to station data; only the resolution string is kept
    _, resolution = checkGridRes(None, resolution, snow_density=snow_density, period=None, lclim=False)
    # delegate to the generic observation loader with NRCan-specific settings
    return loadObservations(name=name, title=title, folder=folder, projection=None, period=None,
                            station=station, varlist=varlist, varatts=varatts, filepattern=tsfile,
                            filelist=filelist, resolution=resolution, lautoregrid=False, mode='time-series')
# function to load regionally averaged climatologies
def loadNRCan_Shp(name=dataset_name, title=dataset_name, period=clim_period, shape=None, resolution=None, varlist=None,
                  snow_density=None, varatts=None, folder=avgfolder, filelist=None, lencl=False):
    ''' Get the pre-processed monthly NRCan climatology as a DatasetNetCDF averaged over regions. '''
    # grid selection is not applicable to shape-averaged data; only the resolution string is kept
    _, resolution = checkGridRes(None, resolution, snow_density=snow_density, period=period, lclim=True)
    # delegate to the generic observation loader with NRCan-specific settings
    return loadObservations(name=name, title=title, folder=folder, projection=None, period=period,
                            shape=shape, lencl=lencl, station=None, varlist=varlist, varatts=varatts,
                            filepattern=avgfile, filelist=filelist, resolution=resolution,
                            lautoregrid=False, mode='climatology')
# function to load regional/shape time-series
def loadNRCan_ShpTS(name=dataset_name, title=dataset_name, shape=None, resolution=None, varlist=None, varatts=None,
                    snow_density=None, folder=avgfolder, filelist=None, lencl=False):
    ''' Get the pre-processed monthly NRCan time-series as a DatasetNetCDF averaged over regions. '''
    # grid selection is not applicable to shape-averaged data; only the resolution string is kept
    _, resolution = checkGridRes(None, resolution, snow_density=snow_density, period=None, lclim=False)
    # delegate to the generic observation loader with NRCan-specific settings
    return loadObservations(name=name, title=title, folder=folder, projection=None, shape=shape,
                            station=None, lencl=lencl, varlist=varlist, varatts=varatts,
                            filepattern=tsfile, filelist=filelist, resolution=resolution,
                            lautoregrid=False, mode='time-series', period=None)
## snow density estimates
def getSnowDensity(snow_class, lraise=True):
''' '''
# estimates from the Canadian Meteorological Centre for maritime climates (Table 3):
# https://nsidc.org/data/NSIDC-0447/versions/1
# a factor of 1000 has been applied, because snow depth is in m (and not mm)
if snow_class.lower() == 'tundra':
# Tundra snow cover
density = np.asarray([0.2303, 0.2427, 0.2544, 0.2736, 0.3117, 0.3693, 0.3693, 0.3693, 0.2, 0.2, 0.2107, 0.2181], dtype=np.float32)*1000.
elif snow_class.lower() == 'taiga':
# Taiga snow cover
density = np.asarray([0.1931, 0.2059, 0.2218, 0.2632, 0.3190, 0.3934, 0.3934, 0.3934, 0.16, 0.16, 0.1769, 0.1798], dtype=np.float32)*1000.
elif snow_class.lower() == 'maritime':
# Maritime snow cover
density = np.asarray([0.2165, 0.2485, 0.2833, 0.332, 0.3963, 0.501, 0.501, 0.501, 0.16, 0.16, 0.1835, 0.1977], dtype=np.float32)*1000.
elif snow_class.lower() == 'ephemeral':
# Ephemeral snow cover
density = np.asarray([0.3168, 0.3373, 0.3643, 0.4046, 0.4586, 0.5098, 0.5098, 0.5098, 0.25, 0.25, 0.3, 0.3351], dtype=np.float32)*1000.
elif snow_class.lower() == 'prairies':
# Prairie snow cover
density = np.asarray([0.2137, 0.2416, 0.2610, 0.308, 0.3981, 0.4645, 0.4645, 0.4645, 0.14, 0.14, 0.1616, 0.1851], dtype=np.float32)*1000.
elif snow_class.lower() == 'alpine':
# Alpine snow cover
density = np.asarray([0.2072, 0.2415, 0.2635, 0.312, 0.3996, 0.4889, 0.4889, 0.4889, 0.16, 0.16, 0.172, 0.1816], dtype=np.float32)*1000.
elif lraise:
raise ValueError("Value '{}' for snow denisty class not defined.".format(snow_class))
return density
## functions to load ASCII data and generate complete GeoPy datasets
# a universal load function for normals and historical timeseries; also computes some derived variables, and combines NA and CA grids
def loadASCII_TS(name=None, title=None, atts=None, derived_vars=None, varatts=None, NA_grid=None, CA_grid=None, lskipNA=False,
                 merged_axis=None, time_axis='time', resolution=None, grid_defs=None, period=None, var_pattern=None,
                 snow_density='maritime', grid_pattern=None, vardefs=None, axdefs=None, lfeedback=True):
    ''' Load NRCan time-series data from ASCII files, merge the CA and NA grids and compute
        some additional (derived) variables; return a GeoPy Dataset.

        Key arguments:
          vardefs/axdefs:  raster variable/axis definitions (each vardef carries a 'grid' key
                           selecting the NA or CA grid it lives on)
          derived_vars:    names of variables to compute from the loaded fields; supported:
                           'T2', 'solprec', 'snow', 'snwmlt', 'liqwatflx'
          merged_axis:     Axis used to merge split time axes (e.g. year/month) into one
          snow_density:    CMC snow-cover class used to convert snow depth to SWE
          lskipNA:         if True, tolerate an empty NA variable set (CA-only datasets)
    '''
    from utils.ascii import rasterDataset
    # determine grids / resolution
    if grid_defs is None:
        grid_defs = grid_def # module-level grid registry (presumably defined elsewhere in this module -- TODO confirm)
    if resolution is not None:
        resolution = str(resolution)
        NA_grid = 'NA{:s}'.format(resolution) if NA_grid is None else NA_grid.upper()
        CA_grid = 'CA{:s}'.format(resolution) if CA_grid is None else CA_grid.upper()
    # separate variables by grid (some fields are only available on the Canada-only grid)
    NA_vardefs = dict(); CA_vardefs = dict()
    for key,var in list(vardefs.items()):
        var = var.copy(); grid = var.pop('grid',None).upper()
        if grid not in grid_defs:
            # skip variable
            print("Warning: grid '{}' for variable '{}'('{}') not found - variable will be skipped!".format(grid,key,var['name']))
        elif grid == NA_grid: NA_vardefs[key] = var
        elif grid == CA_grid: CA_vardefs[key] = var
        else: raise VariableError(grid)
    # determine period extension for the file/folder pattern
    prdstr = '_{0:04d}-{1:04d}'.format(period[0]+1, period[1]) if period is not None else ''
    # load NA grid
    if NA_vardefs:
        dataset = rasterDataset(name=name, title=title, vardefs=NA_vardefs, axdefs=axdefs, atts=atts, projection=None,
                                griddef=grid_defs[NA_grid], lgzip=None, lgdal=True, lmask=True, fillValue=None,
                                lskipMissing=True, lgeolocator=True, time_axis=time_axis, lfeedback=lfeedback,
                                file_pattern=grid_pattern.format(GRID=NA_grid,PRDSTR=prdstr)+var_pattern )
    else:
        if lskipNA:
            dataset = None
        else:
            raise NotImplementedError("North America grid '{}' not defined; could either skip or construct from pickle.".format(NA_grid))
    # load CA grid
    if CA_vardefs:
        ca_ds = rasterDataset(name=name, title=title, vardefs=CA_vardefs, axdefs=axdefs, atts=atts, projection=None,
                              griddef=grid_defs[CA_grid], lgzip=None, lgdal=True, lmask=True, fillValue=None,
                              lskipMissing=True, lgeolocator=False, time_axis=time_axis, lfeedback=lfeedback,
                              file_pattern=grid_pattern.format(GRID=CA_grid,PRDSTR=prdstr)+var_pattern )
        if dataset is None:
            dataset = ca_ds
        else:
            # merge grids: embed the (smaller) CA grid into the NA grid
            naaxes = dataset.axes
            nagt = dataset.geotransform; cagt = ca_ds.geotransform
            assert nagt[2] == nagt[4] == cagt[2] == cagt[4] == 0 # no rotation/shear terms
            assert nagt[1] == cagt[1] and nagt[5] == cagt[5] # identical grid spacing
            ios = int( ( cagt[0] - nagt[0] ) / nagt[1] ) # x-offset of CA origin within NA grid
            jos = int( ( cagt[3] - nagt[3] ) / nagt[5] ) # y-offset of CA origin within NA grid
            nashp = dataset.mapSize # mapSize has the correct axis order (y,x)
            caje,caie = ca_ds.mapSize # axis order is (y,x)
            # create new variables on the NA grid, masked outside the CA sub-domain
            for key,var in list(ca_ds.variables.items()):
                # create new data array
                assert var.shape[-2:] == (caje,caie)
                data = np.ma.empty(var.shape[:-2]+nashp, dtype=var.dtype) # use the shape of the NA grid and other axes from the original
                data[:] = np.ma.masked # everything that is not explicitly assigned, shall be masked
                data[...,jos:jos+caje,ios:ios+caie] = var.data_array # assign partial data
                # figure out axes and create Variable
                axes = [naaxes[ax.name] for ax in var.axes]
                newvar = Variable(name=key, units=var.units, axes=axes, data=data, atts=var.atts, plot=var.plot)
                newvar = addGDALtoVar(newvar, griddef=dataset.griddef,)
                dataset.addVariable(newvar, copy=False)
    else:
        pass # can be skipped - Canada doesn't matter ;-)
    # snow needs some special care: replace mask with mask from rain and set the rest to zero
    if 'snowh' in dataset:
        assert 'liqprec' in dataset
        assert dataset.snowh.shape == dataset.liqprec.shape, dataset
        snwd = ma.masked_where(condition=dataset.liqprec.data_array.mask, a=dataset.snowh.data_array.filled(0), copy=False)
        dataset.snowh.data_array = snwd # reassignment is necessary, because filled() creates a copy
        dataset.snowh.fillValue = dataset.liqprec.fillValue
        assert np.all( dataset.snowh.data_array.mask == dataset.liqprec.data_array.mask ), dataset.snowh.data_array
        assert dataset.snowh.fillValue == dataset.liqprec.fillValue, dataset.snowh.data_array
    # merge time axes (for historical timeseries)
    if merged_axis:
        if merged_axis.name == 'time':
            if not 'merged_axes' in merged_axis.atts:
                raise AxisError('No list/tuple of merge_axes specified in merged_axis atts!')
            merge_axes = merged_axis.atts['merged_axes']
            dataset = dataset.mergeAxes(axes=merge_axes, new_axis=merged_axis, axatts=None, asVar=True, linplace=True,
                                        lcheckAxis=False, lcheckVar=None, lvarall=True, ldsall=True, lstrict=True)
    # compute some secondary/derived variables
    if derived_vars:
        # snow density estimates are used by several derived variables; compute them once up-front
        # (bugfix: previously density/density_note were only assigned inside the 'snow' branch,
        # causing a NameError if 'snwmlt' or 'liqwatflx' was computed without 'snow' preceding
        # it in derived_vars)
        if any( dv in derived_vars for dv in ('snow','snwmlt','liqwatflx') ):
            # estimates from the Canadian Meteorological Centre (see getSnowDensity)
            density = getSnowDensity(snow_density)
            density_note = "Snow density extimates from CMC for {:s} snow cover (Tab. 3): https://nsidc.org/data/NSIDC-0447/versions/1#title15".format(snow_density.title())
        for var in derived_vars:
            # don't overwrite existing variables
            if var in dataset: raise DatasetError(var)
            # 2m Temperature as mean of diurnal min/max temperature
            if var == 'T2':
                if not ( 'Tmin' in dataset and 'Tmax' in dataset ): # check prerequisites
                    raise VariableError("Prerequisites for '{:s}' not found.\n{}".format(var,dataset))
                # compute values and add to dataset
                dataset[var] = ( dataset.Tmax + dataset.Tmin ) / 2. # simple average
            # Solid Precipitation (snow) as difference of total and liquid precipitation (rain)
            elif var == 'solprec':
                if not ( 'precip' in dataset and 'liqprec' in dataset ): # check prerequisites
                    raise VariableError("Prerequisites for '{:s}' not found.\n{}".format(var,dataset))
                # compute values and add to dataset
                newvar = dataset.precip - dataset.liqprec # simple difference
                newvar.data_array.clip(min=0, out=newvar.data_array) # clip values smaller than zero (in-place)
                dataset[var] = newvar
            # Snow Water Equivalent from snow depth and a monthly density climatology
            elif var == 'snow':
                if not 'snowh' in dataset: # check prerequisites
                    raise VariableError("Prerequisites for '{:s}' not found.\n{}".format(var,dataset))
                # compute values and add to dataset (density was computed above)
                newvar = monthlyTransform(var=dataset.snowh.copy(deepcopy=True), lvar=True, linplace=True, scalefactor=density)
                newvar.atts['long_name'] = 'Snow Water Equivalent at the end of the month.'
                newvar.atts['note'] = density_note
                dataset[var] = newvar
            # Snowmelt as residual of snow fall and snow accumulation (water equivalent) changes
            elif var == 'snwmlt':
                if not ( 'solprec' in dataset and 'snow' in dataset ): # check prerequisites
                    raise VariableError("Prerequisites for '{:s}' not found.\n{}".format(var,dataset))
                snow = dataset.snow; tax = snow.axes[0]; swe = snow.data_array
                # NOTE(review): this condition only raises for a 12-element axis NOT named 'time';
                # presumably it should reject anything that is not a 12-month climatology -- confirm
                if tax.name != 'time' and len(tax) == 12:
                    raise NotImplementedError("Computing differences is currently only implemented for climatologies.")
                # compute backward differences with wrap-around (climatology)
                delta = ma.diff(swe, axis=0); dd = ( swe[0,:] - swe[-1,:] ).reshape((1,)+swe.shape[1:])
                assert dd.ndim == swe.ndim
                assert np.all( dd.mask[0,:] == swe.mask[0,:] ), dd
                #data = -1 * ( ma.concatenate((dd,delta), axis=0) + ma.concatenate((delta,dd), axis=0) ) / 2.
                data = -1 * ma.concatenate((dd,delta), axis=0)
                # N.B.: snow values are already at the end of the month, so differences are average snowmelt over the month
                # create snowmelt variable and do some conversions
                newvar = addGDALtoVar(Variable(data=data, axes=snow.axes, name=var, units='kg/m^2/month'), griddef=dataset.griddef)
                newvar = transformMonthly(var=newvar, slc=None, l365=False, lvar=True, linplace=True)
                newvar += dataset.solprec # add that in-place as well, but after transforming monthly SWE change to SI rate
                newvar.data_array.clip(min=0, out=newvar.data_array) # clip values smaller than zero (in-place)
                newvar.atts['note'] = density_note
                dataset[var] = newvar
                ## normalize snowmelt so that it does not exceed snow fall
                r = dataset.snwmlt.mean(axis=0,keepdims=True,asVar=False)/dataset.solprec.mean(axis=0,keepdims=True,asVar=False)
                rm = r.mean()
                print(("\nSnowmelt to snowfall ratio: {}\n".format(rm)))
                if rm > 1:
                    #r0 = dataset.snwmlt.mean(axis=0,keepdims=True,asVar=False)/dataset.solprec.mean(axis=0,keepdims=True,asVar=False)
                    dataset.snwmlt.data_array /= r # normalize to total snow fall annually and grid point-wise
                    assert np.ma.allclose(dataset.snwmlt.mean(axis=0,asVar=False), dataset.solprec.mean(axis=0,asVar=False)), dataset.snwmlt.mean()/dataset.solprec.mean()
                    # add snow ratio as diagnostic
                    atts = dict(name='ratio', units='', long_name='Ratio of Snowfall to Snowmelt')
                    dataset += addGDALtoVar(Variable(data=r.squeeze(), axes=snow.axes[1:], atts=atts), griddef=dataset.griddef)
            elif var == 'liqwatflx':
                # surface water forcing (not including ET)
                if not ( 'liqprec' in dataset and 'snwmlt' in dataset ): # check prerequisites
                    raise VariableError("Prerequisites for '{:s}' not found.\n{}".format(var,dataset))
                # create variable and compute data
                assert dataset.liqprec.units == 'kg/m^2/s', dataset.liqprec.units
                assert dataset.snwmlt.units == 'kg/m^2/s', dataset.snwmlt.units
                data = dataset.liqprec[:] + dataset.snwmlt[:]
                newvar = addGDALtoVar(Variable(data=data, axes=dataset.liqprec.axes, name=var, units='kg/m^2/s'), griddef=dataset.griddef)
                newvar.data_array.clip(min=0, out=newvar.data_array) # clip values smaller than zero (in-place)
                newvar.atts['note'] = density_note
                dataset[var] = newvar
            else: raise VariableError(var)
            # for completeness, add attributes
            dataset[var].atts.update(varatts[var])
            dataset[var].data_array._fill_value = dataset[var].fillValue # sync private fill value (GeoPy internals)
    # add length and names of month
    if dataset.hasAxis('time') and len(dataset.time) == 12:
        addLengthAndNamesOfMonth(dataset) # basically only works for climatologies
    addLandMask(dataset, varname='precip', maskname='landmask', atts=None) # assumes 'precip' is always loaded
    # return properly formatted dataset
    return dataset
## Normals (long-term means): ASCII data specifications
# monthly normals at 1/12 degree resolution (~10 km)
norm12_period = (1970,2000)
# shared keyword arguments for all normals variable definitions (time-first axis order)
norm12_defaults = dict(axes=('time',None,None), dtype=np.float32)
# raster variable definitions: source key -> standardized name/units and unit conversions
norm12_vardefs = dict(maxt = dict(grid='NA12', name='Tmax', units='K', offset=273.15, **norm12_defaults), # 2m maximum temperature, originally in degrees Celsius
                      mint = dict(grid='NA12', name='Tmin', units='K', offset=273.15, **norm12_defaults), # 2m minimum temperature
                      pcp = dict(grid='NA12', name='precip', units='kg/m^2/month', transform=transformMonthly, **norm12_defaults), # total precipitation
                      pet = dict(grid='NA12', name='pet', units='kg/m^2/month', transform=transformMonthly, **norm12_defaults), # potential evapo-transpiration
                      rrad = dict(grid='NA12', name='SWDNB', units='W/m^2', scalefactor=1e6/86400., **norm12_defaults), # solar radiation, originally in MJ/m^2/day
                      rain = dict(grid='CA12', name='liqprec', units='kg/m^2/month', transform=transformMonthly, **norm12_defaults), # liquid precipitation (CA grid only)
                      snwd = dict(grid='CA12', name='snowh', units='m', scalefactor=1./100., **norm12_defaults), ) # snow depth, originally in cm (CA grid only)
norm12_axdefs = dict(time = dict(name='time', units='month', coord=np.arange(1,13)),) # time coordinate
# derived variables computed by loadASCII_TS from the loaded fields
norm12_derived = ('T2','solprec','snow','snwmlt','liqwatflx')
norm12_grid_pattern = root_folder+'{GRID:s}_normals{PRDSTR:s}/' # dataset root folder
norm12_var_pattern = '{VAR:s}/{VAR:s}_{time:02d}.asc.gz' # path to variables
norm12_title = 'NRCan Gridded Normals'
def loadASCII_Normals(name=dataset_name, title=norm12_title, atts=None, derived_vars=norm12_derived, varatts=varatts,
                      NA_grid=None, CA_grid=None, resolution=12, grid_defs=None, period=norm12_period, snow_density='maritime',
                      var_pattern=norm12_var_pattern, grid_pattern=norm12_grid_pattern, vardefs=norm12_vardefs, axdefs=norm12_axdefs):
    ''' Load NRCan normals (long-term means) from ASCII files; thin wrapper around loadASCII_TS. '''
    # normals have a plain 12-month time axis, so no merged (year/month) axis is needed
    return loadASCII_TS(name=name, title=title, atts=atts, derived_vars=derived_vars, varatts=varatts,
                        snow_density=snow_density, NA_grid=NA_grid, CA_grid=CA_grid, merged_axis=None,
                        resolution=resolution, grid_defs=grid_defs, period=period, var_pattern=var_pattern,
                        grid_pattern=grid_pattern, vardefs=vardefs, axdefs=axdefs)
## Historical time-series: ASCII data specifications
# monthly transient at 1/12 degree resolution (~10 km)
# hist_period = (1866,2013) # precip and min/max T only
mons12_period = (1950,2010) # with rain, and snow from 1958 - 2010
# shared keyword arguments: historical data is read on a split (year,month) time axis
mons12_defaults = dict(axes=('year','month',None,None), dtype=np.float32)
mons12_vardefs = dict(maxt = dict(grid='NA12', name='Tmax', units='K', offset=273.15, **mons12_defaults), # 2m maximum temperature, originally in degrees Celsius
                      mint = dict(grid='NA12', name='Tmin', units='K', offset=273.15, **mons12_defaults), # 2m minimum temperature
                      pcp = dict(grid='NA12', name='precip', units='kg/m^2/month', transform=transformMonthly, **mons12_defaults), # total precipitation
                      rain = dict(grid='CA12', name='liqprec', units='kg/m^2/month', transform=transformMonthly, **mons12_defaults), # liquid precipitation (CA grid only)
                      snwd = dict(grid='CA12', name='snowh', units='m', scalefactor=1./100., **mons12_defaults), ) # snow depth, originally in cm (CA grid only)
mons12_axdefs = dict(year = dict(name='year', units='year', coord=None), # yearly coordinate; select coordinate based on period
                     month = dict(name='month', units='month', coord=np.arange(1,13)),) # monthly coordinate
# define merged time axis (year/month are collapsed into a single 'time' axis by loadASCII_TS)
mons12_matts = dict(name='time', units='month', long_name='Months since 1979-01', merged_axes = ('year','month'))
# N.B.: the time-series time offset has to be chose such that 1979 begins with the origin (time=0)
mons12_derived = norm12_derived # same as for normals
mons12_grid_pattern = root_folder+'{GRID:s}_hist/'
mons12_var_pattern = '{VAR:s}/{year:04d}/{VAR:s}_{month:02d}.asc.gz'
mons12_title = 'NRCan Historical Gridded Time-series'
# monthly transient at 1/60 degree resolution (~2 km)
mons60_period = (2011,2018) # SnoDAS period for southern Ontario
mons60_defaults = mons12_defaults
mons60_vardefs = dict(maxt = dict(grid='NA60', name='Tmax', units='K', offset=273.15, **mons60_defaults), # 2m maximum temperature, originally in degrees Celsius
                      mint = dict(grid='NA60', name='Tmin', units='K', offset=273.15, **mons60_defaults), # 2m minimum temperature
                      pcp = dict(grid='NA60', name='precip', units='kg/m^2/month', transform=transformMonthly, **mons60_defaults),) # total precipitation
# define original split and merged time axes
# N.B.: these are the *same objects* as the mons12 definitions, not copies
mons60_axdefs = mons12_axdefs; mons60_matts = mons12_matts
# N.B.: the time-series time offset has to be chose such that 1979 begins with the origin (time=0)
mons60_derived = ('T2',) # no snow or rain yet
mons60_grid_pattern = root_folder+'{GRID:s}_mons/'
mons60_var_pattern = '{VAR:s}/{year:04d}/{VAR:s}60_{month:02d}.asc.gz'
mons60_title = 'NRCan Historical Gridded Time-series'
def loadASCII_Hist(name=dataset_name, title=mons12_title, atts=None, derived_vars=mons12_derived, varatts=varatts, snow_density='maritime',
                   NA_grid=None, CA_grid=None, resolution=12, grid_defs=None, period=mons12_period, merged_axis=mons12_matts,
                   var_pattern=mons12_var_pattern, grid_pattern=mons12_grid_pattern, vardefs=mons12_vardefs, axdefs=mons12_axdefs):
    ''' Load historical NRCan timeseries from ASCII files, merge CA and NA grids and compute
        some additional variables; return Dataset.
        The 'year' axis coordinate is filled in from 'period', and a merged monthly 'time'
        axis is constructed such that 1979-01 corresponds to time=0. '''
    # figure out time period for the (split) year axis
    # NOTE(review): this mutates the axdefs dicts in-place, and the default is a module-level
    # dict, so repeated calls with different periods share state -- confirm this is intended
    for axname,axdef in list(axdefs.items()):
        if 'coord' not in axdef or axdef['coord'] is None:
            assert axdef['units'].lower() == 'year', axdef
            axdef['coord'] = np.arange(period[0],period[1]+1)
    if merged_axis:
        if isinstance(merged_axis,dict) and period:
            # construct the merged monthly time axis (origin at 1979-01)
            merged_axis = Axis(coord=np.arange((period[0]-1979)*12,(period[1]-1978)*12), atts=merged_axis)
            assert 'merged_axes' in merged_axis.atts
            # sanity check: merged length must equal the product of the merged axes' lengths
            # (bugfix: use the axdefs actually passed in, not the module-level mons12_axdefs;
            # cf. the correct equivalent in loadASCII_Daily)
            nlen = np.prod([len(axdefs[axname]['coord']) for axname in merged_axis.atts['merged_axes']])
            assert len(merged_axis) == nlen, (nlen,merged_axis.prettyPrint(short=True))
        elif not isinstance(merged_axis,Axis):
            raise TypeError(merged_axis)
    # load ASCII data
    return loadASCII_TS(name=name, title=title, atts=atts, derived_vars=derived_vars, varatts=varatts, time_axis='month',
                        snow_density=snow_density,
                        NA_grid=NA_grid, CA_grid=CA_grid, merged_axis=merged_axis, resolution=resolution, grid_defs=grid_defs,
                        period=period, var_pattern=var_pattern, grid_pattern=grid_pattern, vardefs=vardefs, axdefs=axdefs)
# daily transient at 1/12 degree resolution
day12_period = (2011,2018) # SnoDAS period for southern Ontario
# shared keyword arguments: daily data is read on a split (year,day) time axis
day12_defaults = dict(axes=('year','day',None,None), dtype=np.float32, fillValue=None)
day12_vardefs = dict(maxt = dict(grid='CA12', name='Tmax', units='K', offset=273.15, alt_name='max', **day12_defaults), # 2m maximum temperature, originally in degrees Celsius
                     mint = dict(grid='CA12', name='Tmin', units='K', offset=273.15, alt_name='min', **day12_defaults), # 2m minimum temperature
                     pcp = dict(grid='CA12', name='precip', units='kg/m^2/s', scalefactor=1./86400., **day12_defaults),) # total precipitation, originally per day
day12_varlist = [atts['name'] for atts in day12_vardefs.values()] # standardized names of available variables
# define original split and merged time axes
day12_axdefs = dict(time = dict(name='time', units='day', coord=np.arange(1,366)),) # time coordinate
day12_matts = dict(name='time', units='day', long_name='Days since 1979-01-01', merged_axes = ('year','day'))
# N.B.: the time-series time offset has to be chose such that 1979 begins with the origin (time=0)
day12_derived = ('T2',) # no snow or rain yet
day12_grid_pattern = root_folder+'{GRID:s}_Daily/'
day12_var_pattern = '{VAR:s}/{year:04d}/{VAR:s}{year:04d}_{day:d}.asc.gz'
day12_title = 'NRCan Daily Gridded Time-series'
# daily transient at 1/60 degree resolution
son60_period = (1997,2018) # SnoDAS period for southern Ontario
son60_defaults = dict(axes=('year','day',None,None), dtype=np.float32, fillValue=None)
# NOTE(review): the definitions below expand **day12_defaults rather than **son60_defaults;
# harmless as long as both dicts stay identical, but presumably unintended -- confirm
son60_vardefs = dict(maxt = dict(grid='SON60', name='Tmax', units='K', offset=273.15, alt_name='max', **day12_defaults), # 2m maximum temperature, originally in degrees Celsius
                     mint = dict(grid='SON60', name='Tmin', units='K', offset=273.15, alt_name='min', **day12_defaults), # 2m minimum temperature
                     pcp = dict(grid='SON60', name='precip', units='kg/m^2/s', scalefactor=1./86400., **day12_defaults), # unadjusted total precipitation
                     pcp_adj = dict(grid='SON60', name='precip_adj', units='kg/m^2/s', scalefactor=1./86400.,
                                    alt_name='pcp', **day12_defaults),) # adjusted total precipitation
son60_varlist = [atts['name'] for atts in son60_vardefs.values()] # standardized names of available variables
# define original split and merged time axes (shared objects with the day12 definitions)
son60_axdefs = day12_axdefs; son60_matts = day12_matts
# N.B.: the time-series time offset has to be chose such that 1979 begins with the origin (time=0)
son60_derived = ('T2',) # no snow or rain yet
son60_grid_pattern = day12_grid_pattern; son60_var_pattern = day12_var_pattern
son60_title = 'NRCan Daily Gridded Time-series for Southern Ontario'
# default varlists for daily variables for different resolutions (incl. post-processed PET variants)
added_variables = ['pet_hog','pet_har','pet_haa','pet_th']
res_varlists = dict(CA12 = list(day12_derived) + day12_varlist + added_variables,
                    SON60 = list(son60_derived) + son60_varlist + added_variables,
                    )
def loadASCII_Daily(name=dataset_name, title=day12_title, atts=None, derived_vars=day12_derived, varatts=varatts, snow_density='maritime',
                    NA_grid=None, CA_grid=None, resolution=12, grid_defs=None, period=day12_period, merged_axis=day12_matts,
                    var_pattern=day12_var_pattern, grid_pattern=day12_grid_pattern, vardefs=day12_vardefs, axdefs=day12_axdefs):
    ''' load historical NRCan timeseries from ASCII files, merge CA and NA grids and compute some additional variables; return Dataset

        The heavy lifting is delegated to loadASCII_TS; this wrapper only
        resolves the year coordinate from 'period' and constructs the merged
        (year x day) time axis when 'merged_axis' is given as an attribute dict.
    '''
    # figure out time period for merged time axis:
    # axes defined without explicit coordinates are assumed to be the year
    # axis and are filled in from the requested period (end year inclusive)
    for axname,axdef in list(axdefs.items()):
        if 'coord' not in axdef or axdef['coord'] is None:
            assert axdef['units'].lower() == 'year', axdef
            axdef['coord'] = np.arange(period[0],period[1]+1)
    if merged_axis:
        if isinstance(merged_axis,dict) and period:
            # construct the merged time Axis from its attribute dict; its length
            # is the product of the lengths of the axes being merged (year x day)
            nlen = np.prod([len(axdefs[axname]['coord']) for axname in merged_axis['merged_axes']])
            merged_axis = Axis(coord=np.arange(nlen), atts=merged_axis)
            assert 'merged_axes' in merged_axis.atts
            assert len(merged_axis) == nlen, (nlen,merged_axis.prettyPrint(short=True))
        elif not isinstance(merged_axis,Axis):
            # anything other than an attribute dict or a ready-made Axis is an error
            raise TypeError(merged_axis)
    # load ASCII data (lskipNA: tolerate missing files on the NA grid)
    return loadASCII_TS(name=name, title=title, atts=atts, derived_vars=derived_vars, varatts=varatts, time_axis='day',
                        snow_density=snow_density, lskipNA=True,
                        NA_grid=NA_grid, CA_grid=CA_grid, merged_axis=merged_axis, resolution=resolution, grid_defs=grid_defs,
                        period=period, var_pattern=var_pattern, grid_pattern=grid_pattern, vardefs=vardefs, axdefs=axdefs)
## CMC historical snow time-series configuration
CMC_period = (1998,2015) # years covered by the CMC snow-depth analyses
CMC_vardefs = dict(snowh = dict(grid='NA12', name='snowh', units='m', dtype=np.float32, scalefactor=0.01, # Snow depth, originally in cm
                                axes=('year','month',None,None),),) # this is the axes order in which the data are read
CMC_axdefs = dict(year = dict(name='year', units='year', coord=np.arange(CMC_period[0],CMC_period[1]+1)), # yearly coordinate
                  month = dict(name='month', units='month', coord=np.arange(1,13)),) # monthly coordinate - will be replaced
# N.B.: the time-series time offset has to be chosen such that 1979 begins with the origin (time=0)
CMC_derived = ('snow','snow_acc',) # variables derived from the raw snow depth in loadCMC_Hist
CMC_root = root_folder+'/CMC_hist/'
CMC_var_pattern = '{VAR:s}/ps_cmc_sdepth_analyses_{year:04d}_ascii/{year:04d}_{month:02d}_01.tif'
CMC_title = 'CMC Historical Gridded Snow Time-series'
# loader for the CMC historical snow time-series (GeoTIFF rasters) follows
def loadCMC_Hist(name='CMC', title=CMC_title, atts=None, derived_vars=CMC_derived, varatts=varatts,
                 grid='NA12', resolution=12, grid_defs=None, period=CMC_period, lcheck=True, mask=None,
                 lmergeTime=False, # merge the year and month "axes" into a single monthly time axis
                 snow_density=None,
                 var_pattern=CMC_var_pattern, data_root=CMC_root, vardefs=CMC_vardefs, axdefs=CMC_axdefs):
    ''' load CMC historical snow time-series from GeoTIFF files, merge with NRCan dataset and recompute snowmelt

        Reads monthly CMC snow-depth rasters, merges the (year, month) axes
        into a single monthly time axis anchored at Jan 1979, shifts the
        first-of-month values to end-of-month, and computes derived variables
        (SWE 'snow' and monthly accumulation 'snow_acc').

        NOTE(review): the 'lmergeTime' parameter is currently unused -- the
        year/month axes are always merged below; confirm whether it should
        gate the mergeAxes call.
    '''
    from utils.ascii import rasterDataset
    # determine grids / resolution
    if grid_defs is None:
        grid_defs = grid_def # defined in the module API; registers all pre-defined grids
    if resolution is not None:
        resolution = str(resolution)
        grid = 'NA{:s}'.format(resolution) if grid is None else grid.upper()
    # update the year coordinate from the requested period (mainly for testing)
    if period is not None:
        axdefs['year']['coord'] = np.arange(period[0],period[1]+1)
    # load NA grid from the GeoTIFF rasters (missing files are skipped)
    dataset = rasterDataset(name=name, title=title, vardefs=vardefs, axdefs=axdefs, atts=atts, projection=None,
                            griddef=grid_defs[grid], lgzip=None, lgdal=True, lmask=False, fillValue=0, lskipMissing=True,
                            lgeolocator=False, file_pattern=data_root+var_pattern )
    # merge year and month axes into a single monthly time axis
    dataset = dataset.mergeAxes(axes=list(axdefs.keys()), axatts=varatts['time'], linplace=True)
    assert dataset.hasAxis('time'), dataset
    assert dataset.time[0] == 0, dataset.time.coord
    dataset.time.coord += 12 * ( axdefs['year']['coord'][0] - 1979 ) # set origin to Jan 1979! (convention)
    dataset.time.atts['long_name'] = 'Month since 1979-01'
    # apply (land) mask, if supplied
    if mask:
        if not isinstance(mask,Variable): raise TypeError(mask)
        dataset.mask(mask=mask)
    # shift snow values by one month, since these values are for the 1st of the month;
    # after the roll, the value stored at month t is the depth observed on the 1st of
    # month t+1, i.e. effectively the end of month t
    snowh = dataset.snowh; tax = snowh.axisIndex('time'); tlen1 = snowh.shape[tax]-1
    assert lcheck is False or ( snowh.masked and np.all( snowh.data_array.mask.take([0], axis=tax) ) ), snowh.data_array.mask.take([0], axis=tax).sum()
    snowh.data_array = np.roll(snowh.data_array, -1, axis=tax) # there is no MA function, but np.roll preserves the mask
    assert lcheck is False or ( snowh.masked and np.all( snowh.data_array.mask.take([tlen1], axis=tax) ) ), snowh.data_array.mask.take([tlen1], axis=tax).sum()
    assert 'long_name' not in snowh.atts, snowh.atts['long_name']
    # NOTE(review): 'snowh' is snow depth, not SWE -- the long_name below looks
    # copied from the SWE variable; kept unchanged pending confirmation
    snowh.atts['long_name'] = "Snow Water Equivalent (end of month)"
    # compute derived variables
    for var in derived_vars:
        if var == 'snow':
            # compute snow water equivalent from depth
            # before we can compute anything, we need estimates of snow density from a seasonal climatology
            density = getSnowDensity(snow_class=snow_density)
            density_note = "Snow density estimates from CMC for {:s} snow cover (Tab. 3): https://nsidc.org/data/NSIDC-0447/versions/1#title15".format(snow_density.title())
            # compute values and add to dataset
            newvar = monthlyTransform(var=dataset.snowh.copy(deepcopy=True), scalefactor=density, lvar=True, linplace=True)
            newvar.atts['long_name'] = 'Snow Water Equivalent at the end of the month.'
            newvar.atts['note'] = density_note
        elif var == 'snow_acc':
            # compute snow accumulation as the month-to-month SWE difference
            snow = dataset.snow; tax = snow.axisIndex('time'); data = snow[:]
            delta = ma.empty_like(data)
            assert tax == 0, snow
            # BUG FIX: ma.empty_like leaves index 0 uninitialized; the first
            # time step has no difference defined and must be masked (the
            # original code masked index 1, the first *valid* diff, and let
            # uninitialized garbage at index 0 pass through as data)
            delta[1:,:] = ma.diff(data, axis=tax); delta[0,:] = ma.masked
            # N.B.: the snow/SWE date has already been shifted to the end of the month
            # create snow accumulation variable and divide by the length of the month
            newvar = Variable(data=delta, axes=snow.axes, name=var, units='kg/m^2/month')
            newvar = transformMonthly(var=newvar, slc=None, l365=False, lvar=True, linplace=True)
        # general stuff for all variables: attach GDAL/projection info and register
        newvar = addGDALtoVar(newvar, griddef=dataset.griddef)
        dataset[var] = newvar
    # apply varatts to standardize metadata
    for varname,var in list(dataset.variables.items()):
        var.atts.update(varatts[varname]) # update in-place
        # N.B.: 'long_name' and 'note' are not in varatts and are preserved
    # return dataset
    return dataset
## Dataset API
# N.B.: the two bare names below are no-op expression statements; they merely
# document that dataset_name and root_folder (defined earlier in this module)
# are part of the standardized dataset API exposed by this module.
dataset_name # dataset name
root_folder # root folder of the dataset
orig_file_pattern = norm12_grid_pattern+norm12_var_pattern # filename pattern: variable name and resolution
ts_file_pattern = tsfile # filename pattern: grid
clim_file_pattern = avgfile # filename pattern: variable name and resolution
data_folder = avgfolder # folder for user data
grid_def = {'NA12':NRCan_NA12_grid, 'NA60':NRCan_NA60_grid, 'CA12':NRCan_CA12_grid, 'CA24':NRCan_CA24_grid, 'SON60':NRCan_SON60_grid} # standardized grid dictionary
LTM_grids = ['NA12','CA12','CA24','SON60'] # grids that have long-term mean data
LTM_grids += ['na12_tundra','na12_taiga','na12_maritime','na12_ephemeral','na12_prairies','na12_alpine',] # some fake grids to accommodate different snow densities
TS_grids = ['NA12','NA60','CA12','SON60'] # grids that have time-series data
TS_grids += ['na60_'+var for var in varlist] # per-variable NA60 pseudo-grids
TS_grids += ['na12_tundra','na12_taiga','na12_maritime','na12_ephemeral','na12_prairies','na12_alpine',] # some fake grids to accommodate different snow densities
grid_res = {'NA12':1./12.,'NA60':1./60.,'CA12':1./12.,'CA24':1./24.,'SON60':1./60.} # grid resolutions in degrees; no special name, since there is only one...
default_grid = NRCan_NA12_grid
# functions to access specific datasets (standardized loader aliases)
loadDailyTimeSeries = loadNRCan_Daily # daily time-series data
loadLongTermMean = loadNRCan # climatology provided by publisher
loadTimeSeries = loadNRCan_TS # time-series data
loadClimatology = loadNRCan # pre-processed, standardized climatology
loadStationClimatology = loadNRCan_Stn # climatologies without associated grid (e.g. stations)
loadStationTimeSeries = loadNRCan_StnTS # time-series without associated grid (e.g. stations)
loadShapeClimatology = loadNRCan_Shp # climatologies without associated grid (e.g. provinces or basins)
loadShapeTimeSeries = loadNRCan_ShpTS # time-series without associated grid (e.g. provinces or basins)
if __name__ == '__main__':
mode = 'test_daily'
# mode = 'test_climatology'
# mode = 'test_timeseries'
# mode = 'test_point_climatology'
# mode = 'test_point_timeseries'
# mode = 'convert_Normals'
# mode = 'convert_Historical'
# mode = 'convert_Daily'
# mode = 'convert_to_netcdf'
# mode = 'add_CMC'
# mode = 'test_CMC'
pntset = 'glbshp' # 'ecprecip'
# pntset = 'ecprecip'
# period
# period = (1970,2000)
period = (1980,2010)
# period = (2011,2019)
# snow density/type
# snow_density = 'ephemeral'
snow_density = 'maritime'
# snow_density = 'prairies'
# snow_density = 'taiga'
# snow_density = 'alpine'
res = None; grid = None
if mode == 'convert_to_netcdf':
from utils.ascii import convertRasterToNetCDF
from time import time
# parameters for daily ascii
# varlist = ['pcp',]
# varlist = ['pcp', 'maxt', 'mint'] # order of importance...
varlist = ['pcp',] # complete job
# varlist = day12_vardefs.keys()
vardefs = day12_vardefs
grid_res = 'CA12'
# varlist = ['pcp', 'maxt', 'mint', 'pcp_adj'] # order of importance...
# varlist = ['maxt', 'mint'] # recalculate
# vardefs = son60_vardefs
# grid_res = 'SON60'
griddef = grid_def[grid_res]
# parameters for rasters
start_date = '1950-01-01'; end_date = '2017-12-31'; sampling = 'D'; loverwrite = True
# start_date = '2015-12-01'; end_date = '2016-01-31'; sampling = 'D'; loverwrite = True
# start_date = '2011-01-01'; end_date = '2011-02-01'; sampling = 'D'; loverwrite = True
# start_date = '2011-01-01'; end_date = '2018-01-01'; sampling = 'D'; loverwrite = False
# start_date = '2000-01-01'; end_date = '2018-01-01'; sampling = 'D'; loverwrite = True
# start_date = '1997-01-01'; end_date = '2018-01-01'; sampling = 'D'; loverwrite = True
# start_date = '2016-01-01'; end_date = '2018-01-01'; sampling = 'D'; loverwrite = True
raster_folder = root_folder + grid_res+'_Daily/'
def raster_path_func(datetime, varname, **varatts):
''' determine path to appropriate raster for given datetime and variable'''
day = datetime.dayofyear; year = datetime.year
if not datetime.is_leap_year and day >= 60: day += 1
altname = varatts.get('alt_name',varname)
if varname in ('maxt','mint') and year in (2016,2017):
path = '{VAR:s}/{YEAR:04d}/{ALT:s}/{YEAR:04d}_{DAY:d}.asc.gz'.format(YEAR=year, VAR=varname, ALT=altname, DAY=day)
else:
path = '{VAR:s}/{YEAR:04d}/{ALT:s}{YEAR:04d}_{DAY:d}.asc.gz'.format(YEAR=year, VAR=varname, ALT=altname, DAY=day)
return path
# NetCDF definitions
ds_atts = dict(start_date=start_date, end_date=end_date, sampling=sampling)
# start operation
start = time()
## loop over variables (individual files)
for varname in varlist:
print("\n *** Reading rasters for variable '{}' ('{}') *** \n".format(varname,vardefs[varname]['name']))
nc_name = vardefs[varname]['name']
nc_filepath = daily_folder + netcdf_filename.format(VAR=nc_name, RES=grid_res).lower()
tmp_filepath = nc_filepath + '.tmp' # use temporary file during creation
vardef = {varname:vardefs[varname]} # only one variable
# read rasters and write to NetCDF file
print('\nSaving to NetCDF-4 file:\n '+nc_filepath+'\n')
convertRasterToNetCDF(filepath=tmp_filepath, raster_folder=raster_folder, raster_path_func=raster_path_func, vardefs=vardef,
start_date=start_date, end_date=end_date, sampling=sampling, ds_atts=ds_atts, griddef=griddef,
loverwrite=loverwrite,)
assert os.path.exists(tmp_filepath), tmp_filepath
# replace original file
if os.path.exists(nc_filepath): os.remove(nc_filepath)
os.rename(tmp_filepath, nc_filepath)
# print timing
end = time()
print(('\n Required time: {:.0f} seconds\n'.format(end-start)))
# inspect Dataset
import xarray as xr
xds = xr.open_dataset(nc_filepath, decode_cf=True, decode_times=True, decode_coords=True, use_cftime=True)
print(xds)
#print(ds.variables)
#print(xds['time'])
elif mode == 'test_daily':
varlist = ['precip','T2']
xds = loadNRCan_Daily(varlist=varlist, resolution='CA12', grid=None, chunks=True, lskip=True)
print(xds)
print('')
for varname,xv in xds.variables.items():
if xv.ndim == 3: break
xv = xds[varname] # get DataArray instead of Variable object
#xv = xv.sel(time=slice('2018-01-01','2018-02-01'),x=slice(-3500,4500),y=slice(-1000,2000))
xv = xv.loc['2011-01-01',:,:]
print(xv)
print(('Size in Memory: {:6.1f} MB'.format(xv.nbytes/1024./1024.)))
elif mode == 'test_climatology':
# load averaged climatology file
print('')
dataset = loadNRCan(grid=grid,period=period,resolution=res, varatts=dict(pet=dict(name='pet_wrf')),
varlist=['liqwatflx_adj30'])
print(dataset)
print('')
print((dataset.geotransform))
print((dataset.liqwatflx.mean()))
print((dataset.liqwatflx.masked))
# print time coordinate
print()
print(dataset.time.atts)
print()
print(dataset.time.data_array)
elif mode == 'test_timeseries':
# load time-series file
print('')
dataset = loadNRCan_TS(grid=grid,resolution='na12_maritime')
print(dataset)
print('')
print((dataset.time))
print((dataset.time.coord))
print((dataset.time.coord[29*12])) # Jan 1979
if mode == 'test_point_climatology':
# load averaged climatology file
print('')
if pntset in ('shpavg','glbshp'):
dataset = loadNRCan_Shp(shape=pntset, resolution=res, period=period)
print((dataset.shp_area.mean()))
print('')
else: dataset = loadNRCan_Stn(station=pntset, resolution=res, period=period)
dataset.load()
print(dataset)
print('')
print((dataset['shape_name']))
print('')
print((dataset['shape_name'][:]))
print('')
print((dataset.filepath))
# dataset = dataset(shape_name='GRW')
# print(dataset)
# print('')
# print(dataset.atts.shp_area)
# print(dataset.liqprec.mean()*86400)
# print(dataset.precip.masked)
# print(dataset.T2.mean())
# print(dataset.atts.shp_empty,dataset.atts.shp_full,dataset.atts.shp_encl,)
# # print time coordinate
# print
# print dataset.time.atts
# print
# print dataset.time.data_array
elif mode == 'test_point_timeseries':
# load station time-series file
print('')
if pntset in ('shpavg',): dataset = loadNRCan_ShpTS(shape=pntset, resolution=res)
else: dataset = loadNRCan_StnTS(station=pntset, resolution=res)
print(dataset)
print('')
print((dataset.time))
print((dataset.time.coord))
assert dataset.time.coord[29*12] == 0 # Jan 1979
assert dataset.shape[0] == 1
elif mode == 'convert_Normals':
# parameters
prdstr = '_{}-{}'.format(*period)
resolution = 12; grdstr = '_na{:d}_{:s}'.format(resolution, snow_density)
ncfile = avgfolder + avgfile.format(grdstr,prdstr)
if not os.path.exists(avgfolder): os.mkdir(avgfolder)
# load ASCII dataset with default values
dataset = loadASCII_Normals(period=period, resolution=resolution, snow_density=snow_density, grid_defs=grid_def,)
# test
print(dataset)
print('')
print((dataset.snow))
# write to NetCDF
print('')
writeNetCDF(dataset=dataset, ncfile=ncfile, ncformat='NETCDF4', zlib=True, writeData=True, overwrite=True,
skipUnloaded=False, feedback=True, close=True)
assert os.path.exists(ncfile), ncfile
elif mode == 'convert_Historical':
# parameters
# snow_density = 'ephemeral'
snow_density = 'maritime'
# snow_density = 'prairies'
if not os.path.exists(avgfolder): os.mkdir(avgfolder)
# use actual, real values
# NA12 grid
title = mons12_title; resolution = 12; grid_pattern = mons12_grid_pattern
vardefs = mons12_vardefs; var_pattern = mons12_var_pattern; derived_vars = mons12_derived
period = mons12_period; split_axdefs = mons12_axdefs; merged_atts = mons12_matts
file_tag = snow_density
# NA60 grid
varname = 'pcp'; period = (2011,2018); snow_density = None
title = mons60_title; resolution = 60; grid_pattern = mons60_grid_pattern
vardefs = {varname:mons60_vardefs[varname]}
var_pattern = mons60_var_pattern; derived_vars = None # mons60_derived
split_axdefs = mons60_axdefs; merged_atts = mons60_matts
file_tag = mons60_vardefs[varname]['name'] # use common variable name as file tag
# test values
# period = (1970,2000) # for production
# period = (1981,2010) # for production
# period = (1991,2000) # for testing
# vardefs = dict(maxt = dict(grid='NA12', name='Tmax', units='K', offset=273.15, **hist_defaults), # 2m maximum temperature, originally in degrees Celsius
# mint = dict(grid='NA12', name='Tmin', units='K', offset=273.15, **hist_defaults), # 2m minimum temperature
# snwd = dict(grid='CA12', name='snowh', units='m', scalefactor=1./100., **hist_defaults), # snow depth
# pcp = dict(grid='NA12', name='precip', units='kg/m^2/month', transform=transformMonthly, **hist_defaults),)
# derived_vars = ('T2',)
# load ASCII dataset with default values
dataset = loadASCII_Hist(title=title, resolution=resolution, grid_pattern=grid_pattern,
vardefs=vardefs, var_pattern=var_pattern, derived_vars=derived_vars,
period=period, axdefs=split_axdefs, merged_axis=merged_atts,
snow_density=snow_density, grid_defs=grid_def,)
# test
print(dataset)
print('')
print((dataset.precip))
# write to NetCDF
grdstr = '_na{:d}_{:s}'.format(resolution, file_tag)
ncfile = avgfolder + tsfile.format(grdstr)
print('')
writeNetCDF(dataset=dataset, ncfile=ncfile, ncformat='NETCDF4', zlib=True, writeData=True, overwrite=True,
skipUnloaded=False, feedback=True, close=True)
assert os.path.exists(ncfile), ncfile
elif mode == 'add_CMC':
## SWE correction for CMC data
scale_tag = ''
scale_factor = 1.
scale_note = None
# scale_tag = '_adj30'
# scale_factor = 3.
# scale_note = 'CMC SWE data has been scaled by 3.0 to match NRCan SWE over Canada'
# scale_tag = '_adj35'
# scale_factor = 3.5
# scale_note = 'CMC SWE data has been scaled by 3.5 to match NRCan SWE over Canada'
# CMC_period = (1998,1999) # for tests
# filelist = ['test_' + avgfile.format('_na{:d}'.format(12),'_1970-2000')]
filelist = None
# load NRCan dataset (for precip and to add variables)
nrcan = loadNRCan(filelist=filelist, period=period, filemode='rw', snow_density=snow_density).load()
# load ASCII dataset with default values
cmc = loadCMC_Hist(period=CMC_period, mask=nrcan.landmask, snow_density=snow_density)
# test
print(cmc)
# climatology
print('')
cmc = cmc.climMean()
# print(cmc)
# apply scale factor
for varname,var in list(cmc.variables.items()):
if varname.lower().startswith('snow'):
if scale_factor != 1:
var *= scale_factor # scale snow/SWE variables
# N.B.: we are mainly using SWE differences, but this is all linear...
# values
print('')
var = cmc.snow_acc.mean(axes=('lat','lon'))
print((var[:]))
print('')
for varname,var in list(cmc.variables.items()):
if var.masked:
print((varname, float(var.data_array.mask.sum())/float(var.data_array.size)))
# add liquid water flux, based on precip and snow accumulation/storage changes
print('')
lwf = 'liqwatflx'; data = ( nrcan.precip[:] - cmc.snow_acc[:] ).clip(min=0) # clip smaller than zero
cmc[lwf] = addGDALtoVar(Variable(data=data, axes=cmc.snow_acc.axes, atts=varatts[lwf]), griddef=cmc.griddef)
print((cmc[lwf]))
# values
print('')
var = cmc[lwf].mean(axes=('lat','lon'))
print((var[:]))
# create merged lwf and add to NRCan
for varname in (lwf,'snow','snowh'):
if varname+'_NRCan' in nrcan:
nrcan_var = nrcan[varname+'_NRCan']
else:
nrcan_var = nrcan[varname].load().copy(deepcopy=True) # load liqwatflx and rename
nrcan[varname+'_NRCan'] = nrcan_var
varname_tag = varname + scale_tag
new_var = nrcan_var.copy(deepcopy=False) # replace old variable
data = np.where(nrcan_var.data_array.mask,cmc[varname].data_array,nrcan_var.data_array)
new_var.data_array = data
new_var.atts['note'] = 'merged data from NRCan and CMC'
if scale_note: new_var.atts['note'] = new_var.atts['note'] + '; ' + scale_note
if varname == lwf: new_var.atts['long_name'] = 'Merged Liquid Water Flux'
if varname == 'snow': new_var.atts['long_name'] = 'Merged Snow Water Equivalent'
if varname == 'snowh': new_var.atts['long_name'] = 'Merged Snow Depth'
new_var.fillValue = -999.
# save variable in NRCan dataset
if varname_tag in nrcan: del nrcan[varname_tag] # remove old variable
nrcan[varname_tag] = new_var
print((nrcan[lwf+scale_tag]))
# add other CMC variables to NRCan datasets
for varname,var in list(cmc.variables.items()):
if varname in CMC_derived or varname in CMC_vardefs or varname == lwf:
if scale_note: var.atts['note'] = scale_note
cmc_var = varname+'_CMC'+scale_tag
if cmc_var in nrcan: del nrcan[cmc_var] # overwrite existing
nrcan[cmc_var] = var
print('')
print(nrcan)
# save additional variables
nrcan.close(); del nrcan # implies sync
# now check
print('')
nrcan = loadNRCan(filelist=filelist, period=period, snow_density=snow_density)
print(nrcan)
print(("\nNetCDF file path:\n '{}'".format(nrcan.filelist[0])))
print('')
for varname,var in list(cmc.variables.items()):
if varname in CMC_derived or varname in CMC_vardefs:
assert varname+'_CMC'+scale_tag in nrcan, nrcan
# print('')
# print(nrcan[varname+'_CMC'])
elif mode == 'test_CMC':
# load ASCII dataset with default values
period = (1998,2000)
cmc = loadCMC_Hist(period=period, lcheck=True)
# test
print(cmc)
assert cmc.time[0] == 12*(period[0]-1979), cmc.time[:]
# climatology
print('')
cmc = cmc.climMean()
# print(cmc)
# values
print('')
var = cmc.snow.mean(axes=('lat','lon'))
print((var[:]))
for varname,var in list(cmc.variables.items()):
print((varname, var.masked, float(var.data_array.mask.sum())/float(var.data_array.size)))
|
aerler/GeoPy
|
src/datasets/NRCan.py
|
Python
|
gpl-3.0
| 62,692
|
[
"NetCDF"
] |
4e15f7908faa325e69081fa855515fbe971c2c941ed0505b6e97b6ad598eda07
|
#!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
"""Python script for interacting with BigQuery."""
import cmd
import codecs
import datetime
import httplib
import json
import os
import pdb
import pipes
import platform
import shlex
import sys
import time
import traceback
import types
import apiclient
import httplib2
import oauth2client
import oauth2client.client
import oauth2client.file
import oauth2client.gce
import oauth2client.tools
import yaml
from google.apputils import app
from google.apputils import appcommands
import gflags as flags
import table_formatter
import bigquery_client
# pylint: disable=unused-import
import bq_flags
# pylint: enable=unused-import
FLAGS = flags.FLAGS
# These are long names.
# pylint: disable=g-bad-name
JobReference = bigquery_client.ApiClientHelper.JobReference
ProjectReference = bigquery_client.ApiClientHelper.ProjectReference
DatasetReference = bigquery_client.ApiClientHelper.DatasetReference
TableReference = bigquery_client.ApiClientHelper.TableReference
BigqueryClient = bigquery_client.BigqueryClient
JobIdGeneratorIncrementing = bigquery_client.JobIdGeneratorIncrementing
JobIdGeneratorRandom = bigquery_client.JobIdGeneratorRandom
JobIdGeneratorFingerprint = bigquery_client.JobIdGeneratorFingerprint
# pylint: enable=g-bad-name
_VERSION_NUMBER = '2.0.22'
_CLIENT_USER_AGENT = 'Cloud SDK Command Line Tool' + _VERSION_NUMBER
_CLIENT_SCOPE = [
'https://www.googleapis.com/auth/bigquery',
]
_CLIENT_ID = '32555940559.apps.googleusercontent.com'
_CLIENT_INFO = {
'client_id': _CLIENT_ID,
'client_secret': 'ZmssLNjJy2998hD4CTg2ejr2',
'scope': _CLIENT_SCOPE,
'user_agent': _CLIENT_USER_AGENT,
}
_BIGQUERY_TOS_MESSAGE = (
'In order to get started, please visit the Google APIs Console to '
'create a project and agree to our Terms of Service:\n'
'\thttp://code.google.com/apis/console\n\n'
'For detailed sign-up instructions, please see our Getting Started '
'Guide:\n'
'\thttps://developers.google.com/bigquery/docs/getting-started\n\n'
'Once you have completed the sign-up process, please try your command '
'again.')
_DELIMITER_MAP = {
'tab': '\t',
'\\t': '\t',
}
# These aren't relevant for user-facing docstrings:
# pylint: disable=g-doc-return-or-yield
# pylint: disable=g-doc-args
# TODO(user): Write some explanation of the structure of this file.
####################
# flags processing
####################
def _ValidateGlobalFlags():
  """Validate combinations of global flag values.

  Raises:
    app.UsageError: If mutually exclusive authentication flags are both set.
  """
  # --service_account (key-file auth) and --use_gce_service_account
  # (GCE metadata-server auth) select different credential mechanisms
  # and cannot be combined.
  if FLAGS.service_account and FLAGS.use_gce_service_account:
    raise app.UsageError(
        'Cannot specify both --service_account and --use_gce_service_account.')
def ValidateAtMostOneSelected(*args):
  """Check whether more than one of the argument flags is selected.

  Despite the name, this does not raise; it reports whether the
  "at most one selected" constraint is violated.

  Args:
    *args: Flag values; each is tested for truthiness.

  Returns:
    True if more than one argument is truthy (constraint violated),
    False if zero or one arguments are truthy.
  """
  return sum(1 for arg in args if arg) > 1
def _GetBigqueryRcFilename():
  """Return the name of the bigqueryrc file to use.

  In order, we look for a flag the user specified, an environment
  variable, and finally the default value for the flag.

  Returns:
    bigqueryrc filename as a string.
  """
  # FLAGS['bigqueryrc'].present distinguishes "user explicitly passed the
  # flag" from "flag merely holds its default value"; only an explicit flag
  # outranks the BIGQUERYRC environment variable.
  return ((FLAGS['bigqueryrc'].present and FLAGS.bigqueryrc) or
          os.environ.get('BIGQUERYRC') or
          FLAGS.bigqueryrc)
def _ProcessBigqueryrc():
  """Updates FLAGS with values found in the bigqueryrc file.

  Lines have the form "flag=value" (leading dashes tolerated); a bare
  "flag" with no '=' is treated as a boolean true. Values already set on
  the command line take precedence, except for multi-valued flags, where
  command-line values and rc-file values are concatenated.

  Raises:
    app.UsageError: If the rc file names a flag that does not exist.
  """
  bigqueryrc = _GetBigqueryRcFilename()
  if not os.path.exists(bigqueryrc):
    # A missing rc file is not an error; just use flag defaults.
    return
  with open(bigqueryrc) as rcfile:
    for line in rcfile:
      # Skip comment lines and blank lines.
      if line.lstrip().startswith('#') or not line.strip():
        continue
      elif line.lstrip().startswith('['):
        # TODO(user): Support command-specific flag sections.
        continue
      flag, equalsign, value = line.partition('=')
      # if no value given, assume stringified boolean true
      if not equalsign:
        value = 'true'
      flag = flag.strip()
      value = value.strip()
      # Tolerate leading dashes so "--flag=value" also works in the rc file.
      while flag.startswith('-'):
        flag = flag[1:]
      # We want flags specified at the command line to override
      # those in the flagfile.
      if flag not in FLAGS:
        raise app.UsageError(
            'Unknown flag %s found in bigqueryrc file' % (flag,))
      if not FLAGS[flag].present:
        FLAGS[flag].Parse(value)
      elif FLAGS[flag].Type().startswith('multi'):
        # Multi-valued flag: append rc-file values after the command-line
        # values instead of discarding either.
        old_value = getattr(FLAGS, flag)
        FLAGS[flag].Parse(value)
        setattr(FLAGS, flag, old_value + getattr(FLAGS, flag))
def _ResolveApiInfoFromFlags():
  """Return a dict with the configured api endpoint and api version."""
  return {'api': FLAGS.api, 'api_version': FLAGS.api_version}
def _UseServiceAccount():
  """Return True when either service-account auth mechanism is requested."""
  if FLAGS.use_gce_service_account:
    return True
  return bool(FLAGS.service_account)
def _GetServiceAccountCredentialsFromFlags(storage):  # pylint: disable=unused-argument
  """Build service-account credentials from command-line flags.

  Args:
    storage: Unused; present so this function matches the signature of the
        other credential getters used by _GetCredentialsFromFlags.

  Returns:
    An oauth2client credentials object for the configured service account.

  Raises:
    app.UsageError: If OpenSSL support is unavailable, or the private key
        file is not specified or cannot be read.
  """
  client_scope = _CLIENT_SCOPE
  if FLAGS.use_gce_service_account:
    # On GCE the metadata server issues tokens directly; no key file needed.
    return oauth2client.gce.AppAssertionCredentials(client_scope)
  if not oauth2client.client.HAS_OPENSSL:
    # Signed JWT assertions require OpenSSL to sign with the private key.
    raise app.UsageError(
        'BigQuery requires OpenSSL to be installed in order to use '
        'service account credentials. Please install OpenSSL '
        'and the Python OpenSSL package.')
  if FLAGS.service_account_private_key_file:
    try:
      # NOTE: Python 2 file() builtin; reads the private key as raw bytes.
      with file(FLAGS.service_account_private_key_file, 'rb') as f:
        key = f.read()
    except IOError as e:
      raise app.UsageError(
          'Service account specified, but private key in file "%s" '
          'cannot be read:\n%s' % (FLAGS.service_account_private_key_file, e))
  else:
    raise app.UsageError(
        'Service account authorization requires the '
        'service_account_private_key_file flag to be set.')
  return oauth2client.client.SignedJwtAssertionCredentials(
      FLAGS.service_account, key, client_scope,
      private_key_password=FLAGS.service_account_private_key_password,
      user_agent=_CLIENT_USER_AGENT)
def _GetCredentialsFromOAuthFlow(storage):
  """Run the interactive OAuth2 web-server flow and return credentials.

  Retries the authorization loop until it succeeds (the user can ^C out);
  exits the process when running with --headless or on a network error.

  Args:
    storage: oauth2client storage object where the obtained credentials
        are persisted by the flow runner.

  Returns:
    The oauth2client credentials obtained from the flow.
  """
  print
  print '******************************************************************'
  print '** No OAuth2 credentials found, beginning authorization process **'
  print '******************************************************************'
  print
  if FLAGS.headless:
    # No terminal interaction is possible; bail out rather than hang.
    print 'Running in headless mode, exiting.'
    sys.exit(1)
  client_info = _CLIENT_INFO.copy()
  while True:
    # If authorization fails, we want to retry, rather than let this
    # cascade up and get caught elsewhere. If users want out of the
    # retry loop, they can ^C.
    try:
      flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
      credentials = oauth2client.tools.run(flow, storage)
      break
    except (oauth2client.client.FlowExchangeError, SystemExit) as e:
      # Here SystemExit is "no credential at all", and the
      # FlowExchangeError is "invalid" -- usually because you reused
      # a token.
      print 'Invalid authorization: %s' % (e,)
      print
    except httplib2.HttpLib2Error as e:
      # Network errors are not retried; there is no point looping.
      print 'Error communicating with server. Please check your internet '
      print 'connection and try again.'
      print
      print 'Error is: %s' % (e,)
      sys.exit(1)
  print
  print '************************************************'
  print '** Continuing execution of BigQuery operation **'
  print '************************************************'
  print
  return credentials
def _GetCredentialsFromFlags():
  """Load or obtain OAuth2/service-account credentials per the global flags.

  Selects between GCE metadata-server credentials, key-file service-account
  credentials, and the interactive OAuth flow; cached credentials are read
  from (and stored to) the appropriate credential file.

  Returns:
    A valid oauth2client credentials object bound to its storage.

  Raises:
    app.UsageError: If required flags for the chosen mechanism are missing.
    bigquery_client.BigqueryError: If the credential file cannot be created.
  """
  # In the case of a GCE service account, we can skip the entire
  # process of loading from storage.
  if FLAGS.use_gce_service_account:
    return _GetServiceAccountCredentialsFromFlags(None)
  if FLAGS.service_account:
    credentials_getter = _GetServiceAccountCredentialsFromFlags
    credential_file = FLAGS.service_account_credential_file
    if not credential_file:
      raise app.UsageError(
          'The flag --service_account_credential_file must be specified '
          'if --service_account is used.')
  else:
    credentials_getter = _GetCredentialsFromOAuthFlow
    credential_file = FLAGS.credential_file
  try:
    # Note that oauth2client.file ensures the file is created with
    # the correct permissions.
    storage = oauth2client.file.Storage(credential_file)
  except OSError as e:
    raise bigquery_client.BigqueryError(
        'Cannot create credential file %s: %s' % (FLAGS.credential_file, e))
  try:
    credentials = storage.get()
  except BaseException as e:
    # A corrupt credential file raises unpredictable exception types, so the
    # catch here is deliberately broad; the process exits immediately after.
    BigqueryCmd.ProcessError(
        e, name='GetCredentialsFromFlags',
        message_prefix=(
            'Credentials appear corrupt. Please delete the credential file '
            'and try your command again. You can delete your credential '
            'file using "bq init --delete_credentials".\n\nIf that does '
            'not work, you may have encountered a bug in the BigQuery CLI.'))
    sys.exit(1)
  if credentials is None or credentials.invalid:
    # No usable cached credentials; fall back to the chosen getter.
    credentials = credentials_getter(storage)
  credentials.set_store(storage)
  return credentials
def _GetFormatterFromFlags(secondary_format='sparse'):
  """Create a table formatter, honoring --format when explicitly set.

  Args:
    secondary_format: Format name used when --format was not given.

  Returns:
    A table_formatter formatter instance.
  """
  chosen_format = FLAGS.format if FLAGS['format'].present else secondary_format
  return table_formatter.GetFormatter(chosen_format)
def _ExpandForPrinting(fields, rows, formatter):
  """Apply bq-specific display expansion to every row in rows."""
  return [_ExpandRowForPrinting(fields, single_row, formatter)
          for single_row in rows]
def _ExpandRowForPrinting(fields, row, formatter):
  """Expand entries in a single row with bq-specific formatting.

  Args:
    fields: List of schema field dicts (with 'type', 'mode', optionally
        'fields' for records) positionally matching the row entries.
    row: List of raw cell values for one result row.
    formatter: A table_formatter formatter; JSON formatters get structured
        output (dicts/None), others get display strings.

  Returns:
    A new list of normalized cell values, same length as row.
  """
  def NormalizeTimestamp(entry, field):  # pylint: disable=unused-argument
    # BigQuery timestamps arrive as float seconds since the epoch (UTC).
    try:
      date = datetime.datetime.utcfromtimestamp(float(entry))
      return date.strftime('%Y-%m-%d %H:%M:%S')
    except ValueError:
      # utcfromtimestamp raises for values outside the representable range.
      return '<date out of range for display>'
  def NormalizeRecord(entry, field):
    if isinstance(formatter, table_formatter.JsonFormatter):
      # Recursively expand the nested record and rebuild it as a dict
      # keyed by the subfield names.
      subfields = field.get('fields', [])
      subresults = _ExpandRowForPrinting(subfields, entry, formatter)
      subfield_names = [subfield.get('name', '') for subfield in subfields]
      result = {}
      for subfield_name, subfield_data in zip(subfield_names, subresults):
        result[subfield_name] = subfield_data
      return result
    else:
      # Non-JSON formatters print records as-is.
      return entry
  def NormalizeRepeatedRecord(entry, field):
    if isinstance(formatter, table_formatter.JsonFormatter):
      return [NormalizeRecord(record, field) for record in entry]
    else:
      return entry
  # Map column index -> normalizer for columns that need special handling.
  column_normalizers = {}
  for i, field in enumerate(fields):
    if field['type'].upper() == 'TIMESTAMP':
      column_normalizers[i] = NormalizeTimestamp
    elif field['type'].upper() == 'RECORD':
      if field['mode'].upper() == 'REPEATED':
        column_normalizers[i] = NormalizeRepeatedRecord
      else:
        column_normalizers[i] = NormalizeRecord
  def NormalizeNone():
    # NULL rendering depends on the output format: JSON null, empty CSV
    # cell, or the literal string 'NULL' for tabular display.
    if isinstance(formatter, table_formatter.JsonFormatter):
      return None
    elif isinstance(formatter, table_formatter.CsvFormatter):
      return ''
    else:
      return 'NULL'
  def NormalizeEntry(i, entry):
    if entry is None:
      return NormalizeNone()
    elif i in column_normalizers:
      return column_normalizers[i](entry, fields[i])
    return entry
  return [NormalizeEntry(i, e) for i, e in enumerate(row)]
def _PrintDryRunInfo(job):
  """Print the bytes-processed estimate for a dry-run query job.

  Output style follows --format: full JSON, the bare byte count for CSV,
  or a human-readable sentence otherwise.

  Args:
    job: Dry-run job resource dict containing statistics.query.
  """
  num_bytes = job['statistics']['query']['totalBytesProcessed']
  if FLAGS.format in ['prettyjson', 'json']:
    _PrintFormattedJsonObject(job)
  elif FLAGS.format == 'csv':
    print num_bytes
  else:
    print (
        'Query successfully validated. Assuming the tables are not modified, '
        'running this query will process %s bytes of data.' % (num_bytes,))
def _PrintFormattedJsonObject(obj):
if FLAGS.format == 'prettyjson':
print json.dumps(obj, sort_keys=True, indent=2)
else:
print json.dumps(obj, separators=(',', ':'))
def _GetJobIdFromFlags():
  """Returns the job id or job generator from the flags."""
  if FLAGS.fingerprint_job_id and FLAGS.job_id:
    raise app.UsageError(
        'The fingerprint_job_id flag cannot be specified with the job_id '
        'flag.')
  if FLAGS.fingerprint_job_id:
    return JobIdGeneratorFingerprint()
  if FLAGS.job_id is None:
    # No job id given at all: generate one client-side.
    return JobIdGeneratorIncrementing(JobIdGeneratorRandom())
  # An explicitly empty job id means "let the server pick one";
  # otherwise use the user's value verbatim.
  return FLAGS.job_id or None
def _GetWaitPrinterFactoryFromFlags():
  """Returns the default wait_printer_factory to use while waiting for jobs."""
  # Start from the chattiest option and narrow down; --quiet wins over
  # --headless, matching the original precedence.
  printer = BigqueryClient.VerboseWaitPrinter
  if FLAGS.headless:
    printer = BigqueryClient.TransitionWaitPrinter
  if FLAGS.quiet:
    printer = BigqueryClient.QuietWaitPrinter
  return printer
def _PromptWithDefault(message):
"""Prompts user with message, return key pressed or '' on enter."""
if FLAGS.headless:
print 'Running --headless, accepting default for prompt: %s' % (message,)
return ''
return raw_input(message).lower()
def _PromptYN(message):
  """Prompts user with message, returning the key 'y', 'n', or '' on enter."""
  # Re-ask until the user gives one of the accepted answers.
  while True:
    response = _PromptWithDefault(message)
    if response in ('y', 'n', ''):
      return response
def _NormalizeFieldDelimiter(field_delimiter):
"""Validates and returns the correct field_delimiter."""
# The only non-string delimiter we allow is None, which represents
# no field delimiter specified by the user.
if field_delimiter is None:
return field_delimiter
try:
# We check the field delimiter flag specifically, since a
# mis-entered Thorn character generates a difficult to
# understand error during request serialization time.
_ = field_delimiter.decode(sys.stdin.encoding or 'utf8')
except UnicodeDecodeError:
raise app.UsageError(
'The field delimiter flag is not valid. Flags must be '
'specified in your default locale. For example, '
'the Latin 1 representation of Thorn is byte code FE, '
'which in the UTF-8 locale would be expressed as C3 BE.')
# Allow TAB and \\t substitution.
key = field_delimiter.lower()
return _DELIMITER_MAP.get(key, field_delimiter)
class TablePrinter(object):
  """Base class for printing a table, with a default implementation."""

  def __init__(self, **kwds):
    super(TablePrinter, self).__init__()
    # Stash every keyword argument as an attribute; extended classes
    # use these for their state.
    for key, value in kwds.iteritems():
      setattr(self, key, value)

  def PrintTable(self, fields, rows):
    # Fall back to the "pretty" formatter when no explicit format is
    # configured, then expand rows with bq-specific value formatting.
    formatter = _GetFormatterFromFlags(secondary_format='pretty')
    formatter.AddFields(fields)
    formatter.AddRows(_ExpandForPrinting(fields, rows, formatter))
    formatter.Print()
class Factory(object):
  """Class encapsulating factory creation of BigqueryClient."""

  # Lazily initialized; defaults to bigquery_client.BigqueryClient.
  _BIGQUERY_CLIENT_FACTORY = None

  class ClientTablePrinter(object):
    # Lazily initialized; defaults to a plain TablePrinter.
    _TABLE_PRINTER = None

    @classmethod
    def GetTablePrinter(cls):
      """Return the configured table printer, creating a default lazily."""
      if cls._TABLE_PRINTER is None:
        # Route through the setter so the default passes the same
        # validation as user-supplied printers.
        cls.SetTablePrinter(TablePrinter())
      return cls._TABLE_PRINTER

    @classmethod
    def SetTablePrinter(cls, printer):
      """Install a custom table printer; must be a TablePrinter."""
      if not isinstance(printer, TablePrinter):
        raise TypeError('Printer must be an instance of TablePrinter.')
      cls._TABLE_PRINTER = printer

  @classmethod
  def GetBigqueryClientFactory(cls):
    """Return the configured client factory, defaulting lazily."""
    if cls._BIGQUERY_CLIENT_FACTORY is None:
      cls.SetBigqueryClientFactory(bigquery_client.BigqueryClient)
    return cls._BIGQUERY_CLIENT_FACTORY

  @classmethod
  def SetBigqueryClientFactory(cls, factory):
    """Install a custom client factory; must subclass BigqueryClient."""
    if not issubclass(factory, bigquery_client.BigqueryClient):
      raise TypeError('Factory must be subclass of BigqueryClient.')
    cls._BIGQUERY_CLIENT_FACTORY = factory
class Client(object):
  """Class wrapping a singleton bigquery_client.BigqueryClient."""
  # The singleton instance; created on first Get() and dropped by Delete().
  client = None

  @staticmethod
  def Create(**kwds):
    """Build a new BigqueryClient configured from kwds and FLAGS."""
    def KwdsOrFlags(name):
      # Explicit keyword arguments win over the global flag values.
      return kwds[name] if name in kwds else getattr(FLAGS, name)
    # Note that we need to handle possible initialization tasks
    # for the case of being loaded as a library.
    _ProcessBigqueryrc()
    bigquery_client.ConfigurePythonLogger(FLAGS.apilog)
    credentials = _GetCredentialsFromFlags()
    assert credentials is not None
    client_args = {}
    # These client options may come either from kwds or the global flags.
    global_args = ('credential_file', 'job_property',
                   'project_id', 'dataset_id', 'trace', 'sync',
                   'api', 'api_version')
    for name in global_args:
      client_args[name] = KwdsOrFlags(name)
    client_args['wait_printer_factory'] = _GetWaitPrinterFactoryFromFlags()
    if FLAGS.discovery_file:
      # A local discovery document overrides discovery over the network.
      with open(FLAGS.discovery_file) as f:
        client_args['discovery_document'] = f.read()
    bigquery_client_factory = Factory.GetBigqueryClientFactory()
    return bigquery_client_factory(credentials=credentials, **client_args)

  @classmethod
  def Get(cls):
    """Return a BigqueryClient initialized from flags."""
    if cls.client is None:
      try:
        cls.client = Client.Create()
      except ValueError as e:
        # Convert constructor parameter errors into flag usage errors.
        raise app.UsageError(e)
    return cls.client

  @classmethod
  def Delete(cls):
    """Delete the existing client.

    This is needed when flags have changed, and we need to force
    client recreation to reflect new flag values.
    """
    cls.client = None
def _Typecheck(obj, types, message=None): # pylint: disable=redefined-outer-name
"""Raises a user error if obj is not an instance of types."""
if not isinstance(obj, types):
message = message or 'Type of %s is not one of %s' % (obj, types)
raise app.UsageError(message)
# TODO(user): This code uses more than the average amount of
# Python magic. Explain what the heck is going on throughout.
class NewCmd(appcommands.Cmd):
"""Featureful extension of appcommands.Cmd."""
def __init__(self, name, flag_values):
super(NewCmd, self).__init__(name, flag_values)
run_with_args = getattr(self, 'RunWithArgs', None)
self._new_style = isinstance(run_with_args, types.MethodType)
if self._new_style:
func = run_with_args.im_func
code = func.func_code # pylint: disable=redefined-outer-name
self._full_arg_list = list(code.co_varnames[:code.co_argcount])
# TODO(user): There might be some corner case where this
# is *not* the right way to determine bound vs. unbound method.
if isinstance(run_with_args.im_self, run_with_args.im_class):
self._full_arg_list.pop(0)
self._max_args = len(self._full_arg_list)
self._min_args = self._max_args - len(func.func_defaults or [])
self._star_args = bool(code.co_flags & 0x04)
self._star_kwds = bool(code.co_flags & 0x08)
if self._star_args:
self._max_args = sys.maxint
self._debug_mode = FLAGS.debug_mode
self.surface_in_shell = True
self.__doc__ = self.RunWithArgs.__doc__
elif self.Run.im_func is NewCmd.Run.im_func:
raise appcommands.AppCommandsError(
'Subclasses of NewCmd must override Run or RunWithArgs')
def __getattr__(self, name):
if name in self._command_flags:
return self._command_flags[name].value
return super(NewCmd, self).__getattribute__(name)
def _GetFlag(self, flagname):
if flagname in self._command_flags:
return self._command_flags[flagname]
else:
return None
def _CheckFlags(self):
"""Validate flags after command specific flags have been loaded.
This function will run through all values in appcommands._cmd_argv and
pick out any unused flags and verify their validity. If the flag is
not defined, we will print the flags.FlagsError text and exit; otherwise,
we will print a positioning error message and exit. Print statements
were used in this case because raising app.UsageError caused the usage
help text to be printed.
If no extraneous flags exist, this function will do nothing.
"""
unused_flags = [f for f in appcommands.GetCommandArgv() if
f.startswith('--') or f.startswith('-')]
for flag in unused_flags:
flag_name = flag[4:] if flag.startswith('--no') else flag[2:]
flag_name = flag_name.split('=')[0]
if flag_name not in FLAGS:
print ("FATAL Flags parsing error: Unknown command line flag '%s'\n"
"Run 'bq.py help' to get help" % flag)
sys.exit(1)
else:
print ("FATAL Flags positioning error: Flag '%s' appears after final "
"command line argument. Please reposition the flag.\nRun 'bq.py"
" help' to get help." % flag)
sys.exit(1)
def Run(self, argv):
"""Run this command.
If self is a new-style command, we set up arguments and call
self.RunWithArgs, gracefully handling exceptions. If not, we
simply call self.Run(argv).
Args:
argv: List of arguments as strings.
Returns:
0 on success, nonzero on failure.
"""
self._CheckFlags()
if not self._new_style:
return super(NewCmd, self).Run(argv)
original_values = self._command_flags.FlagValuesDict()
try:
args = self._command_flags(argv)[1:]
for flag, value in self._command_flags.FlagValuesDict().iteritems():
setattr(self, flag, value)
if value == original_values[flag]:
original_values.pop(flag)
new_args = []
for argname in self._full_arg_list[:self._min_args]:
flag = self._GetFlag(argname)
if flag is not None and flag.present:
new_args.append(flag.value)
elif args:
new_args.append(args.pop(0))
else:
print 'Not enough positional args, still looking for %s' % (argname,)
if self.usage:
print 'Usage: %s' % (self.usage,)
return 1
new_kwds = {}
for argname in self._full_arg_list[self._min_args:]:
flag = self._GetFlag(argname)
if flag is not None and flag.present:
new_kwds[argname] = flag.value
elif args:
new_kwds[argname] = args.pop(0)
if args and not self._star_args:
print 'Too many positional args, still have %s' % (args,)
return 1
new_args.extend(args)
if self._debug_mode:
return self.RunDebug(new_args, new_kwds)
else:
return self.RunSafely(new_args, new_kwds)
finally:
for flag, value in original_values.iteritems():
setattr(self, flag, value)
self._command_flags[flag].Parse(value)
def RunCmdLoop(self, argv):
"""Hook for use in cmd.Cmd-based command shells."""
try:
args = shlex.split(argv)
except ValueError as e:
raise SyntaxError(bigquery_client.EncodeForPrinting(e))
return self.Run([self._command_name] + args)
def _HandleError(self, e):
message = e
if isinstance(e, bigquery_client.BigqueryClientConfigurationError):
message += ' Try running "bq init".'
print 'Exception raised in %s operation: %s' % (self._command_name, message)
return 1
def RunDebug(self, args, kwds):
"""Run this command in debug mode."""
try:
return_value = self.RunWithArgs(*args, **kwds)
except BaseException as e:
# Don't break into the debugger for expected exceptions.
if isinstance(e, app.UsageError) or (
isinstance(e, bigquery_client.BigqueryError) and
not isinstance(e, bigquery_client.BigqueryInterfaceError)):
return self._HandleError(e)
print
print '****************************************************'
print '** Unexpected Exception raised in bq execution! **'
if FLAGS.headless:
print '** --headless mode enabled, exiting. **'
print '** See STDERR for traceback. **'
else:
print '** --debug_mode enabled, starting pdb. **'
print '****************************************************'
print
traceback.print_exc()
print
if not FLAGS.headless:
pdb.post_mortem()
return 1
return return_value
def RunSafely(self, args, kwds):
"""Run this command, turning exceptions into print statements."""
try:
return_value = self.RunWithArgs(*args, **kwds)
except BaseException as e:
return self._HandleError(e)
return return_value
class BigqueryCmd(NewCmd):
  """Bigquery-specific NewCmd wrapper."""

  def _NeedsInit(self):
    """Returns true if this command requires the init command before running.

    Subclasses will override for any exceptional cases.
    """
    # Init is needed unless a service account is in use or credentials
    # already exist on disk (bigqueryrc or the credential file).
    return not _UseServiceAccount() and not (
        os.path.exists(_GetBigqueryRcFilename()) or os.path.exists(
            FLAGS.credential_file))

  def Run(self, argv):
    """Bigquery commands run `init` before themselves if needed."""
    if self._NeedsInit():
      appcommands.GetCommandByName('init').Run([])
    return super(BigqueryCmd, self).Run(argv)

  def RunSafely(self, args, kwds):
    """Run this command, printing information about any exceptions raised."""
    try:
      return_value = self.RunWithArgs(*args, **kwds)
    except BaseException as e:
      return BigqueryCmd.ProcessError(e, name=self._command_name)
    return return_value

  @staticmethod
  def ProcessError(
      e, name='unknown',
      message_prefix='You have encountered a bug in the BigQuery CLI.'):
    """Translate an error message into some printing and a return code."""
    response = []
    retcode = 1
    contact_us_msg = (
        'Please file a bug report in our public issue tracker:\n'
        ' https://code.google.com/p/google-bigquery/issues/list\n'
        'Please include a brief description of the steps that led to this '
        'issue, as well as the following information: \n\n')
    # Diagnostic block appended to unexpected-error reports.
    error_details = (
        '========================================\n'
        '== Platform ==\n'
        ' %s\n'
        '== bq version ==\n'
        ' %s\n'
        '== Command line ==\n'
        ' %s\n'
        '== UTC timestamp ==\n'
        ' %s\n'
        '== Error trace ==\n'
        '%s'
        '========================================\n') % (
            ':'.join([
                platform.python_implementation(),
                platform.python_version(),
                platform.platform()]),
            _VERSION_NUMBER,
            sys.argv,
            time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()),
            ''.join(traceback.format_tb(sys.exc_info()[2]))
        )
    # Make encoding failures non-fatal while printing the error message.
    codecs.register_error('strict', codecs.replace_errors)
    message = bigquery_client.EncodeForPrinting(e)
    # Map known exception types to friendly messages and exit codes;
    # anything unrecognized falls through to the bug-report path below.
    if isinstance(e, (bigquery_client.BigqueryNotFoundError,
                      bigquery_client.BigqueryDuplicateError)):
      response.append('BigQuery error in %s operation: %s' % (name, message))
      retcode = 2
    elif isinstance(e, bigquery_client.BigqueryTermsOfServiceError):
      response.append(str(e) + '\n')
      response.append(_BIGQUERY_TOS_MESSAGE)
    elif isinstance(e, bigquery_client.BigqueryInvalidQueryError):
      response.append('Error in query string: %s' % (message,))
    elif (isinstance(e, bigquery_client.BigqueryError)
          and not isinstance(e, bigquery_client.BigqueryInterfaceError)):
      response.append('BigQuery error in %s operation: %s' % (name, message))
    elif isinstance(e, (app.UsageError, TypeError)):
      response.append(message)
    elif (isinstance(e, SyntaxError) or
          isinstance(e, bigquery_client.BigquerySchemaError)):
      response.append('Invalid input: %s' % (message,))
    elif isinstance(e, flags.FlagsError):
      response.append('Error parsing command: %s' % (message,))
    elif isinstance(e, KeyboardInterrupt):
      response.append('')
    else:  # pylint: disable=broad-except
      # Errors with traceback information are printed here.
      # The traceback module has nicely formatted the error trace
      # for us, so we don't want to undo that via TextWrap.
      if isinstance(e, bigquery_client.BigqueryInterfaceError):
        message_prefix = (
            'Bigquery service returned an invalid reply in %s operation: %s.'
            '\n\n'
            'Please make sure you are using the latest version '
            'of the bq tool and try again. '
            'If this problem persists, you may have encountered a bug in the '
            'bigquery client.' % (name, message))
      elif isinstance(e, oauth2client.client.Error):
        message_prefix = (
            'Authorization error. This may be a network connection problem, '
            'so please try again. If this problem persists, the credentials '
            'may be corrupt. Try deleting and re-creating your credentials. '
            'You can delete your credentials using '
            '"bq init --delete_credentials".'
            '\n\n'
            'If this problem still occurs, you may have encountered a bug '
            'in the bigquery client.')
      elif (isinstance(e, httplib.HTTPException)
            or isinstance(e, apiclient.errors.Error)
            or isinstance(e, httplib2.HttpLib2Error)):
        message_prefix = (
            'Network connection problem encountered, please try again.'
            '\n\n'
            'If this problem persists, you may have encountered a bug in the '
            'bigquery client.')
      print flags.TextWrap(message_prefix + ' ' + contact_us_msg)
      print error_details
      response.append('Unexpected exception in %s operation: %s' % (
          name, message))
    print flags.TextWrap('\n'.join(response))
    return retcode

  def PrintJobStartInfo(self, job):
    """Print a simple status line."""
    reference = BigqueryClient.ConstructObjectReference(job)
    print 'Successfully started %s %s' % (self._command_name, reference)
class _Load(BigqueryCmd):
  """Implements the `bq load` command: import data into a table."""
  usage = """load <destination_table> <source> <schema>"""

  def __init__(self, name, fv):
    """Registers load-specific flags on this command's flag set."""
    super(_Load, self).__init__(name, fv)
    flags.DEFINE_string(
        'field_delimiter', None,
        'The character that indicates the boundary between columns in the '
        'input file. "\\t" and "tab" are accepted names for tab.',
        short_name='F', flag_values=fv)
    flags.DEFINE_enum(
        'encoding', None,
        ['UTF-8', 'ISO-8859-1'],
        'The character encoding used by the input file. Options include:'
        '\n ISO-8859-1 (also known as Latin-1)'
        '\n UTF-8',
        short_name='E', flag_values=fv)
    flags.DEFINE_integer(
        'skip_leading_rows', None,
        'The number of rows at the beginning of the source file to skip.',
        flag_values=fv)
    flags.DEFINE_string(
        'schema', None,
        'Either a filename or a comma-separated list of fields in the form '
        'name[:type].',
        flag_values=fv)
    flags.DEFINE_boolean(
        'replace', False,
        'If true erase existing contents before loading new data.',
        flag_values=fv)
    flags.DEFINE_string(
        'quote', None,
        'Quote character to use to enclose records. Default is ". '
        'To indicate no quote character at all, use an empty string.',
        flag_values=fv)
    flags.DEFINE_integer(
        'max_bad_records', 0,
        'Maximum number of bad records allowed before the entire job fails.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'allow_quoted_newlines', None,
        'Whether to allow quoted newlines in CSV import data.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'allow_jagged_rows', None,
        'Whether to allow missing trailing optional columns '
        'in CSV import data.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'ignore_unknown_values', None,
        'Whether to allow and ignore extra, unrecognized values in CSV or JSON '
        'import data.',
        flag_values=fv)
    flags.DEFINE_enum(
        'source_format', None,
        ['CSV',
         'NEWLINE_DELIMITED_JSON',
         'DATASTORE_BACKUP'],
        'Format of source data. Options include:'
        '\n CSV'
        '\n NEWLINE_DELIMITED_JSON'
        '\n DATASTORE_BACKUP',
        flag_values=fv)
    flags.DEFINE_list(
        'projection_fields', [],
        'If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity '
        'properties to load into BigQuery from a Cloud Datastore backup. '
        'Property names are case sensitive and must refer to top-level '
        'properties.',
        flag_values=fv)

  def RunWithArgs(self, destination_table, source, schema=None):
    """Perform a load operation of source into destination_table.

    Usage:
      load <destination_table> <source> [<schema>]

    The <destination_table> is the fully-qualified table name of table to
    create, or append to if the table already exists.

    The <source> argument can be a path to a single local file, or a
    comma-separated list of URIs.

    The <schema> argument should be either the name of a JSON file or a text
    schema. This schema should be omitted if the table already has one.

    In the case that the schema is provided in text form, it should be a
    comma-separated list of entries of the form name[:type], where type will
    default to string if not specified.

    In the case that <schema> is a filename, it should contain a
    single array object, each entry of which should be an object with
    properties 'name', 'type', and (optionally) 'mode'. See the online
    documentation for more detail:
      https://developers.google.com/bigquery/preparing-data-for-bigquery

    Note: the case of a single-entry schema with no type specified is
    ambiguous; one can use name:string to force interpretation as a
    text schema.

    Examples:
      bq load ds.new_tbl ./info.csv ./info_schema.json
      bq load ds.new_tbl gs://mybucket/info.csv ./info_schema.json
      bq load ds.small gs://mybucket/small.csv name:integer,value:string
      bq load ds.small gs://mybucket/small.csv field1,field2,field3

    Arguments:
      destination_table: Destination table name.
      source: Name of local file to import, or a comma-separated list of
        URI paths to data to import.
      schema: Either a text schema or JSON file, as above.
    """
    client = Client.Get()
    table_reference = client.GetTableReference(destination_table)
    opts = {
        'encoding': self.encoding,
        'skip_leading_rows': self.skip_leading_rows,
        'max_bad_records': self.max_bad_records,
        'allow_quoted_newlines': self.allow_quoted_newlines,
        'job_id': _GetJobIdFromFlags(),
        'source_format': self.source_format,
        'projection_fields': self.projection_fields,
    }
    if self.replace:
      opts['write_disposition'] = 'WRITE_TRUNCATE'
    # The remaining options are only sent when explicitly set, so the
    # server-side defaults apply otherwise.
    if self.field_delimiter:
      opts['field_delimiter'] = _NormalizeFieldDelimiter(self.field_delimiter)
    if self.quote is not None:
      opts['quote'] = _NormalizeFieldDelimiter(self.quote)
    if self.allow_jagged_rows is not None:
      opts['allow_jagged_rows'] = self.allow_jagged_rows
    if self.ignore_unknown_values is not None:
      opts['ignore_unknown_values'] = self.ignore_unknown_values
    job = client.Load(table_reference, source, schema=schema, **opts)
    # In async mode Load returns immediately; report the job reference.
    if not FLAGS.sync:
      self.PrintJobStartInfo(job)
class _Query(BigqueryCmd):
  """Implements the `bq query` command: run a SQL query."""
  usage = """query <sql>"""

  def __init__(self, name, fv):
    """Registers query-specific flags on this command's flag set."""
    super(_Query, self).__init__(name, fv)
    flags.DEFINE_string(
        'destination_table', '',
        'Name of destination table for query results.',
        flag_values=fv)
    flags.DEFINE_integer(
        'start_row', 0,
        'First row to return in the result.',
        short_name='s', flag_values=fv)
    flags.DEFINE_integer(
        'max_rows', 100,
        'How many rows to return in the result.',
        short_name='n', flag_values=fv)
    flags.DEFINE_boolean(
        'batch', False,
        'Whether to run the query in batch mode.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'append_table', False,
        'When a destination table is specified, whether or not to append.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'rpc', False,
        'If true, use rpc-style query API instead of jobs.insert().',
        flag_values=fv)
    flags.DEFINE_boolean(
        'replace', False,
        'If true, erase existing contents before loading new data.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'allow_large_results', None,
        'Enables larger destination table sizes.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'dry_run', None,
        'Whether the query should be validated without executing.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'require_cache', None,
        'Whether to only run the query if it is already cached.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'use_cache', None,
        'Whether to use the query cache to avoid rerunning cached queries.',
        flag_values=fv)
    flags.DEFINE_float(
        'min_completion_ratio', None,
        '[Experimental] The minimum fraction of data that must be scanned '
        'before a query returns. If not set, the default server value (1.0) '
        'will be used.',
        lower_bound=0, upper_bound=1.0,
        flag_values=fv)
    flags.DEFINE_boolean(
        'flatten_results', None,
        'Whether to flatten nested and repeated fields in the result schema. '
        'If not set, the default behavior is to flatten.',
        flag_values=fv)

  def RunWithArgs(self, *args):
    # pylint: disable=g-doc-exception
    """Execute a query.

    Examples:
      bq query 'select count(*) from publicdata:samples.shakespeare'

    Usage:
      query <sql_query>
    """
    # Set up the params that are the same for rpc-style and jobs.insert()-style
    # queries.
    kwds = {
        'dry_run': self.dry_run,
        'use_cache': self.use_cache,
        'min_completion_ratio': self.min_completion_ratio,
    }
    # The query text is whatever positional words remain, joined by spaces.
    query = ' '.join(args)
    client = Client.Get()
    if self.rpc:
      # The rpc-style API does not support job-level options; reject any
      # flag that only makes sense for jobs.insert().
      if self.allow_large_results:
        raise app.UsageError(
            'allow_large_results cannot be specified in rpc mode.')
      if self.destination_table:
        raise app.UsageError(
            'destination_table cannot be specified in rpc mode.')
      if FLAGS.job_id or FLAGS.fingerprint_job_id:
        raise app.UsageError(
            'job_id and fingerprint_job_id cannot be specified in rpc mode.')
      if self.batch:
        raise app.UsageError(
            'batch cannot be specified in rpc mode.')
      if self.flatten_results:
        raise app.UsageError(
            'flatten_results cannot be specified in rpc mode.')
      kwds['max_results'] = self.max_rows
      fields, rows = client.RunQueryRpc(query, **kwds)
      Factory.ClientTablePrinter.GetTablePrinter().PrintTable(fields, rows)
    else:
      # jobs.insert() path: translate flags into job configuration.
      if self.destination_table and self.append_table:
        kwds['write_disposition'] = 'WRITE_APPEND'
      if self.destination_table and self.replace:
        kwds['write_disposition'] = 'WRITE_TRUNCATE'
      if self.require_cache:
        kwds['create_disposition'] = 'CREATE_NEVER'
      if self.batch:
        kwds['priority'] = 'BATCH'
      kwds['destination_table'] = self.destination_table
      kwds['allow_large_results'] = self.allow_large_results
      kwds['flatten_results'] = self.flatten_results
      kwds['job_id'] = _GetJobIdFromFlags()
      job = client.Query(query, **kwds)
      if self.dry_run:
        _PrintDryRunInfo(job)
      elif not FLAGS.sync:
        self.PrintJobStartInfo(job)
      else:
        # Synchronous mode: wait for the job and print the result rows.
        fields, rows = client.ReadSchemaAndJobRows(job['jobReference'],
                                                   start_row=self.start_row,
                                                   max_rows=self.max_rows)
        Factory.ClientTablePrinter.GetTablePrinter().PrintTable(fields, rows)
class _Extract(BigqueryCmd):
  """Implements the `bq extract` command: export a table to Google Storage."""
  usage = """extract <source_table> <destination_uris>"""

  def __init__(self, name, fv):
    """Registers extract-specific flags on this command's flag set."""
    super(_Extract, self).__init__(name, fv)
    flags.DEFINE_string(
        'field_delimiter', None,
        'The character that indicates the boundary between columns in the '
        'output file. "\\t" and "tab" are accepted names for tab.',
        short_name='F', flag_values=fv)
    flags.DEFINE_enum(
        'destination_format', None,
        ['CSV', 'NEWLINE_DELIMITED_JSON', 'AVRO'],
        'The format with which to write the extracted data. Tables with '
        'nested or repeated fields cannot be extracted to CSV.',
        flag_values=fv)
    flags.DEFINE_enum(
        'compression', 'NONE',
        ['GZIP', 'NONE'],
        'The compression type to use for exported files. Possible values '
        'include GZIP and NONE. The default value is NONE.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'print_header', None, 'Whether to print header rows for formats that '
        'have headers. Prints headers by default.',
        flag_values=fv)

  def RunWithArgs(self, source_table, destination_uris):
    """Perform an extract operation of source_table into destination_uris.

    Usage:
      extract <source_table> <destination_uris>

    Examples:
      bq extract ds.summary gs://mybucket/summary.csv

    Arguments:
      source_table: Source table to extract.
      destination_uris: One or more Google Storage URIs, separated by commas.
    """
    client = Client.Get()
    kwds = {
        'job_id': _GetJobIdFromFlags(),
    }
    table_reference = client.GetTableReference(source_table)
    job = client.Extract(
        table_reference, destination_uris,
        print_header=self.print_header,
        field_delimiter=_NormalizeFieldDelimiter(self.field_delimiter),
        destination_format=self.destination_format,
        compression=self.compression, **kwds)
    # In async mode Extract returns immediately; report the job reference.
    if not FLAGS.sync:
      self.PrintJobStartInfo(job)
class _List(BigqueryCmd):
  """Implements the `bq ls` command: list jobs, projects, datasets, tables."""
  usage = """ls [-j|-p|-d][-a] [-n <number>] [<id>]"""  # pylint: disable=g-line-too-long

  def __init__(self, name, fv):
    """Registers ls-specific flags on this command's flag set."""
    super(_List, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'all', None,
        'Show all results. For jobs, will show jobs from all users. For '
        'datasets, will list hidden datasets.',
        short_name='a', flag_values=fv)
    flags.DEFINE_boolean(
        'all_jobs', None,
        'DEPRECATED. Use --all instead',
        flag_values=fv)
    flags.DEFINE_boolean(
        'jobs', False,
        'Show jobs described by this identifier.',
        short_name='j', flag_values=fv)
    flags.DEFINE_integer(
        'max_results', None,
        'Maximum number to list.',
        short_name='n', flag_values=fv)
    flags.DEFINE_boolean(
        'projects', False,
        'Show all projects.',
        short_name='p', flag_values=fv)
    flags.DEFINE_boolean(
        'datasets', False,
        'Show datasets described by this identifier.',
        short_name='d', flag_values=fv)

  def RunWithArgs(self, identifier=''):
    """List the objects contained in the named collection.

    List the objects in the named project or dataset. A trailing : or
    . can be used to signify a project or dataset.
     * With -j, show the jobs in the named project.
     * With -p, show all projects.

    Examples:
      bq ls
      bq ls -j proj
      bq ls -p -n 1000
      bq ls mydataset
      bq ls -a
    """
    # pylint: disable=g-doc-exception
    if ValidateAtMostOneSelected(self.j, self.p, self.d):
      raise app.UsageError('Cannot specify more than one of -j, -p, or -d.')
    if self.j and self.p:
      raise app.UsageError(
          'Cannot specify more than one of -j and -p.')
    if self.p and identifier:
      raise app.UsageError('Cannot specify an identifier with -p')

    # Copy deprecated flag specifying 'all' to current one.
    if self.all_jobs is not None:
      self.a = self.all_jobs

    client = Client.Get()
    formatter = _GetFormatterFromFlags()
    if identifier:
      reference = client.GetReference(identifier)
    else:
      try:
        reference = client.GetReference(identifier)
      except bigquery_client.BigqueryError:
        # We want to let through the case of no identifier, which
        # will fall through to the second case below.
        reference = None

    # If we got a TableReference, we might be able to make sense
    # of it as a DatasetReference, as in 'ls foo' with dataset_id
    # set.
    if isinstance(reference, TableReference):
      try:
        reference = client.GetDatasetReference(identifier)
      except bigquery_client.BigqueryError:
        pass
    _Typecheck(reference, (types.NoneType, ProjectReference, DatasetReference),
               ('Invalid identifier "%s" for ls, cannot call list on object '
                'of type %s') % (identifier, type(reference).__name__))

    if self.d and isinstance(reference, DatasetReference):
      # -d on a dataset means "list the datasets of its project".
      reference = reference.GetProjectReference()

    # NOTE(review): page_token is never advanced, so only the first page of
    # results is listed (bounded by max_results) — confirm this is intended.
    page_token = None
    if self.j:
      reference = client.GetProjectReference(identifier)
      _Typecheck(reference, ProjectReference,
                 'Cannot determine job(s) associated with "%s"' % (identifier,))
      project_reference = client.GetProjectReference(identifier)
      BigqueryClient.ConfigureFormatter(formatter, JobReference)
      results = map(  # pylint: disable=g-long-lambda
          client.FormatJobInfo,
          client.ListJobs(reference=project_reference,
                          max_results=self.max_results,
                          all_users=self.a, page_token=page_token))
    elif self.p or reference is None:
      BigqueryClient.ConfigureFormatter(formatter, ProjectReference)
      results = map(  # pylint: disable=g-long-lambda
          client.FormatProjectInfo,
          client.ListProjects(max_results=self.max_results, page_token=page_token))
    elif isinstance(reference, ProjectReference):
      BigqueryClient.ConfigureFormatter(formatter, DatasetReference)
      results = map(  # pylint: disable=g-long-lambda
          client.FormatDatasetInfo,
          client.ListDatasets(reference, max_results=self.max_results,
                              list_all=self.a, page_token=page_token))
    else:  # isinstance(reference, DatasetReference):
      BigqueryClient.ConfigureFormatter(formatter, TableReference)
      results = map(  # pylint: disable=g-long-lambda
          client.FormatTableInfo,
          client.ListTables(reference, max_results=self.max_results,
                            page_token=page_token))

    for result in results:
      formatter.AddDict(result)
    formatter.Print()
class _Delete(BigqueryCmd):
  """Implements the `bq rm` command: delete a dataset or table."""
  usage = """rm [-f] [-r] [(-d|-t)] <identifier>"""

  def __init__(self, name, fv):
    """Registers rm-specific flags on this command's flag set."""
    super(_Delete, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'dataset', False,
        'Remove dataset described by this identifier.',
        short_name='d', flag_values=fv)
    flags.DEFINE_boolean(
        'table', False,
        'Remove table described by this identifier.',
        short_name='t', flag_values=fv)
    flags.DEFINE_boolean(
        'force', False,
        "Ignore existing tables and datasets, don't prompt.",
        short_name='f', flag_values=fv)
    flags.DEFINE_boolean(
        'recursive', False,
        'Remove dataset and any tables it may contain.',
        short_name='r', flag_values=fv)

  def RunWithArgs(self, identifier):
    """Delete the dataset or table described by identifier.

    Always requires an identifier, unlike the show and ls commands.
    By default, also requires confirmation before deleting. Supports
    the -d -t flags to signify that the identifier is a dataset
    or table.
     * With -f, don't ask for confirmation before deleting.
     * With -r, remove all tables in the named dataset.

    Examples:
      bq rm ds.table
      bq rm -r -f old_dataset
    """
    client = Client.Get()

    # pylint: disable=g-doc-exception
    if self.d and self.t:
      raise app.UsageError('Cannot specify more than one of -d and -t.')
    if not identifier:
      raise app.UsageError('Must provide an identifier for rm.')

    # Resolve the identifier; without -d/-t let the client decide the type.
    if self.t:
      reference = client.GetTableReference(identifier)
    elif self.d:
      reference = client.GetDatasetReference(identifier)
    else:
      reference = client.GetReference(identifier)
      _Typecheck(reference, (DatasetReference, TableReference),
                 'Invalid identifier "%s" for rm.' % (identifier,))

    # -r only makes sense for datasets.
    if isinstance(reference, TableReference) and self.r:
      raise app.UsageError(
          'Cannot specify -r with %r' % (reference,))

    if not self.force:
      # Only prompt when the object actually exists; deleting a missing
      # object will fail (or be ignored with -f) without confirmation.
      if ((isinstance(reference, DatasetReference) and
           client.DatasetExists(reference)) or
          (isinstance(reference, TableReference)
           and client.TableExists(reference))):
        if 'y' != _PromptYN('rm: remove %r? (y/N) ' % (reference,)):
          print 'NOT deleting %r, exiting.' % (reference,)
          return 0

    if isinstance(reference, DatasetReference):
      client.DeleteDataset(reference,
                           ignore_not_found=self.force,
                           delete_contents=self.recursive)
    elif isinstance(reference, TableReference):
      client.DeleteTable(reference,
                         ignore_not_found=self.force)
class _Copy(BigqueryCmd):
  """Implements the `bq cp` command: copy one or more tables to another."""
  usage = """cp [-n] <source_table>[,<source_table>]* <dest_table>"""

  def __init__(self, name, fv):
    """Registers cp-specific flags on this command's flag set."""
    super(_Copy, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'no_clobber', False,
        'Do not overwrite an existing table.',
        short_name='n', flag_values=fv)
    flags.DEFINE_boolean(
        'force', False,
        "Ignore existing destination tables, don't prompt.",
        short_name='f', flag_values=fv)
    flags.DEFINE_boolean(
        'append_table', False,
        'Append to an existing table.',
        short_name='a', flag_values=fv)

  def RunWithArgs(self, source_tables, dest_table):
    """Copies one table to another.

    Examples:
      bq cp dataset.old_table dataset2.new_table
    """
    client = Client.Get()
    source_references = [
        client.GetTableReference(src) for src in source_tables.split(',')]
    source_references_str = ', '.join(str(src) for src in source_references)
    dest_reference = client.GetTableReference(dest_table)

    # Map the mutually-related flags onto the job's write disposition.
    # -a takes precedence over -n; the default truncates the destination.
    if self.append_table:
      write_disposition = 'WRITE_APPEND'
      ignore_already_exists = True
    elif self.no_clobber:
      write_disposition = 'WRITE_EMPTY'
      ignore_already_exists = True
    else:
      write_disposition = 'WRITE_TRUNCATE'
      ignore_already_exists = False

    if not self.force:
      # Prompt before replacing an existing destination table.
      if client.TableExists(dest_reference):
        if 'y' != _PromptYN('cp: replace %s? (y/N) ' % (dest_reference,)):
          print 'NOT copying %s, exiting.' % (source_references_str,)
          return 0
    kwds = {
        'write_disposition': write_disposition,
        'ignore_already_exists': ignore_already_exists,
        'job_id': _GetJobIdFromFlags(),
    }
    job = client.CopyTable(source_references, dest_reference, **kwds)
    if job is None:
      # CopyTable returns None when the destination exists and
      # ignore_already_exists is set.
      print "Table '%s' already exists, skipping" % (dest_reference,)
    elif not FLAGS.sync:
      self.PrintJobStartInfo(job)
    else:
      print "Tables '%s' successfully copied to '%s'" % (
          source_references_str, dest_reference)
class _Make(BigqueryCmd):
  usage = """mk [-d] <identifier> OR mk [-t] <identifier> [<schema>]"""
  def __init__(self, name, fv):
    super(_Make, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'force', False,
        'Ignore errors reporting that the object already exists.',
        short_name='f', flag_values=fv)
    flags.DEFINE_boolean(
        'dataset', False,
        'Create dataset with this name.',
        short_name='d', flag_values=fv)
    flags.DEFINE_boolean(
        'table', False,
        'Create table with this name.',
        short_name='t', flag_values=fv)
    flags.DEFINE_string(
        'schema', '',
        'Either a filename or a comma-separated list of fields in the form '
        'name[:type].',
        flag_values=fv)
    flags.DEFINE_string(
        'description', None,
        'Description of the dataset or table.',
        flag_values=fv)
    flags.DEFINE_string(
        'data_location', None,
        'Location of the data. Either US or EU. Requires that the project '
        'has data location enabled',
        flag_values=fv)
    flags.DEFINE_integer(
        'expiration', None,
        'Expiration time, in seconds from now, of a table.',
        flag_values=fv)
    flags.DEFINE_integer(
        'default_table_expiration', None,
        'Default lifetime, in seconds, for newly-created tables in a '
        'dataset. Newly-created tables will have an expiration time of '
        'the current time plus this value.',
        flag_values=fv)
    flags.DEFINE_string(
        'view', '',
        'Create view with this SQL query.',
        flag_values=fv)
  def RunWithArgs(self, identifier='', schema=''):
    # pylint: disable=g-doc-exception
    """Create a dataset, table or view with this name.
    See 'bq help load' for more information on specifying the schema.
    Examples:
      bq mk new_dataset
      bq mk new_dataset.new_table
      bq --dataset_id=new_dataset mk table
      bq mk -t new_dataset.newtable name:integer,value:string
      bq mk --view='select 1 as num' new_dataset.newview
      bq mk -d --data_location=EU new_dataset
    """
    client = Client.Get()
    if self.d and self.t:
      raise app.UsageError('Cannot specify both -d and -t.')
    if ValidateAtMostOneSelected(self.schema, self.view):
      raise app.UsageError('Cannot specify more than one of'
                           ' --schema or --view.')
    # Resolve the identifier. A --view implies a table reference; an
    # empty identifier (or -d) is treated as a dataset reference.
    if self.t:
      reference = client.GetTableReference(identifier)
    elif self.view:
      reference = client.GetTableReference(identifier)
    elif self.d or not identifier:
      reference = client.GetDatasetReference(identifier)
    else:
      reference = client.GetReference(identifier)
    _Typecheck(reference, (DatasetReference, TableReference),
               "Invalid identifier '%s' for mk." % (identifier,))
    if isinstance(reference, DatasetReference):
      # Dataset branch: schema/expiration are table-only options.
      if self.schema:
        raise app.UsageError('Cannot specify schema with a dataset.')
      if self.expiration:
        raise app.UsageError('Cannot specify an expiration for a dataset.')
      if client.DatasetExists(reference):
        message = "Dataset '%s' already exists." % (reference,)
        if not self.f:
          raise bigquery_client.BigqueryError(message)
        else:
          # With -f, an existing dataset is reported but not an error.
          print message
          return
      default_table_exp_ms = None
      if self.default_table_expiration is not None:
        default_table_exp_ms = self.default_table_expiration * 1000
      client.CreateDataset(reference, ignore_existing=True,
                           description=self.description,
                           default_table_expiration_ms=default_table_exp_ms,
                           data_location=self.data_location)
      print "Dataset '%s' successfully created." % (reference,)
    elif isinstance(reference, TableReference):
      # Table/view branch.
      object_name = 'Table'
      if self.view:
        object_name = 'View'
      if client.TableExists(reference):
        message = ("%s '%s' could not be created; a table with this name "
                   "already exists.") % (object_name, reference,)
        if not self.f:
          raise bigquery_client.BigqueryError(message)
        else:
          print message
          return
      if schema:
        schema = bigquery_client.BigqueryClient.ReadSchema(schema)
      else:
        schema = None
      expiration = None
      # data_location and default_table_expiration are dataset-only.
      if self.data_location:
        raise app.UsageError('Cannot specify data location for a table.')
      if self.default_table_expiration:
        raise app.UsageError('Cannot specify default expiration for a table.')
      if self.expiration:
        # Convert "seconds from now" into an absolute epoch-ms timestamp.
        expiration = int(self.expiration + time.time()) * 1000
      query_arg = self.view or None
      client.CreateTable(reference, ignore_existing=True, schema=schema,
                         description=self.description,
                         expiration=expiration,
                         view_query=query_arg,
                         )
      print "%s '%s' successfully created." % (object_name, reference,)
class _Update(BigqueryCmd):
  usage = """update [-d] [-t] <identifier> [<schema>]"""
  def __init__(self, name, fv):
    super(_Update, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'dataset', False,
        'Updates a dataset with this name.',
        short_name='d', flag_values=fv)
    flags.DEFINE_boolean(
        'table', False,
        'Updates a table with this name.',
        short_name='t', flag_values=fv)
    flags.DEFINE_string(
        'schema', '',
        'Either a filename or a comma-separated list of fields in the form '
        'name[:type].',
        flag_values=fv)
    flags.DEFINE_string(
        'description', None,
        'Description of the dataset, table or view.',
        flag_values=fv)
    flags.DEFINE_integer(
        'expiration', None,
        'Expiration time, in seconds from now, of a table or view. '
        'Specifying 0 removes expiration time.',
        flag_values=fv)
    flags.DEFINE_integer(
        'default_table_expiration', None,
        'Default lifetime, in seconds, for newly-created tables in a '
        'dataset. Newly-created tables will have an expiration time of '
        'the current time plus this value. Specify "0" to remove existing '
        'expiration.',
        flag_values=fv)
    flags.DEFINE_string(
        'source', None,
        'Path to file with JSON payload for an update',
        flag_values=fv)
    flags.DEFINE_string(
        'view', '',
        'SQL query of a view.',
        flag_values=fv)
  def RunWithArgs(self, identifier='', schema=''):
    # pylint: disable=g-doc-exception
    """Updates a dataset, table or view with this name.
    See 'bq help load' for more information on specifying the schema.
    Examples:
      bq update --description "Dataset description" existing_dataset
      bq update --description "My table" existing_dataset.existing_table
      bq update -t existing_dataset.existing_table name:integer,value:string
      bq update --view='select 1 as num' existing_dataset.existing_view
    """
    client = Client.Get()
    if self.d and self.t:
      raise app.UsageError('Cannot specify both -d and -t.')
    if ValidateAtMostOneSelected(self.schema, self.view):
      raise app.UsageError('Cannot specify more than one of'
                           ' --schema or --view.')
    # Resolve the identifier; --view implies a table reference, and an
    # empty identifier (or -d) means a dataset reference.
    if self.t:
      reference = client.GetTableReference(identifier)
    elif self.view:
      reference = client.GetTableReference(identifier)
    elif self.d or not identifier:
      reference = client.GetDatasetReference(identifier)
    else:
      reference = client.GetReference(identifier)
    _Typecheck(reference, (DatasetReference, TableReference),
               "Invalid identifier '%s' for update." % (identifier,))
    if isinstance(reference, DatasetReference):
      # Dataset branch: schema/view/expiration are table-only options.
      if self.schema:
        raise app.UsageError('Cannot specify schema with a dataset.')
      if self.view:
        raise app.UsageError('Cannot specify view with a dataset.')
      if self.expiration:
        raise app.UsageError('Cannot specify an expiration for a dataset.')
      if self.source and self.description:
        raise app.UsageError('Cannot specify description with a source.')
      default_table_exp_ms = None
      if self.default_table_expiration is not None:
        default_table_exp_ms = self.default_table_expiration * 1000
      _UpdateDataset(client, reference, self.description, self.source,
                     default_table_exp_ms)
      print "Dataset '%s' successfully updated." % (reference,)
    elif isinstance(reference, TableReference):
      object_name = 'Table'
      if self.view:
        object_name = 'View'
      if self.source:
        raise app.UsageError('%s update does not support --source.' %
                             object_name)
      if schema:
        schema = bigquery_client.BigqueryClient.ReadSchema(schema)
      else:
        schema = None
      expiration = None
      if self.expiration is not None:
        if self.expiration == 0:
          # An explicit 0 removes the existing expiration (see flag help).
          expiration = 0
        else:
          # Convert "seconds from now" into an absolute epoch-ms timestamp.
          expiration = int(self.expiration + time.time()) * 1000
      if self.default_table_expiration:
        raise app.UsageError('Cannot specify default expiration for a table.')
      query_arg = self.view or None
      client.UpdateTable(reference, schema=schema,
                         description=self.description,
                         expiration=expiration,
                         view_query=query_arg,
                         )
      print "%s '%s' successfully updated." % (object_name, reference,)
def _UpdateDataset(client, reference, description, source,
default_table_expiration_ms):
"""Updates a dataset.
Reads JSON file if specified and loads updated values, before calling bigquery
dataset update.
Args:
client: the BigQuery client.
reference: the DatasetReference to update.
description: an optional dataset description.
source: an optional filename containing the JSON payload.
default_table_expiration_ms: optional number of milliseconds for the
default expiration duration for new tables created in this dataset.
Raises:
UsageError: when incorrect usage or invalid args are used.
"""
acl = None
if source is not None:
if not os.path.exists(source):
raise app.UsageError('Source file not found: %s' % (source,))
if not os.path.isfile(source):
raise app.UsageError('Source path is not a file: %s' % (source,))
with open(source) as f:
try:
payload = json.load(f)
if payload.__contains__('description'):
description = payload['description']
if payload.__contains__('access'):
acl = payload['access']
except ValueError as e:
raise app.UsageError('Error decoding JSON schema from file %s: %s'
% (source, e))
client.UpdateDataset(reference, description=description, acl=acl,
default_table_expiration_ms=default_table_expiration_ms)
class _Show(BigqueryCmd):
  usage = """show [<identifier>]"""
  def __init__(self, name, fv):
    super(_Show, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'job', False,
        'If true, interpret this identifier as a job id.',
        short_name='j', flag_values=fv)
    flags.DEFINE_boolean(
        'dataset', False,
        'Show dataset with this name.',
        short_name='d', flag_values=fv)
    flags.DEFINE_boolean(
        'view', False,
        'Show view specific details instead of general table details.',
        flag_values=fv)
  def RunWithArgs(self, identifier=''):
    """Show all information about an object.
    Examples:
      bq show -j <job_id>
      bq show dataset
      bq show dataset.table
      bq show [--view] dataset.view
    """
    # pylint: disable=g-doc-exception
    client = Client.Get()
    # custom_format selects which fields _PrintObjectInfo renders;
    # --view switches to the view-specific layout.
    custom_format = 'show'
    if self.j:
      reference = client.GetJobReference(identifier)
    elif self.d:
      reference = client.GetDatasetReference(identifier)
    elif self.view:
      reference = client.GetTableReference(identifier)
      custom_format = 'view'
    else:
      reference = client.GetReference(identifier)
    if reference is None:
      raise app.UsageError('Must provide an identifier for show.')
    object_info = client.GetObjectInfo(reference)
    _PrintObjectInfo(object_info, reference, custom_format=custom_format)
def _PrintObjectInfo(object_info, reference, custom_format):
# The JSON formats are handled separately so that they don't print
# the record as a list of one record.
if FLAGS.format in ['prettyjson', 'json']:
_PrintFormattedJsonObject(object_info)
elif FLAGS.format in [None, 'sparse', 'pretty']:
formatter = _GetFormatterFromFlags()
BigqueryClient.ConfigureFormatter(formatter, type(reference),
print_format=custom_format,
object_info=object_info)
object_info = BigqueryClient.FormatInfoByKind(object_info)
formatter.AddDict(object_info)
print '%s %s\n' % (reference.typename.capitalize(), reference)
formatter.Print()
print
if (isinstance(reference, JobReference) and
object_info['State'] == 'FAILURE'):
error_result = object_info['status']['errorResult']
error_ls = object_info['status'].get('errors', [])
error = bigquery_client.BigqueryError.Create(
error_result, error_result, error_ls)
print 'Errors encountered during job execution. %s\n' % (error,)
else:
formatter = _GetFormatterFromFlags()
formatter.AddColumns(object_info.keys())
formatter.AddDict(object_info)
formatter.Print()
class _Cancel(BigqueryCmd):
  """Attempt to cancel the specified job if it is running."""
  usage = """cancel [--nosync] [<job_id>]"""
  def __init__(self, name, fv):
    # No command-specific flags; sync behavior comes from the global
    # --[no]sync flag.
    super(_Cancel, self).__init__(name, fv)
  def RunWithArgs(self, job_id=''):
    # pylint: disable=g-doc-exception
    """Request a cancel and waits for the job to be cancelled.
    Requests a cancel and then either:
    a) waits until the job is done if the sync flag is set [default], or
    b) returns immediately if the sync flag is not set.
    Not all job types support a cancel, an error is returned if it cannot be
    cancelled. Even for jobs that support a cancel, success is not guaranteed,
    the job may have completed by the time the cancel request is noticed, or
    the job may be in a stage where it cannot be cancelled.
    Examples:
      bq cancel job_id  # Requests a cancel and waits until the job is done.
      bq --nosync cancel job_id  # Requests a cancel and returns immediately.
    Arguments:
      job_id: Job ID to cancel.
    """
    client = Client.Get()
    job = client.CancelJob(job_id=job_id)
    _PrintObjectInfo(job, JobReference.Create(**job['jobReference']),
                     custom_format='show')
    status = job['status']
    if status['state'] == 'DONE':
      # A DONE job whose errorResult reason is 'stopped' was cancelled;
      # any other DONE state means it finished before the cancel landed.
      if ('errorResult' in status and
          'reason' in status['errorResult'] and
          status['errorResult']['reason'] == 'stopped'):
        print 'Job has been cancelled successfully.'
      else:
        print 'Job completed before it could be cancelled.'
    else:
      print 'Job cancel has been requested.'
    return 0
class _Head(BigqueryCmd):
  usage = """head [-n <max rows>] [-j] [-t] <identifier>"""
  def __init__(self, name, fv):
    super(_Head, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'job', False,
        'Reads the results of a query job.',
        short_name='j', flag_values=fv)
    flags.DEFINE_boolean(
        'table', False,
        'Reads rows from a table.',
        short_name='t', flag_values=fv)
    flags.DEFINE_integer(
        'start_row', 0,
        'The number of rows to skip before showing table data.',
        short_name='s', flag_values=fv)
    flags.DEFINE_integer(
        'max_rows', 100,
        'The number of rows to print when showing table data.',
        short_name='n', flag_values=fv)
  def RunWithArgs(self, identifier=''):
    # pylint: disable=g-doc-exception
    """Displays rows in a table.
    Examples:
      bq head dataset.table
      bq head -j job
      bq head -n 10 dataset.table
      bq head -s 5 -n 10 dataset.table
    """
    client = Client.Get()
    if self.j and self.t:
      raise app.UsageError('Cannot specify both -j and -t.')
    # -j reads a job's result rows; otherwise the identifier is a table.
    if self.j:
      reference = client.GetJobReference(identifier)
    else:
      reference = client.GetTableReference(identifier)
    # self.s / self.n are the short names of --start_row / --max_rows.
    if isinstance(reference, JobReference):
      fields, rows = client.ReadSchemaAndJobRows(dict(reference),
                                                 start_row=self.s,
                                                 max_rows=self.n)
    elif isinstance(reference, TableReference):
      fields, rows = client.ReadSchemaAndRows(dict(reference),
                                              start_row=self.s,
                                              max_rows=self.n)
    else:
      raise app.UsageError("Invalid identifier '%s' for head." % (identifier,))
    Factory.ClientTablePrinter.GetTablePrinter().PrintTable(fields, rows)
class _Insert(BigqueryCmd):
  usage = """insert [-s] [-i] <table identifier> [file]"""
  def __init__(self, name, fv):
    super(_Insert, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'skip_invalid_rows', None,
        'Attempt to insert any valid rows, even if invalid rows are present.',
        short_name='s', flag_values=fv)
    flags.DEFINE_boolean(
        'ignore_unknown_values', None,
        'Ignore any values in a row that are not present in the schema.',
        short_name='i', flag_values=fv)
  def RunWithArgs(self, identifier='', filename=None):
    """Inserts rows in a table.
    Inserts the records formatted as newline delimited JSON from file
    into the specified table. If file is not specified, reads from stdin.
    If there were any insert errors it prints the errors to stdout.
    Examples:
      bq insert dataset.table /tmp/mydata.json
      echo '{"a":1, "b":2}' | bq insert dataset.table
    """
    # Read from the named file if given, otherwise stream from stdin.
    if filename:
      with open(filename, 'r') as json_file:
        return self._DoInsert(identifier, json_file,
                              skip_invalid_rows=self.skip_invalid_rows,
                              ignore_unknown_values=self.ignore_unknown_values)
    else:
      return self._DoInsert(identifier, sys.stdin,
                            skip_invalid_rows=self.skip_invalid_rows,
                            ignore_unknown_values=self.ignore_unknown_values)
  def _DoInsert(self, identifier, json_file, skip_invalid_rows=None,
                ignore_unknown_values=None):
    """Insert the contents of the file into a table."""
    client = Client.Get()
    reference = client.GetReference(identifier)
    _Typecheck(reference, (TableReference,),
               'Must provide a table identifier for insert.')
    reference = dict(reference)
    batch = []
    # Flush sends the accumulated batch and clears it; returns the raw
    # API result plus any per-row insert errors.
    def Flush():
      result = client.InsertTableRows(
          reference, batch,
          skip_invalid_rows=skip_invalid_rows,
          ignore_unknown_values=ignore_unknown_values)
      del batch[:]
      return result, result.get('insertErrors', None)
    result = {}
    errors = None
    lineno = 1
    for line in json_file:
      try:
        batch.append(bigquery_client.JsonToInsertEntry(None, line))
        lineno += 1
      except bigquery_client.BigqueryClientError as e:
        raise app.UsageError('Line %d: %s' % (lineno, str(e)))
      # Send a request whenever the batch reaches the configured size;
      # stop reading further input as soon as any errors come back.
      if (FLAGS.max_rows_per_request and
          len(batch) == FLAGS.max_rows_per_request):
        result, errors = Flush()
      if errors: break
    # Flush any remaining partial batch.
    if batch and errors is None:
      result, errors = Flush()
    if FLAGS.format in ['prettyjson', 'json']:
      _PrintFormattedJsonObject(result)
    elif FLAGS.format in [None, 'sparse', 'pretty']:
      if errors:
        for entry in result['insertErrors']:
          entry_errors = entry['errors']
          sys.stdout.write('record %d errors: ' % (entry['index'],))
          for error in entry_errors:
            print '\t%s: %s' % (error['reason'], error.get('message'))
    return 1 if errors else 0
class _Wait(BigqueryCmd):
  usage = """wait [<job_id>] [<secs>]"""
  def __init__(self, name, fv):
    super(_Wait, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'fail_on_error', True,
        'When done waiting for the job, exit the process with an error '
        'if the job is still running, or ended with a failure.',
        flag_values=fv)
  def RunWithArgs(self, job_id='', secs=sys.maxint):
    # pylint: disable=g-doc-exception
    """Wait some number of seconds for a job to finish.
    Poll job_id until either (1) the job is DONE or (2) the
    specified number of seconds have elapsed. Waits forever
    if unspecified. If no job_id is specified, and there is
    only one running job, we poll that job.
    Examples:
      bq wait # Waits forever for the currently running job.
      bq wait job_id  # Waits forever
      bq wait job_id 100  # Waits 100 seconds
      bq wait job_id 0  # Polls if a job is done, then returns immediately.
      # These may exit with a non-zero status code to indicate "failure":
      bq wait --fail_on_error job_id  # Succeeds if job succeeds.
      bq wait --fail_on_error job_id 100  # Succeeds if job succeeds in 100 sec.
    Arguments:
      job_id: Job ID to wait on.
      secs: Number of seconds to wait (must be >= 0).
    """
    try:
      secs = BigqueryClient.NormalizeWait(secs)
    except ValueError:
      raise app.UsageError('Invalid wait time: %s' % (secs,))
    client = Client.Get()
    if not job_id:
      # No job given: only unambiguous if exactly one job is in flight.
      running_jobs = client.ListJobRefs(state_filter=['PENDING', 'RUNNING'])
      if len(running_jobs) != 1:
        raise bigquery_client.BigqueryError(
            'No job_id provided, found %d running jobs' % (len(running_jobs),))
      job_reference = running_jobs.pop()
    else:
      job_reference = client.GetJobReference(job_id)
    try:
      job = client.WaitJob(job_reference=job_reference, wait=secs)
      _PrintObjectInfo(job, JobReference.Create(**job['jobReference']),
                       custom_format='show')
      return 1 if self.fail_on_error and BigqueryClient.IsFailedJob(job) else 0
    except StopIteration as e:
      # WaitJob signals a timeout by raising StopIteration.
      print
      print e
    # If we reach this point, we have not seen the job succeed.
    return 1 if self.fail_on_error else 0
# pylint: disable=g-bad-name
class CommandLoop(cmd.Cmd):
  """Instance of cmd.Cmd built to work with NewCmd."""
  class TerminateSignal(Exception):
    """Exception type used for signaling loop completion."""
    pass
  def __init__(self, commands, prompt=None):
    cmd.Cmd.__init__(self)
    # Only commands that opted in via surface_in_shell are exposed in the
    # shell; 'help' is always available.
    self._commands = {'help': commands['help']}
    self._special_command_names = ['help', 'repl', 'EOF']
    for name, command in commands.iteritems():
      if (name not in self._special_command_names and
          isinstance(command, NewCmd) and
          command.surface_in_shell):
        self._commands[name] = command
        # cmd.Cmd dispatches 'foo' to a do_foo attribute.
        setattr(self, 'do_%s' % (name,), command.RunCmdLoop)
    self._default_prompt = prompt or 'BigQuery> '
    self._set_prompt()
    self._last_return_code = 0
  @property
  def last_return_code(self):
    # Return code of the most recently executed command (see onecmd).
    return self._last_return_code
  def _set_prompt(self):
    # Show the current project/dataset path in the prompt when one is set.
    client = Client().Get()
    if client.project_id:
      path = str(client.GetReference())
      self.prompt = '%s> ' % (path,)
    else:
      self.prompt = self._default_prompt
  def do_EOF(self, *unused_args):
    """Terminate the running command loop.
    This function raises an exception to avoid the need to do
    potentially-error-prone string parsing inside onecmd.
    Returns:
      Never returns.
    Raises:
      CommandLoop.TerminateSignal: always.
    """
    raise CommandLoop.TerminateSignal()
  def postloop(self):
    print 'Goodbye.'
  def completedefault(self, unused_text, line, unused_begidx, unused_endidx):
    # On tab completion, print the usage of the current command and
    # re-echo the prompt with the partial line; no completions returned.
    if not line:
      return []
    else:
      command_name = line.partition(' ')[0].lower()
      usage = ''
      if command_name in self._commands:
        usage = self._commands[command_name].usage
      elif command_name == 'set':
        usage = 'set (project_id|dataset_id) <name>'
      elif command_name == 'unset':
        usage = 'unset (project_id|dataset_id)'
      if usage:
        print
        print usage
        print '%s%s' % (self.prompt, line),
      return []
  def emptyline(self):
    print 'Available commands:',
    print ' '.join(list(self._commands))
  def precmd(self, line):
    """Preprocess the shell input."""
    if line == 'EOF':
      return line
    if line.startswith('exit') or line.startswith('quit'):
      return 'EOF'
    words = line.strip().split()
    # A bare SELECT statement is rewritten into a quoted `query` command.
    if len(words) > 1 and words[0].lower() == 'select':
      return 'query %s' % (pipes.quote(line),)
    # A single unknown word is turned into a help request for it.
    if len(words) == 1 and words[0] not in ['help', 'ls', 'version']:
      return 'help %s' % (line.strip(),)
    return line
  def onecmd(self, line):
    """Process a single command.
    Runs a single command, and stores the return code in
    self._last_return_code. Always returns False unless the command
    was EOF.
    Args:
      line: (str) Command line to process.
    Returns:
      A bool signaling whether or not the command loop should terminate.
    """
    try:
      self._last_return_code = cmd.Cmd.onecmd(self, line)
    except CommandLoop.TerminateSignal:
      return True
    except BaseException as e:
      # Report the failure but keep the shell alive.
      name = line.split(' ')[0]
      BigqueryCmd.ProcessError(e, name=name)
      self._last_return_code = 1
    return False
  def get_names(self):
    # Used by cmd.Cmd for help/completion listings: expose the surfaced
    # do_<command> names plus do_select, and hide the internal do_EOF.
    names = dir(self)
    commands = (name for name in self._commands
                if name not in self._special_command_names)
    names.extend('do_%s' % (name,) for name in commands)
    names.append('do_select')
    names.remove('do_EOF')
    return names
  def do_set(self, line):
    """Set the value of the project_id or dataset_id flag."""
    client = Client().Get()
    name, value = (line.split(' ') + ['', ''])[:2]
    if (name not in ('project_id', 'dataset_id') or
        not 1 <= len(line.split(' ')) <= 2):
      print 'set (project_id|dataset_id) <name>'
    elif name == 'dataset_id' and not client.project_id:
      print 'Cannot set dataset_id with project_id unset'
    else:
      setattr(client, name, value)
      self._set_prompt()
    return 0
  def do_unset(self, line):
    """Unset the value of the project_id or dataset_id flag."""
    name = line.strip()
    client = Client.Get()
    if name not in ('project_id', 'dataset_id'):
      print 'unset (project_id|dataset_id)'
    else:
      setattr(client, name, '')
      # Clearing the project also invalidates any dataset selection.
      if name == 'project_id':
        client.dataset_id = ''
      self._set_prompt()
    return 0
  def do_help(self, command_name):
    """Print the help for command_name (if present) or general help."""
    # TODO(user): Add command-specific flags.
    def FormatOneCmd(name, command, command_names):
      indent_size = appcommands.GetMaxCommandLength() + 3
      if len(command_names) > 1:
        indent = ' ' * indent_size
        command_help = flags.TextWrap(
            command.CommandGetHelp('', cmd_names=command_names),
            indent=indent,
            firstline_indent='')
        first_help_line, _, rest = command_help.partition('\n')
        first_line = '%-*s%s' % (indent_size, name + ':', first_help_line)
        return '\n'.join((first_line, rest))
      else:
        default_indent = '  '
        return '\n' + flags.TextWrap(
            command.CommandGetHelp('', cmd_names=command_names),
            indent=default_indent,
            firstline_indent=default_indent) + '\n'
    if not command_name:
      print '\nHelp for Bigquery commands:\n'
      command_names = list(self._commands)
      print '\n\n'.join(
          FormatOneCmd(name, command, command_names)
          for name, command in self._commands.iteritems()
          if name not in self._special_command_names)
      print
    elif command_name in self._commands:
      print FormatOneCmd(command_name, self._commands[command_name],
                         command_names=[command_name])
    return 0
  def postcmd(self, stop, line):
    return bool(stop) or line == 'EOF'
# pylint: enable=g-bad-name
class _Repl(BigqueryCmd):
"""Start an interactive bq session."""
def __init__(self, name, fv):
super(_Repl, self).__init__(name, fv)
self.surface_in_shell = False
flags.DEFINE_string(
'prompt', '',
'Prompt to use for BigQuery shell.',
flag_values=fv)
def RunWithArgs(self):
"""Start an interactive bq session."""
repl = CommandLoop(appcommands.GetCommandList(), prompt=self.prompt)
print 'Welcome to BigQuery! (Type help for more information.)'
while True:
try:
repl.cmdloop()
break
except KeyboardInterrupt:
print
return repl.last_return_code
class _Init(BigqueryCmd):
  """Create a .bigqueryrc file and set up OAuth credentials."""
  def __init__(self, name, fv):
    super(_Init, self).__init__(name, fv)
    self.surface_in_shell = False
    flags.DEFINE_boolean(
        'delete_credentials', None,
        'If specified, the credentials file associated with this .bigqueryrc '
        'file is deleted.',
        flag_values=fv)
  def _NeedsInit(self):
    """Init never needs to call itself before running."""
    return False
  def DeleteCredentials(self):
    """Deletes this user's credential file."""
    _ProcessBigqueryrc()
    filename = FLAGS.service_account_credential_file or FLAGS.credential_file
    if not os.path.exists(filename):
      print 'Credential file %s does not exist.' % (filename,)
      return 0
    try:
      # Confirm before removing; this is irreversible.
      if 'y' != _PromptYN('Delete credential file %s? (y/N) ' % (filename,)):
        print 'NOT deleting %s, exiting.' % (filename,)
        return 0
      os.remove(filename)
    except OSError as e:
      print 'Error removing %s: %s' % (filename, e)
      return 1
  def RunWithArgs(self):
    """Authenticate and create a default .bigqueryrc file."""
    _ProcessBigqueryrc()
    bigquery_client.ConfigurePythonLogger(FLAGS.apilog)
    if self.delete_credentials:
      return self.DeleteCredentials()
    bigqueryrc = _GetBigqueryRcFilename()
    # Delete the old one, if it exists.
    print
    print 'Welcome to BigQuery! This script will walk you through the '
    print 'process of initializing your .bigqueryrc configuration file.'
    print
    if os.path.exists(bigqueryrc):
      # Never overwrite an existing configuration without confirmation.
      print ' **** NOTE! ****'
      print 'An existing .bigqueryrc file was found at %s.' % (bigqueryrc,)
      print 'Are you sure you want to continue and overwrite your existing '
      print 'configuration?'
      print
      if 'y' != _PromptYN('Overwrite %s? (y/N) ' % (bigqueryrc,)):
        print 'NOT overwriting %s, exiting.' % (bigqueryrc,)
        return 0
      print
      try:
        os.remove(bigqueryrc)
      except OSError as e:
        print 'Error removing %s: %s' % (bigqueryrc, e)
        return 1
    print 'First, we need to set up your credentials if they do not '
    print 'already exist.'
    print
    # Creating the client triggers the credential flow as a side effect.
    client = Client.Get()
    entries = {'credential_file': FLAGS.credential_file}
    projects = client.ListProjects()
    print 'Credential creation complete. Now we will select a default project.'
    print
    if not projects:
      print 'No projects found for this user. Please go to '
      print '  https://code.google.com/apis/console'
      print 'and create a project.'
      print
    else:
      print 'List of projects:'
      formatter = _GetFormatterFromFlags()
      formatter.AddColumn('#')
      BigqueryClient.ConfigureFormatter(formatter, ProjectReference)
      for index, project in enumerate(projects):
        result = BigqueryClient.FormatProjectInfo(project)
        result.update({'#': index + 1})
        formatter.AddDict(result)
      formatter.Print()
      if len(projects) == 1:
        project_reference = BigqueryClient.ConstructObjectReference(
            projects[0])
        print 'Found only one project, setting %s as the default.' % (
            project_reference,)
        print
        entries['project_id'] = project_reference.projectId
      else:
        print 'Found multiple projects. Please enter a selection for '
        print 'which should be the default, or leave blank to not '
        print 'set a default.'
        print
        # Loop until the user enters a valid 1-based selection or blank
        # (blank becomes 0, meaning "no default").
        response = None
        while not isinstance(response, int):
          response = _PromptWithDefault(
              'Enter a selection (1 - %s): ' % (len(projects),))
          try:
            if not response or 1 <= int(response) <= len(projects):
              response = int(response or 0)
          except ValueError:
            pass
        print
        if response:
          project_reference = BigqueryClient.ConstructObjectReference(
              projects[response - 1])
          entries['project_id'] = project_reference.projectId
    try:
      with open(bigqueryrc, 'w') as rcfile:
        for flag, value in entries.iteritems():
          print >>rcfile, '%s = %s' % (flag, value)
    except IOError as e:
      print 'Error writing %s: %s' % (bigqueryrc, e)
      return 1
    print 'BigQuery configuration complete! Type "bq" to get started.'
    print
    _ProcessBigqueryrc()
    # Destroy the client we created, so that any new client will
    # pick up new flag values.
    Client.Delete()
    return 0
class _Version(BigqueryCmd):
  usage = """version"""
  def _NeedsInit(self):
    """If just printing the version, don't run `init` first."""
    return False
  def RunWithArgs(self):
    """Return the version of bq."""
    # NOTE(review): _VERSION_NUMBER is presumably defined at module level
    # elsewhere in this file -- confirm before relying on it here.
    print 'This is BigQuery CLI %s' % (_VERSION_NUMBER,)
def main(unused_argv):
# Avoid using global flags in main(). In this command line:
# bq <global flags> <command> <global and local flags> <command args>,
# only "<global flags>" will parse before main, not "<global and local flags>"
try:
FLAGS.auth_local_webserver = False
_ValidateGlobalFlags()
bq_commands = {
# Keep the commands alphabetical.
'cancel': _Cancel,
'cp': _Copy,
'extract': _Extract,
'head': _Head,
'init': _Init,
'insert': _Insert,
'load': _Load,
'ls': _List,
'mk': _Make,
'query': _Query,
'rm': _Delete,
'shell': _Repl,
'show': _Show,
'update': _Update,
'version': _Version,
'wait': _Wait,
}
for command, function in bq_commands.iteritems():
if command not in appcommands.GetCommandList():
appcommands.AddCmd(command, function)
except KeyboardInterrupt as e:
print 'Control-C pressed, exiting.'
sys.exit(1)
except BaseException as e: # pylint: disable=broad-except
print 'Error initializing bq client: %s' % (e,)
# Use global flags if they're available, but we're exitting so we can't
# count on global flag parsing anyways.
if FLAGS.debug_mode or FLAGS.headless:
traceback.print_exc()
if not FLAGS.headless:
pdb.post_mortem()
sys.exit(1)
# pylint: disable=g-bad-name
def run_main():
  """Function to be used as setuptools script entry point.
  Appcommands assumes that it always runs as __main__, but launching
  via a setuptools-generated entry_point breaks this rule. We do some
  trickery here to make sure that appcommands and flags find their
  state where they expect to by faking ourselves as __main__.
  """
  # Put the flags for this module somewhere the flags module will look
  # for them.
  # pylint: disable=protected-access
  new_name = flags._GetMainModule()
  sys.modules[new_name] = sys.modules['__main__']
  for flag in FLAGS.FlagsByModuleDict().get(__name__, []):
    FLAGS._RegisterFlagByModule(new_name, flag)
  for key_flag in FLAGS.KeyFlagsByModuleDict().get(__name__, []):
    FLAGS._RegisterKeyFlagForModule(new_name, key_flag)
  # pylint: enable=protected-access
  # Now set __main__ appropriately so that appcommands will be
  # happy.
  sys.modules['__main__'] = sys.modules[__name__]
  appcommands.Run()
  # Restore the module table once appcommands has finished.
  sys.modules['__main__'] = sys.modules.pop(new_name)
if __name__ == '__main__':
appcommands.Run()
|
wemanuel/smry
|
smry/server-auth/ls/google-cloud-sdk/platform/bq/bq.py
|
Python
|
apache-2.0
| 87,866
|
[
"VisIt"
] |
b06e9389785e978b2f1759a08e0c500faf7d128522cbde1ed24879d4c2259063
|
#!/usr/bin/env python
##
## @file convertSBML.py
## @brief Converts SBML documents between levels
## @author Michael Hucka
## @author Sarah Keating
## @author Ben Bornstein
##
## This file is part of libSBML. Please visit http://sbml.org for more
## information about SBML, and the latest version of libSBML.
##
import sys
import os.path
from libsbml import *
def main(args):
    """Usage: convertSBML input-filename output-filename

    This program will attempt to convert a model either to
    SBML Level 3 Version 1 (if the model is not already) or, if
    the model is already expressed in Level 3 Version 1, this
    program will attempt to convert the model to Level 1 Version 2.

    Returns 0 on success and a non-zero value on any failure.
    """
    latest_level = SBMLDocument.getDefaultLevel()
    latest_version = SBMLDocument.getDefaultVersion()

    if len(args) != 3:
        print(main.__doc__)
        sys.exit(1)

    input_file = args[1]
    output_file = args[2]

    document = readSBML(input_file)
    errors = document.getNumErrors()
    if errors > 0:
        print("Encountered the following SBML errors:\n")
        document.printErrors()
        print("Conversion skipped. Please correct the problems above first.\n")
        return errors

    # If the given model is older than the newest supported Level/Version,
    # upgrade it to the newest; otherwise (already newest) try to convert
    # it down to Level 1 Version 2.
    olevel = document.getLevel()
    oversion = document.getVersion()
    if olevel < latest_level or oversion < latest_version:
        print("Attempting to convert Level " + str(olevel) + " Version " + str(oversion)
              + " model to Level " + str(latest_level)
              + " Version " + str(latest_version) + ".\n")
        success = document.setLevelAndVersion(latest_level, latest_version)
    else:
        print("Attempting to convert Level " + str(olevel) + " Version " + str(oversion)
              + " model to Level 1 Version 2.\n")
        success = document.setLevelAndVersion(1, 2)

    errors = document.getNumErrors()
    if not success:
        print("Unable to perform conversion due to the following:\n")
        document.printErrors()
        print("\n")
        print("Conversion skipped. Either libSBML does not (yet)\n"
              + "have the ability to convert this model or (automatic)\n"
              + "conversion is not possible in this case.\n")
        # BUG FIX: the conversion failed, so never signal success — the
        # original could return 0 here when no errors had been logged.
        return errors if errors > 0 else 1
    elif errors > 0:
        print("Information may have been lost in conversion; but a valid model ")
        print("was produced by the conversion.\nThe following information ")
        print("was provided:\n")
        document.printErrors()
        writeSBML(document, output_file)
    else:
        print("Conversion completed.\n")
        writeSBML(document, output_file)

    return 0


if __name__ == '__main__':
    main(sys.argv)
|
dilawar/moose-full
|
dependencies/libsbml-5.9.0/examples/python/convertSBML.py
|
Python
|
gpl-2.0
| 3,107
|
[
"VisIt"
] |
cfef6c3858e838f14f2d46e2da6a2ca1f82b5695c0ab3a7a69d44a26b6038a2c
|
'''
Working from Miguel Rocha's script: findGalaxyProps.py. Find the center of the galaxy
at the peak in the stellar number density. Generate galaxy properties.
'''
import sys
import os
import glob
import yt
import numpy as np
from numpy import *
import astropy
from astropy.cosmology import Planck13 as cosmo
#reload(yt)
def find_center(dd, ds, units='kpc', cen_pos=10.e3, bin_width=4.e3, del_pos=20):
    '''
    find the center using the number density
    all lengths are in kpc
    returns ndarray of max_ndens_arr = ([cenx, ceny, cenz])
    '''
    # NOTE(review): the `units` parameter is immediately overwritten here, so
    # the keyword argument has no effect — the search always runs in kpc.
    units = 'kpc'
    stars_pos_x = dd['stars', 'particle_position_x'].in_units(units)
    stars_pos_y = dd['stars', 'particle_position_y'].in_units(units)
    stars_pos_z = dd['stars', 'particle_position_z'].in_units(units)
    star_pos = [stars_pos_x.value, stars_pos_y.value, stars_pos_z.value]
    # Coarse pass: histogram star positions in a (2*bin_width)^3 box around
    # cen_pos with del_pos-wide cells and take the densest cell's midpoint.
    min_pos = cen_pos - bin_width
    max_pos = cen_pos + bin_width
    bins = [arange(min_pos, max_pos, del_pos), arange(min_pos, max_pos, del_pos), arange(min_pos, max_pos, del_pos)]
    H, edges = histogramdd(star_pos, bins=bins)
    max_ndens_index = unravel_index(H.argmax(), H.shape)
    max_ndens_loc = array([(edges[0][max_ndens_index[0]] + edges[0][max_ndens_index[0]+1])/2.,
                           (edges[1][max_ndens_index[1]] + edges[1][max_ndens_index[1]+1])/2.,
                           (edges[2][max_ndens_index[2]] + edges[2][max_ndens_index[2]+1])/2.])
    max_ndens_arr = ds.arr([max_ndens_loc[0], max_ndens_loc[1], max_ndens_loc[2]], units)
    # end of First pass
    print('\tDone with coarse pass searching for center, moving to fine pass')
    # Fine pass: repeat with 0.5 kpc cells in an 80 kpc box around the
    # coarse-pass center.
    bin_width = 40
    del_pos = 0.5
    min_pos_x = float(max_ndens_arr[0]) - bin_width
    max_pos_x = float(max_ndens_arr[0]) + bin_width
    min_pos_y = float(max_ndens_arr[1]) - bin_width
    max_pos_y = float(max_ndens_arr[1]) + bin_width
    min_pos_z = float(max_ndens_arr[2]) - bin_width
    max_pos_z = float(max_ndens_arr[2]) + bin_width
    bins = [arange(min_pos_x, max_pos_x, del_pos), arange(min_pos_y, max_pos_y, del_pos), arange(min_pos_z, max_pos_z, del_pos)]
    H, edges = histogramdd(star_pos, bins=bins)
    max_ndens_index = unravel_index(H.argmax(), H.shape)
    max_ndens_loc = array([(edges[0][max_ndens_index[0]] + edges[0][max_ndens_index[0]+1])/2.,
                           (edges[1][max_ndens_index[1]] + edges[1][max_ndens_index[1]+1])/2.,
                           (edges[2][max_ndens_index[2]] + edges[2][max_ndens_index[2]+1])/2.])
    max_ndens_arr = ds.arr([max_ndens_loc[0], max_ndens_loc[1], max_ndens_loc[2]], units)
    return max_ndens_arr
def find_rvirial(dd, ds, center, start_rad=0, delta_rad_coarse=20, delta_rad_fine=1, rad_units='kpc'):
    """Grow a sphere around `center` until the enclosed dark-matter density
    drops below 200x the critical density; return that radius (coarse steps
    of `delta_rad_coarse`, then refined in `delta_rad_fine` steps).

    NOTE(review): loops forever if the overdensity threshold is never
    crossed; `vir_check` below is never used.
    """
    vir_check = 0
    r0 = ds.arr(start_rad, rad_units)
    critical_density = cosmo.critical_density(ds.current_redshift).value  # is in g/cm^3
    max_ndens_arr = center
    while True:
        r0_prev = r0
        r0 = r0_prev + ds.arr(delta_rad_coarse, rad_units)
        v_sphere = ds.sphere(max_ndens_arr, r0)
        dark_mass = v_sphere[('darkmatter', 'particle_mass')].in_units('Msun').sum()
        # Mean enclosed dark-matter density of the sphere.
        rho_internal = dark_mass.in_units('g')/((r0.in_units('cm'))**3.*(pi*4/3.))
        if rho_internal < 200*ds.arr(critical_density, 'g')/ds.arr(1., 'cm')**3.:
            # now run fine test, restarting from the last radius that was
            # still above the threshold
            print('\tNow running fine search on the virial radius')
            r0 = r0_prev
            while True:
                r0 += ds.arr(delta_rad_fine, rad_units)
                v_sphere = ds.sphere(max_ndens_arr, r0)
                dark_mass = v_sphere[('darkmatter', 'particle_mass')].in_units('Msun').sum()
                rho_internal = dark_mass.in_units('g')/((r0.in_units('cm'))**3.*(pi*4/3.))
                if rho_internal < 200*ds.arr(critical_density, 'g')/ds.arr(1., 'cm')**3.:
                    rvir = r0
                    return rvir
def find_hist_center(positions, masses):
    """
    Find the center of a particle distribution by iteratively refining
    a mass-weighted 3D histogram.

    Returns None when no particles are supplied; otherwise a length-3
    ndarray holding the midpoint of the densest histogram cell after
    refinement has narrowed the sample to at most ~10 particles.
    """
    points = np.array(positions)
    weights = np.array(masses)
    if len(points) == 0:
        return None

    n_bins = 3
    remaining_pts = points.copy()
    remaining_wts = weights.copy()
    cell_center = None
    iteration = 0

    # Re-histogram the surviving particles until few enough remain;
    # always run at least one pass.
    while len(remaining_pts) > 1e1 or iteration == 0:
        table, edges = np.histogramdd(remaining_pts, bins=n_bins, weights=remaining_wts)
        # Smallest per-axis cell width of this histogram.
        cell_width = min((np.max(edges, axis=1) - np.min(edges, axis=1)) / n_bins)
        # Locate the heaviest cell and take its midpoint as the new center.
        peak = np.where(table == table.max())
        lower = np.array([edges[axis][peak[axis][0]] for axis in range(3)])
        upper = np.array([edges[axis][peak[axis][0] + 1] for axis in range(3)])
        cell_center = 0.5 * (lower + upper)
        # Keep only particles within one cell width of the new center.
        offsets = np.sqrt(np.sum((remaining_pts - cell_center) ** 2.0, axis=1))
        keep = offsets < cell_width
        remaining_pts = remaining_pts[keep]
        remaining_wts = remaining_wts[keep]
        iteration += 1

    return cell_center
def find_shapes(center, pos, ds, nrad=10, rmax=None):
    '''
    Find the shape of the given particle distribution at nrad different
    radii, spanning from 0.1*rmax to rmax.
    rmax = max(r(pos)) if not given.

    Returns (radii, c_to_a, b_to_a, axes).

    NOTE(review): `axis_ratios` is not defined or imported anywhere in this
    file — calling this function as-is would raise NameError; confirm where
    it is meant to come from.
    '''
    print('Starting shape calculation')
    units = center.units
    center = center.value
    try:
        # Shift particle positions so `center` is the origin.
        pos = np.array([pos[:, 0] - center[0],
                        pos[:, 1] - center[1],
                        pos[:, 2] - center[2]]).transpose()
        pos = ds.arr(pos, units)
        pos = pos.in_units(units).value
        r = np.sqrt(pos[:, 0]**2 + pos[:, 1]**2 + pos[:, 2]**2)
    except IndexError:  # no stars found
        pos = np.array([])
    if len(pos) > 1:
        if not rmax: rmax = r.max()
        radii = np.linspace(0.1*rmax, rmax, nrad)
    else:
        radii = np.array([])
    c_to_a = np.empty(radii.size)
    b_to_a = np.empty(radii.size)
    axes = []
    # Note: the loop variable `r` shadows the radius array computed above.
    for i, r in enumerate(radii):
        # get shapes
        try:
            axis_out = axis_ratios(pos, r, axes_out=True, fix_volume=False)
            c_to_a[i] = axis_out[0][0]
            b_to_a[i] = axis_out[0][1]
            axes.append(axis_out[1])
        except UnboundLocalError:
            print('Not enough particles to find shapes at r = %g in snapshot %s' % (r, ds.parameter_filename))
            b_to_a[i] = c_to_a[i] = None
            axes.append([])
    return radii, c_to_a, b_to_a, axes
def L_crossing(x, y, z, vx, vy, vz, weight, center):
    """Return the unit vector along the weight-summed angular momentum of
    particles at (x, y, z) with velocities (vx, vy, vz), taken about
    ``center``."""
    # Positions relative to the chosen center.
    rx = x - center[0]
    ry = y - center[1]
    rz = z - center[2]
    # Per-particle r x v, weighted and summed over all particles.
    ang_x = np.sum((ry * vz - rz * vy) * weight)
    ang_y = np.sum((rz * vx - rx * vz) * weight)
    ang_z = np.sum((rx * vy - ry * vx) * weight)
    total = np.array([ang_x, ang_y, ang_z])
    # Normalize to a direction vector.
    return total / np.sqrt(np.sum(total * total))
def find_galaxyprops(galaxy_props, ds, hc_sphere, max_ndens_arr):
    """Append this snapshot's stellar/gas masses, density peaks, centers,
    half-mass radius and angular momenta to the `galaxy_props` dict and
    return it. `hc_sphere` is the halo sphere; `max_ndens_arr` the stellar
    number-density center."""
    print('Determining stellar and gas mass...')
    # Get total stellar mass
    stars_mass = hc_sphere[('stars', 'particle_mass')].in_units('Msun')
    stars_total_mass = stars_mass.sum().value[()]
    galaxy_props['stars_total_mass'] = np.append(galaxy_props['stars_total_mass'], stars_total_mass)
    # Get total mass of gas
    gas_mass = hc_sphere[('gas', 'cell_mass')].in_units('Msun')
    gas_total_mass = gas_mass.sum().value[()]
    galaxy_props['gas_total_mass'] = np.append(galaxy_props['gas_total_mass'],
                                               gas_total_mass)
    print('\tlog Mgas/Msun = ', log10(gas_total_mass))
    print('\tlog M*/Msun = ', log10(stars_total_mass))
    print('Determining location of max stellar density...')
    # Get max density of stars (value, location)
    stars_maxdens = hc_sphere.quantities.max_location(('deposit', 'stars_cic'))
    stars_maxdens_val = stars_maxdens[0].in_units('Msun/kpc**3').value[()]
    print(stars_maxdens)
    # difference bt yt-3.2.3 and yt-3.3dev: stars_maxdens has different # elements; this works for both
    stars_maxdens_loc = np.array([stars_maxdens[-3].in_units('kpc').value[()],
                                  stars_maxdens[-2].in_units('kpc').value[()],
                                  stars_maxdens[-1].in_units('kpc').value[()]])
    galaxy_props['stars_maxdens'].append((stars_maxdens_val, stars_maxdens_loc))
    print('\t Max Stellar Density = ', stars_maxdens_loc)
    print('Determining location of max gas density...')
    # Get max density of gas
    gas_maxdens = hc_sphere.quantities.max_location(('gas', 'density'))
    gas_maxdens_val = gas_maxdens[0].in_units('Msun/kpc**3').value[()]
    gas_maxdens_loc = np.array([gas_maxdens[-3].in_units('kpc').value[()],
                                gas_maxdens[-2].in_units('kpc').value[()],
                                gas_maxdens[-1].in_units('kpc').value[()]])
    galaxy_props['gas_maxdens'].append((gas_maxdens_val, gas_maxdens_loc))
    # NOTE(review): this prints the *stellar* peak location, not the gas one —
    # likely a copy-paste slip; confirm before relying on the log output.
    print('\t Max Gas Density = ', stars_maxdens_loc)
    print('Determining refined histogram center of stars...')
    # ---Need to Check these--#
    # Get refined histogram center of stars
    stars_pos_x = hc_sphere[('stars', 'particle_position_x')].in_units('kpc')
    stars_pos_y = hc_sphere[('stars', 'particle_position_y')].in_units('kpc')
    stars_pos_z = hc_sphere[('stars', 'particle_position_z')].in_units('kpc')
    stars_pos = np.array([stars_pos_x, stars_pos_y, stars_pos_z]).transpose()
    stars_hist_center = find_hist_center(stars_pos, stars_mass)
    galaxy_props['stars_hist_center'].append(stars_hist_center)
    print('\t Refined histogram center of stars = ', stars_hist_center)
    print('Computing stellar density profile...')
    # Get stellar density profile; grow the sphere until it is at least one
    # smallest-cell wide.
    sc_sphere_r = 0.1
    ssphere_r = sc_sphere_r*hc_sphere.radius
    while ssphere_r < ds.index.get_smallest_dx():
        ssphere_r = 2.0*ssphere_r
    sc_sphere = ds.sphere(max_ndens_arr, ssphere_r)
    try:
        p_plot = yt.ProfilePlot(sc_sphere, 'radius', 'stars_mass', n_bins=50, weight_field=None, accumulation=True)
        p_plot.set_unit('radius', 'kpc')
        p_plot.set_unit('stars_mass', 'Msun')
        p = p_plot.profiles[0]
        radii, smass = p.x.value, p['stars_mass'].value
        # First radius enclosing half of the cumulative stellar mass.
        rhalf = radii[smass >= 0.5*smass.max()][0]
    except (IndexError, ValueError):  # not enough stars found
        radii, smass = None, None
        rhalf = None
    galaxy_props['stars_rhalf'] = np.append(galaxy_props['stars_rhalf'], rhalf)
    galaxy_props['stars_mass_profile'].append((radii, smass))
    print('\tStars half-light radius = ', rhalf)
    print('Determining center of mass within 15 kpc of the galaxy...')
    # Get center of mass of stars
    gal_sphere = ds.sphere(max_ndens_arr, (15, 'kpc'))
    stars_pos_x = gal_sphere[('stars', 'particle_position_x')].in_units('kpc')
    stars_pos_y = gal_sphere[('stars', 'particle_position_y')].in_units('kpc')
    stars_pos_z = gal_sphere[('stars', 'particle_position_z')].in_units('kpc')
    gal_stars_mass = gal_sphere[('stars', 'particle_mass')].in_units('Msun')
    gal_total_mass = gal_stars_mass.sum().value[()]
    stars_com = np.array([np.dot(stars_pos_x, gal_stars_mass)/gal_total_mass,
                          np.dot(stars_pos_y, gal_stars_mass)/gal_total_mass,
                          np.dot(stars_pos_z, gal_stars_mass)/gal_total_mass])
    galaxy_props['stars_com'].append(stars_com)
    print('\tCenter of mass = ', stars_com)
    print('Setting stars center...')
    # Define center of stars; `center` is a hard-coded selector between the
    # candidate centers computed above.
    center = 'maxndens'
    if center == 'max_dens':
        stars_center = stars_maxdens_loc
    elif center == 'com':
        stars_center = stars_com
    elif center == 'maxndens':
        stars_center = max_ndens_arr
    else:
        stars_center = stars_hist_center
    stars_center = ds.arr(stars_center, 'kpc')
    # NOTE(review): `stars_hist_center` is appended here even though
    # `stars_center` was just selected above — confirm which is intended.
    galaxy_props['stars_center'].append(stars_hist_center)
    print('\tStars Center = ', stars_center)
    # Get angular momentum of stars; prefer metallicity-weighted if the
    # metallicity field exists.
    try:
        x, y, z = [sc_sphere[('stars', 'particle_position_%s' % s)] for s in 'xyz']
        vx, vy, vz = [sc_sphere[('stars', 'particle_velocity_%s' % s)] for s in 'xyz']
        mass = sc_sphere[('stars', 'particle_mass')]
        try:
            metals = sc_sphere[('stars', 'particle_metallicity1')]
            stars_L = L_crossing(x, y, z, vx, vy, vz, mass*metals, sc_sphere.center)
        except:
            stars_L = L_crossing(x, y, z, vx, vy, vz, mass, sc_sphere.center)
    except IndexError:  # no stars found
        stars_L = [None, None, None]
        print("No stars exception")
    galaxy_props['stars_L'].append(stars_L)
    del(sc_sphere)
    # Get angular momentum of gas, centered on the gas density peak.
    gas_center = ds.arr(gas_maxdens_loc, 'kpc')
    gc_sphere = ds.sphere(gas_center, ssphere_r)
    x, y, z = [gc_sphere[('gas', '%s' % s)] for s in 'xyz']
    cell_volume = gc_sphere[('gas', 'cell_volume')]
    try:
        # for VELA runs
        vx, vy, vz = [gc_sphere[('gas', 'momentum_%s' % s)] for s in 'xyz']  # momentum density
        metals = gc_sphere[('gas', 'metal_ia_density')] + gc_sphere[('gas', 'metal_ii_density')]
        gas_L = L_crossing(x, y, z, vx, vy, vz, metals*cell_volume**2, gc_sphere.center)
    except:
        # for enzo runs
        density = gc_sphere[('gas', 'density')]
        vx, vy, vz = [gc_sphere[('gas', 'velocity_%s' % s)] for s in 'xyz']
        metals = gc_sphere[('gas', 'metal_density')]
        gas_L = L_crossing(x, y, z, density*vx, density*vy, density*vz, metals*cell_volume**2, gc_sphere.center)
    galaxy_props['gas_L'].append(gas_L)
    del(gc_sphere)
    return galaxy_props
if __name__ == "__main__":
    # Should read these in from an initialization file
    #gen_name, gal_name, snap_name, snaps = 'VELA_v2', 'VELA27', 'VELA27_a0.370', '../data/VELA27_v2/a0.370/10MpcBox_csf512_a0.370.d'
    #snap_dir = '/Volumes/wd/yt_pipeline/Runs/%s/%s/%s'%(gen_name, gal_name, snap_name+'_sunrise')
    #if not os.path.isdir(snap_dir):
    #    os.system('mkdir '+'/Volumes/wd/yt_pipeline/Runs/%s/%s'%(gen_name, gal_name))
    #    os.system('mkdir '+snap_dir)
    #    os.system('mkdir '+snap_dir+'/input')
    #assert os.path.exists(snap_dir), 'Snapshot directory %s not found'%snap_dir
    # A single snapshot may be given on the command line; otherwise process
    # every *.d file in the current directory.
    if len(sys.argv) == 2:
        snaps = np.asarray([sys.argv[1]])
    else:
        snaps = np.sort(np.asarray(glob.glob("*.d")))
    print("Calculating Galaxy Props for: ", snaps)
    abssnap = os.path.abspath(snaps[0])
    assert os.path.lexists(abssnap)
    dirname = os.path.dirname(abssnap)
    simname = os.path.basename(dirname)  # assumes directory name for simulation name
    print("Simulation name: ", simname)
    # For each snapshot, create a <sim>_<a>_sunrise working directory with
    # symlinks to the snapshot and its companion particle/star files.
    particle_headers = []
    particle_data = []
    stars_data = []
    new_snapfiles = []
    for sn in snaps:
        aname = sn.split('_')[-1].rstrip('.d')
        particle_headers.append('PMcrd' + aname + '.DAT')
        particle_data.append('PMcrs0' + aname + '.DAT')
        stars_data.append('stars_' + aname + '.dat')
        snap_dir = os.path.join(simname + '_' + aname + '_sunrise')
        yt_fig_dir = snap_dir + '/yt_projections'
        print("Sunrise directory: ", snap_dir)
        if not os.path.lexists(snap_dir):
            os.mkdir(snap_dir)
        if not os.path.lexists(yt_fig_dir):
            os.mkdir(yt_fig_dir)
        newf = os.path.join(snap_dir, sn)
        new_snapfiles.append(newf)
        if not os.path.lexists(newf):
            os.symlink(os.path.abspath(sn), newf)
            os.symlink(os.path.abspath(particle_headers[-1]), os.path.join(snap_dir, particle_headers[-1]))
            os.symlink(os.path.abspath(particle_data[-1]), os.path.join(snap_dir, particle_data[-1]))
            os.symlink(os.path.abspath(stars_data[-1]), os.path.join(snap_dir, stars_data[-1]))
    new_snapfiles = np.asarray(new_snapfiles)
    # Accumulators: scalar series as numpy arrays, everything else as lists.
    galaxy_props = {}
    fields = ['scale', 'stars_total_mass', 'stars_com', 'stars_maxdens', 'stars_maxndens', 'stars_hist_center',
              'stars_rhalf', 'stars_mass_profile', 'stars_L',
              'gas_total_mass', 'gas_maxdens', 'gas_L', 'rvir', 'Mvir_dm', 'stars_center', 'snap_files']
    for field in fields:
        if field in ['scale', 'stars_total_mass', 'stars_rhalf', 'gas_total_mass']:
            galaxy_props[field] = np.array([])
        else:
            galaxy_props[field] = []
    ts = yt.DatasetSeries(new_snapfiles)
    # Walk the snapshots newest-first.
    for ds, snap_dir in zip(reversed(ts), np.flipud(new_snapfiles)):
        print("Getting galaxy props: ", ds._file_amr, snap_dir)
        dd = ds.all_data()
        ds.domain_right_edge = ds.arr(ds.domain_right_edge, 'code_length')
        ds.domain_left_edge = ds.arr(ds.domain_left_edge, 'code_length')
        print(ds.index.get_smallest_dx())
        # need to exit gracefully here if there's no stars.
        try:
            stars_pos_x = dd['stars', 'particle_position_x'].in_units('kpc')
            assert stars_pos_x.shape[0] > 5
        except AttributeError:
            print("No star particles found, skipping: ", ds._file_amr)
            continue
        except AssertionError:
            print("No star particles found, skipping: ", ds._file_amr)
            continue
        scale = round(1.0/(ds.current_redshift + 1.0), 3)
        galaxy_props['scale'] = np.append(galaxy_props['scale'], scale)
        galaxy_props['snap_files'] = np.append(galaxy_props['snap_files'], ds._file_amr)
        print('Determining center...')
        max_ndens_arr = find_center(dd, ds, cen_pos=ds.domain_center.in_units('kpc')[0].value[()], units='kpc')
        print('\tCenter = ', max_ndens_arr)
        # Generate Sphere Selection
        print('Determining virial radius...')
        rvir = find_rvirial(dd, ds, max_ndens_arr)
        print('\tRvir = ', rvir)
        hc_sphere = ds.sphere(max_ndens_arr, rvir)
        galaxy_props['stars_maxndens'].append(max_ndens_arr.value)
        galaxy_props['rvir'] = np.append(galaxy_props['rvir'], rvir.value[()])
        galaxy_props['Mvir_dm'] = np.append(galaxy_props['Mvir_dm'], hc_sphere[('darkmatter', 'particle_mass')].in_units('Msun').sum().value[()])
        # Find Galaxy Properties
        galaxy_props = find_galaxyprops(galaxy_props, ds, hc_sphere, max_ndens_arr)
        # Making Figures (disabled plotting examples retained for reference)
        #if False:
        #    yt.ProjectionPlot(ds, 'z', ('gas', 'density'), center=([10,10,10],'Mpc'), width = (25.,'Mpc')).save('test.png')
        #    p = yt.ProjectionPlot(ds, 'z', ('gas', 'density'), center=(max_ndens_arr), width = (8.,'kpc'))
        #    p.save('projection_z.png')
        #    yt.ProjectionPlot(ds, 'z', ('gas', 'density'), center=(max_ndens_arr), width = (40.,'kpc')).save('testproj_2nd_pass_3_z.png')
        #    yt.ProjectionPlot(ds, 'z', ('gas', 'density'), center=(max_ndens_arr), width = (30, 'kpc')).save(yt_fig_dir+'/max_ndens_cen_30kpc.png')
        #    yt.ProjectionPlot(ds, 'z', ('gas', 'density'), center=(max_ndens_arr), width = (1, 'Mpc')).save(yt_fig_dir+'/max_ndens_cen_1Mpc.png')
        #    yt.ProjectionPlot(ds, 'z', ('gas', 'density'), center=(galaxy_props['stars_com'][0],'kpc'), width = (10, 'kpc')).save('max_ndens_arr.png')
        #    L = ds.arr([0,1./sqrt(2),1./sqrt(2)], 'kpc')
        #    yt.OffAxisProjectionPlot(ds, L, ('gas', 'density'), center=(max_ndens_arr), width = (10, 'kpc')).save('off_axis_projection.png')
        #    t0 = time.time()
        #    yt.OffAxisProjectionPlot(ds, L, ('gas', 'density'), center=(max_ndens_arr), depth = (1, "Mpc"), width = (25, "kpc")).save('off_axis_projection_2.png')
        #    t1 = time.time()
        #    print 'Took %.2f minutes'%((t1-t0)/60.)
        del(hc_sphere)
        sys.stdout.flush()
    # Save galaxy props file
    galaxy_props_file = simname + '_galprops.npy'
    print('\nSuccessfully computed galaxy properties')
    print('Saving galaxy properties to ', galaxy_props_file)
    np.save(galaxy_props_file, galaxy_props)
|
gsnyder206/vela-yt-sunrise
|
findGalaxyProps.py
|
Python
|
gpl-3.0
| 21,206
|
[
"Galaxy"
] |
73e9379cf4ab8fe135bacfeabb279e676307898639f13b345af20cfd9eedfcb9
|
"""
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
from nose.plugins.attrib import attr
import json
from studio.base_studio_test import ContainerBase
from ..pages.studio.settings_group_configurations import GroupConfigurationsPage
from ..pages.studio.settings_advanced import AdvancedSettingsPage
from ..pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ..fixtures.course import XBlockFixtureDesc
from ..pages.studio.component_editor import ComponentVisibilityEditorView
from ..pages.lms.instructor_dashboard import InstructorDashboardPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from ..tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from bok_choy.promise import EmptyPromise
@attr('shard_1')
class EndToEndCohortedCoursewareTest(ContainerBase):
    """End-to-end check that content groups plus cohorts restrict which
    problems each student can see in the LMS courseware."""

    def setUp(self, is_staff=True):
        super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
        self.staff_user = self.user

        self.content_group_a = "Content Group A"
        self.content_group_b = "Content Group B"

        # Create a student who will be in "Cohort A"
        self.cohort_a_student_username = "cohort_a_student"
        self.cohort_a_student_email = "cohort_a_student@example.com"
        StudioAutoAuthPage(
            self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
        ).visit()

        # Create a student who will be in "Cohort B"
        self.cohort_b_student_username = "cohort_b_student"
        self.cohort_b_student_email = "cohort_b_student@example.com"
        StudioAutoAuthPage(
            self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
        ).visit()

        # Create a student who will end up in the default cohort group
        self.cohort_default_student_username = "cohort default student"
        self.cohort_default_student_email = "cohort_default_student@example.com"
        StudioAutoAuthPage(
            self.browser, username=self.cohort_default_student_username,
            email=self.cohort_default_student_email, no_login=True
        ).visit()

        # Start logged in as the staff user.
        StudioAutoAuthPage(
            self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
        ).visit()

    def populate_course_fixture(self, course_fixture):
        """
        Populate the children of the test course fixture.
        """
        self.group_a_problem = 'GROUP A CONTENT'
        self.group_b_problem = 'GROUP B CONTENT'
        self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
        self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'

        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
                    )
                )
            )
        )

    def enable_cohorts_in_course(self):
        """
        This turns on cohorts for the course. Currently this is still done through Advanced
        Settings. Eventually it will be done in the LMS Instructor Dashboard.
        """
        advanced_settings = AdvancedSettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        advanced_settings.visit()
        cohort_config = '{"cohorted": true}'
        advanced_settings.set('Cohort Configuration', cohort_config)
        advanced_settings.refresh_and_wait_for_load()
        # Round-trip through JSON so formatting differences don't matter.
        self.assertEquals(
            json.loads(cohort_config),
            json.loads(advanced_settings.get('Cohort Configuration')),
            'Wrong input for Cohort Configuration'
        )

    def create_content_groups(self):
        """
        Creates two content groups in Studio Group Configurations Settings.
        """
        group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        group_configurations_page.visit()

        group_configurations_page.create_first_content_group()
        config = group_configurations_page.content_groups[0]
        config.name = self.content_group_a
        config.save()

        group_configurations_page.add_content_group()
        config = group_configurations_page.content_groups[1]
        config.name = self.content_group_b
        config.save()

    def link_problems_to_content_groups_and_publish(self):
        """
        Updates 3 of the 4 existing problems to limit their visibility by content group.
        Publishes the modified units.
        """
        container_page = self.go_to_unit_page()

        def set_visibility(problem_index, content_group, second_content_group=None):
            # Restrict the problem at `problem_index` to one (or two) content groups.
            problem = container_page.xblocks[problem_index]
            problem.edit_visibility()
            if second_content_group:
                ComponentVisibilityEditorView(self.browser, problem.locator).select_option(
                    second_content_group, save=False
                )
            ComponentVisibilityEditorView(self.browser, problem.locator).select_option(content_group)

        set_visibility(1, self.content_group_a)
        set_visibility(2, self.content_group_b)
        set_visibility(3, self.content_group_a, self.content_group_b)

        container_page.publish_action.click()

    def create_cohorts_and_assign_students(self):
        """
        Adds 2 manual cohorts, linked to content groups, to the course.
        Each cohort is assigned one student.
        """
        instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
        instructor_dashboard_page.visit()
        membership_page = instructor_dashboard_page.select_membership()
        cohort_management_page = membership_page.select_cohort_management_section()

        def add_cohort_with_student(cohort_name, content_group, student):
            cohort_management_page.add_cohort(cohort_name, content_group=content_group)
            # After adding the cohort, it should automatically be selected
            EmptyPromise(
                lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
            ).fulfill()
            cohort_management_page.add_students_to_selected_cohort([student])

        add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
        add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)

    def view_cohorted_content_as_different_users(self):
        """
        View content as staff, student in Cohort A, student in Cohort B, and student in Default Cohort.
        """
        courseware_page = CoursewarePage(self.browser, self.course_id)

        def login_and_verify_visible_problems(username, email, expected_problems):
            # Log in as the given user and confirm exactly the expected problems show.
            LmsAutoAuthPage(
                self.browser, username=username, email=email, course_id=self.course_id
            ).visit()
            courseware_page.visit()
            verify_expected_problem_visibility(self, courseware_page, expected_problems)

        login_and_verify_visible_problems(
            self.staff_user["username"], self.staff_user["email"],
            [self.group_a_problem, self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )
        login_and_verify_visible_problems(
            self.cohort_a_student_username, self.cohort_a_student_email,
            [self.group_a_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )
        login_and_verify_visible_problems(
            self.cohort_b_student_username, self.cohort_b_student_email,
            [self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )
        login_and_verify_visible_problems(
            self.cohort_default_student_username, self.cohort_default_student_email,
            [self.visible_to_all_problem]
        )

    def test_cohorted_courseware(self):
        """
        Scenario: Can create content that is only visible to students in particular cohorts
            Given that I have course with 4 problems, 1 staff member, and 3 students
            When I enable cohorts in the course
            And I create two content groups, Content Group A, and Content Group B, in the course
            And I link one problem to Content Group A
            And I link one problem to Content Group B
            And I link one problem to both Content Group A and Content Group B
            And one problem remains unlinked to any Content Group
            And I create two manual cohorts, Cohort A and Cohort B,
                linked to Content Group A and Content Group B, respectively
            And I assign one student to each manual cohort
            And one student remains in the default cohort
            Then the staff member can see all 4 problems
            And the student in Cohort A can see all the problems except the one linked to Content Group B
            And the student in Cohort B can see all the problems except the one linked to Content Group A
            And the student in the default cohort can only see the problem that is unlinked to any Content Group
        """
        self.enable_cohorts_in_course()
        self.create_content_groups()
        self.link_problems_to_content_groups_and_publish()
        self.create_cohorts_and_assign_students()
        self.view_cohorted_content_as_different_users()
|
jazkarta/edx-platform-for-isc
|
common/test/acceptance/tests/test_cohorted_courseware.py
|
Python
|
agpl-3.0
| 10,267
|
[
"VisIt"
] |
79061a4aae968281f0146421a3c6feb6a13454043fdc44b61ee0396ae6b5619d
|
#!/usr/bin/env python
import sys
try:
    # Probe for the RDKit cheminformatics toolkit; the process exit status
    # reports whether the dependency is usable (0 = available).
    import rdkit
    print "import rdkit available"
    from rdkit import Chem
    print "import Chem from rdkit available"
    sys.exit(0)
except ImportError, e:  # Python 2 syntax; bound exception `e` is unused
    print "ERROR: import rdkit NOT available"
    sys.exit(1)
|
MMunibas/FittingWizard
|
scripts/check_rdkit_dependency.py
|
Python
|
bsd-3-clause
| 243
|
[
"RDKit"
] |
019fbc79f4cf384b47704cc030799816993f87cb8f1632e1ba45884d1c289f5f
|
from urllib import request
import random,platform,os
from bs4 import BeautifulSoup
# Output file is <cwd>/<PREFIX>.html; pick the platform's path separator.
PREFIX = "ceph"
path = ""
if path == "":
    if platform.system() == "Windows":
        path = os.getcwd() + "\\"
    else:
        path = os.getcwd() + "/"
filepath = path + PREFIX + ".html"
def saveFile(data):
    """Write the scraped blog strings to the module-level `filepath`.

    Each element of `data` is encoded as GB18030 (a GBK-compatible
    superset) so the saved page renders in GBK-declared viewers.
    """
    # TODO SELECT SINGLE FILE OR MULT FILE
    # BUG FIX: use a context manager so the handle is closed even when
    # encoding raises; the original left the file open on error.
    with open(filepath, 'wb') as out:
        for chunk in data:
            out.write(chunk.encode('GB18030'))
url = 'http://blog.csdn.net/junming_zhao/article/details/72528533'
# Pool of browser User-Agent strings; one is picked at random below so the
# request looks like it comes from a real browser.
user_agents=['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
             'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
             'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ \(KHTML, like Gecko) Element Browser 5.0',
             'IBM WebExplorer /v0.94', 'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)',
             'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
             'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
             'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) \Version/6.0 Mobile/10A5355d Safari/8536.25',
             'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) \Chrome/28.0.1468.0 Safari/537.36',
             'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']
# randint(0, 9): the list above holds exactly 10 entries.
index=random.randint(0, 9)
user_agent=user_agents[index]
headers={'User_agent':user_agent}
req = request.Request(url=url, headers=headers)
page = request.urlopen(req)
# The content scraped from my CSDN blog page is compressed; decompress
# (decode) it first.
data = page.read()
data = data.decode('utf-8')
# Build the BeautifulSoup object.
soup = BeautifulSoup(data,'html5lib')
title = str(soup.find(class_='link_title').text).strip()
print(title)
content = str(soup.find('div',class_='article_content tracking-ad'))
print(content)
saveFile(title+"<br/><br/>"+content)
|
WZQ1397/automatic-repo
|
project/urlfetchANDcolor/webspider-csdncontent-download.py
|
Python
|
lgpl-3.0
| 2,042
|
[
"Galaxy"
] |
99b4778ec7bb9fb6bf4e62ad2ceeacf0415859ba3f6260a467ba4067ddf84233
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
    """Fetch the iris dataset as a NumPy ``NpzFile``.

    Returns
    -------
    iris : NpzFile
        ``iris['data']``: (150, 4) array of features.
        ``iris['group']``: (150,) array of group labels.
    """
    fname = load_data_file('iris/iris.npz',
                           force_download='2014-09-04')
    return np.load(fname)
def load_crate():
    """Return the 256x256x3 crate texture image as an array."""
    archive = np.load(load_data_file('orig/crate.npz'))
    return archive['crate']
def load_spatial_filters():
    """Load the packaged spatial-filter interpolation kernels.

    Returns
    -------
    kernel : array
        16 interpolation kernels, each of length 1024 (16x1024 array).
    names : tuple of strings
        The matching interpolation names, plus "Nearest", which needs no
        filter but can still be selected.
    """
    names = ("Bilinear", "Hanning", "Hamming", "Hermite",
             "Kaiser", "Quadric", "Bicubic", "CatRom",
             "Mitchell", "Spline16", "Spline36", "Gaussian",
             "Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
    kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
    return kernel, names
|
sbtlaarzc/vispy
|
vispy/io/datasets.py
|
Python
|
bsd-3-clause
| 1,520
|
[
"Gaussian"
] |
f0b245a32c97a28f96e6a13997001395e25135b458793e2eba63163461bfbbda
|
"""
Miscellaneous tests
"""
from unittest import skipIf
from regression.pages.whitelabel.const import (
LOGO_ALT_TEXT, LOGO_LINK, NO_OF_COUNTRIES, NO_OF_LANGUAGES,
ORG, SAMPLE_COUNTRIES, SAMPLE_LANGUAGES, SELECTED_COUNTRY, SOCIAL_MEDIA_LINK
)
from regression.pages.whitelabel.profile_page import ProfilePage
from regression.tests.whitelabel.white_label_tests_base import WhiteLabelTestsBaseClass
class TestMisc(WhiteLabelTestsBaseClass):
    """
    Assorted white-label checks: social links, logos, and profile data.
    """

    def setUp(self):
        """
        Build the page objects every test below relies on.
        """
        super().setUp()
        self.profile_page = ProfilePage(self.browser)

    @skipIf(ORG == 'MITxPRO', 'MITxPRO has no social media links')
    def test_social_media_links(self):
        """
        The footer must expose exactly the configured social media links.
        """
        self.home_page.visit()
        self.assertEqual(SOCIAL_MEDIA_LINK, self.home_page.social_links)

    def test_logos(self):
        """
        Header and footer logos must use the configured image and alt text.
        """
        self.home_page.visit()
        # Header logo: target link and alt text.
        self.assertIn(LOGO_LINK, self.home_page.header_logo_link)
        self.assertEqual(LOGO_ALT_TEXT, self.home_page.header_logo_alt_text)
        # Footer logo: target link and alt text.
        self.assertIn(LOGO_LINK, self.home_page.footer_logo_link)
        self.assertEqual(LOGO_ALT_TEXT, self.home_page.footer_logo_alt_text)

    def test_countries_data(self):
        """
        The profile page must show the expected country choices.
        """
        self.register_using_api()
        self.dashboard_page.wait_for_page()
        self.dashboard_page.go_to_profile_page()
        self.profile_page.wait_for_page()
        # The pre-selected country should match the registration data.
        self.assertEqual(SELECTED_COUNTRY, self.profile_page.selected_country)
        # The dropdown should contain the full country list.
        country_options = self.profile_page.countries_list
        self.assertEqual(NO_OF_COUNTRIES, len(country_options))
        for sample in SAMPLE_COUNTRIES:
            self.assertIn(sample, country_options)

    def test_languages_data(self):
        """
        The profile page must show the expected language choices.
        """
        self.register_using_api()
        self.dashboard_page.wait_for_page()
        self.dashboard_page.go_to_profile_page()
        self.profile_page.wait_for_page()
        language_options = self.profile_page.languages_list
        self.assertEqual(NO_OF_LANGUAGES, len(language_options))
        for sample in SAMPLE_LANGUAGES:
            self.assertIn(sample, language_options)
|
edx/edx-e2e-tests
|
regression/tests/whitelabel/test_misc.py
|
Python
|
agpl-3.0
| 2,988
|
[
"VisIt"
] |
414fe21e81cb155cc62b12246c49757d5ec27767a0d58d46c2c71670da9380e7
|
#!/usr/bin/env python
"""Utility script to convert an old VTK file format to the new VTK XML
file format (serial format). The output XML file will contain *all*
the existing scalars, vectors and tensors in the input file.
This requires VTK 4.x or above.
Created May 2003, Prabhu Ramachandran <prabhu@aero.iitm.ernet.in>
Licence: VTK License.
"""
import sys
import vtk
import os.path
import getopt
def getReaderWriter(file_name, out_dir=None):
    """Pick the matching VTK reader and XML writer for a legacy file.

    Probes ``file_name`` with the generic legacy reader to learn the
    dataset type, then instantiates the type-specific reader and the
    corresponding XML writer, with input/output file names configured.
    Returns (reader, writer), or (None, None) when the dataset type is
    not recognized.  ``out_dir`` overrides the output directory (default:
    alongside the input file).
    """
    r = vtk.vtkDataSetReader()
    r.SetFileName(file_name)
    f_base = os.path.splitext(file_name)[0]
    r.Update()
    reader = None
    writer = None
    xmlsuffix = '.xml'
    # Dataset type -> XML file extension.  Renamed from ``map`` to avoid
    # shadowing the builtin.
    suffix_map = {'StructuredPoints': '.vti', 'StructuredGrid': '.vts',
                  'RectilinearGrid': '.vtr', 'UnstructuredGrid': '.vtu',
                  'PolyData': '.vtp'}
    for i in ['StructuredPoints', 'StructuredGrid', 'RectilinearGrid',
              'UnstructuredGrid', 'PolyData']:
        # getattr-based dispatch replaces the original eval() calls:
        # same behavior, no dynamic code evaluation.
        if getattr(r, 'IsFile%s' % i)():
            reader = getattr(vtk, 'vtk%sReader' % i)()
            if i == 'StructuredPoints':
                # Structured points are written as XML image data.
                writer = vtk.vtkXMLImageDataWriter()
            else:
                writer = getattr(vtk, 'vtkXML%sWriter' % i)()
            xmlsuffix = suffix_map[i]
            break
    if not reader:
        return None, None
    reader.SetFileName(file_name)
    reader.Update()
    out_file = f_base + xmlsuffix
    if out_dir:
        out_file = os.path.join(out_dir,
                                os.path.basename(f_base) + xmlsuffix)
    writer.SetFileName(out_file)
    return reader, writer
def _getAttr(reader, lst, attr='Scalars'):
p_a = []
c_a = []
for i in lst:
eval('reader.Set%sName(i)'%attr)
reader.Update()
o = reader.GetOutput()
pd = o.GetPointData()
cd = o.GetCellData()
s = eval('pd.Get%s()'%attr)
if s and (s not in p_a):
p_a.append(s)
s = eval('cd.Get%s()'%attr)
if s and (s not in c_a):
c_a.append(s)
return p_a, c_a
def setAllAttributes(reader):
    """Attach every scalar/vector/tensor array found in the file to the
    reader's output and return that output."""
    scalar_names = [reader.GetScalarsNameInFile(i)
                    for i in range(reader.GetNumberOfScalarsInFile())]
    vector_names = [reader.GetVectorsNameInFile(i)
                    for i in range(reader.GetNumberOfVectorsInFile())]
    tensor_names = [reader.GetTensorsNameInFile(i)
                    for i in range(reader.GetNumberOfTensorsInFile())]
    # Resolve each name into the concrete point-data / cell-data arrays.
    p_s, c_s = _getAttr(reader, scalar_names, 'Scalars')
    p_v, c_v = _getAttr(reader, vector_names, 'Vectors')
    p_t, c_t = _getAttr(reader, tensor_names, 'Tensors')
    output = reader.GetOutput()
    point_data = output.GetPointData()
    for arr in p_s + p_v + p_t:
        point_data.AddArray(arr)
    cell_data = output.GetCellData()
    for arr in c_s + c_v + c_t:
        cell_data.AddArray(arr)
    return output
def usage():
    """Return the command-line help text for this script."""
    # The text is returned verbatim so callers can print or log it.
    return """usage: vtk2xml.py [options] vtk_file1 vtk_file2 ...\n
This program converts VTK's old file format to the new XML format.
The default mode is to store the data as appended (compressed and
base64 encoded). Change this behaviour with the provided options.
This code requires VTK 4.x or above to run.
options:
-h, --help Show this help message and exit.
-b, --binary Store data as binary (compressed and base64 encoded).
-a, --ascii Store data as ascii.
-n, --no-encode Do not base64 encode the data. This violates the
XML specification but makes reading and writing fast
and files smaller.
-d, --output-dir <directory>
Output directory where the files should be generated.
Defaults to the same directory as the input file.
"""
def main():
options = "bahnd:"
long_opts = ['binary', 'ascii', 'help', 'no-encode', 'output-dir=']
try:
opts, args = getopt.getopt(sys.argv[1:], options, long_opts)
except getopt.error, msg:
print msg
print usage()
print '-'*70
print msg
sys.exit(1)
mode = 'p'
encode = 1
out_dir = None
for o, a in opts:
if o in ('-h', '--help'):
print usage()
sys.exit(0)
if o in ('-b', '--binary'):
mode = 'b'
if o in ('-a', '--ascii'):
mode = 'a'
if o in ('-n', '--no-encode'):
encode = 0
if o in ('-d', '--output-dir'):
out_dir = a
if not os.path.exists(out_dir):
print "Error: Directory %s does not exist!"%out_dir
sys.exit(1)
if len(args) < 1:
print "\nError: Incorrect number of arguments\n"
print usage()
sys.exit(1)
for i in args:
r, w = getReaderWriter(i, out_dir)
if not r:
print "\nError: Could not convert file: %s"%i
print "Unsupported data format!\n"
else:
o = setAllAttributes(r)
w.SetInput(o)
# set output modes
if mode == 'a':
w.SetDataModeToAscii()
elif mode == 'b':
w.SetDataModeToBinary()
else:
w.SetDataModeToAppended()
w.SetEncodeAppendedData(encode)
w.Write()
if __name__ == "__main__":
main()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Utilities/vtk2xml.py
|
Python
|
bsd-3-clause
| 5,409
|
[
"VTK"
] |
f1410e64c15bb01a24e88454e1d2f30655821853aaf01d246a366f85755f93dc
|
#!/usr/bin/python
"""
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* isingrand: postprocessing of isingrand data
* Copyright (c) 2013 Analabha Roy (daneel@utexas.edu)
*
* This is free software: you can redistribute it and/or modify it under the
* terms of version 3 of the GNU Lesser General Public License as published by
* the Free Software Foundation.
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
"""
"""
Python program to
read files of quantities as functions of time
and plot their fluctuations as function of time
Usage: disorder_stdev.py <files>
"""
import numpy as np
import scipy as sp
import scipy.signal
import scipy.ndimage
import matplotlib.pyplot as plt
import sys,os.path,glob
# Gaussian smoothing window: size in samples and its standard deviation
# (integer division keeps stdev = winsize/4 under Python 2).
gauss_winsize = 40
gauss_stdev = gauss_winsize/4
#Fraction of the total interval to be used for time average
frac = 5
# Output file names for the raw and smoothed disorder fluctuations.
out_fname = "stdev.dat"
out_fname_smooth = "stdev_smooth.dat"
def processFile(filename):
    """Read a whitespace-separated numeric file into a flat list of floats.

    Every token on every line is converted with float(); tokens are
    returned in file order.  Raises ValueError on non-numeric tokens.
    """
    # A context manager guarantees the handle is closed even if a token
    # fails to parse (the original leaked the handle on error).  The
    # original strip() before split() was redundant: split() with no
    # arguments already ignores surrounding whitespace.
    with open(filename, "r") as fileHandle:
        return [float(number)
                for line in fileHandle
                for number in line.split()]
def processFiles(args):
    """Gather (time, value) rows from many runs and compute, for each time
    value, the standard deviation of the observable across runs.

    args: sys.argv.  args[1] may be a directory (all *.log files in it are
    read) or the first of a list of files given on the command line.
    Returns (times, stdevs) where times is a sorted unique array.
    """
    input_filemask = "log"
    directory = args[1]
    # Each input file holds two columns: time and observable value.
    shape = (-1,2)
    listofdata = list()
    xvals_stdev = list()
    if os.path.isdir(directory):
        print "processing a directory"
        list_of_files = glob.glob('%s/*.%s' % (directory, input_filemask))
    else:
        print "processing a list of files"
        list_of_files = sys.argv[1:]
    for file_name in list_of_files:
        print file_name
        data = np.array(processFile(file_name))
        data.shape = shape
        listofdata.append(data)
    listofdata = np.array(listofdata)
    # Flatten the per-file arrays into one long (time, value) table.
    listofdata.shape = shape
    #Select common times,
    xvals = np.unique(listofdata[:,0])
    for x in xvals:
        # All rows (from every file) sharing this time value.
        datasubset = listofdata[listofdata[:,0] == x]
        #append stdev to xvals
        xvals_stdev.append(np.std(datasubset[:,1]))
    return xvals, xvals_stdev
if __name__ == '__main__':
    if (len(sys.argv) > 1):
        x,stdev = processFiles(sys.argv)
    else:
        # NOTE(review): with no arguments, x/stdev are never assigned and
        # the code below raises NameError -- presumably intentional abort.
        print 'Usage: disorder_stdev.py <files> or <directory>'
    # Smooth the fluctuation curve with a normalized Gaussian window.
    windows = scipy.signal.gaussian(gauss_winsize,gauss_stdev)
    stdev_smooth = sp.ndimage.filters.convolve1d(stdev,windows/windows.sum())
    plt.gca().set_color_cycle(['blue', 'red', 'green', 'yellow'])
    plt.plot(x,stdev)
    plt.plot(x,stdev_smooth)
    plt.xlim((0,x[-1]))
    plt.xlabel('Time')
    plt.ylabel('Fluctuations')
    plt.legend(['Disorder fluctuations','Disorder fluctuations - smoothed'],loc='upper right')
    #Uncomment below for file dump
    #plt.savefig('stdev.png')
    plt.show()
    # Dump the raw fluctuations as two columns (time, stdev).
    print "\nDumping stdev to file" , out_fname , "..."
    x,stdev = np.array(x),np.array(stdev)
    outdat = np.vstack((x, stdev)).T
    np.savetxt(out_fname,outdat,delimiter=' ')
    print "Done!"
    # Dump the smoothed fluctuations the same way.
    print "\nDumping stdev smooth to file" , out_fname_smooth , "..."
    x,stdev_smooth = np.array(x),np.array(stdev_smooth)
    outdat = np.vstack((x, stdev_smooth)).T
    print "Time avg of disorder avg = ",np.mean(outdat[:,1])
    np.savetxt(out_fname_smooth,outdat,delimiter=' ')
    print "Done!"
    # Time-average over the last (1 - 1/frac) of the interval only, to skip
    # the initial transient.
    lowerlim = outdat[0,0]
    upperlim = outdat[-1,0]
    diff = upperlim-lowerlim
    lowerlim = lowerlim+(diff/frac)
    print
    print "Time avg of disorder stdev from t = ",lowerlim,"- ",upperlim,"is:"
    print np.mean(outdat[outdat[:,0]>=lowerlim][:,1])
    print
|
hariseldon99/archives
|
isingrand/scripts/disorder_stdev.py
|
Python
|
gpl-2.0
| 3,579
|
[
"Gaussian"
] |
7f9a72facb5dddca8c452a21f4e5d3032d63dc8ecc3c9b684809b7700a493be6
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone's pep8 extensions.
In order to make the review process faster and easier for core devs we are
adding some Keystone specific pep8 checks. This will catch common errors
so that core devs don't have to.
There are two types of pep8 extensions. One is a function that takes either
a physical or logical line. The physical or logical line is the first param
in the function definition and can be followed by other parameters supported
by pycodestyle. The second type is a class that parses AST trees. For more info
please see pycodestyle.py.
"""
import ast
from hacking import core
import re
class BaseASTChecker(ast.NodeVisitor):
    """Small framework for writing AST-based style checks.

    Subclasses implement ``visit_*`` methods as with any
    ``ast.NodeVisitor`` and call ``self.add_error(offending_node)`` when
    they detect a problem; the error position is taken from the node
    itself.  Subclasses should also define a ``CHECK_DESC`` class
    attribute holding the human-readable error message.
    """

    def __init__(self, tree, filename):
        """Instantiated automatically by pep8.

        :param tree: the AST tree to analyze
        :param filename: name of the file under analysis (unused here)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """Walk the stored tree and return collected errors (pep8 API)."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record an error at ``node``'s position for pep8 to report."""
        text = message if message else self.CHECK_DESC
        self._errors.append(
            (node.lineno, node.col_offset, text, self.__class__))
class CheckForMutableDefaultArgs(BaseASTChecker):
    """Flag mutable literals used as function/method default arguments.

    Only list/dict/set literals, comprehensions, and calls are detected;
    a mutable instance referenced by name would slip through, but that
    case is cheap enough to catch during code review.
    """

    name = "check_for_mutable_default_args"
    version = "1.0"

    CHECK_DESC = 'K001 Using mutable as a function/method default'
    MUTABLES = (
        ast.List, ast.ListComp,
        ast.Dict, ast.DictComp,
        ast.Set, ast.SetComp,
        ast.Call)

    def visit_FunctionDef(self, node):
        # Any default expression that is (or constructs) a mutable object
        # is reported at the expression's own position.
        for default in node.args.defaults:
            if isinstance(default, self.MUTABLES):
                self.add_error(default)
        super(CheckForMutableDefaultArgs, self).generic_visit(node)
@core.flake8ext
def block_comments_begin_with_a_space(physical_line, line_number):
    """There should be a space after the # of block comments.
    There is already a check in pep8 that enforces this rule for
    inline comments.
    Okay: # this is a comment
    Okay: #!/usr/bin/python
    Okay: # this is a comment
    K002: #this is a comment
    """
    # NOTE: the Okay:/K002: docstring lines above are executable fixtures
    # for hacking's self-tests, so the docstring is kept verbatim.
    msg = "K002 block comments should start with '# '"
    # A shebang on the first line is not a comment.
    if line_number == 1 and physical_line.startswith('#!'):
        return
    stripped = physical_line.strip()
    if not stripped.startswith('#'):
        return
    # Flag "#text" while still allowing a lone "#" and "# text".
    if len(stripped) > 1 and not stripped[1].isspace():
        return physical_line.index('#'), msg
class CheckForTranslationIssues(BaseASTChecker):
    """Detect misuse of translated strings in logging calls (K005) and
    use of the deprecated Logger.warn method (K009)."""

    name = "check_for_translation_issues"
    version = "1.0"

    LOGGING_CHECK_DESC = 'K005 Using translated string in logging'
    USING_DEPRECATED_WARN = 'K009 Using the deprecated Logger.warn'
    # Modules whose getLogger() produces a logger we track.
    LOG_MODULES = ('logging', 'oslo_log.log')
    # Importable i18n helpers whose results must not be passed to loggers.
    I18N_MODULES = (
        'keystone.i18n._',
    )
    TRANS_HELPER_MAP = {
        'debug': None,
        'info': '_LI',
        'warning': '_LW',
        'error': '_LE',
        'exception': '_LE',
        'critical': '_LC',
    }

    def __init__(self, tree, filename):
        super(CheckForTranslationIssues, self).__init__(tree, filename)
        # Names bound to logger objects / logging modules / i18n helpers.
        self.logger_names = []
        self.logger_module_names = []
        self.i18n_names = {}
        # NOTE(dstanek): this kinda accounts for scopes when talking
        # about only leaf node in the graph
        self.assignments = {}

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        # Identical to ast.NodeVisitor.generic_visit except that each child
        # gets a ``_parent`` backlink before being visited.
        for field, value in ast.iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, ast.AST):
                        item._parent = node
                        self.visit(item)
            elif isinstance(value, ast.AST):
                value._parent = node
                self.visit(value)

    def _filter_imports(self, module_name, alias):
        """Keep lists of logging and i18n imports."""
        if module_name in self.LOG_MODULES:
            self.logger_module_names.append(alias.asname or alias.name)
        elif module_name in self.I18N_MODULES:
            self.i18n_names[alias.asname or alias.name] = alias.name

    def visit_Import(self, node):
        """Record every ``import ...`` that brings in logging/i18n names."""
        for alias in node.names:
            self._filter_imports(alias.name, alias)
        return super(CheckForTranslationIssues, self).generic_visit(node)

    def visit_ImportFrom(self, node):
        """Record every ``from ... import ...`` of logging/i18n names."""
        for alias in node.names:
            full_name = '%s.%s' % (node.module, alias.name)
            self._filter_imports(full_name, alias)
        return super(CheckForTranslationIssues, self).generic_visit(node)

    def _find_name(self, node):
        """Return the fully qualified name or a Name or Attribute."""
        if isinstance(node, ast.Name):
            return node.id
        elif (isinstance(node, ast.Attribute)
                and isinstance(node.value, (ast.Name, ast.Attribute))):
            method_name = node.attr
            obj_name = self._find_name(node.value)
            if obj_name is None:
                return None
            return obj_name + '.' + method_name
        elif isinstance(node, str):
            return node
        else:  # could be Subscript, Call or many more
            return None

    def visit_Assign(self, node):
        """Look for 'LOG = logging.getLogger'.

        This handles the simple case:
          name = [logging_module].getLogger(...)
          - or -
          name = [i18n_name](...)

        And some much more complex ones:
          name = [i18n_name](...) % X
          - or -
          self.name = [i18n_name](...) % X
        """
        attr_node_types = (ast.Name, ast.Attribute)
        if (len(node.targets) != 1
                or not isinstance(node.targets[0], attr_node_types)):
            # say no to: "x, y = ..."
            return super(CheckForTranslationIssues, self).generic_visit(node)
        target_name = self._find_name(node.targets[0])
        if (isinstance(node.value, ast.BinOp) and
                isinstance(node.value.op, ast.Mod)):
            if (isinstance(node.value.left, ast.Call) and
                    isinstance(node.value.left.func, ast.Name) and
                    node.value.left.func.id in self.i18n_names):
                # NOTE(dstanek): this is done to match cases like:
                # `msg = _('something %s') % x`
                node = ast.Assign(value=node.value.left)
        if not isinstance(node.value, ast.Call):
            # node.value must be a call to getLogger
            self.assignments.pop(target_name, None)
            return super(CheckForTranslationIssues, self).generic_visit(node)
        # is this a call to an i18n function?
        if (isinstance(node.value.func, ast.Name)
                and node.value.func.id in self.i18n_names):
            self.assignments[target_name] = node.value.func.id
            return super(CheckForTranslationIssues, self).generic_visit(node)
        if (not isinstance(node.value.func, ast.Attribute)
                or not isinstance(node.value.func.value, attr_node_types)):
            # function must be an attribute on an object like
            # logging.getLogger
            return super(CheckForTranslationIssues, self).generic_visit(node)
        object_name = self._find_name(node.value.func.value)
        func_name = node.value.func.attr
        if (object_name in self.logger_module_names
                and func_name == 'getLogger'):
            self.logger_names.append(target_name)
        return super(CheckForTranslationIssues, self).generic_visit(node)

    def visit_Call(self, node):
        """Look for the 'LOG.*' calls."""
        # obj.method
        if isinstance(node.func, ast.Attribute):
            obj_name = self._find_name(node.func.value)
            if isinstance(node.func.value, ast.Name):
                method_name = node.func.attr
            elif isinstance(node.func.value, ast.Attribute):
                obj_name = self._find_name(node.func.value)
                method_name = node.func.attr
            else:  # could be Subscript, Call or many more
                return (super(CheckForTranslationIssues, self)
                        .generic_visit(node))
            # if dealing with a logger the method can't be "warn"
            if obj_name in self.logger_names and method_name == 'warn':
                msg = node.args[0]  # first arg to a logging method is the msg
                self.add_error(msg, message=self.USING_DEPRECATED_WARN)
            # must be a logger instance and one of the support logging methods
            if (obj_name not in self.logger_names
                    or method_name not in self.TRANS_HELPER_MAP):
                return (super(CheckForTranslationIssues, self)
                        .generic_visit(node))
            # the call must have arguments
            if not node.args:
                return (super(CheckForTranslationIssues, self)
                        .generic_visit(node))
            self._process_log_messages(node)
        return super(CheckForTranslationIssues, self).generic_visit(node)

    def _process_log_messages(self, node):
        """Report a K005 when a logging call's message argument is (or was
        assigned from) an i18n helper call."""
        msg = node.args[0]  # first arg to a logging method is the msg
        # if first arg is a call to a i18n name
        if (isinstance(msg, ast.Call)
                and isinstance(msg.func, ast.Name)
                and msg.func.id in self.i18n_names):
            self.add_error(msg, message=self.LOGGING_CHECK_DESC)
        # if the first arg is a reference to a i18n call
        elif (isinstance(msg, ast.Name)
                and msg.id in self.assignments):
            self.add_error(msg, message=self.LOGGING_CHECK_DESC)
@core.flake8ext
def dict_constructor_with_sequence_copy(logical_line):
    """Should use a dict comprehension instead of a dict constructor.
    PEP-0274 introduced dict comprehension with performance enhancement
    and it also makes code more readable.
    Okay: lower_res = {k.lower(): v for k, v in res[1].items()}
    Okay: fool = dict(a='a', b='b')
    K008: lower_res = dict((k.lower(), v) for k, v in res[1].items())
    K008: attrs = dict([(k, _from_json(v))
    K008: dict([[i,i] for i in range(3)])
    """
    # The Okay:/K008: docstring lines above double as hacking self-test
    # fixtures, so the docstring is kept verbatim.
    pattern = re.compile(r".*\bdict\((\[)?(\(|\[)(?!\{)")
    if pattern.match(logical_line):
        yield (0, ("K008 Must use a dict comprehension instead of a dict"
                   " constructor with a sequence of key-value pairs."))
|
openstack/keystone
|
keystone/tests/hacking/checks.py
|
Python
|
apache-2.0
| 12,074
|
[
"VisIt"
] |
c703317a0deebd03176b5f590f0e4531cce73a76bf01643b2f8d1fb639a7c574
|
"""Undocumented Module"""
__all__ = ['indent',
'StackTrace', 'traceFunctionCall', 'traceParentCall', 'printThisCall',
'doc', 'adjust', 'difference', 'intersection', 'union',
'sameElements', 'makeList', 'makeTuple', 'list2dict', 'invertDict',
'invertDictLossless', 'uniqueElements', 'disjoint', 'contains',
'replace', 'reduceAngle', 'fitSrcAngle2Dest', 'fitDestAngle2Src',
'closestDestAngle2', 'closestDestAngle', 'binaryRepr', 'profileFunc',
'profiled', 'startProfile', 'printProfile', 'getSetterName',
'getSetter', 'Functor', 'Stack', 'Queue',
'bound', 'clamp', 'lerp', 'average', 'addListsByValue',
'boolEqual', 'lineupPos', 'formatElapsedSeconds', 'solveQuadratic',
'stackEntryInfo', 'lineInfo', 'callerInfo', 'lineTag',
'findPythonModule', 'mostDerivedLast',
'weightedChoice', 'randFloat', 'normalDistrib',
'weightedRand', 'randUint31', 'randInt32', 'randUint32',
'SerialNumGen', 'serialNum', 'uniqueName', 'Enum', 'Singleton',
'SingletonError', 'printListEnum', 'safeRepr',
'fastRepr', 'isDefaultValue',
'ScratchPad', 'Sync', 'itype', 'getNumberedTypedString',
'getNumberedTypedSortedString', 'getNumberedTypedSortedStringWithReferrers',
'getNumberedTypedSortedStringWithReferrersGen',
'printNumberedTyped', 'DelayedCall', 'DelayedFunctor',
'FrameDelayedCall', 'SubframeCall', 'getBase',
'HotkeyBreaker','logMethodCalls','GoldenRatio',
'GoldenRectangle', 'rad90', 'rad180', 'rad270', 'rad360',
'nullGen', 'loopGen', 'makeFlywheelGen', 'flywheel', 'choice',
'printStack', 'printReverseStack', 'listToIndex2item', 'listToItem2index',
'pandaBreak','pandaTrace','formatTimeCompact',
'deeptype','getProfileResultString','StdoutCapture','StdoutPassthrough',
'Averager', 'getRepository', 'formatTimeExact', 'startSuperLog', 'endSuperLog',
'typeName', 'safeTypeName', 'histogramDict', 'unescapeHtmlString']
import types
import string
import math
import operator
import inspect
import os
import sys
import random
import time
import gc
#if __debug__:
import traceback
import __builtin__
from StringIO import StringIO
import marshal
# Default indentation depth used by reporting helpers in this module.
__report_indent = 3
from panda3d.core import ConfigVariableBool
# Numeric scalar types, used for quick isinstance() checks (Python 2 names).
ScalarTypes = (types.FloatType, types.IntType, types.LongType)
"""
# with one integer positional arg, this uses about 4/5 of the memory of the Functor class below
def Functor(function, *args, **kArgs):
argsCopy = args[:]
def functor(*cArgs, **ckArgs):
kArgs.update(ckArgs)
return function(*(argsCopy + cArgs), **kArgs)
return functor
"""
class Functor:
    """Callable that pre-binds positional and keyword arguments to a
    function (a richer, introspectable functools.partial)."""
    def __init__(self, function, *args, **kargs):
        assert callable(function), "function should be a callable obj"
        self._function = function
        self._args = args
        self._kargs = kargs
        # Mirror the wrapped function's name/doc so the Functor is easier
        # to identify in tracebacks and debuggers.
        if hasattr(self._function, '__name__'):
            self.__name__ = self._function.__name__
        else:
            self.__name__ = str(itype(self._function))
        if hasattr(self._function, '__doc__'):
            self.__doc__ = self._function.__doc__
        else:
            self.__doc__ = self.__name__
    def destroy(self):
        # Drop every reference so the wrapped function can be collected.
        del self._function
        del self._args
        del self._kargs
        del self.__name__
        del self.__doc__
    def _do__call__(self, *args, **kargs):
        # Call-time keyword args override the pre-bound ones; call-time
        # positional args are appended after the pre-bound ones.
        _kargs = self._kargs.copy()
        _kargs.update(kargs)
        return self._function(*(self._args + args), **_kargs)
    # this method is used in place of __call__ if we are recording creation stacks
    def _exceptionLoggedCreationStack__call__(self, *args, **kargs):
        try:
            return self._do__call__(*args, **kargs)
        except Exception, e:
            print '-->Functor creation stack (%s): %s' % (
                self.__name__, self.getCreationStackTraceCompactStr())
            raise
    __call__ = _do__call__
    def __repr__(self):
        s = 'Functor(%s' % self._function.__name__
        for arg in self._args:
            try:
                argStr = repr(arg)
            except:
                argStr = 'bad repr: %s' % arg.__class__
            s += ', %s' % argStr
        for karg, value in self._kargs.items():
            s += ', %s=%s' % (karg, repr(value))
        s += ')'
        return s
class Stack:
    """Minimal LIFO container backed by a Python list."""

    def __init__(self):
        self.__list = []

    def push(self, item):
        """Place item on top of the stack."""
        self.__list.append(item)

    def top(self):
        """Return the top item without removing it."""
        return self.__list[-1]

    def pop(self):
        """Remove and return the top item."""
        return self.__list.pop()

    def clear(self):
        """Discard every item."""
        self.__list = []

    def isEmpty(self):
        """True when the stack holds nothing."""
        return not self.__list

    def __len__(self):
        return len(self.__list)
class Queue:
    """Minimal FIFO container; the interface deliberately mirrors the
    LIFO ``Stack`` class."""

    def __init__(self):
        self.__list = []

    def push(self, item):
        """Append item at the back of the queue."""
        self.__list.append(item)

    def top(self):
        """Return the front item (next to be popped) without removing it."""
        return self.__list[0]

    def front(self):
        """Synonym for top(): the item that pop() would return next."""
        return self.__list[0]

    def back(self):
        """Return the most recently pushed item."""
        return self.__list[-1]

    def pop(self):
        """Remove and return the front item."""
        return self.__list.pop(0)

    def clear(self):
        """Discard every item."""
        self.__list = []

    def isEmpty(self):
        """True when the queue holds nothing."""
        return not self.__list

    def __len__(self):
        return len(self.__list)
# Sanity-check Queue's FIFO behavior when this module is run directly.
# Guarded by __debug__, so it disappears under "python -O".
if __debug__ and __name__ == '__main__':
    q = Queue()
    assert q.isEmpty()
    q.clear()
    assert q.isEmpty()
    q.push(10)
    assert not q.isEmpty()
    q.push(20)
    assert not q.isEmpty()
    assert len(q) == 2
    assert q.front() == 10
    assert q.back() == 20
    assert q.top() == 10
    assert q.top() == 10
    assert q.pop() == 10
    assert len(q) == 1
    assert not q.isEmpty()
    assert q.pop() == 20
    assert len(q) == 0
    assert q.isEmpty()
def indent(stream, numIndents, str):
    """
    Write str to stream with numIndents in front of it
    """
    # Four spaces per indent level (matches emacs; no tab characters).
    prefix = '    ' * numIndents
    stream.write(prefix + str)
#if __debug__: #RAU accdg to Darren its's ok that StackTrace is not protected by __debug__
# DCR: if somebody ends up using StackTrace in production, either
# A) it will be OK because it hardly ever gets called, or
# B) it will be easy to track it down (grep for StackTrace)
class StackTrace:
    """Snapshot of the current call stack, captured at construction time,
    with compact one-line and full multi-line renderings."""
    def __init__(self, label="", start=0, limit=None):
        """
        label is a string (or anything that can be a string)
        that is printed as part of the trace back.
        This is just to make it easier to tell what the
        stack trace is referring to.
        start is an integer number of stack frames back
        from the most recent. (This is automatically
        bumped up by one to skip the __init__ call
        to the StackTrace).
        limit is an integer number of stack frames
        to record (or None for unlimited).
        """
        self.label = label
        if limit is not None:
            self.trace = traceback.extract_stack(sys._getframe(1+start),
                                                 limit=limit)
        else:
            self.trace = traceback.extract_stack(sys._getframe(1+start))
    def compact(self):
        # One line: "module.func:lineno,module.func:lineno,...".
        # The slicing strips the ".py" suffix and any Windows directory
        # prefix (text before the last backslash) from each filename.
        r = ''
        comma = ','
        for filename, lineNum, funcName, text in self.trace:
            r += '%s.%s:%s%s' % (filename[:filename.rfind('.py')][filename.rfind('\\')+1:], funcName, lineNum, comma)
        if len(r):
            # Drop the trailing separator.
            r = r[:-len(comma)]
        return r
    def reverseCompact(self):
        # Same as compact(), but with the most recent frame first.
        r = ''
        comma = ','
        for filename, lineNum, funcName, text in self.trace:
            r = '%s.%s:%s%s%s' % (filename[:filename.rfind('.py')][filename.rfind('\\')+1:], funcName, lineNum, comma, r)
        if len(r):
            r = r[:-len(comma)]
        return r
    def __str__(self):
        # Full multi-line rendering, formatted like a normal traceback.
        r = "Debug stack trace of %s (back %s frames):\n"%(
            self.label, len(self.trace),)
        for i in traceback.format_list(self.trace):
            r+=i
        r+="***** NOTE: This is not a crash. This is a debug stack trace. *****"
        return r
def printStack():
    # Print a compact one-line stack trace; returning True lets callers
    # write "assert printStack()" so the call vanishes under python -O.
    print StackTrace(start=1).compact()
    return True
def printReverseStack():
    # Like printStack(), but with the most recent frame printed first.
    print StackTrace(start=1).reverseCompact()
    return True
def printVerboseStack():
    # Print the full multi-line stack trace (StackTrace.__str__).
    print StackTrace(start=1)
    return True
#-----------------------------------------------------------------------------
def traceFunctionCall(frame):
    """
    return a string that shows the call frame with calling arguments.
    e.g.
    foo(x=234, y=135)
    """
    f = frame
    co = f.f_code
    dict = f.f_locals
    n = co.co_argcount
    # co_flags bit 4 (CO_VARARGS) and bit 8 (CO_VARKEYWORDS) each add one
    # extra named slot in co_varnames for *args / **kwargs.
    if co.co_flags & 4: n = n+1
    if co.co_flags & 8: n = n+1
    r=''
    # Prefix methods with their class name: "ClassName.method(...)".
    if 'self' in dict:
        r = '%s.'%(dict['self'].__class__.__name__,)
    r+="%s("%(f.f_code.co_name,)
    comma=0 # formatting, whether we should type a comma.
    for i in range(n):
        name = co.co_varnames[i]
        if name=='self':
            continue
        if comma:
            r+=', '
        else:
            # ok, we skipped the first one, the rest get commas:
            comma=1
        r+=name
        r+='='
        if name in dict:
            # Truncate huge reprs so debug output stays readable.
            v=safeRepr(dict[name])
            if len(v)>2000:
                # r+="<too big for debug>"
                r += (v[:2000] + "...")
            else:
                r+=v
        else: r+="*** undefined ***"
    return r+')'
def traceParentCall():
    # Render the caller's caller (2 frames up) as "func(arg=value, ...)".
    return traceFunctionCall(sys._getframe(2))
def printThisCall():
    # Print the immediate caller's frame as "func(arg=value, ...)".
    print traceFunctionCall(sys._getframe(1))
    return 1 # to allow "assert printThisCall()"
# Magic numbers: These are the bit masks in func_code.co_flags that
# reveal whether or not the function has a *arg or **kw argument.
_POS_LIST = 4   # CO_VARARGS: function accepts *args
_KEY_DICT = 8   # CO_VARKEYWORDS: function accepts **kwargs
def doc(obj):
    """Print the docstring of a plain function or method; silently ignore
    any other object."""
    if (isinstance(obj, types.MethodType)) or \
       (isinstance(obj, types.FunctionType)):
        print obj.__doc__
def adjust(command = None, dim = 1, parent = None, **kw):
    """
    adjust(command = None, parent = None, **kw)
    Popup and entry scale to adjust a parameter
    Accepts any Slider keyword argument. Typical arguments include:
    command: The one argument command to execute
    min: The min value of the slider
    max: The max value of the slider
    resolution: The resolution of the slider
    text: The label on the slider
    These values can be accessed and/or changed after the fact
    >>> vg = adjust()
    >>> vg['min']
    0.0
    >>> vg['min'] = 10.0
    >>> vg['min']
    10.0
    """
    # Make sure we enable Tk
    # Don't use a regular import, to prevent ModuleFinder from picking
    # it up as a dependency when building a .p3d package.
    import importlib
    Valuator = importlib.import_module('direct.tkwidgets.Valuator')
    # Set command if specified
    if command:
        # the Valuator passes a sequence of values; unpack into 'command'
        kw['command'] = lambda x: apply(command, x)
        if parent is None:
            kw['title'] = command.__name__
    kw['dim'] = dim
    # Create toplevel if needed
    if not parent:
        vg = apply(Valuator.ValuatorGroupPanel, (parent,), kw)
    else:
        vg = apply(Valuator.ValuatorGroup, (parent,), kw)
        vg.pack(expand = 1, fill = 'x')
    return vg
def difference(a, b):
    """
    difference(list, list):
    Return the symmetric difference of a and b: the elements found in
    exactly one of the two lists, duplicates removed (a's entries first).
    If either list is empty, the other list object is returned as-is.
    """
    if not a:
        return b
    if not b:
        return a
    result = []
    for item in a:
        if item not in b and item not in result:
            result.append(item)
    for item in b:
        if item not in a and item not in result:
            result.append(item)
    return result
def intersection(a, b):
    """
    intersection(list, list):
    Return the elements common to a and b, duplicates removed, in the
    order they first appear in a.  Empty list if either input is empty.
    """
    if not a:
        return []
    if not b:
        return []
    result = []
    for item in a:
        if item in b and item not in result:
            result.append(item)
    # second pass kept for parity with the historical behavior; it can
    # add nothing new, since anything in both lists was caught above
    for item in b:
        if item in a and item not in result:
            result.append(item)
    return result
def union(a, b):
    """
    union(list, list):
    Return a shallow copy of a, extended with those elements of b not
    already present in the result.
    """
    result = a[:]
    for item in b:
        if item not in result:
            result.append(item)
    return result
def sameElements(a, b):
    """Return 1 if a and b have the same length and each contains every
    element of the other, 0 otherwise."""
    if len(a) != len(b):
        return 0
    if any(item not in b for item in a):
        return 0
    if any(item not in a for item in b):
        return 0
    return 1
def makeList(x):
    """returns x, converted to a list

    Lists are returned unchanged (same object), tuples are copied into a
    new list, anything else is wrapped in a single-element list.
    """
    # exact type checks (not isinstance) on purpose: subclasses are
    # wrapped rather than passed through.  list/tuple are identical to
    # the old types.ListType/types.TupleType, but also work on Python 3.
    if type(x) is list:
        return x
    elif type(x) is tuple:
        return list(x)
    else:
        return [x,]
def makeTuple(x):
    """returns x, converted to a tuple

    Tuples are returned unchanged (same object), lists are copied into a
    new tuple, anything else is wrapped in a single-element tuple.
    """
    if type(x) is tuple:
        return x
    elif type(x) is list:
        return tuple(x)
    else:
        return (x,)
def list2dict(L, value=None):
    """creates dict using elements of list, all assigned to same value"""
    return dict.fromkeys(L, value)
def listToIndex2item(L):
    """converts list to dict of list index->list item"""
    return dict(enumerate(L))
assert listToIndex2item(['a','b']) == {0: 'a', 1: 'b',}
def listToItem2index(L):
    """converts list to dict of list item->list index
    This is lossy if there are duplicate list items
    (the last index wins)"""
    return dict((item, index) for index, item in enumerate(L))
assert listToItem2index(['a','b']) == {'a': 0, 'b': 1,}
def invertDict(D, lossy=False):
    """creates a dictionary by 'inverting' D; keys are placed in the new
    dictionary under their corresponding value in the old dictionary.
    It is an error if D contains any duplicate values, unless lossy=True
    (in which case an arbitrary key survives per duplicated value).
    >>> old = {'key1':1, 'key2':2}
    >>> invertDict(old)
    {1: 'key1', 2: 'key2'}
    """
    n = {}
    for key, value in D.items():
        if not lossy and value in n:
            # the original raised a bare string here, which is itself a
            # TypeError on modern Pythons; raise a real exception
            raise ValueError('duplicate key in invertDict: %s' % value)
        n[value] = key
    return n
def invertDictLossless(D):
    """similar to invertDict, but values of new dict are lists of keys from
    old dict. No information is lost.
    >>> old = {'key1':1, 'key2':2, 'keyA':2}
    >>> invertDictLossless(old)
    {1: ['key1'], 2: ['key2', 'keyA']}
    """
    inverted = {}
    for key, value in D.items():
        inverted.setdefault(value, []).append(key)
    return inverted
def uniqueElements(L):
    """are all elements of list unique?"""
    # a set collapses duplicates exactly like the key-dict did
    return len(L) == len(set(L))
def disjoint(L1, L2):
    """returns non-zero if L1 and L2 have no common elements"""
    seen = set(L1)
    for item in L2:
        if item in seen:
            return 0
    return 1
def contains(whole, sub):
    """
    Return 1 if whole contains sub, 0 otherwise
    """
    if whole == sub:
        return 1
    for element in sub:
        # first element missing from whole settles it
        if element not in whole:
            return 0
    return 1
def replace(list, old, new, all=0):
    """
    replace 'old' with 'new' in 'list'
    if all == 0, replace first occurrence
    otherwise replace all occurrences
    returns the number of items replaced
    """
    if old not in list:
        return 0
    if not all:
        list[list.index(old)] = new
        return 1
    count = 0
    for index, item in enumerate(list):
        if item == old:
            list[index] = new
            count += 1
    return count
# Common angles precomputed in radians.
rad90 = math.pi / 2.
rad180 = math.pi
rad270 = 1.5 * math.pi
rad360 = 2. * math.pi
def reduceAngle(deg):
    """
    Reduces an angle (in degrees) to a value in [-180..180)
    """
    wrapped = (deg + 180.) % 360.
    return wrapped - 180.
def fitSrcAngle2Dest(src, dest):
    """
    given a src and destination angle, returns an equivalent src angle
    that is within [-180..180) of dest
    examples:
    fitSrcAngle2Dest(30, 60) == 30
    fitSrcAngle2Dest(60, 30) == 60
    fitSrcAngle2Dest(0, 180) == 0
    fitSrcAngle2Dest(-1, 180) == 359
    fitSrcAngle2Dest(-180, 180) == 180
    """
    return dest + reduceAngle(src - dest)
def fitDestAngle2Src(src, dest):
    """
    given a src and destination angle, returns an equivalent dest angle
    that is within [-180..180) of src
    examples:
    fitDestAngle2Src(30, 60) == 60
    fitDestAngle2Src(60, 30) == 30
    fitDestAngle2Src(0, 180) == -180
    fitDestAngle2Src(1, 180) == 180
    """
    return src + reduceAngle(dest - src)
def closestDestAngle2(src, dest):
    # Return an angle equivalent to 'dest' (mod 360) that lies on the
    # short way around from 'src'.
    delta = src - dest
    if delta > 180:
        # more than half a turn: shorter to spin the other way
        return dest - 360
    if delta < -180:
        # ... or the opposite way
        return dest + 360
    # already the short way; keep the original destination
    return dest
def closestDestAngle(src, dest):
    # Same idea, but the unwrapped result is expressed relative to 'src'.
    delta = src - dest
    if delta > 180:
        return src - (delta - 360)
    if delta < -180:
        return src - (360 + delta)
    return dest
def binaryRepr(number, max_length = 32):
    """Return the binary representation of 'number' as a string of '0'
    and '1' characters, without leading zeros.

    This will only work reliably for relatively small numbers; increase
    max_length for wider values.
    """
    assert number < (2 << max_length)
    # most-significant bit first
    digits = [(number >> shift) & 1 for shift in range(max_length - 1, -1, -1)]
    if 1 not in digits:
        # BUG FIX: the original returned the integer 0 here, which was
        # inconsistent with the string return type of every other path
        return '0'
    # strip leading zeros
    digits = digits[digits.index(1):]
    return ''.join(str(digit) for digit in digits)
class StdoutCapture:
    # redirects stdout to a string
    def __init__(self):
        # install ourselves as sys.stdout; destroy() restores the original
        self._oldStdout = sys.stdout
        sys.stdout = self
        self._string = ''
    def destroy(self):
        sys.stdout = self._oldStdout
        del self._oldStdout
    def getString(self):
        return self._string
    # internal
    def write(self, string):
        # accumulate everything written to stdout
        self._string = self._string + string
class StdoutPassthrough(StdoutCapture):
    # like StdoutCapture but also allows output to go through to the OS as normal
    # internal
    def write(self, string):
        self._string = self._string + string
        self._oldStdout.write(string)
# constant profile defaults
PyUtilProfileDefaultFilename = 'profiledata'
PyUtilProfileDefaultLines = 80
PyUtilProfileDefaultSorts = ['cumulative', 'time', 'calls']
# holds the text of the most recent silent profile run
_ProfileResultStr = ''
def getProfileResultString():
    # if you called profile with 'log' not set to True,
    # you can call this function to get the results as
    # a string
    global _ProfileResultStr
    return _ProfileResultStr
def profileFunc(callback, name, terse, log=True):
    # Profile a single zero-argument callable and return its result.
    # The callable and its result are parked on __builtin__ so the
    # profiled 'cmd' string can reach them by name.
    global _ProfileResultStr
    if 'globalProfileFunc' in __builtin__.__dict__:
        # rats. Python profiler is not re-entrant...
        base.notify.warning(
            'PythonUtil.profileStart(%s): aborted, already profiling %s'
            #'\nStack Trace:\n%s'
            % (name, __builtin__.globalProfileFunc,
            #StackTrace()
            ))
        return
    __builtin__.globalProfileFunc = callback
    __builtin__.globalProfileResult = [None]
    prefix = '***** START PROFILE: %s *****' % name
    if log:
        print prefix
    startProfile(cmd='globalProfileResult[0]=globalProfileFunc()', callInfo=(not terse), silent=not log)
    suffix = '***** END PROFILE: %s *****' % name
    if log:
        print suffix
    else:
        # not logging: stash the bracketed output for getProfileResultString()
        _ProfileResultStr = '%s\n%s\n%s' % (prefix, _ProfileResultStr, suffix)
    result = globalProfileResult[0]
    del __builtin__.__dict__['globalProfileFunc']
    del __builtin__.__dict__['globalProfileResult']
    return result
def profiled(category=None, terse=False):
    """ decorator for profiling functions
    turn categories on and off via "want-profile-categoryName 1"
    e.g.
    @profiled('particles')
    def loadParticles():
        ...
    want-profile-particles 1
    """
    assert type(category) in (types.StringType, types.NoneType), "must provide a category name for @profiled"
    # allow profiling in published versions
    # (the dead __dev__ short-circuit below is intentionally disabled)
    """
    try:
        null = not __dev__
    except:
        null = not __debug__
    if null:
        # if we're not in __dev__, just return the function itself. This
        # results in zero runtime overhead, since decorators are evaluated
        # at module-load.
        def nullDecorator(f):
            return f
        return nullDecorator
    """
    def profileDecorator(f):
        def _profiled(*args, **kArgs):
            name = '(%s) %s from %s' % (category, f.func_name, f.__module__)
            # showbase might not be loaded yet, so don't use
            # base.config. Instead, query the ConfigVariableBool.
            if (category is None) or ConfigVariableBool('want-profile-%s' % category, 0).getValue():
                return profileFunc(Functor(f, *args, **kArgs), name, terse)
            else:
                return f(*args, **kArgs)
        _profiled.__doc__ = f.__doc__
        return _profiled
    return profileDecorator
# intercept profile-related file operations to avoid disk access
# stacks of the displaced builtins, so the hooks can be nested/restored
movedOpenFuncs = []
movedDumpFuncs = []
movedLoadFuncs = []
# filenames currently being redirected to in-RAM files
profileFilenames = set()
profileFilenameList = Stack()
profileFilename2file = {}
profileFilename2marshalData = {}
def _profileOpen(filename, *args, **kArgs):
    # this is a replacement for the file open() builtin function
    # for use during profiling, to intercept the file open
    # operation used by the Python profiler and profile stats
    # systems
    if filename in profileFilenames:
        # if this is a file related to profiling, create an
        # in-RAM file object
        if filename not in profileFilename2file:
            file = StringIO()
            file._profFilename = filename
            profileFilename2file[filename] = file
        else:
            file = profileFilename2file[filename]
    else:
        # not a profile file: fall through to the real open()
        file = movedOpenFuncs[-1](filename, *args, **kArgs)
    return file
def _profileMarshalDump(data, file):
    # marshal.dump doesn't work with StringIO objects
    # simulate it
    if isinstance(file, StringIO) and hasattr(file, '_profFilename'):
        if file._profFilename in profileFilenames:
            profileFilename2marshalData[file._profFilename] = data
            return None
    # not one of ours: delegate to the real marshal.dump
    return movedDumpFuncs[-1](data, file)
def _profileMarshalLoad(file):
    # marshal.load doesn't work with StringIO objects
    # simulate it
    if isinstance(file, StringIO) and hasattr(file, '_profFilename'):
        if file._profFilename in profileFilenames:
            return profileFilename2marshalData[file._profFilename]
    return movedLoadFuncs[-1](file)
def _installProfileCustomFuncs(filename):
    # Begin redirecting profile I/O for 'filename' to RAM: swap in the
    # hook versions of open/marshal.dump/marshal.load.
    assert filename not in profileFilenames
    profileFilenames.add(filename)
    profileFilenameList.push(filename)
    movedOpenFuncs.append(__builtin__.open)
    __builtin__.open = _profileOpen
    movedDumpFuncs.append(marshal.dump)
    marshal.dump = _profileMarshalDump
    movedLoadFuncs.append(marshal.load)
    marshal.load = _profileMarshalLoad
def _getProfileResultFileInfo(filename):
    # snapshot of the in-RAM file and marshalled data for 'filename'
    return (profileFilename2file.get(filename, None),
            profileFilename2marshalData.get(filename, None))
def _setProfileResultsFileInfo(filename, info):
    # restore a snapshot previously taken by _getProfileResultFileInfo
    f, m = info
    if f:
        profileFilename2file[filename] = f
    if m:
        profileFilename2marshalData[filename] = m
def _clearProfileResultFileInfo(filename):
    profileFilename2file.pop(filename, None)
    profileFilename2marshalData.pop(filename, None)
def _removeProfileCustomFuncs(filename):
    # Undo _installProfileCustomFuncs; must be called in LIFO order.
    assert profileFilenameList.top() == filename
    marshal.load = movedLoadFuncs.pop()
    marshal.dump = movedDumpFuncs.pop()
    __builtin__.open = movedOpenFuncs.pop()
    profileFilenames.remove(filename)
    profileFilenameList.pop()
    profileFilename2file.pop(filename, None)
    # don't let marshalled data pile up
    profileFilename2marshalData.pop(filename, None)
# call this from the prompt, and break back out to the prompt
# to stop profiling
#
# OR to do inline profiling, you must make a globally-visible
# function to be profiled, i.e. to profile 'self.load()', do
# something like this:
#
# def func(self=self):
# self.load()
# import __builtin__
# __builtin__.func = func
# PythonUtil.startProfile(cmd='func()', filename='profileData')
# del __builtin__.func
#
def _profileWithoutGarbageLeak(cmd, filename):
    # Run profile.run(cmd) but clean up the reference the profiler
    # leaves behind (see 'del prof.dispatcher' at the bottom).
    # The profile module isn't necessarily installed on every Python
    # installation, so we import it here, instead of in the module
    # scope.
    import profile
    # this is necessary because the profile module creates a memory leak
    Profile = profile.Profile
    statement = cmd
    sort = -1
    retVal = None
    #### COPIED FROM profile.run ####
    prof = Profile()
    try:
        prof = prof.run(statement)
    except SystemExit:
        pass
    if filename is not None:
        prof.dump_stats(filename)
    else:
        #return prof.print_stats(sort) #DCR
        retVal = prof.print_stats(sort) #DCR
    #################################
    # eliminate the garbage leak
    del prof.dispatcher
    return retVal
def startProfile(filename=PyUtilProfileDefaultFilename,
                 lines=PyUtilProfileDefaultLines,
                 sorts=PyUtilProfileDefaultSorts,
                 silent=0,
                 callInfo=1,
                 useDisk=False,
                 cmd='run()'):
    # Profile the 'cmd' string and either print the report (silent=0)
    # or capture it for getProfileResultString() (silent=1).  By default
    # the stats file lives only in RAM (useDisk=False).
    # uniquify the filename to allow multiple processes to profile simultaneously
    filename = '%s.%s%s' % (filename, randUint31(), randUint31())
    if not useDisk:
        # use a RAM file
        _installProfileCustomFuncs(filename)
    _profileWithoutGarbageLeak(cmd, filename)
    if silent:
        extractProfile(filename, lines, sorts, callInfo)
    else:
        printProfile(filename, lines, sorts, callInfo)
    if not useDisk:
        # discard the RAM file
        _removeProfileCustomFuncs(filename)
    else:
        os.remove(filename)
# call these to see the results again, as a string or in the log
def printProfile(filename=PyUtilProfileDefaultFilename,
                 lines=PyUtilProfileDefaultLines,
                 sorts=PyUtilProfileDefaultSorts,
                 callInfo=1):
    # Print a pstats report of 'filename', once per requested sort order,
    # optionally including callee/caller breakdowns.
    import pstats
    s = pstats.Stats(filename)
    s.strip_dirs()
    for sort in sorts:
        s.sort_stats(sort)
        s.print_stats(lines)
        if callInfo:
            s.print_callees(lines)
            s.print_callers(lines)
# same args as printProfile
def extractProfile(*args, **kArgs):
    # Same arguments as printProfile, but the report goes into
    # _ProfileResultStr instead of stdout.
    global _ProfileResultStr
    # capture print output
    sc = StdoutCapture()
    # print the profile output, redirected to the result string
    printProfile(*args, **kArgs)
    # make a copy of the print output
    _ProfileResultStr = sc.getString()
    # restore stdout to what it was before
    sc.destroy()
def getSetterName(valueName, prefix='set'):
    # getSetterName('color') -> 'setColor'
    # getSetterName('color', 'get') -> 'getColor'
    return prefix + valueName[0].upper() + valueName[1:]
def getSetter(targetObj, valueName, prefix='set'):
    # getSetter(smiley, 'pos') -> smiley.setPos
    return getattr(targetObj, getSetterName(valueName, prefix))
def mostDerivedLast(classList):
    """pass in list of classes. sorts list in-place, with derived classes
    appearing after their bases"""
    class SortKey(object):
        __slots__ = 'classobj',
        def __init__(self, classobj):
            self.classobj = classobj
        def __lt__(self, other):
            # a base class sorts before any of its subclasses;
            # unrelated classes keep their original (stable) order
            return issubclass(other.classobj, self.classobj)
    classList.sort(key=SortKey)
def bound(value, bound1, bound2):
    """
    returns value if value is between bound1 and bound2
    otherwise returns bound that is closer to value
    """
    # bounds may be given in either order
    lo, hi = (bound2, bound1) if bound1 > bound2 else (bound1, bound2)
    return min(max(value, lo), hi)
clamp = bound
def lerp(v0, v1, t):
    """
    returns a value lerped between v0 and v1, according to t
    t == 0 maps to v0, t == 1 maps to v1
    """
    span = v1 - v0
    return v0 + span * t
def getShortestRotation(start, end):
    """
    Given two heading values, return a tuple describing
    the shortest interval from 'start' to 'end'. This tuple
    can be used to lerp a camera between two rotations
    while avoiding the 'spin' problem.
    """
    start %= 360
    end %= 360
    if abs(end - start) > 180:
        # the short way crosses the 0/360 seam; unwrap one endpoint
        if end < start:
            end += 360
        else:
            start += 360
    return (start, end)
def average(*args):
    """ returns simple average of list of values """
    # start the sum at 0. so the division is always float division
    return sum(args, 0.) / len(args)
class Averager:
    """Accumulates values via addValue() and reports their mean."""
    def __init__(self, name):
        self._name = name
        self.reset()
    def reset(self):
        # discard all accumulated values
        self._total = 0.
        self._count = 0
    def addValue(self, value):
        self._total += value
        self._count += 1
    def getAverage(self):
        # NOTE: raises ZeroDivisionError if no values have been added
        return self._total / self._count
    def getCount(self):
        return self._count
def addListsByValue(a, b):
    """
    returns a new array containing the sums of the two array arguments
    (c[0] = a[0] + b[0], etc.)
    """
    return [x + y for x, y in zip(a, b)]
def boolEqual(a, b):
    """
    returns true if a and b are both true or both false.
    returns false otherwise
    (a.k.a. xnor -- eXclusive Not OR).
    """
    both = a and b
    if both:
        # preserve historical behavior: return the truthy 'a and b'
        # value itself, not a normalized bool
        return both
    return not (a or b)
def lineupPos(i, num, spacing):
    """
    use to line up a series of 'num' objects, in one dimension,
    centered around zero
    'i' is the index of the object in the lineup
    'spacing' is the amount of space between objects in the lineup
    """
    assert num >= 1
    assert i >= 0 and i < num
    # slot offset, shifted so the whole lineup is centered on zero
    total = float(spacing) * (num - 1)
    return float(i) * spacing - total / 2.
def formatElapsedSeconds(seconds):
    """
    Returns a string of the form "mm:ss" or "hh:mm:ss" or "n days",
    representing the indicated elapsed time in seconds.
    """
    sign = ''
    if seconds < 0:
        sign = '-'
        seconds = -seconds
    # math.floor() instead of int() so values too large for type int
    # don't raise
    seconds = math.floor(seconds)
    hours = math.floor(seconds / (60 * 60))
    if hours > 36:
        # beyond a day and a half, report whole days (rounded)
        days = math.floor((hours + 12) / 24)
        return "%s%d days" % (sign, days)
    seconds -= hours * (60 * 60)
    minutes = (int)(seconds / 60)
    seconds -= minutes * 60
    if hours != 0:
        return "%s%d:%02d:%02d" % (sign, hours, minutes, seconds)
    return "%s%d:%02d" % (sign, minutes, seconds)
def solveQuadratic(a, b, c):
    # Solve ax^2 + bx + c = 0 via the quadratic formula:
    # x = [-b +/- sqrt(b^2 - 4ac)] / 2a
    # Returns None (no real roots / a == 0), a single root, or
    # a list [root1, root2].
    if a == 0.:
        # not a quadratic
        return None
    discriminant = (b * b) - (4. * a * c)
    if discriminant < 0:
        # no real solutions
        return None
    if discriminant == 0:
        # one repeated root
        return (-b) / (2. * a)
    sqrtD = math.sqrt(discriminant)
    denom = 2. * a
    return [((-b) - sqrtD) / denom, ((-b) + sqrtD) / denom]
def stackEntryInfo(depth=0, baseFileName=1):
    """
    returns the sourcefilename, line number, and function name of
    an entry in the stack.
    'depth' is how far back to go in the stack; 0 is the caller of this
    function, 1 is the function that called the caller of this function, etc.
    by default, strips off the path of the filename; override with baseFileName
    returns (fileName, lineNum, funcName) --> (string, int, string)
    returns (None, None, None) on error
    """
    try:
        stack = None
        frame = None
        try:
            stack = inspect.stack()
            # add one to skip the frame associated with this function
            frame = stack[depth+1]
            filename = frame[1]
            if baseFileName:
                filename = os.path.basename(filename)
            lineNum = frame[2]
            funcName = frame[3]
            result = (filename, lineNum, funcName)
        finally:
            # NOTE(review): the dels presumably avoid keeping frame
            # objects (and their locals) alive via a reference cycle
            del stack
            del frame
    except:
        # any failure (e.g. depth beyond the stack) reports as unknown
        result = (None, None, None)
    return result
def lineInfo(baseFileName=1):
    """
    returns the sourcefilename, line number, and function name of the
    code that called this function
    (answers the question: 'hey lineInfo, where am I in the codebase?')
    see stackEntryInfo, above, for info on 'baseFileName' and return types
    """
    return stackEntryInfo(1, baseFileName)
def callerInfo(baseFileName=1, howFarBack=0):
    """
    returns the sourcefilename, line number, and function name of the
    caller of the function that called this function
    (answers the question: 'hey callerInfo, who called me?')
    see stackEntryInfo, above, for info on 'baseFileName' and return types
    """
    return stackEntryInfo(2+howFarBack, baseFileName)
def lineTag(baseFileName=1, verbose=0, separator=':'):
    """
    returns a string containing the sourcefilename and line number
    of the code that called this function
    (equivalent to lineInfo, above, with different return type)
    see stackEntryInfo, above, for info on 'baseFileName'
    if 'verbose' is false, returns a compact string of the form
    'fileName:lineNum:funcName'
    if 'verbose' is true, returns a longer string that matches the
    format of Python stack trace dumps
    returns empty string on error
    """
    fileName, lineNum, funcName = callerInfo(baseFileName)
    if fileName is None:
        # stack introspection failed
        return ''
    if verbose:
        return 'File "%s", line %s, in %s' % (fileName, lineNum, funcName)
    else:
        return '%s%s%s%s%s' % (fileName, separator, lineNum, separator,
                               funcName)
def findPythonModule(module):
    # Look along the python load path for the indicated filename.
    # Returns the located pathname, or None if the filename is not
    # found.
    filename = module + '.py'
    for dirname in sys.path:
        candidate = os.path.join(dirname, filename)
        if os.path.exists(candidate):
            return candidate
    return None
def weightedChoice(choiceList, rng=random.random, sum=None):
    """given a list of (weight, item) pairs, chooses an item based on the
    weights. rng must return 0..1. if you happen to have the sum of the
    weights, pass it in 'sum'."""
    # TODO: add support for dicts
    if sum is None:
        sum = 0.
        for weight, item in choiceList:
            sum += weight
    accum = rng() * sum
    for weight, item in choiceList:
        accum -= weight
        if accum <= 0.:
            return item
    # rng returned ~1. and floating-point error kept accum positive
    # (or the supplied 'sum' was too large); fall back to the last item
    return item
def randFloat(a, b=0., rng=random.random):
    """returns a random float in [a, b]
    call with single argument to generate random float between arg and zero
    """
    # inline linear interpolation between a and b at parameter rng()
    t = rng()
    return a + ((b - a) * t)
def normalDistrib(a, b, gauss=random.gauss):
    """
    NOTE: assumes a < b
    Returns random number between a and b, using gaussian distribution, with
    mean=avg(a, b), and a standard deviation that fits ~99.7% of the curve
    between a and b.
    Per the 68-95-99.7 rule, 99.7% of a normal distribution falls within
    3 standard deviations of the mean, so a deviation of (b-a)/6 puts
    3 deviations on either side of the midpoint.
    For ease of use, outlying results are re-computed until the result is
    in [a, b]; this folds the remaining ~0.3% of the curve uniformly back
    onto the portion inside [a, b].
    (see http://www-stat.stanford.edu/~naras/jsm/NormalDensity/NormalDensity.html)
    """
    midpoint = (a + b) * .5
    deviation = (b - a) / 6.
    while True:
        sample = gauss(midpoint, deviation)
        if a <= sample <= b:
            return sample
def weightedRand(valDict, rng=random.random):
    """
    pass in a dictionary with a selection -> weight mapping. Eg.
    {"Choice 1": 10,
     "Choice 2": 30,
     "bear": 100}
    -Weights need not add up to any particular value.
    -The actual selection will be returned.
    """
    # list() so the sequences are indexable (dict views are not);
    # keys() and values() are guaranteed to correspond in order
    selections = list(valDict.keys())
    weights = list(valDict.values())
    totalWeight = 0
    for weight in weights:
        totalWeight += weight
    # get a random value between 0 and the total of the weights
    randomWeight = rng() * totalWeight
    # find the index that corresponds with this weight
    for i in range(len(weights)):
        totalWeight -= weights[i]
        if totalWeight <= randomWeight:
            return selections[i]
    # BUG FIX: the original said 'assert True', which can never fire;
    # the loop always returns for non-empty input, so reaching this
    # point indicates pathological weights or an empty dict
    assert False, "Should never get here"
    return selections[-1]
def randUint31(rng=random.random):
    """returns a random integer in [0..2^31).
    rng must return float in [0..1]"""
    return int(0x7FFFFFFF * rng())
def randInt32(rng=random.random):
    """returns a random integer in [-2147483648..2147483647].
    rng must return float in [0..1]
    """
    value = int(0x7FFFFFFF * rng())
    # a second draw decides the sign
    if rng() < .5:
        value = -value
    return value
def randUint32(rng=random.random):
    """returns a random integer in [0..2^32).
    rng must return float in [0..1]"""
    # NOTE: Python 2 only -- relies on the 'long' builtin and an
    # L-suffixed integer literal
    return long(rng() * 0xFFFFFFFFL)
class SerialNumGen:
    """generates serial numbers"""
    def __init__(self, start=None):
        # the first next() call returns 'start' (default 0)
        if start is None:
            start = 0
        self.__counter = start - 1
    def next(self):
        self.__counter += 1
        return self.__counter
class SerialMaskedGen(SerialNumGen):
    """serial numbers constrained to a bitmask (wraps around)"""
    def __init__(self, mask, start=None):
        self._mask = mask
        SerialNumGen.__init__(self, start)
    def next(self):
        return SerialNumGen.next(self) & self._mask
# module-wide serial number source shared by serialNum/uniqueName
_serialGen = SerialNumGen()
def serialNum():
    # next process-unique serial number
    global _serialGen
    return _serialGen.next()
def uniqueName(name):
    # append a serial number to make 'name' unique within this process
    global _serialGen
    return '%s-%s' % (name, _serialGen.next())
class EnumIter:
    """Iterator over the integer values of an Enum."""
    def __init__(self, enum):
        # snapshot the values up front; list() is required because
        # dict.keys() returns a non-indexable view on Python 3
        self._values = list(enum._stringTable.keys())
        self._index = 0
    def __iter__(self):
        return self
    def next(self):
        if self._index >= len(self._values):
            raise StopIteration
        self._index += 1
        return self._values[self._index-1]
    # backward-compatible addition: support the Python 3 iterator protocol
    __next__ = next
class Enum:
    """Pass in list of strings or string of comma-separated strings.
    Items are accessible as instance.item, and are assigned unique,
    increasing integer values. Pass in integer for 'start' to override
    starting value.
    Example:
    >>> colors = Enum('red, green, blue')
    >>> colors.red
    0
    >>> colors.green
    1
    >>> colors.blue
    2
    >>> colors.getString(colors.red)
    'red'
    """
    if __debug__:
        # chars that cannot appear within an item string.
        InvalidChars = string.whitespace
        def _checkValidIdentifier(item):
            # validate that 'item' can be used as an attribute name
            invalidChars = string.whitespace+string.punctuation
            invalidChars = invalidChars.replace('_','')
            invalidFirstChars = invalidChars+string.digits
            if item[0] in invalidFirstChars:
                # modern call-style raise; the old 'raise Exc, arg'
                # form is Python-2-only syntax
                raise SyntaxError("Enum '%s' contains invalid first char" %
                                  item)
            if not disjoint(item, invalidChars):
                for char in item:
                    if char in invalidChars:
                        raise SyntaxError(
                            "Enum\n'%s'\ncontains illegal char '%s'" %
                            (item, char))
            return 1
        _checkValidIdentifier = staticmethod(_checkValidIdentifier)
    def __init__(self, items, start=0):
        """items: list of names, or one comma-separated string of names.
        start: value assigned to the first item (values then increase)."""
        if isinstance(items, str):
            items = items.split(',')
        # maps value (int) -> item name (str)
        self._stringTable = {}
        # make sure we don't overwrite an existing element of the class
        assert self._checkExistingMembers(items)
        assert uniqueElements(items)
        i = start
        for item in items:
            # remove leading/trailing whitespace
            item = item.strip()
            # is there anything left?
            if len(item) == 0:
                continue
            # make sure there are no invalid characters
            assert Enum._checkValidIdentifier(item)
            self.__dict__[item] = i
            self._stringTable[i] = item
            i += 1
    def __iter__(self):
        return EnumIter(self)
    def hasString(self, string):
        # is 'string' the name of one of the items?
        return string in set(self._stringTable.values())
    def fromString(self, string):
        # name -> value
        if self.hasString(string):
            return self.__dict__[string]
        # throw an error (KeyError via an empty-dict lookup, by design)
        {}[string]
    def getString(self, value):
        # value -> name
        return self._stringTable[value]
    def __contains__(self, value):
        return value in self._stringTable
    def __len__(self):
        return len(self._stringTable)
    def copyTo(self, obj):
        # copies all members onto obj
        # BUG FIX: _stringTable maps value -> name, and iterating the
        # dict directly yields only the integer keys (the original
        # failed on tuple unpacking here); iterate items() and assign
        # each name its value
        for value, name in self._stringTable.items():
            setattr(obj, name, value)
    if __debug__:
        def _checkExistingMembers(self, items):
            # refuse names that collide with existing attributes
            for item in items:
                if hasattr(self, item):
                    return 0
            return 1
############################################################
# class: Singleton
# Purpose: This provides a base metaclass for all classes
# that require one and only one instance.
#
# Example: class mySingleton:
# __metaclass__ = PythonUtil.Singleton
# def __init__(self, ...):
# ...
#
# Note: This class is based on Python's New-Style Class
# design. An error will occur if a defined class
# attemps to inherit from a Classic-Style Class only,
# ie: class myClassX:
# def __init__(self, ...):
# ...
#
# class myNewClassX(myClassX):
# __metaclass__ = PythonUtil.Singleton
# def __init__(self, ...):
# myClassX.__init__(self, ...)
# ...
#
# This causes problems because myNewClassX is a
# New-Style class that inherits from only a
# Classic-Style base class. There are two ways
# simple ways to resolve this issue.
#
# First, if possible, make myClassX a
# New-Style class by inheriting from object
# object. IE: class myClassX(object):
#
# If for some reason that is not an option, make
# myNewClassX inherit from object and myClassX.
# IE: class myNewClassX(object, myClassX):
############################################################
class Singleton(type):
    """Metaclass that gives each class using it exactly one shared
    instance (created lazily on the first call)."""
    def __init__(cls, name, bases, dic):
        super(Singleton, cls).__init__(name, bases, dic)
        # no instance yet; __call__ builds it on demand
        cls.instance = None
    def __call__(cls, *args, **kw):
        if cls.instance is None:
            # first instantiation: construct and cache
            cls.instance = super(Singleton, cls).__call__(*args, **kw)
        return cls.instance
class SingletonError(ValueError):
    """ Used to indicate an inappropriate value for a Singleton."""
def printListEnumGen(l):
    # log each individual item with a number in front of it
    # (a generator so callers can interleave printing with other work)
    # first, count how many digits the largest index needs
    digits = 0
    n = len(l)
    while n > 0:
        digits += 1
        n //= 10
    format = '%0' + '%s' % digits + 'i:%s'
    for i in range(len(l)):
        print format % (i, l[i])
        yield None
def printListEnum(l):
    # run the generator to completion, printing every item
    for result in printListEnumGen(l):
        pass
# base class for all Panda C++ objects
# libdtoolconfig doesn't seem to have this, grab it off of TypedObject
dtoolSuperBase = None
def _getDtoolSuperBase():
    # lazily resolve the common C++ base class (avoids importing
    # panda3d at module-load time)
    global dtoolSuperBase
    from panda3d.core import TypedObject
    dtoolSuperBase = TypedObject.__bases__[0]
    assert dtoolSuperBase.__name__ == 'DTOOL_SUPER_BASE'
safeReprNotify = None
def _getSafeReprNotify():
    # lazily create the notify category used by safeRepr logging
    global safeReprNotify
    from direct.directnotify.DirectNotifyGlobal import directNotify
    safeReprNotify = directNotify.newCategory("safeRepr")
    return safeReprNotify
def safeRepr(obj):
    # repr() that never raises: logs before repr-ing C++-backed objects
    # (whose repr may crash) and returns a placeholder string on failure.
    global dtoolSuperBase
    if dtoolSuperBase is None:
        _getDtoolSuperBase()
    global safeReprNotify
    if safeReprNotify is None:
        _getSafeReprNotify()
    if isinstance(obj, dtoolSuperBase):
        # repr of C++ object could crash, particularly if the object has been deleted
        # log that we're calling repr
        safeReprNotify.info('calling repr on instance of %s.%s' % (obj.__class__.__module__, obj.__class__.__name__))
        sys.stdout.flush()
    try:
        return repr(obj)
    except:
        return '<** FAILED REPR OF %s instance at %s **>' % (obj.__class__.__name__, hex(id(obj)))
def safeReprTypeOnFail(obj):
    # Like safeRepr, but C++-backed objects report only their type
    # (no repr attempt at all).
    global dtoolSuperBase
    if dtoolSuperBase is None:
        _getDtoolSuperBase()
    global safeReprNotify
    if safeReprNotify is None:
        _getSafeReprNotify()
    if isinstance(obj, dtoolSuperBase):
        return type(obj)
    try:
        return repr(obj)
    except:
        return '<** FAILED REPR OF %s instance at %s **>' % (obj.__class__.__name__, hex(id(obj)))
def fastRepr(obj, maxLen=200, strFactor=10, _visitedIds=None):
    """ caps the length of iterable types, so very large objects will print faster.
    also prevents infinite recursion

    maxLen: max number of elements shown per container (None = unlimited);
    strings get maxLen*strFactor characters.  _visitedIds is internal,
    used to detect self-referencing structures.
    """
    try:
        if _visitedIds is None:
            _visitedIds = set()
        if id(obj) in _visitedIds:
            return '<ALREADY-VISITED %s>' % itype(obj)
        # NOTE: tuple/list/dict/str are the builtins the old
        # types.TupleType/ListType/DictType/StringType aliased
        if type(obj) in (tuple, list):
            s = ''
            s += {tuple: '(',
                  list: '[',}[type(obj)]
            if maxLen is not None and len(obj) > maxLen:
                o = obj[:maxLen]
                ellips = '...'
            else:
                o = obj
                ellips = ''
            # mark as in-progress so cycles don't recurse forever
            _visitedIds.add(id(obj))
            for item in o:
                s += fastRepr(item, maxLen, _visitedIds=_visitedIds)
                s += ', '
            _visitedIds.remove(id(obj))
            s += ellips
            s += {tuple: ')',
                  list: ']',}[type(obj)]
            return s
        elif type(obj) is dict:
            s = '{'
            # list() so the key collection is sliceable
            keys = list(obj.keys())
            if maxLen is not None and len(obj) > maxLen:
                o = keys[:maxLen]
                ellips = '...'
            else:
                o = keys
                ellips = ''
            _visitedIds.add(id(obj))
            for key in o:
                value = obj[key]
                s += '%s: %s, ' % (fastRepr(key, maxLen, _visitedIds=_visitedIds),
                                   fastRepr(value, maxLen, _visitedIds=_visitedIds))
            _visitedIds.remove(id(obj))
            s += ellips
            s += '}'
            return s
        elif type(obj) is str:
            if maxLen is not None:
                maxLen *= strFactor
            if maxLen is not None and len(obj) > maxLen:
                return safeRepr(obj[:maxLen])
            else:
                return safeRepr(obj)
        else:
            r = safeRepr(obj)
            # BUG FIX: guard the scale-up against maxLen=None; the
            # original multiplied unconditionally, raising a TypeError
            # that the except below silently turned into a bogus
            # FAILED-REPR string
            if maxLen is not None:
                maxLen *= strFactor
                if len(r) > maxLen:
                    r = r[:maxLen]
            return r
    except:
        return '<** FAILED REPR OF %s **>' % obj.__class__.__name__
def convertTree(objTree, idList):
newTree = {}
for key in objTree.keys():
obj = (idList[key],)
newTree[obj] = {}
r_convertTree(objTree[key], newTree[obj], idList)
return newTree
def r_convertTree(oldTree, newTree, idList):
    """Recursive helper for convertTree: fills newTree with truncated
    string representations of the objects behind the ids in oldTree."""
    for key in oldTree.keys():
        obj = idList.get(key)
        if(not obj):
            # id no longer maps to a live/known object; skip this subtree
            continue
        # cap the key string at 100 chars to keep output readable
        obj = str(obj)[:100]
        newTree[obj] = {}
        r_convertTree(oldTree[key], newTree[obj], idList)
def pretty_print(tree):
    """Print a nested-dict tree, one node per line, indented by depth."""
    for name in tree.keys():
        print name
        r_pretty_print(tree[name], 0)
def r_pretty_print(tree, num):
    """Recursive helper for pretty_print; num is the current depth."""
    num+=1
    for name in tree.keys():
        print "  "*num,name
        r_pretty_print(tree[name],num)
def isDefaultValue(x):
    """Return True if x equals the 'zero' value of its own type
    (e.g. 0, 0.0, '', [], {}, ())."""
    blank = type(x)()
    return x == blank
def appendStr(obj, st):
    """adds a string onto the __str__ output of an instance"""
    def appendedStr(oldStr, st, self):
        # chain the previous __str__ and append the new suffix
        return oldStr() + st
    oldStr = getattr(obj, '__str__', None)
    if oldStr is None:
        # no existing __str__: capture the current str() output as the base
        def stringer(s):
            return s
        oldStr = Functor(stringer, str(obj))
        stringer = None
    # NOTE(review): binds __str__ on the *instance*; this only takes effect
    # for old-style classes -- confirm callers rely on that.
    obj.__str__ = types.MethodType(Functor(appendedStr, oldStr, st), obj, obj.__class__)
    appendedStr = None
    return obj
class ScratchPad:
    """empty class to stick values onto"""
    def __init__(self, **kArgs):
        # every keyword becomes an attribute; _keys tracks what we own
        for key, value in kArgs.iteritems():
            setattr(self, key, value)
        self._keys = set(kArgs.keys())
    def add(self, **kArgs):
        # add/overwrite attributes after construction
        for key, value in kArgs.iteritems():
            setattr(self, key, value)
        self._keys.update(kArgs.keys())
    def destroy(self):
        # remove every attribute this pad set, breaking reference cycles
        for key in self._keys:
            delattr(self, key)
    # allow dict [] syntax
    def __getitem__(self, itemName):
        return getattr(self, itemName)
    def get(self, itemName, default=None):
        return getattr(self, itemName, default)
    # allow 'in'
    def __contains__(self, itemName):
        return itemName in self._keys
class Sync:
    """Versioned value used to detect whether two objects are in sync:
    each Sync has a series (family) id and a change counter."""
    _SeriesGen = SerialNumGen()
    def __init__(self, name, other=None):
        self._name = name
        if other is None:
            # fresh family with its own series number
            self._series = self._SeriesGen.next()
            self._value = 0
        else:
            # join the other Sync's family at its current version
            self._series = other._series
            self._value = other._value
    def invalidate(self):
        # force the next sync() to report out-of-date
        self._value = None
    def change(self):
        # bump the version; peers must re-sync
        self._value += 1
    def sync(self, other):
        # adopt other's series/value; True if we were out of date
        if (self._series != other._series) or (self._value != other._value):
            self._series = other._series
            self._value = other._value
            return True
        else:
            return False
    def isSynced(self, other):
        return ((self._series == other._series) and
                (self._value == other._value))
    def __repr__(self):
        return '%s(%s)<family=%s,value=%s>' % (self.__class__.__name__,
                                               self._name, self._series, self._value)
def itype(obj):
    # version of type that gives more complete information about instance types
    global dtoolSuperBase
    t = type(obj)
    if t is types.InstanceType:
        # old-style class instance: include the actual class name
        return '%s of <class %s>>' % (repr(types.InstanceType)[:-1],
                                      str(obj.__class__))
    else:
        # C++ object instances appear to be types via type()
        # check if this is a C++ object
        if dtoolSuperBase is None:
            _getDtoolSuperBase()
        if isinstance(obj, dtoolSuperBase):
            return '%s of %s>' % (repr(types.InstanceType)[:-1],
                                  str(obj.__class__))
        return t
def deeptype(obj, maxLen=100, _visitedIds=None):
    """Like itype, but recurses into tuples/lists/dicts, rendering the
    nested *types* of the contents (capped at maxLen elements per level).
    _visitedIds breaks reference cycles; internal use only."""
    if _visitedIds is None:
        _visitedIds = set()
    if id(obj) in _visitedIds:
        return '<ALREADY-VISITED %s>' % itype(obj)
    t = type(obj)
    if t in (types.TupleType, types.ListType):
        s = ''
        s += {types.TupleType: '(',
              types.ListType: '[',}[type(obj)]
        if maxLen is not None and len(obj) > maxLen:
            o = obj[:maxLen]
            ellips = '...'
        else:
            o = obj
            ellips = ''
        _visitedIds.add(id(obj))
        for item in o:
            s += deeptype(item, maxLen, _visitedIds=_visitedIds)
            s += ', '
        _visitedIds.remove(id(obj))
        s += ellips
        s += {types.TupleType: ')',
              types.ListType: ']',}[type(obj)]
        return s
    elif type(obj) is types.DictType:
        s = '{'
        if maxLen is not None and len(obj) > maxLen:
            o = obj.keys()[:maxLen]
            ellips = '...'
        else:
            o = obj.keys()
            ellips = ''
        _visitedIds.add(id(obj))
        for key in o:
            value = obj[key]
            s += '%s: %s, ' % (deeptype(key, maxLen, _visitedIds=_visitedIds),
                               deeptype(value, maxLen, _visitedIds=_visitedIds))
        _visitedIds.remove(id(obj))
        s += ellips
        s += '}'
        return s
    else:
        # leaf: just render the (instance-aware) type
        return str(itype(obj))
def getNumberedTypedString(items, maxLen=5000, numPrefix=''):
    """get a string that has each item of the list on its own line,
    and each item is numbered on the left from zero"""
    # count the digits needed to zero-pad the index column
    digits = 0
    n = len(items)
    while n > 0:
        digits += 1
        n //= 10
    digits = digits
    format = numPrefix + '%0' + '%s' % digits + 'i:%s \t%s'
    first = True
    s = ''
    snip = '<SNIP>'
    for i in xrange(len(items)):
        if not first:
            s += '\n'
        first = False
        objStr = fastRepr(items[i])
        # truncate overly long reprs, marking the cut with <SNIP>
        if len(objStr) > maxLen:
            objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
        s += format % (i, itype(items[i]), objStr)
    return s
def getNumberedTypedSortedString(items, maxLen=5000, numPrefix=''):
    """get a string that has each item of the list on its own line,
    the items are stringwise-sorted, and each item is numbered on
    the left from zero"""
    # count the digits needed to zero-pad the index column
    digits = 0
    n = len(items)
    while n > 0:
        digits += 1
        n //= 10
    format = numPrefix + '%0' + '%s' % digits + 'i:%s \t%s'
    snip = '<SNIP>'
    # Pair each repr with its object so that after sorting, the type column
    # still describes the object on the same line. (Previously the reprs
    # were sorted independently of `items`, so line i showed the repr of one
    # object next to the type of a different one.)
    pairs = []
    for item in items:
        objStr = fastRepr(item)
        if len(objStr) > maxLen:
            objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
        pairs.append((objStr, item))
    pairs.sort(key=lambda pair: pair[0])
    first = True
    s = ''
    for i in xrange(len(pairs)):
        if not first:
            s += '\n'
        first = False
        objStr, item = pairs[i]
        s += format % (i, itype(item), objStr)
    return s
def getNumberedTypedSortedStringWithReferrersGen(items, maxLen=10000, numPrefix=''):
    """get a string that has each item of the list on its own line,
    the items are stringwise-sorted, the object's referrers are shown,
    and each item is numbered on the left from zero"""
    # count the digits needed to zero-pad the index column
    digits = 0
    n = len(items)
    while n > 0:
        digits += 1
        n //= 10
    format = numPrefix + '%0' + '%s' % digits + 'i:%s @ %s \t%s'
    snip = '<SNIP>'
    # Pair each repr with its object before sorting so the type/id/referrer
    # columns describe the same object as the repr on that line. (Previously
    # the reprs were sorted independently of `items`, mismatching them.)
    pairs = []
    for item in items:
        pairs.append((fastRepr(item), item))
    pairs.sort(key=lambda pair: pair[0])
    for i in xrange(len(pairs)):
        objStr, item = pairs[i]
        objStr += ', \tREFERRERS=['
        referrers = gc.get_referrers(item)
        for ref in referrers:
            objStr += '%s@%s, ' % (itype(ref), id(ref))
        objStr += ']'
        if len(objStr) > maxLen:
            objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
        yield format % (i, itype(item), id(item), objStr)
def getNumberedTypedSortedStringWithReferrers(items, maxLen=10000, numPrefix=''):
    """get a string that has each item of the list on its own line,
    the items are stringwise-sorted, the object's referrers are shown,
    and each item is numbered on the left from zero"""
    # eager wrapper around the generator version; joins all lines
    s = ''
    for line in getNumberedTypedSortedStringWithReferrersGen(items, maxLen, numPrefix):
        s += '%s\n' % line
    return s
def printNumberedTyped(items, maxLen=5000):
    """print out each item of the list on its own line,
    with each item numbered on the left from zero"""
    digits = 0
    n = len(items)
    while n > 0:
        digits += 1
        n //= 10
    digits = digits
    format = '%0' + '%s' % digits + 'i:%s \t%s'
    for i in xrange(len(items)):
        objStr = fastRepr(items[i])
        # truncate overly long reprs, marking the cut with <SNIP>
        if len(objStr) > maxLen:
            snip = '<SNIP>'
            objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
        print format % (i, itype(items[i]), objStr)
def printNumberedTypesGen(items, maxLen=5000):
    """Generator that prints the type of one item per iteration, numbered
    from zero; yields None after each print so work can be spread out."""
    digits = 0
    n = len(items)
    while n > 0:
        digits += 1
        n //= 10
    digits = digits
    format = '%0' + '%s' % digits + 'i:%s'
    for i in xrange(len(items)):
        print format % (i, itype(items[i]))
        yield None
def printNumberedTypes(items, maxLen=5000):
    """print out the type of each item of the list on its own line,
    with each item numbered on the left from zero"""
    # NOTE(review): this contains `yield`, so it is itself a generator --
    # simply calling it prints nothing until the result is iterated.
    for result in printNumberedTypesGen(items, maxLen):
        yield result
class DelayedCall:
    """ calls a func after a specified delay """
    def __init__(self, func, name=None, delay=None):
        if name is None:
            name = 'anonymous'
        if delay is None:
            # default to a very short delay (effectively 'next tick')
            delay = .01
        self._func = func
        self._taskName = 'DelayedCallback-%s' % name
        self._delay = delay
        self._finished = False
        self._addDoLater()
    def destroy(self):
        # cancel without calling the func
        self._finished = True
        self._removeDoLater()
    def finish(self):
        # call the func now (if not already called) and clean up
        if not self._finished:
            self._doCallback()
        self.destroy()
    def _addDoLater(self):
        # schedule via the global task manager
        taskMgr.doMethodLater(self._delay, self._doCallback, self._taskName)
    def _removeDoLater(self):
        taskMgr.remove(self._taskName)
    def _doCallback(self, task):
        self._finished = True
        # drop our reference before calling, in case func destroys us
        func = self._func
        del self._func
        func()
class FrameDelayedCall:
    """ calls a func after N frames """
    def __init__(self, name, callback, frames=None, cancelFunc=None):
        # checkFunc is optional; called every frame, if returns True, FrameDelay is cancelled
        # and callback is not called
        if frames is None:
            frames = 1
        self._name = name
        self._frames = frames
        self._callback = callback
        self._cancelFunc = cancelFunc
        self._taskName = uniqueName('%s-%s' % (self.__class__.__name__, self._name))
        self._finished = False
        self._startTask()
    def destroy(self):
        # cancel without calling the callback
        self._finished = True
        self._stopTask()
    def finish(self):
        # call the callback now (if not already called) and clean up
        if not self._finished:
            self._finished = True
            self._callback()
        self.destroy()
    def _startTask(self):
        taskMgr.add(self._frameTask, self._taskName)
        self._counter = 0
    def _stopTask(self):
        taskMgr.remove(self._taskName)
    def _frameTask(self, task):
        # runs once per frame; counts frames until the target is reached
        if self._cancelFunc and self._cancelFunc():
            self.destroy()
            return task.done
        self._counter += 1
        if self._counter >= self._frames:
            self.finish()
            return task.done
        return task.cont
class DelayedFunctor:
    """ Waits for this object to be called, then calls supplied functor after a delay.
    Effectively inserts a time delay between the caller and the functor. """
    def __init__(self, functor, name=None, delay=None):
        self._functor = functor
        self._name = name
        # FunctionInterval requires __name__
        self.__name__ = self._name
        self._delay = delay
    def _callFunctor(self):
        # bind the stored call args, then drop every reference we hold so
        # nothing is kept alive after the one-shot fires
        cb = Functor(self._functor, *self._args, **self._kwArgs)
        del self._functor
        del self._name
        del self._delay
        del self._args
        del self._kwArgs
        del self._delayedCall
        del self.__name__
        cb()
    def __call__(self, *args, **kwArgs):
        # capture the call and schedule the real invocation
        self._args = args
        self._kwArgs = kwArgs
        self._delayedCall = DelayedCall(self._callFunctor, self._name, self._delay)
class SubframeCall:
    """Calls a callback at a specific time during the frame using the
    task system"""
    def __init__(self, functor, taskPriority, name=None):
        self._functor = functor
        self._name = name
        self._taskName = uniqueName('SubframeCall-%s' % self._name)
        # the taskPriority determines where within the frame we run
        taskMgr.add(self._doCallback,
                    self._taskName,
                    priority=taskPriority)
    def _doCallback(self, task):
        # one-shot: call, then drop references
        functor = self._functor
        del self._functor
        functor()
        del self._name
        self._taskName = None
        return task.done
    def cleanup(self):
        # cancel the pending call, if it hasn't run yet
        if (self._taskName):
            taskMgr.remove(self._taskName)
            self._taskName = None
class PStatScope:
    """Maintains a stack of nested PStat collector names (levels) and
    starts/stops the matching PStatCollector for the joined label."""
    # shared cache of label -> PStatCollector across all scopes
    collectors = {}
    def __init__(self, level = None):
        self.levels = []
        if level:
            self.levels.append(level)
    def copy(self, push = None):
        # duplicate this scope, optionally pushing one more level
        c = PStatScope()
        c.levels = self.levels[:]
        if push:
            c.push(push)
        return c
    def __repr__(self):
        return 'PStatScope - \'%s\'' % (self,)
    def __str__(self):
        return ':'.join(self.levels)
    def push(self, level):
        # underscores are stripped; ':' is the PStats level separator
        self.levels.append(level.replace('_',''))
    def pop(self):
        return self.levels.pop()
    def start(self, push = None):
        if push:
            self.push(push)
            pass
        self.getCollector().start()
    def stop(self, pop = False):
        self.getCollector().stop()
        if pop:
            self.pop()
    def getCollector(self):
        # lazily create and cache the collector for the current label
        label = str(self)
        if label not in self.collectors:
            from panda3d.core import PStatCollector
            self.collectors[label] = PStatCollector(label)
            pass
        # print '  ',self.collectors[label]
        return self.collectors[label]
def pstatcollect(scope, level = None):
    """Decorator factory: wraps a function so its execution is timed under
    the given PStatScope (using `level` or the function name as the label).
    A no-op unless __dev__ or 'force-pstatcollect' is set."""
    def decorator(f):
        return f
    try:
        # bail out (returning the identity decorator) when profiling is off
        if not (__dev__ or ConfigVariableBool('force-pstatcollect', False)) or \
           not scope:
            return decorator
        def decorator(f):
            def wrap(*args, **kw):
                scope.start(push = (level or f.__name__))
                val = f(*args, **kw)
                scope.stop(pop = True)
                return val
            return wrap
        pass
    except:
        # __dev__/config may not exist yet; fall back to the no-op decorator
        pass
    return decorator
# current nesting depth of active @report-decorated calls (for indentation)
__report_indent = 0
def report(types = [], prefix = '', xform = None, notifyFunc = None, dConfigParam = []):
    """
    This is a decorator generating function. Use is similar to
    a @decorator, except you must be sure to call it as a function.
    It actually returns the decorator which is then used to transform
    your decorated function. Confusing at first, I know.

    Decoration occurs at function definition time.

    If __dev__ is not defined, or resolves to False, this function
    has no effect and no wrapping/transform occurs.  So in production,
    it's as if the report has been asserted out.

    Parameters::
    types : A subset list of ['timeStamp', 'frameCount', 'avLocation']
            This allows you to specify certain useful bits of info.

            module:     Prints the module that this report statement
                        can be found in.
            args:       Prints the arguments as they were passed to
                        this function.
            timeStamp:  Adds the current frame time to the output.
            deltaStamp: Adds the current AI synched frame time to
                        the output
            frameCount: Adds the current frame count to the output.
                        Usually cleaner than the timeStamp output.
            avLocation: Adds the localAvatar's network location
                        to the output.  Useful for interest debugging.
            interests:  Prints the current interest state after the
                        report.
            stackTrace: Prints a stack trace after the report.

    prefix: Optional string to prepend to output, just before the function.
            Allows for easy grepping and is useful when merging AI/Client
            reports into a single file.

    xform:  Optional callback that accepts a single parameter: argument 0 to
            the decorated function. (assumed to be 'self')
            It should return a value to be inserted into the report output string.

    notifyFunc: A notify function such as info, debug, warning, etc.
                By default the report will be printed to stdout. This
                will allow you send the report to a designated 'notify'
                output.

    dConfigParam: A list of Config.prc string variables.
                  By default the report will always print. If you
                  specify this param, it will only print if one of the
                  specified config strings resolve to True.
    """
    def indent(str):
        global __report_indent
        return ' '*__report_indent+str
    # identity decorator returned whenever reporting is disabled
    def decorator(f):
        return f
    try:
        if not (__dev__ or config.GetBool('force-reports', 0)):
            return decorator
        # determine whether we should use the decorator
        # based on the value of dConfigParam.
        dConfigParamList = []
        doPrint = False
        if not dConfigParam:
            doPrint = True
        else:
            if not isinstance(dConfigParam, (list,tuple)):
                dConfigParams = (dConfigParam,)
            else:
                dConfigParams = dConfigParam
            dConfigParamList = [param for param in dConfigParams \
                                if config.GetBool('want-%s-report' % (param,), 0)]
            doPrint = bool(dConfigParamList)
            pass
        if not doPrint:
            return decorator
        # Determine any prefixes defined in our Config.prc.
        if prefix:
            prefixes = set([prefix])
        else:
            prefixes = set()
            pass
        for param in dConfigParamList:
            prefix = config.GetString('prefix-%s-report' % (param,), '')
            if prefix:
                prefixes.add(prefix)
                pass
            pass
    except NameError,e:
        # __dev__/config not defined yet: reporting unavailable, no-op
        return decorator
    from direct.distributed.ClockDelta import globalClockDelta
    def decorator(f):
        def wrap(*args,**kwargs):
            # build the "funcName(args)" portion of the report line
            if args:
                rArgs = [args[0].__class__.__name__ + ', ']
            else:
                rArgs = []
            if 'args' in types:
                rArgs += [repr(x)+', ' for x in args[1:]] + \
                         [ x + ' = ' + '%s, ' % repr(y) for x,y in kwargs.items()]
            if not rArgs:
                rArgs = '()'
            else:
                rArgs = '(' + reduce(str.__add__,rArgs)[:-2] + ')'
            outStr = '%s%s' % (f.func_name, rArgs)
            # Insert prefix place holder, if needed
            if prefixes:
                outStr = '%%s %s' % (outStr,)
            if 'module' in types:
                outStr = '%s {M:%s}' % (outStr, f.__module__.split('.')[-1])
            if 'frameCount' in types:
                outStr = '%-8d : %s' % (globalClock.getFrameCount(), outStr)
            if 'timeStamp' in types:
                outStr = '%-8.3f : %s' % (globalClock.getFrameTime(), outStr)
            if 'deltaStamp' in types:
                outStr = '%-8.2f : %s' % (globalClock.getRealTime() - \
                                          globalClockDelta.delta, outStr)
            if 'avLocation' in types:
                outStr = '%s : %s' % (outStr, str(localAvatar.getLocation()))
            if xform:
                outStr = '%s : %s' % (outStr, xform(args[0]))
            if prefixes:
                # This will print the same report once for each prefix
                for prefix in prefixes:
                    if notifyFunc:
                        notifyFunc(outStr % (prefix,))
                    else:
                        print indent(outStr % (prefix,))
            else:
                if notifyFunc:
                    notifyFunc(outStr)
                else:
                    print indent(outStr)
            if 'interests' in types:
                base.cr.printInterestSets()
            if 'stackTrace' in types:
                print StackTrace()
            # indent nested reports while the wrapped function runs
            global __report_indent
            rVal = None
            try:
                __report_indent += 1
                rVal = f(*args,**kwargs)
            finally:
                __report_indent -= 1
                if rVal is not None:
                    print indent(' -> '+repr(rVal))
                    pass
                pass
            return rVal
        # preserve the wrapped function's identity attributes
        wrap.func_name = f.func_name
        wrap.func_dict = f.func_dict
        wrap.func_doc = f.func_doc
        wrap.__module__ = f.__module__
        return wrap
    return decorator
def getBase():
    """Return the global ShowBase ('base'), or the AI's 'simbase' when
    running on the AI side (where 'base' is not defined)."""
    try:
        return base
    except:
        return simbase
def getRepository():
    """Return the distributed-object repository: the client repository
    (base.cr) or, on the AI side, the AI repository (simbase.air)."""
    try:
        return base.cr
    except:
        return simbase.air
# lazily-created notify category used by the exceptionLogged decorator
exceptionLoggedNotify = None
def exceptionLogged(append=True):
    """decorator that outputs the function name and all arguments
    if an exception passes back through the stack frame
    if append is true, string is appended to the __str__ output of
    the exception. if append is false, string is printed to the log
    directly. If the output will take up many lines, it's recommended
    to set append to False so that the exception stack is not hidden
    by the output of this decorator.
    """
    try:
        null = not __dev__
    except:
        null = not __debug__
    if null:
        # if we're not in __dev__, just return the function itself. This
        # results in zero runtime overhead, since decorators are evaluated
        # at module-load.
        def nullDecorator(f):
            return f
        return nullDecorator
    def _decoratorFunc(f, append=append):
        global exceptionLoggedNotify
        if exceptionLoggedNotify is None:
            from direct.directnotify.DirectNotifyGlobal import directNotify
            exceptionLoggedNotify = directNotify.newCategory("ExceptionLogged")
        def _exceptionLogged(*args, **kArgs):
            try:
                return f(*args, **kArgs)
            except Exception, e:
                try:
                    # build a 'func(arg1, kw=val)' description of the call
                    s = '%s(' % f.func_name
                    for arg in args:
                        s += '%s, ' % arg
                    for key, value in kArgs.items():
                        s += '%s=%s, ' % (key, value)
                    if len(args) or len(kArgs):
                        s = s[:-2]
                    s += ')'
                    if append:
                        appendStr(e, '\n%s' % s)
                    else:
                        exceptionLoggedNotify.info(s)
                except:
                    # never let our own logging mask the original exception
                    exceptionLoggedNotify.info(
                        '%s: ERROR IN PRINTING' % f.func_name)
                raise
        _exceptionLogged.__doc__ = f.__doc__
        return _exceptionLogged
    return _decoratorFunc
# class 'decorator' that records the stack at the time of creation
# be careful with this, it creates a StackTrace, and that can take a
# lot of CPU
def recordCreationStack(cls):
if not hasattr(cls, '__init__'):
raise 'recordCreationStack: class \'%s\' must define __init__' % cls.__name__
cls.__moved_init__ = cls.__init__
def __recordCreationStack_init__(self, *args, **kArgs):
self._creationStackTrace = StackTrace(start=1)
return self.__moved_init__(*args, **kArgs)
def getCreationStackTrace(self):
return self._creationStackTrace
def getCreationStackTraceCompactStr(self):
return self._creationStackTrace.compact()
def printCreationStackTrace(self):
print self._creationStackTrace
cls.__init__ = __recordCreationStack_init__
cls.getCreationStackTrace = getCreationStackTrace
cls.getCreationStackTraceCompactStr = getCreationStackTraceCompactStr
cls.printCreationStackTrace = printCreationStackTrace
return cls
# like recordCreationStack but stores the stack as a compact stack list-of-strings
# scales well for memory usage
def recordCreationStackStr(cls):
if not hasattr(cls, '__init__'):
raise 'recordCreationStackStr: class \'%s\' must define __init__' % cls.__name__
cls.__moved_init__ = cls.__init__
def __recordCreationStackStr_init__(self, *args, **kArgs):
# store as list of strings to conserve memory
self._creationStackTraceStrLst = StackTrace(start=1).compact().split(',')
return self.__moved_init__(*args, **kArgs)
def getCreationStackTraceCompactStr(self):
return ','.join(self._creationStackTraceStrLst)
def printCreationStackTrace(self):
print ','.join(self._creationStackTraceStrLst)
cls.__init__ = __recordCreationStackStr_init__
cls.getCreationStackTraceCompactStr = getCreationStackTraceCompactStr
cls.printCreationStackTrace = printCreationStackTrace
return cls
# class 'decorator' that logs all method calls for a particular class
def logMethodCalls(cls):
    """Class decorator: wraps every callable attribute of cls so each call
    is logged (name plus args) via cls.notify.info before executing.
    Raises TypeError if cls has no 'notify' attribute."""
    if not hasattr(cls, 'notify'):
        # raising a plain string is invalid in Python >= 2.6 (it raised a
        # confusing TypeError); raise a real exception instead
        raise TypeError('logMethodCalls: class \'%s\' must have a notify' % cls.__name__)
    for name in dir(cls):
        method = getattr(cls, name)
        if hasattr(method, '__call__'):
            # factory closure so each wrapper binds its own 'method'
            def getLoggedMethodCall(method):
                def __logMethodCall__(obj, *args, **kArgs):
                    s = '%s(' % method.__name__
                    for arg in args:
                        try:
                            argStr = repr(arg)
                        except:
                            argStr = 'bad repr: %s' % arg.__class__
                        s += '%s, ' % argStr
                    for karg, value in kArgs.items():
                        s += '%s=%s, ' % (karg, repr(value))
                    if len(args) or len(kArgs):
                        s = s[:-2]
                    s += ')'
                    obj.notify.info(s)
                    return method(obj, *args, **kArgs)
                return __logMethodCall__
            setattr(cls, name, getLoggedMethodCall(method))
    __logMethodCall__ = None
    return cls
# http://en.wikipedia.org/wiki/Golden_ratio
GoldenRatio = (1. + math.sqrt(5.)) / 2.

class GoldenRectangle:
    """Edge-length helpers for a rectangle with the golden aspect ratio."""
    @staticmethod
    def getLongerEdge(shorter):
        """Given the shorter edge, return the matching longer edge."""
        return GoldenRatio * shorter
    @staticmethod
    def getShorterEdge(longer):
        """Given the longer edge, return the matching shorter edge."""
        return longer / GoldenRatio
class HotkeyBreaker:
    """Dev tool: registers keyboard hotkeys that arm breakpoints; code can
    then call setBreakPt() to drop into pdb when the key has been pressed.
    All behavior is compiled out unless __dev__ is true."""
    def __init__(self,breakKeys = []):
        from direct.showbase.DirectObject import DirectObject
        self.do = DirectObject()
        # maps key name -> True when the key has been pressed (armed)
        self.breakKeys = {}
        if not isinstance(breakKeys, (list,tuple)):
            breakKeys = (breakKeys,)
        for key in breakKeys:
            self.addBreakKey(key)
    def addBreakKey(self,breakKey):
        if __dev__:
            self.do.accept(breakKey,self.breakFunc,extraArgs = [breakKey])
    def removeBreakKey(self,breakKey):
        if __dev__:
            self.do.ignore(breakKey)
    def breakFunc(self,breakKey):
        # key pressed: arm the breakpoint for that key
        if __dev__:
            self.breakKeys[breakKey] = True
    def setBreakPt(self, breakKey = None, persistent = False):
        # returns True so it can be used inside an assert; with no breakKey
        # it breaks unconditionally
        if __dev__:
            if not breakKey:
                import pdb;pdb.set_trace()
                return True
            else:
                if self.breakKeys.get(breakKey,False):
                    if not persistent:
                        # one-shot: disarm the key after breaking
                        self.breakKeys.pop(breakKey)
                    import pdb;pdb.set_trace()
                    return True
            return True
    def clearBreakPt(self, breakKey):
        # disarm; True if the key was armed
        if __dev__:
            return bool(self.breakKeys.pop(breakKey,None))
def nullGen():
    """A generator that yields nothing and stops immediately."""
    return
    # unreachable bare yield: its presence makes this function a generator
    yield
def loopGen(l):
    """Return a generator that yields the items of iterable l forever,
    re-iterating l from the start each time it is exhausted."""
    def _cycler(seq):
        while True:
            for element in seq:
                yield element
    result = _cycler(l)
    # drop the inner function reference so nothing extra is kept alive
    _cycler = None
    return result
def makeFlywheelGen(objects, countList=None, countFunc=None, scale=None):
    # iterates and finally yields a flywheel generator object
    # the number of appearances for each object is controlled by passing in
    # a list of counts, or a functor that returns a count when called with
    # an object from the 'objects' list.
    # if scale is provided, all counts are scaled by the scale value and then int()'ed.
    # NOTE: this is itself a generator; it yields None while doing setup work
    # (so the work can be spread over frames) and yields the flywheel last.
    def flywheel(index2objectAndCount):
        # generator to produce a sequence whose elements appear a specific number of times
        while len(index2objectAndCount):
            keyList = index2objectAndCount.keys()
            for key in keyList:
                if index2objectAndCount[key][1] > 0:
                    yield index2objectAndCount[key][0]
                    index2objectAndCount[key][1] -= 1
                    if index2objectAndCount[key][1] <= 0:
                        del index2objectAndCount[key]
    # if we were not given a list of counts, create it by calling countFunc
    if countList is None:
        countList = []
        for object in objects:
            yield None
            countList.append(countFunc(object))
    if scale is not None:
        # scale the counts if we've got a scale factor
        for i in xrange(len(countList)):
            yield None
            if countList[i] > 0:
                # nonzero counts never scale below 1
                countList[i] = max(1, int(countList[i] * scale))
    # create a dict for the flywheel to use during its iteration to efficiently select
    # the objects for the sequence
    index2objectAndCount = {}
    for i in xrange(len(countList)):
        yield None
        index2objectAndCount[i] = [objects[i], countList[i]]
    # create the flywheel generator
    yield flywheel(index2objectAndCount)
def flywheel(*args, **kArgs):
    # create a flywheel generator
    # see arguments and comments in flywheelGen above
    # example usage:
    """
    >>> for i in flywheel([1,2,3], countList=[10, 5, 1]):
    ...   print i,
    ...
    1 2 3 1 2 1 2 1 2 1 2 1 1 1 1 1
    """
    # drain the setup generator synchronously; the final yielded value is
    # the actual flywheel generator
    for flywheel in makeFlywheelGen(*args, **kArgs):
        pass
    return flywheel
# self-test for flywheel(); runs only when this module is executed directly
if __debug__ and __name__ == '__main__':
    f = flywheel(['a','b','c','d'], countList=[11,20,3,4])
    obj2count = {}
    for obj in f:
        obj2count.setdefault(obj, 0)
        obj2count[obj] += 1
    assert obj2count['a'] == 11
    assert obj2count['b'] == 20
    assert obj2count['c'] == 3
    assert obj2count['d'] == 4

    f = flywheel([1,2,3,4], countFunc=lambda x: x*2)
    obj2count = {}
    for obj in f:
        obj2count.setdefault(obj, 0)
        obj2count[obj] += 1
    assert obj2count[1] == 2
    assert obj2count[2] == 4
    assert obj2count[3] == 6
    assert obj2count[4] == 8

    f = flywheel([1,2,3,4], countFunc=lambda x: x, scale = 3)
    obj2count = {}
    for obj in f:
        obj2count.setdefault(obj, 0)
        obj2count[obj] += 1
    assert obj2count[1] == 1 * 3
    assert obj2count[2] == 2 * 3
    assert obj2count[3] == 3 * 3
    assert obj2count[4] == 4 * 3
def quickProfile(name="unnamed"):
    """Decorator factory for quick profiling, gated on the 'use-profiler'
    config variable. With 'profile-debug' off it just prints wall-clock
    timings; with it on, it stores pstats.Stats objects in base.stats[name]."""
    import pstats
    def profileDecorator(f):
        if(not config.GetBool("use-profiler",0)):
            return f
        def _profiled(*args, **kArgs):
            # must do this in here because we don't have base/simbase
            # at the time that PythonUtil is loaded
            if(not config.GetBool("profile-debug",0)):
                #dumb timings
                st=globalClock.getRealTime()
                f(*args,**kArgs)
                s=globalClock.getRealTime()-st
                print "Function %s.%s took %s seconds"%(f.__module__, f.__name__,s)
            else:
                import profile as prof, pstats
                #detailed profile, stored in base.stats under (
                if(not hasattr(base,"stats")):
                    base.stats={}
                if(not base.stats.get(name)):
                    base.stats[name]=[]
                # profile into a temp file, then load/normalize the stats
                prof.runctx('f(*args, **kArgs)', {'f':f,'args':args,'kArgs':kArgs},None,"t.prof")
                s=pstats.Stats("t.prof")
                #p=hotshot.Profile("t.prof")
                #p.runctx('f(*args, **kArgs)', {'f':f,'args':args,'kArgs':kArgs},None)
                #s = hotshot.stats.load("t.prof")
                s.strip_dirs()
                s.sort_stats("cumulative")
                base.stats[name].append(s)
        _profiled.__doc__ = f.__doc__
        return _profiled
    return profileDecorator
def getTotalAnnounceTime():
    """Sum the announceGenerate times over all stats collected by
    quickProfile (stored in base.stats)."""
    td=0
    for objs in base.stats.values():
        for stat in objs:
            td+=getAnnounceGenerateTime(stat)
    return td
def getAnnounceGenerateTime(stat):
    """Return the largest cumulative time recorded for any function named
    'announceGenerate' in the given pstats.Stats object (0 if none)."""
    best = 0
    stats = stat.stats
    for funcKey in stats.keys():
        # pstats keys are (filename, lineno, funcname) tuples
        if funcKey[2] == "announceGenerate":
            # entry[3] is the cumulative time for that function
            cumulative = stats[funcKey][3]
            if cumulative > best:
                best = cumulative
    return best
def choice(condition, ifTrue, ifFalse):
    # equivalent of C++ (condition ? ifTrue : ifFalse)
    # note: both branch values are already evaluated by the caller
    return ifTrue if condition else ifFalse
class MiniLog:
    """In-memory, indented call log; collects lines and renders them as a
    framed block via str() or flush()."""
    def __init__(self, name):
        self.indent = 1
        self.name = name
        self.lines = []
    def __str__(self):
        return '%s\nMiniLog: %s\n%s\n%s\n%s' % \
               ('*'*50, self.name, '-'*50, '\n'.join(self.lines), '*'*50)
    def enterFunction(self, funcName, *args, **kw):
        # log 'funcName(args...)' and increase the indent for nested calls
        rArgs = [repr(x)+', ' for x in args] + \
                [ x + ' = ' + '%s, ' % repr(y) for x,y in kw.items()]
        if not rArgs:
            rArgs = '()'
        else:
            rArgs = '(' + reduce(str.__add__,rArgs)[:-2] + ')'
        line = '%s%s' % (funcName, rArgs)
        self.appendFunctionCall(line)
        self.indent += 1
        return line
    def exitFunction(self):
        self.indent -= 1
        return self.indent
    def appendFunctionCall(self, line):
        self.lines.append(' '*(self.indent*2) + line)
        return line
    def appendLine(self, line):
        # free-form annotation, wrapped in << >> to distinguish from calls
        self.lines.append(' '*(self.indent*2) + '<< ' + line + ' >>')
        return line
    def flush(self):
        # render and reset the log
        outStr = str(self)
        self.indent = 0
        self.lines = []
        return outStr
class MiniLogSentry:
    """RAII-style guard: logs function entry into a MiniLog on construction
    and exit when the sentry is garbage-collected. log may be None."""
    def __init__(self, log, funcName, *args, **kw):
        self.log = log
        if self.log:
            self.log.enterFunction(funcName, *args, **kw)
    def __del__(self):
        if self.log:
            self.log.exitFunction()
        del self.log
def logBlock(id, msg):
    """Print msg wrapped in numbered LOGBLOCK markers for easy grepping."""
    print '<< LOGBLOCK(%03d)' % id
    print str(msg)
    print '/LOGBLOCK(%03d) >>' % id
class HierarchyException(Exception):
    """Exception that carries an 'owner' tag alongside its description."""
    JOSWILSO = 0
    def __init__(self, owner, description):
        self.owner = owner
        self.desc = description
    def __str__(self):
        return '(%s): %s' % (self.owner, self.desc)
    def __repr__(self):
        return 'HierarchyException(%s)' % (self.owner, )
# __dev__ is not defined at import time, call this after it's defined
def recordFunctorCreationStacks():
    """Optionally instrument the global Functor class so every Functor
    remembers the stack at its creation (config 'record-functor-creation-stacks')."""
    global Functor
    from panda3d.direct import get_config_showbase
    config = get_config_showbase()
    # off by default, very slow
    if __dev__ and config.GetBool('record-functor-creation-stacks', 0):
        if not hasattr(Functor, '_functorCreationStacksRecorded'):
            # guard so re-calling doesn't wrap Functor twice
            Functor = recordCreationStackStr(Functor)
            Functor._functorCreationStacksRecorded = True
            Functor.__call__ = Functor._exceptionLoggedCreationStack__call__
def formatTimeCompact(seconds):
    """Format a duration in whole seconds as a compact string such as
    '1d3h22m43s'. Only the units required are included; e.g. 64 -> '1m4s',
    0 -> '0s', 86400 -> '1d0h0m0s'."""
    remaining = int(seconds)
    remaining, secs = divmod(remaining, 60)
    result = '%ss' % secs
    if remaining > 0:
        remaining, mins = divmod(remaining, 60)
        result = '%sm' % mins + result
        if remaining > 0:
            days, hours = divmod(remaining, 24)
            result = '%sh' % hours + result
            if days > 0:
                result = '%sd' % days + result
    return result
# self-test for formatTimeCompact(); runs only when executed directly
if __debug__ and __name__ == '__main__':
    ftc = formatTimeCompact
    assert ftc(0) == '0s'
    assert ftc(1) == '1s'
    assert ftc(60) == '1m0s'
    assert ftc(64) == '1m4s'
    assert ftc(60*60) == '1h0m0s'
    assert ftc(24*60*60) == '1d0h0m0s'
    assert ftc(24*60*60 + 2*60*60 + 34*60 + 12) == '1d2h34m12s'
    del ftc
def formatTimeExact(seconds):
    # like formatTimeCompact but leaves off '0 seconds', '0 minutes' etc. for
    # times that are e.g. 1 hour, 3 days etc.
    # returns string in format '1d3h22m43s'
    total = int(seconds)
    total, secs = divmod(total, 60)
    total, mins = divmod(total, 60)
    days, hours = divmod(total, 24)
    result = ''
    if days:
        result += '%sd' % days
    # a unit is shown if any larger unit is shown AND any unit from here
    # down is non-zero (interior zeros are kept, trailing zeros dropped)
    if (days or hours) and (hours or mins or secs):
        result += '%sh' % hours
    if (days or hours or mins) and (mins or secs):
        result += '%sm' % mins
    if secs or result == '':
        result += '%ss' % secs
    return result
# self-test for formatTimeExact(); runs only when executed directly
if __debug__ and __name__ == '__main__':
    fte = formatTimeExact
    assert fte(0) == '0s'
    assert fte(1) == '1s'
    assert fte(2) == '2s'
    assert fte(61) == '1m1s'
    assert fte(60) == '1m'
    assert fte(60*60) == '1h'
    assert fte(24*60*60) == '1d'
    assert fte((24*60*60) + (2 * 60)) == '1d0h2m'
    del fte
class AlphabetCounter:
    # object that produces 'A', 'B', 'C', ... 'AA', 'AB', etc.
    # (spreadsheet-column-style labels)
    def __init__(self):
        # letters of the current label, most-significant first
        self._curCounter = ['A']
    def next(self):
        """Return the current label and advance the counter."""
        result = ''.join([c for c in self._curCounter])
        index = -1
        while True:
            curChar = self._curCounter[index]
            # BUGFIX: was `curChar is 'Z'` -- identity comparison against a
            # string literal only works via CPython interning; use equality
            if curChar == 'Z':
                nextChar = 'A'
                carry = True
            else:
                nextChar = chr(ord(self._curCounter[index])+1)
                carry = False
            self._curCounter[index] = nextChar
            if carry:
                if (-index) == len(self._curCounter):
                    # carried off the left end; grow by one letter
                    self._curCounter = ['A',] + self._curCounter
                    break
                else:
                    index -= 1
                    carry = False
            else:
                break
        return result
# self-test for AlphabetCounter; runs only when executed directly
if __debug__ and __name__ == '__main__':
    def testAlphabetCounter():
        tempList = []
        ac = AlphabetCounter()
        for i in xrange(26*3):
            tempList.append(ac.next())
        assert tempList == [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
                            'AA','AB','AC','AD','AE','AF','AG','AH','AI','AJ','AK','AL','AM','AN','AO','AP','AQ','AR','AS','AT','AU','AV','AW','AX','AY','AZ',
                            'BA','BB','BC','BD','BE','BF','BG','BH','BI','BJ','BK','BL','BM','BN','BO','BP','BQ','BR','BS','BT','BU','BV','BW','BX','BY','BZ',]
        ac = AlphabetCounter()
        num  = 26        # A-Z
        num += (26*26)   # AA-ZZ
        num += 26        # AAZ
        num += 1         # ABA
        num += 2         # ABC
        for i in xrange(num):
            x = ac.next()
        assert x == 'ABC'
    testAlphabetCounter()
    del testAlphabetCounter
# shared pdb instance and flag used by pandaTrace/pandaBreak below
globalPdb = None
traceCalled = False
def setupPdb():
    """Install a customized pdb instance as the global trace function,
    used by pandaTrace()/pandaBreak()."""
    import pdb;
    class pandaPdb(pdb.Pdb):
        def stop_here(self, frame):
            # when pandaTrace() armed us, defer to pdb's normal logic once
            global traceCalled
            if(traceCalled):
                result = pdb.Pdb.stop_here(self, frame)
                if(result == True):
                    traceCalled = False
                return result
            # otherwise only stop at explicit breakpoints (stopframe)
            if frame is self.stopframe:
                return True
            return False
    global globalPdb
    globalPdb = pandaPdb()
    globalPdb.reset()
    sys.settrace(globalPdb.trace_dispatch)
def pandaTrace():
    """Dev-only equivalent of pdb.set_trace(), using the shared pandaPdb
    instance (creating it on first use)."""
    if __dev__:
        if not globalPdb:
            setupPdb()
        global traceCalled
        # break in the caller's frame, not in pandaTrace itself
        globalPdb.set_trace(sys._getframe().f_back)
        traceCalled = True
# maps a top-level package name to the environment variable holding its
# source root; used by pandaBreak to resolve dotted paths to filenames
packageMap = {
    "toontown":"$TOONTOWN",
    "direct":"$DIRECT",
    "otp":"$OTP",
    "pirates":"$PIRATES",
    }
# assumes '.'-separated package paths for convenient linking to imports
def pandaBreak(dotpath, linenum, temporary = 0, cond = None):
    """Dev-only: set a pdb breakpoint by dotted module path (e.g.
    'direct.showbase.ShowBase') and line number, resolving the path via
    packageMap. NOTE(review): builds the path with '\\\\' separators, so
    this appears to be Windows-only -- confirm before relying on it elsewhere."""
    if __dev__:
        from panda3d.core import Filename
        if not globalPdb:
            setupPdb()
        dirs = dotpath.split(".")
        # expand e.g. $DIRECT to the package's source root
        root = Filename.expandFrom(packageMap[dirs[0]]).toOsSpecific()
        filename = root + "\\src"
        for d in dirs[1:]:
            filename="%s\\%s"%(filename,d)
        print filename
        globalPdb.set_break(filename+".py", linenum, temporary, cond)
class Default:
    # represents 'use the default value'
    # useful for keyword arguments to virtual methods
    # (sentinel class; never instantiated with state)
    pass
# file handle for startSuperLog/endSuperLog call tracing; None when inactive
superLogFile = None
def startSuperLog(customFunction = None):
    """Start tracing every Python function call (file, line, name, locals)
    to c:\\temp\\superLog.txt via sys.settrace. customFunction, if given,
    is called before and after each call and its result logged too."""
    global superLogFile
    if(not superLogFile):
        superLogFile = open("c:\\temp\\superLog.txt", "w")
        def trace_dispatch(a,b,c):
            # only log real function calls; skip module frames ('?') and
            # the safeRepr helpers (they'd recurse through the tracer)
            if(b=='call' and a.f_code.co_name != '?' and a.f_code.co_name.find("safeRepr")<0):
                vars = dict(a.f_locals)
                if 'self' in vars:
                    del vars['self']
                if '__builtins__' in vars:
                    del vars['__builtins__']
                for i in vars:
                    vars[i] = safeReprTypeOnFail(vars[i])
                if customFunction:
                    superLogFile.write( "before = %s\n"%customFunction())
                superLogFile.write( "%s(%s):%s:%s\n"%(a.f_code.co_filename.split("\\")[-1],a.f_code.co_firstlineno, a.f_code.co_name, vars))
                if customFunction:
                    superLogFile.write( "after = %s\n"%customFunction())
                return trace_dispatch
        sys.settrace(trace_dispatch)
def endSuperLog():
    """Tear down the tracing installed by startSuperLog, if any."""
    global superLogFile
    if not superLogFile:
        return
    sys.settrace(None)
    superLogFile.close()
    superLogFile = None
def isInteger(n):
    """Return True only for exact int/long instances (Python 2 types)."""
    ntype = type(n)
    return ntype is types.IntType or ntype is types.LongType
def configIsToday(configName):
    """Return True if the named config string spells today's date.

    Accepted formats: m/d/Y, m-d-Y and m.d.Y.
    TODO: replace usage of strptime with something else.
    """
    today = time.localtime()
    confStr = config.GetString(configName, '')
    for fmt in ('%m/%d/%Y', '%m-%d-%Y', '%m.%d.%Y'):
        try:
            confDate = time.strptime(confStr, fmt)
        except ValueError:
            continue
        # struct_time[:3] is (tm_year, tm_mon, tm_mday)
        if confDate[:3] == today[:3]:
            return True
    return False
def typeName(o):
    """Return the class name of instance *o*, falling back to o.__name__
    for objects without a __class__ attribute."""
    if not hasattr(o, '__class__'):
        return o.__name__
    return o.__class__.__name__
def safeTypeName(o):
    """Best-effort typeName() that never raises.

    Falls back to the raw type object (note: not a string) and finally
    to a fixed marker string if even that fails.
    """
    try:
        return typeName(o)
    except:
        try:
            return type(o)
        except:
            pass
    return '<failed safeTypeName()>'
def histogramDict(l):
    """Return {element: occurrence count} for the items of iterable *l*."""
    counts = {}
    for item in l:
        counts[item] = counts.get(item, 0) + 1
    return counts
def unescapeHtmlString(s):
    """Decode an application/x-www-form-urlencoded style string.

    '+' becomes a space and '%XX' (two hex digits) becomes chr(0xXX).
    A '%' within two characters of the end of the string is left
    literal, matching the original bounds check; invalid hex after '%'
    raises ValueError, as before.

    Builds the result with a list + join instead of repeated string
    concatenation (the old loop was quadratic in the worst case).
    """
    chars = []
    i = 0
    n = len(s)
    while i < n:
        char = s[i]
        if char == '+':
            char = ' '
        elif char == '%' and i < n - 2:
            char = chr(int(s[i + 1:i + 3], 16))
            i += 2
        i += 1
        chars.append(char)
    return ''.join(chars)
# Lightweight self-test of unescapeHtmlString, executed only when this
# module is run directly with assertions enabled.
if __debug__ and __name__ == '__main__':
    assert unescapeHtmlString('asdf') == 'asdf'
    assert unescapeHtmlString('as+df') == 'as df'
    assert unescapeHtmlString('as%32df') == 'as2df'
    assert unescapeHtmlString('asdf%32') == 'asdf2'
def unicodeUtf8(s):
    """Coerce any object to a Python 2 unicode string.

    Unicode input is returned unchanged; anything else is str()-ified
    and decoded as UTF-8.
    """
    # * -> Unicode UTF-8
    if type(s) is types.UnicodeType:
        return s
    else:
        return unicode(str(s), 'utf-8')
def encodedUtf8(s):
    """Coerce any object to an 8-bit UTF-8 encoded str (Python 2)."""
    # * -> 8-bit-encoded UTF-8
    return unicodeUtf8(s).encode('utf-8')
# Publish the utilities above as Python 2 builtins so the rest of the
# codebase can use them without importing this module explicitly.
import __builtin__
__builtin__.Functor = Functor
__builtin__.Stack = Stack
__builtin__.Queue = Queue
__builtin__.Enum = Enum
__builtin__.SerialNumGen = SerialNumGen
__builtin__.SerialMaskedGen = SerialMaskedGen
__builtin__.ScratchPad = ScratchPad
__builtin__.uniqueName = uniqueName
__builtin__.serialNum = serialNum
__builtin__.profiled = profiled
__builtin__.itype = itype
__builtin__.exceptionLogged = exceptionLogged
__builtin__.appendStr = appendStr
__builtin__.bound = bound
__builtin__.clamp = clamp
__builtin__.lerp = lerp
__builtin__.makeList = makeList
__builtin__.makeTuple = makeTuple
__builtin__.printStack = printStack
__builtin__.printReverseStack = printReverseStack
__builtin__.printVerboseStack = printVerboseStack
__builtin__.DelayedCall = DelayedCall
__builtin__.DelayedFunctor = DelayedFunctor
__builtin__.FrameDelayedCall = FrameDelayedCall
__builtin__.SubframeCall = SubframeCall
__builtin__.invertDict = invertDict
__builtin__.invertDictLossless = invertDictLossless
__builtin__.getBase = getBase
__builtin__.getRepository = getRepository
__builtin__.safeRepr = safeRepr
__builtin__.fastRepr = fastRepr
__builtin__.nullGen = nullGen
__builtin__.flywheel = flywheel
__builtin__.loopGen = loopGen
__builtin__.StackTrace = StackTrace
__builtin__.choice = choice
__builtin__.report = report
__builtin__.pstatcollect = pstatcollect
__builtin__.MiniLog = MiniLog
__builtin__.MiniLogSentry = MiniLogSentry
__builtin__.logBlock = logBlock
__builtin__.HierarchyException = HierarchyException
__builtin__.deeptype = deeptype
__builtin__.Default = Default
__builtin__.isInteger = isInteger
__builtin__.configIsToday = configIsToday
__builtin__.typeName = typeName
__builtin__.safeTypeName = safeTypeName
__builtin__.histogramDict = histogramDict
__builtin__.unicodeUtf8 = unicodeUtf8
__builtin__.encodedUtf8 = encodedUtf8
|
mgracer48/panda3d
|
direct/src/showbase/PythonUtil.py
|
Python
|
bsd-3-clause
| 94,176
|
[
"Gaussian"
] |
996220a7b47780418e1c92b85747575e76b094e6f8c4bc270bb03102426f27a5
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Renderer / render window / interactor for the regression image.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Texture loaded from the test-data tree, shared by all textured actors.
pnmReader = vtk.vtkBMPReader()
pnmReader.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
texture = vtk.vtkTexture()
texture.SetInputConnection(pnmReader.GetOutputPort())
# Strip geometry: 5 points forming a single triangle strip (3 triangles).
triangleStripPoints = vtk.vtkPoints()
triangleStripPoints.SetNumberOfPoints(5)
triangleStripPoints.InsertPoint(0, 0, 1, 0)
triangleStripPoints.InsertPoint(1, 0, 0, .5)
triangleStripPoints.InsertPoint(2, 1, 1, .3)
triangleStripPoints.InsertPoint(3, 1, 0, .6)
triangleStripPoints.InsertPoint(4, 2, 1, .1)
# Per-point texture coordinates.
triangleStripTCoords = vtk.vtkFloatArray()
triangleStripTCoords.SetNumberOfComponents(2)
triangleStripTCoords.SetNumberOfTuples(5)
triangleStripTCoords.InsertTuple2(0, 0, 1)
triangleStripTCoords.InsertTuple2(1, 0, 0)
triangleStripTCoords.InsertTuple2(2, .5, 1)
triangleStripTCoords.InsertTuple2(3, .5, 0)
triangleStripTCoords.InsertTuple2(4, 1, 1)
# Per-point scalars (only point 0 nonzero).
triangleStripPointScalars = vtk.vtkFloatArray()
triangleStripPointScalars.SetNumberOfTuples(5)
triangleStripPointScalars.InsertValue(0, 1)
triangleStripPointScalars.InsertValue(1, 0)
triangleStripPointScalars.InsertValue(2, 0)
triangleStripPointScalars.InsertValue(3, 0)
triangleStripPointScalars.InsertValue(4, 0)
# One cell scalar for the whole strip.
triangleStripCellScalars = vtk.vtkFloatArray()
triangleStripCellScalars.SetNumberOfTuples(1)
triangleStripCellScalars.InsertValue(0, 1)
# Per-point normals.
triangleStripPointNormals = vtk.vtkFloatArray()
triangleStripPointNormals.SetNumberOfComponents(3)
triangleStripPointNormals.SetNumberOfTuples(5)
triangleStripPointNormals.InsertTuple3(0, 0, 0, 1)
triangleStripPointNormals.InsertTuple3(1, 0, 1, 0)
triangleStripPointNormals.InsertTuple3(2, 0, 1, 1)
triangleStripPointNormals.InsertTuple3(3, 1, 0, 0)
triangleStripPointNormals.InsertTuple3(4, 1, 0, 1)
# One cell normal for the whole strip.
triangleStripCellNormals = vtk.vtkFloatArray()
triangleStripCellNormals.SetNumberOfComponents(3)
triangleStripCellNormals.SetNumberOfTuples(1)
triangleStripCellNormals.InsertTuple3(0, 1, 1, 1)
# The strip cell referencing points 0..4.
aTriangleStrip = vtk.vtkTriangleStrip()
aTriangleStrip.GetPointIds().SetNumberOfIds(5)
aTriangleStrip.GetPointIds().SetId(0, 0)
aTriangleStrip.GetPointIds().SetId(1, 1)
aTriangleStrip.GetPointIds().SetId(2, 2)
aTriangleStrip.GetPointIds().SetId(3, 3)
aTriangleStrip.GetPointIds().SetId(4, 4)
# 5-entry color lookup table shared by all mappers.
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(5)
lut.SetTableValue(0, 0, 0, 1, 1)
lut.SetTableValue(1, 0, 1, 0, 1)
lut.SetTableValue(2, 0, 1, 1, 1)
lut.SetTableValue(3, 1, 0, 0, 1)
lut.SetTableValue(4, 1, 0, 1, 1)
# Build one actor per (cell type, attribute mask) combination, laid out on
# a 6-wide grid.  Mask bits select which attributes are attached:
#   1 = point normals, 2 = point scalars, 4 = texture coords,
#   8 = cell scalars, 16 = cell normals.
# The pipeline objects are created via exec/eval with numbered names
# (grid0, mapper0, actor0, ...) -- kept as-is for regression fidelity.
masks = [0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 18, 20, 22, 26, 30]
types = ["strip", "triangle"]
i = 0
j = 0
k = 0
# NOTE: 'type' shadows the builtin inside this loop (kept as-is).
for type in types:
    for mask in masks:
        idx = str(i)
        exec("grid" + idx + " = vtk.vtkUnstructuredGrid()")
        eval("grid" + idx).Allocate(1, 1)
        eval("grid" + idx).InsertNextCell(
            aTriangleStrip.GetCellType(), aTriangleStrip.GetPointIds())
        eval("grid" + idx).SetPoints(triangleStripPoints)
        exec("geometry" + idx + " = vtk.vtkGeometryFilter()")
        eval("geometry" + idx).SetInputData(eval("grid" + idx))
        exec("triangles" + idx + " = vtk.vtkTriangleFilter()")
        eval("triangles" + idx).SetInputConnection(
            eval("geometry" + idx).GetOutputPort())
        exec("mapper" + idx + " = vtk.vtkPolyDataMapper()")
        # "strip" renders the raw strip; "triangle" renders it split into
        # individual triangles by vtkTriangleFilter.
        if (type == "strip"):
            eval("mapper" + idx).SetInputConnection(
                eval("geometry" + idx).GetOutputPort())
        if (type == "triangle"):
            eval("mapper" + idx).SetInputConnection(
                eval("triangles" + idx).GetOutputPort())
        eval("mapper" + idx).SetLookupTable(lut)
        eval("mapper" + idx).SetScalarRange(0, 4)
        exec("actor" + idx + " = vtk.vtkActor()")
        eval("actor" + idx).SetMapper(eval("mapper" + idx))
        if mask & 1 != 0:
            eval("grid" + idx).GetPointData().SetNormals(
                triangleStripPointNormals)
        if mask & 2 != 0:
            eval("grid" + idx).GetPointData().SetScalars(
                triangleStripPointScalars)
            eval("mapper" + idx).SetScalarModeToUsePointData()
        if mask & 4 != 0:
            eval("grid" + idx).GetPointData().SetTCoords(
                triangleStripTCoords)
            eval("actor" + idx).SetTexture(texture)
        if mask & 8 != 0:
            eval("grid" + idx).GetCellData().SetScalars(
                triangleStripCellScalars)
            eval("mapper" + idx).SetScalarModeToUseCellData()
        if mask & 16 != 0:
            eval("grid" + idx).GetCellData().SetNormals(
                triangleStripCellNormals)
        # place the actor on the next grid slot (6 columns per row)
        eval("actor" + idx).AddPosition(j * 2, k * 2, 0)
        ren1.AddActor(eval("actor" + idx))
        eval("actor" + idx).GetProperty().SetRepresentationToWireframe()
        j += 1
        if (j >= 6):
            j = 0
            k += 1
        i += 1
# Final window setup and render for the regression comparison.
renWin.SetSize(480, 480)
ren1.SetBackground(.7, .3, .1)
ren1.ResetCameraClippingRange()
renWin.Render()
# render the image
#
iren.Initialize()
threshold = 15
#iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/PolyDataMapperAllWireframe.py
|
Python
|
gpl-3.0
| 5,236
|
[
"VTK"
] |
00fb9cc030a55fde93eba69d17718a5778f6abbe483f8122a04f5e71599e5250
|
class mesh(object,mesh_tools):
    def __init__(self,hex27=False,cpml=False,cpml_size=False,top_absorbing=False):
        """Collect mesh/block information from the current CUBIT session.

        hex27         -- export 27-node hexes (requires Cubit > 13)
        cpml          -- mark C-PML absorbing layers instead of Stacey only
        cpml_size     -- thickness of the C-PML layer (required if cpml)
        top_absorbing -- also absorb on the top (zmax) surface
        """
        super(mesh, self).__init__()
        # netCDF bookkeeping defaults (see _write_netcdf)
        self.netcdf_db=False
        self._netcdf_num_nod_hex=8
        self._netcdf_num_nod_quad=4
        # conventional block ids for the absorbing boundary face blocks
        self.abs_block_ids=[1001,1002,1003,1004,1005,1006]
        self._netcdf_len_string=33
        self._netcdf_four=4
        self._netcdf_len_name=33
        self._netcdf_len_line=81
        self.netcdf=False
        self.ncname=False
        # output filenames for the SPECFEM3D mesh files
        self.mesh_name='mesh_file'
        self.nodecoord_name='nodes_coords_file'
        self.material_name='materials_file'
        self.nummaterial_name='nummaterial_velocity_file'
        self.absname='absorbing_surface_file'
        self.cpmlname='absorbing_cpml_file'
        self.freename='free_or_absorbing_surface_file_zmax'
        self.recname='STATIONS'
        # face element type name depends on the Cubit version
        version_cubit=get_cubit_version()
        if version_cubit >= 15:
            self.face='SHELL'
        elif version_cubit >= 12:
            self.face='SHELL4'
        else:
            self.face='QUAD4'
        self.hex='HEX'
        if version_cubit <= 13:
            if hex27:
                print "ATTENTION **********************\n\nCubit <= 12.2 doesn't support HEX27\nassuming HEX8 .....\n\n"
            self.hex27=False
        else:
            self.hex27=hex27
        self.edge='BAR2'
        self.topo='face_topo'
        self.topography=None
        self.free=None
        self.freetxt='free'
        self.rec='receivers'
        self.cpml=cpml
        if cpml:
            if cpml_size:
                self.size=cpml_size
            else:
                print 'please specify cmpl size if you want to use cpml'
        self.top_absorbing=top_absorbing
        if hex27:
            # promote all volume blocks (not the boundary face blocks) to hex27
            cubit.cmd('block all except block 1001 1002 1003 1004 1005 1006 element type hex27')
            self._netcdf_num_nod_hex=27
            self._netcdf_num_nod_quad=9
        self.block_definition()
        self.ngll=5
        self.percent_gll=0.172
        self.point_wavelength=5
        # bounding box, updated lazily by nodescoord_write via get_extreme
        self.xmin=False
        self.ymin=False
        self.zmin=False
        self.xmax=False
        self.ymax=False
        self.zmax=False
        cubit.cmd('compress all')
def __repr__(self):
pass
    def block_definition(self):
        """Scan all CUBIT blocks and classify them.

        Populates self.block_mat / self.block_flag (volume blocks and
        their material flags), self.block_bc / self.block_bc_flag
        (boundary face blocks), self.material (per-block property
        tuples), self.bc, identifies the topography and free-surface
        face blocks, and looks up the 'receivers' nodeset.
        """
        block_flag=[]
        block_mat=[]
        block_bc=[]
        block_bc_flag=[]
        material={}
        bc={}
        blocks=cubit.get_block_id_list()
        for block in blocks:
            name=cubit.get_exodus_entity_name('block',block)
            ty=cubit.get_block_element_type(block)
            #print block,blocks,ty,self.hex,self.face
            if self.hex in ty:
                # volume block: extract material description from attributes
                flag=None
                vel=None
                vs=None
                rho=None
                q=0
                ani=0
                # material domain id
                if "acoustic" in name:
                    imaterial = 1
                elif "elastic" in name:
                    imaterial = 2
                elif "poroelastic" in name:
                    imaterial = 3
                else:
                    imaterial = 0
                #
                nattrib=cubit.get_block_attribute_count(block)
                if nattrib > 1:
                    # material flag:
                    #   positive => material properties,
                    #   negative => interface/tomography domain
                    flag=int(cubit.get_block_attribute_value(block,0))
                    if flag > 0 and nattrib >= 2:
                        # material properties
                        # vp
                        vel=cubit.get_block_attribute_value(block,1)
                        if nattrib >= 3:
                            # vs
                            vs=cubit.get_block_attribute_value(block,2)
                            if nattrib >= 4:
                                # density
                                rho=cubit.get_block_attribute_value(block,3)
                                if nattrib >= 5:
                                    # next: Q_kappa or Q_mu (new/old format style)
                                    q=cubit.get_block_attribute_value(block,4)
                                    if nattrib == 6:
                                        # only 6 parameters given (skipping Q_kappa ), old format style
                                        qmu = q
                                        #Q_kappa is 10 times stronger than Q_mu
                                        qk = q * 10
                                        # last entry is anisotropic flag
                                        ani=cubit.get_block_attribute_value(block,5)
                                    elif nattrib > 6:
                                        #Q_kappa
                                        qk=q
                                        #Q_mu
                                        qmu=cubit.get_block_attribute_value(block,5)
                                        if nattrib == 7:
                                            #anisotropy_flag
                                            ani=cubit.get_block_attribute_value(block,6)
                                    # for q to be valid: it must be positive
                                    if qk < 0 or qmu < 0:
                                        print 'error, Q value invalid:',qk,qmu
                                        break
                    elif flag < 0:
                        # interface/tomography domain
                        # velocity model
                        vel=name
                        attrib=cubit.get_block_attribute_value(block,1)
                        if attrib == 1:
                            kind='interface'
                            flag_down=cubit.get_block_attribute_value(block,2)
                            flag_up=cubit.get_block_attribute_value(block,3)
                        elif attrib == 2:
                            kind='tomography'
                elif nattrib == 1:
                    flag=cubit.get_block_attribute_value(block,0)
                    #print 'only 1 attribute ', name,block,flag
                    vel,vs,rho,qk,qmu,ani=(0,0,0,9999.,9999.,0)
                else:
                    # no attributes at all: fall back to the block id/name
                    flag=block
                    vel,vs,rho,qk,qmu,ani=(name,0,0,9999.,9999.,0)
                block_flag.append(int(flag))
                block_mat.append(block)
                if (flag > 0) and nattrib != 1:
                    par=tuple([imaterial,flag,vel,vs,rho,qk,qmu,ani])
                elif flag < 0 and nattrib != 1:
                    if kind=='interface':
                        par=tuple([imaterial,flag,kind,name,flag_down,flag_up])
                    elif kind=='tomography':
                        par=tuple([imaterial,flag,kind,name])
                elif flag==0 or nattrib == 1:
                    par=tuple([imaterial,flag,name])
                material[block]=par
            elif ty == self.face or ty == 'SHELL4':
                # boundary face block (absorbing / free surface)
                block_bc_flag.append(4)
                block_bc.append(block)
                bc[block]=4 #face has connectivity = 4
                if name == self.topo or block == 1001:
                    self.topography=block
                if self.freetxt in name:
                    self.free=block
            elif ty == 'SPHERE':
                pass
            else:
                # block elements differ from HEX8/QUAD4/SHELL4
                print '****************************************'
                print 'block not properly defined:'
                print ' name:',name
                print ' type:',ty
                print
                print 'please check your block definitions!'
                print
                print 'only supported types are:'
                print ' HEX/HEX8 for volumes'
                print ' QUAD4 for surface'
                print ' SHELL4 for surface'
                print '****************************************'
                continue
                # NOTE(review): unreachable -- the 'continue' above always
                # fires first; presumably leftover from an earlier version.
                return None, None,None,None,None,None,None,None
        nsets=cubit.get_nodeset_id_list()
        if len(nsets) == 0: self.receivers=None
        for nset in nsets:
            name=cubit.get_exodus_entity_name('nodeset',nset)
            if name == self.rec:
                self.receivers=nset
            else:
                print 'nodeset '+name+' not defined'
                self.receivers=None
        try:
            self.block_mat=block_mat
            self.block_flag=block_flag
            self.block_bc=block_bc
            self.block_bc_flag=block_bc_flag
            self.material=material
            self.bc=bc
            print 'HEX Blocks:'
            for m,f in zip(self.block_mat,self.block_flag):
                print 'block ',m,'material flag ',f
            print 'Absorbing Boundary Conditions:'
            for m,f in zip(self.block_bc,self.block_bc_flag):
                print 'bc ',m,'bc flag ',f
            print 'Topography (free surface)'
            print self.topography
            print 'Free surface'
            print self.free
        except:
            print '****************************************'
            print 'sorry, no blocks or blocks not properly defined'
            print block_mat
            print block_flag
            print block_bc
            print block_bc_flag
            print material
            print bc
            print '****************************************'
    def get_hex_connectivity(self,ind):
        """Return the node ids of hex element *ind* (8 nodes, or 27 when hex27).

        For hex27 the nodes are gathered through a temporary group --
        presumably because cubit.get_connectivity only yields corner
        nodes (TODO confirm against the CUBIT API docs).
        """
        if self.hex27:
            cubit.silent_cmd('group "nh" add Node in hex '+str(ind))
            group1 = cubit.get_id_from_name("nh")
            result=cubit.get_group_nodes(group1)
            if len(result) != 27:
                raise RuntimeError('Error: hexes with less than 27 nodes, hex27 True')
            cubit.cmd('del group '+str(group1))
        else:
            result=cubit.get_connectivity('hex',ind)
        return result
    def get_face_connectivity(self,ind):
        """Return the node ids of quad face *ind* (4 nodes, or 9 when hex27)."""
        if self.hex27:
            cubit.silent_cmd('group "nf" add Node in face '+str(ind))
            group1 = cubit.get_id_from_name("nf")
            result=cubit.get_group_nodes(group1)
            cubit.cmd('del group '+str(group1))
        else:
            result=cubit.get_connectivity('face',ind)
        return result
    def mat_parameter(self,properties):
        """Format one line of the nummaterial_velocity_file for *properties*.

        properties is one of the tuples built by block_definition():
        (domain_id, flag, vp, vs, rho, Q_kappa, Q_mu, aniso) for flag>0,
        (domain_id, flag, kind, name[, flag_down, flag_up]) for flag<0,
        or (domain_id, flag, name) otherwise.  Returns the formatted
        text line (including trailing newline).
        """
        #print properties
        #format nummaterials file: #material_domain_id #material_id #rho #vp #vs #Q_kappa #Q_mu #anisotropy_flag
        imaterial=properties[0]
        flag=properties[1]
        print 'number of material:',flag
        if flag > 0:
            vel=properties[2]
            # NOTE(review): 'is None' followed by arithmetic on vel would
            # raise TypeError if taken -- this looks inverted (probably
            # meant 'is not None'); confirm against the upstream
            # cubit2specfem3d.py before changing.
            if properties[2] is None and type(vel) != str:
                # velocity model scales with given vp value
                if vel >= 30:
                    m2km=1000.
                else:
                    m2km=1.
                vp=vel/m2km
                rho=(1.6612*vp-0.472*vp**2+0.0671*vp**3-0.0043*vp**4+0.000106*vp**4)*m2km
                txt='%1i %3i %20f %20f %20f %1i %1i\n' % (properties[0],properties[1],rho,vel,vel/(3**.5),0,0)
            elif type(vel) != str and vel != 0.:
                # velocity model given as vp,vs,rho,..
                #format nummaterials file: #material_domain_id #material_id #rho #vp #vs #Q_kappa #Q_mu #anisotropy_flag
                try:
                    qk=properties[5]
                except:
                    qk=9999.
                try:
                    qmu=properties[6]
                except:
                    qmu=9999.
                try:
                    ani=properties[7]
                except:
                    ani=0
                #print properties[0],properties[3],properties[1],properties[2],q,ani
                #format: #material_domain_id #material_id #rho #vp #vs #Q_kappa #Q_mu #anisotropy_flag
                txt='%1i %3i %20f %20f %20f %20f %20f %2i\n' % (properties[0],properties[1],properties[4],properties[2],properties[3],qk,qmu,ani)
            # NOTE(review): identical condition to the branch above --
            # this branch is unreachable as written.
            elif type(vel) != str and vel != 0.:
                helpstring="#material_domain_id #material_id #rho #vp #vs #Q_kappa #Q_mu #anisotropy"
                txt='%1i %3i %s \n' % (properties[0],properties[1],helpstring)
            else:
                helpstring=" --> syntax: #material_domain_id #material_id #rho #vp #vs #Q_kappa #Q_mu #anisotropy"
                txt='%1i %3i %s %s\n' % (properties[0],properties[1],properties[2],helpstring)
        elif flag < 0:
            if properties[2] == 'tomography':
                txt='%1i %3i %s %s\n' % (properties[0],properties[1],properties[2],properties[3])
            elif properties[2] == 'interface':
                txt='%1i %3i %s %s %1i %1i\n' % (properties[0],properties[1],properties[2],properties[3],properties[4],properties[5])
            else:
                helpstring=" --> syntax: #material_domain_id 'tomography' #file_name "
                txt='%1i %3i %s %s \n' % (properties[0],properties[1],properties[2],helpstring)
        #
        #print txt
        return txt
    def nummaterial_write(self,nummaterial_name,placeholder=True):
        """Write the nummaterial_velocity_file.

        One mat_parameter() line per volume block; when *placeholder* is
        True a commented format reference is appended to the file.
        """
        print 'Writing '+nummaterial_name+'.....'
        nummaterial=open(nummaterial_name,'w')
        for block in self.block_mat:
            #name=cubit.get_exodus_entity_name('block',block)
            nummaterial.write(str(self.mat_parameter(self.material[block])))
        if placeholder:
            txt='''
! note: format of nummaterial_velocity_file must be
! #(1)material_domain_id #(2)material_id #(3)rho #(4)vp #(5)vs #(6)Q_kappa #(7)Q_mu #(8)anisotropy_flag
!
! where
! material_domain_id : 1=acoustic / 2=elastic
! material_id : POSITIVE integer identifier corresponding to the identifier of material block
! rho : density
! vp : P-velocity
! vs : S-velocity
! Q_kappa : 9999 = no Q_kappa attenuation
! Q_mu : 9999 = no Q_mu attenuation
! anisotropy_flag : 0=no anisotropy/ 1,2,.. check with implementation in aniso_model.f90
!
!example:
!2 1 2300 2800 1500 9999.0 9999.0 0
!or
! #(1)material_domain_id #(2)material_id tomography elastic #(3)tomography_filename #(4)positive_unique_number
!
! where
! material_domain_id : 1=acoustic / 2=elastic
! material_id : NEGATIVE integer identifier corresponding to the identifier of material block
! tomography_filename: filename of the tomography file
! positive_unique_number: a positive unique identifier
!
!example:
!2 -1 tomography elastic tomo.xyz 1
'''
            nummaterial.write(txt)
        nummaterial.close()
        print 'Ok'
def create_hexnode_string(self,hexa,hexnode_string=True):
nodes=self.get_hex_connectivity(hexa)
#nodes=self.jac_check(nodes) #is it valid for 3D? TODO
if self.hex27:
ordered_nodes=[hexa]+list(nodes[:20])+[nodes[21]]+[nodes[25]]+[nodes[24]]+[nodes[26]]+[nodes[23]]+[nodes[22]]+[nodes[20]]
txt=' '.join(str(x) for x in ordered_nodes)
txt=txt+'\n'
#txt=('%10i %10i %10i %10i %10i %10i %10i %10i ')% nodes[:8] #first 8 nodes following specfem3d numbering convenction..
#txt=txt+('%10i %10i %10i %10i %10i %10i %10i %10i ')% nodes[8:16] #middle 12 nodes following specfem3d numbering convenction..
#txt=txt+('%10i %10i %10i %10i ')% nodes[16:20]
#txt=txt+('%10i %10i %10i %10i %10i %10i ')% (nodes[21], nodes[25], nodes[24], nodes[26], nodes[23], nodes[22])
#txt=txt+('%10i\n ')% nodes[20] #center volume
else:
txt=str(hexa)+' '+' '.join(str(x) for x in nodes)
txt=txt+'\n'
#txt=('%10i %10i %10i %10i %10i %10i %10i %10i\n')% nodes[:]
if hexnode_string:
return txt
else:
map(int,txt.split())
def create_facenode_string(self,hexa,face,normal=None,cknormal=True,facenode_string=True):
nodes=self.get_face_connectivity(face)
if cknormal:
nodes_ok=self.normal_check(nodes[0:4],normal)
if self.hex27: nodes_ok2=self.normal_check(nodes[4:8],normal)
else:
nodes_ok=nodes[0:4]
if self.hex27: nodes_ok2=nodes[4:8]
#
if self.hex27:
ordered_nodes=[hexa]+list(nodes_ok)+list(nodes_ok2)+[nodes[8]]
txt=' '.join(str(x) for x in ordered_nodes)
txt=txt+'\n'
#txt=('%10i %10i %10i %10i %10i ') % (hexa,nodes_ok[0],nodes_ok[1],nodes_ok[2],nodes_ok[3]) #first 4 nodes following specfem3d numbering convenction..
#txt=txt+('%10i %10i %10i %10i ')% (nodes_ok2[0],nodes_ok2[1],nodes_ok2[2],nodes_ok2[3]) #middle 4 nodes following specfem3d numbering convenction..
#txt=txt+('%10i\n')% nodes[8]
else:
txt=str(hexa)+' '+' '.join(str(x) for x in nodes_ok)
txt=txt+'\n'
#txt=('%10i %10i %10i %10i %10i\n') % (hexa,nodes_ok[0],nodes_ok[1],nodes_ok[2],nodes_ok[3])
if facenode_string:
return txt
else:
map(int,txt.split())
    def mesh_write(self,mesh_name):
        """Write the connectivity file: element count, then one line per hex."""
        print 'Writing '+mesh_name+'..... v2'
        num_elems=cubit.get_hex_count()
        meshfile=open(mesh_name,'w')
        print ' total number of elements:',str(num_elems)
        meshfile.write(str(num_elems)+'\n')
        for block,flag in zip(self.block_mat,self.block_flag):
            hexes=cubit.get_block_hexes(block)
            print 'block ',block,' hexes ',len(hexes)
            for hexa in hexes:
                txt=self.create_hexnode_string(hexa)
                meshfile.write(txt)
        meshfile.close()
        print 'Ok'
    def material_write(self,mat_name):
        """Write the materials file: one 'hex_id material_flag' line per hex."""
        mat=open(mat_name,'w')
        print 'Writing '+mat_name+'.....'
        for block,flag in zip(self.block_mat,self.block_flag):
            print 'block ',block,'flag ',flag
            hexes=cubit.get_block_hexes(block)
            for hexa in hexes:
                mat.write(('%10i %10i\n') % (hexa,flag))
        mat.close()
        print 'Ok'
def get_extreme(self,c,cmin,cmax):
if not cmin and not cmax:
cmin=c
cmax=c
else:
if c<cmin: cmin=c
if c>cmax: cmax=c
return cmin,cmax
    def nodescoord_write(self,nodecoord_name):
        """Write 'node_id x y z' for every node, tracking the model bounds."""
        nodecoord=open(nodecoord_name,'w')
        print 'Writing '+nodecoord_name+'.....'
        node_list=cubit.parse_cubit_list('node','all')
        num_nodes=len(node_list)
        print ' number of nodes:',str(num_nodes)
        nodecoord.write('%10i\n' % num_nodes)
        #
        for node in node_list:
            x,y,z=cubit.get_nodal_coordinates(node)
            # keep the global bounding box up to date (used by the cpml checks)
            self.xmin,self.xmax=self.get_extreme(x,self.xmin,self.xmax)
            self.ymin,self.ymax=self.get_extreme(y,self.ymin,self.ymax)
            self.zmin,self.zmax=self.get_extreme(z,self.zmin,self.zmax)
            txt=('%10i %20f %20f %20f\n') % (node,x,y,z)
            nodecoord.write(txt)
        nodecoord.close()
        print 'Ok'
    def free_write(self,freename=None):
        """Write the free-surface file (face count, then one face per line).

        Handles both the topography block (normals checked against +z)
        and an explicitly named free-surface block (normals unchecked).
        """
        # free surface
        cubit.cmd('set info off')
        cubit.cmd('set echo off')
        cubit.cmd('set journal off')
        from sets import Set
        normal=(0,0,1)
        if not freename: freename=self.freename
        # writes free surface file
        print 'Writing '+freename+'.....'
        freehex=open(freename,'w')
        #
        # searches block definition with name face_topo
        for block,flag in zip(self.block_bc,self.block_bc_flag):
            if block == self.topography:
                name=cubit.get_exodus_entity_name('block',block)
                print 'free surface (topography) block name:',name,'id:',block
                quads_all=cubit.get_block_faces(block)
                print ' number of faces = ',len(quads_all)
                # dict for O(1) membership tests while scanning all hex faces
                dic_quads_all=dict(zip(quads_all,quads_all))
                freehex.write('%10i\n' % len(quads_all))
                list_hex=cubit.parse_cubit_list('hex','all')
                for h in list_hex:
                    faces=cubit.get_sub_elements('hex',h,2)
                    for f in faces:
                        if dic_quads_all.has_key(f):
                            #print f
                            txt=self.create_facenode_string(h,f,normal,cknormal=True)
                            freehex.write(txt)
                freehex.close()
            elif block == self.free:
                name=cubit.get_exodus_entity_name('block',block)
                print 'free surface block name:',name,'id:',block
                quads_all=cubit.get_block_faces(block)
                print ' number of faces = ',len(quads_all)
                dic_quads_all=dict(zip(quads_all,quads_all))
                freehex.write('%10i\n' % len(quads_all))
                list_hex=cubit.parse_cubit_list('hex','all')
                for h in list_hex:
                    faces=cubit.get_sub_elements('hex',h,2)
                    for f in faces:
                        if dic_quads_all.has_key(f):
                            txt=self.create_facenode_string(h,f,normal,cknormal=False)
                            freehex.write(txt)
                freehex.close()
        print 'Ok'
        cubit.cmd('set info on')
        cubit.cmd('set echo on')
    def check_cmpl_size(self,case='x'):
        """Validate the C-PML thickness against one axis of the model.

        Returns (inner_min, inner_max) for the given axis ('x'/'y'/'z'),
        or (False, False) when self.size exceeds 30% of the model
        extent on that axis.
        NOTE(review): 'cmpl' looks like a typo for 'cpml'; the name is
        kept since select_cpml() calls it.
        """
        if case=='x':
            vmaxtmp=self.xmax
            vmintmp=self.xmin
        elif case=='y':
            vmaxtmp=self.ymax
            vmintmp=self.ymin
        elif case=='z':
            vmaxtmp=self.zmax
            vmintmp=self.zmin
        if self.size > .3*(vmaxtmp-vmintmp):
            print 'please select the size of cpml less than 30% of the '+case+' size of the volume'
            print vmaxtmp-vmintmp,.3*(vmaxtmp-vmintmp)
            print 'cmpl set to false, no '+self.cpmlname+' file will be created'
            return False,False
        else:
            # shrink the model box inward by the pml thickness
            vmin=vmintmp+self.size
            vmax=vmaxtmp-self.size
            return vmin,vmax
    def select_cpml(self):
        """Partition the C-PML layer hexes into direction groups.

        Returns (cpml_x, cpml_y, cpml_z, cpml_xy, cpml_xz, cpml_yz,
        cpml_xyz) as sets of hex ids -- one set per combination of
        absorbing directions -- or False when the layer size fails the
        check_cmpl_size validation on any axis.
        """
        xmin,xmax=self.check_cmpl_size(case='x')
        ymin,ymax=self.check_cmpl_size(case='y')
        zmin,zmax=self.check_cmpl_size(case='z')
        #
        if xmin is False or xmax is False or ymin is False or ymax is False or zmin is False or zmax is False:
            return False
        else:
            # collect the hexes lying inside each boundary slab
            txt="group 'hxmin' add hex with X_coord < "+str(xmin)
            cubit.cmd(txt)
            txt="group 'hxmax' add hex with X_coord > "+str(xmax)
            cubit.cmd(txt)
            txt="group 'hymin' add hex with Y_coord < "+str(ymin)
            cubit.cmd(txt)
            txt="group 'hymax' add hex with Y_coord > "+str(ymax)
            cubit.cmd(txt)
            txt="group 'hzmin' add hex with Z_coord < "+str(zmin)
            cubit.cmd(txt)
            txt="group 'hzmax' add hex with Z_coord > "+str(zmax)
            cubit.cmd(txt)
            from sets import Set
            group1 = cubit.get_id_from_name("hxmin")
            cpml_xmin =Set(list(cubit.get_group_hexes(group1)))
            group1 = cubit.get_id_from_name("hymin")
            cpml_ymin =Set(list(cubit.get_group_hexes(group1)))
            group1 = cubit.get_id_from_name("hxmax")
            cpml_xmax =Set(list(cubit.get_group_hexes(group1)))
            group1 = cubit.get_id_from_name("hymax")
            cpml_ymax =Set(list(cubit.get_group_hexes(group1)))
            group1 = cubit.get_id_from_name("hzmin")
            cpml_zmin =Set(list(cubit.get_group_hexes(group1)))
            if self.top_absorbing:
                group1 = cubit.get_id_from_name("hzmax")
                cpml_zmax =Set(list(cubit.get_group_hexes(group1)))
            else:
                cpml_zmax =Set([])
            # split the union into pure-direction and edge/corner subsets
            cpml_all=cpml_ymin | cpml_ymax | cpml_xmin | cpml_xmax | cpml_zmin | cpml_zmax
            cpml_x=cpml_all-cpml_zmin-cpml_ymin-cpml_ymax-cpml_zmax
            cpml_y=cpml_all-cpml_zmin-cpml_xmin-cpml_xmax-cpml_zmax
            cpml_xy=cpml_all-cpml_zmin-cpml_y-cpml_x-cpml_zmax
            cpml_z=cpml_all-cpml_xmin-cpml_ymin-cpml_ymax-cpml_xmax
            cpml_xz=cpml_zmin-cpml_ymin-cpml_ymax-cpml_z
            cpml_yz=cpml_zmin-cpml_xmin-cpml_xmax-cpml_z
            cpml_xyz=cpml_zmin-cpml_xz-cpml_yz-cpml_z
            # mirror the classification back into named cubit groups
            txt=' '.join(str(h) for h in cpml_x)
            cubit.cmd("group 'x_cpml' add hex "+txt)
            txt=' '.join(str(h) for h in cpml_y)
            cubit.cmd("group 'y_cpml' add hex "+txt)
            txt=' '.join(str(h) for h in cpml_z)
            cubit.cmd("group 'z_cpml' add hex "+txt)
            txt=' '.join(str(h) for h in cpml_xy)
            cubit.cmd("group 'xy_cpml' add hex "+txt)
            txt=' '.join(str(h) for h in cpml_xz)
            cubit.cmd("group 'xz_cpml' add hex "+txt)
            txt=' '.join(str(h) for h in cpml_yz)
            cubit.cmd("group 'yz_cpml' add hex "+txt)
            txt=' '.join(str(h) for h in cpml_xyz)
            cubit.cmd("group 'xyz_cpml' add hex "+txt)
            return cpml_x,cpml_y,cpml_z,cpml_xy,cpml_xz,cpml_yz,cpml_xyz
    def abs_write(self,absname=None):
        """Write the absorbing-boundary files.

        When self.cpml is set, first writes the cpml hex classification
        file; then (always) writes one Stacey face file per boundary
        side, matching blocks either by name (xmin/xmax/ymin/ymax/
        bottom/abs) or by the conventional block ids 1000-1006.
        """
        # absorbing boundaries
        import re
        cubit.cmd('set info off')
        cubit.cmd('set echo off')
        cubit.cmd('set journal off')
        from sets import Set
        if not absname: absname=self.absname
        if self.cpml:
            if not absname: absname=self.cpmlname
            print 'Writing cpml'+absname+'.....'
            list_cpml=self.select_cpml()
            if list_cpml is False:
                print 'error writing cpml files'
                return
            else:
                abshex_cpml=open(absname,'w')
                hexcount=sum(map(len,list_cpml))
                abshex_cpml.write(('%10i\n') % (hexcount))
                # icpml encodes the direction group (order from select_cpml)
                for icpml,lcpml in enumerate(list_cpml):
                    for hexa in lcpml:
                        abshex_cpml.write(('%10i %10i\n') % (hexa,icpml))
        stacey_absorb=True
        if stacey_absorb:
            #
            #
            if not absname: absname=self.absname
            # loops through all block definitions
            list_hex=cubit.parse_cubit_list('hex','all')
            for block,flag in zip(self.block_bc,self.block_bc_flag):
                if block != self.topography:
                    name=cubit.get_exodus_entity_name('block',block)
                    print ' block name:',name,'id:',block
                    cknormal=True
                    abshex_local=False
                    # opens file
                    if re.search('xmin',name):
                        print 'xmin'
                        abshex_local=open(absname+'_xmin','w')
                        normal=(-1,0,0)
                    elif re.search('xmax',name):
                        print "xmax"
                        abshex_local=open(absname+'_xmax','w')
                        normal=(1,0,0)
                    elif re.search('ymin',name):
                        print "ymin"
                        abshex_local=open(absname+'_ymin','w')
                        normal=(0,-1,0)
                    elif re.search('ymax',name):
                        print "ymax"
                        abshex_local=open(absname+'_ymax','w')
                        normal=(0,1,0)
                    elif re.search('bottom',name):
                        print "bottom"
                        abshex_local=open(absname+'_bottom','w')
                        normal=(0,0,-1)
                    elif re.search('abs',name):
                        print "abs all - experimental, check the output"
                        cknormal=False
                        abshex_local=open(absname,'w')
                    else:
                        # fall back to the conventional block-id mapping
                        if block == 1003:
                            print 'xmin'
                            abshex_local=open(absname+'_xmin','w')
                            normal=(-1,0,0)
                        elif block == 1004:
                            print "ymin"
                            abshex_local=open(absname+'_ymin','w')
                            normal=(0,-1,0)
                        elif block == 1005:
                            print "xmax"
                            abshex_local=open(absname+'_xmax','w')
                            normal=(1,0,0)
                        elif block == 1006:
                            print "ymax"
                            abshex_local=open(absname+'_ymax','w')
                            normal=(0,1,0)
                        elif block == 1002:
                            print "bottom"
                            abshex_local=open(absname+'_bottom','w')
                            normal=(0,0,-1)
                        elif block == 1000:
                            print "custumized"
                            abshex_local=open(absname,'w')
                            cknormal=False
                            normal=None
                    #
                    #
                    if abshex_local:
                        # gets face elements
                        quads_all=cubit.get_block_faces(block)
                        dic_quads_all=dict(zip(quads_all,quads_all))
                        print ' number of faces = ',len(quads_all)
                        abshex_local.write('%10i\n' % len(quads_all))
                        #command = "group 'list_hex' add hex in face "+str(quads_all)
                        #command = command.replace("["," ").replace("]"," ").replace("("," ").replace(")"," ")
                        #cubit.cmd(command)
                        #group=cubit.get_id_from_name("list_hex")
                        #list_hex=cubit.get_group_hexes(group)
                        #command = "delete group "+ str(group)
                        #cubit.cmd(command)
                        for h in list_hex:
                            faces=cubit.get_sub_elements('hex',h,2)
                            for f in faces:
                                if dic_quads_all.has_key(f):
                                    txt=self.create_facenode_string(h,f,normal=normal,cknormal=cknormal)
                                    abshex_local.write(txt)
                        abshex_local.close()
        print 'Ok'
        cubit.cmd('set info on')
        cubit.cmd('set echo on')
    def surface_write(self,pathdir=None):
        """Write one file per optional named surface block (e.g. moho_surface).

        Blocks whose name matches 'abs' or 'topo' are skipped; blocks
        matching 'surface' are written to pathdir + name + '_file'.
        """
        # optional surfaces, e.g. moho_surface
        # should be created like e.g.:
        # > block 10 face in surface 2
        # > block 10 name 'moho_surface'
        import re
        from sets import Set
        for block in self.block_bc:
            if block != self.topography:
                name=cubit.get_exodus_entity_name('block',block)
                # skips block names like face_abs**, face_topo**
                if re.search('abs',name):
                    continue
                elif re.search('topo',name):
                    continue
                elif re.search('surface',name):
                    filename=pathdir+name+'_file'
                else:
                    continue
                # gets face elements
                print ' surface block name: ',name,'id: ',block
                quads_all=cubit.get_block_faces(block)
                print ' face = ',len(quads_all)
                if len(quads_all) == 0:
                    continue
                # writes out surface infos to file
                print 'Writing '+filename+'.....'
                surfhex_local=open(filename,'w')
                dic_quads_all=dict(zip(quads_all,quads_all))
                # writes number of surface elements
                surfhex_local.write('%10i\n' % len(quads_all))
                # writes out element node ids
                list_hex=cubit.parse_cubit_list('hex','all')
                for h in list_hex:
                    faces=cubit.get_sub_elements('hex',h,2)
                    for f in faces:
                        if dic_quads_all.has_key(f):
                            txt=self.create_facenode_string(h,f,cknormal=False)
                            surfhex_local.write(txt)
                # closes file
                surfhex_local.close()
        print 'Ok'
    def rec_write(self,recname):
        """Write a STATIONS file for the 'receivers' nodeset.

        NOTE(review): the *recname* parameter is ignored -- the file is
        always self.recname -- and of each node's (x, y, z) only x and z
        are written.  Both look deliberate for a 2D-style setup, but
        should be confirmed against the callers.
        """
        print 'Writing '+self.recname+'.....'
        recfile=open(self.recname,'w')
        nodes=cubit.get_nodeset_nodes(self.receivers)
        for i,n in enumerate(nodes):
            x,y,z=cubit.get_nodal_coordinates(n)
            recfile.write('ST%i XX %20f %20f 0.0 0.0 \n' % (i,x,z))
        recfile.close()
        print 'Ok'
    def write(self,path='',netcdf_name=False):
        """Export the mesh: netCDF when netcdf_name is given, ASCII otherwise.

        NOTE(review): _write_netcdf is called with a path= keyword that
        its visible signature (name only) does not accept -- confirm
        before relying on the netCDF path.
        """
        cubit.cmd('set info off')
        cubit.cmd('set echo off')
        cubit.cmd('set journal off')
        cubit.cmd('compress all')
        if len(path) != 0:
            # ensure the output path ends with a directory separator
            if path[-1] != '/': path=path+'/'
        if netcdf_name:
            self._write_netcdf(path=path,name=netcdf_name)
        else:
            self._write_ascii(path=path)
        cubit.cmd('set info on')
        cubit.cmd('set echo on')
def _write_netcdf(self,name='mesh.specfem3D'):
if self.cpml:
raise NotImplementedError('cmpl not implemented for netcdf specfem3d mesh format')
try:
from netCDF4 import Dataset
except:
raise ImportError('error importing NETCDF4')
self.netcdf_db= Dataset(name, mode='w',format='NETCDF4')
self.netcdf_db.createDimension("len_string", self._netcdf_len_string)
self.netcdf_db.createDimension("len_line", self._netcdf_len_line)
self.netcdf_db.createDimension("four", self._netcdf_four)
self.netcdf_db.createDimension("len_name", self._netcdf_len_name=33)
self.netcdf_db.createDimension("time_step", 0)
self.netcdf_db.createDimension("num_dim", 3)
self.netcdf_db.createDimension("num_node_hex", self._netcdf_num_nod_hex)
self.netcdf_db.createDimension("num_node_quad", self._netcdf_num_nod_quad)
num_nodes=len(cubit.get_node_count())
self.netcdf_db.createDimension("num_nodes", num_nodes)
self.netcdf_db.createDimension("num_elem", cubit.get_hex_count())
num_block=len(cubit.get_block_id_list())
self.netcdf_db.createDimension("num_el_blk", num_block)
self.netcdf_db.createVariable("eb_prop1","i4",("num_el_blk",))# [1 1001 1002 ...]
self.netcdf_db.setncattr('name','ID')
self.netcdf_db.createVariable("node_coord","f8",("num_nodes",self.num_dim))
self.netcdf_db.createVariable("node_map","i4",("num_nodes",1))
self.netcdf_db.createVariable("eb_names","S1",("num_el_blk","len_name"))#[ ["v","o","l"...]..]
self.netcdf_db.createVariable("coor_names","S1",("num_dim","len_name"))#[ ["x","",""...]...]
self.netcdf_db.createVariable("mesh","i4",("num_elem",str(self.num_node_hex+1)))
self.netcdf_db.createVariable("free","i4",("num_elem",str(self.num_node_quad+1)))
self.netcdf_db.createVariable("material","i4",("num_elem",2))
self.netcdf_db.createVariable("block_hex","i4",("num_elem",2))
self.netcdf_db.variables['eb_prop1'][:]=list(self.block_mat)+list(self.block_bc)
self.netcdf_db.variables['mesh'][:]=self._netcdf_mesh_array()
self.netcdf_db.variables['material'][:]=self._netcdf_material_array()
self.netcdf_db.variables['node_map'][:]=self._netcdf_nodescoord_array()[0]
self.netcdf_db.variables['node_coord'][:]=self._netcdf_nodescoord_array()[1]
self.netcdf_db.variables['free'][:]=self._netcdf_free_array()
for block,flag in zip(self.block_bc,self.block_bc_flag):
if block != self.topography and block != self.free:
label,normal,cknormal=self._get_bc_flag(block)
quads_all=cubit.get_block_faces(block)
print label
print ' number of faces = ',len(quads_all)
self.netcdf_db.createDimension('num_el_'+label, len(quads_all))
self.netcdf_db.createVariable("abs_"+label,"i4",('num_el_'+label,str(self.num_node_quad+1)))
self.netcdf_db.variables["abs_"+label][:]=self._netcdf_abs_array(quads_all,normal,cknormal)
elif block == self.topography or block == self.free:
quads_all=cubit.get_block_faces(block)
self.netcdf_db.createDimension('num_el_free', len(quads_all))
self.netcdf_db.createVariable("free","i4",("num_elem",str(self.num_node_quad+1)))
self.netcdf_db.variables['free'][:]=self._netcdf_free_array()
self.nummaterial_write(path+self.nummaterial_name)
def _get_bc_flag(self,block):
import re
name=cubit.get_exodus_entity_name('block',block)
print ' block name:',name,'id:',block
cknormal=True
abshex_local=False
if re.search('xmin',name):
label= 'xmin'
normal=(-1,0,0)
elif re.search('xmax',name):
label = "xmax"
normal=(1,0,0)
elif re.search('ymin',name):
label= "ymin"
normal=(0,-1,0)
elif re.search('ymax',name):
label= "ymax"
normal=(0,1,0)
elif re.search('bottom',name):
label= "bottom"
normal=(0,0,-1)
elif re.search('abs',name):
label= "all"
print "abs all - experimental, check the output"
cknormal=False
else:
if block == 1003:
label= 'xmin'
normal=(-1,0,0)
elif block == 1004:
label= "ymin"
normal=(0,-1,0)
elif block == 1005:
label= "xmax"
normal=(1,0,0)
elif block == 1006:
label= "ymax"
normal=(0,1,0)
elif block == 1002:
label= "bottom"
normal=(0,0,-1)
elif block == 1000:
label= "all"
cknormal=False
normal=None
return label,normal,cknormal
def _netcdf_abs_array(self,quads_all,normal,cknormal):
# absorbing boundaries
import re
cubit.cmd('set info off')
cubit.cmd('set echo off')
cubit.cmd('set journal off')
from sets import Set
list_hex=cubit.parse_cubit_list('hex','all')
dic_quads_all=dict(zip(quads_all,quads_all))
abs_array = []
for h in list_hex:
faces=cubit.get_sub_elements('hex',h,2)
for f in faces:
if dic_quads_all.has_key(f):
abs_array.append(self.create_facenode_string(h,f,normal=normal,cknormal=cknormal,facenode_string=False))
print 'Ok'
cubit.cd('set info on')
cubit.cmd('set echo on')
return abs_array
def _netcdf_nodescoord_array(self):
print 'Writing node coordinates..... netcdf'
node_list=cubit.parse_cubit_list('node','all')
num_nodes=len(node_list)
print ' number of nodes:',str(num_nodes)
#
coord_array=[]
map_array=[]
for node in node_list:
x,y,z=cubit.get_nodal_coordinates(node)
self.xmin,self.xmax=self.get_extreme(x,self.xmin,self.xmax)
self.ymin,self.ymax=self.get_extreme(y,self.ymin,self.ymax)
self.zmin,self.zmax=self.get_extreme(z,self.zmin,self.zmax)
map_array.append([map])
coord_array.append([x,y,z])
print 'Ok'
return map_array,coord_array
def _netcdf_free_array(self):
        """Return face-node rows for the free surface (netCDF writer).

        Handles two block kinds: self.topography (orientation checked against
        the +z normal) and self.free (orientation not checked).
        """
        # free surface
        cubit.cmd('set info off')
        cubit.cmd('set echo off')
        cubit.cmd('set journal off')
        from sets import Set  # NOTE(review): Set is unused in this method
        normal=(0,0,1)
        # writes free surface file
        print 'Writing free surface..... netcdf'
        #
        # searches block definition with name face_topo
        free_array=[]
        # NOTE(review): 'flag' from block_bc_flag is unused below
        for block,flag in zip(self.block_bc,self.block_bc_flag):
            if block == self.topography:
                name=cubit.get_exodus_entity_name('block',block)
                print 'free surface (topography) block name:',name,'id:',block
                quads_all=cubit.get_block_faces(block)
                print ' number of faces = ',len(quads_all)
                # dict gives O(1) membership tests over the face ids
                dic_quads_all=dict(zip(quads_all,quads_all))
                list_hex=cubit.parse_cubit_list('hex','all')
                for h in list_hex:
                    faces=cubit.get_sub_elements('hex',h,2)
                    for f in faces:
                        if dic_quads_all.has_key(f):
                            #print f
                            free_array.append(self.create_facenode_string(h,f,normal,cknormal=True,facenode_string=False))
            elif block == self.free:
                name=cubit.get_exodus_entity_name('block',block)
                print 'free surface block name:',name,'id:',block
                quads_all=cubit.get_block_faces(block)
                print ' number of faces = ',len(quads_all)
                dic_quads_all=dict(zip(quads_all,quads_all))
                list_hex=cubit.parse_cubit_list('hex','all')
                for h in list_hex:
                    faces=cubit.get_sub_elements('hex',h,2)
                    for f in faces:
                        if dic_quads_all.has_key(f):
                            free_array.append(self.create_facenode_string(h,f,normal,cknormal=False,facenode_string=False))
        print 'Ok'
        cubit.cmd('set info on')
        cubit.cmd('set echo on')
        return free_array
def _netcdf_material_array(self):
print 'Writing material...... netcdf'
material_array=[]
for block,flag in zip(self.block_mat,self.block_flag):
print 'block ',block,'flag ',flag
hexes=cubit.get_block_hexes(block)
for hexa in hexes:
material_array.append([hexa,flag])
print 'Ok'
return material_array
def _netcdf_mesh_array(self):
print 'Writing '+mesh_name+'..... netcdf'
print 'total number of elements:',str(cubit.get_hex_count())
mesh_array=[]
for block,flag in zip(self.block_mat,self.block_flag):
hexes=cubit.get_block_hexes(block)
print 'block ',block,' hexes ',len(hexes)
for hexa in hexes:
mesh_array.append(create_hexnode_string(hexa,hexnode_string=False))
print 'Ok'
return mesh_array
def _write_ascii(self,path=''):
        """Write the standard SPECFEM3D ASCII file set into directory 'path'.

        Emits: element connectivity, material assignment, node coordinates,
        free surface, absorbing (or CPML) surfaces, material definitions,
        optional named surfaces and, when defined, receiver positions.
        """
        # mesh file
        self.mesh_write(path+self.mesh_name)
        # mesh material
        self.material_write(path+self.material_name)
        # mesh coordinates
        self.nodescoord_write(path+self.nodecoord_name)
        # free surface: face_top
        self.free_write(path+self.freename)
        # absorbing surfaces: abs_***
        if self.cpml:
            self.abs_write(path+self.cpmlname)
        else:
            self.abs_write(path+self.absname)
        # material definitions
        self.nummaterial_write(path+self.nummaterial_name)
        # any other surfaces: ***surface***
        self.surface_write(path)
        # receivers
        if self.receivers: self.rec_write(path+self.recname)
class read_netcdf_mesh(object,mesh):
def __init__(self,ncname=False):
self.ncname=ncname
def __repr__():
pass
def read_mesh():
mesh= Dataset(self.ncname, mode='r')
|
casarotti/GEOCUBIT--experimental
|
geocubitlib/dev/exodus2specfem3d.py
|
Python
|
gpl-3.0
| 43,672
|
[
"NetCDF"
] |
8ae860072ee39dd4aa8f999ca921df0119fdee93499ba6771ffa4ca26ffe8b5c
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
TODO: Modify module doc.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "11/19/13"
from pymatgen.analysis.energy_models import EwaldElectrostaticModel, \
SymmetryModel, IsingModel
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
import os
import unittest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class EwaldElectrostaticModelTest(unittest.TestCase):
    """Tests for the Ewald electrostatic energy model."""

    def test_get_energy(self):
        # A disordered 4-site structure: every site is a 50/25/25 mix.
        lattice = Lattice([[3.0, 0.0, 0.0],
                           [1.0, 3.0, 0.00],
                           [0.00, -2.0, 3.0]])
        occupancy = {"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25}
        frac_coords = [[0, 0, 0], [0.75, 0.75, 0.75], [0.5, 0.5, 0.5],
                       [0.25, 0.25, 0.25]]
        struct = Structure(lattice,
                           [dict(occupancy) for _ in frac_coords],
                           frac_coords)
        model = EwaldElectrostaticModel()
        self.assertAlmostEqual(model.get_energy(struct), 44.1070954178)
        # An ordered structure read from a reference CIF.
        li2o = Structure.from_file(os.path.join(test_dir, "Li2O.cif"))
        self.assertAlmostEqual(model.get_energy(li2o), -36.3476248117)

    def test_to_from_dict(self):
        # The model must survive an as_dict / from_dict round trip.
        model = EwaldElectrostaticModel()
        restored = EwaldElectrostaticModel.from_dict(model.as_dict())
        self.assertIsInstance(restored, EwaldElectrostaticModel)
class SymmetryModelTest(unittest.TestCase):
    """Tests for the symmetry-based energy model."""

    def test_get_energy(self):
        li2o = Structure.from_file(os.path.join(test_dir, "Li2O.cif"))
        self.assertAlmostEqual(SymmetryModel().get_energy(li2o), -225)

    def test_to_from_dict(self):
        # Round-trip through the dict representation, keeping symprec.
        restored = SymmetryModel.from_dict(SymmetryModel(symprec=0.2).as_dict())
        self.assertIsInstance(restored, SymmetryModel)
        self.assertAlmostEqual(restored.symprec, 0.2)
class IsingModelTest(unittest.TestCase):
    """Tests for the Ising spin energy model."""

    def test_get_energy(self):
        from pymatgen.core.periodic_table import Specie
        model = IsingModel(5, 6)
        struct = Structure.from_file(os.path.join(test_dir, "LiFePO4.cif"))
        struct.replace_species({"Fe": Specie("Fe", 2, {"spin": 4})})
        self.assertAlmostEqual(model.get_energy(struct), 172.81260515787977)
        # Flip two of the iron spins and check the energy changes accordingly.
        for site_index in (4, 5):
            struct[site_index] = Specie("Fe", 2, {"spin": -4})
        self.assertAlmostEqual(model.get_energy(struct), 51.97424405382921)

    def test_to_from_dict(self):
        # Round-trip through the dict representation, keeping the coupling j.
        restored = IsingModel.from_dict(IsingModel(5, 4).as_dict())
        self.assertIsInstance(restored, IsingModel)
        self.assertAlmostEqual(restored.j, 5)
# Run this module's test cases directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
rousseab/pymatgen
|
pymatgen/analysis/tests/test_energy_models.py
|
Python
|
mit
| 2,986
|
[
"pymatgen"
] |
14c73587abaa310e95d54809bcc7281a07aef079a1e493c4cedc5cba5b512a2c
|
import unittest
import echidna.core.shift as shift
import echidna.core.spectra as spectra
import numpy as np
from scipy.optimize import curve_fit
class TestShift(unittest.TestCase):
    """Tests for echidna's spectrum-shift machinery.

    Builds a Gaussian energy spectrum, shifts it (by interpolation and by
    whole bins) and checks the fitted mean/sigma and the total integral.
    """

    def gaussian(self, x, *p):
        """ A gaussian used for fitting.

        Args:
          x (float): Position the gaussian is calculated at.
          *p (list): List of parameters to fit (amplitude, mean, sigma);
            absolute values are taken so the fit cannot go negative.

        Returns:
          float: Value of gaussian at x for given parameters
        """
        A, mean, sigma = p
        A = np.fabs(A)
        mean = np.fabs(mean)
        sigma = np.fabs(sigma)
        return A*np.exp(-(x-mean)**2/(2.*sigma**2))

    def fit_gaussian_energy(self, spectra):
        """ Fits a gaussian to the energy projection of a spectrum.

        Args:
          spectra (core.spectra): Spectrum to be fitted

        Returns:
          tuple: mean (float), sigma (float) and
            integral (float) of the spectrum.
        """
        entries = []
        energies = []
        energy_width = spectra.get_config().get_par("energy_mc").get_width()
        energy_low = spectra.get_config().get_par("energy_mc")._low
        spectra_proj = spectra.project("energy_mc")
        # Bin centres: low edge plus half a bin width per index.
        for i in range(len(spectra_proj)):
            entries.append(spectra_proj[i])
            energies.append(energy_low+energy_width*(i+0.5))
        # Initial guess: amplitude 300, mean 2.5 MeV, sigma 0.1 MeV.
        pars0 = [300., 2.5, 0.1]
        coeff, var_mtrx = curve_fit(self.gaussian, energies, entries, p0=pars0)
        return coeff[1], np.fabs(coeff[2]), np.array(entries).sum()

    def test_shift(self):
        """ Tests the variable shifting method.

        Creates a Gaussian spectra with mean energy 2.5 MeV and sigma 0.2 MeV.
        Radial values of the spectra have a uniform distribution.
        The "energy_mc" of the spectra is then shifted by 0.111 MeV.
        The shifted spectra is fitted with a Gaussian and the extracted
        mean and sigma are checked against expected values within 1 %.
        Integral of shifted spectrum is checked against original number of
        entries.
        This is then repeated for a shift of 0.2 MeV to test the shift_by_bin
        method.
        """
        np.random.seed()
        test_decays = 10000
        config_path = "echidna/config/example.yml"
        config = spectra.SpectraConfig.load_from_file(config_path)
        test_spectra = spectra.Spectra("Test", test_decays, config)
        mean_energy = 2.5 # MeV
        sigma_energy = 0.2 # MeV
        for i in range(test_decays):
            energy = np.random.normal(mean_energy, sigma_energy)
            radius = np.random.random() * \
                test_spectra.get_config().get_par("radial_mc")._high
            test_spectra.fill(energy_mc=energy, radial_mc=radius)
        # Fit the unshifted spectrum to get the true (sampled) mean/sigma.
        mean_energy, sigma_energy, integral = self.fit_gaussian_energy(
            test_spectra)
        # First test interpolation shift
        shifter = shift.Shift()
        shift_e = 0.111
        shifter.set_shift(shift_e)
        shifted_spectra = shifter.shift(test_spectra, "energy_mc")
        mean, sigma, integral = self.fit_gaussian_energy(shifted_spectra)
        expected_mean = mean_energy+shift_e
        expected_sigma = sigma_energy
        self.assertTrue(expected_mean < 1.01*mean and
                        expected_mean > 0.99*mean,
                        msg="Expected mean energy %s, fitted mean energy %s"
                        % (expected_mean, mean))
        self.assertTrue(expected_sigma < 1.01*sigma and
                        expected_sigma > 0.99*sigma,
                        msg="Expected sigma %s, fitted sigma %s"
                        % (expected_sigma, sigma))
        self.assertAlmostEqual(integral/float(test_decays), 1.0,
                               msg="Input decays %s, integral of spectra %s"
                               % (test_decays, integral))
        # Now test shift by bin
        # 0.111 MeV is not a whole number of bins, so shift_by_bin must raise.
        self.assertRaises(ValueError, shifter.shift_by_bin, test_spectra,
                          "energy_mc")
        shift_e = 0.2
        shifter.set_shift(shift_e)
        shifted_spectra = shifter.shift_by_bin(test_spectra, "energy_mc")
        mean, sigma, integral = self.fit_gaussian_energy(shifted_spectra)
        expected_mean = mean_energy+shift_e
        expected_sigma = sigma_energy
        self.assertTrue(expected_mean < 1.01*mean and
                        expected_mean > 0.99*mean,
                        msg="Expected mean energy %s, fitted mean energy %s"
                        % (expected_mean, mean))
        self.assertTrue(expected_sigma < 1.01*sigma and
                        expected_sigma > 0.99*sigma,
                        msg="Expected sigma %s, fitted sigma %s"
                        % (expected_sigma, sigma))
        self.assertAlmostEqual(integral/float(test_decays), 1.0,
                               msg="Input decays %s, integral of spectra %s"
                               % (test_decays, integral))
|
mjmottram/echidna
|
echidna/test/test_shift.py
|
Python
|
mit
| 4,914
|
[
"Gaussian"
] |
bd4eed011c007c5b8e0e8ab0b2e395060c5d6cfcf70a4b4508a28b4f481300cc
|
from __future__ import print_function
import sys
import time
import requests
from numpy import pi, sin, cos
import numpy as np
from bokeh.objects import (Plot, DataRange1d, LinearAxis,
ColumnDataSource, Glyph, PanTool, WheelZoomTool)
from bokeh.glyphs import Line
from bokeh import session
from bokeh import document
# NOTE(review): this example uses the long-deprecated pre-1.0 bokeh server API
# (bokeh.objects, session.store_objects); it will not run on modern bokeh.
document = document.Document()  # shadows the imported 'document' module
session = session.Session()     # shadows the imported 'session' module
session.use_doc('line_animate')
session.load_document(document)

# 'x' is translated every animation frame; 'x_static' anchors the fixed range.
x = np.linspace(-2*pi, 2*pi, 1000)
x_static = np.linspace(-2*pi, 2*pi, 1000)
y = sin(x)
z = cos(x)
source = ColumnDataSource(
    data=dict(
        x=x, y=y, z=z, x_static=x_static)
)

xdr = DataRange1d(sources=[source.columns("x")])
xdr_static = DataRange1d(sources=[source.columns("x_static")])
ydr = DataRange1d(sources=[source.columns("y")])

# Blue sine curve follows the moving 'x'; red cosine stays on the static range.
line_glyph = Line(x="x", y="y", line_color="blue")
line_glyph2 = Line(x="x", y="z", line_color="red")
renderer = Glyph(
        data_source = source,
        xdata_range = xdr,
        ydata_range = ydr,
        glyph = line_glyph
        )
renderer2 = Glyph(
        data_source = source,
        xdata_range = xdr_static,
        ydata_range = ydr,
        glyph = line_glyph2
        )
plot = Plot(x_range=xdr_static, y_range=ydr, data_sources=[source], min_border=50)
xaxis = LinearAxis(plot=plot, location="bottom")
plot.below.append(xaxis)
yaxis = LinearAxis(plot=plot, location="left")
plot.left.append(yaxis)
pantool = PanTool(dimensions=["width", "height"])
wheelzoomtool = WheelZoomTool(dimensions=["width", "height"])
plot.renderers.append(renderer)
plot.renderers.append(renderer2)
plot.tools = [pantool, wheelzoomtool]
document.add(plot)
session.store_document(document)
link = session.object_link(document._plotcontext)
print ("please visit %s to see plots" % link)
print ("animating")
# Animation loop: slide the sine curve along x and push each frame to the
# server; runs until interrupted (Ctrl-C).
while True:
    for i in np.linspace(-2*pi, 2*pi, 50):
        source.data['x'] = x +i
        session.store_objects(source)
        time.sleep(0.05)
|
the13fools/Bokeh_Examples
|
glyphs/line_animate.py
|
Python
|
bsd-3-clause
| 1,955
|
[
"VisIt"
] |
5a04ebd839c91693135ce81159d0e0b8b5497efad598046f551b0c46145f9ee5
|
#!/usr/bin/env python
"""
Dalton manages your security groups.
Usage:
dalton [-d | --dry-run] [--vpc=<vpc_id>] <config-dir> <environment> <region>
dalton -h | --help
dalton --version
Options:
-d --dry-run Performs a "dry run" to show (but not perform) security group changes
-v --version Show version.
-h --help Show this screen.
"""
from logging import basicConfig, getLogger, CRITICAL, INFO
from docopt import docopt
from path import path
import yaml
from dalton.config import YamlFileSecurityGroupsConfigLoader
from dalton.ec2 import Ec2SecurityGroupService
from dalton.updater import SecurityGroupUpdater
def main(config_dir, env, region, vpc_id, dry_run):
    """Load the security-group ruleset for (env, region[, vpc]) and sync EC2 to it.

    Args:
        config_dir: Directory holding aws.yaml plus per-environment rulesets.
        env: Environment name (ruleset sub-directory and aws.yaml key).
        region: AWS region whose security groups are managed.
        vpc_id: Optional VPC id; selects the VPC-specific ruleset file.
        dry_run: When True, only report the changes that would be made.
    """
    basicConfig(
        level=INFO,
        format='%(asctime)s %(levelname)-3s %(name)s (%(funcName)s:%(lineno)d) %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    # boto is extremely chatty at INFO; silence everything below CRITICAL.
    getLogger('boto').level = CRITICAL

    if vpc_id:
        ruleset_path = "%s/%s/security_groups_%s_%s.yaml" % (config_dir, env, region, vpc_id)
    else:
        ruleset_path = "%s/%s/security_groups_%s.yaml" % (config_dir, env, region)
    security_groups = YamlFileSecurityGroupsConfigLoader(ruleset_path).load()

    # Use a context manager so the credentials file handle is not leaked.
    with open('%s/aws.yaml' % config_dir, 'r') as aws_file:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects from tagged input; switch to yaml.safe_load
        # if aws.yaml is plain data.
        aws_config = yaml.load(aws_file.read())
    updater = SecurityGroupUpdater(Ec2SecurityGroupService(aws_config[env]))

    for name, security_group in security_groups.iteritems():
        created_new = updater.create_security_group_if_not_exists(name, security_group.description, region, vpc_id, dry_run)
        # Can't dry_run rules creation if the group doesn't actually exist yet
        if created_new and dry_run:
            continue
        updater.update_security_group_rules(name, security_group.rules, region, vpc_id, prune=security_group.prune, dry_run=dry_run)
    # Delete any groups that aren't listed in the ruleset config
    updater.delete_security_group_if(lambda group: group.name not in security_groups, region, vpc_id, dry_run=dry_run)
if __name__ == '__main__':
    # docopt parses the CLI using this module's docstring as the usage spec.
    options = docopt(__doc__, version='Dalton 0.2.0')
    main(path(options['<config-dir>']), options['<environment>'], options['<region>'], options['--vpc'], options['--dry-run'])
|
signal/dalton
|
dalton.py
|
Python
|
apache-2.0
| 2,184
|
[
"Dalton"
] |
90c63680e22af18bcb5065db278073f654b09fdada135139a610f1728e255e48
|
import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
import sys
import pickle
import gzip
import ngc_writer
def drawCircle(myscreen, c, r, circlecolor):
    """Add a circle outline (center c, radius r) to the VTK scene."""
    circle_actor = ovdvtk.Circle(center=(c.x, c.y, 0), radius=r,
                                 color=circlecolor, resolution=50)
    myscreen.addActor(circle_actor)
def drawPoint(myscreen, c, pcolor, rad=0.002):
    """Mark point c in the VTK scene with a small sphere of radius rad."""
    sphere_actor = ovdvtk.Sphere(center=(c.x, c.y, 0), radius=rad, color=pcolor)
    myscreen.addActor(sphere_actor)
# rotate by cos/sin. from emc2 gcodemodule.cc
def rotate(x, y, c, s):
    """Rotate the 2-D vector (x, y) by the angle whose cosine is c and sine is s.

    Returns the rotated vector as a two-element list [x', y'].
    (Cleaned up: the original used C-style semicolons and a manual temporary
    to avoid clobbering x before computing y.)
    """
    return [x * c - y * s, x * s + y * c]
def drawArc(myscreen, pt1, pt2, r, cen, cw, arcColor):
    """Render the arc pt1->pt2 (radius r, center cen, cw/ccw) as short line
    segments on the VTK screen, emitting a matching g-code line per segment.
    """
    # draw arc as many line-segments
    start = pt1 - cen
    end = pt2 - cen
    theta1 = math.atan2(start.x, start.y)
    theta2 = math.atan2(end.x, end.y)
    alfa = []  # the list of angles  # NOTE(review): alfa and da are unused
    da = 0.1
    CIRCLE_FUZZ = 1e-9
    # idea from emc2 / cutsim g-code interp G2/G3
    # Unwind theta2 so the sweep direction matches the cw flag.
    if (cw == False):
        while ((theta2 - theta1) > -CIRCLE_FUZZ):
            theta2 -= 2 * math.pi
    else:
        while ((theta2 - theta1) < CIRCLE_FUZZ):
            theta2 += 2 * math.pi
    dtheta = theta2 - theta1
    arclength = r * dtheta
    # segment length capped at 0.01, at least 10 segments per arc
    dlength = min(0.01, arclength / 10)
    steps = int(float(arclength) / float(dlength))
    rsteps = float(1) / float(steps)
    dc = math.cos(-dtheta * rsteps)  # delta-cos
    ds = math.sin(-dtheta * rsteps)  # delta-sin
    previous = pt1
    tr = [start.x, start.y]
    for i in range(steps):
        tr = rotate(tr[0], tr[1], dc, ds)  # ; // rotate center-start vector by a small amount
        x = cen.x + tr[0]
        y = cen.y + tr[1]
        current = ovd.Point(x, y)
        myscreen.addActor(ovdvtk.Line(p1=(previous.x, previous.y, 0), p2=(current.x, current.y, 0), color=arcColor))
        ngc_writer.xy_line_to(current.x, current.y)
        previous = current
def rapid_to_next(myscreen, prv_tang, nxt_tang, c1, r1, c2, r2, prv, nxt):
    """Rapid traverse from prv to nxt along the same machining branch:
    lead-out arc, straight rapid, lead-in arc. Arc radius is capped so the
    move stays inside the clearance circles of radius r1 and r2.

    NOTE(review): c1 and c2 are unused here -- only r1/r2 constrain the move.
    """
    # rapid from prev, to nxt
    # while staying inside c1(r1) and c2(r)
    rad_default = 0.03
    rad = min(rad_default, 0.9 * r1, 0.9 * r2)
    prv_tang.normalize()
    nxt_tang.normalize()
    prv_normal = -1 * prv_tang.xy_perp()
    nxt_normal = nxt_tang.xy_perp()
    cen1 = prv + rad * prv_normal # + rad1*prv_tang
    cen2 = nxt - rad * nxt_normal # rapid_tang # + rad1*prv_tang
    rapid_tang = cen2 - cen1
    rapid_tang.normalize()
    trg1 = cen1 + rad * rapid_tang.xy_perp() # prv_tang
    src2 = cen2 + rad * rapid_tang.xy_perp()
    drawArc(myscreen, prv, trg1, rad, cen1, True, ovdvtk.blue) # lead-out arc
    ovdvtk.drawLine(myscreen, trg1, src2, ovdvtk.cyan) # rapid
    ngc_writer.xy_line_to(src2.x, src2.y)
    drawArc(myscreen, src2, nxt, rad, cen2, True, ovdvtk.lblue) # lead-in arc
def rapid_to_new_branch(myscreen, prv_tang, nxt_tang, c1, r1, c2, r2, prv, nxt):
    """Rapid traverse from prv to nxt when moving to a NEW machining branch:
    lead-out arc, pen-up rapid, pen-down, lead-in arc. Separate radii are
    used at each end, capped by the local clearance radii r1 and r2.

    NOTE(review): c1 and c2 are unused here -- only r1/r2 constrain the move.
    """
    # rapid from prev, to nxt
    # while staying inside c1(r1) and c2(r)
    rad_default = 0.03
    rad1 = min(rad_default, 0.9 * r1) # wrong? we get the new-branch r1 here, while we would want the old-branch r1
    rad2 = min(rad_default, 0.9 * r2)
    prv_tang.normalize()
    nxt_tang.normalize()
    prv_normal = -1 * prv_tang.xy_perp()
    nxt_normal = nxt_tang.xy_perp()
    cen1 = prv + rad1 * prv_normal # + rad1*prv_tang
    cen2 = nxt - rad2 * nxt_normal # rapid_tang # + rad1*prv_tang
    rapid_tang = cen2 - cen1
    rapid_tang.normalize()
    trg1 = cen1 + rad1 * prv_tang
    src2 = cen2 - rad2 * nxt_tang
    drawArc(myscreen, prv, trg1, rad1, cen1, True, ovdvtk.orange) # lead-out arc
    ngc_writer.pen_up()
    ovdvtk.drawLine(myscreen, trg1, src2, ovdvtk.magenta) # rapid
    ngc_writer.xy_line_to(src2.x, src2.y)
    ngc_writer.pen_down()
    drawArc(myscreen, src2, nxt, rad2, cen2, True, ovdvtk.mag2) # lead-in arc
def final_lead_out(myscreen, prv_tang, nxt_tang, c1, r1, c2, r2, prv, nxt):
    """Draw only the final lead-out arc at the very end of machining.

    NOTE(review): nxt_tang, c1, c2, r2 and nxt are unused -- the signature
    mirrors the other rapid_* helpers for interchangeability.
    """
    rad_default = 0.03
    rad1 = min(rad_default, 0.9 * r1) # wrong? we get the new-branch r1 here, while we would want the old-branch r1
    prv_tang.normalize()
    prv_normal = -1 * prv_tang.xy_perp()
    cen1 = prv + rad1 * prv_normal # + rad1*prv_tang
    trg1 = cen1 + rad1 * prv_tang
    drawArc(myscreen, prv, trg1, rad1, cen1, True, ovdvtk.red) # lead-out arc
def spiral_clear(myscreen, out_tangent, in_tangent, c1, r1, c2, r2, out1, in1):
    """Clear a pocket region with an Archimedean spiral centered on c1,
    ending at in1, followed by one full circle at radius r1. Draws the path
    and emits the matching g-code.

    NOTE(review): out_tangent, in_tangent, c2, r2 and out1 are unused -- the
    signature mirrors the other motion helpers.
    """
    print "( spiral clear! )"
    ngc_writer.pen_up()
    # end spiral at in1
    # archimedean spiral
    # r = a + b theta
    in1_dir = in1 - c1
    in1_theta = math.atan2(in1_dir.y, in1_dir.x)
    # in1_theta = in1_theta
    # print "c1 =", c1
    # print "in1 = ",in1
    # print " end theta = ",in1_theta
    drawPoint(myscreen, c1, ovdvtk.red)
    # drawPoint( myscreen, in1, ovdvtk.blue, 0.006 )
    # width = 2*pi*b
    # => b = width/(2*pi)
    b = 0.01 / (2 * math.pi)
    # r = a + b in1_theta = r_max
    # =>
    # a = r_max-b*in1_theta
    a = r1 - b * in1_theta
    # figure out the start-angle: walk backwards until the spiral radius
    # drops below min_r
    theta_min = in1_theta
    theta_max = in1_theta
    dtheta = 0.1
    min_r = 0.001
    while True:
        r = a + b * theta_min
        if r < min_r:
            break
        else:
            theta_min = theta_min - dtheta
    # print "start_theta = ", theta_min
    Npts = (theta_max - theta_min) / dtheta
    Npts = int(Npts)
    # print "spiral has ",Npts," points"
    p = ovd.Point(c1)
    ngc_writer.xy_rapid_to(p.x, p.y)
    ngc_writer.pen_down()
    theta_end = 0
    # walk the spiral outwards from the center
    for n in range(Npts + 1):
        theta = theta_min + n * dtheta
        r = a + b * theta
        theta = theta - 2 * abs(in1_theta - math.pi / 2)
        trg = c1 + r * ovd.Point(-math.cos(theta), math.sin(theta))
        ovdvtk.drawLine(myscreen, p, trg, ovdvtk.pink)
        ngc_writer.xy_line_to(trg.x, trg.y)
        p = trg
        theta_end = theta
    # add a complete circle after the spiral.
    print "( spiral-clear: final circle )"
    Npts = (2 * math.pi) / dtheta
    Npts = int(Npts)
    for n in range(Npts + 2):
        theta = theta_end + (n + 1) * dtheta
        # theta = theta_min + n*dtheta
        r = r1 # a + b*theta
        # theta = theta - 2* abs(in1_theta - math.pi/2 )
        trg = c1 + r * ovd.Point(-math.cos(theta), math.sin(theta))
        ovdvtk.drawLine(myscreen, p, trg, ovdvtk.pink)
        ngc_writer.xy_line_to(trg.x, trg.y)
        # the very last point gets a larger marker
        if n != Npts + 1:
            drawPoint(myscreen, trg, ovdvtk.orange)
        else:
            drawPoint(myscreen, trg, ovdvtk.orange, 0.004)
        p = trg
        # if n == Npts-2:
        #    break
# return a list of points corresponding to an arc
def arc_pts(pt1, pt2, r, cen, cw):  # (start, end, radius, center, cw )
    """Sample the arc pt1->pt2 (radius r, center cen, cw/ccw) into a flat list
    of points [previous, current, previous, current, ...] -- i.e. segment
    endpoint pairs, suitable for line drawing.
    """
    # draw arc as many line-segments
    start = pt1 - cen
    end = pt2 - cen
    theta1 = math.atan2(start.x, start.y)
    theta2 = math.atan2(end.x, end.y)
    alfa = []  # the list of angles  # NOTE(review): alfa and da are unused
    da = 0.1
    CIRCLE_FUZZ = 1e-9
    # idea from emc2 / cutsim g-code interp G2/G3
    # Unwind theta2 so the sweep direction matches the cw flag.
    if (cw == False):
        while ((theta2 - theta1) > -CIRCLE_FUZZ):
            theta2 -= 2 * math.pi
    else:
        while ((theta2 - theta1) < CIRCLE_FUZZ):
            theta2 += 2 * math.pi
    dtheta = theta2 - theta1
    arclength = r * dtheta
    dlength = min(0.001, arclength / 10)
    steps = int(float(arclength) / float(dlength))
    rsteps = float(1) / float(steps)
    dc = math.cos(-dtheta * rsteps)  # delta-cos
    ds = math.sin(-dtheta * rsteps)  # delta-sin
    previous = pt1
    tr = [start.x, start.y]
    pts = []
    for i in range(steps):
        # f = (i+1) * rsteps #; // varies from 1/rsteps..1 (?)
        # theta = theta1 + i* dtheta
        tr = rotate(tr[0], tr[1], dc, ds)  # ; // rotate center-start vector by a small amount
        x = cen.x + tr[0]
        y = cen.y + tr[1]
        current = ovd.Point(x, y)
        # myscreen.addActor( ovdvtk.Line(p1=(previous.x,previous.y,0),p2=(current.x,current.y,0),color=arcColor) )
        pts.extend([previous, current])
        previous = current
    return pts
# return a list of points corresponding to an arc
# don't return the initial points, we already have that!
def arc_pts2(pt1, pt2, r, cen, cw):  # (start, end, radius, center, cw )
    """Sample the arc pt1->pt2 (radius r, center cen, cw/ccw) into a list of
    points. Unlike arc_pts(), each sampled point appears once and the start
    point pt1 is omitted (the caller already has it).
    """
    # draw arc as many line-segments
    start = pt1 - cen
    end = pt2 - cen
    theta1 = math.atan2(start.x, start.y)
    theta2 = math.atan2(end.x, end.y)
    alfa = []  # the list of angles  # NOTE(review): alfa, da, previous unused
    da = 0.1
    CIRCLE_FUZZ = 1e-9
    # idea from emc2 / cutsim g-code interp G2/G3
    # Unwind theta2 so the sweep direction matches the cw flag.
    if (cw == False):
        while ((theta2 - theta1) > -CIRCLE_FUZZ):
            theta2 -= 2 * math.pi
    else:
        while ((theta2 - theta1) < CIRCLE_FUZZ):
            theta2 += 2 * math.pi
    dtheta = theta2 - theta1
    arclength = r * dtheta
    dlength = min(0.001, arclength / 10)
    steps = int(float(arclength) / float(dlength))
    rsteps = float(1) / float(steps)
    dc = math.cos(-dtheta * rsteps)  # delta-cos
    ds = math.sin(-dtheta * rsteps)  # delta-sin
    previous = pt1
    tr = [start.x, start.y]
    pts = []
    for i in range(steps):
        # f = (i+1) * rsteps #; // varies from 1/rsteps..1 (?)
        # theta = theta1 + i* dtheta
        tr = rotate(tr[0], tr[1], dc, ds)  # ; // rotate center-start vector by a small amount
        x = cen.x + tr[0]
        y = cen.y + tr[1]
        current = ovd.Point(x, y)
        pts.append(current)
        previous = current
    return pts
# faster drawing of offsets using vtkPolyData
def drawOffsets2(myscreen, ofs, offsetcolor=ovdvtk.lgreen):
    """Render offset loops as a single vtkPolyData actor.

    ofs is a list of loops; each loop element is (point, r, center, cw) where
    r == -1 marks a straight segment and any other r an arc. Arcs are sampled
    into short segments with arc_pts(); everything is then packed into one
    vtkPolyData with line cells for fast rendering.
    """
    # draw loops
    nloop = 0
    lineColor = offsetcolor  # NOTE(review): lineColor, arcColor, N and
    arcColor = ovdvtk.green # grass  # first_point are unused below
    ofs_points = []
    # First pass: flatten every loop into a list of segment endpoints.
    for lop in ofs:
        points = []
        n = 0
        N = len(lop)
        first_point = []
        previous = []
        for p in lop:
            # p[0] is the Point
            # p[1] is -1 for lines, and r for arcs
            if n == 0: # don't draw anything on the first iteration
                previous = p[0]
                # first_point = p[0]
            else:
                cw = p[3] # cw/ccw flag
                cen = p[2] # center
                r = p[1] # radius
                p = p[0] # target point
                if r == -1: # r=-1 means line-segment
                    points.extend([previous, p]) # drawLine(myscreen, previous, p, lineColor)
                else: # otherwise we have an arc
                    points.extend(arc_pts(previous, p, r, cen, cw))
                previous = p
            n = n + 1
        ofs_points.append(points)
        # print "rendered loop ",nloop, " with ", len(lop), " points"
        nloop = nloop + 1
    # now draw each loop with polydata
    oPoints = vtk.vtkPoints()
    lineCells = vtk.vtkCellArray()
    # self.colorLUT = vtk.vtkLookupTable()
    # print len(ofs_points)," loops to render:"
    idx = 0
    last_idx = 0
    # Second pass: insert all points and connect consecutive pairs with
    # vtkLine cells (no cell spans two different loops: 'first' resets).
    for of in ofs_points:
        epts = of
        segs = []
        first = 1
        # print " loop with ", len(epts)," points"
        for p in epts:
            oPoints.InsertNextPoint(p.x, p.y, 0)
            if first == 0:
                seg = [last_idx, idx]
                segs.append(seg)
            first = 0
            last_idx = idx
            idx = idx + 1
        # create line and cells
        for seg in segs:
            line = vtk.vtkLine()
            line.GetPointIds().SetId(0, seg[0])
            line.GetPointIds().SetId(1, seg[1])
            # print " indexes: ", seg[0]," to ",seg[1]
            lineCells.InsertNextCell(line)
    linePolyData = vtk.vtkPolyData()
    linePolyData.SetPoints(oPoints)
    linePolyData.SetLines(lineCells)
    linePolyData.Modified()
    # linePolyData.Update()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(linePolyData)
    edge_actor = vtk.vtkActor()
    edge_actor.SetMapper(mapper)
    edge_actor.GetProperty().SetColor(offsetcolor)
    myscreen.addActor(edge_actor)
def insert_polygon_points(vd, pts):
    """Insert every point in pts as a vertex site of the VoronoiDiagram vd.

    Returns the list of vertex ids assigned by the diagram, in input order.
    """
    return [vd.addVertexSite(p) for p in pts]
def insert_polygon_segments(vd, id_list):
    """Connect consecutive vertex ids with line sites in vd, closing the
    polygon by linking the last id back to the first.
    """
    count = len(id_list)
    for i in range(count):
        vd.addLineSite(id_list[i], id_list[(i + 1) % count])
# give ofsets ofs
# insert points and line-segments in the vd
def insert_offset_loop(vd, ofs):
    """Insert offset loops into the VoronoiDiagram vd as point + segment sites.

    Arcs are first sampled into points with arc_pts2(); each loop is then
    inserted as vertex sites followed by closing line sites.
    Returns [point_insert_seconds, segment_insert_seconds].
    """
    polygon_ids = []
    # create segs from ofs
    segs = []
    previous = ovd.Point()
    for ofloop in ofs:
        loop = []
        first = True
        for of in ofloop:
            # print of
            if first:
                # loop.append( of[0] )
                previous = of[0]
                first = False
            else:
                cw = of[3] # cw/ccw flag
                cen = of[2] # center
                r = of[1] # radius
                p = of[0] # target point
                if r == -1: # r=-1 means line-segment
                    loop.append(p) # points.extend( [previous,p] ) #drawLine(myscreen, previous, p, lineColor)
                else: # otherwise we have an arc
                    loop.extend(arc_pts2(previous, p, r, cen, cw))
                # points.extend( arc_pts( previous, p, r,cen,cw) )
                previous = p
                # loop.append(p)
        segs.append(loop)
    # print segs
    t_before = time.time()
    for poly in segs:
        poly_id = insert_polygon_points(vd, poly)
        polygon_ids.append(poly_id)
    t_after = time.time()
    pt_time = t_after - t_before
    t_before = time.time()
    for ids in polygon_ids:
        insert_polygon_segments(vd, ids)
    t_after = time.time()
    seg_time = t_after - t_before
    return [pt_time, seg_time]
# a simple class with a write method
class WritableObject:
    """Minimal file-like sink that records everything written to it.

    Used to capture sys.stdout so the generated g-code can be collected.
    """

    def __init__(self):
        # Each write() call is stored as a separate entry, in order.
        self.content = []

    def write(self, string):
        self.content.append(string)
if __name__ == "__main__":
    # --- set up the VTK render window and camera -------------------------
    w = 1920
    h = 1080
    myscreen = ovdvtk.VTKScreen(width=w, height=h)
    ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
    scale = 1
    myscreen.render()
    random.seed(42)
    far = 1
    camPos = far
    zmult = 1.8
    myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
    myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
    myscreen.camera.SetFocalPoint(0.0, 0.22, 0)
    # redirect stdout to a WritableObject so the generated g-code can be
    # written to a file at the end of the run
    foo = WritableObject()  # a writable object
    sys.stdout = foo  # redirection
    print "( Medial-Axis pocketing. Proof-of-principle. 2012-02-12 )"
    print "( OpenVoronoi %s )" % (ovd.version())
    print "( TOOL/MILL,10,0,50 ) "
    print "( COLOR,0,255,255 ) "
    print "( STOCK/BLOCK,700.0000,400.0000,10.0000,350.0000,160.0000,5.0000 ) "
    linesegs = 1  # switch to turn on/off line-segments
    segs = []
    eps = 0.9
    # --- input polygon: five vertices, inserted into VD1 -----------------
    p1 = ovd.Point(-0.1, -0.2)
    p2 = ovd.Point(0.2, 0.1)
    p3 = ovd.Point(0.4, 0.2)
    p4 = ovd.Point(0.6, 0.6)
    p5 = ovd.Point(-0.6, 0.3)
    pts = [p1, p2, p3, p4, p5]
    vd = ovd.VoronoiDiagram(far, 120)
    times = []
    id_list = []
    m = 0
    t_before = time.time()
    for p in pts:
        id_list.append(vd.addVertexSite(p))
        m = m + 1
    t_after = time.time()
    times.append(t_after - t_before)
    vd.check()
    # close the polygon with line sites
    t_before = time.time()
    vd.addLineSite(id_list[0], id_list[1])
    vd.addLineSite(id_list[1], id_list[2])
    vd.addLineSite(id_list[2], id_list[3])
    vd.addLineSite(id_list[3], id_list[4])
    vd.addLineSite(id_list[4], id_list[0])
    t_after = time.time()
    times.append(t_after - t_before)
    vd.check()
    print "( VD1 done in %.3f s. )" % (sum(times))
    # --- offset the polygon inward (exterior filtered away) --------------
    pi = ovd.PolygonInterior(False)
    vd.filter_graph(pi)
    of = ovd.Offset(vd.getGraph())  # pass the created graph to the Offset class
    ofs_list = []
    t_before = time.time()
    ofs = of.offset(0.015)
    t_after = time.time()
    print "( VD1 OFFSET in %.3f s. )" % (1e3 * (t_after - t_before))
    drawOffsets2(myscreen, ofs)
    # now create a new VD from the offset
    vd2 = ovd.VoronoiDiagram(1, 120)
    tim2 = insert_offset_loop(vd2, ofs)
    print "( VD2 done in %.3f s. )" % (sum(tim2))
    # now offset outward
    pi = ovd.PolygonInterior(True)
    vd2.filter_graph(pi)
    of = ovd.Offset(vd2.getGraph())  # pass the created graph to the Offset class
    t_before = time.time()
    ofs = of.offset(0.015)
    t_after = time.time()
    print "( VD2 OFFSET in %.3f s. )" % (1e3 * (t_after - t_before))
    drawOffsets2(myscreen, ofs, ovdvtk.pink)
    # --- VD3: the diagram actually used for medial-axis pocketing --------
    vd3 = ovd.VoronoiDiagram(1, 120)
    times = insert_offset_loop(vd3, ofs)
    print "( VD3 done in %.3f s. )" % (sum(times))
    vod3 = ovdvtk.VD(myscreen, vd3, float(scale), textscale=0.01, vertexradius=0.003)
    vod3.textScale = 0.0002
    vod3.vertexRadius = 0.0031
    vod3.drawVertices = 0
    vod3.drawVertexIndex = 1
    vod3.drawGenerators = 0
    vod3.offsetEdges = 0
    vod3.setVDText2(times)
    # keep only the interior medial axis of VD3
    pi = ovd.PolygonInterior(True)
    vd3.filter_graph(pi)
    ma = ovd.MedialAxis()
    vd3.filter_graph(ma)
    vod3.setAll()
    myscreen.render()
    myscreen.iren.Start()
    # --- run the medial-axis pocketing algorithm -------------------------
    mapocket = ovd.MedialAxisPocket(vd3.getGraph())
    mapocket.setWidth(0.01)
    mapocket.debug(False)
    t_before = time.time()
    mapocket.run()
    mic_list = mapocket.get_mic_list()
    t_after = time.time()
    print "( MA-pocket done in %.3f s. Got %d MICs )" % ((t_after - t_before), len(mic_list))
    maxmic = mic_list[0]
    previous_center = maxmic[0]
    previous_radius = maxmic[1]
    cl = ovd.Point(0, 0)
    # the initial largest MIC. to be cleared with a spiral-path
    drawCircle(myscreen, maxmic[0], maxmic[1], ovdvtk.red)
    ngc_writer.scale = 10 / 0.03
    ngc_writer.preamble()
    # the rest of the MICs are then cleared
    nframe = 0
    first = True
    previous_out1 = ovd.Point()
    out_tangent = ovd.Point()
    in_tangent = ovd.Point()
    for n in range(1, len(mic_list)):
        mic = mic_list[n]
        if 0:  # debug switch: stop early at a given frame
            break
        # unpack the MIC record; indices per MedialAxisPocket's output:
        # 0 center, 1 radius, 2-5 bi-tangent points, 6/7 previous MIC,
        # 8 new-branch flag, 9/10 previous branch MIC
        cen2 = mic[0]
        r2 = mic[1]
        previous_center = mic[6]
        previous_radius = mic[7]
        new_branch = mic[8]  # true/false indicates if we are starting on new branch
        prev_branch_center = mic[9]
        prev_branch_radius = mic[10]  # old branch MIC radius
        in1 = mic[3]
        in2 = mic[5]
        out2 = mic[4]
        out1 = mic[2]
        in_tangent = in2 - in1
        # rapid traverse to in1
        if not first:
            if new_branch:
                # new branch re-position move
                rapid_to_new_branch(myscreen, out_tangent, in_tangent, prev_branch_center, prev_branch_radius, cen2, r2,
                                    previous_out1, in1)
            else:
                # normal arc-rapid-arc to next MIC
                rapid_to_next(myscreen, out_tangent, in_tangent, previous_center, previous_radius, cen2, r2,
                              previous_out1, in1)
        else:
            # spiral-clear the start-MIC. The spiral should end at in1
            spiral_clear(myscreen, out_tangent, in_tangent, previous_center, previous_radius, cen2, r2, previous_out1,
                         in1)
        first = False
        # in bi-tangent
        ovdvtk.drawLine(myscreen, in1, in2, ovdvtk.green)
        ngc_writer.xy_line_to(in2.x, in2.y)
        # cutting arc around the MIC
        drawArc(myscreen, in2, out2, r2, cen2, True, ovdvtk.green)
        # out bi-tangent
        ovdvtk.drawLine(myscreen, out2, out1, ovdvtk.green)
        ngc_writer.xy_line_to(out1.x, out1.y)
        previous_out1 = out1  # this is used as the start-point for the rapid on the next iteration
        out_tangent = out1 - out2
        if n == len(mic_list) - 1:
            # end of operation. do a final lead-out arc.
            final_lead_out(myscreen, out_tangent, in_tangent, previous_center, previous_radius, cen2, r2, previous_out1,
                           in1)
        nframe = nframe + 1
    ngc_writer.postamble()
    # --- write the captured g-code to disk and restore stdout ------------
    sys.stdout = sys.__stdout__  # remember to reset sys.stdout!
    f = open('output.nc', 'w')
    for item in foo.content:
        if item != '\n':
            print>> f, item
    f.close()
    print "python done."
    myscreen.render()
    myscreen.iren.Start()
|
aewallin/openvoronoi
|
python_examples/ma-pocket/ma_pocket_09_external.py
|
Python
|
lgpl-2.1
| 21,908
|
[
"VTK"
] |
4da5b4c61f0c64722071466a1e78ccae57710cd99f34dec6d323ae2cb27ba81e
|
from scipy.io import netcdf
import glob
def getRestartTime():
    """Locate the newest FVCOM restart file and return its last time stamp.

    Returns
    -------
    time : str
        The final 'Times' record of the restart file, single-quoted, with
        the ISO 8601 'T' date/time separator replaced by a space.
    name : str
        The restart file path, single-quoted.

    Raises
    ------
    ValueError
        If no restart file is found in the output directory.
    """
    datadir = '/home/daugue6/capeislerestart/output/'
    # keep only the restart output files among the .nc files
    restart_files = [f for f in glob.glob(datadir + "*.nc") if "restart" in f]
    if not restart_files:
        raise ValueError("no restart files found in {}".format(datadir))
    # the 4-digit sequence number sits just before the '.nc' suffix;
    # the largest number marks the latest restart
    filenums = [int(f[-7:-3]) for f in restart_files]
    # BUG FIX: index into restart_files, not the unfiltered glob list --
    # the two lists differ whenever non-restart .nc files are present.
    latest_file = restart_files[filenums.index(max(filenums))]
    # we need the times data from the restart file
    ncid = netcdf.netcdf_file(latest_file, 'r')
    Times = ncid.variables['Times'].data
    ind = Times.shape[0] - 1
    # join the characters of the last record into a single quoted string,
    # replacing the 'T' separator with a space
    # NOTE(review): assumes Times holds text characters as in the original
    # Python 2 code -- confirm element type under Python 3 / newer scipy.
    time = "\'"
    for i in Times[ind, :]:
        if i == 'T':
            time += ' '
        else:
            time += i
    time += "\'"
    name = "\'{}\'".format(latest_file)
    return time, name
|
wesleybowman/aidan-projects
|
placentia/restartConfig.py
|
Python
|
gpl-2.0
| 974
|
[
"NetCDF"
] |
5cd6d09b4e1117969aa513969fdb75cc0274d3abff9c99797de6676f5939ffa0
|
from django.db import models
from django.contrib.auth.models import User
# Choice tuples for model CharFields: (db value, human-readable label).
CAMPAIGN_TYPE = (
    ('S', 'Standard'),
    ('P', 'Mini-Site'),
)
STATUS = (
    ('A', 'Active'),
    ('I', 'Inactive'),
)
PCAMPAIGN_LINK = (
    ('FB', 'Facebook Page'),
    ('TW', 'Twitter Page'),
    ('NOTE', 'Send us a Note'),
    ('WEB', 'Visit Website'),
)
# Premium theme code -> display name.
THEME_LOOKUP = {
    'a': 'Black',
    'b': 'Blue',
    'c': 'Dark Gray',
    'd': 'Light Gray',
    'e': 'Yellow',
}
# Link code -> display text; the pairs are exactly PCAMPAIGN_LINK,
# so derive the mapping instead of repeating it.
PCAMPAIGN_LINK_TEXT_LOOKUP = dict(PCAMPAIGN_LINK)
class Campaign(models.Model):
    """A QR campaign owned by a user; premium_* fields apply to mini-sites."""
    user = models.ForeignKey(User, db_index=True)
    name = models.CharField(max_length=80,)
    data = models.CharField(max_length=7089,)
    campaign_type = models.CharField(max_length=1, choices=CAMPAIGN_TYPE, default='S')
    premium_title = models.CharField(max_length=40, default='', null=True, blank=True)
    premium_theme = models.CharField(max_length=1, default='', null=True, blank=True)
    premium_header_theme = models.CharField(max_length=1, default='', null=True, blank=True)
    status = models.CharField(max_length=1, choices=STATUS, default='A')
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)
    def d_status(self):
        # human-readable label for the status code
        return dict(STATUS).get(self.status, 'N/A')
    def d_campaign_type(self):
        # human-readable label for the campaign-type code
        return dict(CAMPAIGN_TYPE).get(self.campaign_type, 'N/A')
    def d_premium_theme(self):
        # display name of the body theme code
        return THEME_LOOKUP.get(self.premium_theme, 'N/A')
    def d_premium_header_theme(self):
        # display name of the header theme code
        return THEME_LOOKUP.get(self.premium_header_theme, 'N/A')
class Note(models.Model):
    """A visitor-submitted note attached to a campaign."""
    campaign = models.ForeignKey(Campaign, db_index=True)
    # max_length=39 fits an IPv6 address in textual form
    ip = models.CharField(max_length=39,)
    email = models.EmailField(max_length=75)
    note = models.CharField(max_length=1024)
    user_agent = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
class Premium(models.Model):
    """An ordered action link (Facebook/Twitter/Note/Website) on a mini-site."""
    campaign = models.ForeignKey(Campaign, db_index=True)
    seq = models.PositiveSmallIntegerField(default=1)
    name = models.CharField(max_length=10, choices=PCAMPAIGN_LINK)
    val = models.CharField(max_length=255, null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)
    def d_name(self):
        # display text for the link code
        return PCAMPAIGN_LINK_TEXT_LOOKUP.get(self.name, 'N/A')
class Scan(models.Model):
    """One QR-code scan event recorded against a campaign."""
    campaign = models.ForeignKey(Campaign, db_index=True)
    # max_length=39 fits an IPv6 address in textual form
    ip = models.CharField(max_length=39,)
    refer = models.CharField(max_length=255)
    user_agent = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
class Click(models.Model):
    """A click on one of a campaign's action links."""
    campaign = models.ForeignKey(Campaign, db_index=True)
    click_type = models.CharField(max_length=10, choices=PCAMPAIGN_LINK)
    ip = models.CharField(max_length=39,)
    refer = models.CharField(max_length=255)
    user_agent = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    def d_click_type(self):
        # display text for the clicked link code
        return PCAMPAIGN_LINK_TEXT_LOOKUP.get(self.click_type, 'N/A')
|
TheAmbitiousInc/com.theambitious.qrtrace.web
|
web/campaign/models.py
|
Python
|
mit
| 3,777
|
[
"VisIt"
] |
ffeba68da876133fca81e9c0ec84acdbb794d7b3bf6f16c60bbeeb98001538d5
|
from PeacockActor import PeacockActor
import vtk
from vtk.util.colors import peacock, tomato, red, white, black
class GeneratedMeshActor(PeacockActor):
    """Peacock actor that renders the surface of a generated mesh via VTK."""
    def __init__(self, renderer, mesh):
        """Build the surface-filter -> mapper -> actor pipeline for mesh."""
        PeacockActor.__init__(self, renderer)
        self.mesh = mesh
        # extract the outer surface of the data set for rendering
        self.geom = vtk.vtkDataSetSurfaceFilter()
        self.geom.SetInput(self.mesh)
        self.geom.Update()
        self.mapper = vtk.vtkPolyDataMapper()
        self.mapper.SetInput(self.geom.GetOutput())
        self.actor = vtk.vtkActor();
        self.actor.SetMapper(self.mapper);
        self.actor.GetProperty().SetPointSize(5)
        # black mesh edges
        self.actor.GetProperty().SetEdgeColor(0,0,0)
        self.actor.GetProperty().SetAmbient(0.3);
    def getBounds(self):
        """Return the actor's axis-aligned bounding box."""
        return self.actor.GetBounds()
    def _show(self):
        # add the actor to the renderer (PeacockActor hook)
        self.renderer.AddActor(self.actor)
    def _hide(self):
        self.renderer.RemoveActor(self.actor)
    def _showEdges(self):
        self.actor.GetProperty().EdgeVisibilityOn()
    def _hideEdges(self):
        self.actor.GetProperty().EdgeVisibilityOff()
    def _goSolid(self):
        self.actor.GetProperty().SetRepresentationToSurface()
    def _goWireframe(self):
        self.actor.GetProperty().SetRepresentationToWireframe()
    def _setColor(self, color):
        self.actor.GetProperty().SetColor(color)
|
gleicher27/Tardigrade
|
moose/gui/vtk/GeneratedMeshActor.py
|
Python
|
lgpl-2.1
| 1,233
|
[
"VTK"
] |
b620e1da08ea00aa62ea3c415c4feb64d363a247ca97f0e665083ea481438749
|
#!/usr/bin/env python
__author__ = "waroquiers"
import os
import shutil
import networkx as nx
from pymatgen.analysis.chemenv.connectivity.environment_nodes import (
EnvironmentNode,
get_environment_node,
)
from pymatgen.util.testing import PymatgenTest
try:
import bson # type: ignore # Ignore bson import for mypy
except ModuleNotFoundError:
bson = None
# Directory holding the chemenv JSON test fixtures.
json_files_dir = os.path.join(
    PymatgenTest.TEST_FILES_DIR,
    "chemenv",
    "json_test_files",
)
class EnvironmentNodesTest(PymatgenTest):
    """Unit tests for EnvironmentNode equality, serialization and printing."""
    def test_equal(self):
        # Equality is keyed on i_central_site only (see the en/en1/en3 cases);
        # everything_equal additionally compares site and ce_symbol.
        s = self.get_structure("SiO2")
        en = EnvironmentNode(central_site=s[0], i_central_site=0, ce_symbol="T:4")
        en1 = EnvironmentNode(central_site=s[2], i_central_site=0, ce_symbol="T:4")
        assert en == en1
        assert not en.everything_equal(en1)
        en2 = EnvironmentNode(central_site=s[0], i_central_site=3, ce_symbol="T:4")
        assert en != en2
        assert not en.everything_equal(en2)
        en3 = EnvironmentNode(central_site=s[0], i_central_site=0, ce_symbol="O:6")
        assert en == en3
        assert not en.everything_equal(en3)
        en4 = EnvironmentNode(central_site=s[0], i_central_site=0, ce_symbol="T:4")
        assert en == en4
        assert en.everything_equal(en4)
    def test_as_dict(self):
        # round-trip through as_dict/from_dict, and through BSON if available
        s = self.get_structure("SiO2")
        en = EnvironmentNode(central_site=s[2], i_central_site=2, ce_symbol="T:4")
        en_from_dict = EnvironmentNode.from_dict(en.as_dict())
        assert en.everything_equal(en_from_dict)
        if bson is not None:
            bson_data = bson.BSON.encode(en.as_dict())
            en_from_bson = EnvironmentNode.from_dict(bson_data.decode())
            assert en.everything_equal(en_from_bson)
    def test_str(self):
        # str() shows the site index, species symbol and environment symbol
        s = self.get_structure("SiO2")
        en = EnvironmentNode(central_site=s[2], i_central_site=2, ce_symbol="T:4")
        assert str(en) == "Node #2 Si (T:4)"
if __name__ == "__main__":
    import unittest
    unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/analysis/chemenv/connectivity/tests/test_environment_nodes.py
|
Python
|
mit
| 2,025
|
[
"pymatgen"
] |
4798211c12cd0ad241dbd50a0a2ce4cd58187d09fb4390ac17780df47259c62b
|
from __future__ import absolute_import
# Problem-description input for the test and the VTK file it will produce.
input_name = '../examples/multi_physics/piezo_elasticity.py'
output_name = 'test_piezo_elasticity.vtk'
from tests_basic import TestInput
class Test( TestInput ):
    """Input test for the piezo-elasticity example, checking essential BCs."""
    def from_conf( conf, options ):
        # alternate constructor delegating to TestInput with this subclass
        return TestInput.from_conf( conf, options, cls = Test )
    from_conf = staticmethod( from_conf )
    def test_ebc( self ):
        """Check that make_full_vec() preserves the stripped DOF values
        of both variables ('u', 'phi') and applies the essential BCs."""
        import numpy as nm
        from sfepy.discrete import Problem
        pb = Problem.from_conf(self.test_conf)
        pb.time_update()
        vvs = pb.get_variables()
        setv = vvs.set_state_part
        make_full = vvs.make_full_vec
        # fill the stripped state: u <- 1.0, phi <- 2.0
        svec_u = nm.ones( (vvs.adi.n_dof['u'],), dtype = nm.float64 )
        svec_phi = nm.empty( (vvs.adi.n_dof['phi'],), dtype = nm.float64 )
        svec_phi.fill( 2.0 )
        svec = vvs.create_stripped_state_vector()
        setv( svec, svec_u, 'u', stripped = True )
        setv( svec, svec_phi, 'phi', stripped = True )
        vec = make_full( svec )
        # indices of the active (non-EBC) DOFs in the full vector
        ii_u = vvs.di.indx['u'].start + vvs['u'].eq_map.eqi
        ii_phi = vvs.di.indx['phi'].start + vvs['phi'].eq_map.eqi
        ok_ebc = vvs.has_ebc( vec )
        ok_u = nm.all( vec[ii_u] == svec_u )
        ok_phi = nm.all( vec[ii_phi] == svec_phi )
        msg = '%s: %s'
        self.report( msg % ('ebc', ok_ebc) )
        self.report( msg % ('u', ok_u) )
        self.report( msg % ('phi', ok_phi) )
        ok = ok_ebc and ok_u and ok_phi
        return ok
|
vlukes/sfepy
|
tests/test_input_piezo_elasticity.py
|
Python
|
bsd-3-clause
| 1,463
|
[
"VTK"
] |
f6b786e18a2cb821654d56f1fa2b25b08a0abc91c344a39a8c5802ea004dbdbf
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.integrator.MDIntegrator
**********************************
.. function:: espressopp.integrator.MDIntegrator.addExtension(extension)
:param extension:
:type extension:
:rtype:
.. function:: espressopp.integrator.MDIntegrator.getExtension(k)
:param k:
:type k:
:rtype:
.. function:: espressopp.integrator.MDIntegrator.getNumberOfExtensions()
:rtype:
.. function:: espressopp.integrator.MDIntegrator.run(niter)
:param niter:
:type niter:
:rtype:
"""
from espressopp import pmi
from _espressopp import integrator_MDIntegrator
import sys
class MDIntegratorLocal(object):
    """Per-rank proxy around the C++ MD integrator (cxxclass).

    Every public call is forwarded to C++ only on ranks that should
    execute it, i.e. when PMI is inactive or this MPI rank belongs to
    the active CPU group.
    """
    def _forwards_to_cxx(self):
        # True when this rank should forward calls to the C++ side.
        return (not (pmi._PMIComm and pmi._PMIComm.isActive())
                or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())
    def run(self, niter):
        """Advance the integrator by ``niter`` time steps."""
        if not isinstance(niter, int):
            raise ValueError('The provided number of steps have to be an integer not {} with value {}'.format(type(niter), niter))
        if self._forwards_to_cxx():
            return self.cxxclass.run(self, niter)
    def addExtension(self, extension):
        """Register an integrator extension and connect its signals."""
        if self._forwards_to_cxx():
            # make the extension aware of this integrator, then hook it up
            extension.cxxclass.setIntegrator(extension, self)
            extension.cxxclass.connect(extension)
            return self.cxxclass.addExtension(self, extension)
    def getExtension(self, k):
        """Return the k-th registered extension."""
        if self._forwards_to_cxx():
            return self.cxxclass.getExtension(self, k)
    def getNumberOfExtensions(self):
        """Return the number of registered extensions."""
        if self._forwards_to_cxx():
            return self.cxxclass.getNumberOfExtensions(self)
# On the controller rank, publish a PMI proxy class that forwards property
# access ('dt', 'step') and method calls to the per-rank MDIntegratorLocal
# instances.
if pmi.isController :
    class MDIntegrator(metaclass=pmi.Proxy):
        pmiproxydefs = dict(
            pmiproperty = [ 'dt', 'step' ],
            pmicall = [ 'run', 'addExtension', 'getExtension', 'getNumberOfExtensions' ]
            )
|
espressopp/espressopp
|
src/integrator/MDIntegrator.py
|
Python
|
gpl-3.0
| 3,045
|
[
"ESPResSo"
] |
e06c22e3a1df666a2ade83930f922ee4b657afded7c66ef05a774b860b482ade
|
"""User-friendly public interface to polynomial functions. """
from __future__ import print_function, division
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple
)
from sympy.core.mul import _keep_coeff
from sympy.core.symbol import Symbol
from sympy.core.basic import preorder_traversal
from sympy.core.relational import Relational
from sympy.core.sympify import sympify
from sympy.core.decorators import _sympifyit
from sympy.core.function import Derivative
from sympy.logic.boolalg import BooleanAtom
from sympy.polys.polyclasses import DMP
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.monomials import Monomial
from sympy.polys.orderings import monomial_key
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.utilities import group, sift, public
import sympy.polys
import mpmath
from mpmath.libmp.libhyper import NoConvergence
from sympy.polys.domains import FF, QQ, ZZ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
from sympy.core.compatibility import iterable, range
@public
class Poly(Expr):
"""Generic class for representing polynomial expressions. """
__slots__ = ['rep', 'gens']
is_commutative = True
is_Poly = True
    def __new__(cls, rep, *gens, **args):
        """Create a new polynomial instance out of something useful.

        Dispatches on the type of ``rep``: dict -> ``_from_dict``, other
        iterables -> ``_from_list``, Poly -> ``_from_poly``, anything
        sympifiable -> ``_from_expr``.
        """
        opt = options.build_options(gens, args)
        if 'order' in opt:
            raise NotImplementedError("'order' keyword is not implemented yet")
        # strings are iterable but must be parsed as expressions, not lists
        if iterable(rep, exclude=str):
            if isinstance(rep, dict):
                return cls._from_dict(rep, opt)
            else:
                return cls._from_list(list(rep), opt)
        else:
            rep = sympify(rep)
            if rep.is_Poly:
                return cls._from_poly(rep, opt)
            else:
                return cls._from_expr(rep, opt)
    @classmethod
    def new(cls, rep, *gens):
        """Construct :class:`Poly` instance from raw representation. """
        if not isinstance(rep, DMP):
            raise PolynomialError(
                "invalid polynomial representation: %s" % rep)
        # the DMP nesting level must match the number of generators
        elif rep.lev != len(gens) - 1:
            raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
        obj = Basic.__new__(cls)
        obj.rep = rep
        obj.gens = gens
        return obj
    # The four public from_* constructors are thin wrappers that build an
    # Options object and delegate to the corresponding private _from_*.
    @classmethod
    def from_dict(cls, rep, *gens, **args):
        """Construct a polynomial from a ``dict``. """
        opt = options.build_options(gens, args)
        return cls._from_dict(rep, opt)
    @classmethod
    def from_list(cls, rep, *gens, **args):
        """Construct a polynomial from a ``list``. """
        opt = options.build_options(gens, args)
        return cls._from_list(rep, opt)
    @classmethod
    def from_poly(cls, rep, *gens, **args):
        """Construct a polynomial from a polynomial. """
        opt = options.build_options(gens, args)
        return cls._from_poly(rep, opt)
    @classmethod
    def from_expr(cls, rep, *gens, **args):
        """Construct a polynomial from an expression. """
        opt = options.build_options(gens, args)
        return cls._from_expr(rep, opt)
    @classmethod
    def _from_dict(cls, rep, opt):
        """Construct a polynomial from a ``dict`` of monomial -> coefficient. """
        gens = opt.gens
        if not gens:
            raise GeneratorsNeeded(
                "can't initialize from 'dict' without generators")
        level = len(gens) - 1
        domain = opt.domain
        if domain is None:
            # infer the smallest suitable ground domain from the coefficients
            domain, rep = construct_domain(rep, opt=opt)
        else:
            # coerce the coefficients into the requested domain in place
            for monom, coeff in rep.items():
                rep[monom] = domain.convert(coeff)
        return cls.new(DMP.from_dict(rep, level, domain), *gens)
    @classmethod
    def _from_list(cls, rep, opt):
        """Construct a polynomial from a ``list`` of coefficients (univariate only). """
        gens = opt.gens
        if not gens:
            raise GeneratorsNeeded(
                "can't initialize from 'list' without generators")
        elif len(gens) != 1:
            # a flat coefficient list is ambiguous for several generators
            raise MultivariatePolynomialError(
                "'list' representation not supported")
        level = len(gens) - 1
        domain = opt.domain
        if domain is None:
            # infer the smallest suitable ground domain from the coefficients
            domain, rep = construct_domain(rep, opt=opt)
        else:
            rep = list(map(domain.convert, rep))
        return cls.new(DMP.from_list(rep, level, domain), *gens)
    @classmethod
    def _from_poly(cls, rep, opt):
        """Construct a polynomial from a polynomial, honouring gens/domain/field options. """
        if cls != rep.__class__:
            rep = cls.new(rep.rep, *rep.gens)
        gens = opt.gens
        field = opt.field
        domain = opt.domain
        if gens and rep.gens != gens:
            if set(rep.gens) != set(gens):
                # different generator set: rebuild from the expression form
                return cls._from_expr(rep.as_expr(), opt)
            else:
                # same generators, different order: just reorder
                rep = rep.reorder(*gens)
        if 'domain' in opt and domain:
            rep = rep.set_domain(domain)
        elif field is True:
            rep = rep.to_field()
        return rep
    @classmethod
    def _from_expr(cls, rep, opt):
        """Construct a polynomial from an expression. """
        rep, opt = _dict_from_expr(rep, opt)
        return cls._from_dict(rep, opt)
    def _hashable_content(self):
        """Allow SymPy to hash Poly instances. """
        return (self.rep, self.gens)
    def __hash__(self):
        # delegate to Basic.__hash__, which uses _hashable_content()
        return super(Poly, self).__hash__()
    @property
    def free_symbols(self):
        """
        Free symbols of a polynomial expression.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y

        >>> Poly(x**2 + 1).free_symbols
        set([x])
        >>> Poly(x**2 + y).free_symbols
        set([x, y])
        >>> Poly(x**2 + y, x).free_symbols
        set([x, y])

        """
        # symbols of the generators plus any carried by the ground domain
        symbols = set([])
        for gen in self.gens:
            symbols |= gen.free_symbols
        return symbols | self.free_symbols_in_domain
    @property
    def free_symbols_in_domain(self):
        """
        Free symbols of the domain of ``self``.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y

        >>> Poly(x**2 + 1).free_symbols_in_domain
        set()
        >>> Poly(x**2 + y).free_symbols_in_domain
        set()
        >>> Poly(x**2 + y, x).free_symbols_in_domain
        set([y])

        """
        domain, symbols = self.rep.dom, set()
        if domain.is_Composite:
            # polynomial/fraction domains carry their own generators
            for gen in domain.symbols:
                symbols |= gen.free_symbols
        elif domain.is_EX:
            # EX coefficients are arbitrary expressions
            for coeff in self.coeffs():
                symbols |= coeff.free_symbols
        return symbols
    @property
    def args(self):
        """
        Don't mess up with the core.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1, x).args
        (x**2 + 1,)

        """
        return (self.as_expr(),)
    @property
    def gen(self):
        """
        Return the principal generator.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1, x).gen
        x

        """
        return self.gens[0]
    @property
    def domain(self):
        """Get the ground domain of ``self``. """
        return self.get_domain()
    @property
    def zero(self):
        """Return zero polynomial with ``self``'s properties. """
        return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
    @property
    def one(self):
        """Return one polynomial with ``self``'s properties. """
        return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
    @property
    def unit(self):
        """Return unit polynomial with ``self``'s properties. """
        return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
    def unify(f, g):
        """
        Make ``f`` and ``g`` belong to the same domain.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> f, g = Poly(x/2 + 1), Poly(2*x + 1)

        >>> f
        Poly(1/2*x + 1, x, domain='QQ')
        >>> g
        Poly(2*x + 1, x, domain='ZZ')

        >>> F, G = f.unify(g)

        >>> F
        Poly(1/2*x + 1, x, domain='QQ')
        >>> G
        Poly(2*x + 1, x, domain='QQ')

        """
        # _unify does the work; per() wraps the raw reps back into Poly
        _, per, F, G = f._unify(g)
        return per(F), per(G)
    def _unify(f, g):
        """Unify ``f`` and ``g`` to a common generator list and ground domain.

        Returns ``(dom, per, F, G)`` where ``F``/``G`` are raw DMP reps over
        ``dom`` and ``per`` rebuilds a Poly (optionally dropping a generator).
        """
        g = sympify(g)
        if not g.is_Poly:
            # non-Poly: try to coerce g into f's ground domain
            try:
                return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
            except CoercionFailed:
                raise UnificationFailed("can't unify %s with %s" % (f, g))
        if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
            gens = _unify_gens(f.gens, g.gens)
            dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
            # reorder f's monomials onto the unified generator list if needed
            if f.gens != gens:
                f_monoms, f_coeffs = _dict_reorder(
                    f.rep.to_dict(), f.gens, gens)
                if f.rep.dom != dom:
                    f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs]
                F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev)
            else:
                F = f.rep.convert(dom)
            # same for g
            if g.gens != gens:
                g_monoms, g_coeffs = _dict_reorder(
                    g.rep.to_dict(), g.gens, gens)
                if g.rep.dom != dom:
                    g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs]
                G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev)
            else:
                G = g.rep.convert(dom)
        else:
            raise UnificationFailed("can't unify %s with %s" % (f, g))
        cls = f.__class__
        def per(rep, dom=dom, gens=gens, remove=None):
            # rebuild a Poly over the unified gens; ``remove`` drops one
            # generator, collapsing to a plain domain element if none remain
            if remove is not None:
                gens = gens[:remove] + gens[remove + 1:]
                if not gens:
                    return dom.to_sympy(rep)
            return cls.new(rep, *gens)
        return dom, per, F, G
    def per(f, rep, gens=None, remove=None):
        """
        Create a Poly out of the given representation.

        Examples
        ========

        >>> from sympy import Poly, ZZ
        >>> from sympy.abc import x, y

        >>> from sympy.polys.polyclasses import DMP

        >>> a = Poly(x**2 + 1)

        >>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
        Poly(y + 1, y, domain='ZZ')

        """
        if gens is None:
            gens = f.gens
        # ``remove`` drops one generator; with no generators left the raw
        # rep collapses to a plain element of the ground domain
        if remove is not None:
            gens = gens[:remove] + gens[remove + 1:]
            if not gens:
                return f.rep.dom.to_sympy(rep)
        return f.__class__.new(rep, *gens)
    def set_domain(f, domain):
        """Set the ground domain of ``f``. """
        # preprocess the domain spec (string, Domain object, ...) via Options
        opt = options.build_options(f.gens, {'domain': domain})
        return f.per(f.rep.convert(opt.domain))
    def get_domain(f):
        """Get the ground domain of ``f``. """
        return f.rep.dom
    def set_modulus(f, modulus):
        """
        Set the modulus of ``f``.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
        Poly(x**2 + 1, x, modulus=2)

        """
        modulus = options.Modulus.preprocess(modulus)
        # work modulo ``modulus`` by switching to the finite field GF(modulus)
        return f.set_domain(FF(modulus))
    def get_modulus(f):
        """
        Get the modulus of ``f``.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1, modulus=2).get_modulus()
        2

        """
        domain = f.get_domain()
        if domain.is_FiniteField:
            return Integer(domain.characteristic())
        else:
            raise PolynomialError("not a polynomial over a Galois field")
    def _eval_subs(f, old, new):
        """Internal implementation of :func:`subs`. """
        if old in f.gens:
            if new.is_number:
                # substituting a number for a generator is evaluation
                return f.eval(old, new)
            else:
                # otherwise try renaming the generator
                try:
                    return f.replace(old, new)
                except PolynomialError:
                    pass
        # fall back to substitution on the expression form
        return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
    def replace(f, x, y=None):
        """
        Replace ``x`` with ``y`` in generators list.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y

        >>> Poly(x**2 + 1, x).replace(x, y)
        Poly(y**2 + 1, y, domain='ZZ')

        """
        # one-argument form: replace the single generator with ``x``
        if y is None:
            if f.is_univariate:
                x, y = f.gen, x
            else:
                raise PolynomialError(
                    "syntax supported only in univariate case")
        if x == y:
            return f
        if x in f.gens and y not in f.gens:
            dom = f.get_domain()
            # refuse if y already occurs in a composite ground domain
            if not dom.is_Composite or y not in dom.symbols:
                gens = list(f.gens)
                gens[gens.index(x)] = y
                return f.per(f.rep, gens=gens)
        raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
    def reorder(f, *gens, **args):
        """
        Efficiently apply new order of generators.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y

        >>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
        Poly(y**2*x + x**2, y, x, domain='ZZ')

        """
        opt = options.Options((), args)
        if not gens:
            # no explicit order given: use the canonical generator sort
            gens = _sort_gens(f.gens, opt=opt)
        elif set(f.gens) != set(gens):
            raise PolynomialError(
                "generators list can differ only up to order of elements")
        # permute the monomial exponent tuples to match the new order
        rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens))))
        return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
    def ltrim(f, gen):
        """
        Remove dummy generators from the "left" of ``f``.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y, z

        >>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
        Poly(y**2 + y*z**2, y, z, domain='ZZ')

        """
        rep = f.as_dict(native=True)
        j = f._gen_to_level(gen)
        terms = {}
        for monom, coeff in rep.items():
            # drop the exponents of the trimmed-off generators
            monom = monom[j:]
            if monom not in terms:
                terms[monom] = coeff
            else:
                # truncated monomials collide: the dropped gens were not dummy
                raise PolynomialError("can't left trim %s" % f)
        gens = f.gens[j:]
        return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
    def has_only_gens(f, *gens):
        """
        Return ``True`` if ``Poly(f, *gens)`` retains ground domain.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y, z

        >>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
        True
        >>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
        False

        """
        # positions of the requested generators in f.gens
        indices = set([])
        for gen in gens:
            try:
                index = f.gens.index(gen)
            except ValueError:
                raise GeneratorsError(
                    "%s doesn't have %s as generator" % (f, gen))
            else:
                indices.add(index)
        # every other generator must have exponent 0 in every monomial
        for monom in f.monoms():
            for i, elt in enumerate(monom):
                if i not in indices and elt:
                    return False
        return True
    def to_ring(f):
        """
        Make the ground domain a ring.

        Examples
        ========

        >>> from sympy import Poly, QQ
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1, domain=QQ).to_ring()
        Poly(x**2 + 1, x, domain='ZZ')

        """
        if hasattr(f.rep, 'to_ring'):
            result = f.rep.to_ring()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'to_ring')
        return f.per(result)
    def to_field(f):
        """
        Make the ground domain a field.

        Examples
        ========

        >>> from sympy import Poly, ZZ
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1, x, domain=ZZ).to_field()
        Poly(x**2 + 1, x, domain='QQ')

        """
        if hasattr(f.rep, 'to_field'):
            result = f.rep.to_field()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'to_field')
        return f.per(result)
    def to_exact(f):
        """
        Make the ground domain exact.

        Examples
        ========

        >>> from sympy import Poly, RR
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
        Poly(x**2 + 1, x, domain='QQ')

        """
        if hasattr(f.rep, 'to_exact'):
            result = f.rep.to_exact()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'to_exact')
        return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True),
field=field, composite=f.domain.is_Composite or None)
return f.from_dict(rep, f.gens, domain=dom)
    def slice(f, x, m, n=None):
        """Take a continuous subsequence of terms of ``f``.

        Two call forms are supported: ``f.slice(m, n)`` slices with
        respect to the main (level 0) generator, while ``f.slice(x, m, n)``
        slices with respect to the generator ``x``.
        """
        # NOTE(review): the bounds ``m``/``n`` appear to select terms whose
        # exponent k in the chosen generator satisfies m <= k < n -- confirm
        # against the underlying ``f.rep.slice`` implementation.
        if n is None:
            # Two-argument form: ``x`` is really ``m`` and ``m`` is ``n``.
            j, m, n = 0, x, m
        else:
            j = f._gen_to_level(x)

        m, n = int(m), int(n)

        if hasattr(f.rep, 'slice'):
            result = f.rep.slice(m, n, j)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'slice')

        return f.per(result)
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
See Also
========
all_coeffs
coeff_monomial
nth
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
See Also
========
all_monoms
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
See Also
========
all_terms
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
See Also
========
all_terms
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func(k, coeff):
... k = k[0]
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError(
"%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
    def length(f):
        """
        Returns the number of non-zero terms in ``f``.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(x**2 + 2*x - 1).length()
        3

        """
        # ``as_dict`` only stores monomials with non-zero coefficients, so
        # its size is exactly the number of non-zero terms.
        return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
def as_expr(f, *gens):
"""
Convert a Poly instance to an Expr instance.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.items():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.symbols + f.gens
else:
gens = f.gens + dom.symbols
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
n, k = len(f.gens), len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:-k], False
else:
raise NotImplementedError(
"can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
    def mul_ground(f, coeff):
        """
        Multiply ``f`` by an element of the ground domain.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(x + 1).mul_ground(2)
        Poly(2*x + 2, x, domain='ZZ')

        """
        if hasattr(f.rep, 'mul_ground'):
            result = f.rep.mul_ground(coeff)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'mul_ground')

        return f.per(result)
    def quo_ground(f, coeff):
        """
        Quotient of ``f`` by an element of the ground domain.

        Unlike :meth:`exquo_ground`, this does not require the division to
        be exact -- see the second example below.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(2*x + 4).quo_ground(2)
        Poly(x + 2, x, domain='ZZ')

        >>> Poly(2*x + 3).quo_ground(2)
        Poly(x + 1, x, domain='ZZ')

        """
        if hasattr(f.rep, 'quo_ground'):
            result = f.rep.quo_ground(coeff)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'quo_ground')

        return f.per(result)
    def exquo_ground(f, coeff):
        """
        Exact quotient of ``f`` by an element of the ground domain.

        Raises ``ExactQuotientFailed`` if some coefficient is not evenly
        divisible by ``coeff`` in the ground domain.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(2*x + 4).exquo_ground(2)
        Poly(x + 2, x, domain='ZZ')

        >>> Poly(2*x + 3).exquo_ground(2)
        Traceback (most recent call last):
        ...
        ExactQuotientFailed: 2 does not divide 3 in ZZ

        """
        if hasattr(f.rep, 'exquo_ground'):
            result = f.rep.exquo_ground(coeff)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'exquo_ground')

        return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
    def prem(f, g):
        """
        Polynomial pseudo-remainder of ``f`` by ``g``.

        Caveat: The function prem(f, g, x) can be safely used to compute
        in Z[x] _only_ subresultant polynomial remainder sequences (prs's).

        To safely compute Euclidean and Sturmian prs's in Z[x]
        employ any one of the corresponding functions found in
        the module sympy.polys.subresultants_qq_zz. The functions
        in the module with suffix _pg compute prs's in Z[x] employing
        rem(f, g, x), whereas the functions with suffix _amv
        compute prs's in Z[x] employing rem_z(f, g, x).

        The function rem_z(f, g, x) differs from prem(f, g, x) in that
        to compute the remainder polynomials in Z[x] it premultiplies
        the dividend times the absolute value of the leading coefficient
        of the divisor raised to the power degree(f, x) - degree(g, x) + 1.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
        Poly(20, x, domain='ZZ')

        """
        _, per, F, G = f._unify(g)

        if hasattr(f.rep, 'prem'):
            result = F.prem(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'prem')

        return per(result)
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
See the Caveat note in the function prem(f, g).
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
    def div(f, g, auto=True):
        """
        Polynomial division with remainder of ``f`` by ``g``.

        With ``auto=True`` (the default) and a ground domain that is a
        ring without division (e.g. ``ZZ``), the operands are lifted to
        the associated field for the division, and the results are
        converted back to the ring when their coefficients allow it.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x

        >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
        (Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))

        >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
        (Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))

        """
        dom, per, F, G = f._unify(g)
        retract = False

        # Work over a field so that exact coefficient division is possible;
        # remember to convert back afterwards.
        if auto and dom.has_Ring and not dom.has_Field:
            F, G = F.to_field(), G.to_field()
            retract = True

        if hasattr(f.rep, 'div'):
            q, r = F.div(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'div')

        if retract:
            try:
                Q, R = q.to_ring(), r.to_ring()
            except CoercionFailed:
                # Non-integral coefficients: keep the field results.
                pass
            else:
                q, r = Q, R

        return per(q), per(r)
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" %
(length, length, gen))
else:
try:
return f.gens.index(sympify(gen))
except ValueError:
raise PolynomialError(
"a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
>>> Poly(0, x).degree()
-oo
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
2
>>> Poly(x + y**5, x, y).total_degree()
5
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def homogenize(f, s):
"""
Returns the homogeneous polynomial of ``f``.
A homogeneous polynomial is a polynomial whose all monomials with
non-zero coefficients have the same total degree. If you only
want to check if a polynomial is homogeneous, then use
:func:`Poly.is_homogeneous`. If you want not only to check if a
polynomial is homogeneous but also compute its homogeneous order,
then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3)
>>> f.homogenize(z)
Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ')
"""
if not isinstance(s, Symbol):
raise TypeError("``Symbol`` expected, got %s" % type(s))
if s in f.gens:
i = f.gens.index(s)
gens = f.gens
else:
i = len(f.gens)
gens = f.gens + (s,)
if hasattr(f.rep, 'homogenize'):
return f.per(f.rep.homogenize(i), gens=gens)
raise OperationNotSupported(f, 'homogeneous_order')
def homogeneous_order(f):
"""
Returns the homogeneous order of ``f``.
A homogeneous polynomial is a polynomial whose all monomials with
non-zero coefficients have the same total degree. This degree is
the homogeneous order of ``f``. If you only want to check if a
polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
>>> f.homogeneous_order()
5
"""
if hasattr(f.rep, 'homogeneous_order'):
return f.rep.homogeneous_order()
else: # pragma: no cover
raise OperationNotSupported(f, 'homogeneous_order')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
Returns the trailing coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
    def coeff_monomial(f, monom):
        """
        Returns the coefficient of ``monom`` in ``f``, or zero if
        ``monom`` does not appear (see the ``y`` example below).

        Examples
        ========

        >>> from sympy import Poly, exp
        >>> from sympy.abc import x, y

        >>> p = Poly(24*x*y*exp(8) + 23*x, x, y)

        >>> p.coeff_monomial(x)
        23
        >>> p.coeff_monomial(y)
        0
        >>> p.coeff_monomial(x*y)
        24*exp(8)

        Note that ``Expr.coeff()`` behaves differently, collecting terms
        if possible; the Poly must be converted to an Expr to use that
        method, however:

        >>> p.as_expr().coeff(x)
        24*y*exp(8) + 23
        >>> p.as_expr().coeff(y)
        24*x*exp(8)
        >>> p.as_expr().coeff(x*y)
        24*exp(8)

        See Also
        ========
        nth: more efficient query using exponents of the monomial's generators

        """
        # Normalize ``monom`` (an expression or exponent tuple) through
        # ``Monomial`` and delegate to the exponent-based lookup.
        return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f`` where ``N`` are the
exponents of the generators in the term of interest.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
>>> Poly(4*sqrt(x)*y)
Poly(4*y*(sqrt(x)), y, sqrt(x), domain='ZZ')
>>> _.nth(1, 1)
4
See Also
========
coeff_monomial
"""
if hasattr(f.rep, 'nth'):
if len(N) != len(f.gens):
raise ValueError('exponent of each generator must be specified')
result = f.rep.nth(*list(map(int, N)))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
# the semantics of coeff_monomial and Expr.coeff are different;
# if someone is working with a Poly, they should be aware of the
# differences and chose the method best suited for the query.
# Alternatively, a pure-polys method could be written here but
# at this time the ``right`` keyword would be ignored because Poly
# doesn't work with non-commutatives.
raise NotImplementedError(
'Either convert to Expr with `as_expr` method '
'to use Expr\'s coeff method or else use the '
'`coeff_monomial` method of Polys.')
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
x**2*y**0
"""
return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
x**0*y**1
"""
return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
def max_norm(f):
"""
Returns maximum norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(self, convert=False):
"""
Clear denominators, but keep the ground domain.
Examples
========
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
f = self
if not f.rep.dom.has_Field:
return S.One, f
dom = f.get_domain()
if dom.has_assoc_Ring:
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert or not dom.has_assoc_Ring:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(self, g):
"""
Clear denominators in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
f = self
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.has_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(self, *specs, **args):
"""
Computes indefinite integral of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
f = self
if args.get('auto', True) and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs, **kwargs):
"""
Computes partial derivative of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if not kwargs.get('evaluate', True):
return Derivative(f, *specs, **kwargs)
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
_eval_derivative = diff
_eval_diff = diff
    def eval(self, x, a=None, auto=True):
        """
        Evaluate ``f`` at ``a`` in the given variable.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y, z

        >>> Poly(x**2 + 2*x + 3, x).eval(2)
        11

        >>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
        Poly(5*y + 8, y, domain='ZZ')

        >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)

        >>> f.eval({x: 2})
        Poly(5*y + 2*z + 6, y, z, domain='ZZ')
        >>> f.eval({x: 2, y: 5})
        Poly(2*z + 31, z, domain='ZZ')
        >>> f.eval({x: 2, y: 5, z: 7})
        45

        >>> f.eval((2, 5))
        Poly(2*z + 31, z, domain='ZZ')
        >>> f(2, 5)
        Poly(2*z + 31, z, domain='ZZ')

        """
        f = self

        if a is None:
            if isinstance(x, dict):
                # Mapping form: substitute each generator one at a time.
                mapping = x

                for gen, value in mapping.items():
                    f = f.eval(gen, value)

                return f
            elif isinstance(x, (tuple, list)):
                # Positional form: values are matched against the leading
                # generators in order.
                values = x

                if len(values) > len(f.gens):
                    raise ValueError("too many values provided")

                for gen, value in zip(f.gens, values):
                    f = f.eval(gen, value)

                return f
            else:
                # Single-value form: evaluate the main (level 0) generator.
                j, a = 0, x
        else:
            j = f._gen_to_level(x)

        if not hasattr(f.rep, 'eval'):  # pragma: no cover
            raise OperationNotSupported(f, 'eval')

        try:
            result = f.rep.eval(a, j)
        except CoercionFailed:
            if not auto:
                raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
            else:
                # ``a`` does not fit in the current ground domain: enlarge
                # the domain so it can represent ``a`` and retry.
                a_domain, [a] = construct_domain([a])
                new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens)

                f = f.set_domain(new_domain)
                a = new_domain.convert(a, a_domain)

                result = f.rep.eval(a, j)

        # ``remove=j`` drops the evaluated generator from the result.
        return f.per(result, remove=j)
    def __call__(f, *values):
        """
        Evaluate ``f`` at the given values.

        Examples
        ========

        >>> from sympy import Poly
        >>> from sympy.abc import x, y, z

        >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)

        >>> f(2)
        Poly(5*y + 2*z + 6, y, z, domain='ZZ')

        >>> f(2, 5)
        Poly(2*z + 31, z, domain='ZZ')

        >>> f(2, 5, 7)
        45

        """
        # Delegate to the tuple/list branch of ``eval``.
        return f.eval(values)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
    """Compute ``f**(-1)`` mod ``x**n``. """
    if not hasattr(f.rep, 'revert'):  # pragma: no cover
        raise OperationNotSupported(f, 'revert')

    return f.per(f.rep.revert(int(n)))
def subresultants(f, g):
    """
    Computes the subresultant PRS of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
    [Poly(x**2 + 1, x, domain='ZZ'),
     Poly(x**2 - 1, x, domain='ZZ'),
     Poly(-2, x, domain='ZZ')]

    """
    _, per, F, G = f._unify(g)

    if not hasattr(f.rep, 'subresultants'):  # pragma: no cover
        raise OperationNotSupported(f, 'subresultants')

    return [per(poly) for poly in F.subresultants(G)]
def resultant(f, g, includePRS=False):
    """
    Computes the resultant of ``f`` and ``g`` via PRS.

    If includePRS=True, it includes the subresultant PRS in the result.
    Because the PRS is used to calculate the resultant, this is more
    efficient than calling :func:`subresultants` separately.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> f = Poly(x**2 + 1, x)

    >>> f.resultant(Poly(x**2 - 1, x))
    4
    >>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
    (4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
         Poly(-2, x, domain='ZZ')])

    """
    _, per, F, G = f._unify(g)

    if not hasattr(f.rep, 'resultant'):  # pragma: no cover
        raise OperationNotSupported(f, 'resultant')

    # ``remove=0`` drops the eliminated generator from the result.
    if includePRS:
        result, R = F.resultant(G, includePRS=True)
        return per(result, remove=0), [per(poly) for poly in R]

    return per(F.resultant(G), remove=0)
def discriminant(f):
    """
    Computes the discriminant of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 + 2*x + 3, x).discriminant()
    -8

    """
    if not hasattr(f.rep, 'discriminant'):  # pragma: no cover
        raise OperationNotSupported(f, 'discriminant')

    # ``remove=0`` eliminates the main generator from the result.
    return f.per(f.rep.discriminant(), remove=0)
def dispersionset(f, g=None):
    r"""Compute the *dispersion set* of two polynomials.

    For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
    and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:

    .. math::
        \operatorname{J}(f, g)
        & := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
        & = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}

    For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.

    Examples
    ========

    >>> from sympy import poly
    >>> from sympy.polys.dispersion import dispersion, dispersionset
    >>> from sympy.abc import x

    >>> fp = poly((x - 3)*(x + 3), x)
    >>> sorted(dispersionset(fp))
    [0, 6]
    >>> dispersion(fp)
    6

    Note that the definition of the dispersion is not symmetric:

    >>> fp = poly(x**4 - 3*x**2 + 1, x)
    >>> gp = fp.shift(-3)
    >>> sorted(dispersionset(fp, gp))
    [2, 3, 4]
    >>> sorted(dispersionset(gp, fp))
    []

    See Also
    ========

    dispersion

    References
    ==========

    1. [ManWright94]_
    2. [Koepf98]_
    3. [Abramov71]_
    4. [Man93]_
    """
    # Delegate to the dedicated implementation; the import is local to
    # avoid a circular dependency at module load time.
    from sympy.polys.dispersion import dispersionset
    return dispersionset(f, g)
def dispersion(f, g=None):
    r"""Compute the *dispersion* of polynomials.

    For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
    and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:

    .. math::
        \operatorname{dis}(f, g)
        & := \max\{ J(f,g) \cup \{0\} \} \\
        & = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}

    and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.

    Examples
    ========

    >>> from sympy import poly
    >>> from sympy.polys.dispersion import dispersion, dispersionset
    >>> from sympy.abc import x

    >>> fp = poly((x - 3)*(x + 3), x)
    >>> dispersion(fp)
    6

    Note that the definition of the dispersion is not symmetric:

    >>> fp = poly(x**4 - 3*x**2 + 1, x)
    >>> gp = fp.shift(-3)
    >>> dispersion(fp, gp)
    4
    >>> dispersion(gp, fp)
    -oo

    See Also
    ========

    dispersionset

    References
    ==========

    1. [ManWright94]_
    2. [Koepf98]_
    3. [Abramov71]_
    4. [Man93]_
    """
    # Delegate to the dedicated implementation; the import is local to
    # avoid a circular dependency at module load time.
    from sympy.polys.dispersion import dispersion
    return dispersion(f, g)
def cofactors(f, g):
    """
    Returns the GCD of ``f`` and ``g`` and their cofactors.

    Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
    ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors
    of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
    (Poly(x - 1, x, domain='ZZ'),
     Poly(x + 1, x, domain='ZZ'),
     Poly(x - 2, x, domain='ZZ'))

    """
    _, per, F, G = f._unify(g)

    if not hasattr(f.rep, 'cofactors'):  # pragma: no cover
        raise OperationNotSupported(f, 'cofactors')

    h, cff, cfg = F.cofactors(G)
    return per(h), per(cff), per(cfg)
def gcd(f, g):
    """
    Returns the polynomial GCD of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
    Poly(x - 1, x, domain='ZZ')

    """
    _, per, F, G = f._unify(g)

    if not hasattr(f.rep, 'gcd'):  # pragma: no cover
        raise OperationNotSupported(f, 'gcd')

    return per(F.gcd(G))
def lcm(f, g):
    """
    Returns polynomial LCM of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
    Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')

    """
    _, per, F, G = f._unify(g)

    if not hasattr(f.rep, 'lcm'):  # pragma: no cover
        raise OperationNotSupported(f, 'lcm')

    return per(F.lcm(G))
def trunc(f, p):
    """
    Reduce ``f`` modulo a constant ``p``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
    Poly(-x**3 - x + 1, x, domain='ZZ')

    """
    # Coerce ``p`` into the ground domain before reducing.
    p = f.rep.dom.convert(p)

    if not hasattr(f.rep, 'trunc'):  # pragma: no cover
        raise OperationNotSupported(f, 'trunc')

    return f.per(f.rep.trunc(p))
def monic(self, auto=True):
    """
    Divides all coefficients by ``LC(f)``.

    Examples
    ========

    >>> from sympy import Poly, ZZ
    >>> from sympy.abc import x

    >>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
    Poly(x**2 + 2*x + 3, x, domain='QQ')

    >>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
    Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')

    """
    # The division by LC(f) generally requires a field, so lift a
    # ring domain to its fraction field unless the caller opts out.
    f = self.to_field() if auto and self.rep.dom.has_Ring else self

    if not hasattr(f.rep, 'monic'):  # pragma: no cover
        raise OperationNotSupported(f, 'monic')

    return f.per(f.rep.monic())
def content(f):
    """
    Returns the GCD of polynomial coefficients.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(6*x**2 + 8*x + 12, x).content()
    2

    """
    if not hasattr(f.rep, 'content'):  # pragma: no cover
        raise OperationNotSupported(f, 'content')

    # Convert the ground-domain element back to a SymPy expression.
    return f.rep.dom.to_sympy(f.rep.content())
def primitive(f):
    """
    Returns the content and a primitive form of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(2*x**2 + 8*x + 12, x).primitive()
    (2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))

    """
    if not hasattr(f.rep, 'primitive'):  # pragma: no cover
        raise OperationNotSupported(f, 'primitive')

    cont, result = f.rep.primitive()
    return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
    """
    Computes the functional composition of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
    Poly(x**2 - x, x, domain='ZZ')

    """
    _, per, F, G = f._unify(g)

    if not hasattr(f.rep, 'compose'):  # pragma: no cover
        raise OperationNotSupported(f, 'compose')

    return per(F.compose(G))
def decompose(f):
    """
    Computes a functional decomposition of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
    [Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]

    """
    if not hasattr(f.rep, 'decompose'):  # pragma: no cover
        raise OperationNotSupported(f, 'decompose')

    return [f.per(g) for g in f.rep.decompose()]
def shift(f, a):
    """
    Efficiently compute Taylor shift ``f(x + a)``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 2*x + 1, x).shift(2)
    Poly(x**2 + 2*x + 1, x, domain='ZZ')

    """
    if not hasattr(f.rep, 'shift'):  # pragma: no cover
        raise OperationNotSupported(f, 'shift')

    return f.per(f.rep.shift(a))
def sturm(self, auto=True):
    """
    Computes the Sturm sequence of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
    [Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
     Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
     Poly(2/9*x + 25/9, x, domain='QQ'),
     Poly(-2079/4, x, domain='QQ')]

    """
    # Sturm sequences require exact division, so lift a ring domain
    # to its fraction field unless the caller disables this.
    f = self.to_field() if auto and self.rep.dom.has_Ring else self

    if not hasattr(f.rep, 'sturm'):  # pragma: no cover
        raise OperationNotSupported(f, 'sturm')

    return [f.per(g) for g in f.rep.sturm()]
def gff_list(f):
    """
    Computes greatest factorial factorization of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> f = x**5 + 2*x**4 - x**3 - 2*x**2

    >>> Poly(f).gff_list()
    [(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]

    """
    if not hasattr(f.rep, 'gff_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'gff_list')

    return [(f.per(g), k) for g, k in f.rep.gff_list()]
def sqf_norm(f):
    """
    Computes square-free norm of ``f``.

    Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x-sa)`` and
    ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
    where ``a`` is the algebraic extension of the ground domain.

    Examples
    ========

    >>> from sympy import Poly, sqrt
    >>> from sympy.abc import x

    >>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()

    >>> s
    1
    >>> f
    Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
    >>> r
    Poly(x**4 - 4*x**2 + 16, x, domain='QQ')

    """
    if not hasattr(f.rep, 'sqf_norm'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_norm')

    s, g, r = f.rep.sqf_norm()
    return s, f.per(g), f.per(r)
def sqf_part(f):
    """
    Computes square-free part of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**3 - 3*x - 2, x).sqf_part()
    Poly(x**2 - x - 2, x, domain='ZZ')

    """
    if not hasattr(f.rep, 'sqf_part'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_part')

    return f.per(f.rep.sqf_part())
def sqf_list(f, all=False):
    """
    Returns a list of square-free factors of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16

    >>> Poly(f).sqf_list()
    (2, [(Poly(x + 1, x, domain='ZZ'), 2),
         (Poly(x + 2, x, domain='ZZ'), 3)])

    >>> Poly(f).sqf_list(all=True)
    (2, [(Poly(1, x, domain='ZZ'), 1),
         (Poly(x + 1, x, domain='ZZ'), 2),
         (Poly(x + 2, x, domain='ZZ'), 3)])

    """
    if not hasattr(f.rep, 'sqf_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_list')

    coeff, factors = f.rep.sqf_list(all)
    return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def sqf_list_include(f, all=False):
    """
    Returns a list of square-free factors of ``f``.

    The leading coefficient is absorbed into the first factor rather
    than being returned separately.

    Examples
    ========

    >>> from sympy import Poly, expand
    >>> from sympy.abc import x

    >>> f = expand(2*(x + 1)**3*x**4)
    >>> f
    2*x**7 + 6*x**6 + 6*x**5 + 2*x**4

    >>> Poly(f).sqf_list_include()
    [(Poly(2, x, domain='ZZ'), 1),
     (Poly(x + 1, x, domain='ZZ'), 3),
     (Poly(x, x, domain='ZZ'), 4)]

    >>> Poly(f).sqf_list_include(all=True)
    [(Poly(2, x, domain='ZZ'), 1),
     (Poly(1, x, domain='ZZ'), 2),
     (Poly(x + 1, x, domain='ZZ'), 3),
     (Poly(x, x, domain='ZZ'), 4)]

    """
    if not hasattr(f.rep, 'sqf_list_include'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_list_include')

    return [(f.per(g), k) for g, k in f.rep.sqf_list_include(all)]
def factor_list(f):
    """
    Returns a list of irreducible factors of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y

    >>> Poly(f).factor_list()
    (2, [(Poly(x + y, x, y, domain='ZZ'), 1),
         (Poly(x**2 + 1, x, y, domain='ZZ'), 2)])

    """
    if not hasattr(f.rep, 'factor_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'factor_list')

    try:
        coeff, factors = f.rep.factor_list()
    except DomainError:
        # Factorization is unsupported over this domain; fall back to
        # treating the polynomial itself as a single factor.
        return S.One, [(f, 1)]

    return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def factor_list_include(f):
    """
    Returns a list of irreducible factors of ``f``.

    The leading coefficient is absorbed into the first factor rather
    than being returned separately.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y

    >>> Poly(f).factor_list_include()
    [(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
     (Poly(x**2 + 1, x, y, domain='ZZ'), 2)]

    """
    if not hasattr(f.rep, 'factor_list_include'):  # pragma: no cover
        raise OperationNotSupported(f, 'factor_list_include')

    try:
        factors = f.rep.factor_list_include()
    except DomainError:
        # Factorization is unsupported over this domain; fall back to
        # treating the polynomial itself as a single factor.
        return [(f, 1)]

    return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
    """
    Compute isolating intervals for roots of ``f``.

    For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used.

    References:
    ===========
    1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
    Isolation Methods . Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
    2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
    Performance of the Continued Fractions Method Using new Bounds of Positive Roots. Nonlinear
    Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 3, x).intervals()
    [((-2, -1), 1), ((1, 2), 1)]
    >>> Poly(x**2 - 3, x).intervals(eps=1e-2)
    [((-26/15, -19/11), 1), ((19/11, 26/15), 1)]

    """
    # Normalize the optional precision and bounds to exact rationals.
    if eps is not None:
        eps = QQ.convert(eps)

        if eps <= 0:
            raise ValueError("'eps' must be a positive rational")

    if inf is not None:
        inf = QQ.convert(inf)
    if sup is not None:
        sup = QQ.convert(sup)

    if hasattr(f.rep, 'intervals'):
        result = f.rep.intervals(
            all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'intervals')

    # With ``sqf=True`` the low-level result carries bare intervals
    # (no multiplicities); otherwise each entry is paired with its
    # multiplicity ``k``.  In both cases, with ``all=True`` the result
    # splits into real intervals and complex bounding rectangles.
    if sqf:
        def _real(interval):
            s, t = interval
            return (QQ.to_sympy(s), QQ.to_sympy(t))

        if not all:
            return list(map(_real, result))

        def _complex(rectangle):
            (u, v), (s, t) = rectangle
            return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
                    QQ.to_sympy(s) + I*QQ.to_sympy(t))

        real_part, complex_part = result

        return list(map(_real, real_part)), list(map(_complex, complex_part))
    else:
        def _real(interval):
            (s, t), k = interval
            return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)

        if not all:
            return list(map(_real, result))

        def _complex(rectangle):
            ((u, v), (s, t)), k = rectangle
            return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
                     QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)

        real_part, complex_part = result

        return list(map(_real, real_part)), list(map(_complex, complex_part))
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
    """
    Refine an isolating interval of a root to the given precision.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
    (19/11, 26/15)

    """
    # Refinement assumes an isolating interval, which is only
    # guaranteed for square-free polynomials.
    if check_sqf and not f.is_sqf:
        raise PolynomialError("only square-free polynomials supported")

    s, t = QQ.convert(s), QQ.convert(t)

    if eps is not None:
        eps = QQ.convert(eps)

        if eps <= 0:
            raise ValueError("'eps' must be a positive rational")

    if steps is not None:
        steps = int(steps)
    elif eps is None:
        # Neither precision nor step count given: do a single step.
        steps = 1

    if not hasattr(f.rep, 'refine_root'):  # pragma: no cover
        raise OperationNotSupported(f, 'refine_root')

    lo, hi = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
    return QQ.to_sympy(lo), QQ.to_sympy(hi)
def count_roots(f, inf=None, sup=None):
    """
    Return the number of roots of ``f`` in ``[inf, sup]`` interval.

    Examples
    ========

    >>> from sympy import Poly, I
    >>> from sympy.abc import x

    >>> Poly(x**4 - 4, x).count_roots(-3, 3)
    2
    >>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
    1

    """
    # Track whether each bound is real; a complex bound switches the
    # whole computation to complex root counting.
    inf_real, sup_real = True, True

    if inf is not None:
        inf = sympify(inf)

        # An infinite bound is the same as no bound at all.
        if inf is S.NegativeInfinity:
            inf = None
        else:
            re, im = inf.as_real_imag()

            if not im:
                inf = QQ.convert(inf)
            else:
                # Complex bound: store as a (re, im) rational pair.
                inf, inf_real = list(map(QQ.convert, (re, im))), False

    if sup is not None:
        sup = sympify(sup)

        if sup is S.Infinity:
            sup = None
        else:
            re, im = sup.as_real_imag()

            if not im:
                sup = QQ.convert(sup)
            else:
                sup, sup_real = list(map(QQ.convert, (re, im))), False

    if inf_real and sup_real:
        if hasattr(f.rep, 'count_real_roots'):
            count = f.rep.count_real_roots(inf=inf, sup=sup)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'count_real_roots')
    else:
        # Promote any remaining real bound to a complex (re, im) pair
        # so both bounds have the same representation.
        if inf_real and inf is not None:
            inf = (inf, QQ.zero)

        if sup_real and sup is not None:
            sup = (sup, QQ.zero)

        if hasattr(f.rep, 'count_complex_roots'):
            count = f.rep.count_complex_roots(inf=inf, sup=sup)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'count_complex_roots')

    return Integer(count)
def root(f, index, radicals=True):
    """
    Get an indexed root of a polynomial.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)

    >>> f.root(0)
    -1/2
    >>> f.root(1)
    2
    >>> f.root(2)
    2
    >>> f.root(3)
    Traceback (most recent call last):
    ...
    IndexError: root index out of [-3, 2] range, got 3

    >>> Poly(x**5 + x + 1).root(0)
    CRootOf(x**3 - x**2 + 1, 0)

    """
    # Delegate indexed-root construction to the rootof machinery.
    return sympy.polys.rootoftools.rootof(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
    """
    Return a list of real roots with multiplicities.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
    [-1/2, 2, 2]
    >>> Poly(x**3 + x + 1).real_roots()
    [CRootOf(x**3 + x + 1, 0)]

    """
    reals = sympy.polys.rootoftools.CRootOf.real_roots(f, radicals=radicals)

    # ``multiple=False`` groups repeated roots into (root, k) pairs.
    return reals if multiple else group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
    """
    Return a list of real and complex roots with multiplicities.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
    [-1/2, 2, 2]
    >>> Poly(x**3 + x + 1).all_roots()
    [CRootOf(x**3 + x + 1, 0),
     CRootOf(x**3 + x + 1, 1),
     CRootOf(x**3 + x + 1, 2)]

    """
    roots = sympy.polys.rootoftools.CRootOf.all_roots(f, radicals=radicals)

    # ``multiple=False`` groups repeated roots into (root, k) pairs.
    return roots if multiple else group(roots, multiple=False)
def nroots(f, n=15, maxsteps=50, cleanup=True):
    """
    Compute numerical approximations of roots of ``f``.

    Parameters
    ==========

    n ... the number of digits to calculate
    maxsteps ... the maximum number of iterations to do

    If the accuracy `n` cannot be reached in `maxsteps`, it will raise an
    exception. You need to rerun with higher maxsteps.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 3).nroots(n=15)
    [-1.73205080756888, 1.73205080756888]
    >>> Poly(x**2 - 3).nroots(n=30)
    [-1.73205080756887729352744634151, 1.73205080756887729352744634151]

    """
    if f.is_multivariate:
        raise MultivariatePolynomialError(
            "can't compute numerical roots of %s" % f)

    # A constant (or zero) polynomial has no roots.
    if f.degree() <= 0:
        return []

    # For integer and rational coefficients, convert them to integers only
    # (for accuracy). Otherwise just try to convert the coefficients to
    # mpmath.mpc and raise an exception if the conversion fails.
    if f.rep.dom is ZZ:
        coeffs = [int(coeff) for coeff in f.all_coeffs()]
    elif f.rep.dom is QQ:
        # Clear denominators: scaling by the LCM does not move the roots.
        denoms = [coeff.q for coeff in f.all_coeffs()]
        from sympy.core.numbers import ilcm
        fac = ilcm(*denoms)
        coeffs = [int(coeff*fac) for coeff in f.all_coeffs()]
    else:
        coeffs = [coeff.evalf(n=n).as_real_imag()
                for coeff in f.all_coeffs()]
        try:
            coeffs = [mpmath.mpc(*coeff) for coeff in coeffs]
        except TypeError:
            raise DomainError("Numerical domain expected, got %s" % \
                    f.rep.dom)

    # Temporarily raise mpmath's working precision to ``n`` digits;
    # restored in the ``finally`` below.
    dps = mpmath.mp.dps
    mpmath.mp.dps = n

    try:
        # We need to add extra precision to guard against losing accuracy.
        # 10 times the degree of the polynomial seems to work well.
        roots = mpmath.polyroots(coeffs, maxsteps=maxsteps,
                cleanup=cleanup, error=False, extraprec=f.degree()*10)

        # Mpmath puts real roots first, then complex ones (as does all_roots)
        # so we make sure this convention holds here, too.
        roots = list(map(sympify,
            sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, r.imag))))
    except NoConvergence:
        raise NoConvergence(
            'convergence to root failed; try n < %s or maxsteps > %s' % (
            n, maxsteps))
    finally:
        mpmath.mp.dps = dps

    return roots
def ground_roots(f):
    """
    Compute roots of ``f`` by factorization in the ground domain.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
    {0: 2, 1: 2}

    """
    if f.is_multivariate:
        raise MultivariatePolynomialError(
            "can't compute ground roots of %s" % f)

    roots = {}

    # Only linear factors a*x + b contribute roots (-b/a) that lie in
    # the ground domain; other factors are skipped.
    for factor, k in f.factor_list()[1]:
        if not factor.is_linear:
            continue

        a, b = factor.all_coeffs()
        roots[-b/a] = k

    return roots
def nth_power_roots_poly(f, n):
    """
    Construct a polynomial with n-th powers of roots of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> f = Poly(x**4 - x**2 + 1)

    >>> f.nth_power_roots_poly(2)
    Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
    >>> f.nth_power_roots_poly(3)
    Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
    >>> f.nth_power_roots_poly(4)
    Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
    >>> f.nth_power_roots_poly(12)
    Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')

    """
    if f.is_multivariate:
        raise MultivariatePolynomialError(
            "must be a univariate polynomial")

    N = sympify(n)

    if N.is_Integer and N >= 1:
        n = int(N)
    else:
        # Fixed grammar of the error message ("must an" -> "must be an").
        raise ValueError("'n' must be an integer and n >= 1, got %s" % n)

    x = f.gen
    t = Dummy('t')

    # res_x(f(x), x**n - t) is a polynomial in t whose roots are the
    # n-th powers of the roots of f; rename t back to x afterwards.
    r = f.resultant(f.__class__.from_expr(x**n - t, x, t))

    return r.replace(t, x)
def cancel(f, g, include=False):
    """
    Cancel common factors in a rational function ``f/g``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
    (1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))

    >>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
    (Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))

    """
    dom, per, F, G = f._unify(g)

    if hasattr(F, 'cancel'):
        result = F.cancel(G, include=include)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'cancel')

    if not include:
        # ``result`` is (cp, cq, p, q): ground-domain content of the
        # numerator/denominator plus the reduced polynomial pair.
        if dom.has_assoc_Ring:
            dom = dom.get_ring()

        cp, cq, p, q = result

        cp = dom.to_sympy(cp)
        cq = dom.to_sympy(cq)

        return cp/cq, per(p), per(q)
    else:
        # With ``include=True`` the contents are absorbed into the
        # polynomials, so only (p, q) is returned.
        return tuple(map(per, result))
@property
def is_zero(f):
    """
    Returns ``True`` if ``f`` is a zero polynomial.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(0, x).is_zero
    True
    >>> Poly(1, x).is_zero
    False

    """
    return f.rep.is_zero
@property
def is_one(f):
    """
    Returns ``True`` if ``f`` is a unit polynomial.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(0, x).is_one
    False
    >>> Poly(1, x).is_one
    True

    """
    return f.rep.is_one
@property
def is_sqf(f):
    """
    Returns ``True`` if ``f`` is a square-free polynomial.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 - 2*x + 1, x).is_sqf
    False
    >>> Poly(x**2 - 1, x).is_sqf
    True

    """
    return f.rep.is_sqf
@property
def is_monic(f):
    """
    Returns ``True`` if the leading coefficient of ``f`` is one.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x + 2, x).is_monic
    True
    >>> Poly(2*x + 2, x).is_monic
    False

    """
    return f.rep.is_monic
@property
def is_primitive(f):
    """
    Returns ``True`` if GCD of the coefficients of ``f`` is one.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(2*x**2 + 6*x + 12, x).is_primitive
    False
    >>> Poly(x**2 + 3*x + 6, x).is_primitive
    True

    """
    return f.rep.is_primitive
@property
def is_ground(f):
    """
    Returns ``True`` if ``f`` is an element of the ground domain.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> Poly(x, x).is_ground
    False
    >>> Poly(2, x).is_ground
    True
    >>> Poly(y, x).is_ground
    True

    """
    return f.rep.is_ground
@property
def is_linear(f):
    """
    Returns ``True`` if ``f`` is linear in all its variables.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> Poly(x + y + 2, x, y).is_linear
    True
    >>> Poly(x*y + 2, x, y).is_linear
    False

    """
    return f.rep.is_linear
@property
def is_quadratic(f):
    """
    Returns ``True`` if ``f`` is quadratic in all its variables.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> Poly(x*y + 2, x, y).is_quadratic
    True
    >>> Poly(x*y**2 + 2, x, y).is_quadratic
    False

    """
    return f.rep.is_quadratic
@property
def is_monomial(f):
    """
    Returns ``True`` if ``f`` is zero or has only one term.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(3*x**2, x).is_monomial
    True
    >>> Poly(3*x**2 + 1, x).is_monomial
    False

    """
    return f.rep.is_monomial
@property
def is_homogeneous(f):
    """
    Returns ``True`` if ``f`` is a homogeneous polynomial.

    A homogeneous polynomial is a polynomial whose all monomials with
    non-zero coefficients have the same total degree. If you want not
    only to check if a polynomial is homogeneous but also compute its
    homogeneous order, then use :func:`Poly.homogeneous_order`.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> Poly(x**2 + x*y, x, y).is_homogeneous
    True
    >>> Poly(x**3 + x*y, x, y).is_homogeneous
    False

    """
    return f.rep.is_homogeneous
@property
def is_irreducible(f):
    """
    Returns ``True`` if ``f`` has no factors over its domain.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
    True
    >>> Poly(x**2 + 1, x, modulus=2).is_irreducible
    False

    """
    return f.rep.is_irreducible
@property
def is_univariate(f):
    """
    Returns ``True`` if ``f`` is a univariate polynomial.

    Note that this is decided by the declared generators, not by the
    expression itself (see the last two examples).

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> Poly(x**2 + x + 1, x).is_univariate
    True
    >>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
    False
    >>> Poly(x*y**2 + x*y + 1, x).is_univariate
    True
    >>> Poly(x**2 + x + 1, x, y).is_univariate
    False

    """
    return len(f.gens) == 1
@property
def is_multivariate(f):
    """
    Returns ``True`` if ``f`` is a multivariate polynomial.

    Note that this is decided by the declared generators, not by the
    expression itself (see the last two examples).

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y

    >>> Poly(x**2 + x + 1, x).is_multivariate
    False
    >>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
    True
    >>> Poly(x*y**2 + x*y + 1, x).is_multivariate
    False
    >>> Poly(x**2 + x + 1, x, y).is_multivariate
    True

    """
    return len(f.gens) != 1
@property
def is_cyclotomic(f):
    """
    Returns ``True`` if ``f`` is a cyclotomic polynomial.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x

    >>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1

    >>> Poly(f).is_cyclotomic
    False

    >>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1

    >>> Poly(g).is_cyclotomic
    True

    """
    return f.rep.is_cyclotomic
def __abs__(f):
    """``abs(f)`` delegates to :meth:`abs`."""
    return f.abs()
def __neg__(f):
    """``-f`` delegates to :meth:`neg`."""
    return f.neg()
@_sympifyit('g', NotImplemented)
def __add__(f, g):
    """``f + g``: try to coerce ``g`` to a polynomial, else fall back to Expr."""
    if g.is_Poly:
        return f.add(g)

    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        # ``g`` is not polynomial in our generators; add as expressions.
        return f.as_expr() + g

    return f.add(g)
@_sympifyit('g', NotImplemented)
def __radd__(f, g):
    """``g + f``: try to coerce ``g`` to a polynomial, else fall back to Expr."""
    if g.is_Poly:
        return g.add(f)

    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        # ``g`` is not polynomial in our generators; add as expressions.
        return g + f.as_expr()

    return g.add(f)
@_sympifyit('g', NotImplemented)
def __sub__(f, g):
    """``f - g``: try to coerce ``g`` to a polynomial, else fall back to Expr."""
    if g.is_Poly:
        return f.sub(g)

    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        # ``g`` is not polynomial in our generators; subtract as expressions.
        return f.as_expr() - g

    return f.sub(g)
@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
    """``g - f``: try to coerce ``g`` to a polynomial, else fall back to Expr."""
    if g.is_Poly:
        return g.sub(f)

    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        # ``g`` is not polynomial in our generators; subtract as expressions.
        return g - f.as_expr()

    return g.sub(f)
@_sympifyit('g', NotImplemented)
def __mul__(f, g):
    """``f * g``: try to coerce ``g`` to a polynomial, else fall back to Expr."""
    if g.is_Poly:
        return f.mul(g)

    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        # ``g`` is not polynomial in our generators; multiply as expressions.
        return f.as_expr()*g

    return f.mul(g)
@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
    """``g * f``: try to coerce ``g`` to a polynomial, else fall back to Expr."""
    if g.is_Poly:
        return g.mul(f)

    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        # ``g`` is not polynomial in our generators; multiply as expressions.
        return g*f.as_expr()

    return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
    """``f**n``: stay a Poly for non-negative integer exponents."""
    if n.is_Integer and n >= 0:
        return f.pow(n)

    # Negative or symbolic exponents leave the polynomial ring.
    return f.as_expr()**n
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
    """``divmod(f, g)``: polynomial quotient and remainder."""
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)

    return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
    """``divmod(g, f)``: polynomial quotient and remainder."""
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)

    return g.div(f)
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
    """``f % g``: polynomial remainder."""
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)

    return f.rem(g)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
    """``g % f``: polynomial remainder."""
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)

    return g.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
    """``f // g``: polynomial quotient (remainder discarded)."""
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)

    return f.quo(g)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
    """``g // f``: polynomial quotient (remainder discarded)."""
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)

    return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
    """``f / g``: exact division is not closed over polynomials, so
    the result is returned as an Expr ratio."""
    return f.as_expr()/g.as_expr()

@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
    """``g / f``: returned as an Expr ratio (see ``__div__``)."""
    return g.as_expr()/f.as_expr()

# Python 3 uses __truediv__/__rtruediv__; alias the Python 2 names.
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
    """Structural equality: same generators and equal representations
    (after unifying the ground domains)."""
    f, g = self, other

    if not g.is_Poly:
        # Try to interpret ``other`` as a polynomial over our domain;
        # failure means the two cannot be equal.
        try:
            g = f.__class__(g, f.gens, domain=f.get_domain())
        except (PolynomialError, DomainError, CoercionFailed):
            return False

    if f.gens != g.gens:
        return False

    if f.rep.dom != g.rep.dom:
        # Compare over a common ground domain when one exists.
        try:
            dom = f.rep.dom.unify(g.rep.dom, f.gens)
        except UnificationFailed:
            return False

        f = f.set_domain(dom)
        g = g.set_domain(dom)

    return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
    """Negation of :meth:`__eq__`."""
    return not f.__eq__(g)
def __nonzero__(f):
    """Truthiness: a polynomial is falsy iff it is the zero polynomial."""
    return not f.is_zero

# Python 3 name for the truthiness hook.
__bool__ = __nonzero__
def eq(f, g, strict=False):
    """Equality test; with ``strict=True`` the representations must
    match exactly (see :meth:`_strict_eq`)."""
    if not strict:
        return f.__eq__(g)
    else:
        return f._strict_eq(sympify(g))
def ne(f, g, strict=False):
    """Negation of :meth:`eq`."""
    return not f.eq(g, strict=strict)
def _strict_eq(f, g):
    """Strict equality: same class, same generators and strictly
    equal internal representations."""
    return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
@public
class PurePoly(Poly):
    """Class for representing pure polynomials. """

    def _hashable_content(self):
        """Allow SymPy to hash Poly instances. """
        # Unlike Poly, the generators are deliberately excluded: two
        # PurePoly instances differing only in generator names compare
        # (and hash) equal.
        return (self.rep,)

    def __hash__(self):
        return super(PurePoly, self).__hash__()

    @property
    def free_symbols(self):
        """
        Free symbols of a polynomial.

        The generators themselves never count as free symbols; only
        symbols occurring in the ground domain do.

        Examples
        ========

        >>> from sympy import PurePoly
        >>> from sympy.abc import x, y

        >>> PurePoly(x**2 + 1).free_symbols
        set()
        >>> PurePoly(x**2 + y).free_symbols
        set()
        >>> PurePoly(x**2 + y, x).free_symbols
        set([y])

        """
        return self.free_symbols_in_domain

    @_sympifyit('other', NotImplemented)
    def __eq__(self, other):
        """Equality for pure polynomials: the *number* of generators
        must match, but not their names (contrast with Poly.__eq__)."""
        f, g = self, other

        if not g.is_Poly:
            try:
                g = f.__class__(g, f.gens, domain=f.get_domain())
            except (PolynomialError, DomainError, CoercionFailed):
                return False

        if len(f.gens) != len(g.gens):
            return False

        if f.rep.dom != g.rep.dom:
            # Compare over a common ground domain when one exists.
            try:
                dom = f.rep.dom.unify(g.rep.dom, f.gens)
            except UnificationFailed:
                return False

            f = f.set_domain(dom)
            g = g.set_domain(dom)

        return f.rep == g.rep

    def _strict_eq(f, g):
        # Generator names are irrelevant for pure polynomials, so only
        # the class and the representation are compared.
        return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)

    def _unify(f, g):
        """Bring ``f`` and ``g`` to a common class, generators and
        ground domain, keeping ``f``'s generators."""
        g = sympify(g)

        if not g.is_Poly:
            # Try to absorb ``g`` as a ground-domain element.
            try:
                return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
            except CoercionFailed:
                raise UnificationFailed("can't unify %s with %s" % (f, g))

        if len(f.gens) != len(g.gens):
            raise UnificationFailed("can't unify %s with %s" % (f, g))

        if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
            raise UnificationFailed("can't unify %s with %s" % (f, g))

        cls = f.__class__
        gens = f.gens

        dom = f.rep.dom.unify(g.rep.dom, gens)

        F = f.rep.convert(dom)
        G = g.rep.convert(dom)

        def per(rep, dom=dom, gens=gens, remove=None):
            # ``remove`` drops one generator; if none remain the result
            # collapses to a ground-domain element.
            if remove is not None:
                gens = gens[:remove] + gens[remove + 1:]

                if not gens:
                    return dom.to_sympy(rep)

            return cls.new(rep, *gens)

        return dom, per, F, G
@public
def poly_from_expr(expr, *gens, **args):
    """Construct a polynomial from an expression. """
    opt = options.build_options(gens, args)
    poly, opt = _poly_from_expr(expr, opt)
    return poly, opt
def _poly_from_expr(expr, opt):
    """Construct a polynomial from an expression. """
    orig, expr = expr, sympify(expr)

    if not isinstance(expr, Basic):
        raise PolificationFailed(opt, orig, expr)
    elif expr.is_Poly:
        # Already a Poly: rebuild under the requested options and record
        # its generators/domain back into ``opt``.
        poly = expr.__class__._from_poly(expr, opt)

        opt.gens = poly.gens
        opt.domain = poly.domain

        if opt.polys is None:
            opt.polys = True

        return poly, opt
    elif opt.expand:
        expr = expr.expand()

    rep, opt = _dict_from_expr(expr, opt)

    if not opt.gens:
        raise PolificationFailed(opt, orig, expr)

    monoms, coeffs = list(zip(*list(rep.items())))
    domain = opt.domain

    if domain is None:
        # Infer the smallest coefficient domain covering all coefficients.
        opt.domain, coeffs = construct_domain(coeffs, opt=opt)
    else:
        coeffs = list(map(domain.from_sympy, coeffs))

    rep = dict(list(zip(monoms, coeffs)))
    poly = Poly._from_dict(rep, opt)

    if opt.polys is None:
        opt.polys = False

    return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
    """Construct polynomials from expressions. """
    opt = options.build_options(gens, args)
    polys, opt = _parallel_poly_from_expr(exprs, opt)
    return polys, opt
def _parallel_poly_from_expr(exprs, opt):
    """Construct polynomials from expressions. """
    from sympy.functions.elementary.piecewise import Piecewise

    if len(exprs) == 2:
        f, g = exprs

        if isinstance(f, Poly) and isinstance(g, Poly):
            # Fast path for two Poly inputs: unify them directly.
            f = f.__class__._from_poly(f, opt)
            g = g.__class__._from_poly(g, opt)

            f, g = f.unify(g)

            opt.gens = f.gens
            opt.domain = f.domain

            if opt.polys is None:
                opt.polys = True

            return [f, g], opt

    # Classify inputs: indices of plain expressions vs. Poly instances.
    origs, exprs = list(exprs), []
    _exprs, _polys = [], []

    failed = False

    for i, expr in enumerate(origs):
        expr = sympify(expr)

        if isinstance(expr, Basic):
            if expr.is_Poly:
                _polys.append(i)
            else:
                _exprs.append(i)

                if opt.expand:
                    expr = expr.expand()
        else:
            failed = True

        exprs.append(expr)

    if failed:
        raise PolificationFailed(opt, origs, exprs, True)

    if _polys:
        # XXX: this is a temporary solution
        for i in _polys:
            exprs[i] = exprs[i].as_expr()

    reps, opt = _parallel_dict_from_expr(exprs, opt)

    if not opt.gens:
        raise PolificationFailed(opt, origs, exprs, True)

    for k in opt.gens:
        if isinstance(k, Piecewise):
            raise PolynomialError("Piecewise generators do not make sense")

    # Gather all coefficients into one list so a single common domain can
    # be constructed, remembering how many belong to each polynomial.
    coeffs_list, lengths = [], []

    all_monoms = []
    all_coeffs = []

    for rep in reps:
        monoms, coeffs = list(zip(*list(rep.items())))

        coeffs_list.extend(coeffs)
        all_monoms.append(monoms)

        lengths.append(len(coeffs))

    domain = opt.domain

    if domain is None:
        opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
    else:
        coeffs_list = list(map(domain.from_sympy, coeffs_list))

    # Split the converted coefficients back per polynomial.
    for k in lengths:
        all_coeffs.append(coeffs_list[:k])
        coeffs_list = coeffs_list[k:]

    polys = []

    for monoms, coeffs in zip(all_monoms, all_coeffs):
        rep = dict(list(zip(monoms, coeffs)))
        poly = Poly._from_dict(rep, opt)
        polys.append(poly)

    if opt.polys is None:
        opt.polys = bool(_polys)

    return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
@public
def degree(f, *gens, **args):
    """
    Return the degree of ``f`` in the given variable.

    The degree of 0 is negative infinity.

    Examples
    ========

    >>> from sympy import degree
    >>> from sympy.abc import x, y

    >>> degree(x**2 + y*x + 1, gen=x)
    2
    >>> degree(x**2 + y*x + 1, gen=y)
    1
    >>> degree(0, x)
    -oo

    """
    options.allowed_flags(args, ['gen', 'polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('degree', 1, exc)

    return sympify(poly.degree(opt.gen))
@public
def degree_list(f, *gens, **args):
    """
    Return a list of degrees of ``f`` in all variables.

    Examples
    ========

    >>> from sympy import degree_list
    >>> from sympy.abc import x, y

    >>> degree_list(x**2 + y*x + 1)
    (2, 1)

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('degree_list', 1, exc)

    return tuple(Integer(d) for d in poly.degree_list())
@public
def LC(f, *gens, **args):
    """
    Return the leading coefficient of ``f``.

    Examples
    ========

    >>> from sympy import LC
    >>> from sympy.abc import x, y

    >>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
    4

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('LC', 1, exc)

    return poly.LC(order=opt.order)
@public
def LM(f, *gens, **args):
    """
    Return the leading monomial of ``f``.

    Examples
    ========

    >>> from sympy import LM
    >>> from sympy.abc import x, y

    >>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
    x**2

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('LM', 1, exc)

    return poly.LM(order=opt.order).as_expr()
@public
def LT(f, *gens, **args):
    """
    Return the leading term of ``f``.

    Examples
    ========

    >>> from sympy import LT
    >>> from sympy.abc import x, y

    >>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
    4*x**2

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('LT', 1, exc)

    monom, coeff = poly.LT(order=opt.order)
    return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
    """
    Compute polynomial pseudo-division of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import pdiv
    >>> from sympy.abc import x

    >>> pdiv(x**2 + 1, 2*x - 4)
    (2*x + 4, 20)

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('pdiv', 2, exc)

    quotient, remainder = F.pdiv(G)

    if opt.polys:
        return quotient, remainder
    return quotient.as_expr(), remainder.as_expr()
@public
def prem(f, g, *gens, **args):
    """
    Compute polynomial pseudo-remainder of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import prem
    >>> from sympy.abc import x

    >>> prem(x**2 + 1, 2*x - 4)
    20

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('prem', 2, exc)

    remainder = F.prem(G)

    if opt.polys:
        return remainder
    return remainder.as_expr()
@public
def pquo(f, g, *gens, **args):
    """
    Compute polynomial pseudo-quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import pquo
    >>> from sympy.abc import x

    >>> pquo(x**2 + 1, 2*x - 4)
    2*x + 4
    >>> pquo(x**2 - 1, 2*x - 1)
    2*x + 1

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('pquo', 2, exc)

    try:
        quotient = F.pquo(G)
    except ExactQuotientFailed:
        # Re-raise with the caller's original expressions.
        raise ExactQuotientFailed(f, g)

    if opt.polys:
        return quotient
    return quotient.as_expr()
@public
def pexquo(f, g, *gens, **args):
    """
    Compute polynomial exact pseudo-quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import pexquo
    >>> from sympy.abc import x

    >>> pexquo(x**2 - 1, 2*x - 2)
    2*x + 2

    >>> pexquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('pexquo', 2, exc)

    quotient = F.pexquo(G)

    if opt.polys:
        return quotient
    return quotient.as_expr()
@public
def div(f, g, *gens, **args):
    """
    Compute polynomial division of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import div, ZZ, QQ
    >>> from sympy.abc import x

    >>> div(x**2 + 1, 2*x - 4, domain=ZZ)
    (0, x**2 + 1)
    >>> div(x**2 + 1, 2*x - 4, domain=QQ)
    (x/2 + 1, 5)

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('div', 2, exc)

    quotient, remainder = F.div(G, auto=opt.auto)

    if opt.polys:
        return quotient, remainder
    return quotient.as_expr(), remainder.as_expr()
@public
def rem(f, g, *gens, **args):
    """
    Compute polynomial remainder of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import rem, ZZ, QQ
    >>> from sympy.abc import x

    >>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
    x**2 + 1
    >>> rem(x**2 + 1, 2*x - 4, domain=QQ)
    5

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('rem', 2, exc)

    remainder = F.rem(G, auto=opt.auto)

    if opt.polys:
        return remainder
    return remainder.as_expr()
@public
def quo(f, g, *gens, **args):
    """
    Compute polynomial quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import quo
    >>> from sympy.abc import x

    >>> quo(x**2 + 1, 2*x - 4)
    x/2 + 1
    >>> quo(x**2 - 1, x - 1)
    x + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('quo', 2, exc)

    quotient = F.quo(G, auto=opt.auto)

    if opt.polys:
        return quotient
    return quotient.as_expr()
@public
def exquo(f, g, *gens, **args):
    """
    Compute polynomial exact quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import exquo
    >>> from sympy.abc import x

    >>> exquo(x**2 - 1, x - 1)
    x + 1

    >>> exquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('exquo', 2, exc)

    quotient = F.exquo(G, auto=opt.auto)

    if opt.polys:
        return quotient
    return quotient.as_expr()
@public
def half_gcdex(f, g, *gens, **args):
    """
    Half extended Euclidean algorithm of ``f`` and ``g``.

    Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.

    Examples
    ========

    >>> from sympy import half_gcdex
    >>> from sympy.abc import x

    >>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
    (-x/5 + 3/5, x + 1)

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Not polynomials: attempt the computation in the ground domain.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            s, h = domain.half_gcdex(a, b)
        except NotImplementedError:
            raise ComputationFailed('half_gcdex', 2, exc)
        else:
            return domain.to_sympy(s), domain.to_sympy(h)

    s, h = F.half_gcdex(G, auto=opt.auto)

    if opt.polys:
        return s, h
    return s.as_expr(), h.as_expr()
@public
def gcdex(f, g, *gens, **args):
    """
    Extended Euclidean algorithm of ``f`` and ``g``.

    Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.

    Examples
    ========

    >>> from sympy import gcdex
    >>> from sympy.abc import x

    >>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
    (-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Not polynomials: attempt the computation in the ground domain.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            s, t, h = domain.gcdex(a, b)
        except NotImplementedError:
            raise ComputationFailed('gcdex', 2, exc)
        else:
            return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)

    s, t, h = F.gcdex(G, auto=opt.auto)

    if opt.polys:
        return s, t, h
    return s.as_expr(), t.as_expr(), h.as_expr()
@public
def invert(f, g, *gens, **args):
    """
    Invert ``f`` modulo ``g`` when possible.

    Examples
    ========

    >>> from sympy import invert, S
    >>> from sympy.core.numbers import mod_inverse
    >>> from sympy.abc import x

    >>> invert(x**2 - 1, 2*x - 1)
    -4/3

    >>> invert(x**2 - 1, x - 1)
    Traceback (most recent call last):
    ...
    NotInvertible: zero divisor

    For more efficient inversion of Rationals,
    use the ``mod_inverse`` function:

    >>> mod_inverse(3, 5)
    2
    >>> (S(2)/5).invert(S(7)/3)
    5/2

    See Also
    ========
    sympy.core.numbers.mod_inverse

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Inputs are not polynomials: invert in the common ground domain.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.invert(a, b))
        except NotImplementedError:
            raise ComputationFailed('invert', 2, exc)

    h = F.invert(G, auto=opt.auto)

    if not opt.polys:
        return h.as_expr()
    else:
        return h
@public
def subresultants(f, g, *gens, **args):
    """
    Compute subresultant PRS of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import subresultants
    >>> from sympy.abc import x

    >>> subresultants(x**2 + 1, x**2 - 1)
    [x**2 + 1, x**2 - 1, -2]

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('subresultants', 2, exc)

    prs = F.subresultants(G)

    if opt.polys:
        return prs
    return [p.as_expr() for p in prs]
@public
def resultant(f, g, *gens, **args):
    """
    Compute resultant of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import resultant
    >>> from sympy.abc import x

    >>> resultant(x**2 + 1, x**2 - 1)
    4

    """
    includePRS = args.pop('includePRS', False)
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('resultant', 2, exc)

    if includePRS:
        # Also return the subresultant polynomial remainder sequence.
        result, prs = F.resultant(G, includePRS=True)

        if opt.polys:
            return result, prs
        return result.as_expr(), [p.as_expr() for p in prs]

    result = F.resultant(G)

    if opt.polys:
        return result
    return result.as_expr()
@public
def discriminant(f, *gens, **args):
    """
    Compute discriminant of ``f``.

    Examples
    ========

    >>> from sympy import discriminant
    >>> from sympy.abc import x

    >>> discriminant(x**2 + 2*x + 3)
    -8

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('discriminant', 1, exc)

    disc = poly.discriminant()
    return disc if opt.polys else disc.as_expr()
@public
def cofactors(f, g, *gens, **args):
    """
    Compute GCD and cofactors of ``f`` and ``g``.

    Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
    ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors
    of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import cofactors
    >>> from sympy.abc import x

    >>> cofactors(x**2 - 1, x**2 - 3*x + 2)
    (x - 1, x + 1, x - 2)

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Not polynomials: try the computation in the ground domain.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            h, cff, cfg = domain.cofactors(a, b)
        except NotImplementedError:
            raise ComputationFailed('cofactors', 2, exc)
        else:
            return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)

    h, cff, cfg = F.cofactors(G)

    if opt.polys:
        return h, cff, cfg
    return h.as_expr(), cff.as_expr(), cfg.as_expr()
@public
def gcd_list(seq, *gens, **args):
    """
    Compute GCD of a list of polynomials.

    Examples
    ========

    >>> from sympy import gcd_list
    >>> from sympy.abc import x

    >>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
    x - 1

    """
    seq = sympify(seq)

    def try_non_polynomial_gcd(seq):
        # Fast path: with no generators/options given, try to compute the
        # GCD directly in a numerical domain.
        if not gens and not args:
            domain, numbers = construct_domain(seq)

            if not numbers:
                return domain.zero
            elif domain.is_Numerical:
                result, numbers = numbers[0], numbers[1:]

                for number in numbers:
                    result = domain.gcd(result, number)

                    # GCD can only shrink; 1 is a fixed point, stop early.
                    if domain.is_one(result):
                        break

                return domain.to_sympy(result)

        return None

    result = try_non_polynomial_gcd(seq)

    if result is not None:
        return result

    options.allowed_flags(args, ['polys'])

    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    except PolificationFailed as exc:
        result = try_non_polynomial_gcd(exc.exprs)

        if result is not None:
            return result
        else:
            raise ComputationFailed('gcd_list', len(seq), exc)

    if not polys:
        # GCD of an empty sequence is 0 by convention.
        if not opt.polys:
            return S.Zero
        else:
            return Poly(0, opt=opt)

    result, polys = polys[0], polys[1:]

    for poly in polys:
        result = result.gcd(poly)

        if result.is_one:
            break

    if not opt.polys:
        return result.as_expr()
    else:
        return result
@public
def gcd(f, g=None, *gens, **args):
    """
    Compute GCD of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import gcd
    >>> from sympy.abc import x

    >>> gcd(x**2 - 1, x**2 - 3*x + 2)
    x - 1

    """
    # A single iterable first argument means "GCD of a sequence".
    if hasattr(f, '__iter__'):
        if g is not None:
            gens = (g,) + gens

        return gcd_list(f, *gens, **args)

    if g is None:
        raise TypeError("gcd() takes 2 arguments or a sequence of arguments")

    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Not polynomials: fall back to the ground-domain GCD.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.gcd(a, b))
        except NotImplementedError:
            raise ComputationFailed('gcd', 2, exc)

    result = F.gcd(G)
    return result if opt.polys else result.as_expr()
@public
def lcm_list(seq, *gens, **args):
    """
    Compute LCM of a list of polynomials.

    Examples
    ========

    >>> from sympy import lcm_list
    >>> from sympy.abc import x

    >>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
    x**5 - x**4 - 2*x**3 - x**2 + x + 2

    """
    seq = sympify(seq)

    def try_non_polynomial_lcm(seq):
        # Fast path: with no generators/options given, try to compute the
        # LCM directly in a numerical domain.
        if not gens and not args:
            domain, numbers = construct_domain(seq)

            if not numbers:
                return domain.one
            elif domain.is_Numerical:
                result, numbers = numbers[0], numbers[1:]

                for number in numbers:
                    result = domain.lcm(result, number)

                return domain.to_sympy(result)

        return None

    result = try_non_polynomial_lcm(seq)

    if result is not None:
        return result

    options.allowed_flags(args, ['polys'])

    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    except PolificationFailed as exc:
        result = try_non_polynomial_lcm(exc.exprs)

        if result is not None:
            return result
        else:
            raise ComputationFailed('lcm_list', len(seq), exc)

    if not polys:
        # LCM of an empty sequence is 1 by convention.
        if not opt.polys:
            return S.One
        else:
            return Poly(1, opt=opt)

    result, polys = polys[0], polys[1:]

    for poly in polys:
        result = result.lcm(poly)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
@public
def lcm(f, g=None, *gens, **args):
    """
    Compute LCM of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import lcm
    >>> from sympy.abc import x

    >>> lcm(x**2 - 1, x**2 - 3*x + 2)
    x**3 - 2*x**2 - x + 2

    """
    # A single iterable first argument means "LCM of a sequence".
    if hasattr(f, '__iter__'):
        if g is not None:
            gens = (g,) + gens

        return lcm_list(f, *gens, **args)

    if g is None:
        raise TypeError("lcm() takes 2 arguments or a sequence of arguments")

    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Not polynomials: fall back to the ground-domain LCM.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.lcm(a, b))
        except NotImplementedError:
            raise ComputationFailed('lcm', 2, exc)

    result = F.lcm(G)
    return result if opt.polys else result.as_expr()
@public
def terms_gcd(f, *gens, **args):
    """
    Remove GCD of terms from ``f``.

    If the ``deep`` flag is True, then the arguments of ``f`` will have
    terms_gcd applied to them.

    If a fraction is factored out of ``f`` and ``f`` is an Add, then
    an unevaluated Mul will be returned so that automatic simplification
    does not redistribute it. The hint ``clear``, when set to False, can be
    used to prevent such factoring when all coefficients are not fractions.

    Examples
    ========

    >>> from sympy import terms_gcd, cos
    >>> from sympy.abc import x, y
    >>> terms_gcd(x**6*y**2 + x**3*y, x, y)
    x**3*y*(x**3*y + 1)

    The default action of polys routines is to expand the expression
    given to them. terms_gcd follows this behavior:

    >>> terms_gcd((3+3*x)*(x+x*y))
    3*x*(x*y + x + y + 1)

    If this is not desired then the hint ``expand`` can be set to False.
    In this case the expression will be treated as though it were comprised
    of one or more terms:

    >>> terms_gcd((3+3*x)*(x+x*y), expand=False)
    (3*x + 3)*(x*y + x)

    In order to traverse factors of a Mul or the arguments of other
    functions, the ``deep`` hint can be used:

    >>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
    3*x*(x + 1)*(y + 1)
    >>> terms_gcd(cos(x + x*y), deep=True)
    cos(x*(y + 1))

    Rationals are factored out by default:

    >>> terms_gcd(x + y/2)
    (2*x + y)/2

    Only the y-term had a coefficient that was a fraction; if one
    does not want to factor out the 1/2 in cases like this, the
    flag ``clear`` can be set to False:

    >>> terms_gcd(x + y/2, clear=False)
    x + y/2
    >>> terms_gcd(x*y/2 + y**2, clear=False)
    y*(x/2 + y)

    The ``clear`` flag is ignored if all coefficients are fractions:

    >>> terms_gcd(x/3 + y/2, clear=False)
    (2*x + 3*y)/6

    See Also
    ========
    sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms

    """
    from sympy.core.relational import Equality

    orig = sympify(f)
    if not isinstance(f, Expr) or f.is_Atom:
        return orig

    if args.get('deep', False):
        # Recurse into the arguments first, then re-run on the rebuilt
        # expression without the ``deep`` flag (and without expanding).
        new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
        args.pop('deep')
        args['expand'] = False
        return terms_gcd(new, *gens, **args)

    if isinstance(f, Equality):
        return f

    clear = args.pop('clear', True)
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        return exc.expr

    # J holds the exponents of the monomial GCD that was pulled out.
    J, f = F.terms_gcd()

    if opt.domain.has_Ring:
        if opt.domain.has_Field:
            denom, f = f.clear_denoms(convert=True)

        coeff, f = f.primitive()

        if opt.domain.has_Field:
            coeff /= denom
    else:
        coeff = S.One

    term = Mul(*[x**j for x, j in zip(f.gens, J)])
    if coeff == 1:
        coeff = S.One
        if term == 1:
            return orig

    if clear:
        return _keep_coeff(coeff, term*f.as_expr())
    # base the clearing on the form of the original expression, not
    # the (perhaps) Mul that we have now
    coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
    return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
    """
    Reduce ``f`` modulo a constant ``p``.

    Examples
    ========

    >>> from sympy import trunc
    >>> from sympy.abc import x

    >>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
    -x**3 - x + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('trunc', 1, exc)

    reduced = poly.trunc(sympify(p))
    return reduced if opt.polys else reduced.as_expr()
@public
def monic(f, *gens, **args):
    """
    Divide all coefficients of ``f`` by ``LC(f)``.

    Examples
    ========

    >>> from sympy import monic
    >>> from sympy.abc import x

    >>> monic(3*x**2 + 4*x + 2)
    x**2 + 4*x/3 + 2/3

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('monic', 1, exc)

    result = poly.monic(auto=opt.auto)
    return result if opt.polys else result.as_expr()
@public
def content(f, *gens, **args):
    """
    Compute GCD of coefficients of ``f``.

    Examples
    ========

    >>> from sympy import content
    >>> from sympy.abc import x

    >>> content(6*x**2 + 8*x + 12)
    2

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('content', 1, exc)

    return poly.content()
@public
def primitive(f, *gens, **args):
    """
    Compute content and the primitive form of ``f``.

    Examples
    ========

    >>> from sympy.polys.polytools import primitive
    >>> from sympy.abc import x

    >>> primitive(6*x**2 + 8*x + 12)
    (2, 3*x**2 + 4*x + 6)

    >>> eq = (2 + 2*x)*x + 2

    Expansion is performed by default:

    >>> primitive(eq)
    (2, x**2 + x + 1)

    Set ``expand`` to False to shut this off. Note that the
    extraction will not be recursive; use the as_content_primitive method
    for recursive, non-destructive Rational extraction.

    >>> primitive(eq, expand=False)
    (1, x*(2*x + 2) + 2)

    >>> eq.as_content_primitive()
    (2, x*(x + 1) + 1)

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('primitive', 1, exc)

    cont, prim = poly.primitive()

    if opt.polys:
        return cont, prim
    return cont, prim.as_expr()
@public
def compose(f, g, *gens, **args):
    """
    Compute functional composition ``f(g)``.

    Examples
    ========

    >>> from sympy import compose
    >>> from sympy.abc import x

    >>> compose(x**2 + x, x - 1)
    x**2 - x

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('compose', 2, exc)

    composed = F.compose(G)
    return composed if opt.polys else composed.as_expr()
@public
def decompose(f, *gens, **args):
    """
    Compute functional decomposition of ``f``.

    Examples
    ========

    >>> from sympy import decompose
    >>> from sympy.abc import x

    >>> decompose(x**4 + 2*x**3 - x - 1)
    [x**2 - x - 1, x**2 + x]

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('decompose', 1, exc)

    parts = poly.decompose()

    if opt.polys:
        return parts
    return [p.as_expr() for p in parts]
@public
def sturm(f, *gens, **args):
    """
    Compute Sturm sequence of ``f``.

    Examples
    ========

    >>> from sympy import sturm
    >>> from sympy.abc import x

    >>> sturm(x**3 - 2*x**2 + x - 3)
    [x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('sturm', 1, exc)

    sequence = poly.sturm(auto=opt.auto)

    if opt.polys:
        return sequence
    return [p.as_expr() for p in sequence]
@public
def gff_list(f, *gens, **args):
    """
    Compute a list of greatest factorial factors of ``f``.

    Examples
    ========

    >>> from sympy import gff_list, ff
    >>> from sympy.abc import x

    >>> f = x**5 + 2*x**4 - x**3 - 2*x**2

    >>> gff_list(f)
    [(x, 1), (x + 2, 4)]

    >>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
    True

    >>> f = x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \
        1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x

    >>> gff_list(f)
    [(x**3 + 7, 2), (x**2 + 5*x, 3)]

    >>> ff(x**3 + 7, 2)*ff(x**2 + 5*x, 3) == f
    True

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('gff_list', 1, exc)

    factors = poly.gff_list()

    if opt.polys:
        return factors
    return [(g.as_expr(), k) for g, k in factors]
@public
def gff(f, *gens, **args):
    """Compute greatest factorial factorization of ``f``. """
    # Only the list form (gff_list) is implemented; the symbolic
    # falling-factorial product form is not.
    raise NotImplementedError('symbolic falling factorial')
@public
def sqf_norm(f, *gens, **args):
    """
    Compute square-free norm of ``f``.

    Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
    ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
    where ``a`` is the algebraic extension of the ground domain.

    Examples
    ========

    >>> from sympy import sqf_norm, sqrt
    >>> from sympy.abc import x

    >>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
    (1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('sqf_norm', 1, exc)

    s, g, r = F.sqf_norm()

    if opt.polys:
        return Integer(s), g, r
    return Integer(s), g.as_expr(), r.as_expr()
@public
def sqf_part(f, *gens, **args):
    """
    Compute square-free part of ``f``.

    Examples
    ========

    >>> from sympy import sqf_part
    >>> from sympy.abc import x

    >>> sqf_part(x**3 - 3*x - 2)
    x**2 - x - 2

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('sqf_part', 1, exc)

    part = poly.sqf_part()
    return part if opt.polys else part.as_expr()
def _sorted_factors(factors, method):
    """Sort a list of ``(expr, exp)`` pairs into a canonical order.

    For 'sqf' output the multiplicity is the primary sort key; otherwise
    the size of the representation comes first.
    """
    if method == 'sqf':
        def key(pair):
            poly, exp = pair
            rep = poly.rep.rep
            return (exp, len(rep), len(poly.gens), rep)
    else:
        def key(pair):
            poly, exp = pair
            rep = poly.rep.rep
            return (len(rep), len(poly.gens), exp, rep)

    return sorted(factors, key=key)
def _factors_product(factors):
    """Multiply a list of ``(expr, exp)`` pairs back into one product."""
    powers = [base.as_expr()**exp for base, exp in factors]
    return Mul(*powers)
def _symbolic_factor_list(expr, opt, method):
    """Helper function for :func:`_symbolic_factor`. """
    coeff, factors = S.One, []

    args = [i._eval_factor() if hasattr(i, '_eval_factor') else i
        for i in Mul.make_args(expr)]
    # NOTE: ``args`` is deliberately extended while being iterated so
    # that nested Mul arguments get flattened and processed too.
    for arg in args:
        if arg.is_Number:
            coeff *= arg
            continue
        if arg.is_Mul:
            args.extend(arg.args)
            continue
        if arg.is_Pow:
            base, exp = arg.args
            if base.is_Number and exp.is_Number:
                coeff *= arg
                continue
            if base.is_Number:
                factors.append((base, exp))
                continue
        else:
            # Plain factor: treat as base**1.
            base, exp = arg, S.One

        try:
            poly, _ = _poly_from_expr(base, opt)
        except PolificationFailed as exc:
            # Not polynomial: keep the factor as-is.
            factors.append((exc.expr, exp))
        else:
            func = getattr(poly, method + '_list')
            _coeff, _factors = func()
            if _coeff is not S.One:
                if exp.is_Integer:
                    coeff *= _coeff**exp
                elif _coeff.is_positive:
                    factors.append((_coeff, exp))
                else:
                    _factors.append((_coeff, S.One))

            if exp is S.One:
                factors.extend(_factors)
            elif exp.is_integer:
                factors.extend([(f, k*exp) for f, k in _factors])
            else:
                # Non-integer exponent: only positive factors may be
                # raised to it individually; group the rest together.
                other = []

                for f, k in _factors:
                    if f.as_expr().is_positive:
                        factors.append((f, k*exp))
                    else:
                        other.append((f, k))

                factors.append((_factors_product(other), exp))

    return coeff, factors
def _symbolic_factor(expr, opt, method):
    """Helper function for :func:`_factor`. """
    if isinstance(expr, Expr) and not expr.is_Relational:
        if hasattr(expr, '_eval_factor'):
            return expr._eval_factor()
        coeff, factors = _symbolic_factor_list(together(expr), opt, method)
        return _keep_coeff(coeff, _factors_product(factors))

    # Non-Expr containers and relational objects: recurse structurally.
    if hasattr(expr, 'args'):
        return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args])
    if hasattr(expr, '__iter__'):
        return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])

    return expr
def _generic_factor_list(expr, gens, args, method):
    """Helper function for :func:`sqf_list` and :func:`factor_list`. """
    options.allowed_flags(args, ['frac', 'polys'])
    opt = options.build_options(gens, args)

    expr = sympify(expr)

    if isinstance(expr, Expr) and not expr.is_Relational:
        # Factor numerator and denominator separately.
        numer, denom = together(expr).as_numer_denom()

        cp, fp = _symbolic_factor_list(numer, opt, method)
        cq, fq = _symbolic_factor_list(denom, opt, method)

        # A nontrivial denominator is only allowed with the ``frac`` flag.
        if fq and not opt.frac:
            raise PolynomialError("a polynomial expected, got %s" % expr)

        _opt = opt.clone(dict(expand=True))

        for factors in (fp, fq):
            for i, (f, k) in enumerate(factors):
                if not f.is_Poly:
                    f, _ = _poly_from_expr(f, _opt)
                    factors[i] = (f, k)

        fp = _sorted_factors(fp, method)
        fq = _sorted_factors(fq, method)

        if not opt.polys:
            fp = [(f.as_expr(), k) for f, k in fp]
            fq = [(f.as_expr(), k) for f, k in fq]

        coeff = cp/cq

        if not opt.frac:
            return coeff, fp
        else:
            return coeff, fp, fq
    else:
        raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
    """Helper function for :func:`sqf` and :func:`factor`. """
    options.allowed_flags(args, [])
    opt = options.build_options(gens, args)
    expr = sympify(expr)
    return _symbolic_factor(expr, opt, method)
def to_rational_coeffs(f):
    """
    try to transform a polynomial to have rational coefficients

    try to find a transformation ``x = alpha*y``

    ``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with
    rational coefficients, ``lc`` the leading coefficient.

    If this fails, try ``x = y + beta``
    ``f(x) = g(y)``

    Returns ``None`` if ``g`` not found;
    ``(lc, alpha, None, g)`` in case of rescaling
    ``(None, None, beta, g)`` in case of translation

    Notes
    =====

    Currently it transforms only polynomials without roots larger than 2.

    Examples
    ========

    >>> from sympy import sqrt, Poly, simplify
    >>> from sympy.polys.polytools import to_rational_coeffs
    >>> from sympy.abc import x
    >>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
    >>> lc, r, _, g = to_rational_coeffs(p)
    >>> lc, r
    (7 + 5*sqrt(2), -2*sqrt(2) + 2)
    >>> g
    Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
    >>> r1 = simplify(1/r)
    >>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
    True
    """
    from sympy.simplify.simplify import simplify

    def _try_rescale(f, f1=None):
        """
        try rescaling ``x -> alpha*x`` to convert f to a polynomial
        with rational coefficients.
        Returns ``lc, alpha, f``; if the rescaling is successful,
        ``alpha`` is the rescaling factor, and ``f`` is the rescaled
        polynomial; else the result is ``None``.
        """
        from sympy.core.add import Add
        if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
            # BUG FIX: previously returned the *truthy* pair ``(None, f)``,
            # which made the caller's ``if r:`` branch index ``r[2]`` and
            # crash with IndexError for multivariate/non-Atom generators.
            # A plain ``None`` signals failure consistently.
            return None
        n = f.degree()
        lc = f.LC()
        # BUG FIX: was ``f1 = f1 or f1.monic()``, which raises
        # AttributeError on ``None`` when no precomputed monic polynomial
        # is supplied; the fallback must be computed from ``f`` itself.
        f1 = f1 or f.monic()
        coeffs = f1.all_coeffs()[1:]
        coeffs = [simplify(coeffx) for coeffx in coeffs]
        if coeffs[-2]:
            # Candidate inverse scale factor derived from the two lowest
            # coefficients; all rescaled coefficients must turn rational.
            rescale1_x = simplify(coeffs[-2]/coeffs[-1])
            coeffs1 = []
            for i in range(len(coeffs)):
                coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
                if not coeffx.is_rational:
                    break
                coeffs1.append(coeffx)
            else:
                # All coefficients became rational: rebuild the polynomial.
                rescale_x = simplify(1/rescale1_x)
                x = f.gens[0]
                v = [x**n]
                for i in range(1, n + 1):
                    v.append(coeffs1[i - 1]*x**(n - i))
                f = Add(*v)
                f = Poly(f)
                return lc, rescale_x, f
        return None

    def _try_translate(f, f1=None):
        """
        try translating ``x -> x + alpha`` to convert f to a polynomial
        with rational coefficients.
        Returns ``alpha, f``; if the translating is successful,
        ``alpha`` is the translating factor, and ``f`` is the shifted
        polynomial; else the result is ``None``.
        """
        from sympy.core.add import Add
        if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
            # BUG FIX: see _try_rescale — ``(None, f)`` is truthy and made
            # the caller report a bogus translation; return ``None``.
            return None
        n = f.degree()
        # BUG FIX: was ``f1 = f1 or f1.monic()`` (AttributeError on None).
        f1 = f1 or f.monic()
        coeffs = f1.all_coeffs()[1:]
        c = simplify(coeffs[0])
        if c and not c.is_rational:
            func = Add
            if c.is_Add:
                args = c.args
                func = c.func
            else:
                args = [c]
            # Shift by the mean of the irrational part of the subleading
            # coefficient so it cancels after translation.
            sifted = sift(args, lambda z: z.is_rational)
            c1, c2 = sifted[True], sifted[False]
            alpha = -func(*c2)/n
            f2 = f1.shift(alpha)
            return alpha, f2
        return None

    def _has_square_roots(p):
        """
        Return True if ``f`` is a sum with square roots but no other root
        """
        from sympy.core.exprtools import Factors
        coeffs = p.coeffs()
        has_sq = False
        for y in coeffs:
            for x in Add.make_args(y):
                f = Factors(x).factors
                r = [wx.q for b, wx in f.items() if
                     b.is_number and wx.is_Rational and wx.q >= 2]
                if not r:
                    continue
                if min(r) == 2:
                    has_sq = True
                if max(r) > 2:
                    # roots deeper than square roots are not handled
                    return False
        return has_sq

    if f.get_domain().is_EX and _has_square_roots(f):
        f1 = f.monic()
        r = _try_rescale(f, f1)
        if r:
            return r[0], r[1], None, r[2]
        else:
            r = _try_translate(f, f1)
            if r:
                return None, None, r[0], r[1]
    return None
def _torational_factor_list(p, x):
    """
    helper function to factor polynomial using to_rational_coeffs

    Examples
    ========

    >>> from sympy.polys.polytools import _torational_factor_list
    >>> from sympy.abc import x
    >>> from sympy import sqrt, expand, Mul
    >>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
    >>> factors = _torational_factor_list(p, x); factors
    (-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
    >>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
    True
    >>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
    >>> factors = _torational_factor_list(p, x); factors
    (1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
    >>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
    True
    """
    from sympy.simplify.simplify import simplify
    p1 = Poly(p, x, domain='EX')
    n = p1.degree()
    # Try to map p1 onto a rational-coefficient polynomial g via either a
    # rescaling (lc is set) or a translation (t is set); see to_rational_coeffs.
    res = to_rational_coeffs(p1)
    if not res:
        return None
    lc, r, t, g = res
    factors = factor_list(g.as_expr())
    if lc:
        # rescaling case: undo x -> r*x in each factor and fold the scale
        # r**n together with lc into the overall content c
        c = simplify(factors[0]*lc*r**n)
        r1 = simplify(1/r)
        a = []
        for z in factors[1:][0]:
            a.append((simplify(z[0].subs({x: x*r1})), z[1]))
    else:
        # translation case: undo x -> x + t in each factor; content unchanged
        c = factors[0]
        a = []
        for z in factors[1:][0]:
            a.append((z[0].subs({x: x - t}), z[1]))
    return (c, a)
@public
def sqf_list(f, *gens, **args):
    """
    Compute a list of square-free factors of ``f``.

    Returns ``(coeff, factors)`` where ``factors`` is a list of
    ``(factor, multiplicity)`` pairs.

    Examples
    ========

    >>> from sympy import sqf_list
    >>> from sympy.abc import x
    >>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    (2, [(x + 1, 2), (x + 2, 3)])
    """
    return _generic_factor_list(f, gens, args, method='sqf')
@public
def sqf(f, *gens, **args):
    """
    Compute square-free factorization of ``f``.

    Like :func:`sqf_list`, but returns the factorization recomposed as an
    expression rather than a ``(coeff, factors)`` pair.

    Examples
    ========

    >>> from sympy import sqf
    >>> from sympy.abc import x
    >>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    2*(x + 1)**2*(x + 2)**3
    """
    return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
    """
    Compute a list of irreducible factors of ``f``.

    Returns ``(coeff, factors)`` where ``factors`` is a list of
    ``(factor, multiplicity)`` pairs.

    Examples
    ========

    >>> from sympy import factor_list
    >>> from sympy.abc import x, y
    >>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    (2, [(x + y, 1), (x**2 + 1, 2)])
    """
    return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, **args):
    """
    Compute the factorization of expression, ``f``, into irreducibles. (To
    factor an integer into primes, use ``factorint``.)

    There two modes implemented: symbolic and formal. If ``f`` is not an
    instance of :class:`Poly` and generators are not specified, then the
    former mode is used. Otherwise, the formal mode is used.

    In symbolic mode, :func:`factor` will traverse the expression tree and
    factor its components without any prior expansion, unless an instance
    of :class:`Add` is encountered (in this case formal factorization is
    used). This way :func:`factor` can handle large or symbolic exponents.

    By default, the factorization is computed over the rationals. To factor
    over other domain, e.g. an algebraic or finite field, use appropriate
    options: ``extension``, ``modulus`` or ``domain``.

    Examples
    ========

    >>> from sympy import factor, sqrt
    >>> from sympy.abc import x, y
    >>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    2*(x + y)*(x**2 + 1)**2
    >>> factor(x**2 + 1)
    x**2 + 1
    >>> factor(x**2 + 1, modulus=2)
    (x + 1)**2
    >>> factor(x**2 + 1, gaussian=True)
    (x - I)*(x + I)
    >>> factor(x**2 - 2, extension=sqrt(2))
    (x - sqrt(2))*(x + sqrt(2))
    >>> factor((x**2 - 1)/(x**2 + 4*x + 4))
    (x - 1)*(x + 1)/(x + 2)**2
    >>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
    (x + 2)**20000000*(x**2 + 1)

    By default, factor deals with an expression as a whole:

    >>> eq = 2**(x**2 + 2*x + 1)
    >>> factor(eq)
    2**(x**2 + 2*x + 1)

    If the ``deep`` flag is True then subexpressions will
    be factored:

    >>> factor(eq, deep=True)
    2**((x + 1)**2)

    See Also
    ========

    sympy.ntheory.factor_.factorint
    """
    f = sympify(f)
    if args.pop('deep', False):
        # deep mode: factor every Mul/Add subexpression independently and
        # substitute the factored forms back into the original tree
        partials = {}
        muladd = f.atoms(Mul, Add)
        for p in muladd:
            fac = factor(p, *gens, **args)
            if (fac.is_Mul or fac.is_Pow) and fac != p:
                partials[p] = fac
        return f.xreplace(partials)
    try:
        return _generic_factor(f, gens, args, method='factor')
    except PolynomialError as msg:
        # fall back to the dedicated noncommutative factoring routine;
        # for commutative input the error is genuine, so re-raise
        if not f.is_commutative:
            from sympy.core.exprtools import factor_nc
            return factor_nc(f)
        else:
            raise PolynomialError(msg)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
    """
    Compute isolating intervals for roots of ``f``.

    ``F`` may be a single polynomial/expression or an iterable of them;
    in the latter case the isolation is performed jointly over ``QQ`` and
    only univariate input is accepted.  (NB: the ``all`` parameter shadows
    the builtin; kept for backward compatibility.)

    Examples
    ========

    >>> from sympy import intervals
    >>> from sympy.abc import x
    >>> intervals(x**2 - 3)
    [((-2, -1), 1), ((1, 2), 1)]
    >>> intervals(x**2 - 3, eps=1e-2)
    [((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
    """
    if not hasattr(F, '__iter__'):
        # single polynomial: delegate to Poly.intervals
        try:
            F = Poly(F)
        except GeneratorsNeeded:
            return []
        return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
    else:
        # list of polynomials: isolate real roots of the whole family
        polys, opt = parallel_poly_from_expr(F, domain='QQ')
        if len(opt.gens) > 1:
            raise MultivariatePolynomialError
        for i, poly in enumerate(polys):
            # strip down to the low-level dense representation
            polys[i] = poly.rep.rep
        if eps is not None:
            eps = opt.domain.convert(eps)
            if eps <= 0:
                raise ValueError("'eps' must be a positive rational")
        if inf is not None:
            inf = opt.domain.convert(inf)
        if sup is not None:
            sup = opt.domain.convert(sup)
        intervals = dup_isolate_real_roots_list(polys, opt.domain,
            eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
        result = []
        for (s, t), indices in intervals:
            # convert interval endpoints back from the ground domain
            s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
            result.append(((s, t), indices))
        return result
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
    """
    Refine an isolating interval ``(s, t)`` of a root of ``f`` to the
    given precision.

    Examples
    ========

    >>> from sympy import refine_root
    >>> from sympy.abc import x
    >>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
    (19/11, 26/15)
    """
    try:
        poly = Poly(f)
    except GeneratorsNeeded:
        raise PolynomialError(
            "can't refine a root of %s, not a polynomial" % f)
    return poly.refine_root(
        s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
    """
    Return the number of roots of ``f`` in ``[inf, sup]`` interval.

    If one of ``inf`` or ``sup`` is complex, it will return the number of
    roots in the complex rectangle with corners at ``inf`` and ``sup``.

    Examples
    ========

    >>> from sympy import count_roots, I
    >>> from sympy.abc import x
    >>> count_roots(x**4 - 4, -3, 3)
    2
    >>> count_roots(x**4 - 4, 0, 1 + 3*I)
    1
    """
    try:
        poly = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError("can't count roots of %s, not a polynomial" % f)
    return poly.count_roots(inf=inf, sup=sup)
@public
def real_roots(f, multiple=True):
    """
    Return a list of real roots with multiplicities of ``f``.

    Examples
    ========

    >>> from sympy import real_roots
    >>> from sympy.abc import x
    >>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
    [-1/2, 2, 2]
    """
    try:
        poly = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError(
            "can't compute real roots of %s, not a polynomial" % f)
    return poly.real_roots(multiple=multiple)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
    """
    Compute numerical approximations of roots of ``f`` to ``n`` digits.

    Examples
    ========

    >>> from sympy import nroots
    >>> from sympy.abc import x
    >>> nroots(x**2 - 3, n=15)
    [-1.73205080756888, 1.73205080756888]
    >>> nroots(x**2 - 3, n=30)
    [-1.73205080756887729352744634151, 1.73205080756887729352744634151]
    """
    try:
        poly = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError(
            "can't compute numerical roots of %s, not a polynomial" % f)
    return poly.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
    """
    Compute roots of ``f`` by factorization in the ground domain.

    Returns a dict mapping each root to its multiplicity.

    Examples
    ========

    >>> from sympy import ground_roots
    >>> from sympy.abc import x
    >>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
    {0: 2, 1: 2}
    """
    options.allowed_flags(args, [])
    try:
        poly, _opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('ground_roots', 1, exc)
    return poly.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
    """
    Construct a polynomial with n-th powers of roots of ``f``.

    Examples
    ========

    >>> from sympy import nth_power_roots_poly, factor, roots
    >>> from sympy.abc import x
    >>> f = x**4 - x**2 + 1
    >>> g = factor(nth_power_roots_poly(f, 2))
    >>> g
    (x**2 - x + 1)**2
    >>> R_f = [ (r**2).expand() for r in roots(f) ]
    >>> R_g = roots(g).keys()
    >>> set(R_f) == set(R_g)
    True
    """
    options.allowed_flags(args, [])
    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('nth_power_roots_poly', 1, exc)
    transformed = poly.nth_power_roots_poly(n)
    if opt.polys:
        return transformed
    return transformed.as_expr()
@public
def cancel(f, *gens, **args):
    """
    Cancel common factors in a rational function ``f``.

    ``f`` may be an expression or a ``(numerator, denominator)`` pair; for
    a pair the result is the triple ``(coeff, numer, denom)``.

    Examples
    ========

    >>> from sympy import cancel, sqrt, Symbol
    >>> from sympy.abc import x
    >>> A = Symbol('A', commutative=False)
    >>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
    (2*x + 2)/(x - 1)
    >>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
    sqrt(6)/2
    """
    from sympy.core.exprtools import factor_terms
    from sympy.functions.elementary.piecewise import Piecewise
    options.allowed_flags(args, ['polys'])
    f = sympify(f)
    # Normalize input into a numerator/denominator pair (p, q).
    if not isinstance(f, (tuple, Tuple)):
        if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
            return f
        f = factor_terms(f, radical=True)
        p, q = f.as_numer_denom()
    elif len(f) == 2:
        p, q = f
    elif isinstance(f, Tuple):
        return factor_terms(f)
    else:
        raise ValueError('unexpected argument: %s' % f)
    try:
        (F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
    except PolificationFailed:
        # not polynomial-like at all: return the input unchanged (or a
        # trivial triple for pair input)
        if not isinstance(f, (tuple, Tuple)):
            return f
        else:
            return S.One, p, q
    except PolynomialError as msg:
        if f.is_commutative and not f.has(Piecewise):
            raise PolynomialError(msg)
        # Handling of noncommutative and/or piecewise expressions
        if f.is_Add or f.is_Mul:
            # cancel the commutative, Piecewise-free part as a whole and
            # recurse into the remaining arguments individually
            sifted = sift(f.args, lambda x: x.is_commutative is True and not x.has(Piecewise))
            c, nc = sifted[True], sifted[False]
            nc = [cancel(i) for i in nc]
            return f.func(cancel(f.func._from_args(c)), *nc)
        else:
            # walk the tree and cancel every subexpression we can handle
            reps = []
            pot = preorder_traversal(f)
            next(pot)
            for e in pot:
                # XXX: This should really skip anything that's not Expr.
                if isinstance(e, (tuple, Tuple, BooleanAtom)):
                    continue
                try:
                    reps.append((e, cancel(e)))
                    pot.skip()  # this was handled successfully
                except NotImplementedError:
                    pass
            return f.xreplace(dict(reps))
    c, P, Q = F.cancel(G)
    if not isinstance(f, (tuple, Tuple)):
        return c*(P.as_expr()/Q.as_expr())
    else:
        if not opt.polys:
            return c, P.as_expr(), Q.as_expr()
        else:
            return c, P, Q
@public
def reduced(f, G, *gens, **args):
    """
    Reduces a polynomial ``f`` modulo a set of polynomials ``G``.

    Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
    computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
    such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
    is a completely reduced polynomial with respect to ``G``.

    Examples
    ========

    >>> from sympy import reduced
    >>> from sympy.abc import x, y
    >>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
    ([2*x, 1], x**2 + y**2 + y)
    """
    options.allowed_flags(args, ['polys', 'auto'])
    try:
        polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('reduced', 0, exc)
    domain = opt.domain
    retract = False
    # Division needs a field; with auto=True temporarily lift a ring
    # domain (e.g. ZZ) to its field of fractions, and retract afterwards.
    if opt.auto and domain.has_Ring and not domain.has_Field:
        opt = opt.clone(dict(domain=domain.get_field()))
        retract = True
    from sympy.polys.rings import xring
    _ring, _ = xring(opt.gens, opt.domain, opt.order)
    for i, poly in enumerate(polys):
        # convert to the sparse ring representation used by div()
        poly = poly.set_domain(opt.domain).rep.to_dict()
        polys[i] = _ring.from_dict(poly)
    Q, r = polys[0].div(polys[1:])
    Q = [Poly._from_dict(dict(q), opt) for q in Q]
    r = Poly._from_dict(dict(r), opt)
    if retract:
        # best effort: return to the original ring if no coefficient
        # actually needed the field
        try:
            _Q, _r = [q.to_ring() for q in Q], r.to_ring()
        except CoercionFailed:
            pass
        else:
            Q, r = _Q, _r
    if not opt.polys:
        return [q.as_expr() for q in Q], r.as_expr()
    else:
        return Q, r
@public
def groebner(F, *gens, **args):
    """
    Computes the reduced Groebner basis for a set of polynomials.

    Use the ``order`` argument to set the monomial ordering that will be
    used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
    ``grevlex``. If no order is specified, it defaults to ``lex``.

    For more information on Groebner bases, see the references and the docstring
    of `solve_poly_system()`.

    Examples
    ========

    Example taken from [1].

    >>> from sympy import groebner
    >>> from sympy.abc import x, y
    >>> F = [x*y - 2*y, 2*y**2 - x**2]
    >>> groebner(F, x, y, order='lex')
    GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
                  domain='ZZ', order='lex')
    >>> groebner(F, x, y, order='grlex')
    GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
                  domain='ZZ', order='grlex')
    >>> groebner(F, x, y, order='grevlex')
    GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
                  domain='ZZ', order='grevlex')

    By default, an improved implementation of the Buchberger algorithm is
    used. Optionally, an implementation of the F5B algorithm can be used.
    The algorithm can be set using ``method`` flag or with the :func:`setup`
    function from :mod:`sympy.polys.polyconfig`:

    >>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
    >>> groebner(F, x, y, method='buchberger')
    GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
    >>> groebner(F, x, y, method='f5b')
    GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')

    References
    ==========

    1. [Buchberger01]_
    2. [Cox97]_
    """
    # Thin convenience wrapper; all work happens in GroebnerBasis.__new__.
    return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
    """
    Checks if the ideal generated by a Groebner basis is zero-dimensional.

    The algorithm checks if the set of monomials not divisible by the
    leading monomial of any element of ``F`` is bounded.

    References
    ==========

    David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
    Algorithms, 3rd edition, p. 230
    """
    # Computes a full Groebner basis first; see the property of the same
    # name on GroebnerBasis for the actual test.
    return GroebnerBasis(F, *gens, **args).is_zero_dimensional
@public
class GroebnerBasis(Basic):
    """Represents a reduced Groebner basis.

    Immutable container holding the basis polynomials (``_basis``) and
    the options they were computed under (``_options``).
    """
    def __new__(cls, F, *gens, **args):
        """Compute a reduced Groebner basis for a system of polynomials. """
        options.allowed_flags(args, ['polys', 'method'])
        try:
            polys, opt = parallel_poly_from_expr(F, *gens, **args)
        except PolificationFailed as exc:
            raise ComputationFailed('groebner', len(F), exc)
        from sympy.polys.rings import PolyRing
        ring = PolyRing(opt.gens, opt.domain, opt.order)
        # convert to the sparse ring representation used by _groebner
        for i, poly in enumerate(polys):
            polys[i] = ring.from_dict(poly.rep.to_dict())
        G = _groebner(polys, ring, method=opt.method)
        G = [Poly._from_dict(g, opt) for g in G]
        return cls._new(G, opt)
    @classmethod
    def _new(cls, basis, options):
        # Internal constructor: wraps an already-computed basis without
        # re-running the Groebner algorithm.
        obj = Basic.__new__(cls)
        obj._basis = tuple(basis)
        obj._options = options
        return obj
    @property
    def args(self):
        return (Tuple(*self._basis), Tuple(*self._options.gens))
    @property
    def exprs(self):
        # basis elements as plain expressions
        return [poly.as_expr() for poly in self._basis]
    @property
    def polys(self):
        # basis elements as Poly instances
        return list(self._basis)
    @property
    def gens(self):
        return self._options.gens
    @property
    def domain(self):
        return self._options.domain
    @property
    def order(self):
        return self._options.order
    def __len__(self):
        return len(self._basis)
    def __iter__(self):
        # the ``polys`` option chooses Poly vs expression iteration
        if self._options.polys:
            return iter(self.polys)
        else:
            return iter(self.exprs)
    def __getitem__(self, item):
        if self._options.polys:
            basis = self.polys
        else:
            basis = self.exprs
        return basis[item]
    def __hash__(self):
        return hash((self._basis, tuple(self._options.items())))
    def __eq__(self, other):
        # equal to another basis with identical options, or to any
        # iterable matching either the Poly or the expression form
        if isinstance(other, self.__class__):
            return self._basis == other._basis and self._options == other._options
        elif iterable(other):
            return self.polys == list(other) or self.exprs == list(other)
        else:
            return False
    def __ne__(self, other):
        return not self.__eq__(other)
    @property
    def is_zero_dimensional(self):
        """
        Checks if the ideal generated by a Groebner basis is zero-dimensional.

        The algorithm checks if the set of monomials not divisible by the
        leading monomial of any element of ``F`` is bounded.

        References
        ==========

        David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
        Algorithms, 3rd edition, p. 230
        """
        def single_var(monomial):
            # True iff exactly one generator appears in the monomial
            return sum(map(bool, monomial)) == 1
        exponents = Monomial([0]*len(self.gens))
        order = self._options.order
        for poly in self.polys:
            monomial = poly.LM(order=order)
            if single_var(monomial):
                exponents *= monomial
        # If any element of the exponents vector is zero, then there's
        # a variable for which there's no degree bound and the ideal
        # generated by this Groebner basis isn't zero-dimensional.
        return all(exponents)
    def fglm(self, order):
        """
        Convert a Groebner basis from one ordering to another.

        The FGLM algorithm converts reduced Groebner bases of zero-dimensional
        ideals from one ordering to another. This method is often used when it
        is infeasible to compute a Groebner basis with respect to a particular
        ordering directly.

        Examples
        ========

        >>> from sympy.abc import x, y
        >>> from sympy import groebner
        >>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
        >>> G = groebner(F, x, y, order='grlex')
        >>> list(G.fglm('lex'))
        [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
        >>> list(groebner(F, x, y, order='lex'))
        [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]

        References
        ==========

        J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
        Computation of Zero-dimensional Groebner Bases by Change of
        Ordering
        """
        opt = self._options
        src_order = opt.order
        dst_order = monomial_key(order)
        if src_order == dst_order:
            return self
        if not self.is_zero_dimensional:
            raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
        polys = list(self._basis)
        domain = opt.domain
        # FGLM works over a field with the destination ordering
        opt = opt.clone(dict(
            domain=domain.get_field(),
            order=dst_order,
        ))
        from sympy.polys.rings import xring
        _ring, _ = xring(opt.gens, opt.domain, src_order)
        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = _ring.from_dict(poly)
        G = matrix_fglm(polys, _ring, dst_order)
        G = [Poly._from_dict(dict(g), opt) for g in G]
        if not domain.has_Field:
            # retract to the original ring domain by clearing denominators
            G = [g.clear_denoms(convert=True)[1] for g in G]
            opt.domain = domain
        return self._new(G, opt)
    def reduce(self, expr, auto=True):
        """
        Reduces a polynomial modulo a Groebner basis.

        Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
        computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
        such that ``f = q_1*f_1 + ... + q_n*f_n + r``, where ``r`` vanishes or ``r``
        is a completely reduced polynomial with respect to ``G``.

        Examples
        ========

        >>> from sympy import groebner, expand
        >>> from sympy.abc import x, y
        >>> f = 2*x**4 - x**2 + y**3 + y**2
        >>> G = groebner([x**3 - x, y**3 - y])
        >>> G.reduce(f)
        ([2*x, 1], x**2 + y**2 + y)
        >>> Q, r = _
        >>> expand(sum(q*g for q, g in zip(Q, G)) + r)
        2*x**4 - x**2 + y**3 + y**2
        >>> _ == f
        True
        """
        poly = Poly._from_expr(expr, self._options)
        polys = [poly] + list(self._basis)
        opt = self._options
        domain = opt.domain
        retract = False
        # lift a ring domain to its field for the division, as in reduced()
        if auto and domain.has_Ring and not domain.has_Field:
            opt = opt.clone(dict(domain=domain.get_field()))
            retract = True
        from sympy.polys.rings import xring
        _ring, _ = xring(opt.gens, opt.domain, opt.order)
        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = _ring.from_dict(poly)
        Q, r = polys[0].div(polys[1:])
        Q = [Poly._from_dict(dict(q), opt) for q in Q]
        r = Poly._from_dict(dict(r), opt)
        if retract:
            try:
                _Q, _r = [q.to_ring() for q in Q], r.to_ring()
            except CoercionFailed:
                pass
            else:
                Q, r = _Q, _r
        if not opt.polys:
            return [q.as_expr() for q in Q], r.as_expr()
        else:
            return Q, r
    def contains(self, poly):
        """
        Check if ``poly`` belongs the ideal generated by ``self``.

        Examples
        ========

        >>> from sympy import groebner
        >>> from sympy.abc import x, y
        >>> f = 2*x**3 + y**3 + 3*y
        >>> G = groebner([x**2 + y**2 - 1, x*y - 2])
        >>> G.contains(f)
        True
        >>> G.contains(f + 1)
        False
        """
        # membership iff the normal form (remainder) vanishes
        return self.reduce(poly)[1] == 0
@public
def poly(expr, *gens, **args):
    """
    Efficiently transform an expression into a polynomial.

    Unlike ``Poly(expr)``, this traverses the tree and builds the
    polynomial bottom-up without expanding the whole expression first.

    Examples
    ========

    >>> from sympy import poly
    >>> from sympy.abc import x
    >>> poly(x*(x**2 + x - 1)**2)
    Poly(x**5 + 2*x**4 - x**2 + x, x, domain='ZZ')
    """
    options.allowed_flags(args, [])
    def _poly(expr, opt):
        # Recursively convert each Add-term of ``expr`` into a Poly,
        # splitting every term into polynomial factors (Add, or integer
        # powers of Add) and plain leftover factors.
        terms, poly_terms = [], []
        for term in Add.make_args(expr):
            factors, poly_factors = [], []
            for factor in Mul.make_args(term):
                if factor.is_Add:
                    poly_factors.append(_poly(factor, opt))
                elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
                    poly_factors.append(
                        _poly(factor.base, opt).pow(factor.exp))
                else:
                    factors.append(factor)
            if not poly_factors:
                # no sub-polynomials: keep the term verbatim for later
                terms.append(term)
            else:
                # multiply the polynomial factors together, then fold in
                # the remaining plain factors
                product = poly_factors[0]
                for factor in poly_factors[1:]:
                    product = product.mul(factor)
                if factors:
                    factor = Mul(*factors)
                    if factor.is_Number:
                        product = product.mul(factor)
                    else:
                        product = product.mul(Poly._from_expr(factor, opt))
                poly_terms.append(product)
        if not poly_terms:
            result = Poly._from_expr(expr, opt)
        else:
            # sum the polynomial terms, then add back the plain terms
            result = poly_terms[0]
            for term in poly_terms[1:]:
                result = result.add(term)
            if terms:
                term = Add(*terms)
                if term.is_Number:
                    result = result.add(term)
                else:
                    result = result.add(Poly._from_expr(term, opt))
        return result.reorder(*opt.get('gens', ()), **args)
    expr = sympify(expr)
    if expr.is_Poly:
        return Poly(expr, *gens, **args)
    if 'expand' not in args:
        # avoid the expansion that Poly() would normally perform
        args['expand'] = False
    opt = options.build_options(gens, args)
    return _poly(expr, opt)
|
souravsingh/sympy
|
sympy/polys/polytools.py
|
Python
|
bsd-3-clause
| 174,099
|
[
"Gaussian"
] |
e4426b2827545a1b3a48b5e6e852db7140b0c40c8915db1a67a571c7f60d8d58
|
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Documentation on PRESUBMIT.py can be found at:
# http://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
import os
import subprocess
# Wrapper used so helper scripts run under the repo's known python binary.
PYTHON = 'build_tools/python_wrapper'
# Paths exempted from the canned project-wide checks (see PanProjectChecks).
_EXCLUDED_PATHS = (
    # patch_configure.py contains long lines embedded in multi-line
    # strings.
    r"^build_tools[\\\/]patch_configure.py",
)
def RunPylint(input_api, output_api):
  """Run the canned pylint check, with repo-specific suppressions."""
  suppressed_warnings = [
      'W0613',  # Unused argument
  ]
  skip_list = list(input_api.DEFAULT_BLACK_LIST) + [
      r'ports[\/\\]ipython-ppapi[\/\\]kernel\.py',
  ]
  results = input_api.canned_checks.RunPylint(
      input_api, output_api, black_list=skip_list,
      disabled_warnings=suppressed_warnings, extra_paths_list=['lib'])
  return list(results)
def RunCommand(name, cmd, input_api, output_api):
  """Run *cmd*; return [] on success, else a one-element error list."""
  try:
    subprocess.check_call(cmd)
  except subprocess.CalledProcessError:
    return [output_api.PresubmitError('%s failed.' % name)]
  return []
def RunPythonCommand(cmd, input_api, output_api):
  # Prepend the hermetic python wrapper so the script runs under a known
  # interpreter; cmd[0] (the script path) doubles as the check's name.
  return RunCommand(cmd[0], [PYTHON] + cmd, input_api, output_api)
# NOTE(review): name has a typo ("Partioning" vs "Partitioning") but is
# referenced by CheckChangeOnCommit, so renaming would need both sites.
def CheckPartioning(input_api, output_api):
  # Verify the bot partitioning of ports is still consistent.
  return RunPythonCommand(['build_tools/partition.py', '--check'],
                          input_api,
                          output_api)
def CheckDeps(input_api, output_api):
  # Verify inter-port dependency declarations.
  return RunPythonCommand(['build_tools/check_deps.py'],
                          input_api,
                          output_api)
def CheckMirror(input_api, output_api):
  # Verify the upstream-tarball mirror is up to date (check only, no upload).
  return RunPythonCommand(['build_tools/update_mirror.py', '--check'],
                          input_api,
                          output_api)
def RunUnittests(input_api, output_api):
  # Run the project's unit tests via the Makefile's "test" target.
  return RunCommand('unittests', ['make', 'test'], input_api, output_api)
# This check was copied from the chromium version.
# TODO(sbc): should we add this to canned_checks?
def CheckAuthorizedAuthor(input_api, output_api):
  """Verify the author's email address is in AUTHORS.

  Returns [] when the author is listed (or unknown), otherwise a
  single-element list with a PresubmitPromptWarning.
  """
  import fnmatch
  author = input_api.change.author_email
  if not author:
    input_api.logging.info('No author, skipping AUTHOR check')
    return []
  authors_path = input_api.os_path.join(
      input_api.PresubmitLocalPath(), 'AUTHORS')
  # BUG FIX: the file handle was previously opened inside a generator
  # expression and never closed; use a context manager instead.
  with open(authors_path) as authors_file:
    matches = (input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
               for line in authors_file)
    # AUTHORS entries may be fnmatch patterns (e.g. *@chromium.org).
    valid_authors = [match.group(1).lower() for match in matches if match]
  if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
    input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
    return [output_api.PresubmitPromptWarning(
        ('%s is not in AUTHORS file. If you are a new contributor, please visit'
         '\n'
         'http://www.chromium.org/developers/contributing-code and read the '
         '"Legal" section.\n') % author)]
  return []
def CheckChangeOnUpload(input_api, output_api):
  # Checks run when a CL is uploaded for review: author validation,
  # pylint, unit tests, dependency consistency and the canned
  # project-wide checks (long lines etc., minus _EXCLUDED_PATHS).
  report = []
  report.extend(CheckAuthorizedAuthor(input_api, output_api))
  report.extend(RunPylint(input_api, output_api))
  report.extend(RunUnittests(input_api, output_api))
  report.extend(CheckDeps(input_api, output_api))
  report.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, project_name='Native Client',
      excluded_paths=_EXCLUDED_PATHS))
  return report
def CheckChangeOnCommit(input_api, output_api):
  # Checks run at commit time: everything from upload, plus the slower
  # mirror/partition verifications and the tree-open gate.
  report = []
  report.extend(CheckChangeOnUpload(input_api, output_api))
  report.extend(CheckMirror(input_api, output_api))
  report.extend(CheckPartioning(input_api, output_api))
  report.extend(input_api.canned_checks.CheckTreeIsOpen(
      input_api, output_api,
      json_url='http://naclports-status.appspot.com/current?format=json'))
  return report
# Five sharded bots per toolchain, plus a single emscripten bot.  The list
# is generated so adding a toolchain or shard is a one-line change; the
# resulting names and their order are identical to the old explicit list.
_TRYBOT_TOOLCHAINS = ['glibc', 'newlib', 'pnacl', 'clang']
_TRYBOT_SHARDS = 5

TRYBOTS = [
    'naclports-linux-%s-%d' % (toolchain, shard)
    for toolchain in _TRYBOT_TOOLCHAINS
    for shard in range(_TRYBOT_SHARDS)
] + ['naclports-linux-emscripten-0']


def GetPreferredTryMasters(_, change):
  """Map every trybot to the default test suite on the NaCl try master."""
  return {
      'tryserver.nacl': {bot: set(['defaulttests']) for bot in TRYBOTS},
  }
|
yeyus/naclports
|
PRESUBMIT.py
|
Python
|
bsd-3-clause
| 4,690
|
[
"VisIt"
] |
ba97df838e0c0a356ced8fd36fb8fb1f8c732f5f673970034b7d3dbd9fd9e9f0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.