| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 values) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore, SignalHandler
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.djangolib.markup import Text, HTML
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = HTML("<a href=\"{}\" target=\"_blank\">").format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = Text(analytics_dashboard_message).format(
link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
    # Check if there is a corresponding entry in the CourseMode table for this course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if access['finance_admin'] or access['sales_admin'] or access['admin']:
if course_mode_has_price:
paid_mode = paid_modes[0]
else:
paid_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
sections.append(_section_e_commerce(course, access, paid_mode, is_white_label, is_white_label))
    # Gate access to Special Exam tab depending on whether timed exams or proctored exams
    # are enabled in the course
    # NOTE: For now, if we only have proctored exams enabled, then only platform Staff
    # (user.is_staff) will be able to view the special exams tab. This may
    # change in the future
can_see_special_exams = (
((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and (access['admin'] or access['instructor']):
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse( # pylint: disable=invalid-name
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
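# A minimal sketch of that contract, for illustration only: `_section_circus_expo`
# is hypothetical (its key values come from the example in the comment above) and
# is not wired into the dashboard.
def _section_circus_expo(course, access):
    """ Provide data for a hypothetical example dashboard section """
    return {
        'section_key': 'circus_expo',              # css attribute, js tie-in, template import filename
        'section_display_name': _('Circus Expo'),  # link title in the nav bar
        'access': access,                          # extra data consumed by the section template
    }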
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'is_self_paced': course.self_paced,
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
signal_handler = SignalHandler(modulestore())
signal_handler.send('course_published', course_key=course_key)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': get_default_time_display(course.start),
'end_date': get_default_time_display(course.end) or _('No end date set'),
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
'verified_track_cohorting_url': reverse(
'verified_track_cohorting', kwargs={'course_key_string': unicode(course_key)}
),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = HTML(u"<a href=\"{0}\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': unicode(course.id),
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
| repo_name: 10clouds/edx-platform | path: lms/djangoapps/instructor/views/instructor_dashboard.py | language: Python | license: agpl-3.0 | size: 31,218 | keyword: ["VisIt"] | text_hash: 2f9adfb14409ce2dd9e2fe7d01784af7596b758c9f2fc7758c4e610b4af6a8c1 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
from ..base import components, LatexRenderer, HTMLRenderer, MarkdownReader
from ..tree import tokens, html, latex
from . import command, materialicon
def make_extension(**kwargs):
return AlertExtension(**kwargs)
AlertToken = tokens.newToken('AlertToken', brand='')
AlertTitle = tokens.newToken('AlertTitle', brand='', prefix=True, center=False, icon=True, icon_name=None)
AlertContent = tokens.newToken('AlertContent')
# LaTeX alert environment that uses tcolorbox package
ALERT_LATEX = """\\setlength\\intextsep{0pt}
\\NewDocumentEnvironment{alert}{O{#2}moO{white}}{%
\\ifthenelse{\\isempty{#1}}{%
\\IfValueT{#3}{\\tcbset{title=#3}}
}{%
\\tcbset{title=\\MakeUppercase{#1}\\IfValueT{#3}{: #3}}
}
\\begin{tcolorbox}[arc=0mm,fonttitle=\\bfseries,colback=alert-#2!5,colframe=alert-#2,coltitle=#4]
}{%
\\end{tcolorbox}
}
"""
class AlertExtension(command.CommandExtension):
"""
Adds alert boxes (note, tip, error, warning, and construction) to display important information.
"""
@staticmethod
def defaultConfig():
config = command.CommandExtension.defaultConfig()
config['use-title-prefix'] = (True, "Enable/disable including the brand (e.g., ERROR) as " \
"prefix for the alert title.")
return config
def extend(self, reader, renderer):
self.requires(command)
self.addCommand(reader, AlertCommand())
renderer.add('AlertToken', RenderAlertToken())
renderer.add('AlertTitle', RenderAlertTitle())
renderer.add('AlertContent', RenderAlertContent())
if isinstance(renderer, LatexRenderer):
renderer.addPackage('xcolor')
renderer.addPackage('xparse')
renderer.addPackage('xifthen')
renderer.addPackage('tcolorbox')
renderer.addPackage('wrapfig')
renderer.addPackage('graphicx')
renderer.addPreamble('\\definecolor{alert-error}{RGB}{153,0,0}')
renderer.addPreamble('\\definecolor{alert-note}{RGB}{0,88,151}')
renderer.addPreamble('\\definecolor{alert-warning}{RGB}{220,200,100}')
renderer.addPreamble('\\definecolor{alert-info}{RGB}{0,128,21}')
renderer.addPreamble('\\definecolor{alert-construction}{RGB}{255,114,33}')
renderer.addPreamble(ALERT_LATEX)
if isinstance(renderer, HTMLRenderer):
renderer.addCSS('alert_moose', "css/alert_moose.css")
class AlertCommand(command.CommandComponent):
COMMAND = 'alert'
SUBCOMMAND = ('error', 'warning', 'note', 'tip', 'construction')
@staticmethod
def defaultSettings():
settings = command.CommandComponent.defaultSettings()
settings['title'] = (None, "The optional alert title.")
settings['center-title'] = (False, "Center the title.")
settings['prefix'] = (None, "Enable/disable the title being prefixed with the alert brand.")
settings['icon'] = (True, "Enable/disable the title icon.")
settings['icon-name'] = (None, "Set the icon name, see material icon for available options.")
return settings
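    # A sketch of the markdown this component matches (assumed MooseDocs command
    # syntax, combining COMMAND/SUBCOMMAND with the settings above):
    #
    #   !alert warning title=Check your units icon=False
    #   The mesh coordinates are in centimeters, not meters.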
def createToken(self, parent, info, page):
title = self.settings.pop('title', None)
brand = info['subcommand']
icon_name = self.settings['icon-name']
if icon_name is None:
if brand == 'note':
icon_name = 'comment'
elif brand == 'construction':
icon_name = 'build'
elif brand == 'tip':
icon_name = 'school'
elif brand == 'error':
icon_name = 'report'
else:
icon_name = brand
if self.settings['prefix'] is not None:
prefix = self.settings['prefix']
else:
prefix = self.extension.get('use-title-prefix', True)
alert_token = AlertToken(parent, brand=brand)
title_token = AlertTitle(alert_token, prefix=prefix, brand=brand,
icon=self.settings['icon'],
icon_name=icon_name,
center=self.settings['center-title'])
if title:
self.reader.tokenize(title_token, title, page, MarkdownReader.INLINE)
return AlertContent(alert_token, brand=brand)
class RenderAlertToken(components.RenderComponent):
def createHTML(self, parent, token, page):
div = html.Tag(parent, 'div', class_='moose-alert moose-alert-{}'.format(token['brand']))
content = html.Tag(div, 'div', class_='moose-alert-content')
return content
def createMaterialize(self, parent, token, page):
return html.Tag(parent, 'div',
class_='card moose-alert moose-alert-{}'.format(token['brand']))
def createLatex(self, parent, token, page):
# Argument list (see ALERT above)
args = []
if token(0)['prefix']:
args.append(latex.Bracket(string=token['brand']))
else:
args.append(latex.Bracket())
args.append(latex.Brace(string=token['brand']))
if token(0).children:
title = latex.Bracket()
self.renderer.render(title, token(0), page)
args.append(title)
env = latex.Environment(parent, 'alert', args=args)
token(0).parent = None
return env
class RenderAlertContent(components.RenderComponent):
def createHTML(self, parent, token, page):
return html.Tag(parent, 'p')
def createMaterialize(self, parent, token, page):
card_content = html.Tag(parent, 'div', class_='card-content')
content = html.Tag(card_content, 'div', class_='moose-alert-content')
return html.Tag(content, 'p')
def createLatex(self, parent, token, page):
return parent
class RenderAlertTitle(components.RenderComponent):
def createHTML(self, parent, token, page):
return html.Tag(parent, 'p')
def createMaterialize(self, parent, token, page):
title = html.Tag(parent, 'div', class_='card-title moose-alert-title')
if token.get('icon'):
i = html.Tag(title, 'i', token, string=token['icon_name'])
i.addClass('material-icons')
i.addClass('moose-inline-icon')
if token.get('prefix'):
brand = token['brand']
prefix = html.Tag(title, 'span', string=brand, class_='moose-alert-title-brand')
if token.children:
html.String(prefix, content=':')
if token.get('center'):
title.addClass('center')
return title
def createLatex(self, parent, token, page):
return parent
| repo_name: harterj/moose | path: python/MooseDocs/extensions/alert.py | language: Python | license: lgpl-2.1 | size: 7,034 | keyword: ["MOOSE"] | text_hash: 3d646523df50591d53d8fba417e6146aa55e5468daf9867b13707c25859b2142 |
# -*- coding: utf-8 -*-
#
# __init__.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
r"""PyNEST - Python interface for the NEST simulator
* ``nest.helpdesk()`` opens the NEST documentation in your browser.
* ``nest.__version__`` displays the NEST version.
* ``nest.Models()`` shows all available neuron, device and synapse models.
* ``nest.help('model_name')`` displays help for the given model, e.g., ``nest.help('iaf_psc_exp')``.
* To get help on functions in the ``nest`` package, use Python's ``help()`` function
or IPython's ``?``, e.g.
- ``help(nest.Create)``
- ``nest.Connect?``
For more information visit https://www.nest-simulator.org.
"""
# Store interpreter-given module attributes to copy into replacement module
# instance later on. Use `.copy()` to prevent pollution with other variables
_original_module_attrs = globals().copy()
from .ll_api import KernelAttribute # noqa
import sys # noqa
import types # noqa
import importlib # noqa
try:
import versionchecker
except ImportError:
pass
def _rel_import_star(module, import_module_name):
"""Emulates `from X import *` into `module`"""
imported = importlib.import_module(import_module_name, __name__)
imp_iter = vars(imported).items()
    if hasattr(imported, "__all__"):
# If a public api is defined using the `__all__` attribute, copy that.
module.update(kv for kv in imp_iter if kv[0] in imported.__all__)
else:
# Otherwise follow "underscore is private" convention.
module.update(kv for kv in imp_iter if not kv[0].startswith("_"))
def _lazy_module_property(module_name):
"""
Returns a property that lazy loads a module and substitutes itself with it.
The class variable name must match given `module_name`::
class ModuleClass(types.ModuleType):
lazy_module_xy = _lazy_module_property("lazy_module_xy")
"""
def lazy_loader(self):
cls = type(self)
delattr(cls, module_name)
module = importlib.import_module("." + module_name, __name__)
setattr(cls, module_name, module)
return module
return property(lazy_loader)
class NestModule(types.ModuleType):
"""
A module class for the `nest` root module to control the dynamic generation
of module level attributes such as the KernelAttributes and lazy loading
some submodules.
"""
from . import ll_api # noqa
from .ll_api import set_communicator # noqa
from . import pynestkernel as kernel # noqa
from . import random # noqa
from . import math # noqa
from . import spatial_distributions # noqa
from . import logic # noqa
try:
from . import server # noqa
except ImportError:
pass
__version__ = ll_api.sli_func("statusdict /version get")
# Lazy load the `spatial` module to avoid circular imports.
spatial = _lazy_module_property("spatial")
# Define the kernel attributes.
#
# FORMATTING NOTES:
# * Multiline strings render incorrectly, join multiple single-quote
# strings instead.
# * Strings containing `:` render incorrectly.
# * Do not end docstrings with punctuation. A `.` or `,` is added by the
# formatting logic.
kernel_status = KernelAttribute(
"dict", "Get the complete kernel status", readonly=True
)
resolution = KernelAttribute(
"float", "The resolution of the simulation (in ms)", default=0.1
)
biological_time = KernelAttribute(
"float", "The current simulation time (in ms)"
)
to_do = KernelAttribute(
"int", "The number of steps yet to be simulated", readonly=True
)
max_delay = KernelAttribute(
"float", "The maximum delay in the network", default=0.1
)
min_delay = KernelAttribute(
"float", "The minimum delay in the network", default=0.1
)
ms_per_tic = KernelAttribute(
"float", "The number of milliseconds per tic", default=0.001
)
tics_per_ms = KernelAttribute(
"float", "The number of tics per millisecond", default=1000.0
)
tics_per_step = KernelAttribute(
"int", "The number of tics per simulation time step", default=100
)
T_max = KernelAttribute(
"float", "The largest representable time value", readonly=True
)
T_min = KernelAttribute(
"float", "The smallest representable time value", readonly=True
)
rng_types = KernelAttribute(
"list[str]",
"List of available random number generator types",
readonly=True,
)
rng_type = KernelAttribute(
"str",
"Name of random number generator type used by NEST",
default="mt19937_64",
)
rng_seed = KernelAttribute(
"int",
(
"Seed value used as base for seeding NEST random number generators "
+ r"(:math:`1 \leq s\leq 2^{32}-1`)"
),
default=143202461,
)
total_num_virtual_procs = KernelAttribute(
"int", "The total number of virtual processes", default=1
)
local_num_threads = KernelAttribute(
"int", "The local number of threads", default=1
)
num_processes = KernelAttribute(
"int", "The number of MPI processes", readonly=True
)
off_grid_spiking = KernelAttribute(
"bool",
"Whether to transmit precise spike times in MPI communication",
readonly=True,
)
adaptive_spike_buffers = KernelAttribute(
"bool",
"Whether MPI buffers for communication of spikes resize on the fly",
default=True,
)
adaptive_target_buffers = KernelAttribute(
"bool",
"Whether MPI buffers for communication of connections resize on the fly",
default=True,
)
buffer_size_secondary_events = KernelAttribute(
"int",
(
"Size of MPI buffers for communicating secondary events "
+ "(in bytes, per MPI rank, for developers)"
),
readonly=True,
)
buffer_size_spike_data = KernelAttribute(
"int",
"Total size of MPI buffer for communication of spikes",
default=2,
)
buffer_size_target_data = KernelAttribute(
"int",
"Total size of MPI buffer for communication of connections",
default=2,
)
growth_factor_buffer_spike_data = KernelAttribute(
"float",
(
"If MPI buffers for communication of spikes resize on the fly, "
+ "grow them by this factor each round"
),
default=1.5,
)
growth_factor_buffer_target_data = KernelAttribute(
"float",
(
"If MPI buffers for communication of connections resize on the "
+ "fly, grow them by this factor each round"
),
default=1.5,
)
max_buffer_size_spike_data = KernelAttribute(
"int",
"Maximal size of MPI buffers for communication of spikes",
default=8388608,
)
max_buffer_size_target_data = KernelAttribute(
"int",
"Maximal size of MPI buffers for communication of connections",
default=16777216,
)
use_wfr = KernelAttribute(
"bool", "Whether to use waveform relaxation method", default=True
)
wfr_comm_interval = KernelAttribute(
"float",
"Desired waveform relaxation communication interval",
default=1.0,
)
wfr_tol = KernelAttribute(
"float",
"Convergence tolerance of waveform relaxation method",
default=0.0001,
)
wfr_max_iterations = KernelAttribute(
"int",
"Maximal number of iterations used for waveform relaxation",
default=15,
)
wfr_interpolation_order = KernelAttribute(
"int",
"Interpolation order of polynomial used in wfr iterations",
default=3
)
max_num_syn_models = KernelAttribute(
"int", "Maximal number of synapse models supported", readonly=True
)
sort_connections_by_source = KernelAttribute(
"bool",
(
"Whether to sort connections by their source; increases"
+ " construction time of presynaptic data structures, decreases"
+ " simulation time if the average number of outgoing connections"
+ " per neuron is smaller than the total number of threads"
),
default=True,
)
structural_plasticity_synapses = KernelAttribute(
"dict",
(
"Defines all synapses which are plastic for the structural"
+ " plasticity algorithm. Each entry in the dictionary is composed"
+ " of a synapse model, the presynaptic element and the"
+ " postsynaptic element"
),
)
structural_plasticity_update_interval = KernelAttribute(
"int",
(
"Defines the time interval in ms at which the structural plasticity"
+ " manager will make changes in the structure of the network ("
+ " creation and deletion of plastic synapses)"
),
default=10000.0,
)
use_compressed_spikes = KernelAttribute(
"bool",
(
"Whether to use spike compression; if a neuron has targets on"
+ " multiple threads of a process, this switch makes sure that only"
+ " a single packet is sent to the process instead of one packet"
+ " per target thread; requires"
+ " ``nest.sort_connections_by_source = True``"
),
default=True,
)
data_path = KernelAttribute(
"str",
"A path, where all data is written to, defaults to current directory",
)
data_prefix = KernelAttribute("str", "A common prefix for all data files")
overwrite_files = KernelAttribute(
"bool", "Whether to overwrite existing data files", default=False
)
print_time = KernelAttribute(
"bool",
"Whether to print progress information during the simulation",
default=False,
)
network_size = KernelAttribute(
"int", "The number of nodes in the network", readonly=True
)
num_connections = KernelAttribute(
"int",
"The number of connections in the network",
readonly=True,
localonly=True,
)
local_spike_counter = KernelAttribute(
"int",
(
"Number of spikes fired by neurons on a given MPI rank during the"
+ " most recent call to :py:func:`.Simulate`. Only spikes from"
+ " \"normal\" neurons are counted, not spikes generated by devices"
+ " such as ``poisson_generator``"
),
readonly=True,
)
recording_backends = KernelAttribute(
"dict[str, dict]",
(
"Dict of backends for recording devices. Each recording backend can"
+ " have a set of global parameters that can be modified through"
+ " this attribute by passing a dictionary with the name of the"
+ " recording backend as key and a dictionary with the global"
+ " parameters to be overwritten as value.\n\n"
+ "Example\n"
+ "~~~~~~~\n\n"
+ "Please note that NEST must be compiled with SionLIB for the"
+ " ``sionlib`` backend to be available.\n\n"
+ ".. code-block:: python\n\n"
+ " nest.recording_backends = dict(sionlib=dict(buffer_size=1024))"
+ "\n\n"
+ ".. seealso:: The valid global parameters are listed in the"
+ " documentation of each recording backend"
),
)
dict_miss_is_error = KernelAttribute(
"bool",
"Whether missed dictionary entries are treated as errors",
default=True,
)
keep_source_table = KernelAttribute(
"bool",
"Whether to keep source table after connection setup is complete",
default=True,
)
min_update_time = KernelAttribute(
"float",
"Shortest wall-clock time measured so far for a full update step [seconds]",
readonly=True,
)
max_update_time = KernelAttribute(
"float",
"Longest wall-clock time measured so far for a full update step [seconds]",
readonly=True,
)
update_time_limit = KernelAttribute(
"float",
(
"Maximum wall-clock time for one full update step [seconds]."
+ " This can be used to terminate simulations that slow down"
+ " significantly. Simulations may still get stuck if the slowdown"
+ " occurs within a single update step"
),
default=float("+inf"),
)
_kernel_attr_names = set(
k for k, v in vars().items() if isinstance(v, KernelAttribute)
)
_readonly_kernel_attrs = set(
k for k, v in vars().items() if isinstance(v, KernelAttribute) and v._readonly
)
def set(self, **kwargs):
return self.SetKernelStatus(kwargs)
def get(self, *args):
if len(args) == 0:
return self.GetKernelStatus()
if len(args) == 1:
return self.GetKernelStatus(args[0])
else:
return self.GetKernelStatus(args)
def __dir__(self):
return list(set(vars(self).keys()) | set(self.__all__))
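# Usage sketch (an assumption based on the KernelAttribute descriptors and the
# set()/get() helpers defined above, not an excerpt from NEST's documentation):
# kernel parameters become plain attributes of the `nest` module, e.g.
#
#   import nest
#   nest.resolution = 0.05       # equivalent to nest.set(resolution=0.05)
#   nest.resolution              # equivalent to nest.get("resolution")
#   nest.kernel_status           # readonly attribute: the full status dict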
# Instantiate a NestModule to replace the nest Python module. Based on
# https://mail.python.org/pipermail/python-ideas/2012-May/014969.html
_module = NestModule(__name__)
# We manipulate the nest module instance through its `__dict__` (= vars())
_module_dict = vars(_module)
# Copy over the original module attributes to preserve all interpreter-given
# magic attributes such as `__name__`, `__path__`, `__package__`, ...
_module_dict.update(_original_module_attrs)
# Import public API of `.hl_api` into the nest module instance
_rel_import_star(_module_dict, ".hl_api")
# Finalize the nest module instance by generating its public API.
_api = list(k for k in _module_dict if not k.startswith("_"))
_api.extend(k for k in dir(NestModule) if not k.startswith("_"))
_module.__all__ = list(set(_api))
# Set the nest module object as the return value of `import nest` using sys
sys.modules[__name__] = _module
# Some compiled/binary components (`pynestkernel.pyx` for example) of NEST
# obtain a reference to this file's original module object instead of what's in
# `sys.modules`. For these edge cases we make available all attributes of the
# nest module instance to this file's module object.
globals().update(_module_dict)
# Clean up obsolete references
del _rel_import_star, _lazy_module_property, _module, _module_dict, \
_original_module_attrs
| repo_name: niltonlk/nest-simulator | path: pynest/nest/__init__.py | language: Python | license: gpl-2.0 | size: 15,517 | keyword: ["NEURON", "VisIt"] | text_hash: eef9a0775d575ad743b132754aeac385e2565a40a3d4dab1aa924675fd89cb61 |
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import os.path
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.Pharm2D import Generate, SigFactory, Matcher, Gobbi_Pharm2D
from rdkit.TestRunner import redirect_stdout
from io import StringIO
class TestCase(unittest.TestCase):
def setUp(self):
fdefFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'Pharm2D',
'test_data', 'BaseFeatures.fdef')
featFactory = ChemicalFeatures.BuildFeatureFactory(fdefFile)
self.factory = SigFactory.SigFactory(featFactory, minPointCount=2, maxPointCount=3)
self.factory.SetBins([(0, 2), (2, 5), (5, 8)])
self.factory.Init()
def test1_simple(self):
mol = Chem.MolFromSmiles('OCC(=O)CCCN')
self.factory.skipFeats = ['Donor']
self.factory.Init()
self.assertEqual(self.factory.GetSigSize(), 510)
Generate._verbose = False
sig = Generate.Gen2DFingerprint(mol, self.factory)
Generate._verbose = False
tgt = (1, 2, 11, 52, 117)
onBits = sig.GetOnBits()
self.assertEqual(tuple(onBits), tgt)
self.assertEqual(len(onBits), len(tgt))
bitMatches = ([((0, ), (3, ))],
[((0, ), (7, )), ((3, ), (7, ))],
[((0, ), (3, ), (7, ))], )
for i, bit in enumerate(onBits):
matches = Matcher.GetAtomsMatchingBit(self.factory, bit, mol)
# print bit,matches
# tgt = bitMatches[i]
# self.assertEqual(matches,tgt)
def test2Bug28(self):
smi = r'Cc([s]1)nnc1SCC(\CS2)=C(/C([O-])=O)N3C(=O)[C@H]([C@@H]23)NC(=O)C[n]4cnnn4'
mol = Chem.MolFromSmiles(smi)
factory = Gobbi_Pharm2D.factory
factory.SetBins([(2, 3), (3, 4), (4, 5), (5, 8), (8, 100)])
sig = Generate.Gen2DFingerprint(mol, factory)
onBits = sig.GetOnBits()
for bit in onBits:
atoms = Matcher.GetAtomsMatchingBit(factory, bit, mol, justOne=1)
self.assertTrue(len(atoms))
def test3Roundtrip(self):
# longer-running Bug 28 test
nToDo = 20
with open(os.path.join(RDConfig.RDDataDir, 'NCI', 'first_5K.smi'), 'r') as inF:
inD = inF.readlines()[:nToDo]
factory = Gobbi_Pharm2D.factory
factory.SetBins([(2, 3), (3, 4), (4, 5), (5, 8), (8, 100)])
for line in inD:
smi = line.split('\t')[0]
mol = Chem.MolFromSmiles(smi)
sig = Generate.Gen2DFingerprint(mol, factory)
onBits = sig.GetOnBits()
for bit in onBits:
atoms = Matcher.GetAtomsMatchingBit(factory, bit, mol, justOne=1)
assert len(atoms), 'bit %d failed to match for smi %s' % (bit, smi)
def test_exampleCode(self):
# We make sure that the example code runs
f = StringIO()
with redirect_stdout(f):
Matcher._exampleCode()
self.assertIn('finished', f.getvalue())
if __name__ == '__main__': # pragma: nocover
unittest.main()
| repo_name: greglandrum/rdkit | path: rdkit/Chem/Pharm2D/UnitTestMatcher.py | language: Python | license: bsd-3-clause | size: 3,370 | keyword: ["RDKit"] | text_hash: 8ba5fdb763dd6d267f8a9e606b26def76668b23a5e9a4cefbc5ba1bc677f1664 |
#!/usr/bin/env python3
# ver 0.1 - copied from massf_1d.py (v1.1) and modified on 1/22/2018
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='partial rdf(s) at interfaces using various domain info. from domain.py')
## args
parser.add_argument('-i', '--input', default='traj.trr', nargs='?',
help='input trajectory file')
parser.add_argument('-s', '--structure', default='topol.tpr', nargs='?',
help='.tpr structure file')
parser.add_argument('-select1', '--select1', nargs='?',
help='a file1 with a command-line for select_atoms in MDAnalysis')
parser.add_argument('-select2', '--select2', nargs='?',
help='a file2 with a command-line for select_atoms in MDAnalysis')
parser.add_argument('-nbin', '--nbin', nargs='?', type=int,
                    help='number of bins used for the conv. alignment')
parser.add_argument('-d', '--domain', default='a.massf.domain', nargs='?',
help='.npz file from domain.py')
parser.add_argument('-di', '--domain_i', default='a.massf.domain.dic', nargs='?',
help='input text file for determining domain.py')
parser.add_argument('-hnbin', '--hist_nbin', default=20, nargs='?', type=int,
help='number of bins for rdfs')
parser.add_argument('-hmax', '--hist_max', default=-1.0, nargs='?', type=float,
help='the maximum distance for rdf (if negative, use half box_x)')
parser.add_argument('-temp', '--temp', default=20, nargs='?', type=int,
help='generate tmp file to save intermediate data')
parser.add_argument('-o', '--output', default='.rdf', nargs='?',
help='output prefix for rdf files')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
import numpy as np
import math
import multiprocessing as mp
#from multiprocessing import Pool
#import os
#import resource
import time
import copy # copy array
import ast # reading a file of dictionary list
from scipy.spatial.distance import cdist
# default for args
odomain = args.domain + args.output
idomain = args.domain + '.npy'
## timer
start_proc, start_prof = hjung.time.init()
#if args.n_proc < 0:
# args.n_proc = mp.cpu_count() - 1
#if args.n_proc > mp.cpu_count():
# raise ValueError(" args.n_proc is greater than number of cpus in this node")
## load domain data files
domain_info = np.load(idomain)
n_frames = len(domain_info)
#if n_frames != len(domain_info):
# raise ValueError(" Wrong trajectory length. Check if your trajectory file is changed.")
iframes_list = domain_info[:,0]
align_shift = domain_info[:,2]
step_up_down = np.zeros((n_frames,2))
step_up_down[:,0] = domain_info[:,3]
step_up_down[:,1] = domain_info[:,4]
domain_size = domain_info[:,5]
def reduce_traj_output(file_gro, file_xtc, file_select1, file_select2, list_frames):
coordinates1, coordinates2, unit_cells = hjung.io.read_trr_3d_select2(file_gro, file_xtc, file_select1, file_select2, 'pos')
unit_cells = hjung.array.convert_unitcell_3d(unit_cells, file_gro, file_xtc)
return coordinates1[list_frames], coordinates2[list_frames], unit_cells[list_frames]
# read a topology and a trajectory using module MDAnalysis with selection
coordinates1, coordinates2, unit_cells = reduce_traj_output(args.structure, args.input, args.select1, args.select2, iframes_list)
# read domain_dic
with open(args.domain_i,'r') as file_dic:
dic_domain = ast.literal_eval(file_dic.read())
print(" Here is dic_domain: {}".format(dic_domain))
n_rdfs = int(np.amax(np.array(list(dic_domain.values())))) + 1
# interface position calculation
itf = np.zeros(2) # [0]: (down -> up) massf interface, [1]: (up -> down) massf interface
itf[0] = int(args.nbin/4.0)
itf[1] = int(args.nbin*3.0/4.0)
def select_atoms_indices_1d(coord,r_min,r_max):
right_indice_atoms = np.where(coord > r_min)
left_indice_atoms = np.where(coord < r_max)
return np.intersect1d(right_indice_atoms,left_indice_atoms)
# initialize processing bar
# output: process time and wall time
# Example: mod_frame = process_init()
def process_init():
print("io.process_init: ")
return 10 # initial mode number = 10
# print process bar
# input: itime, current time
# ftime, final time
# mod_time, frequency of printing.
# output: mod_time, new or old printing frequency
# Example: mod_frame = process_print(itime+1, ftime, mod_frame)
def process_print(itime, ftime, mod_time):
if itime%mod_time == 0:
print("... {0} th frame reading ({1:.0%}) ...".format(itime,itime/ftime))
if (itime/mod_time)%10 == 0:
mod_time = mod_time*10
return mod_time
# save rdf files
def save_rdf(rdf,n_rdfs,bin_edges,count_frames,odomain,prefix):
rdf_tmp = copy.copy(rdf)
# normalization against volume of bins
for i_bin in range(len(bin_edges)-1):
vol_bin = (4.0/3.0)*math.pi*((bin_edges[i_bin+1])**3-(bin_edges[i_bin])**3)
## normalization against number of frames
for i_rdf in range(n_rdfs):
rdf_tmp[i_rdf][i_bin] = rdf[i_rdf][i_bin]/vol_bin/float(count_frames[i_rdf])
# bin position (center)
hist_nbins = int(len(bin_edges)-1)
hist_bin_pos = np.zeros(hist_nbins,dtype=float)
for ibin in range(hist_nbins):
hist_bin_pos[ibin] = (bin_edges[ibin+1] + bin_edges[ibin])/2.0
# save number histogram trajectory
for i_rdf in range(n_rdfs):
dataset = np.column_stack((hist_bin_pos,rdf_tmp[i_rdf]))
np.savetxt(odomain+str(prefix)+str(i_rdf), dataset,
header='radial distribution function at domain median +- {} bins with {} frames'.format(i_rdf,count_frames), fmt='%e', comments='# ')
np.save(odomain+str(prefix)+str(i_rdf), dataset)
print(" saved (temp) rdf files")
# calculation rdfs
count_frames = np.zeros(n_rdfs, dtype=int)
hist_nbins = args.hist_nbin
rdf = np.zeros((n_rdfs,hist_nbins), dtype=float)
current_i = 0
box_z = unit_cells[0][2] # assume unit cell dimension is fixed during simulation (NVT)
bin_size = box_z/float(args.nbin)
box_x_half = unit_cells[0][0]/2.0
print("bin size on z = {}".format(bin_size))
print("bin_x = {}".format(unit_cells[0][0]))
if args.hist_max < 0.0:
args.hist_max = box_x_half
elif args.hist_max > box_x_half:
raise ValueError(" wrong args.hist_max")
print("set hist_max = {}".format(args.hist_max))
## multiprocessing but so slow
#def calc_dist(atom1,atom2):
# global coordinates1_tmp
# global coordinates2_tmp
# return np.linalg.norm(coordinates1_tmp[atom1] - coordinates2_tmp[atom2])
#
#def pool_calc_dist_hist(nproc, pair_list, nbins, max_dist):
# pool = Pool(nproc)
# t = time.time()
# distances = pool.starmap(calc_dist, pair_list)
# #print(" total {} pairs".format(len(dist_list)))
# print(" time = {}".format(time.time()-t))
# hist_dist, bin_edges = np.histogram(distances,bins=nbins,range=(0.0,max_dist),density=False)
# return hist_dist, bin_edges
for iframe in range(n_frames):
i_rdf = int(dic_domain[domain_size[iframe]])
pos_itf = ((itf - align_shift[iframe])%args.nbin)*bin_size
for up_down in range(2):
new_coord_1_z = coordinates1[iframe][:,2] - pos_itf[up_down] + box_z/2.0 # to set interface at center of box
new_coord_2_z = coordinates2[iframe][:,2] - pos_itf[up_down] + box_z/2.0 # to set interface at center of box
new_coord_1_z = hjung.coord.pbc_nojump_1d(new_coord_1_z, unit_cells[iframe][2])
new_coord_2_z = hjung.coord.pbc_nojump_1d(new_coord_2_z, unit_cells[iframe][2])
        # select selection1 atoms within a range
#print("{} {}".format(step_up_down[iframe][up_down], itf[up_down]))
t_itf = abs(step_up_down[iframe][up_down] - itf[up_down])*bin_size/2.0 # (half_thickness between interface and acf 50% point) / 2
#print(" selection1 range = [{},{}] = {} +- {}".format(box_z/2.0 - t_itf, box_z/2.0 + t_itf,box_z/2.0, t_itf))
select_atoms_1 = select_atoms_indices_1d(new_coord_1_z, box_z/2.0 - t_itf, box_z/2.0 + t_itf)
print(" {} frame: selected {} atoms in select1 ".format(iframe,len(select_atoms_1)))
# selection atoms pool of selection2 atoms
select_atoms_2 = select_atoms_indices_1d(new_coord_2_z, box_z/2.0 - t_itf - box_x_half, box_z/2.0 + t_itf + box_x_half)
#print(" selection2 range = [{},{}]".format(box_z/2.0 - t_itf - box_x_half,box_z/2.0 + t_itf + box_x_half))
print(" {} frame: selected {} atoms in select2".format(iframe,len(select_atoms_2)))
## rdf of A-B near interface using multiprocessing (slower by 400 times than scipy)
#comb_list = [(i,j) for i in select_atoms_1 for j in select_atoms_2]
#hist_dist, bin_edges = pool_calc_dist_hist(args.n_proc, comb_list,hist_nbins,args.hist_max)
# use cdist
#t = time.time()
result = cdist(coordinates1[iframe][select_atoms_1], coordinates2[iframe][select_atoms_2], 'euclidean')
dist_tmp = np.ndarray.flatten(result)
dist_tmp_reduced = dist_tmp[np.nonzero(dist_tmp)]
#print(" time = {}".format(time.time()-t))
hist_dist, bin_edges = np.histogram(dist_tmp_reduced,bins=hist_nbins,range=(0.0,args.hist_max),density=False)
# save rdfs
            rdf[i_rdf] = rdf[i_rdf] + hist_dist/len(select_atoms_1) # normalized for atom1
count_frames[i_rdf] = count_frames[i_rdf] + 1
    current_i = current_i + 1 # count frames processed so far
    if (current_i != 0) and (current_i%10 == 0):
        save_rdf(rdf,n_rdfs,bin_edges,count_frames,odomain,'.tmp.')
save_rdf(rdf,n_rdfs,bin_edges,count_frames,odomain,'.')
## timer
hjung.time.end_print(start_proc, start_prof)
|
jht0664/Utility_python_gromacs
|
python/rdf_itf.py
|
Python
|
mit
| 9,604
|
[
"MDAnalysis"
] |
2e0b0ecef7e0cb0ed3aea01713e90e460b5e79a4264baea5f7a0077a5d1786c0
|
"""
This module is used to extend genes predicted with Genemark and then annotate them using
the function extendGenes.
"""
"""
Copyright 2010 Jarl Haggerty
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import sys
import functools
from neofelis import utils
#Have to make sure that a directory to store the blasts this module creates exists.
if not os.path.isdir("extendedBlasts"):
os.mkdir("extendedBlasts")
def getStops(genes):
"""
genes: A list of Iteration objects.
return: A 2-tuple, first object is a list of where all the forward coding genes stop,
second is a list of where all the reverse coding genes stop.
"""
forwardStops = map(lambda x: x.location[1], filter(lambda x: x.location[0] < x.location[1], genes))
reverseStops = map(lambda x: x.location[1], filter(lambda x: x.location[1] < x.location[0], genes))
return forwardStops, reverseStops
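# Example: a gene at location (10, 300) codes on the forward strand and contributes 300
# to forwardStops; a gene at (450, 320) codes on the reverse strand and contributes 320
# to reverseStops.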
def getExtensions(genome, genes):
"""
genome: The genome as a string.
genes: A list of Iteration objects.
return: A dictionary mapping genes(Iteration objects) to alternative locations where that gene could start.
The alternate starts are calculated by starting at the original start of the gene and iterating backwards.
    When a start codon is found, its position is added to the list of alternate starts. If this start
    codon lies before the stop of the previous gene it is still added to the list, but the search terminates.
"""
forwardStops, reverseStops = getStops(genes)
forwardStops.append(1)
reverseStops.append(len(genome))
results = {}
for gene in genes:
results[gene] = []
if gene.location[0] < gene.location[1]:
bound = max(filter(lambda x: x < gene.location[1], forwardStops))
for i in xrange(gene.location[0]-1, 0, -3):
if genome[i-3:i] in utils.startCodons:
results[gene].append(i-3)
if i <= bound-1:
break
elif genome[i-3:i] in utils.stopCodons:
break
else:
bound = min(filter(lambda x: x > gene.location[1], reverseStops))
for i in xrange(gene.location[0], len(genome), 3):
if utils.reverseComplement(genome[i:i+3]) in utils.startCodons:
results[gene].append(i+3)
if i >= bound-1:
break
elif utils.reverseComplement(genome[i:i+3]) in utils.stopCodons:
break
return results
def writeExtensions(genome, extensions):
"""
genome: The genome as a string.
extensions: A dictionary mapping genes(Iteration objects) to alternative locations where that gene could start.
This function will write the translation of each possible extension to the file, "extensions.fas".
"""
output = open("extensions.fas", "w")
q = 0
for gene, extensionList in extensions.items():
for extension in extensionList:
q += 1
if gene.location[0] < gene.location[1]:
ext = extension+1
proteins = utils.translate(genome[extension:gene.location[1]])
else:
ext = extension
proteins = utils.translate(utils.reverseComplement(genome[gene.location[1]-1:extension]))
output.write(">" + gene.query + "~" + str(q) + ":" +
"-".join(map(str, [ext, gene.location[1]])) + "\n")
for i in xrange(0, len(proteins), 50):
output.write(proteins[i:min(i+50, len(proteins))] + "\n")
output.close()
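# Output format sketch (hypothetical gene "gene_1" with one alternative start at 90 and
# original stop at 200): the header line written is ">gene_1~1:90-200", followed by the
# translated protein sequence wrapped at 50 characters per line.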
def applyExtensions(genome, genes, extendedGenes):
"""
genome: The genome as a string.
genes: A dictionary that maps query names to Iteration objects
extendedGenes: A dictionary that maps query names to Iteration objects, extended versions of genes
    return: A merging of genes with extendedGenes, consisting of the "better" gene in the event of a conflict.
    The merging is done by iterating over the dictionary genes; for each entry in genes, extendedGenes
    is iterated over. If an entry in extendedGenes has a query name that starts with the query name
    of the original gene, then that entry is an extension of the original gene. This extension replaces
    the gene in the new dictionary if it either has a lower eValue than the original gene or the extension places
    it within 100 bps of the preceding gene and closer to the stop of the preceding gene.
"""
forwardStops, reverseStops = getStops(genes.values())
forwardStops.append(1)
reverseStops.append(len(genome))
def reduceFunction(gene, x, y):
if re.sub(r"(~\d+)~\d+", r"\1", y.query) == gene.query:
if gene.location[0] < gene.location[1]:
stop = max(filter(lambda z: z < gene.location[1], forwardStops))
gapSize = y.location[0] - stop
else:
stop = min(filter(lambda z: z > gene.location[1], reverseStops))
gapSize = stop - y.location[0]
if gapSize < 0:
return min(x, y, key = lambda z: abs(z.location[0] - stop))
elif gapSize < 100 or abs(x.eValue - y.eValue) < 10e-5 or utils.isNaN(x.eValue-y.eValue):
return max(x, y, key = lambda z: abs(z.location[1] - z.location[0]))
else:
return min(x, y, key = lambda z: z.eValue)
else:
return x
result = {}
for gene, geneData in genes.items():
result[gene] = reduce(functools.partial(reduceFunction, geneData), extendedGenes.values(), geneData)
if result[gene] != geneData:
result[gene].color = "0 255 0"
result[gene].note = "Extended"
return result
def extendGenes(query, genes, name, blast, database, eValue, pipeline):
"""
query: File name of the query.
    genes: A dictionary that maps query names to Iteration objects
name: Name of the genome
blast: Location of the installation of blast.
database: The database to use with blast.
eValue: The E Value to use with blast.
return: A new dictionary mapping query names to Iteration objects with any better extensions replacing the originals.
This function will search for any possible extensions of the genes in the query. An extension will replace the original gene in the resulting
dictionary if it either brings the start of the gene sufficiently close to the end of a previous gene or it has
a lower eValue.
"""
genome = utils.loadGenome(query)
extensions = getExtensions(genome, genes.values())
writeExtensions(genome, extensions)
extendedGenes = utils.cachedBlast("extendedBlasts/" + name + ".blastp.xml", blast, database, eValue, "extensions.fas", pipeline)
os.remove("extensions.fas")
return applyExtensions(genome, genes, extendedGenes)
|
jarl-haggerty/profelis
|
src/main/jython/profelis/extend.py
|
Python
|
apache-2.0
| 7,042
|
[
"BLAST"
] |
77c1b556ea87590d3b0f29ebdb3f3d87be27c124c85252dc042cf95d54ed2f91
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
# Copyright 2014 Evernote Corporation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pootle; if not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_noop as _
from pootle_app.models import Directory, Revision
from pootle_app.models.permissions import PermissionSet, get_pootle_permission
from pootle_language.models import Language
from pootle_project.models import Project
def initdb():
"""Populate the database with default initial data.
This creates the default database to get a working Pootle installation.
"""
create_revision()
create_essential_users()
create_root_directories()
create_template_languages()
create_terminology_project()
create_pootle_permissions()
create_pootle_permission_sets()
create_default_projects()
create_default_languages()
create_default_admin()
def create_revision():
Revision.objects.get_or_create(id=1)
def create_essential_users():
"""Create the 'default' and 'nobody' User instances.
These users are required for Pootle's permission system.
"""
User = get_user_model()
# The nobody user is used to represent an anonymous user in cases where
# we need to associate model information with such a user. An example is
# in the permission system: we need a way to store rights for anonymous
# users; thus we use the nobody user.
criteria = {
'username': u"nobody",
'first_name': u"any anonymous user",
'is_active': True,
}
nobody, created = User.objects.get_or_create(**criteria)
if created:
nobody.set_unusable_password()
nobody.save()
# The 'default' user represents any valid, non-anonymous user and is used
    # to associate information with any such user. An example is in the permission
# system: we need a way to store default rights for users. We use the
# 'default' user for this.
#
# In a future version of Pootle we should think about using Django's
# groups to do better permissions handling.
criteria = {
'username': u"default",
'first_name': u"any authenticated user",
'is_active': True,
}
default, created = User.objects.get_or_create(**criteria)
if created:
default.set_unusable_password()
default.save()
    # The system user represents a system, and is used to
    # associate updates done by bulk commands such as update_stores.
criteria = {
'username': u"system",
'first_name': u"system user",
'is_active': True,
}
system, created = User.objects.get_or_create(**criteria)
if created:
system.set_unusable_password()
system.save()
def create_pootle_permissions():
"""Create Pootle's directory level permissions."""
args = {
'app_label': "pootle_app",
'model': "directory",
}
pootle_content_type, created = ContentType.objects.get_or_create(**args)
pootle_content_type.name = 'pootle'
pootle_content_type.save()
# Create the permissions.
permissions = [
{
'name': _("Can view a project"),
'codename': "view",
},
{
'name': _("Can make a suggestion for a translation"),
'codename': "suggest",
},
{
'name': _("Can submit a translation"),
'codename': "translate",
},
{
'name': _("Can review translations"),
'codename': "review",
},
{
'name': _("Can administrate a translation project"),
'codename': "administrate",
},
]
criteria = {
'content_type': pootle_content_type,
}
for permission in permissions:
criteria.update(permission)
obj, created = Permission.objects.get_or_create(**criteria)
def create_pootle_permission_sets():
"""Create the default permission set for the 'nobody' and 'default' users.
'nobody' is the anonymous (non-logged in) user, and 'default' is the logged
in user.
"""
User = get_user_model()
nobody = User.objects.get(username='nobody')
default = User.objects.get(username='default')
view = get_pootle_permission('view')
suggest = get_pootle_permission('suggest')
translate = get_pootle_permission('translate')
# Default permissions for tree root.
criteria = {
'profile': nobody,
'directory': Directory.objects.root,
}
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions = [view, suggest]
permission_set.save()
criteria['profile'] = default
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions = [view, suggest, translate]
permission_set.save()
# Default permissions for templates language.
# Override with no permissions for templates language.
criteria = {
'profile': nobody,
'directory': Directory.objects.get(pootle_path="/templates/"),
}
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions = []
permission_set.save()
criteria['profile'] = default
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions = []
permission_set.save()
def require_english():
"""Create the English Language item."""
criteria = {
'code': "en",
'fullname': u"English",
'nplurals': 2,
'pluralequation': "(n != 1)",
}
en, created = Language.objects.get_or_create(**criteria)
return en
def create_root_directories():
"""Create the root Directory items."""
root, created = Directory.objects.get_or_create(name='')
projects, created = Directory.objects.get_or_create(name='projects',
parent=root)
def create_template_languages():
"""Create the 'templates' and English languages.
The 'templates' language is used to give users access to the untranslated
template files.
"""
templates, created = Language.objects.get_or_create(code="templates",
fullname=u'Templates')
require_english()
def create_terminology_project():
"""Create the terminology project.
The terminology project is used to display terminology suggestions while
translating.
"""
criteria = {
'code': "terminology",
'fullname': u"Terminology",
'source_language': require_english(),
'checkstyle': "terminology",
}
terminology, created = Project.objects.get_or_create(**criteria)
def create_default_projects():
"""Create the default projects that we host.
You might want to add your projects here, although you can also add things
through the web interface later.
"""
from pootle_project.models import Project
en = require_english()
#criteria = {
# 'code': u"pootle",
# 'source_language': en,
# 'fullname': u"Pootle",
# 'description': ('<div dir="ltr" lang="en">Interface translations for '
# 'Pootle.<br />See the <a href="http://'
# 'pootle.locamotion.org">official Pootle server</a> '
# 'for the translations of Pootle.</div>')
# 'checkstyle': "standard",
# 'localfiletype': "po",
# 'treestyle': "auto",
#}
#pootle = Project(**criteria)
#pootle.save()
criteria = {
'code': u"tutorial",
'source_language': en,
'fullname': u"Tutorial",
'description': ('<div dir="ltr" lang="en">Tutorial project where '
'users can play with Pootle and learn more about '
'translation and localisation.<br />For more help on '
'localisation, visit the <a href="http://'
'docs.translatehouse.org/projects/localization-guide/'
'en/latest/guide/start.html">localisation guide</a>.'
'</div>'),
'checkstyle': "standard",
'localfiletype': "po",
'treestyle': "auto",
}
tutorial = Project(**criteria)
tutorial.save()
def create_default_languages():
"""Create the default languages."""
from translate.lang import data, factory
from pootle_language.models import Language
# import languages from toolkit
for code in data.languages.keys():
try:
tk_lang = factory.getlanguage(code)
criteria = {
'code': code,
'fullname': tk_lang.fullname,
'nplurals': tk_lang.nplurals,
'pluralequation': tk_lang.pluralequation,
}
try:
criteria['specialchars'] = tk_lang.specialchars
except AttributeError:
pass
lang, created = Language.objects.get_or_create(**criteria)
except:
pass
def create_default_admin():
"""Create the default admin user for Pootle.
You definitely want to change the admin account so that your default
    install is not accessible with the default credentials. The users 'nobody'
and 'default' should be left as is.
"""
User = get_user_model()
criteria = {
'username': u"admin",
'first_name': u"Administrator",
'is_active': True,
'is_superuser': True,
'is_staff': True,
}
admin = User(**criteria)
admin.set_password("admin")
admin.save()
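# Typical use (sketch; the import path is an assumption based on this file's location):
#   from pootle.core.initdb import initdb
#   initdb()
# e.g. from a bootstrap/management command when setting up a new Pootle instance.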
|
evernote/pootle
|
pootle/core/initdb.py
|
Python
|
gpl-2.0
| 10,571
|
[
"VisIt"
] |
c9820e175db51b4ce311ae270e6b7b64192c5ff68f50a010b7ad5cf8996ece40
|
#
# @file TestSpecies.py
# @brief Species unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSpecies.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSpecies(unittest.TestCase):
global S
S = None
def setUp(self):
self.S = libsbml.Species(2,4)
if (self.S == None):
pass
pass
def tearDown(self):
_dummyList = [ self.S ]; _dummyList[:] = []; del _dummyList
pass
def test_Species_create(self):
self.assert_( self.S.getTypeCode() == libsbml.SBML_SPECIES )
self.assert_( self.S.getMetaId() == "" )
self.assert_( self.S.getNotes() == None )
self.assert_( self.S.getAnnotation() == None )
self.assert_( self.S.getId() == "" )
self.assert_( self.S.getName() == "" )
self.assert_( self.S.getCompartment() == "" )
self.assert_( self.S.getInitialAmount() == 0.0 )
self.assert_( self.S.getInitialConcentration() == 0.0 )
self.assert_( self.S.getSubstanceUnits() == "" )
self.assert_( self.S.getSpatialSizeUnits() == "" )
self.assert_( self.S.getHasOnlySubstanceUnits() == False )
self.assert_( self.S.getBoundaryCondition() == False )
self.assert_( self.S.getCharge() == 0 )
self.assert_( self.S.getConstant() == False )
self.assertEqual( False, self.S.isSetId() )
self.assertEqual( False, self.S.isSetName() )
self.assertEqual( False, self.S.isSetCompartment() )
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.assertEqual( False, self.S.isSetSubstanceUnits() )
self.assertEqual( False, self.S.isSetSpatialSizeUnits() )
self.assertEqual( False, self.S.isSetUnits() )
self.assertEqual( False, self.S.isSetCharge() )
self.assertEqual( True, self.S.isSetBoundaryCondition() )
self.assertEqual( True, self.S.isSetHasOnlySubstanceUnits() )
self.assertEqual( True, self.S.isSetConstant() )
pass
def test_Species_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(2,1)
sbmlns.addNamespaces(xmlns)
object = libsbml.Species(sbmlns)
self.assert_( object.getTypeCode() == libsbml.SBML_SPECIES )
self.assert_( object.getMetaId() == "" )
self.assert_( object.getNotes() == None )
self.assert_( object.getAnnotation() == None )
self.assert_( object.getLevel() == 2 )
self.assert_( object.getVersion() == 1 )
self.assert_( object.getNamespaces() != None )
self.assert_( object.getNamespaces().getLength() == 2 )
_dummyList = [ object ]; _dummyList[:] = []; del _dummyList
pass
def test_Species_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_Species_setCompartment(self):
compartment = "cell";
self.S.setCompartment(compartment)
self.assert_(( compartment == self.S.getCompartment() ))
self.assertEqual( True, self.S.isSetCompartment() )
if (self.S.getCompartment() == compartment):
pass
self.S.setCompartment(self.S.getCompartment())
self.assert_(( compartment == self.S.getCompartment() ))
self.S.setCompartment("")
self.assertEqual( False, self.S.isSetCompartment() )
if (self.S.getCompartment() != None):
pass
pass
def test_Species_setId(self):
id = "Glucose";
self.S.setId(id)
self.assert_(( id == self.S.getId() ))
self.assertEqual( True, self.S.isSetId() )
if (self.S.getId() == id):
pass
self.S.setId(self.S.getId())
self.assert_(( id == self.S.getId() ))
self.S.setId("")
self.assertEqual( False, self.S.isSetId() )
if (self.S.getId() != None):
pass
pass
def test_Species_setInitialAmount(self):
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.S.setInitialAmount(1.2)
self.assertEqual( True, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.assert_( self.S.getInitialAmount() == 1.2 )
pass
def test_Species_setInitialConcentration(self):
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.S.setInitialConcentration(3.4)
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( True, self.S.isSetInitialConcentration() )
self.assert_( self.S.getInitialConcentration() == 3.4 )
pass
def test_Species_setName(self):
name = "So_Sweet";
self.S.setName(name)
self.assert_(( name == self.S.getName() ))
self.assertEqual( True, self.S.isSetName() )
if (self.S.getName() == name):
pass
self.S.setName(self.S.getName())
self.assert_(( name == self.S.getName() ))
self.S.setName("")
self.assertEqual( False, self.S.isSetName() )
if (self.S.getName() != None):
pass
pass
def test_Species_setSpatialSizeUnits(self):
s = libsbml.Species(2,1)
units = "volume";
s.setSpatialSizeUnits(units)
self.assert_(( units == s.getSpatialSizeUnits() ))
self.assertEqual( True, s.isSetSpatialSizeUnits() )
if (s.getSpatialSizeUnits() == units):
pass
s.setSpatialSizeUnits(s.getSpatialSizeUnits())
self.assert_(( units == s.getSpatialSizeUnits() ))
s.setSpatialSizeUnits("")
self.assertEqual( False, s.isSetSpatialSizeUnits() )
if (s.getSpatialSizeUnits() != None):
pass
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
pass
def test_Species_setSubstanceUnits(self):
units = "item";
self.S.setSubstanceUnits(units)
self.assert_(( units == self.S.getSubstanceUnits() ))
self.assertEqual( True, self.S.isSetSubstanceUnits() )
if (self.S.getSubstanceUnits() == units):
pass
self.S.setSubstanceUnits(self.S.getSubstanceUnits())
self.assert_(( units == self.S.getSubstanceUnits() ))
self.S.setSubstanceUnits("")
self.assertEqual( False, self.S.isSetSubstanceUnits() )
if (self.S.getSubstanceUnits() != None):
pass
pass
def test_Species_setUnits(self):
units = "mole";
self.S.setUnits(units)
self.assert_(( units == self.S.getUnits() ))
self.assertEqual( True, self.S.isSetUnits() )
if (self.S.getSubstanceUnits() == units):
pass
self.S.setUnits(self.S.getSubstanceUnits())
self.assert_(( units == self.S.getUnits() ))
self.S.setUnits("")
self.assertEqual( False, self.S.isSetUnits() )
if (self.S.getSubstanceUnits() != None):
pass
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSpecies))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestSpecies.py
|
Python
|
gpl-3.0
| 8,165
|
[
"VisIt"
] |
cc24de1818c89526c7a78ec2d53ff2a15848b5826ea05b30f15310cc81488f69
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import math
from mantid.kernel import *
from mantid.api import *
import mantid.simpleapi as mantid
class EnggFitPeaks(PythonAlgorithm):
EXPECTED_DIM_TYPE = 'Time-of-flight'
PEAK_TYPE = 'BackToBackExponential'
# Max limit on the estimated error of a center for it to be accepted as a good fit
# (in percentage of the center value)
CENTER_ERROR_LIMIT = 10
_expected_peaks_are_in_tof = True
def category(self):
return "Diffraction\\Engineering;Diffraction\\Fitting"
def seeAlso(self):
return [ "EnggFitDIFCFromPeaks","GSASIIRefineFitPeaks","Fit" ]
def name(self):
return "EnggFitPeaks"
def summary(self):
return ("The algorithm fits an expected diffraction pattern to a spectrum from a workspace "
"by fitting one peak at a time (single peak fits).")
def PyInit(self):
self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "", Direction.Input),
"Workspace to fit peaks in. The X units must be time of flight (TOF).")
self.declareProperty("WorkspaceIndex", 0,
"Index of the spectra to fit peaks in")
self.declareProperty(FloatArrayProperty("ExpectedPeaks", (self._get_default_peaks())),
"A list of peak centre values to be translated into TOF (if required) to find expected "
"peaks.")
self.declareProperty(FileProperty(name="ExpectedPeaksFromFile", defaultValue="",
action=FileAction.OptionalLoad, extensions=[".csv"]),
"Load from file a list of peak centre values to be translated into TOF (if required) to "
"find expected peaks. This takes precedence over 'ExpectedPeaks' if both "
"options are given.")
peaks_grp = 'Peaks to fit'
self.setPropertyGroup('ExpectedPeaks', peaks_grp)
self.setPropertyGroup('ExpectedPeaksFromFile', peaks_grp)
self.declareProperty('OutFittedPeaksTable', '', direction=Direction.Input,
doc='Name for a table workspace with the parameters of the peaks found and '
'fitted. If not given, the table workspace is not created.')
self.declareProperty(ITableWorkspaceProperty("FittedPeaks", "", Direction.Output),
doc="Information on fitted peaks. The table contains, for every peak fitted "
"the expected peak value (in d-spacing), and the parameters fitted. The expected "
"values are given in the column labelled 'dSpacing'. When fitting "
"back-to-back exponential functions, the 'X0' column has the fitted peak center.")
def PyExec(self):
import EnggUtils
# Get peaks in dSpacing from file
expected_peaks = EnggUtils.read_in_expected_peaks(self.getPropertyValue("ExpectedPeaksFromFile"),
self.getProperty('ExpectedPeaks').value)
if len(expected_peaks) < 1:
raise ValueError("Cannot run this algorithm without any input expected peaks")
# Get expected peaks in TOF for the detector
in_wks = self.getProperty("InputWorkspace").value
dim_type = in_wks.getXDimension().name
if self.EXPECTED_DIM_TYPE != dim_type:
raise ValueError("This algorithm expects a workspace with %s X dimension, but "
"the X dimension of the input workspace is: '%s'" % (self.EXPECTED_DIM_TYPE, dim_type))
wks_index = self.getProperty("WorkspaceIndex").value
if self._any_expected_peaks_in_ws_range(in_wks, expected_peaks):
expected_peaks_tof = sorted(expected_peaks)
else:
expected_peaks_tof = sorted(self._expected_peaks_in_tof(expected_peaks, in_wks, wks_index))
self._expected_peaks_are_in_tof = False
if not self._any_expected_peaks_in_ws_range(in_wks, expected_peaks_tof):
raise ValueError("Expected peak centres lie outside the limits of the workspace x axis")
found_peaks = self._peaks_from_find_peaks(in_wks, expected_peaks_tof, wks_index)
if found_peaks.rowCount() < len(expected_peaks_tof):
txt = "Peaks effectively found: " + str(found_peaks)[1:-1]
self.log().warning("Some peaks from the list of expected peaks were not found by the algorithm "
"FindPeaks which this algorithm uses to check that the data has the the "
"expected peaks. " + txt)
peaks_table_name = self.getPropertyValue("OutFittedPeaksTable")
fitted_peaks = self._fit_all_peaks(in_wks, wks_index,
(found_peaks, expected_peaks), peaks_table_name)
# mandatory output
self.setProperty('FittedPeaks', fitted_peaks)
def _any_expected_peaks_in_ws_range(self, input_ws, expected_peaks):
x_axis = input_ws.readX(0)
x_min = min(x_axis)
x_max = max(x_axis)
for peak_centre in expected_peaks:
if self._expected_peak_in_ws_range(x_min, x_max, peak_centre):
return True
return False
def _expected_peak_in_ws_range(self, ws_x_min, ws_x_max, expected_peak_centre):
return ws_x_min <= expected_peak_centre <= ws_x_max
def _get_default_peaks(self):
"""
Gets default peaks for Engg algorithms. Values from CeO2
"""
import EnggUtils
return EnggUtils.default_ceria_expected_peaks()
def _estimate_start_end_fitting_range(self, center, width):
"""
Try to predict a fit window for the peak (using magic numbers). The heuristic
+-COEF_LEFT/RIGHT sometimes produces ranges that are too narrow and contain too few
samples (one or a handful) for the fitting to run correctly. A minimum is enforced.
@Returns :: a tuple with the range (start and end values) for fitting a peak.
"""
        # Magic numbers, approx. representing the shape/proportions of a B2BExponential peak
COEF_LEFT = 2
COEF_RIGHT = 3
# Current approach: don't force a minimum width. If the width initial guess is too
# narrow we might miss some peaks.
# To prevent that, the minimum could be set to for example the arbitrary '175' which
# seemed to have good effects overall, but that can lead to fitting the wrong
# (neighbor) peaks.
MIN_RANGE_WIDTH = 1
startx = center - (width * COEF_LEFT)
endx = center + (width * COEF_RIGHT)
x_diff = endx-startx
if x_diff < MIN_RANGE_WIDTH:
            inc = (MIN_RANGE_WIDTH-x_diff)/5
endx = endx + 3*inc
startx = startx - 2*inc
return startx, endx
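    # Worked example for _estimate_start_end_fitting_range (hypothetical numbers):
    # center=1000, width=20 gives startx = 1000 - 2*20 = 960 and endx = 1000 + 3*20 = 1060;
    # the resulting width (100) exceeds MIN_RANGE_WIDTH, so no widening is applied.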
def _fit_all_peaks(self, in_wks, wks_index, peaks, peaks_table_name):
"""
This method is the core of EnggFitPeaks. It tries to fit as many peaks as there are in the list of
expected peaks passed to the algorithm. This is a single peak fitting, in the sense that peaks
are fitted separately, one at a time.
The parameters from the (Gaussian) peaks fitted by FindPeaks elsewhere (before calling this method)
are used as initial guesses.
@param in_wks :: input workspace with spectra for fitting
@param wks_index :: workspace index of the spectrum where the given peaks should be fitted
@param peaks :: tuple made of two lists: found_peaks (peaks found by FindPeaks or similar
algorithm), and expected_peaks_dsp (expected peaks given as input to this algorithm
(in dSpacing units)
@param peaks_table_name :: name of an (output) table with peaks parameters. If empty, the table is anonymous
@returns a table with parameters for every fitted peak.
"""
if self._expected_peaks_are_in_tof:
peaks = (peaks[0], self._expected_peaks_in_d(peaks[1], in_wks))
found_peaks = peaks[0]
fitted_peaks = self._create_fitted_peaks_table(peaks_table_name)
prog = Progress(self, start=0, end=1, nreports=found_peaks.rowCount())
for i in range(found_peaks.rowCount()):
prog.report('Fitting peak number ' + str(i+1))
row = found_peaks.row(i)
# Peak parameters estimated by FindPeaks
initial_params = (row['centre'], row['width'], row['height'])
# Oh oh, this actually happens sometimes for some spectra of the system test dataset
# and it should be clarified when the role of FindPeaks etc. is fixed (trac ticket #10907)
            width = initial_params[1]
            if width <= 0.:
                failure_msg = ("Cannot fit a peak with these initial parameters from FindPeaks, center: %s "
                               ", width: %s, height: %s" % (initial_params[0], width, initial_params[2]))
self.log().notice('For workspace index ' + str(wks_index) + ', a peak that is in the list of '
'expected peaks and was found by FindPeaks has not been fitted correctly. '
'It will be ignored. ' + "Expected, dSpacing: {0}. Details: {1}".
format(peaks[1][i], failure_msg))
continue
try:
param_table, chi_over_dof = self._fit_single_peak(peaks[1][i], initial_params, in_wks, wks_index)
except RuntimeError:
self.log().warning("Problem found when trying to fit a peak centered at {0} (dSpacing), "
"for which the initial guess from FindPeaks is at {1} (ToF). Single "
"peak fitting failed. Skipping this peak."
.format(peaks[1][i], initial_params[0]))
continue
fitted_params = {}
fitted_params['dSpacing'] = peaks[1][i]
fitted_params['Chi'] = chi_over_dof
self._add_parameters_to_map(fitted_params, param_table)
if self._peak_is_acceptable(fitted_params, in_wks, wks_index):
fitted_peaks.addRow(fitted_params)
else:
self.log().notice("Discarding peak found with wrong center and/or excessive or suspicious "
"error estimate in the center estimate: {0} (ToF) ({1}, dSpacing), "
"with error: {2}, for dSpacing {3}".
format(fitted_params['X0'], peaks[1][i],
fitted_params['X0_Err'], fitted_params['dSpacing']))
# Check if we were able to really fit any peak
if 0 == fitted_peaks.rowCount():
failure_msg = ("Could find " + str(len(found_peaks)) + " peaks using the algorithm FindPeaks but " +
"then it was not possible to fit any peak starting from these peaks found and using '" +
self.PEAK_TYPE + "' as peak function.")
self.log().warning('Could not fit any peak. Please check the list of expected peaks, as it does not '
'seem to be appropriate for the workspace given. More details: ' +
failure_msg)
raise RuntimeError('Could not fit any peak. Failed to fit peaks with peak type ' +
self.PEAK_TYPE + ' even though FindPeaks found ' + str(found_peaks.rowCount()) +
' peaks in principle. See the logs for further details.')
self.log().information("Fitted {0} peaks in total.".format(fitted_peaks.rowCount()))
return fitted_peaks
def _fit_single_peak(self, expected_center, initial_params, wks, wks_index):
"""
Fits one peak, given an initial guess of parameters (center, width, height).
@param expected_center :: expected peak position
@param initial_params :: tuple with initial guess of the peak center, width and height
@param wks :: workspace with data (spectra) to fit
@param wks_index :: index of the spectrum to fit
@return parameters from Fit, and the goodness of fit estimation from Fit (as Chi^2/DoF)
"""
center, width, height = initial_params
# Sigma value of the peak, assuming Gaussian shape
sigma = width / (2 * math.sqrt(2 * math.log(2)))
# Approximate peak intensity, assuming Gaussian shape
intensity = height * sigma * math.sqrt(2 * math.pi)
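        # (FWHM-to-sigma conversion: sigma = FWHM / (2*sqrt(2*ln 2)) ~= FWHM / 2.3548;
        #  the intensity is the integrated area of a Gaussian, height * sigma * sqrt(2*pi))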
peak = FunctionFactory.createFunction(self.PEAK_TYPE)
peak.setParameter('X0', center)
peak.setParameter('S', sigma)
peak.setParameter('I', intensity)
# Fit using predicted window and a proper function with approximated initial values
fit_alg = self.createChildAlgorithm('Fit')
fit_function = 'name=LinearBackground;{0}'.format(peak)
fit_alg.setProperty('Function', fit_function)
fit_alg.setProperty('InputWorkspace', wks)
fit_alg.setProperty('WorkspaceIndex', wks_index)
fit_alg.setProperty('CreateOutput', True)
(startx, endx) = self._estimate_start_end_fitting_range(center, width)
fit_alg.setProperty('StartX', startx)
fit_alg.setProperty('EndX', endx)
self.log().debug("Fitting for peak expected in (d-spacing): {0}, Fitting peak function: "
"{1}, with startx: {2}, endx: {3}".
format(expected_center, fit_function, startx, endx))
fit_alg.execute()
param_table = fit_alg.getProperty('OutputParameters').value
chi_over_dof = fit_alg.getProperty('OutputChi2overDoF').value
return param_table, chi_over_dof
def _peaks_from_find_peaks(self, in_wks, expected_peaks_tof, wks_index):
"""
Use the algorithm FindPeaks to check that the expected peaks are there.
@param in_wks data workspace
@param expected_peaks_tof vector/list of expected peak values
@param wks_index workspace index
@return list of peaks found by FindPeaks. If there are no issues, the length
of this list should be the same as the number of expected peaks received.
"""
        # Find approximate peak positions, assuming Gaussian shapes
find_peaks_alg = self.createChildAlgorithm('FindPeaks')
find_peaks_alg.setProperty('InputWorkspace', in_wks)
find_peaks_alg.setProperty('PeakPositions', expected_peaks_tof)
find_peaks_alg.setProperty('PeakFunction', 'Gaussian')
find_peaks_alg.setProperty('WorkspaceIndex', wks_index)
find_peaks_alg.execute()
found_peaks = find_peaks_alg.getProperty('PeaksList').value
return found_peaks
def _expected_peaks_in_d(self, expected_peaks, input_ws):
run = input_ws.getRun()
if run.hasProperty("difc"):
difc = run.getLogData("difc").value
return self._gsas_convert_to_d(expected_peaks, run, difc)
return self._convert_peaks_to_d_using_convert_units(expected_peaks, input_ws)
def _gsas_convert_to_d(self, expected_peaks, run, difc):
tzero = run.getLogData("tzero").value if run.hasProperty("tzero") else 0
difa = run.getLogData("difa").value if run.hasProperty("difa") else 0
return [self._gsas_convert_single_peak_to_d(peak, difa, difc, tzero) for peak in expected_peaks]
def _gsas_convert_single_peak_to_d(self, peak_tof, difa, difc, tzero):
if difa < 0:
return (-difc / (2 * difa)) - math.sqrt(peak_tof / difa + math.pow(difc / 2 * difa, 2) - tzero / difa)
if difa > 0:
return (-difc / (2 * difa)) + math.sqrt(peak_tof / difa + math.pow(difc / 2 * difa, 2) - tzero / difa)
return (peak_tof - tzero) / difc
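    # The three branches above invert the GSAS-style relation
    # TOF = difa*d^2 + difc*d + tzero: the quadratic formula when difa != 0,
    # and a plain linear conversion when difa == 0.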
def _convert_peaks_to_d_using_convert_units(self, expected_peaks, input_ws):
y_values = [1] * (len(expected_peaks) - 1)
ws_tof = mantid.CreateWorkspace(UnitX="TOF", DataX=expected_peaks, DataY=y_values, ParentWorkspace=input_ws)
ws_d = mantid.ConvertUnits(InputWorkspace=ws_tof, Target="dSpacing")
return ws_d.readX(0)
def _expected_peaks_in_tof(self, expected_peaks, in_wks, wks_index):
"""
Converts expected peak dSpacing values to TOF values for the
detector. Implemented by using the Mantid algorithm ConvertUnits. A
simple user script to do what this function does would be
as follows:
import mantid.simpleapi as sapi
yVals = [1] * (len(expected_peaks) - 1)
ws_from = sapi.CreateWorkspace(UnitX='dSpacing', DataX=expected_peaks, DataY=yVals,
ParentWorkspace=in_wks)
target_units = 'TOF'
wsTo = sapi.ConvertUnits(InputWorkspace=ws_from, Target=target_units)
peaks_ToF = wsTo.dataX(0)
values = [peaks_ToF[i] for i in range(0,len(peaks_ToF))]
@param expected_peaks :: vector of expected peaks, in dSpacing units
@param in_wks :: input workspace with the relevant instrument/geometry
@param wks_index workspace index
Returns:
a vector of ToF values converted from the input (dSpacing) vector.
"""
# This and the next exception, below, still need revisiting:
# https://github.com/mantidproject/mantid/issues/12930
run = in_wks.getRun()
if 1 == in_wks.getNumberHistograms() and run.hasProperty('difc'):
difc = run.getLogData('difc').value
if run.hasProperty('tzero'):
tzero = run.getLogData('tzero').value
else:
tzero = 0
# If the log difc is present, then use these GSAS calibration parameters from the logs
return [(epd * difc + tzero) for epd in expected_peaks]
# When receiving a (for example) focused workspace we still do not know how
# to properly deal with it. CreateWorkspace won't copy the instrument sample
# and source even if given the option ParentWorkspace. Resort to old style
# hard-coded calculation.
# The present behavior of 'ConvertUnits' is to show an information log:
# "Unable to calculate sample-detector distance for 1 spectra. Masking spectrum"
# and silently produce a wrong output workspace. That might need revisiting.
if 1 == in_wks.getNumberHistograms():
return self._do_approx_hard_coded_convert_units_to_ToF(expected_peaks, in_wks, wks_index)
# Create workspace just to convert dSpacing -> ToF, yVals are irrelevant
# this used to be calculated with:
# lambda d: 252.816 * 2 * (50 + detL2) * math.sin(detTwoTheta / 2.0) * d
        # which is approximately what ConvertUnits will do
# remember the -1, we must produce a histogram data workspace, which is what
# for example EnggCalibrate expects
y_vals = [1] * (len(expected_peaks) - 1)
# Do like: ws_from = sapi.CreateWorkspace(UnitX='dSpacing', DataX=expected_peaks, DataY=yVals,
# ParentWorkspace=self.getProperty("InputWorkspace").value)
create_alg = self.createChildAlgorithm("CreateWorkspace")
create_alg.setProperty("UnitX", 'dSpacing')
create_alg.setProperty("DataX", expected_peaks)
create_alg.setProperty("DataY", y_vals)
create_alg.setProperty("ParentWorkspace", in_wks)
create_alg.execute()
ws_from = create_alg.getProperty("OutputWorkspace").value
# finally convert units, like: sapi.ConvertUnits(InputWorkspace=ws_from, Target=target_units)
conv_alg = self.createChildAlgorithm("ConvertUnits")
conv_alg.setProperty("InputWorkspace", ws_from)
target_units = 'TOF'
conv_alg.setProperty("Target", target_units)
# note: this implicitly uses default property "EMode" value 'Elastic'
good_exec = conv_alg.execute()
if not good_exec:
raise RuntimeError("Conversion of units went wrong. Failed to run ConvertUnits for {0} "
"peaks. Details: {1}".format(len(expected_peaks), expected_peaks))
output_ws = conv_alg.getProperty('OutputWorkspace').value
peaks_tof = output_ws.readX(0)
if len(peaks_tof) != len(expected_peaks):
raise RuntimeError("Conversion of units went wrong. Converted {0} peaks from the "
"original list of {1} peaks. The instrument definition might be "
"incomplete for the original workspace / file.".
format(len(peaks_tof), len(expected_peaks)))
tof_values = [peaks_tof[i] for i in range(0, len(peaks_tof))]
# catch potential failures because of geometry issues, etc.
if tof_values == expected_peaks:
vals = self._do_approx_hard_coded_convert_units_to_ToF(expected_peaks, in_wks, wks_index)
return vals
return tof_values
def _do_approx_hard_coded_convert_units_to_ToF(self, dsp_values, ws, wks_index):
"""
Converts from dSpacing to Time-of-flight, for one spectrum/detector. This method
is here for exceptional cases that presently need clarification / further work,
here and elsewhere in Mantid, and should ideally be removed in favor of the more
general method that uses the algorithm ConvertUnits.
@param dsp_values to convert from dSpacing
@param ws workspace with the appropriate instrument / geometry definition
@param wks_index index of the spectrum
Returns:
input values converted from dSpacing to ToF
"""
det = ws.getDetector(wks_index)
# Current detector parameters
detL2 = det.getDistance(ws.getInstrument().getSample())
detTwoTheta = ws.detectorTwoTheta(det)
# hard coded equation to convert dSpacing -> TOF for the single detector
# Values (in principle, expected peak positions) in TOF for the detector
tof_values = [252.816 * 2 * (50 + detL2) * math.sin(detTwoTheta / 2.0) * ep for ep in dsp_values]
return tof_values
def _create_fitted_peaks_table(self, tbl_name):
"""
Creates a table where to put peak fitting results to
@param tbl_name :: name of the table workspace (can be empty)
"""
if not tbl_name:
alg = self.createChildAlgorithm('CreateEmptyTableWorkspace')
alg.execute()
table = alg.getProperty('OutputWorkspace').value
else:
import mantid.simpleapi as sapi
table = sapi.CreateEmptyTableWorkspace(OutputWorkspace=tbl_name)
table.addColumn('double', 'dSpacing')
for param in ['A0', 'A1', 'X0', 'A', 'B', 'S', 'I']:
table.addColumn('double', param)
table.addColumn('double', param + '_Err')
table.addColumn('double', 'Chi')
return table
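    # Resulting column layout: 'dSpacing', then a (value, error) pair of columns for
    # each of A0, A1, X0, A, B, S and I, and finally 'Chi'.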
def _peak_is_acceptable(self, fitted_params, wks, wks_index):
"""
Decide whether a peak fitted looks acceptable, based on the values fitted for the
parameters of the peak and other metrics from Fit (Chi^2).
        It applies, for example, a simple rule: if the peak center is
        negative, it is obviously a fit failure. This is sometimes not so easy to tell
        from the error estimates and Chi^2 values returned from Fit, as there seem to be
        a small percentage of cases with numerical issues (nan, zeros, etc.).
@param fitted_params :: parameters fitted from Fit algorithm
        @param wks :: input workspace where a spectrum was fitted
@param wks_index :: workspace index of the spectrum that was fitted
Returns:
True if the peak function parameters and error estimates look acceptable
so the peak should be used.
"""
spec_x_axis = wks.readX(wks_index)
center = self._find_peak_center_in_params(fitted_params)
intensity = self._find_peak_intensity_in_params(fitted_params)
return (spec_x_axis.min() <= center <= spec_x_axis.max() and
intensity > 0 and
fitted_params['Chi'] < 10 and self._b2bexp_is_acceptable(fitted_params))
def _find_peak_center_in_params(self, fitted_params):
"""
Retrieve the fitted peak center/position from the set of parameters fitted.
Returns:
The peak center from the fitted parameters
"""
if 'BackToBackExponential' == self.PEAK_TYPE:
return fitted_params['X0']
else:
            raise ValueError('Inconsistency found. I do not know how to deal with centers of peaks '
                             'of types other than {0}'.format(self.PEAK_TYPE))
def _find_peak_intensity_in_params(self, fitted_params):
"""
Retrieve the fitted peak intensity/height/amplitude from the set of parameters fitted.
Returns:
The peak intensity from the fitted parameters
"""
if 'BackToBackExponential' == self.PEAK_TYPE:
return fitted_params['I']
else:
            raise ValueError('Inconsistency found. I do not know how to deal with intensities of '
                             'peaks of types other than {0}'.format(self.PEAK_TYPE))
def _b2bexp_is_acceptable(self, fitted_params):
"""
Checks specific to Back2BackExponential peak functions.
@param fitted_params :: parameters fitted, where it is assumed that the
standard Back2BackExponential parameter names are used
Returns:
True if the Bk2BkExponential parameters and error estimates look acceptable
so the peak should be used.
"""
# Ban: negative centers, negative left (A) and right (B) exponential coefficient,
# and Gaussian spread (S).
# Also ban strange error estimates (nan, all zero error)
# And make sure that the error on the center (X0) is not too big in relative terms
return (fitted_params['X0'] > 0
and fitted_params['A'] > 0 and fitted_params['B'] > 0 and fitted_params['S'] > 0
and not math.isnan(fitted_params['X0_Err'])
and not math.isnan(fitted_params['A_Err'])
and not math.isnan(fitted_params['B_Err'])
and fitted_params['X0_Err'] < (fitted_params['X0'] * 100.0 / self.CENTER_ERROR_LIMIT)
and
(0 != fitted_params['X0_Err'] and 0 != fitted_params['A_Err'] and
0 != fitted_params['B_Err'] and 0 != fitted_params['S_Err'] and
0 != fitted_params['I_Err'])
)
def _add_parameters_to_map(self, param_map, param_table):
"""
Takes parameters from a table that contains output parameters from a Fit run, and adds
them as name:value and name_Err:error_value pairs to the map.
@param param_map :: map where to add the fitting parameters
@param param_table :: table with parameters from a Fit algorithm run
"""
for i in range(param_table.rowCount() - 1): # Skip the last (fit goodness) row
row = param_table.row(i)
# Get local func. param name. E.g., not f1.A0, but just A0
name = (row['Name'].rpartition('.'))[2]
param_map[name] = row['Value']
param_map[name + '_Err'] = row['Error']
AlgorithmFactory.subscribe(EnggFitPeaks)
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/EnggFitPeaks.py
|
Python
|
gpl-3.0
| 28,172
|
[
"Gaussian"
] |
8e70103c5d66ff9440be01fc8d9f1520002002db6c3d7db2e1a763369e506d3d
|
"""
Acceptance tests for Studio's Setting pages
"""
from unittest import skip
from .base_studio_test import StudioCourseTest
from ...pages.studio.settings_certificates import CertificatesPage
class CertificatesTest(StudioCourseTest):
"""
Tests for settings/certificates Page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(CertificatesTest, self).setUp(is_staff=True)
self.certificates_page = CertificatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def make_signatory_data(self, prefix='First'):
"""
Makes signatory dict which can be used in the tests to create certificates
"""
return {
'name': '{prefix} Signatory Name'.format(prefix=prefix),
'title': '{prefix} Signatory Title'.format(prefix=prefix),
'organization': '{prefix} Signatory Organization'.format(prefix=prefix),
}
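    # e.g. make_signatory_data('First') returns
    #   {'name': 'First Signatory Name', 'title': 'First Signatory Title',
    #    'organization': 'First Signatory Organization'}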
def create_and_verify_certificate(self, course_title_override, existing_certs, signatories):
"""
Creates a new certificate and verifies that it was properly created.
"""
self.assertEqual(existing_certs, len(self.certificates_page.certificates))
if existing_certs == 0:
self.certificates_page.wait_for_first_certificate_button()
self.certificates_page.click_first_certificate_button()
else:
self.certificates_page.wait_for_add_certificate_button()
self.certificates_page.click_add_certificate_button()
certificate = self.certificates_page.certificates[existing_certs]
# Set the certificate properties
certificate.course_title = course_title_override
# add signatories
added_signatories = 0
for idx, signatory in enumerate(signatories):
certificate.signatories[idx].name = signatory['name']
certificate.signatories[idx].title = signatory['title']
certificate.signatories[idx].organization = signatory['organization']
certificate.signatories[idx].upload_signature_image('Signature-{}.png'.format(idx))
added_signatories += 1
if len(signatories) > added_signatories:
certificate.click_add_signatory_button()
# Save the certificate
self.assertEqual(certificate.get_text('.action-primary'), "Create")
certificate.click_create_certificate_button()
self.assertIn(course_title_override, certificate.course_title)
return certificate
def test_no_certificates_by_default(self):
"""
Scenario: Ensure that message telling me to create a new certificate is
shown when no certificate exist.
Given I have a course without certificates
When I go to the Certificates page in Studio
Then I see "You have not created any certificates yet." message
"""
self.certificates_page.visit()
self.assertTrue(self.certificates_page.no_certificates_message_shown)
self.assertIn(
"You have not created any certificates yet.",
self.certificates_page.no_certificates_message_text
)
    def test_can_create_and_edit_certificate(self):
"""
Scenario: Ensure that the certificates can be created and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
        And I set the new course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has correct data
When I edit the certificate
And I change the name and click the button 'Save'
Then I see the certificate is saved successfully and has the new name
"""
self.certificates_page.visit()
self.certificates_page.wait_for_first_certificate_button()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
# Edit the certificate
certificate.click_edit_certificate_button()
certificate.course_title = "Updated Course Title Override 2"
self.assertEqual(certificate.get_text('.action-primary'), "Save")
certificate.click_save_certificate_button()
self.assertIn("Updated Course Title Override 2", certificate.course_title)
@skip # TODO fix this, see SOL-1053
def test_can_delete_certificate(self):
"""
Scenario: Ensure that the user can delete certificate.
Given I have a course with 1 certificate
And I go to the Certificates page
When I delete the Certificate with name "New Certificate"
Then I see that there is no certificate
When I refresh the page
Then I see that the certificate has been deleted
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
certificate.wait_for_certificate_delete_button()
self.assertEqual(len(self.certificates_page.certificates), 1)
# Delete the certificate we just created
certificate.click_delete_certificate_button()
self.certificates_page.click_confirmation_prompt_primary_button()
# Reload the page and confirm there are no certificates
self.certificates_page.visit()
self.assertEqual(len(self.certificates_page.certificates), 0)
    def test_can_create_and_edit_signatories_of_certificate(self):
"""
Scenario: Ensure that the certificates can be created with signatories and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
        And I set the new course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has one signatory inside it
When I click 'Edit' button of signatory panel
And I set the name and click the button 'Save' icon
Then I see the signatory name updated with newly set name
When I refresh the certificates page
Then I can see course has one certificate with new signatory name
When I click 'Edit' button of signatory panel
And click on 'Close' button
Then I can see no change in signatory detail
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first')]
)
self.assertEqual(len(self.certificates_page.certificates), 1)
# Edit the signatory in certificate
signatory = certificate.signatories[0]
signatory.edit()
signatory.name = 'Updated signatory name'
signatory.title = 'Update signatory title'
signatory.organization = 'Updated signatory organization'
signatory.save()
self.assertEqual(len(self.certificates_page.certificates), 1)
signatory = self.certificates_page.certificates[0].signatories[0]
self.assertIn("Updated signatory name", signatory.name)
self.assertIn("Update signatory title", signatory.title)
self.assertIn("Updated signatory organization", signatory.organization)
signatory.edit()
signatory.close()
self.assertIn("Updated signatory name", signatory.name)
def test_can_cancel_creation_of_certificate(self):
"""
Scenario: Ensure that creation of a certificate can be canceled correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set name of certificate and click the button 'Cancel'
Then I see that there is no certificates in the course
"""
self.certificates_page.visit()
self.certificates_page.click_first_certificate_button()
certificate = self.certificates_page.certificates[0]
certificate.course_title = "Title Override"
certificate.click_cancel_edit_certificate()
self.assertEqual(len(self.certificates_page.certificates), 0)
|
jbassen/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings_certificates.py
|
Python
|
agpl-3.0
| 8,448
|
[
"VisIt"
] |
86281647a961b27ba8ef9ef7a36bc28991ad9d2592f7aed43a6538128acf7844
|
"""
Tests for kpoint functions.
"""
# pylint: disable=redefined-outer-name
import pytest
import numpy as np
import z2pack
@pytest.fixture
def kpt(line):
"""
Return a list of k-points for a given 'line' fixture.
"""
kpt = [
np.array(line(tval))
for tval in [0., 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.]
]
return kpt
VALS = [0., 0.25, 0.5, 0.75, 1.]
STRAIGHT_SIMPLE = [
lambda t, s1=s1, s2=s2: [s1, s2, t] for s1 in VALS for s2 in VALS
]
STRAIGHT_SIMPLE.extend([
lambda t, s1=s1, s2=s2: [s1, t, s2] for s1 in VALS for s2 in VALS
])
STRAIGHT_SIMPLE.extend([
lambda t, s1=s1, s2=s2: [t, s1, s2] for s1 in VALS for s2 in VALS
])
STRAIGHT_ANY_DIRECTION = [
lambda t: [0, t, t], lambda t: [t, 0, t], lambda t: [0, 1 - t, 0],
lambda t: [0.2, 0.2 + t, 0]
]
STRAIGHT_UNEQUAL_SPACING = [lambda t: [0, 0, t**2]]
STRAIGHT_MULTIPLE_BZ = [lambda t: [0, 0, 2 * t]]
NON_STRAIGHT = [
lambda t: [0, np.cos(2 * np.pi * t),
np.sin(2 * np.pi * t)],
lambda t: [0, 0.2 * np.cos(2 * np.pi * t), 0.3 * np.sin(2 * np.pi * t)],
]
VALID_COMPARABLE = STRAIGHT_SIMPLE + STRAIGHT_ANY_DIRECTION + STRAIGHT_UNEQUAL_SPACING + STRAIGHT_MULTIPLE_BZ
VALID_INCOMPARABLE = NON_STRAIGHT
ALL_VALID = VALID_COMPARABLE + VALID_INCOMPARABLE
INVALID = [lambda t: [0, 0, 0.9 * t], lambda t: [0, t]]
ALL_LINES = ALL_VALID + INVALID
VALID_LINES = {
z2pack.fp.kpoint.vasp.__name__: {
'fct':
z2pack.fp.kpoint.vasp,
'valid_comparable':
STRAIGHT_SIMPLE,
'valid_incomparable': [],
'invalid':
STRAIGHT_ANY_DIRECTION + STRAIGHT_UNEQUAL_SPACING +
STRAIGHT_MULTIPLE_BZ + NON_STRAIGHT + INVALID
},
z2pack.fp.kpoint.qe.__name__: {
'fct': z2pack.fp.kpoint.qe,
'valid_comparable': VALID_COMPARABLE,
'valid_incomparable': VALID_INCOMPARABLE,
'invalid': INVALID
},
z2pack.fp.kpoint.qe_explicit.__name__: {
'fct': z2pack.fp.kpoint.qe_explicit,
'valid_comparable': VALID_COMPARABLE,
'valid_incomparable': VALID_INCOMPARABLE,
'invalid': INVALID
},
z2pack.fp.kpoint.abinit.__name__: {
'fct': z2pack.fp.kpoint.abinit,
'valid_comparable':
STRAIGHT_SIMPLE + STRAIGHT_ANY_DIRECTION + STRAIGHT_MULTIPLE_BZ,
'valid_incomparable': [],
'invalid': STRAIGHT_UNEQUAL_SPACING + NON_STRAIGHT + INVALID
},
z2pack.fp.kpoint.wannier90.__name__: {
'fct': z2pack.fp.kpoint.wannier90,
'valid_comparable': VALID_COMPARABLE,
'valid_incomparable': VALID_INCOMPARABLE,
'invalid': INVALID
},
z2pack.fp.kpoint.wannier90_nnkpts.__name__: {
'fct': z2pack.fp.kpoint.wannier90_nnkpts,
'valid_comparable': VALID_COMPARABLE,
'valid_incomparable': VALID_INCOMPARABLE,
'invalid': INVALID
},
z2pack.fp.kpoint.wannier90_full.__name__: {
'fct': z2pack.fp.kpoint.wannier90_full,
'valid_comparable': VALID_COMPARABLE,
'valid_incomparable': VALID_INCOMPARABLE,
'invalid': INVALID
}
}
@pytest.mark.parametrize('fct', sorted(VALID_LINES.keys()))
@pytest.mark.parametrize('line', ALL_LINES)
def test_lines(kpt, fct, line, compare_equal):
"""
For each k-point function, test that it works for the lines it can handle, and raises ValueError for those it cannot.
"""
line_mapping = VALID_LINES[fct]
    if line in line_mapping['valid_comparable']:
        compare_equal(line_mapping['fct'](kpt))
    elif line in line_mapping['valid_incomparable']:
        line_mapping['fct'](kpt)
    elif line in line_mapping['invalid']:
        with pytest.raises(ValueError):
            line_mapping['fct'](kpt)
else:
raise ValueError('missing test for this line and function')
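# A quick manual check outside pytest (an illustrative sketch, not part of
# the test suite): build a straight line of nine k-points and render it with
# one of the back-end functions, which produce the k-point block as a string
# for the corresponding first-principles input file.
#
#     pts = [np.array([0., 0., t]) for t in np.linspace(0., 1., 9)]
#     print(z2pack.fp.kpoint.qe(pts))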
| Z2PackDev/Z2Pack | tests/fp/test_fp_kpoint.py | Python | gpl-3.0 | 3,820 | ["ABINIT", "VASP", "Wannier90"] | 4a73b32d781ad23817fc3cff2c9752c1d0974fd01ba7e22bd8fc74a1321e59fd |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
Improperly overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings, user_kpoints_settings and user_<whatever>_settings are
ABSOLUTE. Any new sets you implement must obey this. If a user wants to
override your settings, you assume he knows what he is doing. Do not
magically override user supplied settings. You can issue a warning if you
think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
import abc
import re
import glob
import shutil
import warnings
from itertools import chain
from copy import deepcopy
import numpy as np
from pathlib import Path
from monty.serialization import loadfn
from monty.io import zopen
from monty.dev import deprecated
from pymatgen.core.periodic_table import Specie, Element
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints, VaspInput
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.sites import PeriodicSite
__author__ = "Shyue Ping Ong, Wei Chen, Will Richards, Geoffroy Hautier, " \
"Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 28 2016"
MODULE_DIR = Path(__file__).resolve().parent
class VaspInputSet(MSONable, metaclass=abc.ABCMeta):
"""
Base class representing a set of Vasp input parameters with a structure
supplied as init parameters. Typically, you should not inherit from this
class. Start from DictSet or MPRelaxSet or MITRelaxSet.
"""
@property
@abc.abstractmethod
def incar(self):
"""Incar object"""
pass
@property
@abc.abstractmethod
def kpoints(self):
"""Kpoints object"""
pass
@property
@abc.abstractmethod
def poscar(self):
"""Poscar object"""
pass
@property
def potcar_symbols(self):
"""
List of POTCAR symbols.
"""
elements = self.poscar.site_symbols
potcar_symbols = []
settings = self._config_dict["POTCAR"]
if isinstance(settings[elements[-1]], dict):
for el in elements:
potcar_symbols.append(settings[el]['symbol']
if el in settings else el)
else:
for el in elements:
potcar_symbols.append(settings.get(el, el))
return potcar_symbols
@property
def potcar(self):
"""
Potcar object.
"""
return Potcar(self.potcar_symbols, functional=self.potcar_functional)
@property
@deprecated(message="Use the get_vasp_input() method instead.")
def all_input(self):
"""
Returns all input files as a dict of {filename: vasp object}
Returns:
dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
"""
return {'INCAR': self.incar,
'KPOINTS': self.kpoints,
'POSCAR': self.poscar,
'POTCAR': self.potcar}
def get_vasp_input(self) -> VaspInput:
"""
Returns:
VaspInput
"""
return VaspInput(incar=self.incar,
kpoints=self.kpoints,
poscar=self.poscar,
potcar=self.potcar)
def write_input(self, output_dir,
make_dir_if_not_present=True, include_cif=False):
"""
Writes a set of VASP input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
"""
vinput = self.get_vasp_input()
vinput.write_input(
output_dir, make_dir_if_not_present=make_dir_if_not_present)
if include_cif:
s = vinput["POSCAR"].structure
fname = Path(output_dir) / ("%s.cif" % re.sub(r'\s', "", s.formula))
s.to(filename=fname)
def as_dict(self, verbosity=2):
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
def _load_yaml_config(fname):
config = loadfn(str(MODULE_DIR / ("%s.yaml" % fname)))
config["INCAR"].update(loadfn(str(MODULE_DIR / "VASPIncarBase.yaml")))
return config
class DictSet(VaspInputSet):
"""
    Concrete implementation of VaspInputSet that is initialized from a dict
    of settings. This allows arbitrary settings to be input. In general,
this is rarely used directly unless there is a source of settings in yaml
format (e.g., from a REST interface). It is typically used by other
VaspInputSets for initialization.
Special consideration should be paid to the way the MAGMOM initialization
for the INCAR is done. The initialization differs depending on the type of
structure and the configuration settings. The order in which the magmom is
determined is as follows:
1. If the site itself has a magmom setting, that is used.
2. If the species on the site has a spin setting, that is used.
3. If the species itself has a particular setting in the config file, that
is used, e.g., Mn3+ may have a different magmom than Mn4+.
4. Lastly, the element symbol itself is checked in the config file. If
there are no settings, VASP's default of 0.6 is used.
Args:
structure (Structure): The Structure to create inputs for.
config_dict (dict): The config dictionary to use.
files_to_transfer (dict): A dictionary of {filename: filepath}. This
allows the transfer of files from a previous calculation.
user_incar_settings (dict): User INCAR settings. This allows a user
to override INCAR settings, e.g., setting a different MAGMOM for
various elements or species. Note that in the new scheme,
ediff_per_atom and hubbard_u are no longer args. Instead, the
config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
scales with # of atoms, the latter does not. If both are
present, EDIFF is preferred. To force such settings, just supply
user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
pymatgen defines different values depending on what anions are
present in the structure, so these keys can be defined in one
of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
5 regardless of the input structure.
If a None value is given, that key is unset. For example,
{"ENCUT": None} will remove ENCUT from the incar settings.
user_kpoints_settings (dict or Kpoints): Allow user to override kpoints
setting by supplying a dict E.g., {"reciprocal_density": 1000}.
User can also supply Kpoints object. Default is None.
        user_potcar_settings (dict): Allow user to override POTCARs. E.g.,
{"Gd": "Gd_3"}. This is generally not recommended. Default is None.
constrain_total_magmom (bool): Whether to constrain the total magmom
(NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
species. Defaults to False.
sort_structure (bool): Whether to sort the structure (using the
default sort order of electronegativity) before generating input
files. Defaults to True, the behavior you would want most of the
time. This ensures that similar atomic species are grouped
together.
potcar_functional (str): Functional to use. Default (None) is to use
the functional in Potcar.DEFAULT_FUNCTIONAL. Valid values:
"PBE", "PBE_52", "PBE_54", "LDA", "LDA_52", "LDA_54", "PW91",
"LDA_US", "PW91_US".
force_gamma (bool): Force gamma centered kpoint generation. Default
(False) is to use the Automatic Density kpoint scheme, which
will use the Gamma centered generation scheme for hexagonal
cells, and Monkhorst-Pack otherwise.
reduce_structure (None/str): Before generating the input files,
generate the reduced structure. Default (None), does not
alter the structure. Valid values: None, "niggli", "LLL".
vdw: Adds default parameters for van-der-Waals functionals supported
by VASP to INCAR. Supported functionals are: DFT-D2, undamped
DFT-D3, DFT-D3 with Becke-Jonson damping, Tkatchenko-Scheffler,
Tkatchenko-Scheffler with iterative Hirshfeld partitioning,
MBD@rSC, dDsC, Dion's vdW-DF, DF2, optPBE, optB88, optB86b and
rVV10.
use_structure_charge (bool): If set to True, then the public
variable used for setting the overall charge of the
structure (structure.charge) is used to set the NELECT
variable in the INCAR
Default is False (structure's overall charge is not used)
        standardize (bool): Whether to standardize to a primitive standard
cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding.
international_monoclinic (bool): Whether to use international convention
(vs Curtarolo) for monoclinic. Defaults True.
"""
def __init__(self, structure, config_dict,
files_to_transfer=None, user_incar_settings=None,
user_kpoints_settings=None, user_potcar_settings=None,
constrain_total_magmom=False, sort_structure=True,
potcar_functional="PBE", force_gamma=False,
reduce_structure=None, vdw=None,
use_structure_charge=False, standardize=False, sym_prec=0.1,
international_monoclinic=True):
if reduce_structure:
structure = structure.get_reduced_structure(reduce_structure)
if sort_structure:
structure = structure.get_sorted_structure()
self._structure = structure
self._config_dict = deepcopy(config_dict)
self.files_to_transfer = files_to_transfer or {}
self.constrain_total_magmom = constrain_total_magmom
self.sort_structure = sort_structure
self.potcar_functional = potcar_functional
self.force_gamma = force_gamma
self.reduce_structure = reduce_structure
self.user_incar_settings = user_incar_settings or {}
self.user_kpoints_settings = user_kpoints_settings
self.user_potcar_settings = user_potcar_settings
self.vdw = vdw.lower() if vdw is not None else None
self.use_structure_charge = use_structure_charge
self.standardize = standardize
self.sym_prec = sym_prec
self.international_monoclinic = international_monoclinic
if self.vdw:
vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
try:
self._config_dict["INCAR"].update(vdw_par[self.vdw])
except KeyError:
raise KeyError("Invalid or unsupported van-der-Waals "
"functional. Supported functionals are "
"%s." % vdw_par.keys())
if self.user_potcar_settings:
warnings.warn(
"Overriding POTCARs is generally not recommended as it "
"significantly affect the results of calculations and "
"compatibility with other calculations done with the same "
"input set. In many instances, it is better to write a "
"subclass of a desired input set and override the POTCAR in "
"the subclass to be explicit on the differences.",
BadInputSetWarning)
for k, v in self.user_potcar_settings.items():
self._config_dict["POTCAR"][k] = v
@property
def structure(self):
if self.standardize and self.sym_prec:
return standardize_structure(
self._structure, sym_prec=self.sym_prec,
international_monoclinic=self.international_monoclinic)
else:
return self._structure
@property
def incar(self):
settings = dict(self._config_dict["INCAR"])
for k, v in self.user_incar_settings.items():
if v is None:
try:
del settings[k]
except KeyError:
settings[k] = v
else:
settings[k] = v
structure = self.structure
incar = Incar()
comp = structure.composition
elements = sorted([el for el in comp.elements if comp[el] > 0],
key=lambda e: e.X)
most_electroneg = elements[-1].symbol
poscar = Poscar(structure)
hubbard_u = settings.get("LDAU", False)
for k, v in settings.items():
if k == "MAGMOM":
mag = []
for site in structure:
if hasattr(site, 'magmom'):
mag.append(site.magmom)
elif hasattr(site.specie, 'spin'):
mag.append(site.specie.spin)
elif str(site.specie) in v:
mag.append(v.get(str(site.specie)))
else:
mag.append(v.get(site.specie.symbol, 0.6))
incar[k] = mag
elif k in ('LDAUU', 'LDAUJ', 'LDAUL'):
if hubbard_u:
if hasattr(structure[0], k.lower()):
m = dict([(site.specie.symbol, getattr(site, k.lower()))
for site in structure])
incar[k] = [m[sym] for sym in poscar.site_symbols]
# lookup specific LDAU if specified for most_electroneg atom
elif most_electroneg in v.keys() and \
isinstance(v[most_electroneg], dict):
incar[k] = [v[most_electroneg].get(sym, 0)
for sym in poscar.site_symbols]
# else, use fallback LDAU value if it exists
else:
incar[k] = [v.get(sym, 0)
if isinstance(v.get(sym, 0), (float, int))
else 0 for sym in poscar.site_symbols]
elif k.startswith("EDIFF") and k != "EDIFFG":
if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
incar["EDIFF"] = float(v) * structure.num_sites
else:
incar["EDIFF"] = float(settings["EDIFF"])
else:
incar[k] = v
has_u = hubbard_u and sum(incar['LDAUU']) > 0
if has_u:
# modify LMAXMIX if LSDA+U and you have d or f electrons
# note that if the user explicitly sets LMAXMIX in settings it will
# override this logic.
if 'LMAXMIX' not in settings.keys():
# contains f-electrons
if any([el.Z > 56 for el in structure.composition]):
incar['LMAXMIX'] = 6
# contains d-electrons
elif any([el.Z > 20 for el in structure.composition]):
incar['LMAXMIX'] = 4
else:
for key in list(incar.keys()):
if key.startswith('LDAU'):
del incar[key]
if self.constrain_total_magmom:
nupdown = sum([mag if abs(mag) > 0.6 else 0
for mag in incar['MAGMOM']])
incar['NUPDOWN'] = nupdown
if self.use_structure_charge:
incar["NELECT"] = self.nelect
if np.product(self.kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
incar["ISMEAR"] = 0
if all([k.is_metal for k in structure.composition.keys()]):
if incar.get("NSW", 0) > 0 and incar.get("ISMEAR", 1) < 1:
warnings.warn("Relaxation of likely metal with ISMEAR < 1 "
"detected. Please see VASP recommendations on "
"ISMEAR for metals.", BadInputSetWarning)
return incar
@property
def poscar(self):
return Poscar(self.structure)
@property
def nelect(self):
"""
Gets the default number of electrons for a given structure.
"""
# if structure is not sorted this can cause problems, so must take
# care to remove redundant symbols when counting electrons
site_symbols = list(set(self.poscar.site_symbols))
structure = self.structure
nelect = 0.
for ps in self.potcar:
if ps.element in site_symbols:
site_symbols.remove(ps.element)
nelect += structure.composition.element_composition[
ps.element] * ps.ZVAL
if self.use_structure_charge:
return nelect - structure.charge
else:
return nelect
@property
def kpoints(self):
"""
Writes out a KPOINTS file using the fully automated grid method. Uses
        Gamma-centered meshes for hexagonal cells and Monkhorst-Pack grids
        otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
"""
settings = self.user_kpoints_settings or self._config_dict["KPOINTS"]
if isinstance(settings, Kpoints):
return settings
# If grid_density is in the kpoints_settings use
# Kpoints.automatic_density
if settings.get('grid_density'):
return Kpoints.automatic_density(
self.structure, int(settings['grid_density']),
self.force_gamma)
# If reciprocal_density is in the kpoints_settings use
# Kpoints.automatic_density_by_vol
elif settings.get('reciprocal_density'):
return Kpoints.automatic_density_by_vol(
self.structure, int(settings['reciprocal_density']),
self.force_gamma)
# If length is in the kpoints_settings use Kpoints.automatic
elif settings.get('length'):
return Kpoints.automatic(settings['length'])
# Raise error. Unsure of which kpoint generation to use
else:
raise ValueError(
"Invalid KPoint Generation algo : Supported Keys are "
"grid_density: for Kpoints.automatic_density generation, "
"reciprocal_density: for KPoints.automatic_density_by_vol "
"generation, and length : for Kpoints.automatic generation")
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return self.__class__.__name__
def write_input(self, output_dir,
make_dir_if_not_present=True, include_cif=False):
super().write_input(
output_dir=output_dir,
make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif)
for k, v in self.files_to_transfer.items():
with zopen(v, "rb") as fin, \
zopen(str(Path(output_dir) / k), "wb") as fout:
shutil.copyfileobj(fin, fout)
class MITRelaxSet(DictSet):
"""
Standard implementation of VaspInputSet utilizing parameters in the MIT
High-throughput project.
The parameters are chosen specifically for a high-throughput project,
which means in general pseudopotentials with fewer electrons were chosen.
    Please refer to::
A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
K. A. Persson, G. Ceder. A high-throughput infrastructure for density
functional theory calculations. Computational Materials Science,
2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
"""
CONFIG = _load_yaml_config("MITRelaxSet")
def __init__(self, structure, **kwargs):
super().__init__(structure, MITRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPRelaxSet(DictSet):
"""
Implementation of VaspInputSet utilizing parameters in the public
Materials Project. Typically, the pseudopotentials chosen contain more
electrons than the MIT parameters, and the k-point grid is ~50% more dense.
The LDAUU parameters are also different due to the different psps used,
which result in different fitted values.
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(self, structure, **kwargs):
super().__init__(structure, MPRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPMetalRelaxSet(MPRelaxSet):
"""
Implementation of VaspInputSet utilizing parameters in the public
    Materials Project, but with tuning for metals. Key differences are a
    denser k-point grid (reciprocal_density of 200) and Methfessel-Paxton
    smearing (ISMEAR = 1, SIGMA = 0.2), which are better suited to metals.
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(self, structure, **kwargs):
super().__init__(structure, **kwargs)
self._config_dict["INCAR"].update({
"ISMEAR": 1,
"SIGMA": 0.2
})
self._config_dict["KPOINTS"].update({
"reciprocal_density": 200
})
self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
"""
Same as the MPRelaxSet, but with HSE parameters.
"""
CONFIG = _load_yaml_config("MPHSERelaxSet")
def __init__(self, structure, **kwargs):
super().__init__(structure, MPHSERelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
"""
Run a static calculation.
Args:
structure (Structure): Structure from previous run.
prev_incar (Incar): Incar file from previous run.
prev_kpoints (Kpoints): Kpoints from previous run.
lepsilon (bool): Whether to add static dielectric calculation
reciprocal_density (int): For static calculations, we usually set the
reciprocal density by volume. This is a convenience arg to change
that, rather than using user_kpoints_settings. Defaults to 100,
which is ~50% more than that of standard relaxation calculations.
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
**kwargs: kwargs supported by MPRelaxSet.
"""
def __init__(self, structure, prev_incar=None, prev_kpoints=None,
lepsilon=False, lcalcpol=False, reciprocal_density=100,
small_gap_multiply=None, **kwargs):
super().__init__(structure, **kwargs)
if isinstance(prev_incar, str):
prev_incar = Incar.from_file(prev_incar)
if isinstance(prev_kpoints, str):
prev_kpoints = Kpoints.from_file(prev_kpoints)
self.prev_incar = prev_incar
self.prev_kpoints = prev_kpoints
self.reciprocal_density = reciprocal_density
self.kwargs = kwargs
self.lepsilon = lepsilon
self.lcalcpol = lcalcpol
self.small_gap_multiply = small_gap_multiply
@property
def incar(self):
parent_incar = super().incar
incar = Incar(self.prev_incar) if self.prev_incar is not None else \
Incar(parent_incar)
incar.update(
{"IBRION": -1, "ISMEAR": -5, "LAECHG": True, "LCHARG": True,
"LORBIT": 11, "LVHAR": True, "LWAVE": False, "NSW": 0,
"ICHARG": 0, "ALGO": "Normal"})
if self.lepsilon:
incar["IBRION"] = 8
incar["LEPSILON"] = True
# LPEAD=T: numerical evaluation of overlap integral prevents
# LRF_COMMUTATOR errors and can lead to better expt. agreement
# but produces slightly different results
incar["LPEAD"] = True
            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output the ionic contribution.
incar.pop("NSW", None)
incar.pop("NPAR", None)
if self.lcalcpol:
incar["LCALCPOL"] = True
for k in ["MAGMOM", "NUPDOWN"] + list(self.kwargs.get(
"user_incar_settings", {}).keys()):
# For these parameters as well as user specified settings, override
# the incar settings.
if parent_incar.get(k, None) is not None:
incar[k] = parent_incar[k]
else:
incar.pop(k, None)
# use new LDAUU when possible b/c the Poscar might have changed
# representation
if incar.get('LDAU'):
u = incar.get('LDAUU', [])
j = incar.get('LDAUJ', [])
            if sum(u[x] - j[x] for x in range(len(u))) > 0:
for tag in ('LDAUU', 'LDAUL', 'LDAUJ'):
incar.update({tag: parent_incar[tag]})
# ensure to have LMAXMIX for GGA+U static run
if "LMAXMIX" not in incar:
incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})
# Compare ediff between previous and staticinputset values,
# choose the tighter ediff
incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
return incar
@property
def kpoints(self):
self._config_dict["KPOINTS"]["reciprocal_density"] = \
self.reciprocal_density
kpoints = super().kpoints
# Prefer to use k-point scheme from previous run
# except for when lepsilon = True is specified
if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
if (self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst) \
and (not self.lepsilon):
k_div = [kp + 1 if kp % 2 == 1 else kp
for kp in kpoints.kpts[0]]
kpoints = Kpoints.monkhorst_automatic(k_div)
else:
kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])
return kpoints
def override_from_prev_calc(self, prev_calc_dir='.'):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self.prev_kpoints = vasprun.kpoints
if self.standardize:
warnings.warn("Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure.")
self._structure = get_structure_from_prev_run(vasprun, outcar)
# multiply the reciprocal density if needed
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = (self.reciprocal_density *
self.small_gap_multiply[1])
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for static calculations from a
directory of previous Vasp run.
Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
**kwargs: All kwargs supported by MPStaticSet, other than prev_incar
and prev_structure and prev_kpoints which are determined from
the prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
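# Typical relax -> static chaining (a sketch; "relax_dir" is a hypothetical
# directory containing vasprun.xml and OUTCAR from a prior relaxation):
#
#     static = MPStaticSet.from_prev_calc("relax_dir")
#     static.write_input("static_dir")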
class MPHSEBSSet(MPHSERelaxSet):
"""
Implementation of a VaspInputSet for HSE band structure computations.
Remember that HSE band structures must be self-consistent in VASP. A
band structure along symmetry lines for instance needs BOTH a uniform
grid with appropriate weights AND a path along the lines with weight 0.
Thus, the "Uniform" mode is just like regular static SCF but allows
adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
uniform grid that have zero weight (e.g., for better gap estimate).
The "Gap" mode behaves just like the "Uniform" mode, however, if starting
from a previous calculation, the VBM and CBM k-points will automatically
be added to ``added_kpoints``.
The "Line" mode is just like Uniform mode, but additionally adds
k-points along symmetry lines with zero weight.
Args:
structure (Structure): Structure to compute
user_incar_settings (dict): A dict specifying additional incar
settings
added_kpoints (list): a list of kpoints (list of 3 number list)
added to the run. The k-points are in fractional coordinates
mode (str): "Line" - generate k-points along symmetry lines for
bandstructure. "Uniform" - generate uniform k-points grid.
reciprocal_density (int): k-point density to use for uniform mesh.
copy_chgcar (bool): Whether to copy the CHGCAR of a previous run.
kpoints_line_density (int): k-point density for high symmetry lines
**kwargs (dict): Any other parameters to pass into DictSet.
"""
def __init__(self, structure, user_incar_settings=None, added_kpoints=None,
mode="Gap", reciprocal_density=None, copy_chgcar=True,
kpoints_line_density=20, **kwargs):
super().__init__(structure, **kwargs)
self.user_incar_settings = user_incar_settings or {}
self._config_dict["INCAR"].update({
"NSW": 0,
"ISMEAR": 0,
"SIGMA": 0.05,
"ISYM": 3,
"LCHARG": False,
"NELMIN": 5
})
self.added_kpoints = added_kpoints if added_kpoints is not None else []
self.mode = mode
        # Prefer an explicit reciprocal_density, then any user-supplied
        # kpoints setting; otherwise fall back to 50. Guard against
        # user_kpoints_settings being None.
        if reciprocal_density:
            self.reciprocal_density = reciprocal_density
        elif self.user_kpoints_settings and \
                "reciprocal_density" in self.user_kpoints_settings:
            self.reciprocal_density = \
                self.user_kpoints_settings["reciprocal_density"]
        else:
            self.reciprocal_density = 50
self.kpoints_line_density = kpoints_line_density
self.copy_chgcar = copy_chgcar
@property
def kpoints(self):
kpts = []
weights = []
all_labels = []
structure = self.structure
# for both modes, include the Uniform mesh w/standard weights
grid = Kpoints.automatic_density_by_vol(structure,
self.reciprocal_density).kpts
ir_kpts = SpacegroupAnalyzer(structure, symprec=0.1) \
.get_ir_reciprocal_mesh(grid[0])
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
all_labels.append(None)
# for both modes, include any user-added kpoints w/zero weight
for k in self.added_kpoints:
kpts.append(k)
weights.append(0.0)
all_labels.append("user-defined")
# for line mode only, add the symmetry lines w/zero weight
if self.mode.lower() == "line":
kpath = HighSymmKpath(structure)
frac_k_points, labels = kpath.get_kpoints(
line_density=self.kpoints_line_density,
coords_are_cartesian=False)
for k in range(len(frac_k_points)):
kpts.append(frac_k_points[k])
weights.append(0.0)
all_labels.append(labels[k])
comment = ("HSE run along symmetry lines"
if self.mode.lower() == "line"
else "HSE run on uniform grid")
return Kpoints(comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts), kpts=kpts, kpts_weights=weights,
labels=all_labels)
def override_from_prev_calc(self, prev_calc_dir='.'):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self._structure = get_structure_from_prev_run(vasprun, outcar)
# note: recommend not standardizing the cell because we want to retain
# k-points
if self.standardize:
warnings.warn("Use of standardize=True with from_prev_calc is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure.")
if self.mode.lower() == "gap":
added_kpoints = []
bs = vasprun.get_band_structure()
vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
if vbm:
added_kpoints.append(vbm.frac_coords)
if cbm:
added_kpoints.append(cbm.frac_coords)
self.added_kpoints.extend(added_kpoints)
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for HSE calculations from a
directory of previous Vasp run.
Args:
prev_calc_dir (str): Directory containing the outputs
(vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPHSEBSSet, other than
prev_structure which is determined from the previous calc dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
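# HSE "Gap" workflow sketch: run a regular SCF first, then let Gap mode pull
# the VBM/CBM k-points from that run ("scf_dir" is a hypothetical directory):
#
#     hse = MPHSEBSSet.from_prev_calc("scf_dir", mode="Gap")
#     hse.write_input("hse_gap")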
class MPNonSCFSet(MPRelaxSet):
"""
Init a MPNonSCFSet. Typically, you would use the classmethod
from_prev_calc to initialize from a previous SCF run.
Args:
structure (Structure): Structure to compute
prev_incar (Incar/string): Incar file from previous run.
mode (str): Line, Uniform or Boltztrap mode supported.
nedos (int): nedos parameter. Default to 2001.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
sym_prec (float): Symmetry precision (for Uniform mode).
kpoints_line_density (int): Line density for Line mode.
optics (bool): whether to add dielectric function
copy_chgcar: Whether to copy the old CHGCAR when starting from a
previous calculation.
nbands_factor (float): Multiplicative factor for NBANDS when starting
from a previous calculation. Choose a higher number if you are
doing an LOPTICS calculation.
small_gap_multiply ([float, float]): When starting from a previous
calculation, if the gap is less than 1st index, multiply the default
reciprocal_density by the 2nd index.
**kwargs: kwargs supported by MPRelaxSet.
"""
def __init__(self, structure, prev_incar=None,
mode="line", nedos=2001, reciprocal_density=100, sym_prec=0.1,
kpoints_line_density=20, optics=False, copy_chgcar=True,
nbands_factor=1.2, small_gap_multiply=None, **kwargs):
super().__init__(structure, **kwargs)
if isinstance(prev_incar, str):
prev_incar = Incar.from_file(prev_incar)
self.prev_incar = prev_incar
self.kwargs = kwargs
self.nedos = nedos
self.reciprocal_density = reciprocal_density
self.sym_prec = sym_prec
self.kpoints_line_density = kpoints_line_density
self.optics = optics
self.mode = mode.lower()
self.copy_chgcar = copy_chgcar
self.nbands_factor = nbands_factor
self.small_gap_multiply = small_gap_multiply
if self.mode.lower() not in ["line", "uniform", "boltztrap"]:
raise ValueError("Supported modes for NonSCF runs are 'Line', "
"'Uniform' and 'Boltztrap!")
if (self.mode.lower() != "uniform" or nedos < 2000) and optics:
warnings.warn("It is recommended to use Uniform mode with a high "
"NEDOS for optics calculations.")
@property
def incar(self):
incar = super().incar
if self.prev_incar is not None:
incar.update({k: v for k, v in self.prev_incar.items()})
# Overwrite necessary INCAR parameters from previous runs
incar.update({"IBRION": -1, "LCHARG": False, "LORBIT": 11,
"LWAVE": False, "NSW": 0, "ISYM": 0, "ICHARG": 11})
if self.mode.lower() == 'uniform':
# use tetrahedron method for DOS and optics calculations
incar.update({"ISMEAR": -5})
else:
# if line mode, can't use ISMEAR=-5; also use small sigma to avoid
# partial occupancies for small band gap materials.
# finally, explicit k-point generation (needed for bolztrap mode)
# is incompatible with ISMEAR = -5.
incar.update({"ISMEAR": 0, "SIGMA": 0.01})
incar.update(self.kwargs.get("user_incar_settings", {}))
if self.mode.lower() in "uniform":
# Set smaller steps for DOS and optics output
incar["NEDOS"] = self.nedos
if self.optics:
incar["LOPTICS"] = True
incar.pop("MAGMOM", None)
return incar
@property
def kpoints(self):
if self.mode.lower() == "line":
kpath = HighSymmKpath(self.structure)
frac_k_points, k_points_labels = kpath.get_kpoints(
line_density=self.kpoints_line_density,
coords_are_cartesian=False)
kpoints = Kpoints(
comment="Non SCF run along symmetry lines",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(frac_k_points),
kpts=frac_k_points, labels=k_points_labels,
kpts_weights=[1] * len(frac_k_points))
elif self.mode.lower() == "boltztrap":
kpoints = Kpoints.automatic_density_by_vol(self.structure,
self.reciprocal_density)
mesh = kpoints.kpts[0]
ir_kpts = SpacegroupAnalyzer(
self.structure,
symprec=self.sym_prec).get_ir_reciprocal_mesh(mesh)
kpts = []
weights = []
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
kpoints = Kpoints(comment="Non SCF run on uniform grid",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(ir_kpts),
kpts=kpts, kpts_weights=weights)
else:
self._config_dict["KPOINTS"]["reciprocal_density"] = \
self.reciprocal_density
kpoints = super().kpoints
# override pymatgen kpoints if provided
user_kpoints = self.kwargs.get("user_kpoints_settings", None)
if isinstance(user_kpoints, Kpoints):
kpoints = user_kpoints
return kpoints
def override_from_prev_calc(self, prev_calc_dir='.'):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
# Get a Magmom-decorated structure
self._structure = get_structure_from_prev_run(vasprun, outcar)
if self.standardize:
warnings.warn("Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized"
" structure. copy_chgcar is enforced to be false.")
self.copy_chgcar = False
# Turn off spin when magmom for every site is smaller than 0.02.
if outcar and outcar.magnetization:
site_magmom = np.array([i['tot'] for i in outcar.magnetization])
ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
elif vasprun.is_spin:
ispin = 2
else:
ispin = 1
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
self.prev_incar.update({"ISPIN": ispin, "NBANDS": nbands})
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
# multiply the reciprocal density if needed:
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = (self.reciprocal_density *
self.small_gap_multiply[1])
self.kpoints_line_density = (self.kpoints_line_density *
self.small_gap_multiply[1])
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for NonSCF calculations from a
directory of previous static Vasp run.
Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
**kwargs: All kwargs supported by MPNonSCFSet, other than structure,
prev_incar and prev_chgcar which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
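# Band-structure sketch: a non-SCF line-mode run seeded from a static
# calculation ("static_dir" is a hypothetical directory):
#
#     bs = MPNonSCFSet.from_prev_calc("static_dir", mode="line")
#     bs.write_input("bandstructure")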
class MPSOCSet(MPStaticSet):
"""
An input set for running spin-orbit coupling (SOC) calculations.
Args:
structure (Structure): the structure must have the 'magmom' site
property and each magnetic moment value must have 3
components. eg: ``magmom = [[0,0,2], ...]``
saxis (tuple): magnetic moment orientation
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
reciprocal_density (int): density of k-mesh by reciprocal volume.
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
magmom (list[list[float]]): Override for the structure magmoms.
**kwargs: kwargs supported by MPStaticSet.
"""
def __init__(self, structure, saxis=(0, 0, 1), copy_chgcar=True,
nbands_factor=1.2, reciprocal_density=100,
small_gap_multiply=None, magmom=None, **kwargs):
if (not hasattr(structure[0], "magmom") and
not isinstance(structure[0].magmom, list)):
raise ValueError(
"The structure must have the 'magmom' site "
"property and each magnetic moment value must have 3 "
"components. eg:- magmom = [0,0,2]")
super().__init__(structure, reciprocal_density=reciprocal_density,
**kwargs)
self.saxis = saxis
self.copy_chgcar = copy_chgcar
self.nbands_factor = nbands_factor
self.small_gap_multiply = small_gap_multiply
self.magmom = magmom
@property
def incar(self):
incar = super().incar
if self.prev_incar is not None:
incar.update({k: v for k, v in self.prev_incar.items()})
# Overwrite necessary INCAR parameters from previous runs
incar.update({"ISYM": -1, "LSORBIT": "T", "ICHARG": 11,
"SAXIS": list(self.saxis)})
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
def override_from_prev_calc(self, prev_calc_dir='.'):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
# Remove magmoms from previous INCAR, since we will prefer
# the final calculated magmoms
# TODO: revisit in context of MPStaticSet incar logic
if 'MAGMOM' in self.prev_incar:
            del self.prev_incar['MAGMOM']
# Get a magmom-decorated structure
self._structure = get_structure_from_prev_run(vasprun, outcar)
if self.standardize:
warnings.warn("Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized"
" structure. copy_chgcar is enforced to be false.")
self.copy_chgcar = False
# override magmom if provided
if self.magmom:
self._structure = self._structure.copy(
site_properties={"magmom": self.magmom})
# magmom has to be 3D for SOC calculation.
if hasattr(self._structure[0], "magmom"):
if not isinstance(self._structure[0].magmom, list):
self._structure = self._structure.copy(
site_properties={"magmom": [[0, 0, site.magmom]
for site in self._structure]})
else:
raise ValueError("Neither the previous structure has magmom "
"property nor magmom provided")
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
self.prev_incar.update({"NBANDS": nbands})
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
# multiply the reciprocal density if needed:
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = (self.reciprocal_density *
self.small_gap_multiply[1])
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for SOC calculations from a
directory of previous static Vasp run. SOC calc requires all 3
components for MAGMOM for each atom in the structure.
Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
**kwargs: All kwargs supported by MPSOCSet, other than structure,
prev_incar and prev_chgcar which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
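# SOC runs need 3-component magmoms on every site; a direct-construction
# sketch (assumes `struct` is an existing Structure):
#
#     struct_soc = struct.copy(
#         site_properties={"magmom": [[0, 0, 2]] * len(struct)})
#     MPSOCSet(struct_soc, saxis=(0, 0, 1)).write_input("soc_run")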
class MPNMRSet(MPStaticSet):
"""
Init a MPNMRSet.
Args:
structure (Structure): Structure to compute
mode (str): The NMR calculation to run
"cs": for Chemical Shift
"efg" for Electric Field Gradient
isotopes (list): list of Isotopes for quadrupole moments
prev_incar (Incar): Incar file from previous run.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
**kwargs: kwargs supported by MPStaticSet.
"""
def __init__(self, structure, mode="cs", isotopes=None,
prev_incar=None, reciprocal_density=100, **kwargs):
self.mode = mode
self.isotopes = isotopes if isotopes else []
super().__init__(structure, prev_incar=prev_incar,
reciprocal_density=reciprocal_density, **kwargs)
@property
def incar(self):
incar = super().incar
if self.mode.lower() == "cs":
incar.update({"LCHIMAG": True,
"EDIFF": -1.0e-10,
"ISYM": 0,
"LCHARG": False,
"LNMR_SYM_RED": True,
"NELMIN": 10,
"NSLPLINE": True,
"PREC": "ACCURATE",
"SIGMA": 0.01})
elif self.mode.lower() == "efg":
isotopes = {ist.split("-")[0]: ist for ist in self.isotopes}
quad_efg = [
Specie(p).get_nmr_quadrupole_moment(isotopes.get(p, None)) for p
in self.poscar.site_symbols]
incar.update({"ALGO": "FAST",
"EDIFF": -1.0e-10,
"ISYM": 0,
"LCHARG": False,
"LEFG": True,
"QUAD_EFG": quad_efg,
"NELMIN": 10,
"PREC": "ACCURATE",
"SIGMA": 0.01})
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
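# NMR sketch covering both modes (assumes `struct` exists; the isotope string
# format "element-mass" follows the split("-") parsing above, and "O-17" is
# an illustrative value):
#
#     MPNMRSet(struct, mode="cs").write_input("nmr_cs")
#     MPNMRSet(struct, mode="efg", isotopes=["O-17"]).write_input("nmr_efg")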
class MVLElasticSet(MPRelaxSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research.
This input set is used to calculate elastic constants in VASP. It is used
in the following work::
Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
“Elastic Properties of Alkali Superionic Conductor Electrolytes
from First Principles Calculations”, J. Electrochem. Soc.
2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes
To read the elastic constants, you may use the Outcar class which parses the
elastic constants.
Args:
        structure (Structure): Input structure.
        potim (float): POTIM parameter. The default of 0.015 is usually fine,
            but some structures may require a smaller step.
user_incar_settings (dict): A dict specifying additional incar
settings.
"""
def __init__(self, structure, potim=0.015, **kwargs):
super().__init__(structure, **kwargs)
self._config_dict["INCAR"].update({"IBRION": 6, "NFREE": 2,
"POTIM": potim})
self._config_dict["INCAR"].pop("NPAR", None)
class MVLGWSet(DictSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research. This is a
flexible input set for GW calculations.
    Note that unlike all other input sets in this module, the PBE_54 series
    of functionals is set as the default. These have much improved
    performance for GW calculations.
A typical sequence is mode="STATIC" -> mode="DIAG" -> mode="GW" ->
mode="BSE". For all steps other than the first one (static), the
recommendation is to use from_prev_calculation on the preceding run in
the series.
Args:
structure (Structure): Input structure.
prev_incar (Incar/string): Incar file from previous run.
mode (str): Supported modes are "STATIC" (default), "DIAG", "GW",
and "BSE".
nbands (int): For subsequent calculations, it is generally
recommended to perform NBANDS convergence starting from the
NBANDS of the previous run for DIAG, and to use the exact same
NBANDS for GW and BSE. This parameter is used by
            from_prev_calc to set NBANDS.
potcar_functional (str): Defaults to "PBE_54".
copy_wavecar: Whether to copy the old WAVECAR, WAVEDER and associated
files when starting from a previous calculation.
nbands_factor (int): Multiplicative factor for NBANDS when starting
from a previous calculation. Only applies if mode=="DIAG".
Need to be tested for convergence.
        ncores (int): Number of cores used for the calculation. VASP will alter
            NBANDS if it is not divisible by ncores. Only applies if
mode=="DIAG".
**kwargs: All kwargs supported by DictSet. Typically,
user_incar_settings is a commonly used option.
"""
CONFIG = _load_yaml_config("MVLGWSet")
SUPPORTED_MODES = ("DIAG", "GW", "STATIC", "BSE")
def __init__(self, structure, prev_incar=None, nbands=None,
potcar_functional="PBE_54", reciprocal_density=100,
mode="STATIC", copy_wavecar=True, nbands_factor=5, ncores=16,
**kwargs):
super().__init__(structure, MVLGWSet.CONFIG, **kwargs)
self.prev_incar = prev_incar
self.nbands = nbands
self.potcar_functional = potcar_functional
self.reciprocal_density = reciprocal_density
self.mode = mode.upper()
if self.mode not in MVLGWSet.SUPPORTED_MODES:
raise ValueError("%s not one of the support modes : %s" %
(self.mode, MVLGWSet.SUPPORTED_MODES))
self.kwargs = kwargs
self.copy_wavecar = copy_wavecar
self.nbands_factor = nbands_factor
self.ncores = ncores
@property
def kpoints(self):
"""
        Generate a Gamma-centered k-point mesh, as required for GW
        calculations.
"""
return Kpoints.automatic_density_by_vol(self.structure,
self.reciprocal_density,
force_gamma=True)
@property
def incar(self):
parent_incar = super().incar
incar = Incar(self.prev_incar) if self.prev_incar is not None else \
Incar(parent_incar)
if self.mode == "DIAG":
# Default parameters for diagonalization calculation.
incar.update({
"ALGO": "Exact",
"NELM": 1,
"LOPTICS": True,
"LPEAD": True
})
elif self.mode == "GW":
# Default parameters for GW calculation.
incar.update({
"ALGO": "GW0",
"NELM": 1,
"NOMEGA": 80,
"ENCUTGW": 250
})
incar.pop("EDIFF", None)
incar.pop("LOPTICS", None)
incar.pop("LPEAD", None)
elif self.mode == "BSE":
# Default parameters for BSE calculation.
incar.update({
"ALGO": "BSE",
"ANTIRES": 0,
"NBANDSO": 20,
"NBANDSV": 20
})
if self.nbands:
incar["NBANDS"] = self.nbands
# Respect user set INCAR.
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
def override_from_prev_calc(self, prev_calc_dir='.'):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self._structure = vasprun.final_structure
if self.standardize:
warnings.warn("Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure.")
self.nbands = int(vasprun.parameters["NBANDS"])
if self.mode.upper() == "DIAG":
self.nbands = int(np.ceil(self.nbands * self.nbands_factor /
self.ncores) * self.ncores)
# copy WAVECAR, WAVEDER (derivatives)
files_to_transfer = {}
if self.copy_wavecar:
for fname in ("WAVECAR", "WAVEDER", "WFULL"):
w = sorted(glob.glob(str(Path(prev_calc_dir) / (fname + "*"))))
if w:
if fname == "WFULL":
for f in w:
fname = Path(f).name
fname = fname.split(".")[0]
files_to_transfer[fname] = f
else:
files_to_transfer[fname] = str(w[-1])
self.files_to_transfer.update(files_to_transfer)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, mode="DIAG", **kwargs):
"""
Generate a set of Vasp input files for GW or BSE calculations from a
directory of previous Exact Diag Vasp run.
Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml) of the previous vasp run.
mode (str): Supported modes are "STATIC", "DIAG" (default), "GW",
and "BSE".
**kwargs: All kwargs supported by MVLGWSet, other than structure,
prev_incar and mode, which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, mode=mode, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
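# The STATIC -> DIAG -> GW -> BSE sequence from the class docstring, chained
# with from_prev_calc (a sketch; directory names are hypothetical):
#
#     MVLGWSet(struct, mode="STATIC").write_input("gw_static")
#     MVLGWSet.from_prev_calc("gw_static", mode="DIAG").write_input("gw_diag")
#     MVLGWSet.from_prev_calc("gw_diag", mode="GW").write_input("gw_gw")
#     MVLGWSet.from_prev_calc("gw_gw", mode="BSE").write_input("gw_bse")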
class MVLSlabSet(MPRelaxSet):
"""
Class for writing a set of slab vasp runs,
    including both slabs (along the c direction) and oriented unit cells (bulk),
to ensure the same KPOINTS, POTCAR and INCAR criterion.
Args:
k_product: default to 50, kpoint number * length for a & b directions,
also for c direction in bulk calculations
bulk (bool): Set to True for bulk calculation. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structure, k_product=50, bulk=False,
auto_dipole=False, set_mix=True, sort_structure=True,
**kwargs):
super().__init__(structure, **kwargs)
if sort_structure:
structure = structure.get_sorted_structure()
self.k_product = k_product
self.bulk = bulk
self.auto_dipole = auto_dipole
self.kwargs = kwargs
self.set_mix = set_mix
self.kpt_calc = None
slab_incar = {"EDIFF": 1e-4, "EDIFFG": -0.02, "ENCUT": 400,
"ISMEAR": 0, "SIGMA": 0.05, "ISIF": 3}
if not self.bulk:
slab_incar["ISIF"] = 2
slab_incar["LVTOT"] = True
if self.set_mix:
slab_incar["AMIN"] = 0.01
slab_incar["AMIX"] = 0.2
slab_incar["BMIX"] = 0.001
slab_incar["NELMIN"] = 8
if self.auto_dipole:
weights = [s.species.weight for s in structure]
center_of_mass = np.average(structure.frac_coords,
weights=weights, axis=0)
slab_incar["IDIPOL"] = 3
slab_incar["LDIPOL"] = True
slab_incar["DIPOL"] = center_of_mass
self._config_dict["INCAR"].update(slab_incar)
@property
def kpoints(self):
"""
k_product, default to 50, is kpoint number * length for a & b
directions, also for c direction in bulk calculations
Automatic mesh & Gamma is the default setting.
"""
        # To get input sets, the input structure has to have the same number
        # of required parameters as a Structure object (i.e., 4). Slab
        # attributes aren't going to affect the VASP inputs anyway, so
        # converting the slab into a structure should not matter.
kpt = super().kpoints
kpt.comment = "Automatic mesh"
kpt.style = 'Gamma'
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
lattice_abc = self.structure.lattice.abc
kpt_calc = [int(self.k_product / lattice_abc[0] + 0.5),
int(self.k_product / lattice_abc[1] + 0.5), 1]
self.kpt_calc = kpt_calc
# calculate kpts (c direction) for bulk. (for slab, set to 1)
if self.bulk:
kpt_calc[2] = int(self.k_product / lattice_abc[2] + 0.5)
kpt.kpts[0] = kpt_calc
return kpt
def as_dict(self, verbosity=2):
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
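# Sketch of the slab/bulk pairing this set is meant for: write the oriented
# unit cell and the slab with the same criteria (`bulk_struct` and
# `slab_struct` are assumed to exist):
#
#     MVLSlabSet(bulk_struct, bulk=True, k_product=50).write_input("obulk")
#     MVLSlabSet(slab_struct, k_product=50).write_input("slab")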
class MVLGBSet(MPRelaxSet):
"""
Class for writing a vasp input files for grain boundary calculations, slab
or bulk.
Args:
structure(Structure): provide the structure
k_product: Kpoint number * length for a & b directions, also for c
direction in bulk calculations. Default to 40.
slab_mode (bool): Defaults to False. Use default (False) for a
bulk supercell. Use True if you are performing calculations on a
            slab-like (i.e., surface) model of the GB, for example, when you are
calculating the work of separation.
is_metal (bool): Defaults to True. This determines whether an ISMEAR of
1 is used (for metals) or not (for insulators and semiconductors)
by default. Note that it does *not* override user_incar_settings,
which can be set by the user to be anything desired.
**kwargs:
Other kwargs supported by :class:`MPRelaxSet`.
"""
def __init__(self, structure, k_product=40, slab_mode=False, is_metal=True,
**kwargs):
super().__init__(structure, **kwargs)
self.k_product = k_product
self.slab_mode = slab_mode
self.is_metal = is_metal
@property
def kpoints(self):
"""
k_product, default to 40, is kpoint number * length for a & b
directions, also for c direction in bulk calculations
Automatic mesh & Gamma is the default setting.
"""
        # To get input sets, the input structure has to have the same number
# of required parameters as a Structure object.
kpt = super().kpoints
kpt.comment = "Generated by pymatgen's MVLGBSet"
kpt.style = 'Gamma'
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
lengths = self.structure.lattice.abc
kpt_calc = [int(self.k_product / lengths[0] + 0.5),
int(self.k_product / lengths[1] + 0.5),
int(self.k_product / lengths[2] + 0.5)]
if self.slab_mode:
kpt_calc[2] = 1
kpt.kpts[0] = kpt_calc
return kpt
@property
def incar(self):
incar = super().incar
# The default incar setting is used for metallic system, for
# insulator or semiconductor, ISMEAR need to be changed.
incar.update({
"LCHARG": False,
"NELM": 60,
"PREC": "Normal",
"EDIFFG": -0.02,
"ICHARG": 0,
"NSW": 200,
"EDIFF": 0.0001
})
if self.is_metal:
incar["ISMEAR"] = 1
incar["LDAU"] = False
if self.slab_mode:
            # For clean grain boundary and bulk relaxation, full optimization
            # (ISIF=3) is used. For slab relaxation, ISIF=2 is used.
incar["ISIF"] = 2
incar["NELMIN"] = 8
incar.update(self.user_incar_settings)
return incar
class MVLRelax52Set(DictSet):
"""
Implementation of VaspInputSet utilizing the public Materials Project
parameters for INCAR & KPOINTS and VASP's recommended PAW potentials for
POTCAR.
Keynotes from VASP manual:
1. Recommended potentials for calculations using vasp.5.2+
2. If dimers with short bonds are present in the compound (O2, CO,
N2, F2, P2, S2, Cl2), it is recommended to use the h potentials.
Specifically, C_h, O_h, N_h, F_h, P_h, S_h, Cl_h
3. Released on Oct 28, 2018 by VASP. Please refer to VASP
Manual 1.2, 1.3 & 10.2.1 for more details.
Args:
structure (Structure): input structure.
potcar_functional (str): choose from "PBE_52" and "PBE_54".
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
CONFIG = _load_yaml_config("MVLRelax52Set")
def __init__(self, structure, potcar_functional="PBE_52", **kwargs):
if potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("Please select from PBE_52 and PBE_54!")
super().__init__(structure, MVLRelax52Set.CONFIG,
potcar_functional=potcar_functional, **kwargs)
self.kwargs = kwargs
class MITNEBSet(MITRelaxSet):
"""
Class for writing NEB inputs. Note that EDIFF is not on a per atom
basis for this input set.
Args:
unset_encut (bool): Whether to unset ENCUT.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structures, unset_encut=False, **kwargs):
if len(structures) < 3:
raise ValueError("You need at least 3 structures for an NEB.")
kwargs["sort_structure"] = False
super().__init__(structures[0], **kwargs)
self.structures = self._process_structures(structures)
        self.unset_encut = unset_encut
if unset_encut:
self._config_dict["INCAR"].pop("ENCUT", None)
if "EDIFF" not in self._config_dict["INCAR"]:
self._config_dict["INCAR"]["EDIFF"] = self._config_dict[
"INCAR"].pop("EDIFF_PER_ATOM")
# NEB specific defaults
defaults = {'IMAGES': len(structures) - 2, 'IBRION': 1, 'ISYM': 0,
'LCHARG': False, "LDAU": False}
self._config_dict["INCAR"].update(defaults)
@property
def poscar(self):
return Poscar(self.structures[0])
@property
def poscars(self):
return [Poscar(s) for s in self.structures]
@staticmethod
def _process_structures(structures):
"""
Remove any atom jumps across the cell
"""
input_structures = structures
structures = [input_structures[0]]
for s in input_structures[1:]:
prev = structures[-1]
for i in range(len(s)):
t = np.round(prev[i].frac_coords - s[i].frac_coords)
if np.any(np.abs(t) > 0.5):
s.translate_sites([i], t, to_unit_cell=False)
structures.append(s)
return structures
def write_input(self, output_dir, make_dir_if_not_present=True,
write_cif=False, write_path_cif=False,
write_endpoint_inputs=False):
"""
        NEB inputs have a special directory structure where inputs are in 00,
01, 02, ....
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
write_cif (bool): If true, writes a cif along with each POSCAR.
            write_path_cif (bool): If true, writes a single cif containing
                all sites of the NEB path.
write_endpoint_inputs (bool): If true, writes input files for
running endpoint calculations.
"""
output_dir = Path(output_dir)
if make_dir_if_not_present and not output_dir.exists():
output_dir.mkdir(parents=True)
self.incar.write_file(str(output_dir / 'INCAR'))
self.kpoints.write_file(str(output_dir / 'KPOINTS'))
self.potcar.write_file(str(output_dir / 'POTCAR'))
for i, p in enumerate(self.poscars):
d = output_dir / str(i).zfill(2)
if not d.exists():
d.mkdir(parents=True)
p.write_file(str(d / 'POSCAR'))
if write_cif:
p.structure.to(filename=str(d / '{}.cif'.format(i)))
if write_endpoint_inputs:
end_point_param = MITRelaxSet(
self.structures[0],
user_incar_settings=self.user_incar_settings)
for image in ['00', str(len(self.structures) - 1).zfill(2)]:
end_point_param.incar.write_file(
str(output_dir / image / 'INCAR'))
end_point_param.kpoints.write_file(
str(output_dir / image / 'KPOINTS'))
end_point_param.potcar.write_file(
str(output_dir / image / 'POTCAR'))
if write_path_cif:
sites = set()
lat = self.structures[0].lattice
for site in chain(*(s.sites for s in self.structures)):
sites.add(
PeriodicSite(site.species, site.frac_coords, lat))
nebpath = Structure.from_sites(sorted(sites))
nebpath.to(filename=str(output_dir / 'path.cif'))
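# Editor's sketch, not part of the original module: writing a 5-structure NEB
# run (3 interior images) from pre-interpolated POSCARs; the file names are
# hypothetical.
def _example_neb_write():
    images = [Structure.from_file("POSCAR_%02d" % i) for i in range(5)]
    neb = MITNEBSet(images)
    # Creates 00/ ... 04/ image folders plus top-level INCAR/KPOINTS/POTCAR;
    # endpoint inputs additionally go into 00/ and 04/.
    neb.write_input("./neb_run", write_endpoint_inputs=True)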
class MITMDSet(MITRelaxSet):
"""
    Class for writing a VASP MD run. This DOES NOT do multiple stage
runs.
Args:
structure (Structure): Input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps (int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2,
spin_polarized=False, **kwargs):
# MD default settings
defaults = {'TEBEG': start_temp, 'TEEND': end_temp, 'NSW': nsteps,
'EDIFF_PER_ATOM': 0.000001, 'LSCALU': False,
'LCHARG': False,
'LPLANE': False, 'LWAVE': True, 'ISMEAR': 0,
'NELMIN': 4, 'LREAL': True, 'BMIX': 1,
'MAXMIX': 20, 'NELM': 500, 'NSIM': 4, 'ISYM': 0,
'ISIF': 0, 'IBRION': 0, 'NBLOCK': 1, 'KBLOCK': 100,
'SMASS': 0, 'POTIM': time_step, 'PREC': 'Low',
'ISPIN': 2 if spin_polarized else 1,
"LDAU": False}
super().__init__(structure, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.time_step = time_step
self.spin_polarized = spin_polarized
self.kwargs = kwargs
# use VASP default ENCUT
self._config_dict["INCAR"].pop('ENCUT', None)
if defaults['ISPIN'] == 1:
self._config_dict["INCAR"].pop('MAGMOM', None)
self._config_dict["INCAR"].update(defaults)
@property
def kpoints(self):
return Kpoints.gamma_automatic()
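# Editor's sketch, not part of the original module: a 10 ps quench from 1200 K
# to 300 K with the default 2 fs time step; the structure file is hypothetical.
def _example_md_write():
    s = Structure.from_file("POSCAR_liquid")  # hypothetical input file
    md = MITMDSet(s, start_temp=1200, end_temp=300, nsteps=5000, time_step=2)
    md.write_input("./md_run")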
class MPMDSet(MPRelaxSet):
"""
    This is a modified version of the old MITMDSet pre 2018/03/12.
This set serves as the basis for the amorphous skyline paper.
(1) Aykol, M.; Dwaraknath, S. S.; Sun, W.; Persson, K. A. Thermodynamic
Limit for Synthesis of Metastable Inorganic Materials. Sci. Adv. 2018,
4 (4).
Class for writing a vasp md run. This DOES NOT do multiple stage runs.
Precision remains normal, to increase accuracy of stress tensor.
Args:
structure (Structure): Input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps (int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structure, start_temp, end_temp, nsteps,
spin_polarized=False, **kwargs):
# MD default settings
defaults = {'TEBEG': start_temp, 'TEEND': end_temp, 'NSW': nsteps,
'EDIFF_PER_ATOM': 0.00001, 'LSCALU': False,
'LCHARG': False,
'LPLANE': False, 'LWAVE': True, 'ISMEAR': 0,
'NELMIN': 4, 'LREAL': True, 'BMIX': 1,
'MAXMIX': 20, 'NELM': 500, 'NSIM': 4, 'ISYM': 0,
'ISIF': 0, 'IBRION': 0, 'NBLOCK': 1, 'KBLOCK': 100,
'SMASS': 0, 'POTIM': 2, 'PREC': 'Normal',
'ISPIN': 2 if spin_polarized else 1,
"LDAU": False, 'ADDGRID': True}
if Element('H') in structure.species:
defaults['POTIM'] = 0.5
defaults['NSW'] = defaults['NSW'] * 4
super().__init__(structure, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.spin_polarized = spin_polarized
self.kwargs = kwargs
# use VASP default ENCUT
self._config_dict["INCAR"].pop('ENCUT', None)
if defaults['ISPIN'] == 1:
self._config_dict["INCAR"].pop('MAGMOM', None)
self._config_dict["INCAR"].update(defaults)
@property
def kpoints(self):
return Kpoints.gamma_automatic()
class MVLNPTMDSet(MITMDSet):
"""
Class for writing a vasp md run in NPT ensemble.
Notes:
        To eliminate Pulay stress, the default ENCUT is set to a rather large
        value, 1.5 * ENMAX.
Args:
structure (Structure): input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps(int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2,
spin_polarized=False, **kwargs):
user_incar_settings = kwargs.get("user_incar_settings", {})
# NPT-AIMD default settings
defaults = {"IALGO": 48,
"ISIF": 3,
"LANGEVIN_GAMMA": [10] * structure.ntypesp,
"LANGEVIN_GAMMA_L": 1,
"MDALGO": 3,
"PMASS": 10,
"PSTRESS": 0,
"SMASS": 0}
defaults.update(user_incar_settings)
kwargs["user_incar_settings"] = defaults
super().__init__(structure, start_temp, end_temp,
nsteps, time_step, spin_polarized, **kwargs)
# Set NPT-AIMD ENCUT = 1.5 * VASP_default
enmax = [self.potcar[i].keywords['ENMAX']
for i in range(structure.ntypesp)]
encut = max(enmax) * 1.5
self._config_dict["INCAR"]["ENCUT"] = encut
class MVLScanRelaxSet(MPRelaxSet):
"""
Class for writing a relax input set using Strongly Constrained and
Appropriately Normed (SCAN) semilocal density functional.
Notes:
1. This functional is only available from VASP.5.4.3 upwards.
2. Meta-GGA calculations require POTCAR files that include
information on the kinetic energy density of the core-electrons,
            i.e. "PBE_52" or "PBE_54". Make sure the POTCAR includes the
following lines (see VASP wiki for more details):
$ grep kinetic POTCAR
kinetic energy-density
mkinetic energy-density pseudized
kinetic energy density (partial)
Args:
structure (Structure): input structure.
potcar_functional (str): choose from "PBE_52" and "PBE_54".
vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
            van der Waals density functional by combining the SCAN functional
with the rVV10 non-local correlation functional.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structure, potcar_functional="PBE_52", **kwargs):
if potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("SCAN calculations required PBE_52 or PBE_54!")
super().__init__(structure, potcar_functional=potcar_functional,
**kwargs)
self._config_dict["INCAR"].update({"ADDGRID": True,
"EDIFF": 1e-05,
"EDIFFG": -0.05,
"LASPH": True,
"LDAU": False,
"METAGGA": "SCAN",
"NELM": 200})
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
    """
    Get a Vasprun and an Outcar parsed from a previous calculation directory,
    preferring plain vasprun.xml/OUTCAR over suffixed (e.g. .relax2) copies.
    """
path = Path(path)
vruns = list(glob.glob(str(path / "vasprun.xml*")))
outcars = list(glob.glob(str(path / "OUTCAR*")))
if len(vruns) == 0 or len(outcars) == 0:
raise ValueError(
"Unable to get vasprun.xml/OUTCAR from prev calculation in %s" %
path)
vsfile_fullpath = str(path / "vasprun.xml")
outcarfile_fullpath = str(path / "OUTCAR")
vsfile = vsfile_fullpath if vsfile_fullpath in vruns else sorted(vruns)[-1]
outcarfile = outcarfile_fullpath if outcarfile_fullpath in outcars else \
sorted(outcars)[-1]
return (Vasprun(vsfile, parse_dos=parse_dos, parse_eigen=parse_eigen),
Outcar(outcarfile))
def get_structure_from_prev_run(vasprun, outcar=None):
"""
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
"""
structure = vasprun.final_structure
site_properties = {}
# magmom
if vasprun.is_spin:
if outcar and outcar.magnetization:
site_properties.update({"magmom": [i['tot']
for i in outcar.magnetization]})
else:
site_properties.update({"magmom": vasprun.parameters['MAGMOM']})
# ldau
if vasprun.parameters.get("LDAU", False):
for k in ("LDAUU", "LDAUJ", "LDAUL"):
vals = vasprun.incar[k]
m = {}
l_val = []
s = 0
for site in structure:
if site.specie.symbol not in m:
m[site.specie.symbol] = vals[s]
s += 1
l_val.append(m[site.specie.symbol])
if len(l_val) == len(structure):
site_properties.update({k.lower(): l_val})
else:
raise ValueError("length of list {} not the same as"
"structure".format(l_val))
return structure.copy(site_properties=site_properties)
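# Editor's sketch, not part of the original module: chaining the two helpers
# above to carry a magmom-decorated structure from a finished run into a new
# input set; the directory paths are hypothetical.
def _example_continue_from_prev_run():
    vasprun, outcar = get_vasprun_outcar("./prev_calc")
    structure = get_structure_from_prev_run(vasprun, outcar=outcar)
    MPRelaxSet(structure).write_input("./next_calc")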
def standardize_structure(structure, sym_prec=0.1,
international_monoclinic=True):
"""
Get the symmetrically standardized structure.
Args:
structure (Structure): The structure.
sym_prec (float): Tolerance for symmetry finding for standardization.
international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults to True.
Returns:
The symmetrized structure.
"""
sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
new_structure = sym_finder.get_primitive_standard_structure(
international_monoclinic=international_monoclinic)
# the primitive structure finding has had several bugs in the past
# defend through validation
vpa_old = structure.volume / structure.num_sites
vpa_new = new_structure.volume / new_structure.num_sites
if abs(vpa_old - vpa_new) / vpa_old > 0.02:
raise ValueError(
"Standardizing cell failed! VPA old: {}, VPA new: {}".format(
vpa_old, vpa_new))
sm = StructureMatcher()
if not sm.fit(structure, new_structure):
raise ValueError(
"Standardizing cell failed! Old structure doesn't match new.")
return new_structure
class BadInputSetWarning(UserWarning):
pass
def batch_write_input(structures, vasp_input_set=MPRelaxSet, output_dir=".",
make_dir_if_not_present=True, subfolder=None,
sanitize=False, include_cif=False, **kwargs):
"""
Batch write vasp input for a sequence of structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
structures ([Structure]): Sequence of Structures.
vasp_input_set (VaspInputSet): VaspInputSet class that creates
vasp input files from structures. Note that a class should be
supplied. Defaults to MPRelaxSet.
output_dir (str): Directory to output files. Defaults to current
directory ".".
make_dir_if_not_present (bool): Create the directory if not present.
Defaults to True.
subfolder (callable): Function to create subdirectory name from
structure. Defaults to simply "formula_count".
sanitize (bool): Boolean indicating whether to sanitize the
structure before writing the VASP input files. Sanitized output
            is generally easier for viewing and certain forms of analysis.
Defaults to False.
include_cif (bool): Whether to output a CIF as well. CIF files are
generally better supported in visualization programs.
**kwargs: Additional kwargs are passed to the vasp_input_set class
in addition to structure.
"""
output_dir = Path(output_dir)
for i, s in enumerate(structures):
formula = re.sub(r'\s+', "", s.formula)
if subfolder is not None:
subdir = subfolder(s)
d = output_dir / subdir
else:
d = output_dir / '{}_{}'.format(formula, i)
if sanitize:
s = s.copy(sanitize=True)
v = vasp_input_set(s, **kwargs)
v.write_input(str(d), make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif)
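# Editor's sketch, not part of the original module: batch-writing MPRelaxSet
# inputs for a couple of structures; the POSCAR file names are hypothetical.
def _example_batch_write():
    structures = [Structure.from_file(f) for f in ("POSCAR_1", "POSCAR_2")]
    batch_write_input(structures, vasp_input_set=MPRelaxSet,
                      output_dir="./batch", include_cif=True)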
_dummy_structure = Structure([1, 0, 0, 0, 1, 0, 0, 0, 1], ['I'], [[0, 0, 0]],
site_properties={"magmom": [[0, 0, 1]]})
|
dongsenfo/pymatgen
|
pymatgen/io/vasp/sets.py
|
Python
|
mit
| 86,776
|
[
"BoltzTrap",
"VASP",
"pymatgen"
] |
8e997456685ff1ff4d05b094162afafd7af0ccf0413f81bcc96af0ebcea7a15c
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
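Combining this with authentication, the full request might look like
(illustrative example, not from the original docs):
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers/12345?select=provider.name,provider.phone,states.name,states.code"`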
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class PlanMedicare(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, carrier_name=None, display_name=None, effective_date=None, expiration_date=None, identifiers=None, name=None, network_ids=None, network_size=None, plan_type=None, service_area_id=None, source=None, id=None):
"""
PlanMedicare - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'carrier_name': 'str',
'display_name': 'str',
'effective_date': 'str',
'expiration_date': 'str',
'identifiers': 'list[PlanIdentifier]',
'name': 'str',
'network_ids': 'list[int]',
'network_size': 'int',
'plan_type': 'str',
'service_area_id': 'str',
'source': 'str',
'id': 'str'
}
self.attribute_map = {
'carrier_name': 'carrier_name',
'display_name': 'display_name',
'effective_date': 'effective_date',
'expiration_date': 'expiration_date',
'identifiers': 'identifiers',
'name': 'name',
'network_ids': 'network_ids',
'network_size': 'network_size',
'plan_type': 'plan_type',
'service_area_id': 'service_area_id',
'source': 'source',
'id': 'id'
}
self._carrier_name = carrier_name
self._display_name = display_name
self._effective_date = effective_date
self._expiration_date = expiration_date
self._identifiers = identifiers
self._name = name
self._network_ids = network_ids
self._network_size = network_size
self._plan_type = plan_type
self._service_area_id = service_area_id
self._source = source
self._id = id
@property
def carrier_name(self):
"""
Gets the carrier_name of this PlanMedicare.
Name of the insurance carrier
:return: The carrier_name of this PlanMedicare.
:rtype: str
"""
return self._carrier_name
@carrier_name.setter
def carrier_name(self, carrier_name):
"""
Sets the carrier_name of this PlanMedicare.
Name of the insurance carrier
:param carrier_name: The carrier_name of this PlanMedicare.
:type: str
"""
self._carrier_name = carrier_name
@property
def display_name(self):
"""
Gets the display_name of this PlanMedicare.
Alternate name for the Plan
:return: The display_name of this PlanMedicare.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this PlanMedicare.
Alternate name for the Plan
:param display_name: The display_name of this PlanMedicare.
:type: str
"""
self._display_name = display_name
@property
def effective_date(self):
"""
Gets the effective_date of this PlanMedicare.
Effective date of coverage.
:return: The effective_date of this PlanMedicare.
:rtype: str
"""
return self._effective_date
@effective_date.setter
def effective_date(self, effective_date):
"""
Sets the effective_date of this PlanMedicare.
Effective date of coverage.
:param effective_date: The effective_date of this PlanMedicare.
:type: str
"""
self._effective_date = effective_date
@property
def expiration_date(self):
"""
Gets the expiration_date of this PlanMedicare.
Expiration date of coverage.
:return: The expiration_date of this PlanMedicare.
:rtype: str
"""
return self._expiration_date
@expiration_date.setter
def expiration_date(self, expiration_date):
"""
Sets the expiration_date of this PlanMedicare.
Expiration date of coverage.
:param expiration_date: The expiration_date of this PlanMedicare.
:type: str
"""
self._expiration_date = expiration_date
@property
def identifiers(self):
"""
Gets the identifiers of this PlanMedicare.
List of identifiers of this Plan
:return: The identifiers of this PlanMedicare.
:rtype: list[PlanIdentifier]
"""
return self._identifiers
@identifiers.setter
def identifiers(self, identifiers):
"""
Sets the identifiers of this PlanMedicare.
List of identifiers of this Plan
:param identifiers: The identifiers of this PlanMedicare.
:type: list[PlanIdentifier]
"""
self._identifiers = identifiers
@property
def name(self):
"""
Gets the name of this PlanMedicare.
Marketing name of the plan
:return: The name of this PlanMedicare.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this PlanMedicare.
Marketing name of the plan
:param name: The name of this PlanMedicare.
:type: str
"""
self._name = name
@property
def network_ids(self):
"""
Gets the network_ids of this PlanMedicare.
List of Vericred-generated network_ids
:return: The network_ids of this PlanMedicare.
:rtype: list[int]
"""
return self._network_ids
@network_ids.setter
def network_ids(self, network_ids):
"""
Sets the network_ids of this PlanMedicare.
List of Vericred-generated network_ids
:param network_ids: The network_ids of this PlanMedicare.
:type: list[int]
"""
self._network_ids = network_ids
@property
def network_size(self):
"""
Gets the network_size of this PlanMedicare.
Total number of Providers in network
:return: The network_size of this PlanMedicare.
:rtype: int
"""
return self._network_size
@network_size.setter
def network_size(self, network_size):
"""
Sets the network_size of this PlanMedicare.
Total number of Providers in network
:param network_size: The network_size of this PlanMedicare.
:type: int
"""
self._network_size = network_size
@property
def plan_type(self):
"""
Gets the plan_type of this PlanMedicare.
Category of the plan (e.g. EPO, HMO, PPO, POS, Indemnity, PACE, Medicare-Medicaid, HMO w/POS, Cost, FFS, MSA)
:return: The plan_type of this PlanMedicare.
:rtype: str
"""
return self._plan_type
@plan_type.setter
def plan_type(self, plan_type):
"""
Sets the plan_type of this PlanMedicare.
Category of the plan (e.g. EPO, HMO, PPO, POS, Indemnity, PACE, Medicare-Medicaid, HMO w/POS, Cost, FFS, MSA)
:param plan_type: The plan_type of this PlanMedicare.
:type: str
"""
self._plan_type = plan_type
@property
def service_area_id(self):
"""
Gets the service_area_id of this PlanMedicare.
Foreign key for service area
:return: The service_area_id of this PlanMedicare.
:rtype: str
"""
return self._service_area_id
@service_area_id.setter
def service_area_id(self, service_area_id):
"""
Sets the service_area_id of this PlanMedicare.
Foreign key for service area
:param service_area_id: The service_area_id of this PlanMedicare.
:type: str
"""
self._service_area_id = service_area_id
@property
def source(self):
"""
Gets the source of this PlanMedicare.
Source of the plan benefit data
:return: The source of this PlanMedicare.
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this PlanMedicare.
Source of the plan benefit data
:param source: The source of this PlanMedicare.
:type: str
"""
self._source = source
@property
def id(self):
"""
Gets the id of this PlanMedicare.
Government-issued MedicareAdvantage plan ID
:return: The id of this PlanMedicare.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this PlanMedicare.
Government-issued MedicareAdvantage plan ID
:param id: The id of this PlanMedicare.
:type: str
"""
self._id = id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
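# Editor's sketch, not part of the generated file: constructing a model and
# serializing it; the field values are illustrative only.
def _example_plan_medicare():
    plan = PlanMedicare(carrier_name="Acme Health", name="Acme Medicare HMO",
                        plan_type="HMO", id="H1234-001")
    return plan.to_dict()  # plain dict, ready for JSON serialization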
|
vericred/vericred-python
|
vericred_client/models/plan_medicare.py
|
Python
|
apache-2.0
| 20,037
|
[
"VisIt"
] |
c69dd6c12ef74c661a338f8df6d89d6b6b7386e9c44d2b0d8a8c4b53ad294cb6
|
""" Encapsulate here the logic for limiting the matching of jobs
Utilities and classes here are used by the Matcher
"""
from DIRAC import S_OK, S_ERROR
from DIRAC import gLogger
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Utilities.DErrno import cmpError, ESECTION
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.Client import JobStatus
class Limiter:
# static variables shared between all instances of this class
csDictCache = DictCache()
condCache = DictCache()
delayMem = {}
def __init__(self, jobDB=None, opsHelper=None, pilotRef=None):
"""Constructor"""
self.__runningLimitSection = "JobScheduling/RunningLimit"
self.__matchingDelaySection = "JobScheduling/MatchingDelay"
if jobDB:
self.jobDB = jobDB
else:
self.jobDB = JobDB()
if pilotRef:
self.log = gLogger.getSubLogger("[%s]Limiter" % pilotRef)
self.jobDB.log = gLogger.getSubLogger("[%s]Limiter" % pilotRef)
else:
self.log = gLogger.getSubLogger("Limiter")
if opsHelper:
self.__opsHelper = opsHelper
else:
self.__opsHelper = Operations()
def getNegativeCond(self):
"""Get negative condition for ALL sites"""
orCond = self.condCache.get("GLOBAL")
if orCond:
return orCond
negCond = {}
# Run Limit
result = self.__opsHelper.getSections(self.__runningLimitSection)
sites = []
if result["OK"]:
sites = result["Value"]
for siteName in sites:
result = self.__getRunningCondition(siteName)
if not result["OK"]:
continue
data = result["Value"]
if data:
negCond[siteName] = data
# Delay limit
result = self.__opsHelper.getSections(self.__matchingDelaySection)
sites = []
if result["OK"]:
sites = result["Value"]
for siteName in sites:
result = self.__getDelayCondition(siteName)
if not result["OK"]:
continue
data = result["Value"]
if not data:
continue
if siteName in negCond:
negCond[siteName] = self.__mergeCond(negCond[siteName], data)
else:
negCond[siteName] = data
orCond = []
for siteName in negCond:
negCond[siteName]["Site"] = siteName
orCond.append(negCond[siteName])
self.condCache.add("GLOBAL", 10, orCond)
return orCond
def getNegativeCondForSite(self, siteName, gridCE=None):
"""Generate a negative query based on the limits set on the site"""
# Check if Limits are imposed onto the site
negativeCond = {}
if self.__opsHelper.getValue("JobScheduling/CheckJobLimits", True):
result = self.__getRunningCondition(siteName)
if not result["OK"]:
self.log.error("Issue getting running conditions", result["Message"])
else:
negativeCond = result["Value"]
self.log.verbose(
"Negative conditions for site", "%s after checking limits are: %s" % (siteName, str(negativeCond))
)
if gridCE:
result = self.__getRunningCondition(siteName, gridCE)
if not result["OK"]:
self.log.error("Issue getting running conditions", result["Message"])
else:
negativeCondCE = result["Value"]
negativeCond = self.__mergeCond(negativeCond, negativeCondCE)
if self.__opsHelper.getValue("JobScheduling/CheckMatchingDelay", True):
result = self.__getDelayCondition(siteName)
if result["OK"]:
delayCond = result["Value"]
self.log.verbose(
"Negative conditions for site", "%s after delay checking are: %s" % (siteName, str(delayCond))
)
negativeCond = self.__mergeCond(negativeCond, delayCond)
if negativeCond:
self.log.info("Negative conditions for site", "%s are: %s" % (siteName, str(negativeCond)))
return negativeCond
def __mergeCond(self, negCond, addCond):
"""Merge two negative dicts"""
# Merge both negative dicts
for attr in addCond:
if attr not in negCond:
negCond[attr] = []
for value in addCond[attr]:
if value not in negCond[attr]:
negCond[attr].append(value)
return negCond
def __extractCSData(self, section):
"""Extract limiting information from the CS in the form:
{ 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
"""
stuffDict = self.csDictCache.get(section)
if stuffDict:
return S_OK(stuffDict)
result = self.__opsHelper.getSections(section)
if not result["OK"]:
if cmpError(result, ESECTION):
return S_OK({})
return result
attribs = result["Value"]
stuffDict = {}
for attName in attribs:
result = self.__opsHelper.getOptionsDict("%s/%s" % (section, attName))
if not result["OK"]:
return result
attLimits = result["Value"]
try:
attLimits = dict([(k, int(attLimits[k])) for k in attLimits])
except Exception as excp:
errMsg = "%s/%s has to contain numbers: %s" % (section, attName, str(excp))
self.log.error(errMsg)
return S_ERROR(errMsg)
stuffDict[attName] = attLimits
self.csDictCache.add(section, 300, stuffDict)
return S_OK(stuffDict)
def __getRunningCondition(self, siteName, gridCE=None):
"""Get extra conditions allowing site throttling"""
if gridCE:
csSection = "%s/%s/CEs/%s" % (self.__runningLimitSection, siteName, gridCE)
else:
csSection = "%s/%s" % (self.__runningLimitSection, siteName)
result = self.__extractCSData(csSection)
if not result["OK"]:
return result
limitsDict = result["Value"]
# limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
if not limitsDict:
return S_OK({})
        # Check if the site is exceeding the given limits
negCond = {}
for attName in limitsDict:
if attName not in self.jobDB.jobAttributeNames:
self.log.error("Attribute does not exist", "(%s). Check the job limits" % attName)
continue
cK = "Running:%s:%s" % (siteName, attName)
data = self.condCache.get(cK)
if not data:
result = self.jobDB.getCounters(
"Jobs",
[attName],
{"Site": siteName, "Status": [JobStatus.RUNNING, JobStatus.MATCHED, JobStatus.STALLED]},
)
if not result["OK"]:
return result
data = result["Value"]
data = dict([(k[0][attName], k[1]) for k in data])
self.condCache.add(cK, 10, data)
for attValue in limitsDict[attName]:
limit = limitsDict[attName][attValue]
running = data.get(attValue, 0)
if running >= limit:
self.log.verbose(
"Job Limit imposed",
"at %s on %s/%s=%d, %d jobs already deployed" % (siteName, attName, attValue, limit, running),
)
if attName not in negCond:
negCond[attName] = []
negCond[attName].append(attValue)
# negCond is something like : {'JobType': ['Merge']}
return S_OK(negCond)
def updateDelayCounters(self, siteName, jid):
# Get the info from the CS
siteSection = "%s/%s" % (self.__matchingDelaySection, siteName)
result = self.__extractCSData(siteSection)
if not result["OK"]:
return result
delayDict = result["Value"]
        # delayDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
if not delayDict:
return S_OK()
attNames = []
for attName in delayDict:
if attName not in self.jobDB.jobAttributeNames:
self.log.error("Attribute does not exist in the JobDB. Please fix it!", "(%s)" % attName)
else:
attNames.append(attName)
result = self.jobDB.getJobAttributes(jid, attNames)
if not result["OK"]:
self.log.error("Error while retrieving attributes", "coming from %s: %s" % (siteSection, result["Message"]))
return result
atts = result["Value"]
# Create the DictCache if not there
if siteName not in self.delayMem:
self.delayMem[siteName] = DictCache()
# Update the counters
delayCounter = self.delayMem[siteName]
for attName in atts:
attValue = atts[attName]
if attValue in delayDict[attName]:
delayTime = delayDict[attName][attValue]
self.log.notice("Adding delay for %s/%s=%s of %s secs" % (siteName, attName, attValue, delayTime))
delayCounter.add((attName, attValue), delayTime)
return S_OK()
def __getDelayCondition(self, siteName):
"""Get extra conditions allowing matching delay"""
if siteName not in self.delayMem:
return S_OK({})
lastRun = self.delayMem[siteName].getKeys()
negCond = {}
for attName, attValue in lastRun:
if attName not in negCond:
negCond[attName] = []
negCond[attName].append(attValue)
return S_OK(negCond)
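# Editor's sketch, not part of the original module: how Matcher-side code
# might query the limits. This requires a configured DIRAC installation; the
# site name is hypothetical.
def _example_limiter_usage():
    limiter = Limiter()
    negCond = limiter.getNegativeCondForSite("LCG.Example.org")
    return negCond  # e.g. {'JobType': ['Merge']} when a running limit is hit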
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Client/Limiter.py
|
Python
|
gpl-3.0
| 10,119
|
[
"DIRAC"
] |
0658d71c300e5683d13d2248b3984b8c277d7982ec6dbeb5526478b8732665c6
|
# -*- coding: utf-8 -*-
#
# tsodyks_facilitating.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
tsodyks facilitating example
----------------------------
This script simulates two neurons. One is driven with dc input and
connected to the other one with a facilitating tsodyks synapse. The
membrane potential trace of the second neuron is recorded.
This example reproduces figure 1B of Tsodyks et al. (1998)
**Neural Networks with Dynamic Synapses**.
*Neural computation*, http://dx.doi.org/10.1162/089976698300017502
This example is analogous to ``tsodyks_depressing.py``, except that
different synapse parameters are used. Here, a small facilitation
parameter ``U`` causes a slow saturation of the synaptic efficacy
(Eq. 2.2), enabling a facilitating behavior.
'''
'''
First, we import all necessary modules for simulation and plotting.
'''
import nest
import nest.voltage_trace
import pylab
from numpy import exp
'''
Second, the simulation parameters are assigned to variables. The
neuron and synapse parameters are stored into a dictionary.
'''
h = 0.1 # simulation step size (ms)
Tau = 40. # membrane time constant
Theta = 15. # threshold
E_L = 0. # reset potential of membrane potential
R = 1. # membrane resistance (GOhm)
C = Tau / R # Tau (ms)/R in NEST units
TauR = 2. # refractory time
Tau_psc = 1.5 # time constant of PSC (= Tau_inact)
Tau_rec = 130. # recovery time
Tau_fac = 530. # facilitation time
U = 0.03 # facilitation parameter U
A = 1540. # PSC weight in pA
f = 20. / 1000. # frequency in Hz converted to 1/ms
Tend = 1200. # simulation time
TIstart = 50. # start time of dc
TIend = 1050. # end time of dc
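# Editor's note: the dc amplitude below is the constant current that drives a
# leaky integrate-and-fire neuron (time constant Tau, refractory period TauR)
# from reset to threshold Theta exactly once per interspike interval 1/f, so
# the driven neuron fires regularly at rate f.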
I0 = Theta * C / Tau / (1 - exp(-(1 / f - TauR) / Tau)) # dc amplitude
neuron_param = {"tau_m": Tau,
"t_ref": TauR,
"tau_syn_ex": Tau_psc,
"tau_syn_in": Tau_psc,
"C_m": C,
"V_reset": E_L,
"E_L": E_L,
"V_m": E_L,
"V_th": Theta}
syn_param = {"tau_psc": Tau_psc,
"tau_rec": Tau_rec,
"tau_fac": Tau_fac,
"U": U,
"delay": 0.1,
"weight": A,
"u": 0.0,
"x": 1.0}
'''
Third, we reset the kernel and set the resolution using `SetKernelStatus`.
'''
nest.ResetKernel()
nest.SetKernelStatus({"resolution": h})
'''
Fourth, the nodes are created using `Create`. We store the returned
handles in variables for later reference.
'''
neurons = nest.Create("iaf_psc_exp", 2)
dc_gen = nest.Create("dc_generator")
volts = nest.Create("voltmeter")
'''
Fifth, the `iaf_psc_exp`-neurons, the `dc_generator` and the
`voltmeter` are configured using `SetStatus`, which expects a list of
node handles and a parameter dictionary or a list of parameter
dictionaries.
'''
nest.SetStatus(neurons, neuron_param)
nest.SetStatus(dc_gen, {"amplitude": I0, "start": TIstart, "stop": TIend})
nest.SetStatus(volts, {"label": "voltmeter", "withtime": True, "withgid": True,
"interval": 1.})
'''
Sixth, the `dc_generator` is connected to the first neuron
(``neurons[0]``) and the `voltmeter` is connected to the second neuron
(``neurons[1]``). The command `Connect` has different variants. Plain
`Connect` just takes the handles of pre- and post-synaptic nodes and
uses the default values for weight and delay. Note that the connection
direction for the `voltmeter` reflects the signal flow in the
simulation kernel, because it observes the neuron instead of receiving
events from it.
'''
nest.Connect(dc_gen, [neurons[0]])
nest.Connect(volts, [neurons[1]])
'''
Seventh, the first neuron (``neurons[0]``) is connected to the
second neuron (``neurons[1]``). The command `CopyModel` copies the
`tsodyks_synapse` model to the new name ``syn`` with parameters
``syn_param``. The manually defined model ``syn`` is used in the
connection routine via the ``syn_spec`` parameter.
'''
nest.CopyModel("tsodyks_synapse", "syn", syn_param)
nest.Connect([neurons[0]], [neurons[1]], syn_spec="syn")
'''
Finally, we simulate the configuration using the command
`Simulate`, where the simulation time ``Tend`` is passed as the
argument. We plot the target neuron's membrane potential as a
function of time.
'''
nest.Simulate(Tend)
nest.voltage_trace.from_device(volts)
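# Editor's addition: with non-interactive matplotlib backends the figure only
# appears after an explicit show(); pylab is already imported above.
pylab.show()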
|
tobikausk/nest-simulator
|
pynest/examples/tsodyks_facilitating.py
|
Python
|
gpl-2.0
| 4,981
|
[
"NEURON"
] |
61cbccffb41ac931069ee54e0f28f80bb29ec37668de7752682842692d6453d6
|
#!/usr/bin/env python
import json
import os
import sys
import shutil
from setuptools import setup, find_packages
import pyct.build
setup_args = {}
install_requires = [
"param >=1.9.3,<2.0",
"numpy >=1.0",
"pyviz_comms >=0.7.4",
"panel >=0.9.5",
"colorcet",
"pandas >=0.20.0",
]
extras_require = {}
# Notebook dependencies
extras_require["notebook"] = ["ipython >=5.4.0", "notebook"]
# IPython Notebook + pandas + matplotlib + bokeh
extras_require["recommended"] = extras_require["notebook"] + [
"matplotlib >=3",
"bokeh >=1.1.0",
]
# Requirements to run all examples
extras_require["examples"] = extras_require["recommended"] + [
"networkx",
"pillow",
"xarray >=0.10.4",
"plotly >=4.0",
'dash >=1.16',
"streamz >=0.5.0",
"datashader >=0.11.1",
"ffmpeg",
"cftime",
"netcdf4",
"dask",
"scipy",
"shapely",
"scikit-image"
]
if sys.version_info.major > 2:
extras_require["examples"].extend(
[
"pyarrow",
"ibis-framework >=1.3",
] # spatialpandas incompatibility
)
# Extra third-party libraries
extras_require["extras"] = extras_require["examples"] + [
"pscript ==0.7.1",
]
# Test requirements
extras_require['tests'] = [
'pytest',
'pytest-cov',
'mock',
'flake8',
'coveralls',
'path.py',
'matplotlib >=3',
'nbsmoke >=0.2.0',
'nbconvert <6',
'twine',
'rfc3986',
'keyring'
]
extras_require["unit_tests"] = extras_require["examples"] + extras_require["tests"]
extras_require["basic_tests"] = (
extras_require["tests"]
+ ["matplotlib >=3", "bokeh >=1.1.0", "pandas"]
+ extras_require["notebook"]
)
extras_require["nbtests"] = extras_require["recommended"] + [
"nose",
"awscli",
"deepdiff",
]
extras_require['doc'] = extras_require['examples'] + [
'nbsite >=0.6.8a36',
'sphinx',
'sphinx_holoviz_theme',
'mpl_sample_data >=3.1.3',
'awscli',
'pscript',
'graphviz',
'bokeh >2.2'
]
extras_require["build"] = [
"param >=1.7.0",
"setuptools >=30.3.0",
"pyct >=0.4.4",
]
# Everything for examples and nosetests
extras_require["all"] = list(
set(extras_require["unit_tests"]) | set(extras_require["nbtests"])
)
def get_setup_version(reponame):
"""
Helper to get the current version from either git describe or the
.version file (if available).
"""
basepath = os.path.split(__file__)[0]
version_file_path = os.path.join(basepath, reponame, ".version")
try:
from param import version
except ImportError:
version = None
if version is not None:
return version.Version.setup_version(
basepath, reponame, archive_commit="$Format:%h$"
)
else:
print(
"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0."
)
return json.load(open(version_file_path, "r"))["version_string"]
setup_args.update(
dict(
name="holoviews",
version=get_setup_version("holoviews"),
python_requires=">=2.7",
install_requires=install_requires,
extras_require=extras_require,
description="Stop plotting your data - annotate your data and let it visualize itself.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author="Jean-Luc Stevens and Philipp Rudiger",
author_email="holoviews@gmail.com",
maintainer="PyViz Developers",
maintainer_email="developers@pyviz.org",
platforms=["Windows", "Mac OS X", "Linux"],
license="BSD",
url="https://www.holoviews.org",
entry_points={"console_scripts": ["holoviews = holoviews.util.command:main"]},
packages=find_packages(),
include_package_data=True,
classifiers=[
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Natural Language :: English",
"Framework :: Matplotlib",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
],
)
)
if __name__ == "__main__":
example_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "holoviews/examples"
)
if "develop" not in sys.argv and "egg_info" not in sys.argv:
pyct.build.examples(example_path, __file__, force=True)
if "install" in sys.argv:
header = "HOLOVIEWS INSTALLATION INFORMATION"
bars = "=" * len(header)
extras = "\n".join("holoviews[%s]" % e for e in setup_args["extras_require"])
print("%s\n%s\n%s" % (bars, header, bars))
print("\nHoloViews supports the following installation types:\n")
print("%s\n" % extras)
print("Users should consider using one of these options.\n")
print("By default only a core installation is performed and ")
print("only the minimal set of dependencies are fetched.\n\n")
print("For more information please visit http://holoviews.org/install.html\n")
print(bars + "\n")
setup(**setup_args)
if os.path.isdir(example_path):
shutil.rmtree(example_path)
|
ioam/holoviews
|
setup.py
|
Python
|
bsd-3-clause
| 5,692
|
[
"VisIt"
] |
d6e8a0c9284a57464c1458e4edae577d1953757e7ffe3977371c14898a82512d
|
""" Main class for doing consistency checks, between files in:
- File Catalog
- TransformationSystem
Should be extended to include the Storage (in DIRAC)
"""
import os
import time
import sys
import re
import errno
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.Core.Utilities.Adler import compareAdler
class ConsistencyInspector(object):
""" A class for handling some consistency checks
"""
def __init__(self, interactive=True, transClient=None, dm=None, fc=None, dic=None):
""" c'tor
        interactive: Data Manager (True) or DIRAC Agent (False)
transClient: TransformationClient() if None, else transClient params
dm: DataManager() if None, else dm params
        fc: FileCatalog() if None, else fc params
        dic: DataIntegrityClient() if None, else dic params
One object for every production/directoriesList...
"""
self.interactive = interactive
self.transClient = TransformationClient() if transClient is None else transClient
self.dataManager = dm if dm else DataManager()
self.fileCatalog = fc if fc else FileCatalog()
self.dic = dic if dic else DataIntegrityClient()
self.dirac = Dirac()
# Base elements from which to start the consistency checks
self._prod = 0
self._bkQuery = None
self._fileType = []
self._fileTypesExcluded = []
self._lfns = []
self.directories = []
# Accessory elements
self.runsList = []
self.runStatus = None
self.fromProd = None
self.transType = ''
self.cachedReplicas = {}
self.prcdWithDesc = []
self.prcdWithoutDesc = []
self.prcdWithMultDesc = []
self.nonPrcdWithDesc = []
self.nonPrcdWithoutDesc = []
self.nonPrcdWithMultDesc = []
self.descForPrcdLFNs = []
self.descForNonPrcdLFNs = []
self.removedFiles = []
self.absentLFNsInFC = []
self.existLFNsNoSE = {}
self.existLFNsBadReplicas = {}
self.existLFNsBadFiles = {}
self.existLFNsNotExisting = {}
self.commonAncestors = {}
self.multipleDescendants = {}
self.ancestors = {}
self._verbose = False
def __logVerbose(self, msg, msg1=''):
""" logger helper for verbose information """
if self._verbose:
            newMsg = '[ConsistencyChecks] ' + (('[%s] ' % str(self.prod)) if self.prod else '')
# Add that prefix to all lines of the message
newMsg1 = msg1.replace('\n', '\n' + newMsg)
newMsg += msg.replace('\n', '\n' + newMsg)
gLogger.notice(newMsg, newMsg1)
else:
gLogger.verbose(msg, msg1)
##########################################################################
def checkFC2SE(self):
""" check files vs SE information """
repDict = self.compareChecksum(self.lfns)
self.existLFNsNoSE = repDict['MissingReplica']
self.existLFNsNotExisting = repDict['MissingAllReplicas']
self.existLFNsBadReplicas = repDict['SomeReplicasCorrupted']
self.existLFNsBadFiles = repDict['AllReplicasCorrupted']
def getReplicasPresence(self, lfns):
""" get the replicas using the standard FileCatalog.getReplicas()
"""
present = set()
notPresent = set()
chunkSize = 100
printProgress = (len(lfns) > chunkSize)
startTime = time.time()
self.__write("Checking replicas for %d files%s" %
(len(lfns), (' (chunks of %d)' % chunkSize) if printProgress else '... '))
for chunk in breakListIntoChunks(lfns, chunkSize):
if printProgress:
self.__write('.')
            for _ in range(1, 10):
res = self.fileCatalog.getReplicas(chunk)
if res['OK']:
present.update(res['Value']['Successful'])
self.cachedReplicas.update(res['Value']['Successful'])
notPresent.update(res['Value']['Failed'])
break
else:
time.sleep(0.1)
self.__write(' (%.1f seconds)\n' % (time.time() - startTime))
if notPresent:
self.__logVerbose("Files without replicas:",
'\n'.join([''] + sorted(notPresent)))
return list(present), list(notPresent)
##########################################################################
def getReplicasPresenceFromDirectoryScan(self, lfns):
""" Get replicas scanning the directories. Might be faster.
"""
dirs = {}
present = []
notPresent = []
compare = True
for lfn in lfns:
dirN = os.path.dirname(lfn)
if lfn == dirN + '/':
compare = False
dirs.setdefault(dirN, []).append(lfn)
if compare:
self.__write("Checking File Catalog for %d files from %d directories " % (
len(lfns), len(dirs)))
else:
self.__write("Getting files from %d directories " % len(dirs))
startTime = time.time()
for dirN in sorted(dirs):
startTime1 = time.time()
self.__write('.')
lfnsFound = self._getFilesFromDirectoryScan(dirN)
gLogger.verbose("Obtained %d files in %.1f seconds" %
(len(lfnsFound), time.time() - startTime1))
if compare:
pr, notPr = self.__compareLFNLists(dirs[dirN], lfnsFound)
notPresent += notPr
present += pr
else:
present += lfnsFound
self.__write(' (%.1f seconds)\n' % (time.time() - startTime))
gLogger.info("Found %d files with replicas and %d without" %
(len(present), len(notPresent)))
return present, notPresent
##########################################################################
def __compareLFNLists(self, lfns, lfnsFound):
""" return files in both lists and files in lfns and not in lfnsFound
"""
present = []
notPresent = lfns
startTime = time.time()
self.__logVerbose("Comparing list of %d LFNs with second list of %d" % (
len(lfns), len(lfnsFound)))
if lfnsFound:
setLfns = set(lfns)
setLfnsFound = set(lfnsFound)
present = list(setLfns & setLfnsFound)
notPresent = list(setLfns - setLfnsFound)
self.__logVerbose("End of comparison: %.1f seconds" %
(time.time() - startTime))
return present, notPresent
def _getFilesFromDirectoryScan(self, dirs):
""" calls dm.getFilesFromDirectory
"""
level = gLogger.getLevel()
gLogger.setLevel('FATAL')
res = self.dataManager.getFilesFromDirectory(dirs)
gLogger.setLevel(level)
if not res['OK']:
if 'No such file or directory' not in res['Message']:
gLogger.error("Error getting files from directories %s:" %
dirs, res['Message'])
return []
if res['Value']:
lfnsFound = res['Value']
else:
lfnsFound = []
return lfnsFound
##########################################################################
def _getTSFiles(self):
""" Helper function - get files from the TS
"""
selectDict = {'TransformationID': self.prod}
if self._lfns:
selectDict['LFN'] = self._lfns
elif self.runStatus and self.fromProd:
res = self.transClient.getTransformationRuns(
{'TransformationID': self.fromProd, 'Status': self.runStatus})
if not res['OK']:
gLogger.error("Failed to get runs for transformation %d" % self.prod)
else:
if res['Value']:
self.runsList.extend(
[run['RunNumber'] for run in res['Value'] if run['RunNumber'] not in self.runsList])
gLogger.notice("%d runs selected" % len(res['Value']))
elif not self.runsList:
gLogger.notice("No runs selected, check completed")
DIRAC.exit(0)
if not self._lfns and self.runsList:
selectDict['RunNumber'] = self.runsList
res = self.transClient.getTransformation(self.prod)
if not res['OK']:
gLogger.error("Failed to find transformation %s" % self.prod)
return [], [], []
status = res['Value']['Status']
if status not in ('Active', 'Stopped', 'Completed', 'Idle'):
gLogger.notice("Transformation %s in status %s, will not check if files are processed" % (
self.prod, status))
processedLFNs = []
nonProcessedLFNs = []
nonProcessedStatuses = []
if self._lfns:
processedLFNs = self._lfns
else:
res = self.transClient.getTransformationFiles(selectDict)
if not res['OK']:
gLogger.error("Failed to get files for transformation %d" %
self.prod, res['Message'])
return [], [], []
else:
processedLFNs = [item['LFN']
for item in res['Value'] if item['Status'] == 'Processed']
nonProcessedLFNs = [item['LFN']
for item in res['Value'] if item['Status'] != 'Processed']
nonProcessedStatuses = list(
set(item['Status'] for item in res['Value'] if item['Status'] != 'Processed'))
return processedLFNs, nonProcessedLFNs, nonProcessedStatuses
def __getDirectories(self):
""" get the directories where to look into (they are either given, or taken from the transformation ID
"""
if self.directories:
directories = []
printout = False
for directory in self.directories:
if not directory.endswith('...'):
directories.append(directory)
else:
printout = True
topDir = os.path.dirname(directory)
res = self.fileCatalog.listDirectory(topDir)
if not res['OK']:
# DError(errno.ENOENT, res['Message'] )
return S_ERROR(errno.ENOENT, res['Message'])
else:
matchDir = directory.split('...')[0]
directories += [d for d in res['Value']['Successful'].get(topDir, {}).get('SubDirs', [])
if d.startswith(matchDir)]
if printout:
gLogger.always('Expanded list of %d directories:\n%s' %
(len(directories), '\n'.join(directories)))
return directories
else:
return S_ERROR(errno.ENOENT, 'Need to specify the directories')
##########################################################################
def __write(self, text):
if self.interactive:
sys.stdout.write(text)
sys.stdout.flush()
##########################################################################
def _selectByFileType(self, lfnDict, fileTypes=None, fileTypesExcluded=None):
""" Select only those files from the values of lfnDict that have a certain type
"""
if not lfnDict:
return {}
if not fileTypes:
fileTypes = self.fileType
if not fileTypesExcluded:
fileTypesExcluded = self.fileTypesExcluded
else:
fileTypesExcluded += [
ft for ft in self.fileTypesExcluded if ft not in fileTypesExcluded]
    # lfnDict is a dictionary of dictionaries including the metadata; take a
    # copy we can modify (note: dict() is a shallow copy, so the inner
    # per-ancestor dictionaries are shared with the caller's input)
    ancDict = dict(lfnDict)
if fileTypes == ['']:
fileTypes = []
# and loop on the original dictionaries
for ancestor in lfnDict:
for desc in lfnDict[ancestor].keys():
ft = lfnDict[ancestor][desc]['FileType']
if ft in fileTypesExcluded or (fileTypes and ft not in fileTypes):
ancDict[ancestor].pop(desc)
if not len(ancDict[ancestor]):
ancDict.pop(ancestor)
return ancDict
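  # Shape sketch with hypothetical values: lfnDict maps ancestor LFN ->
  # {descendant LFN -> metadata}, e.g.
  #   {'/lhcb/anc.dst': {'/lhcb/desc.DST': {'FileType': 'DST'}}}
  # With fileTypes=['DST'], only descendants whose 'FileType' is DST survive,
  # and ancestors left with no descendants are dropped from the result.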
@staticmethod
def _getFileTypesCount(lfnDict):
""" return file types count
"""
ft_dict = {}
for ancestor in lfnDict:
t_dict = {}
for desc in lfnDict[ancestor]:
ft = lfnDict[ancestor][desc]['FileType']
t_dict[ft] = t_dict.setdefault(ft, 0) + 1
ft_dict[ancestor] = t_dict
return ft_dict
  def __getLFNsFromFC(self):
    """ Check if a list of LFNs is in the FC or not """
    if not self.lfns:
      res = self.__getDirectories()
      # __getDirectories returns S_ERROR on failure but a plain list on success
      if isinstance(res, dict):
        gLogger.error("Failed to get directories", res['Message'])
        return [], []
      directories = []
      for dirName in res:
        if not dirName.endswith('/'):
          dirName += '/'
        directories.append(dirName)
      present, notPresent = self.getReplicasPresenceFromDirectoryScan(
          directories)
    else:
      present, notPresent = self.getReplicasPresence(self.lfns)
    return present, notPresent
def compareChecksum(self, lfns):
"""compare the checksum of the file in the FC and the checksum of the physical replicas.
Returns a dictionary containing 3 sub-dictionaries: one with files with missing PFN, one with
files with all replicas corrupted, and one with files with some replicas corrupted and at least
one good replica
"""
retDict = {'AllReplicasCorrupted': {},
'SomeReplicasCorrupted': {},
'MissingReplica': {},
'MissingAllReplicas': {},
'NoReplicas': {}}
chunkSize = 100
replicas = {}
setLfns = set(lfns)
cachedLfns = setLfns & set(self.cachedReplicas)
for lfn in cachedLfns:
replicas[lfn] = self.cachedReplicas[lfn]
lfnsLeft = list(setLfns - cachedLfns)
if lfnsLeft:
self.__write("Get replicas for %d files (chunks of %d): " %
(len(lfnsLeft), chunkSize))
for lfnChunk in breakListIntoChunks(lfnsLeft, chunkSize):
self.__write('.')
replicasRes = self.fileCatalog.getReplicas(lfnChunk)
if not replicasRes['OK']:
gLogger.error("error: %s" % replicasRes['Message'])
return S_ERROR(errno.ENOENT, "error: %s" % replicasRes['Message'])
replicasRes = replicasRes['Value']
if replicasRes['Failed']:
retDict['NoReplicas'].update(replicasRes['Failed'])
replicas.update(replicasRes['Successful'])
self.__write("Get FC metadata for %d files to be checked: " % len(lfns))
metadata = {}
for lfnChunk in breakListIntoChunks(replicas, chunkSize):
self.__write('.')
res = self.fileCatalog.getFileMetadata(lfnChunk)
if not res['OK']:
return S_ERROR(errno.ENOENT, "error %s" % res['Message'])
metadata.update(res['Value']['Successful'])
gLogger.notice("Check existence and compare checksum file by file...")
csDict = {}
seFiles = {}
# Reverse the LFN->SE dictionary
nReps = 0
for lfn in replicas:
csDict.setdefault(lfn, {})['FCChecksum'] = metadata.get(
lfn, {}).get('Checksum')
for se in replicas[lfn]:
seFiles.setdefault(se, []).append(lfn)
nReps += 1
gLogger.notice('Getting checksum of %d replicas in %d SEs' %
(nReps, len(seFiles)))
checkSum = {}
lfnNotExisting = {}
lfnNoInfo = {}
logLevel = gLogger.getLevel()
gLogger.setLevel('FATAL')
for num, se in enumerate(sorted(seFiles)):
self.__write('\n%d. At %s (%d files): ' % (num, se, len(seFiles[se])))
oSe = StorageElement(se)
notFound = 0
for surlChunk in breakListIntoChunks(seFiles[se], chunkSize):
self.__write('.')
metadata = oSe.getFileMetadata(surlChunk)
if not metadata['OK']:
gLogger.error("Error: getFileMetadata returns %s. Ignore those replicas" % (
metadata['Message']))
# Remove from list of replicas as we don't know whether it is OK or
# not
for lfn in seFiles[se]:
lfnNoInfo.setdefault(lfn, []).append(se)
else:
metadata = metadata['Value']
notFound += len(metadata['Failed'])
for lfn in metadata['Failed']:
lfnNotExisting.setdefault(lfn, []).append(se)
for lfn in metadata['Successful']:
checkSum.setdefault(
lfn, {})[se] = metadata['Successful'][lfn]['Checksum']
if notFound:
gLogger.error('%d files not found' % notFound)
gLogger.setLevel(logLevel)
gLogger.notice('Verifying checksum of %d files' % len(replicas))
for lfn in replicas:
# get the lfn checksum from the FC
replicaDict = replicas[lfn]
oneGoodReplica = False
allGoodReplicas = True
fcChecksum = csDict[lfn].pop('FCChecksum')
for se in replicaDict:
# If replica doesn't exist skip check
if se in lfnNotExisting.get(lfn, []):
allGoodReplicas = False
continue
if se in lfnNoInfo.get(lfn, []):
# If there is no info, a priori it could be good
oneGoodReplica = True
continue
# get the surls metadata and compare the checksum
surlChecksum = checkSum.get(lfn, {}).get(se, '')
if not surlChecksum or not compareAdler(fcChecksum, surlChecksum):
# if fcChecksum does not match surlChecksum
csDict[lfn][se] = {'PFNChecksum': surlChecksum}
gLogger.info("ERROR!! checksum mismatch at %s for LFN %s: FC checksum: %s , PFN checksum : %s "
% (se, lfn, fcChecksum, surlChecksum))
allGoodReplicas = False
else:
oneGoodReplica = True
if not oneGoodReplica:
if lfn in lfnNotExisting:
gLogger.info("=> All replicas are missing", lfn)
retDict['MissingAllReplicas'][lfn] = 'All'
else:
gLogger.info("=> All replicas have bad checksum", lfn)
retDict['AllReplicasCorrupted'][lfn] = csDict[lfn]
elif not allGoodReplicas:
if lfn in lfnNotExisting:
gLogger.info("=> At least one replica missing", lfn)
retDict['MissingReplica'][lfn] = lfnNotExisting[lfn]
else:
gLogger.info("=> At least one replica with good Checksum", lfn)
retDict['SomeReplicasCorrupted'][lfn] = csDict[lfn]
return S_OK(retDict)
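  # Usage sketch (hypothetical instance `ci` of this class):
  #   res = ci.compareChecksum(lfns)
  #   if res['OK']:
  #     badFiles = res['Value']['AllReplicasCorrupted']
  #     partiallyBad = res['Value']['SomeReplicasCorrupted']
  # The five keys of the returned dictionary are listed at the top of the method.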
##########################################################################
# properties
def set_prod(self, value):
""" Setter """
if value:
value = int(value)
res = self.transClient.getTransformation(value, extraParams=False)
      if not res['OK']:
        # a property setter cannot return a result, so log the failure instead
        # of building an S_ERROR that was previously discarded
        gLogger.error("Couldn't find transformation %d: %s" %
                      (value, res['Message']))
else:
self.transType = res['Value']['Type']
if self.interactive:
gLogger.info("Production %d has type %s" % (value, self.transType))
else:
value = 0
self._prod = value
def get_prod(self):
""" Getter """
return self._prod
prod = property(get_prod, set_prod)
def set_fileType(self, value):
""" Setter """
self._fileType = [ft.upper() for ft in value]
def get_fileType(self):
""" Getter """
return self._fileType
fileType = property(get_fileType, set_fileType)
def set_fileTypesExcluded(self, value):
""" Setter """
self._fileTypesExcluded = [ft.upper() for ft in value]
def get_fileTypesExcluded(self):
""" Getter """
return self._fileTypesExcluded
fileTypesExcluded = property(get_fileTypesExcluded, set_fileTypesExcluded)
def set_lfns(self, value):
""" Setter """
if isinstance(value, basestring):
value = [value]
value = [v.replace(' ', '').replace('//', '/') for v in value]
self._lfns = value
def get_lfns(self):
""" Getter """
return self._lfns
lfns = property(get_lfns, set_lfns)
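  # Note on the setter above: input is normalized, so (hypothetical LFN)
  #   ci.lfns = '/lhcb//some file.dst'
  # stores ['/lhcb/somefile.dst'] (spaces removed, double slashes collapsed).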
##########################################################################
#
# This part was backported from DataIntegrityClient
#
#
# This section contains the specific methods for File Catalog->SE checks
#
def catalogDirectoryToSE(self, lfnDir):
""" This obtains the replica and metadata information from the catalog
for the supplied directory and checks against the storage elements.
"""
gLogger.info("-" * 40)
gLogger.info("Performing the FC->SE check")
gLogger.info("-" * 40)
if isinstance(lfnDir, basestring):
lfnDir = [lfnDir]
res = self._getCatalogDirectoryContents(lfnDir)
if not res['OK']:
return res
replicas = res['Value']['Replicas']
catalogMetadata = res['Value']['Metadata']
res = self.checkPhysicalFiles(replicas, catalogMetadata)
if not res['OK']:
return res
resDict = {'CatalogMetadata': catalogMetadata, 'CatalogReplicas': replicas}
return S_OK(resDict)
def catalogFileToSE(self, lfns):
""" This obtains the replica and metadata information from the catalog and checks against the storage elements.
"""
gLogger.info("-" * 40)
gLogger.info("Performing the FC->SE check")
gLogger.info("-" * 40)
if isinstance(lfns, basestring):
lfns = [lfns]
res = self._getCatalogMetadata(lfns)
if not res['OK']:
return res
catalogMetadata, _missingCatalogFiles, _zeroSizeFiles = res['Value']
res = self._getCatalogReplicas(catalogMetadata.keys())
if not res['OK']:
return res
replicas, _zeroReplicaFiles = res['Value']
res = self.checkPhysicalFiles(replicas, catalogMetadata)
if not res['OK']:
return res
resDict = {'CatalogMetadata': catalogMetadata, 'CatalogReplicas': replicas}
return S_OK(resDict)
def checkPhysicalFiles(self, replicas, catalogMetadata, ses=None):
""" This method takes the supplied replica and metadata information obtained
from the catalog and checks against the storage elements.
"""
# FIXME: we better use the compareChecksum function instead of this one!
# or maybe directly checkFC2SE
gLogger.info("-" * 40)
gLogger.info("Performing the FC->SE check")
gLogger.info("-" * 40)
seLfns = {}
for lfn, replicaDict in replicas.iteritems():
for se, _url in replicaDict.iteritems():
        if ses and se not in ses:
continue
seLfns.setdefault(se, []).append(lfn)
gLogger.info('%s %s' % ('Storage Element'.ljust(20), 'Replicas'.rjust(20)))
for se in sorted(seLfns):
files = len(seLfns[se])
gLogger.info('%s %s' % (se.ljust(20), str(files).rjust(20)))
lfns = seLfns[se]
sizeMismatch = []
res = self.__checkPhysicalFileMetadata(lfns, se)
if not res['OK']:
gLogger.error('Failed to get physical file metadata.', res['Message'])
return res
for lfn, metadata in res['Value'].iteritems():
if lfn in catalogMetadata:
# and ( metadata['Size'] != 0 ):
if metadata['Size'] != catalogMetadata[lfn]['Size']:
sizeMismatch.append(
(lfn, 'deprecatedUrl', se, 'CatalogPFNSizeMismatch'))
if sizeMismatch:
self.dic.reportProblematicReplicas(
sizeMismatch, se, 'CatalogPFNSizeMismatch')
return S_OK()
def __checkPhysicalFileMetadata(self, lfns, se):
""" Check obtain the physical file metadata and check the files are available
"""
gLogger.info('Checking the integrity of %s physical files at %s' %
(len(lfns), se))
res = StorageElement(se).getFileMetadata(lfns)
if not res['OK']:
gLogger.error('Failed to get metadata for lfns.', res['Message'])
return res
pfnMetadata = res['Value']['Successful']
# If the replicas are completely missing
missingReplicas = []
for lfn, reason in res['Value']['Failed'].iteritems():
if re.search('File does not exist', reason):
missingReplicas.append((lfn, 'deprecatedUrl', se, 'PFNMissing'))
if missingReplicas:
self.dic.reportProblematicReplicas(missingReplicas, se, 'PFNMissing')
lostReplicas = []
unavailableReplicas = []
zeroSizeReplicas = []
# If the files are not accessible
for lfn, metadata in pfnMetadata.iteritems():
if metadata.get('Lost'):
lostReplicas.append((lfn, 'deprecatedUrl', se, 'PFNLost'))
if metadata.get('Unavailable') or not metadata['Accessible']:
unavailableReplicas.append(
(lfn, 'deprecatedUrl', se, 'PFNUnavailable'))
if not metadata['Size']:
zeroSizeReplicas.append((lfn, 'deprecatedUrl', se, 'PFNZeroSize'))
if lostReplicas:
self.dic.reportProblematicReplicas(lostReplicas, se, 'PFNLost')
if unavailableReplicas:
self.dic.reportProblematicReplicas(
unavailableReplicas, se, 'PFNUnavailable')
if zeroSizeReplicas:
self.dic.reportProblematicReplicas(zeroSizeReplicas, se, 'PFNZeroSize')
gLogger.info(
'Checking the integrity of physical files at %s complete' % se)
return S_OK(pfnMetadata)
##########################################################################
#
# This section contains the specific methods for SE->File Catalog checks
#
def _getCatalogDirectoryContents(self, lfnDirs):
""" Obtain the contents of the supplied directory, recursively
"""
def _getDirectoryContent(directory):
""" Inner function: recursively scan a directory, returns list of LFNs
"""
filesInDirectory = {}
gLogger.debug("Examining %s" % directory)
res = self.fileCatalog.listDirectory(directory)
if not res['OK']:
gLogger.error('Failed to get directory contents', res['Message'])
return res
if directory in res['Value']['Failed']:
gLogger.error('Failed to get directory content', '%s %s' %
(directory, res['Value']['Failed'][directory]))
return S_ERROR('Failed to get directory content')
      if directory not in res['Value']['Successful']:
        return S_ERROR('Directory does not exist')
# first, adding the files found in the current directory
gLogger.debug("Files in %s: %d" % (directory, len(
res['Value']['Successful'][directory]['Files'])))
filesInDirectory.update(res['Value']['Successful'][directory]['Files'])
# then, looking for subDirectories content
if res['Value']['Successful'][directory]['SubDirs']:
for l_dir in res['Value']['Successful'][directory]['SubDirs']:
# recursion here
subDirContent = _getDirectoryContent(l_dir)
if not subDirContent['OK']:
return subDirContent
else:
filesInDirectory.update(subDirContent['Value'])
return S_OK(filesInDirectory)
gLogger.info(
'Obtaining the catalog contents for %d directories' % len(lfnDirs))
allFiles = {}
for lfnDir in lfnDirs:
dirContent = _getDirectoryContent(lfnDir)
if not dirContent['OK']:
return dirContent
else:
gLogger.debug("Content of directory %s: %d files" %
(lfnDir, len(dirContent['Value'])))
allFiles.update(dirContent['Value'])
gLogger.debug("Content of directories examined: %d files" % len(allFiles))
replicas = self.fileCatalog.getReplicas(list(allFiles))
if not replicas['OK']:
return replicas
if replicas['Value']['Failed']:
return S_ERROR("Failures in replicas discovery")
return S_OK({'Metadata': allFiles, 'Replicas': replicas['Value']['Successful']})
def _getCatalogReplicas(self, lfns):
""" Obtain the file replicas from the catalog while checking that there are replicas
"""
gLogger.info('Obtaining the replicas for %s files' % len(lfns))
zeroReplicaFiles = []
res = self.fileCatalog.getReplicas(lfns, allStatus=True)
if not res['OK']:
gLogger.error('Failed to get catalog replicas', res['Message'])
return res
allReplicas = res['Value']['Successful']
for lfn, error in res['Value']['Failed'].iteritems():
if re.search('File has zero replicas', error):
zeroReplicaFiles.append(lfn)
gLogger.info('Obtaining the replicas for files complete')
return S_OK((allReplicas, zeroReplicaFiles))
def _getCatalogMetadata(self, lfns):
""" Obtain the file metadata from the catalog while checking they exist
"""
if not lfns:
return S_OK({})
gLogger.info('Obtaining the catalog metadata for %s files' % len(lfns))
missingCatalogFiles = []
zeroSizeFiles = []
res = self.fileCatalog.getFileMetadata(lfns)
if not res['OK']:
gLogger.error('Failed to get catalog metadata', res['Message'])
return res
    allMetadata = res['Value']['Successful']
    for lfn, error in res['Value']['Failed'].iteritems():
      if re.search('No such file or directory', error):
        missingCatalogFiles.append(lfn)
    # flag catalog entries registered with zero size, so that the third element
    # of the returned tuple is actually filled
    for lfn, metadata in allMetadata.iteritems():
      if not metadata.get('Size'):
        zeroSizeFiles.append(lfn)
    gLogger.info('Obtaining the catalog metadata complete')
return S_OK((allMetadata, missingCatalogFiles, zeroSizeFiles))
|
fstagni/DIRAC
|
DataManagementSystem/Client/ConsistencyInspector.py
|
Python
|
gpl-3.0
| 28,413
|
[
"DIRAC"
] |
2726d2512aa3eb3fb63c3b15589e9d81576aa137049f7bccec362166db61b4c7
|
"""Tools easing the work with OpenBabel
Copyright (C) 2010 Troels Kofoed Jacobsen
Code released under GPLv2 (or later). See COPYING for details.
"""
import numpy as np
import pybel
import openbabel as ob
from ase import Atom, Atoms, units
from obcalc.obwrap import forces_ni
def get_forces(mol):
N = mol.NumAtoms()
f = np.zeros((N, 3))
for i in range(N):
for j in range(3):
f[i, j] = forces_ni(mol, i, j)
return f
def add_bonds(mol):
"""Automatically add bonds to molecule"""
mol.ConnectTheDots()
mol.PerceiveBondOrders()
return mol
def atoms_to_obmol(atoms, bonds=None):
"""Convert an Atoms object to an OBMol object.
Parameters
==========
atoms: Atoms
bonds: list of lists of 3xint
Define bonds between atoms such as:
[[begin atom index, end atom index, bond order],
...
]
        If None, OpenBabel will try to construct the bonds
        automatically.
"""
mol = ob.OBMol()
for atom in atoms:
a = mol.NewAtom()
a.SetAtomicNum(int(atom.number))
a.SetVector(atom.position[0], atom.position[1], atom.position[2])
if bonds is None:
mol = add_bonds(mol)
else:
for bond in bonds:
mol.AddBond(bond[0] + 1, bond[1] + 1, bond[2])
return mol
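# Usage sketch (hypothetical geometry), relying on the ASE imports above:
#   h2 = Atoms('H2', positions=[(0, 0, 0), (0, 0, 0.74)])
#   mol = atoms_to_obmol(h2, bonds=[[0, 1, 1]])  # one single bond between atoms 0 and 1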
def obmol_to_atoms(mol, return_bonds=False):
"""Convert an OBMol object to an Atoms object.
Parameters
==========
mol: OBMol
return_bonds: bool
If True, a list of list of 3xint describing the bonds will be returned.
"""
atoms = Atoms()
for i in range(mol.NumAtoms()):
obatom = mol.GetAtom(i + 1)
atoms.append(Atom(obatom.GetAtomicNum(),
[obatom.GetX(),
obatom.GetY(),
obatom.GetZ()]
)
)
if return_bonds:
return atoms, get_bonds(mol)
else:
return atoms
def get_bonds(mol):
if isinstance(mol, Atoms):
mol = atoms_to_obmol(mol)
bonds = []
for i in range(mol.NumBonds()):
obbond = mol.GetBond(i)
bond = [obbond.GetBeginAtomIdx() - 1,
obbond.GetEndAtomIdx() - 1,
obbond.GetBondOrder()]
bonds.append(bond)
return bonds
def build_molecule(smi, return_bonds=False, relax=True):
"""Build molecule from SMILES string.
Parameters
==========
smi: str
SMILES string specifying the molecule to build
return_bonds: bool
If True, a list of list of 3xint describing the bonds will be returned.
relax: bool
If True, the geometry will be relaxed.
See the wikipedia article for info:
http://en.wikipedia.org/wiki/Simplified_molecular_input_line_entry_specification
"""
obconv = ob.OBConversion()
obconv.SetInAndOutFormats("smi", "mol")
mol = ob.OBMol()
obconv.ReadString(mol, smi)
# Create 3D geometry
obbuild = ob.OBBuilder()
obbuild.Build(mol)
mol.AddHydrogens()
if relax:
ff = ob.OBForceField.FindForceField('UFF')
ff.Setup(mol)
        ff.ConjugateGradients(250, 1.0e-4)
        ff.UpdateCoordinates(mol)
return obmol_to_atoms(mol, return_bonds=return_bonds)
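# Minimal demo sketch (hypothetical molecule), runnable when OpenBabel/pybel
# and ASE are installed; not part of the original module:
if __name__ == '__main__':
    # Build ethanol from its SMILES string, relax it with UFF, and report sizes
    atoms, bonds = build_molecule('CCO', return_bonds=True, relax=True)
    print('%d atoms, %d bonds' % (len(atoms), len(bonds)))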
|
tkjacobsen/obcalc
|
obcalc/tools.py
|
Python
|
gpl-2.0
| 3,308
|
[
"ASE",
"Pybel"
] |
fc7b1673a808693fd2a7170ef1e31ba8abb3541b00637c92444e64ee036a305c
|
# -*- coding: utf-8 -*-
u"""
Functions to mask spatial coordinates.
“Geographic masking is the process of altering the coordinates of point
location data to limit the risk of reidentification upon release of the
data. In effect, the purpose of geographic masking is to make it much
more difficult to accurately reverse geocode the released data.”
-- from `chapter 6 of Ensuring Confidentiality of Geocoded Health Data:
Assessing Geographic Masking Strategies for Individual-Level Data
<https://www.hindawi.com/journals/amed/2014/567049/#sec6>`_
Some implementations are inspired by `chapter 7 of Ensuring Confidentiality of
Geocoded Health Data: Assessing Geographic Masking Strategies for Individual-
Level Data <https://www.hindawi.com/journals/amed/2014/567049/#sec7>`_.
"""
from geopy.point import Point
import math
import random
def _random_angle_in_radians():
# This implementation uses human readable degrees with reduced precision
# random.uniform(0, 360) * math.pi / 180
    # We simplify the implementation by calculating directly in radians
return random.uniform(0, 2) * math.pi
def limit_precision(point, precisions=(None, None, None)):
"""
Masked points have a limited precision, hence we cut decimal places.
    In the given `precisions` tuple positive integers simply cut decimal places
    after the comma. Negative integers define the number of decimal places to
    keep before the comma, while cutting all decimal places after the comma. In
both cases standard mathematical rounding is applied to the first digit
which won't be cut, before cutting the other digits. The first value is
the longitude's precision, the second value is the latitude's precision and
the third value is the altitude's precision.
If we define a coordinate like ...
>>> coordinate = Point(12.3456, 12.3456, 123.456)
... and call this function like ...
>>> limit_precision(coordinate)
Point(12.3456, 12.3456, 123.456)
>>> limit_precision(coordinate, (0, 0, 0))
Point(12.3456, 12.3456, 123.456)
>>> limit_precision(coordinate, (None, None, None))
Point(12.3456, 12.3456, 123.456)
... the given point returns unchanged.
If we assign to the second parameter tuple some positive integers the given
point will return with altered decimal places after the comma and rounding
applied:
>>> limit_precision(coordinate, (1, 1, 1))
Point(12.3, 12.3, 123.5)
>>> limit_precision(coordinate, (2, 2, 2))
Point(12.35, 12.35, 123.46)
>>> limit_precision(coordinate, (3, 3, 3))
Point(12.346, 12.346, 123.456)
Assigning negative integers to the second parameter tuple will return a
point with altered decimal places before the comma and all decimal places
cut after the comma:
>>> limit_precision(coordinate, (-1, -1, -1))
Point(10.0, 10.0, 120.0)
>>> limit_precision(coordinate, (-2, -2, -2))
Point(10.0, 10.0, 100.0)
    Mind that values with fewer digits before the comma than the given absolute
    precision keep the last remaining digit intact.
>>> limit_precision(coordinate, (-3, -3, -3))
Point(10.0, 10.0, 100.0)
The mathematical rounding can lead to unexpected results, if applied before
the comma:
>>> limit_precision(Point(65.4321, 65.4321, 654.321), (-1, -1, -1))
Point(70.0, 70.0, 650.0)
>>> limit_precision(Point(65.4321, 65.4321, 654.321), (-2, -2, -2))
Point(70.0, 70.0, 700.0)
"""
def _limit(value, precision):
if 0 < precision:
value = round(value, precision)
elif 0 > precision:
calculus = 10 ** (-1 * precision)
if calculus < value:
value = round(value / calculus) * calculus
else:
value = _limit(value, precision + 1)
return value
return Point(
_limit(point[0], precisions[0] or 0),
_limit(point[1], precisions[1] or 0),
_limit(point[2], precisions[2] or 0)
)
def add_vector(point, vector=(None, None, None)):
"""
Masked points are displaced by a fixed vector, hence we move the point.
If we define a coordinate like ...
>>> coordinate = Point(12.3456, 12.3456, 12.3456)
... and call this function like ...
>>> add_vector(coordinate)
Point(12.3456, 12.3456, 12.3456)
>>> add_vector(coordinate, (0, 0, 0))
Point(12.3456, 12.3456, 12.3456)
>>> add_vector(coordinate, (None, None, None))
Point(12.3456, 12.3456, 12.3456)
... the given point returns unchanged.
In all other cases the given point will be moved by the given values:
>>> add_vector(coordinate, (1.0, 1.0, 1.0))
Point(13.3456, 13.3456, 13.3456)
Mind that geodesic points with latitude, longitude, and altitude are used.
This results in points with limited value range, hence we rotate the points
around the globe:
>>> add_vector(coordinate, (100.0, 100.0, 100.0))
Point(-67.65440000000001, 112.3456, 112.3456)
>>> add_vector(coordinate, (-100.0, -100.0, -100.0))
Point(-87.6544, -87.6544, -87.6544)
"""
return Point(
point[0] + (vector[0] or 0.0),
point[1] + (vector[1] or 0.0),
point[2] + (vector[2] or 0.0)
)
def displace_on_a_circle(point, radius=0.0):
"""
Masked points are placed on a random location on a circle around the
original location. Masked points are not placed inside the circle itself.
If we define a coordinate like ...
>>> coordinate = Point(0.0, 0.0, 0.0)
... and call this function without any radius ...
>>> displace_on_a_circle(coordinate)
Point(0.0, 0.0, 0.0)
>>> displace_on_a_circle(coordinate, 0)
Point(0.0, 0.0, 0.0)
>>> displace_on_a_circle(coordinate, None)
Point(0.0, 0.0, 0.0)
... the given point returns unchanged.
With a given radius, a randomly circular displaced point will return. That
    implies the altitude always remains untouched. The given coordinates are
    the circle's center, the given radius is the distance between given and
    resulting coordinate:
>>> displace_on_a_circle(coordinate, 1) # doctest: +ELLIPSIS
Point(..., 0.0)
>>> displace_on_a_circle(coordinate, -1) # doctest: +ELLIPSIS
Point(..., 0.0)
>>> random.seed(1)
>>> displace_on_a_circle(coordinate, 1)
Point(0.7474634341555553, 0.6643029539301958, 0.0)
"""
    if radius is None or radius == 0:
return point
elif 0 > radius:
radius *= -1
a = _random_angle_in_radians()
x = math.cos(a) * radius
y = math.sin(a) * radius
# beware that longitude is x and latitude is y !
return add_vector(point, (y, x, 0))
def displace_on_a_sphere(point, radius=0.0):
"""
Masked points are placed on a random location on a sphere around the
original location. Masked points are not placed inside the sphere itself.
If we define a coordinate like ...
>>> coordinate = Point(0.0, 0.0, 0.0)
... and call this function without any radius ...
>>> displace_on_a_sphere(coordinate)
Point(0.0, 0.0, 0.0)
>>> displace_on_a_sphere(coordinate, 0)
Point(0.0, 0.0, 0.0)
>>> displace_on_a_sphere(coordinate, None)
Point(0.0, 0.0, 0.0)
... the given point returns unchanged.
With a given radius, a randomly spherical displaced point will return. The
    given coordinates are the sphere's center, the given radius is the distance
    between given and resulting coordinate:
>>> displace_on_a_sphere(coordinate, 1) # doctest: +ELLIPSIS
Point(...)
>>> displace_on_a_sphere(coordinate, -1) # doctest: +ELLIPSIS
Point(...)
>>> random.seed(1)
>>> displace_on_a_sphere(coordinate, 1)
Point(-0.6117158867827159, -0.5436582607079324, 0.5746645712253897)
"""
    if radius is None or radius == 0:
return point
elif 0 > radius:
radius *= -1
a1 = _random_angle_in_radians()
a2 = _random_angle_in_radians()
x = math.cos(a1) * math.sin(a2) * radius
y = math.sin(a1) * math.sin(a2) * radius
z = math.cos(a2) * radius
# beware that longitude is x and latitude is y !
return add_vector(point, (y, x, z))
def displace_within_a_circle(point, radius=0.0):
"""
Masked locations are placed anywhere within a circular area around the
original location. Since every location within the circle is equally
likely, masked locations are more likely to be placed at larger distances
compared to small distances. A variation on this technique is the use of
random direction and random radius. In this technique, masked points are
displaced using a vector with random direction and random radius. The
radius is constrained by a maximum value. This effectively results in a
circular area where masked locations can be placed, but the masked
locations are as likely to be at large distances compared to small
distances. These two techniques therefore only differ slightly in the
probability of how close masked locations are placed to the original
locations.
If we define a coordinate like ...
>>> coordinate = Point(0.0, 0.0, 0.0)
... and call this function without any radius ...
>>> displace_within_a_circle(coordinate)
Point(0.0, 0.0, 0.0)
>>> displace_within_a_circle(coordinate, 0)
Point(0.0, 0.0, 0.0)
>>> displace_within_a_circle(coordinate, None)
Point(0.0, 0.0, 0.0)
... the given point returns unchanged.
With a given radius, a randomly circular displaced point will return. That
    implies the altitude always remains untouched. The given coordinates are
    the circle's center, the given radius is the maximum distance between given
    and resulting coordinate:
>>> displace_within_a_circle(coordinate, 1) # doctest: +ELLIPSIS
Point(..., 0.0)
>>> displace_within_a_circle(coordinate, -1) # doctest: +ELLIPSIS
Point(..., 0.0)
>>> random.seed(1)
>>> displace_within_a_circle(coordinate, 1)
Point(-0.10996222555283103, 0.07721437073087664, 0.0)
"""
    if radius is None or radius == 0:
return point
elif 0 > radius:
radius *= -1
radius = random.uniform(0, radius)
return displace_on_a_circle(point, radius)
def displace_within_a_sphere(point, radius=0.0):
"""
Masked locations are placed anywhere within a spherical space around the
original location. Since every location within the sphere is equally
likely, masked locations are more likely to be placed at larger distances
compared to small distances. A variation on this technique is the use of
random direction and random radius. In this technique, masked points are
displaced using a vector with random direction and random radius. The
radius is constrained by a maximum value. This effectively results in a
spherical space where masked locations can be placed, but the masked
locations are as likely to be at large distances compared to small
distances. These two techniques therefore only differ slightly in the
probability of how close masked locations are placed to the original
locations.
If we define a coordinate like ...
>>> coordinate = Point(0.0, 0.0, 0.0)
    ... and call this function without any radius ...
>>> displace_within_a_sphere(coordinate)
Point(0.0, 0.0, 0.0)
>>> displace_within_a_sphere(coordinate, 0)
Point(0.0, 0.0, 0.0)
>>> displace_within_a_sphere(coordinate, None)
Point(0.0, 0.0, 0.0)
... the given point returns unchanged.
With a given radius, a randomly spherical displaced point will return. The
    given coordinates are the sphere's center, the given radius is the maximum
    distance between given and resulting coordinate:
>>> displace_within_a_sphere(coordinate, 1) # doctest: +ELLIPSIS
Point(...)
>>> displace_within_a_sphere(coordinate, -1) # doctest: +ELLIPSIS
Point(...)
>>> random.seed(1)
>>> displace_within_a_sphere(coordinate, 1)
Point(0.10955063884671598, -0.07692535867829137, 0.011614508874230087)
"""
    if radius is None or radius == 0:
return point
elif 0 > radius:
radius *= -1
radius = random.uniform(0, radius)
return displace_on_a_sphere(point, radius)
def displace_within_a_circular_donut(point,
radius_inner=0.5,
radius_outer=1.0):
"""
This technique is similar to random displacement within a circle, but a
smaller internal circle is utilized within which displacement is not
allowed. In effect, this sets a minimum and maximum level for the
displacement. Masked locations are placed anywhere within the allowable
area. A slightly different approach to donut masking is the use of a
random direction and two random radii: one for maximum and one for minimum
displacement. These two techniques only differ slightly in the probability
of how close masked locations are placed to the original locations. Both
approaches enforce a minimum amount of displacement.
With a given radius, a randomly circular displaced point will return. That
    implies the altitude always remains untouched. The given coordinates are
    the circle's center, the given radii are the minimum and maximum distance
    between given and resulting coordinate:
>>> coordinate = Point(0.0, 0.0, 0.0)
>>> random.seed(1)
>>> displace_within_a_circular_donut(coordinate)
Point(-0.4641756357658914, 0.32593947097813314, 0.0)
>>> random.seed(1)
>>> displace_within_a_circular_donut(coordinate, 0.5, 1.0)
Point(-0.4641756357658914, 0.32593947097813314, 0.0)
"""
radius = random.uniform(radius_inner, radius_outer)
return displace_on_a_circle(point, radius)
def displace_within_a_spherical_donut(point,
radius_inner=0.5,
radius_outer=1.0):
"""
This technique is similar to random displacement within a sphere, but a
smaller internal sphere is utilized within which displacement is not
allowed. In effect, this sets a minimum and maximum level for the
displacement. Masked locations are placed anywhere within the allowable
space. A slightly different approach to donut masking is the use of a
random direction and two random radii: one for maximum and one for minimum
displacement. These two techniques only differ slightly in the probability
of how close masked locations are placed to the original locations. Both
approaches enforce a minimum amount of displacement.
With a given radius, a randomly spherical displaced point will return. The
    given coordinates are the sphere's center, the given radii are the minimum
    and maximum distance between given and resulting coordinate:
>>> coordinate = Point(0.0, 0.0, 0.0)
>>> random.seed(1)
>>> displace_within_a_spherical_donut(coordinate)
Point(0.4624382343989833, -0.32471948518229893, 0.049027491156171304)
>>> random.seed(1)
>>> displace_within_a_spherical_donut(coordinate, 0.5, 1.0)
Point(0.4624382343989833, -0.32471948518229893, 0.049027491156171304)
"""
radius = random.uniform(radius_inner, radius_outer)
return displace_on_a_sphere(point, radius)
def circular_gaussian_displacement(point, mu=1.0, sigma=1.0):
"""
The direction of displacement is random, but the distance follows a
Gaussian distribution, where `mu` is the mean and `sigma` is the standard
deviation. The dispersion of the distribution can be varied based on other
parameters of interest, such as local population density.
With a given radius, a randomly circular displaced point will return. That
    implies the altitude always remains untouched. The given coordinates are
    the circle's center:
>>> coordinate = Point(0.0, 0.0, 0.0)
>>> random.seed(1)
>>> circular_gaussian_displacement(coordinate)
Point(-2.279620117247094, 0.19779177337662887, 0.0)
>>> random.seed(1)
>>> circular_gaussian_displacement(coordinate, 1.0, 1.0)
Point(-2.279620117247094, 0.19779177337662887, 0.0)
"""
radius = random.gauss(mu, sigma)
return displace_on_a_circle(point, radius)
def spherical_gaussian_displacement(point, mu=1.0, sigma=1.0):
"""
The direction of displacement is random, but the distance follows a
Gaussian distribution, where `mu` is the mean and `sigma` is the standard
deviation. The dispersion of the distribution can be varied based on other
parameters of interest, such as local population density.
With a given radius, a randomly spherical displaced point will return. The
    given coordinates are the sphere's center:
>>> coordinate = Point(0.0, 0.0, 0.0)
>>> random.seed(1)
>>> spherical_gaussian_displacement(coordinate)
Point(-2.278463993019554, 0.19769146198725365, -0.07286551271936394)
>>> random.seed(1)
>>> spherical_gaussian_displacement(coordinate, 1.0, 1.0)
Point(-2.278463993019554, 0.19769146198725365, -0.07286551271936394)
"""
radius = random.gauss(mu, sigma)
return displace_on_a_sphere(point, radius)
def circular_bimodal_gaussian_displacement(point,
inner_mu=1.0,
inner_sigma=1.0,
outer_mu=2.0,
outer_sigma=1.0):
"""
This is a variation on the Gaussian masking technique, employing a bimodal
Gaussian distribution for the random distance function. In effect, this
approximates donut masking, but with a less uniform probability of
placement.
With a given radius, a randomly circular displaced point will return. That
    implies the altitude always remains untouched. The given coordinates are
    the circle's center:
>>> coordinate = Point(0.0, 0.0, 0.0)
>>> random.seed(1)
>>> circular_bimodal_gaussian_displacement(coordinate)
Point(3.1735160345853632, -0.10110949606778825, 0.0)
>>> random.seed(1)
>>> circular_bimodal_gaussian_displacement(coordinate,
... 1.0, 1.0, 2.0, 1.0)
Point(3.1735160345853632, -0.10110949606778825, 0.0)
"""
inner_radius = random.gauss(inner_mu, inner_sigma)
outer_radius = random.gauss(outer_mu, outer_sigma)
return displace_within_a_circular_donut(point, inner_radius, outer_radius)
def spherical_bimodal_gaussian_displacement(point,
inner_mu=1.0,
inner_sigma=1.0,
outer_mu=2.0,
outer_sigma=1.0):
"""
This is a variation on the Gaussian masking technique, employing a bimodal
Gaussian distribution for the random distance function. In effect, this
approximates donut masking, but with a less uniform probability of
placement.
With a given radius, a randomly spherical displaced point will return. The
    given coordinates are the sphere's center:
>>> coordinate = Point(0.0, 0.0, 0.0)
>>> random.seed(1)
>>> spherical_bimodal_gaussian_displacement(coordinate)
Point(0.09101092182341257, -0.002899644539981792, -3.173820372380246)
>>> random.seed(1)
>>> spherical_bimodal_gaussian_displacement(coordinate,
... 1.0, 1.0, 2.0, 1.0)
Point(0.09101092182341257, -0.002899644539981792, -3.173820372380246)
"""
inner_radius = random.gauss(inner_mu, inner_sigma)
outer_radius = random.gauss(outer_mu, outer_sigma)
return displace_within_a_spherical_donut(point, inner_radius, outer_radius)
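if __name__ == '__main__':
    # Convenience hook (not part of the original module): run the doctests
    # embedded in the docstrings above.
    import doctest
    doctest.testmod()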
|
sjorek/geoanonymizer.py
|
geoanonymizer/spatial/mask.py
|
Python
|
mit
| 20,449
|
[
"Gaussian"
] |
0b470573270f4f7504b7ae4a776053ab9f689a574518f9d671af995d3d033cb0
|
# $Id$
#
# Copyright (c) 2007-2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, July 2007
#
from __future__ import print_function
_version = "0.14.0"
_usage="""
SearchDb [optional arguments] <sdfilename>
The sd filename argument can be either an SD file or an MDL mol
file.
NOTES:
- The property names may have been altered on loading the
database. Any non-alphanumeric character in a property name
will be replaced with '_'. e.g."Gold.Goldscore.Constraint.Score" becomes
"Gold_Goldscore_Constraint_Score".
- Property names are not case sensitive in the database.
"""
from rdkit import RDConfig
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.RDLogger import logger
logger=logger()
import zlib
from rdkit import Chem
from rdkit.Chem.MolDb.FingerprintUtils import supportedSimilarityMethods,BuildSigFactory,DepickleFP,LayeredOptions
from rdkit.Chem.MolDb import FingerprintUtils
from rdkit import DataStructs
def _molFromPkl(pkl):
if isinstance(pkl,(bytes,str)):
mol = Chem.Mol(pkl)
else:
mol = Chem.Mol(str(pkl))
return mol
def GetNeighborLists(probes,topN,pool,
simMetric=DataStructs.DiceSimilarity,
simThresh=-1.,
silent=False,
**kwargs):
probeFps = [x[1] for x in probes]
validProbes = [x for x in range(len(probeFps)) if probeFps[x] is not None]
validFps=[probeFps[x] for x in validProbes]
from rdkit.DataStructs.TopNContainer import TopNContainer
if simThresh<=0:
nbrLists = [TopNContainer(topN) for x in range(len(probeFps))]
else:
nbrLists=[TopNContainer(-1) for x in range(len(probeFps))]
nDone=0
for nm,fp in pool:
nDone+=1
if not silent and not nDone%1000: logger.info(' searched %d rows'%nDone)
if(simMetric==DataStructs.DiceSimilarity):
scores = DataStructs.BulkDiceSimilarity(fp,validFps)
for i,score in enumerate(scores):
if score>simThresh:
nbrLists[validProbes[i]].Insert(score,nm)
elif(simMetric==DataStructs.TanimotoSimilarity):
scores = DataStructs.BulkTanimotoSimilarity(fp,validFps)
for i,score in enumerate(scores):
if score>simThresh:
nbrLists[validProbes[i]].Insert(score,nm)
elif(simMetric==DataStructs.TverskySimilarity):
av = float(kwargs.get('tverskyA',0.5))
bv = float(kwargs.get('tverskyB',0.5))
scores = DataStructs.BulkTverskySimilarity(fp,validFps,av,bv)
for i,score in enumerate(scores):
if score>simThresh:
nbrLists[validProbes[i]].Insert(score,nm)
else:
for i in range(len(probeFps)):
pfp = probeFps[i]
if pfp is not None:
score = simMetric(probeFps[i],fp)
if score>simThresh:
nbrLists[validProbes[i]].Insert(score,nm)
return nbrLists
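# The return value above is one TopNContainer per probe fingerprint; a sketch
# of how RunSearch below consumes it:
#   nbrLists = GetNeighborLists(probes, topN, pool)
#   nbrLists[i].reverse(); scores = nbrLists[i].GetPts(); names = nbrLists[i].GetExtras()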
def GetMolsFromSmilesFile(dataFilename,errFile,nameProp):
dataFile=open(dataFilename,'r')
for idx,line in enumerate(dataFile):
    try:
      smi, nm = line.strip().split(' ')
    except ValueError:
      continue
try:
m = Chem.MolFromSmiles(smi)
except:
m=None
    if not m:
      if errFile:
        print(idx, nm, smi, file=errFile)
      continue
yield (nm,smi,m)
def GetMolsFromSDFile(dataFilename,errFile,nameProp):
suppl = Chem.SDMolSupplier(dataFilename)
for idx,m in enumerate(suppl):
if not m:
if errFile:
if hasattr(suppl,'GetItemText'):
d = suppl.GetItemText(idx)
errFile.write(d)
else:
logger.warning('full error file support not complete')
continue
smi = Chem.MolToSmiles(m,True)
if m.HasProp(nameProp):
nm = m.GetProp(nameProp)
if not nm:
logger.warning('molecule found with empty name property')
else:
nm = 'Mol_%d'%(idx+1)
yield nm,smi,m
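# Both readers above are generators yielding (name, smiles, mol) triples; a
# sketch with a hypothetical filename:
#   for nm, smi, m in GetMolsFromSDFile('queries.sdf', None, '_Name'):
#       ...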
def RunSearch(options,queryFilename):
global sigFactory
if options.similarityType=='AtomPairs':
fpBuilder=FingerprintUtils.BuildAtomPairFP
simMetric=DataStructs.DiceSimilarity
dbName = os.path.join(options.dbDir,options.pairDbName)
fpTableName = options.pairTableName
fpColName = options.pairColName
elif options.similarityType=='TopologicalTorsions':
fpBuilder=FingerprintUtils.BuildTorsionsFP
simMetric=DataStructs.DiceSimilarity
dbName = os.path.join(options.dbDir,options.torsionsDbName)
fpTableName = options.torsionsTableName
fpColName = options.torsionsColName
elif options.similarityType=='RDK':
fpBuilder=FingerprintUtils.BuildRDKitFP
simMetric=DataStructs.FingerprintSimilarity
dbName = os.path.join(options.dbDir,options.fpDbName)
fpTableName = options.fpTableName
if not options.fpColName:
options.fpColName='rdkfp'
fpColName = options.fpColName
elif options.similarityType=='Pharm2D':
fpBuilder=FingerprintUtils.BuildPharm2DFP
simMetric=DataStructs.DiceSimilarity
dbName = os.path.join(options.dbDir,options.fpDbName)
fpTableName = options.pharm2DTableName
if not options.fpColName:
options.fpColName='pharm2dfp'
fpColName = options.fpColName
FingerprintUtils.sigFactory = BuildSigFactory(options)
elif options.similarityType=='Gobbi2D':
from rdkit.Chem.Pharm2D import Gobbi_Pharm2D
fpBuilder=FingerprintUtils.BuildPharm2DFP
simMetric=DataStructs.TanimotoSimilarity
dbName = os.path.join(options.dbDir,options.fpDbName)
fpTableName = options.gobbi2DTableName
if not options.fpColName:
options.fpColName='gobbi2dfp'
fpColName = options.fpColName
FingerprintUtils.sigFactory = Gobbi_Pharm2D.factory
elif options.similarityType=='Morgan':
fpBuilder=FingerprintUtils.BuildMorganFP
simMetric=DataStructs.DiceSimilarity
dbName = os.path.join(options.dbDir,options.morganFpDbName)
fpTableName = options.morganFpTableName
fpColName = options.morganFpColName
extraArgs={}
if options.similarityMetric=='tanimoto':
simMetric = DataStructs.TanimotoSimilarity
elif options.similarityMetric=='dice':
simMetric = DataStructs.DiceSimilarity
elif options.similarityMetric=='tversky':
simMetric = DataStructs.TverskySimilarity
extraArgs['tverskyA']=options.tverskyA
extraArgs['tverskyB']=options.tverskyB
if options.smilesQuery:
mol=Chem.MolFromSmiles(options.smilesQuery)
if not mol:
logger.error('could not build query molecule from smiles "%s"'%options.smilesQuery)
sys.exit(-1)
options.queryMol = mol
elif options.smartsQuery:
mol=Chem.MolFromSmarts(options.smartsQuery)
if not mol:
logger.error('could not build query molecule from smarts "%s"'%options.smartsQuery)
sys.exit(-1)
options.queryMol = mol
if options.outF=='-':
outF=sys.stdout
elif options.outF=='':
outF=None
else:
outF = open(options.outF,'w+')
molsOut=False
if options.sdfOut:
molsOut=True
if options.sdfOut=='-':
sdfOut=sys.stdout
else:
sdfOut = open(options.sdfOut,'w+')
else:
sdfOut=None
if options.smilesOut:
molsOut=True
if options.smilesOut=='-':
smilesOut=sys.stdout
else:
smilesOut = open(options.smilesOut,'w+')
else:
smilesOut=None
if queryFilename:
try:
tmpF = open(queryFilename,'r')
except IOError:
logger.error('could not open query file %s'%queryFilename)
sys.exit(1)
if options.molFormat=='smiles':
func=GetMolsFromSmilesFile
elif options.molFormat=='sdf':
func=GetMolsFromSDFile
if not options.silent:
msg='Reading query molecules'
if fpBuilder: msg+=' and generating fingerprints'
logger.info(msg)
probes=[]
i=0
nms=[]
for nm,smi,mol in func(queryFilename,None,options.nameProp):
i+=1
nms.append(nm)
if not mol:
logger.error('query molecule %d could not be built'%(i))
probes.append((None,None))
continue
if fpBuilder:
probes.append((mol,fpBuilder(mol)))
else:
probes.append((mol,None))
if not options.silent and not i%1000:
logger.info(" done %d"%i)
else:
probes=None
conn=None
idName = options.molIdName
ids=None
names=None
molDbName = os.path.join(options.dbDir,options.molDbName)
molIdName = options.molIdName
mConn = DbConnect(molDbName)
cns = [(x.lower(),y) for x,y in mConn.GetColumnNamesAndTypes('molecules')]
idCol,idTyp=cns[0]
if options.propQuery or options.queryMol:
conn = DbConnect(molDbName)
curs = conn.GetCursor()
if options.queryMol:
if not options.silent: logger.info('Doing substructure query')
if options.propQuery:
where='where %s'%options.propQuery
else:
where=''
if not options.silent:
curs.execute('select count(*) from molecules %(where)s'%locals())
nToDo = curs.fetchone()[0]
join=''
doSubstructFPs=False
fpDbName = os.path.join(options.dbDir,options.fpDbName)
if os.path.exists(fpDbName) and not options.negateQuery :
curs.execute("attach database '%s' as fpdb"%(fpDbName))
try:
curs.execute('select * from fpdb.%s limit 1'%options.layeredTableName)
except:
pass
else:
doSubstructFPs=True
join = 'join fpdb.%s using (%s)'%(options.layeredTableName,idCol)
query = LayeredOptions.GetQueryText(options.queryMol)
if query:
if not where:
where='where'
else:
where += ' and'
where += ' '+query
cmd = 'select %(idCol)s,molpkl from molecules %(join)s %(where)s'%locals()
curs.execute(cmd)
row=curs.fetchone()
nDone=0
ids=[]
while row:
id,molpkl = row
if not options.zipMols:
m = _molFromPkl(molpkl)
else:
m = Chem.Mol(zlib.decompress(molpkl))
matched=m.HasSubstructMatch(options.queryMol)
if options.negateQuery:
matched = not matched
if matched:
ids.append(id)
nDone+=1
if not options.silent and not nDone%500:
if not doSubstructFPs:
logger.info(' searched %d (of %d) molecules; %d hits so far'%(nDone,nToDo,len(ids)))
else:
logger.info(' searched through %d molecules; %d hits so far'%(nDone,len(ids)))
row=curs.fetchone()
if not options.silent and doSubstructFPs and nToDo:
nFiltered = nToDo-nDone
logger.info(' Fingerprint screenout rate: %d of %d (%%%.2f)'%(nFiltered,nToDo,100.*nFiltered/nToDo))
elif options.propQuery:
if not options.silent: logger.info('Doing property query')
propQuery=options.propQuery.split(';')[0]
curs.execute('select %(idCol)s from molecules where %(propQuery)s'%locals())
ids = [x[0] for x in curs.fetchall()]
if not options.silent:
logger.info('Found %d molecules matching the query'%(len(ids)))
t1=time.time()
if probes:
if not options.silent: logger.info('Finding Neighbors')
conn = DbConnect(dbName)
cns = conn.GetColumnNames(fpTableName)
curs = conn.GetCursor()
if ids:
ids = [(x,) for x in ids]
curs.execute('create temporary table _tmpTbl (%(idCol)s %(idTyp)s)'%locals())
curs.executemany('insert into _tmpTbl values (?)',ids)
join='join _tmpTbl using (%(idCol)s)'%locals()
else:
join=''
if cns[0].lower() != idCol.lower():
# backwards compatibility to the days when mol tables had a guid and
# the fps tables did not:
curs.execute("attach database '%(molDbName)s' as mols"%locals())
curs.execute("""
select %(idCol)s,%(fpColName)s from %(fpTableName)s join
(select %(idCol)s,%(molIdName)s from mols.molecules %(join)s)
using (%(molIdName)s)
"""%(locals()))
else:
curs.execute('select %(idCol)s,%(fpColName)s from %(fpTableName)s %(join)s'%locals())
def poolFromCurs(curs,similarityMethod):
row = curs.fetchone()
while row:
id,pkl = row
fp = DepickleFP(pkl,similarityMethod)
yield (id,fp)
row = curs.fetchone()
topNLists = GetNeighborLists(probes,options.topN,poolFromCurs(curs,options.similarityType),
simMetric=simMetric,simThresh=options.simThresh,**extraArgs)
uniqIds=set()
nbrLists = {}
for i,nm in enumerate(nms):
topNLists[i].reverse()
scores=topNLists[i].GetPts()
nbrNames = topNLists[i].GetExtras()
nbrs = []
for j,nbrGuid in enumerate(nbrNames):
if nbrGuid is None:
break
else:
uniqIds.add(nbrGuid)
nbrs.append((nbrGuid,scores[j]))
nbrLists[(i,nm)] = nbrs
t2=time.time()
if not options.silent: logger.info('The search took %.1f seconds'%(t2-t1))
if not options.silent: logger.info('Creating output')
curs = mConn.GetCursor()
ids = list(uniqIds)
ids = [(x,) for x in ids]
curs.execute('create temporary table _tmpTbl (%(idCol)s %(idTyp)s)'%locals())
curs.executemany('insert into _tmpTbl values (?)',ids)
curs.execute('select %(idCol)s,%(molIdName)s from molecules join _tmpTbl using (%(idCol)s)'%locals())
nmDict={}
for guid,id in curs.fetchall():
nmDict[guid]=str(id)
ks = list(nbrLists.keys())
ks.sort()
if not options.transpose:
for i,nm in ks:
nbrs= nbrLists[(i,nm)]
nbrTxt=options.outputDelim.join([nm]+['%s%s%.3f'%(nmDict[id],options.outputDelim,score) for id,score in nbrs])
if outF: print(nbrTxt,file=outF)
else:
labels = ['%s%sSimilarity'%(x[1],options.outputDelim) for x in ks]
if outF: print(options.outputDelim.join(labels),file=outF)
for i in range(options.topN):
outL = []
for idx,nm in ks:
nbr = nbrLists[(idx,nm)][i]
outL.append(nmDict[nbr[0]])
outL.append('%.3f'%nbr[1])
if outF: print(options.outputDelim.join(outL),file=outF)
else:
if not options.silent: logger.info('Creating output')
curs = mConn.GetCursor()
ids = [(x,) for x in set(ids)]
curs.execute('create temporary table _tmpTbl (%(idCol)s %(idTyp)s)'%locals())
curs.executemany('insert into _tmpTbl values (?)',ids)
molIdName = options.molIdName
curs.execute('select %(idCol)s,%(molIdName)s from molecules join _tmpTbl using (%(idCol)s)'%locals())
nmDict={}
for guid,id in curs.fetchall():
nmDict[guid]=str(id)
if outF: print('\n'.join(nmDict.values()),file=outF)
if molsOut and ids:
molDbName = os.path.join(options.dbDir,options.molDbName)
cns = [x.lower() for x in mConn.GetColumnNames('molecules')]
if cns[-1]!='molpkl':
cns.remove('molpkl')
cns.append('molpkl')
curs = mConn.GetCursor()
#curs.execute('create temporary table _tmpTbl (guid integer)'%locals())
#curs.executemany('insert into _tmpTbl values (?)',ids)
cnText=','.join(cns)
curs.execute('select %(cnText)s from molecules join _tmpTbl using (%(idCol)s)'%locals())
row=curs.fetchone()
molD = {}
while row:
row = list(row)
m = _molFromPkl(row[-1])
guid = row[0]
nm = nmDict[guid]
if sdfOut:
m.SetProp('_Name',nm)
print(Chem.MolToMolBlock(m),file=sdfOut)
for i in range(1,len(cns)-1):
pn = cns[i]
pv = str(row[i])
          print('> <%s>\n%s\n' % (pn, pv), file=sdfOut)
        print('$$$$', file=sdfOut)
      if smilesOut:
        smi = Chem.MolToSmiles(m, options.chiralSmiles)
        print('%s %s' % (smi, str(row[1])), file=smilesOut)
row=curs.fetchone()
if not options.silent: logger.info('Done!')
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
import os
from optparse import OptionParser
parser=OptionParser(_usage,version='%prog '+_version)
parser.add_option('--dbDir',default='/db/camm/CURRENT/rdk_db',
help='name of the directory containing the database information. The default is %default')
parser.add_option('--molDbName',default='Compounds.sqlt',
help='name of the molecule database')
parser.add_option('--molIdName',default='compound_id',
help='name of the database key column')
parser.add_option('--regName',default='molecules',
help='name of the molecular registry table')
parser.add_option('--pairDbName',default='AtomPairs.sqlt',
help='name of the atom pairs database')
parser.add_option('--pairTableName',default='atompairs',
help='name of the atom pairs table')
parser.add_option('--pairColName',default='atompairfp',
help='name of the atom pair column')
parser.add_option('--torsionsDbName',default='AtomPairs.sqlt',
help='name of the topological torsions database (usually the same as the atom pairs database)')
parser.add_option('--torsionsTableName',default='atompairs',
help='name of the topological torsions table (usually the same as the atom pairs table)')
parser.add_option('--torsionsColName',default='torsionfp',
help='name of the atom pair column')
parser.add_option('--fpDbName',default='Fingerprints.sqlt',
help='name of the 2D fingerprints database')
parser.add_option('--fpTableName',default='rdkitfps',
help='name of the 2D fingerprints table')
parser.add_option('--layeredTableName',default='layeredfps',
help='name of the layered fingerprints table')
parser.add_option('--fpColName',default='',
help='name of the 2D fingerprint column, a sensible default is used')
parser.add_option('--descrDbName',default='Descriptors.sqlt',
help='name of the descriptor database')
parser.add_option('--descrTableName',default='descriptors_v1',
help='name of the descriptor table')
parser.add_option('--descriptorCalcFilename',default=os.path.join(RDConfig.RDBaseDir,'Projects',
'DbCLI','moe_like.dsc'),
help='name of the file containing the descriptor calculator')
parser.add_option('--outputDelim',default=',',
help='the delimiter for the output file. The default is %default')
parser.add_option('--topN',default=20,type='int',
help='the number of neighbors to keep for each query compound. The default is %default')
parser.add_option('--outF','--outFile',default='-',
help='The name of the output file. The default is the console (stdout).')
parser.add_option('--transpose',default=False,action="store_true",
help='print the results out in a transposed form: e.g. neighbors in rows and probe compounds in columns')
parser.add_option('--molFormat',default='sdf',choices=('smiles','sdf'),
help='specify the format of the input file')
parser.add_option('--nameProp',default='_Name',
help='specify the SD property to be used for the molecule names. Default is to use the mol block name')
parser.add_option('--smartsQuery','--smarts','--sma',default='',
help='provide a SMARTS to be used as a substructure query')
parser.add_option('--smilesQuery','--smiles','--smi',default='',
help='provide a SMILES to be used as a substructure query')
parser.add_option('--negateQuery','--negate',default=False,action='store_true',
help='negate the results of the smarts query.')
parser.add_option('--propQuery','--query','-q',default='',
help='provide a property query (see the NOTE about property names)')
parser.add_option('--sdfOut','--sdOut',default='',
help='export an SD file with the matching molecules')
parser.add_option('--smilesOut','--smiOut',default='',
help='export a smiles file with the matching molecules')
parser.add_option('--nonchiralSmiles',dest='chiralSmiles',default=True,action='store_false',
help='do not use chiral SMILES in the output')
parser.add_option('--silent',default=False,action='store_true',
help='Do not generate status messages.')
parser.add_option('--zipMols','--zip',default=False,action='store_true',
help='read compressed mols from the database')
parser.add_option('--pharm2DTableName',default='pharm2dfps',
help='name of the Pharm2D fingerprints table')
parser.add_option('--fdefFile','--fdef',
default=os.path.join(RDConfig.RDDataDir,'Novartis1.fdef'),
help='provide the name of the fdef file to use for 2d pharmacophores')
parser.add_option('--gobbi2DTableName',default='gobbi2dfps',
help='name of the Gobbi2D fingerprints table')
parser.add_option('--similarityType','--simType','--sim',
default='RDK',choices=supportedSimilarityMethods,
help='Choose the type of similarity to use, possible values: RDK, AtomPairs, TopologicalTorsions, Pharm2D, Gobbi2D, Avalon, Morgan. The default is %default')
parser.add_option('--morganFpDbName',default='Fingerprints.sqlt',
help='name of the morgan fingerprints database')
parser.add_option('--morganFpTableName',default='morganfps',
help='name of the morgan fingerprints table')
parser.add_option('--morganFpColName',default='morganfp',
help='name of the morgan fingerprint column')
parser.add_option('--similarityMetric','--simMetric','--metric',
default='',choices=('tanimoto','dice','tversky',''),
help='Choose the type of similarity to use, possible values: tanimoto, dice, tversky. The default is determined by the fingerprint type')
parser.add_option('--tverskyA',default=0.5,type='float',
help='Tversky A value')
parser.add_option('--tverskyB',default=0.5,type='float',
help='Tversky B value')
parser.add_option('--simThresh',default=-1,type='float',
help='threshold to use for similarity searching. If provided, this supersedes the topN argument')
if __name__=='__main__':
import sys,getopt,time
options,args = parser.parse_args()
if len(args)!=1 and not (options.smilesQuery or options.smartsQuery or options.propQuery):
parser.error('please either provide a query filename argument or do a data or smarts query')
if len(args):
queryFilename=args[0]
else:
queryFilename=None
options.queryMol=None
RunSearch(options,queryFilename)
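  # Example invocations (hypothetical paths and property column; all options
  # are defined by the parser above):
  #   python SearchDb.py --dbDir=/db/camm/CURRENT/rdk_db --smiles='c1ccccc1CO' --sim=Morgan queries.sdf
  #   python SearchDb.py --propQuery='mw<300' --smilesOut=hits.smi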
|
soerendip42/rdkit
|
Projects/DbCLI/SearchDb.py
|
Python
|
bsd-3-clause
| 24,186
|
[
"RDKit"
] |
20de1dce08eeb8c408fa877a4ca4d3cffd21160485a879c7a8604a1fdb04637a
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 00:09:21 2017
@author: Mayukh Sarkar
"""
import math
import matplotlib.pyplot as plt
N= 6.022e23 # Avogadro's number
k= 1.3806488e-23 # Boltzmann's constant in SI base units (J/K)
atm= 101325 # atmospheric pressure in Pa
L= .001 # 1 L = .001 m^3
plt.xlabel("Volume (m^3)")
plt.ylabel("Pressure (Pa)")
class Deltas:
# A delta object contains {dW, dQ, dS} for work performed by gas,
# heat absorbed by the gas, and entropy increase of the universe, respectively
# d represents delta; these are not necessarily infinitesimal changes.
def __init__(self,dW,dQ,dS):
self.dW= dW
self.dQ= dQ
self.dS= dS
def plus(self,d):
return Deltas(self.dW+d.dW, self.dQ+d.dQ, self.dS+d.dS)
class gas:
def __init__(self,N,f):
self.N= float(N)
self.f= float(f)
self.gamma= (self.f+2)/self.f
def getU(self):
return self.N*k*self.f*self.T/2
def drawPoint(self):
plt.plot(self.V, self.p, 'ro')
def set_pV(self,p,V):
self.p= float(p)
self.V= float(V)
self.T= self.p*self.V/(self.N*k)
self.drawPoint()
def set_VT(self,V,T):
self.V= float(V)
self.T= float(T)
self.p= self.N*k*self.T/self.V
self.drawPoint()
def set_pT(self,p,T):
self.p= float(p)
self.T= float(T)
self.V= self.N*k*self.T/self.p
self.drawPoint()
def adiabat_T(g,T):
# Given gas g and target temperature T, returns delta dictionary for an
# adiabat to the target temperature. Also moves g to the appropriate
# endpoint in pVT-space.
Ui= g.getU()
g.T= T
dW= Ui - g.getU()
# Still need to set new p, V
g.V= (g.p*g.V**g.gamma/(g.N*k*g.T))**(1/(g.gamma-1))
g.set_VT(g.V, g.T)
return Deltas(dW,0,0)
def adiabat_p(g,p):
# Gas g and target pressure p
K= g.p*g.V**g.gamma
Vi= g.V
g.V= Vi*(g.p/p)**(1/g.gamma)
dW= (K/(1-g.gamma))*((g.V)**(1-g.gamma)-Vi**(1-g.gamma))
g.set_pV(p, g.V)
return Deltas(dW,0,0)
def adiabat_V(g,V):
# Gas g and target volume V
K= g.p*g.V**g.gamma
dW= (K/(1-g.gamma))*(V**(1-g.gamma)-g.V**(1-g.gamma))
g.p= K/(V**g.gamma)
g.set_pV(g.p, V)
return Deltas(dW,0,0)
def isotherm_p(g,p):
# Gas g and target pressure p
Vi= g.V
g.set_pT(p, g.T)
dW= g.N*k*g.T*math.log(g.V/Vi)
dQ= dW
dS= abs(dQ/g.T)
return Deltas(dW,dQ,dS)
def isotherm_V(g,V):
# Gas g and target volume V
dW= g.N*k*g.T*math.log(V/g.V)
g.set_VT(V, g.T)
dQ= dW
dS= abs(dQ/g.T)
return Deltas(dW,dQ,dS)
def isochor_p(g,p):
dW= 0
Ti= g.T
Ui= g.getU()
g.set_pV(p, g.V)
dQ= g.getU() - Ui
dS= abs((g.N*k*g.f/2)*math.log(g.T/Ti))
return Deltas(dW,dQ,dS)
def isochor_T(g,T):
dW= 0
Ti= g.T
Ui= g.getU()
g.set_VT(g.V, T)
dQ= g.getU() - Ui
dS= abs((g.N*k*g.f/2)*math.log(g.T/Ti))
return Deltas(dW,dQ,dS)
def isobar_V(g,V):
Ti= g.T
Vi= g.V
g.set_pV(g.p, V)
dW= g.p*(g.V-Vi)
dQ= g.N*k*((g.f+2)/2)*(g.T-Ti)
dS= abs(((g.f+2)/2)*g.N*k*math.log(g.T/Ti))
return Deltas(dW,dQ,dS)
def isobar_T(g,T):
Ti= g.T
Vi= g.V
g.set_pT(g.p, T)
dW= g.p*(g.V-Vi)
dQ= g.N*k*((g.f+2)/2)*(g.T-Ti)
dS= abs(((g.f+2)/2)*g.N*k*math.log(g.T/Ti))
return Deltas(dW,dQ,dS)
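# Hedged usage sketch (not part of the original module; commented out so the
# module stays import-safe). It chains the primitives above into the first
# two legs of a Carnot cycle; the numbers are illustrative.
#
#   g = gas(N, 3)                     # one mole of a monatomic ideal gas (f = 3)
#   g.set_VT(L, 400.0)                # start at V = 1 L, T = 400 K
#   d = isotherm_V(g, 2.0*L)          # isothermal expansion at the hot temperature
#   d = d.plus(adiabat_T(g, 300.0))   # adiabatic expansion down to the cold temperature
#   print(d.dW, d.dQ, d.dS)           # accumulated work, heat, and entropy change
#   plt.show()                        # the set_* calls above plotted each state point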
|
blackburn66/Carnot-cycle
|
engines.py
|
Python
|
gpl-3.0
| 3,391
|
[
"Avogadro"
] |
a5d2564cc07a9df4f4035917c74ef1d7ae9d91a71b05ded7e9c313b5c323d1e4
|
import numpy as np
import orthopy
import pytest
import quadpy
@pytest.mark.parametrize(
"scheme",
[quadpy.enr2.cools_haegemans_1(n) for n in range(1, 6)]
+ [quadpy.enr2.cools_haegemans_2(n) for n in range(1, 6)]
+ [quadpy.enr2.cools_haegemans_3(n) for n in range(2, 6)]
+ [quadpy.enr2.lu_darmofal_1(n) for n in range(2, 6)]
+ [quadpy.enr2.mcnamee_stenger_3(n) for n in range(1, 6)]
+ [quadpy.enr2.mcnamee_stenger_5(n) for n in range(2, 6)]
+ [quadpy.enr2.mcnamee_stenger_7a(n) for n in range(3, 6)]
+ [quadpy.enr2.mcnamee_stenger_7b(n) for n in range(3, 6)]
+ [quadpy.enr2.mcnamee_stenger_9a(n) for n in range(4, 6)]
+ [quadpy.enr2.mcnamee_stenger_9b(n) for n in range(4, 6)]
+ [quadpy.enr2.phillips(n) for n in range(2, 6)]
+ [quadpy.enr2.stenger_7a(n) for n in [3, 4]]
+ [quadpy.enr2.stenger_7b(n) for n in range(3, 6)]
+ [quadpy.enr2.stenger_9a(n) for n in range(3, 6)]
+ [quadpy.enr2.stenger_9b(n) for n in range(4, 6)]
+ [quadpy.enr2.stenger_11a(n) for n in range(3, 5)]
+ [quadpy.enr2.stenger_11b(n) for n in range(3, 6)]
+ [quadpy.enr2.stroud_enr2_3_1(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_enr2_3_2(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_enr2_5_1a(n) for n in range(2, 7)]
+ [quadpy.enr2.stroud_enr2_5_1b(n) for n in [3, 5, 6]]
+ [quadpy.enr2.stroud_enr2_5_2(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_enr2_5_3(n) for n in range(3, 8)]
+ [quadpy.enr2.stroud_enr2_5_4(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_enr2_5_5a(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_enr2_5_5b(n) for n in [2]]
+ [quadpy.enr2.stroud_enr2_5_6(n) for n in range(5, 8)]
+ [quadpy.enr2.stroud_enr2_7_1a(n) for n in [2, 3, 4, 6, 7]]
+ [quadpy.enr2.stroud_enr2_7_1b(n) for n in [3, 4]]
+ [quadpy.enr2.stroud_enr2_7_2(n) for n in range(3, 8)]
+ [quadpy.enr2.stroud_enr2_7_3a(n) for n in [3, 4]]
+ [quadpy.enr2.stroud_enr2_7_3b(n) for n in range(3, 6)]
+ [quadpy.enr2.stroud_enr2_9_1a(n) for n in range(3, 7)]
+ [quadpy.enr2.stroud_enr2_9_1b(n) for n in range(4, 7)]
+ [quadpy.enr2.stroud_enr2_11_1a(n) for n in range(3, 5)]
+ [quadpy.enr2.stroud_enr2_11_1b(n) for n in range(3, 6)]
+ [quadpy.enr2.stroud_1967_5_a(n) for n in range(2, 7)]
+ [quadpy.enr2.stroud_1967_5_b(n) for n in [3, 5, 6, 7]]
+ [quadpy.enr2.stroud_1967_7_2a(n) for n in [2, 3, 4, 6, 7]]
+ [quadpy.enr2.stroud_1967_7_2b(n) for n in [3, 4]]
+ [quadpy.enr2.stroud_1967_7_4(n) for n in range(3, 8)]
+ [quadpy.enr2.stroud_secrest_1(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_secrest_2(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_secrest_3(n) for n in range(2, 8)]
+ [quadpy.enr2.stroud_secrest_4(n) for n in range(2, 8)]
+ [quadpy.enr2.xiu(n) for n in range(2, 8)],
)
def test_scheme(scheme):
assert scheme.points.dtype == np.float64, scheme.name
assert scheme.weights.dtype == np.float64, scheme.name
print(scheme)
evaluator = orthopy.enr2.Eval(scheme.points, "physicists")
k = 0
while True:
approximate = scheme.integrate(lambda x: next(evaluator))
exact = evaluator.int_p0 if k == 0 else 0.0
err = np.abs(approximate - exact)
if np.any(err > 1.0e-11):
break
k += 1
max_err = np.max(err)
assert k - 1 == scheme.degree, (
f"{scheme.name} -- observed: {k - 1}, expected: {scheme.degree} "
f"(max err: {max_err:.3e})"
)
if __name__ == "__main__":
dim_ = 5
# quadpy.e3r2.show(quadpy.enr2.Stroud(dim_, '5-1a'), backend='vtk')
scheme_ = quadpy.enr2.Stroud(dim_, "11-1b")
test_scheme(scheme_, 1.0e-14)
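# Hedged usage sketch (not part of the original test file; commented out). It
# assumes the conventions used above: points have shape (dim, n_points) and
# integration is against the weight exp(-r^2) over R^dim.
#
#   scheme = quadpy.enr2.stroud_enr2_5_2(3)
#   val = scheme.integrate(lambda x: np.ones(x.shape[1]))
#   # for f == 1 the exact value is pi ** (3 / 2)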
|
nschloe/quadpy
|
tests/test_enr2.py
|
Python
|
mit
| 3,693
|
[
"VTK"
] |
c69a3b5f38d9adcb132b8d6a08fb7b4e07df232d7eee44d9afe69c2604050075
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================
import re
from typing import Optional, Union, Dict, Any, List, Tuple, Sequence, TYPE_CHECKING
from sys import stderr as system_error_stream
import numpy as np
try:
from numpy.typing import ArrayLike
except ImportError:
from numpy import ndarray as ArrayLike
import warnings
import os
import builtins
fileiotype = Union[str, bytes, os.PathLike]
import itk.support.types as itkt
from .helpers import wasm_type_from_image_type, image_type_from_wasm_type
if TYPE_CHECKING:
try:
import xarray as xr
except ImportError:
pass
try:
import vtk
except ImportError:
pass
__all__ = [
"output",
"image",
"set_nthreads",
"get_nthreads",
"echo",
"size",
"physical_size",
"spacing",
"origin",
"index",
"region",
"GetArrayFromImage",
"array_from_image",
"GetArrayViewFromImage",
"array_view_from_image",
"GetImageFromArray",
"image_from_array",
"GetImageViewFromArray",
"image_view_from_array",
"array_from_vector_container",
"array_view_from_vector_container",
"vector_container_from_array",
"GetArrayFromVnlVector",
"array_from_vnl_vector",
"GetVnlVectorFromArray",
"vnl_vector_from_array",
"GetArrayViewFromVnlVector",
"array_view_from_vnl_vector",
"GetVnlMatrixFromArray",
"vnl_matrix_from_array",
"GetArrayFromVnlMatrix",
"array_from_vnl_matrix",
"GetArrayViewFromVnlMatrix",
"array_view_from_vnl_matrix",
"GetArrayFromMatrix",
"array_from_matrix",
"GetMatrixFromArray",
"matrix_from_array",
"xarray_from_image",
"image_from_xarray",
"vtk_image_from_image",
"image_from_vtk_image",
"dict_from_image",
"image_from_dict",
"image_intensity_min_max",
"imwrite",
"imread",
"meshwrite",
"meshread",
"transformwrite",
"transformread",
"search",
"set_inputs",
"templated_class",
"pipeline",
"auto_pipeline",
"down_cast",
"template",
"class_",
"ctype",
"python_type",
"range",
"TemplateTypeError",
]
def output(input):
"""
    If the input object has a "GetOutput()" method, return its output;
    otherwise simply return the input value.
"""
if hasattr(input, "GetOutput"):
return input.GetOutput()
return input
def image(input):
warnings.warn(
"WrapITK warning: itk.image() is deprecated. " "Use itk.output() instead."
)
return output(input)
def set_nthreads(number_of_threads: int) -> None:
"""
    Convenience function to set the global default number of threads.
    Usage example (in Python):
import itk
itk.set_nthreads(4) ## use 4 threads
"""
assert number_of_threads > 0, (
"Please set a positive number of threads instead of %d" % number_of_threads
)
import itk
threader = itk.MultiThreaderBase.New()
threader.SetGlobalDefaultNumberOfThreads(number_of_threads)
def get_nthreads() -> int:
"""
Get the number of threads
"""
import itk
threader = itk.MultiThreaderBase.New()
return threader.GetGlobalDefaultNumberOfThreads()
def echo(obj, f=system_error_stream) -> None:
    """Print an object to a stream (stderr by default)."""
    print(obj, file=f)
def size(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[int]:
"""Return the size of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
import itk
image_or_filter.UpdateOutputInformation()
img = itk.output(image_or_filter)
return img.GetLargestPossibleRegion().GetSize()
def physical_size(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
"""Return the physical size of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
# required because range is overloaded in this module
from builtins import range
spacing_ = spacing(image_or_filter)
size_ = size(image_or_filter)
result = []
for i in range(0, spacing_.Size()):
result.append(spacing_.GetElement(i) * size_.GetElement(i))
return result
def spacing(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
"""Return the spacing of an image, or of the output image of a filter
    This method takes care of updating the needed information
    """
    import itk
    # we don't need the entire output, only its metadata
image_or_filter.UpdateOutputInformation()
img = itk.output(image_or_filter)
return img.GetSpacing()
def origin(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
"""Return the origin of an image, or of the output image of a filter
    This method takes care of updating the needed information
    """
    import itk
    # we don't need the entire output, only its metadata
image_or_filter.UpdateOutputInformation()
img = itk.output(image_or_filter)
return img.GetOrigin()
def index(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[int]:
"""Return the index of an image, or of the output image of a filter
    This method takes care of updating the needed information
    """
    import itk
    # we don't need the entire output, only its metadata
image_or_filter.UpdateOutputInformation()
img = itk.output(image_or_filter)
return img.GetLargestPossibleRegion().GetIndex()
def region(image_or_filter: "itkt.ImageOrImageSource") -> "itkt.ImageRegion":
"""Return the region of an image, or of the output image of a filter
    This method takes care of updating the needed information
    """
    import itk
    # we don't need the entire output, only its metadata
image_or_filter.UpdateOutputInformation()
img = itk.output(image_or_filter)
return img.GetLargestPossibleRegion()
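# Hedged usage sketch (not part of the original module; commented out):
# querying image metadata with the helpers above. "image.nrrd" is a
# placeholder file name; imread() is defined later in this module.
#
#   import itk
#   img = itk.imread("image.nrrd")
#   itk.size(img)        # index-space size, e.g. itkSize3([256, 256, 100])
#   itk.spacing(img)     # physical spacing per axis
#   itk.origin(img)      # physical origin
#   itk.region(img)      # the LargestPossibleRegion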
def _get_itk_pixelid(numpy_array_type):
"""Returns a ITK PixelID given a numpy array."""
import itk
# This is a Mapping from numpy array types to itk pixel types.
_np_itk = {
np.uint8: itk.UC,
np.uint16: itk.US,
np.uint32: itk.UI,
np.uint64: itk.UL,
np.int8: itk.SC,
np.int16: itk.SS,
np.int32: itk.SI,
np.int64: itk.SL,
np.float32: itk.F,
np.float64: itk.D,
np.complex64: itk.complex[itk.F],
np.complex128: itk.complex[itk.D],
}
try:
return _np_itk[numpy_array_type.dtype.type]
except KeyError as e:
for key in _np_itk:
if np.issubdtype(numpy_array_type.dtype.type, key):
return _np_itk[key]
raise e
def _GetArrayFromImage(
image_or_filter, function_name: str, keep_axes: bool, update: bool, ttype
) -> np.ndarray:
"""Get an Array with the content of the image buffer"""
# Finds the image type
import itk
img = itk.output(image_or_filter)
if ttype is not None:
if isinstance(ttype, (tuple, list)):
if len(ttype) != 1:
raise RuntimeError("Expected 1 component in ttype tuple.")
ImageType = ttype[0]
else:
ImageType = ttype
else:
ImageType = img.__class__
keys = [k for k in itk.PyBuffer.keys() if k[0] == ImageType]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
# Create a numpy array of the type of the input image
templatedFunction = getattr(itk.PyBuffer[keys[0]], function_name)
return templatedFunction(img, keep_axes, update)
def GetArrayFromImage(
image_or_filter: "itkt.ImageOrImageSource",
keep_axes: bool = False,
update: bool = True,
ttype=None,
) -> np.ndarray:
"""Get an array with the content of the image buffer"""
return _GetArrayFromImage(
image_or_filter, "GetArrayFromImage", keep_axes, update, ttype
)
array_from_image = GetArrayFromImage
def GetArrayViewFromImage(
image_or_filter: "itkt.ImageOrImageSource",
keep_axes: bool = False,
update: bool = True,
ttype=None,
) -> np.ndarray:
"""Get an array view with the content of the image buffer"""
return _GetArrayFromImage(
image_or_filter, "GetArrayViewFromImage", keep_axes, update, ttype
)
array_view_from_image = GetArrayViewFromImage
def _GetImageFromArray(arr: ArrayLike, function_name: str, is_vector: bool, ttype):
"""Get an ITK image from a Python array."""
import itk
# Verify inputs
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr)
if ttype is not None:
if is_vector:
raise RuntimeError("Cannot specify both `is_vector` and `ttype`.")
if isinstance(ttype, (tuple, list)):
if len(ttype) != 1:
raise RuntimeError("Expected 1 component in ttype tuple.")
ImageType = ttype[0]
else:
ImageType = ttype
if type(itk.template(ImageType)) != tuple or len(itk.template(ImageType)) < 2:
raise RuntimeError("Cannot determine pixel type from supplied ttype.")
is_vector = (
type(itk.template(ImageType)[1][0]) != itk.support.types.itkCType
or itk.template(ImageType)[0] == itk.VectorImage
)
else:
PixelType = _get_itk_pixelid(arr)
Dimension = arr.ndim
if is_vector:
Dimension = arr.ndim - 1
if arr.flags["C_CONTIGUOUS"]:
VectorDimension = arr.shape[-1]
else:
VectorDimension = arr.shape[0]
if PixelType == itk.UC:
if VectorDimension == 3:
ImageType = itk.Image[itk.RGBPixel[itk.UC], Dimension]
elif VectorDimension == 4:
ImageType = itk.Image[itk.RGBAPixel[itk.UC], Dimension]
else:
ImageType = itk.VectorImage[PixelType, Dimension]
else:
ImageType = itk.VectorImage[PixelType, Dimension]
else:
ImageType = itk.Image[PixelType, Dimension]
keys = [k for k in itk.PyBuffer.keys() if k[0] == ImageType]
if len(keys) == 0:
raise RuntimeError(
"""No suitable template parameter can be found.
Please specify an output type via the 'ttype' keyword parameter."""
)
templatedFunction = getattr(itk.PyBuffer[keys[0]], function_name)
return templatedFunction(arr, is_vector)
def GetImageFromArray(
arr: ArrayLike, is_vector: bool = False, ttype=None
) -> "itkt.ImageBase":
"""Get an ITK image from a Python array."""
return _GetImageFromArray(arr, "GetImageFromArray", is_vector, ttype)
image_from_array = GetImageFromArray
def GetImageViewFromArray(
arr: ArrayLike, is_vector: bool = False, ttype=None
) -> "itkt.ImageBase":
"""Get an ITK image view from a Python array."""
return _GetImageFromArray(arr, "GetImageViewFromArray", is_vector, ttype)
image_view_from_array = GetImageViewFromArray
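# Hedged usage sketch (not part of the original module; commented out):
# a NumPy round trip with the converters above.
#
#   import itk
#   import numpy as np
#   arr = np.random.rand(8, 8).astype(np.float32)
#   img = itk.image_from_array(arr)        # deep copy into a new itk.Image
#   view = itk.array_view_from_image(img)  # NumPy view sharing the image buffer
#   assert np.array_equal(itk.array_from_image(img), arr)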
def array_from_vector_container(
container: "itkt.VectorContainer", ttype=None
) -> np.ndarray:
"""Get an Array with the content of the vector container"""
import itk
container_template = itk.template(container)
IndexType = container_template[1][0]
# Find container data type
if ttype is not None:
if isinstance(ttype, (tuple, list)):
if len(ttype) != 1:
raise RuntimeError("Expected 1 component in ttype tuple.")
DataType = ttype[0]
else:
DataType = ttype
else:
DataType = container_template[1][1]
keys = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
# Create numpy array of the type of the input container
return itk.PyVectorContainer[keys[0]].array_from_vector_container(container)
def array_view_from_vector_container(
container: "itkt.VectorContainer", ttype=None
) -> np.ndarray:
"""Get an Array view with the content of the vector container"""
import itk
container_template = itk.template(container)
IndexType = container_template[1][0]
# Find container type
if ttype is not None:
if isinstance(ttype, (tuple, list)):
if len(ttype) != 1:
raise RuntimeError("Expected 1 component in ttype tuple.")
DataType = ttype[0]
else:
DataType = ttype
else:
DataType = container_template[1][1]
keys = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
# Create numpy array of the type of the input container
return itk.PyVectorContainer[keys[0]].array_view_from_vector_container(container)
def vector_container_from_array(arr: ArrayLike, ttype=None) -> "itkt.VectorContainer":
"""Get a vector container from a Python array"""
import itk
# Verify inputs
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr)
# Return VectorContainer with 64-bit index type
if os.name == "nt":
IndexType = itk.ULL
else:
IndexType = itk.UL
# Find container type
if ttype is not None:
if isinstance(ttype, (tuple, list)):
if len(ttype) != 1:
raise RuntimeError("Expected 1 component in ttype tuple.")
DataType = ttype[0]
else:
DataType = ttype
else:
DataType = _get_itk_pixelid(arr)
keys = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
# Create numpy array of the type of the input container
return itk.PyVectorContainer[keys[0]].vector_container_from_array(arr)
def _GetArrayFromVnlObject(vnl_object, function_name: str, ttype) -> np.ndarray:
"""Get an array with the content of vnl_object"""
# Finds the vnl object type
import itk
if ttype is not None:
if isinstance(ttype, (tuple, list)):
if len(ttype) != 1:
raise RuntimeError("Expected 1 component in ttype tuple.")
PixelType = ttype[0]
else:
PixelType = ttype
else:
PixelType = itk.template(vnl_object)[1][0]
keys = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
# Create a numpy array of the type of the vnl object
templatedFunction = getattr(itk.PyVnl[keys[0]], function_name)
return templatedFunction(vnl_object)
def GetArrayFromVnlVector(vnl_vector, ttype=None) -> np.ndarray:
"""Get an array with the content of vnl_vector"""
return _GetArrayFromVnlObject(vnl_vector, "GetArrayFromVnlVector", ttype)
array_from_vnl_vector = GetArrayFromVnlVector
def GetArrayViewFromVnlVector(vnl_vector, ttype=None) -> np.ndarray:
"""Get an array view of vnl_vector"""
return _GetArrayFromVnlObject(vnl_vector, "GetArrayViewFromVnlVector", ttype)
array_view_from_vnl_vector = GetArrayViewFromVnlVector
def GetArrayFromVnlMatrix(vnl_matrix, ttype=None) -> np.ndarray:
"""Get an array with the content of vnl_matrix"""
return _GetArrayFromVnlObject(vnl_matrix, "GetArrayFromVnlMatrix", ttype)
array_from_vnl_matrix = GetArrayFromVnlMatrix
def GetArrayViewFromVnlMatrix(vnl_matrix, ttype=None) -> np.ndarray:
"""Get an array view of vnl_matrix"""
return _GetArrayFromVnlObject(vnl_matrix, "GetArrayViewFromVnlMatrix", ttype)
array_view_from_vnl_matrix = GetArrayViewFromVnlMatrix
def _GetVnlObjectFromArray(arr: ArrayLike, function_name: str, ttype):
"""Get a vnl object from a Python array."""
import itk
# Verify inputs
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr)
if ttype is not None:
if isinstance(ttype, (tuple, list)):
if len(ttype) != 1:
raise RuntimeError("Expected 1 component in ttype tuple.")
PixelType = ttype[0]
else:
PixelType = ttype
else:
PixelType = _get_itk_pixelid(arr)
keys = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
templatedFunction = getattr(itk.PyVnl[keys[0]], function_name)
return templatedFunction(arr)
def GetVnlVectorFromArray(arr: ArrayLike, ttype=None):
"""Get a vnl vector from a Python array."""
return _GetVnlObjectFromArray(arr, "GetVnlVectorFromArray", ttype)
vnl_vector_from_array = GetVnlVectorFromArray
def GetVnlMatrixFromArray(arr: ArrayLike, ttype=None):
"""Get a vnl matrix from a Python array."""
return _GetVnlObjectFromArray(arr, "GetVnlMatrixFromArray", ttype)
vnl_matrix_from_array = GetVnlMatrixFromArray
def GetArrayFromMatrix(itk_matrix) -> np.ndarray:
return GetArrayFromVnlMatrix(itk_matrix.GetVnlMatrix().as_matrix())
array_from_matrix = GetArrayFromMatrix
def GetMatrixFromArray(arr: ArrayLike) -> "itkt.Matrix":
import itk
# Verify inputs
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr)
vnl_matrix = GetVnlMatrixFromArray(arr)
dims = arr.shape
PixelType = _get_itk_pixelid(arr)
m = itk.Matrix[PixelType, dims[0], dims[1]](vnl_matrix)
return m
matrix_from_array = GetMatrixFromArray
def xarray_from_image(l_image: "itkt.ImageOrImageSource") -> "xr.DataArray":
"""Convert an itk.Image to an xarray.DataArray.
Origin and spacing metadata is preserved in the xarray's coords. The
Direction is set in the `direction` attribute.
Dims are labeled as `x`, `y`, `z`, `t`, and `c`.
    This interface and behavior are experimental and subject to possible
    future changes."""
import xarray as xr
import itk
import numpy as np
array_view = itk.array_view_from_image(l_image)
l_spacing = itk.spacing(l_image)
l_origin = itk.origin(l_image)
l_size = itk.size(l_image)
    image_dimension = l_image.GetImageDimension()
    image_dims: Tuple[str, str, str, str] = ("x", "y", "z", "t")
coords = {}
for l_index, dim in enumerate(image_dims[:image_dimension]):
coords[dim] = np.linspace(
l_origin[l_index],
l_origin[l_index] + (l_size[l_index] - 1) * l_spacing[l_index],
l_size[l_index],
dtype=np.float64,
)
dims = list(reversed(image_dims[:image_dimension]))
components = l_image.GetNumberOfComponentsPerPixel()
if components > 1:
dims.append("c")
coords["c"] = np.arange(components, dtype=np.uint32)
direction = np.flip(itk.array_from_matrix(l_image.GetDirection()))
attrs = {"direction": direction}
metadata = dict(l_image)
ignore_keys = {"direction", "origin", "spacing"}
for key in metadata:
        if key not in ignore_keys:
attrs[key] = metadata[key]
data_array = xr.DataArray(array_view, dims=dims, coords=coords, attrs=attrs)
return data_array
def image_from_xarray(data_array: "xr.DataArray") -> "itkt.ImageBase":
"""Convert an xarray.DataArray to an itk.Image.
Metadata encoded with xarray_from_image is applied to the itk.Image.
    This interface and behavior are experimental and subject to possible
    future changes."""
import numpy as np
import itk
if not {"t", "z", "y", "x", "c"}.issuperset(data_array.dims):
raise ValueError('Unsupported dims, supported dims: "t", "z", "y", "x", "c".')
image_dims = list({"t", "z", "y", "x"}.intersection(set(data_array.dims)))
image_dims.sort(reverse=True)
image_dimension = len(image_dims)
ordered_dims = ("t", "z", "y", "x")[-image_dimension:]
is_vector = "c" in data_array.dims
if is_vector:
ordered_dims = ordered_dims + ("c",)
values = data_array.values
if ordered_dims != data_array.dims:
dest = list(builtins.range(len(ordered_dims)))
source = dest.copy()
for ii in builtins.range(len(ordered_dims)):
source[ii] = data_array.dims.index(ordered_dims[ii])
values = np.moveaxis(values, source, dest).copy()
itk_image = itk.image_view_from_array(values, is_vector=is_vector)
l_origin = [0.0] * image_dimension
l_spacing = [1.0] * image_dimension
for l_index, dim in enumerate(image_dims):
coords = data_array.coords[dim]
if coords.shape[0] > 1:
l_origin[l_index] = float(coords[0])
l_spacing[l_index] = float(coords[1]) - float(coords[0])
l_spacing.reverse()
itk_image.SetSpacing(l_spacing)
l_origin.reverse()
itk_image.SetOrigin(l_origin)
if "direction" in data_array.attrs:
direction = data_array.attrs["direction"]
itk_image.SetDirection(np.flip(direction))
ignore_keys = {"direction", "origin", "spacing"}
for key in data_array.attrs:
        if key not in ignore_keys:
itk_image[key] = data_array.attrs[key]
return itk_image
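# Hedged usage sketch (not part of the original module; commented out):
# an xarray round trip. "image.nrrd" is a placeholder file name.
#
#   import itk
#   img = itk.imread("image.nrrd")
#   da = itk.xarray_from_image(img)    # coords in physical units, direction in attrs
#   img2 = itk.image_from_xarray(da)   # origin/spacing/direction restored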
def vtk_image_from_image(l_image: "itkt.ImageOrImageSource") -> "vtk.vtkImageData":
"""Convert an itk.Image to a vtk.vtkImageData."""
import itk
import vtk
from vtk.util.numpy_support import numpy_to_vtk
array = itk.array_view_from_image(l_image)
vtk_image = vtk.vtkImageData()
data_array = numpy_to_vtk(array.reshape(-1))
data_array.SetNumberOfComponents(l_image.GetNumberOfComponentsPerPixel())
data_array.SetName("Scalars")
# Always set Scalars for (future?) multi-component volume rendering
vtk_image.GetPointData().SetScalars(data_array)
dim = l_image.GetImageDimension()
l_spacing = [1.0] * 3
l_spacing[:dim] = l_image.GetSpacing()
vtk_image.SetSpacing(l_spacing)
l_origin = [0.0] * 3
l_origin[:dim] = l_image.GetOrigin()
vtk_image.SetOrigin(l_origin)
dims = [1] * 3
dims[:dim] = itk.size(l_image)
vtk_image.SetDimensions(dims)
    # Copy the direction matrix for VTK >= 9 (vtk already imported above)
if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
l_direction = l_image.GetDirection()
direction = itk.array_from_matrix(l_direction).flatten().tolist()
if len(direction) == 4:
# Change 2d matrix to 3d
direction = [
direction[0],
direction[1],
0.0,
direction[2],
direction[3],
0.0,
0.0,
0.0,
1.0,
]
vtk_image.SetDirectionMatrix(direction)
if l_image.GetImageDimension() == 3:
PixelType = itk.template(l_image)[1][0]
if PixelType == itk.Vector:
vtk_image.GetPointData().SetVectors(data_array)
elif PixelType == itk.CovariantVector:
vtk_image.GetPointData().SetVectors(data_array)
elif PixelType == itk.SymmetricSecondRankTensor:
vtk_image.GetPointData().SetTensors(data_array)
elif PixelType == itk.DiffusionTensor3D:
vtk_image.GetPointData().SetTensors(data_array)
return vtk_image
def image_from_vtk_image(vtk_image: "vtk.vtkImageData") -> "itkt.ImageBase":
"""Convert a vtk.vtkImageData to an itk.Image."""
import itk
from vtk.util.numpy_support import vtk_to_numpy
point_data = vtk_image.GetPointData()
array = vtk_to_numpy(point_data.GetScalars())
array = array.reshape(-1)
is_vector = point_data.GetScalars().GetNumberOfComponents() != 1
dims = list(vtk_image.GetDimensions())
if is_vector and dims[-1] == 1:
# 2D
dims = dims[:2]
dims.reverse()
dims.append(point_data.GetScalars().GetNumberOfComponents())
else:
dims.reverse()
array.shape = tuple(dims)
l_image = itk.image_view_from_array(array, is_vector)
dim = l_image.GetImageDimension()
l_spacing = [1.0] * dim
l_spacing[:dim] = vtk_image.GetSpacing()[:dim]
l_image.SetSpacing(l_spacing)
l_origin = [0.0] * dim
l_origin[:dim] = vtk_image.GetOrigin()[:dim]
l_image.SetOrigin(l_origin)
# Direction support with VTK 9
import vtk
if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
direction = vtk_image.GetDirectionMatrix()
if dim == 3:
direction_array = np.identity(3)
for y in (0, 1, 2):
for x in (0, 1, 2):
direction_array[x, y] = direction.GetElement(x, y)
elif dim == 2:
direction_array = np.identity(2)
for y in (0, 1):
for x in (0, 1):
direction_array[x, y] = direction.GetElement(x, y)
l_direction = itk.matrix_from_array(direction_array)
l_image.SetDirection(l_direction)
return l_image
def dict_from_image(image: "itkt.Image") -> Dict:
"""Serialize a Python itk.Image object to a pickable Python dictionary."""
import itk
pixel_arr = itk.array_view_from_image(image)
imageType = wasm_type_from_image_type(image)
return dict(
imageType=imageType,
origin=tuple(image.GetOrigin()),
spacing=tuple(image.GetSpacing()),
size=tuple(image.GetBufferedRegion().GetSize()),
direction=np.asarray(image.GetDirection()),
data=pixel_arr,
)
def image_from_dict(image_dict: Dict) -> "itkt.Image":
"""Deserialize an dictionary representing an itk.Image object."""
import itk
ImageType = image_type_from_wasm_type(image_dict["imageType"])
image = itk.PyBuffer[ImageType].GetImageViewFromArray(image_dict["data"])
image.SetOrigin(image_dict["origin"])
image.SetSpacing(image_dict["spacing"])
image.SetDirection(image_dict["direction"])
return image
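# Hedged usage sketch (not part of the original module; commented out): the
# dictionary form is picklable, so it can cross process boundaries.
#
#   import itk
#   import pickle
#   d = itk.dict_from_image(img)                               # img: an itk.Image
#   img2 = itk.image_from_dict(pickle.loads(pickle.dumps(d)))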
def image_intensity_min_max(image_or_filter: "itkt.ImageOrImageSource"):
    """Return the minimum and maximum intensity values in an image or in the
    output image of a filter.
    The minimum and maximum values are returned in a tuple: (min, max)
    image_intensity_min_max() takes care of updating the pipeline
    """
import itk
img = itk.output(image_or_filter)
img.UpdateOutputInformation()
img.Update()
# don't put that calculator in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)
auto_pipeline.current = tmp_auto_pipeline
comp.Compute()
return comp.GetMinimum(), comp.GetMaximum()
# range is a python function, and should not be overridden
# the current use of the function name "range" is for backward
# compatibility, but should be considered for removal in the future
def range(image_or_filter):
return image_intensity_min_max(image_or_filter)
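# Hedged usage sketch (not part of the original module; commented out):
#
#   lo, hi = itk.image_intensity_min_max(img)  # img: an itk.Image or filter
#   # itk.range(img) is the backward-compatible alias for the same call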
def imwrite(
image_or_filter: "itkt.ImageOrImageSource",
filename: fileiotype,
compression: bool = False,
imageio: Optional["itkt.ImageIOBase"] = None,
) -> None:
"""Write a image or the output image of a filter to a file.
Parameters
----------
image_or_filter :
Image or filter that produces an image to write to the file.
filename :
Target output file path.
compression :
Use compression when writing if the format supports it.
imageio :
Use the provided itk.ImageIOBase derived instance to write the file.
The writer is instantiated with the image type of the image in
parameter (or, again, with the output image of the filter in parameter).
"""
import itk
img = itk.output(image_or_filter)
img.UpdateOutputInformation()
# don't put that writer in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
writer = itk.ImageFileWriter[type(img)].New(
Input=img, FileName=f"{filename}", UseCompression=compression
)
auto_pipeline.current = tmp_auto_pipeline
if imageio:
writer.SetImageIO(imageio)
writer.Update()
def imread(
filename: fileiotype,
pixel_type: Optional["itkt.PixelTypes"] = None,
fallback_only: bool = False,
imageio: Optional["itkt.ImageIOBase"] = None,
) -> "itkt.ImageBase":
"""Read an image from a file or series of files and return an itk.Image.
Parameters
----------
filename :
File path for a single file, a list of files for an image series, or a
directory for a DICOM image series.
pixel_type :
Image pixel type to cast to when loading.
fallback_only :
If true, first try to automatically deduce the image pixel type, and
only use the given `pixel_type` if automatic deduction fails.
imageio :
Use the provided itk.ImageIOBase derived instance to read the file.
Returns
-------
image :
The resulting itk.Image.
The reader is instantiated with the image type of the image file if
`pixel_type` is not provided (default). The dimension of the image is
automatically deduced from the dimension stored on disk.
If the filename provided is a directory then the directory is assumed to
be for a DICOM series volume. If there is exactly one DICOM series
volume in that directory, the reader will use an itk.ImageSeriesReader
    object to read the DICOM filenames within that directory.
If the given filename is a list or a tuple of file names, the reader
will use an itk.ImageSeriesReader object to read the files.
If `fallback_only` is set to `True`, `imread()` will first try to
automatically deduce the image pixel_type, and only use the given
`pixel_type` if automatic deduction fails. Failures typically happen if
the pixel type is not supported (e.g. it is not currently wrapped).
"""
import itk
from itk.support.extras import TemplateTypeError
if fallback_only:
if pixel_type is None:
raise Exception(
"pixel_type must be set when using the fallback_only option"
)
try:
return imread(filename)
except (KeyError, TemplateTypeError):
pass
if type(filename) not in [list, tuple]:
import os
if os.path.isdir(filename):
# read DICOM series of 1 image in a folder, refer to: https://github.com/RSIP-Vision/medio
names_generator = itk.GDCMSeriesFileNames.New()
names_generator.SetUseSeriesDetails(True)
names_generator.AddSeriesRestriction("0008|0021") # Series Date
names_generator.SetDirectory(f"{filename}")
series_uid = names_generator.GetSeriesUIDs()
if len(series_uid) == 0:
raise FileNotFoundError(f"no DICOMs in: {filename}.")
if len(series_uid) > 1:
raise OSError(
f"the directory: {filename} contains more than one DICOM series."
)
series_identifier = series_uid[0]
filename = names_generator.GetFileNames(series_identifier)
if type(filename) in [list, tuple]:
template_reader_type = itk.ImageSeriesReader
io_filename = f"{filename[0]}"
increase_dimension = True
kwargs = {"FileNames": [f"{f}" for f in filename]}
else:
template_reader_type = itk.ImageFileReader
io_filename = f"{filename}"
increase_dimension = False
kwargs = {"FileName": f"{filename}"}
if imageio:
kwargs["ImageIO"] = imageio
if pixel_type:
image_IO = itk.ImageIOFactory.CreateImageIO(
io_filename, itk.CommonEnums.IOFileMode_ReadMode
)
if not image_IO:
raise RuntimeError("No ImageIO is registered to handle the given file.")
image_IO.SetFileName(io_filename)
image_IO.ReadImageInformation()
dimension = image_IO.GetNumberOfDimensions()
# Increase dimension if last dimension is not of size one.
if increase_dimension and image_IO.GetDimensions(dimension - 1) != 1:
dimension += 1
is_vlv = False
try:
is_vlv = itk.template(pixel_type)[0] is itk.VariableLengthVector
except KeyError:
pass
if is_vlv:
ImageType = itk.VectorImage[itk.template(pixel_type)[1][0], dimension]
else:
ImageType = itk.Image[pixel_type, dimension]
reader = template_reader_type[ImageType].New(**kwargs)
else:
reader = template_reader_type.New(**kwargs)
reader.Update()
return reader.GetOutput()
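# Hedged usage sketches (not part of the original module; commented out);
# file names are placeholders.
#
#   img = itk.imread("input.mha", pixel_type=itk.F)    # cast to float on load
#   itk.imwrite(img, "output.nrrd", compression=True)
#   series = itk.imread(["slice0.dcm", "slice1.dcm"])  # list -> ImageSeriesReader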
def meshwrite(
mesh: "itkt.Mesh", filename: fileiotype, compression: bool = False
) -> None:
"""Write a mesh to a file.
The writer is instantiated according to the type of the input mesh.
"""
import itk
mesh.UpdateOutputInformation()
# don't put that writer in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
writer = itk.MeshFileWriter[type(mesh)].New(
Input=mesh, FileName=f"{filename}", UseCompression=compression
)
auto_pipeline.current = tmp_auto_pipeline
writer.Update()
def meshread(
filename: fileiotype,
pixel_type: Optional["itkt.PixelTypes"] = None,
fallback_only: bool = False,
) -> "itkt.Mesh":
"""Read a mesh from a file and return an itk.Mesh.
The reader is instantiated with the mesh type of the mesh file if
`pixel_type` is not provided (default). The dimension of the mesh is
automatically found.
If `fallback_only` is set to `True`, `meshread()` will first try to
automatically deduce the image pixel_type, and only use the given
`pixel_type` if automatic deduction fails. Failures typically
happen if the pixel type is not supported (e.g. it is not currently
wrapped).
"""
import itk
if fallback_only:
if pixel_type is None:
raise Exception(
"pixel_type must be set when using the fallback_only option"
)
try:
return meshread(filename)
except (KeyError, itk.TemplateTypeError):
pass
TemplateReaderType = itk.MeshFileReader
io_filename = f"{filename}"
increase_dimension = False
kwargs = {"FileName": f"{filename}"}
if pixel_type:
meshIO = itk.MeshIOFactory.CreateMeshIO(
io_filename, itk.CommonEnums.IOFileMode_ReadMode
)
if not meshIO:
raise RuntimeError("No MeshIO is registered to handle the given file.")
meshIO.SetFileName(io_filename)
meshIO.ReadMeshInformation()
dimension = meshIO.GetPointDimension()
# Increase dimension if last dimension is not of size one.
if increase_dimension and meshIO.GetDimensions(dimension - 1) != 1:
dimension += 1
MeshType = itk.Mesh[pixel_type, dimension]
reader = TemplateReaderType[MeshType].New(**kwargs)
else:
reader = TemplateReaderType.New(**kwargs)
reader.Update()
return reader.GetOutput()
def transformread(filename: fileiotype) -> List["itkt.TransformBase"]:
"""Read an itk Transform file.
Parameters
----------
filename:
Path to the transform file (typically a .h5 file).
Returns
-------
A Python list containing the transforms in the file.
"""
import itk
reader = itk.TransformFileReaderTemplate[itk.D].New()
reader.SetFileName(f"{filename}")
reader.Update()
transforms = []
transform_list = reader.GetModifiableTransformList()
while not transform_list.empty():
transform = transform_list.pop()
transforms.append(itk.down_cast(transform))
transforms.reverse()
return transforms
def transformwrite(
transforms: List["itkt.TransformBase"],
filename: fileiotype,
compression: bool = False,
) -> None:
"""Write an itk Transform file.
Parameters
----------
transforms: list of itk.TransformBaseTemplate[itk.D]
Python list of the transforms to write.
filename:
Path to the transform file (typically a .h5 file).
compression:
Use compression, if the file format supports it.
"""
import itk
writer = itk.TransformFileWriterTemplate[itk.D].New()
writer.SetFileName(f"{filename}")
writer.SetUseCompression(compression)
for transform in transforms:
writer.AddTransform(transform)
writer.Update()
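# Hedged usage sketch (not part of the original module; commented out);
# "affine.h5" is a placeholder file name.
#
#   transforms = itk.transformread("affine.h5")   # list of down-cast transforms
#   itk.transformwrite(transforms, "copy.h5")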
def search(s: str, case_sensitive: bool = False) -> List[str]: # , fuzzy=True):
"""Search for a class name in the itk module."""
s = s.replace(" ", "")
if not case_sensitive:
s = s.lower()
import itk
names = sorted(dir(itk))
# exact match first
if case_sensitive:
res = [n for n in names if s == n]
else:
res = [n for n in names if s == n.lower()]
# then exact match inside the name
if case_sensitive:
res += [n for n in names if s in n and s != n]
else:
res += [n for n in names if s in n.lower() and s != n.lower()]
# if fuzzy:
# try:
# everything now requires editdist
# import editdist
# if case_sensitive:
# res.sort(key=lambda x: editdist.distance(x, s))
# else:
# res.sort(key=lambda x: (editdist.distance(x.lower(), s), x))
# except:
# pass
return res
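# Hedged usage sketch (not part of the original module; commented out); the
# result list is illustrative, not exhaustive.
#
#   itk.search("median")
#   # -> ['MedianImageFilter', 'MedianProjectionImageFilter', ...]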
def _snake_to_camel(keyword: str):
# Helpers for set_inputs snake case to CamelCase keyword argument conversion
_snake_underscore_re = re.compile("(_)([a-z0-9A-Z])")
def _underscore_upper(match_obj):
return match_obj.group(2).upper()
camel = keyword[0].upper()
if _snake_underscore_re.search(keyword[1:]):
return camel + _snake_underscore_re.sub(_underscore_upper, keyword[1:])
return camel + keyword[1:]
def set_inputs(
new_itk_object,
inargs: Optional[Sequence[Any]] = None,
inkargs: Optional[Dict[str, Any]] = None,
):
"""Set the inputs of the given objects, according to the non named or the
named parameters in args and kargs
This function tries to assign all the non named parameters in the input of
the new_itk_object
- the first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name
prefixed by 'Set'.
set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)
This is the function use in the enhanced New() method to manage the inputs.
It can be used to produce a similar behavior:
def SetInputs(self, *args, **kargs):
import itk
itk.set_inputs(self, *args, **kargs)
"""
# Fix bug with Mutable Default Arguments
# https://docs.python-guide.org/writing/gotchas/
args: List[Any] = inargs if inargs else []
kargs: Dict[str, Any] = inkargs if inkargs else {}
# try to get the images from the filters in args
args = [output(arg) for arg in args]
# args without name are filter used to set input image
#
# count SetInput calls to call SetInput, SetInput2, SetInput3, ...
    # useful with filters which take 2 inputs (or more) like SubtractImageFilter
# Ex: subtract image2.png to image1.png and save the result in result.png
# r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
# r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
# s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
# itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
setInputNb: int = -1
try:
for setInputNb, arg in enumerate(args):
methodName = "SetInput%i" % (setInputNb + 1)
if methodName in dir(new_itk_object):
# first try to use methods called SetInput1, SetInput2, ...
# those method should have more chances to work in case of
# multiple input types
getattr(new_itk_object, methodName)(arg)
else:
# no method called SetInput?
# try with the standard SetInput(nb, input)
new_itk_object.SetInput(setInputNb, arg)
except TypeError as e:
        # the exception has (at least) two possible reasons:
        # + the filter doesn't take the input number as first argument
        # + arg is an object of the wrong type
#
# if it's not the first input, re-raise the exception
if setInputNb != 0:
raise e
# it's the first input, try to use the SetInput() method without input
# number
new_itk_object.SetInput(args[0])
# but raise an exception if there is more than 1 argument
if len(args) > 1:
raise TypeError("Object accepts only 1 input.")
except AttributeError:
# There is no SetInput() method, try SetImage
# but before, check the number of inputs
if len(args) > 1:
raise TypeError("Object accepts only 1 input.")
methodList = ["SetImage", "SetInputImage"]
methodName = None
for m in methodList:
if m in dir(new_itk_object):
methodName = m
if methodName:
getattr(new_itk_object, methodName)(args[0])
else:
raise AttributeError("No method found to set the input.")
# named args : name is the function name, value is argument(s)
for attribName, value in kargs.items():
        # use Set as prefix. It allows using a shorter and more intuitive
# call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than
# with the full name
# (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
if attribName not in ["auto_progress", "template_parameters"]:
if attribName.islower():
attribName = _snake_to_camel(attribName)
attrib = getattr(new_itk_object, "Set" + attribName)
# Do not use try-except mechanism as this leads to
# segfaults. Instead limit the number of types that are
# tested. The list of tested type could maybe be replaced by
# a test that would check for iterables.
import itk
if type(value) in [list, tuple]:
try:
output_value = [itk.output(x) for x in value]
attrib(*output_value)
except Exception:
attrib(itk.output(value))
else:
attrib(itk.output(value))
class templated_class:
"""This class is used to mimic the behavior of the templated C++ classes.
It is used this way:
class CustomClass:
# class definition here
CustomClass = templated_class(CustomClass)
customObject = CustomClass[template, parameters].New()
The template parameters are passed to the custom class constructor as a
named parameter 'template_parameters' in a tuple.
The custom class may implement a static method
check_template_parameters(parameters) which should raise an exception if
the template parameters provided are not suitable to instantiate the custom
class.
"""
def __init__(self, cls) -> None:
"""cls is the custom class"""
self.__cls__ = cls
self.__templates__ = {}
def New(self, *args, **kargs):
"""Use the parameters to infer the types of the template parameters."""
# extract the types from the arguments to instantiate the class
import itk
types = tuple(class_(o) for o in args)
return self[types].New(*args, **kargs)
def __getitem__(self, template_parameters):
"""Return a pair class-template parameters ready to be instantiated.
The template parameters may be validated if the custom class provide
the static method check_template_parameters(parameters).
"""
if not isinstance(template_parameters, tuple):
template_parameters = (template_parameters,)
return templated_class.__templated_class_and_parameters__(
self, template_parameters
)
def check_template_parameters(self, template_parameters) -> None:
"""Check the template parameters passed in parameter."""
        # this method is there mainly to make it possible to reuse it in the
# custom class constructor after having used templated_class().
# Without that, the following example doesn't work:
#
# class CustomClass:
# def __init__(self, *args, **kargs):
# template_parameters = kargs["template_parameters"]
# CustomClass.check_template_parameters(template_parameters)
# other init stuff
# def check_template_parameters(template_parameters):
# check, really
# pass
# CustomClass = templated_class(CustomClass)
#
self.__cls__.check_template_parameters(template_parameters)
def add_template(self, name: str, params):
if not isinstance(params, list) and not isinstance(params, tuple):
params = (params,)
params = tuple(params)
val = self[params]
self.__templates__[params] = val
setattr(self, name, val)
def add_image_templates(self, *args) -> None:
import itk
if not args:
return
combinations = [[t] for t in args[0]]
for types in args[1:]:
temp = []
for t in types:
for c in combinations:
temp.append(c + [t])
combinations = temp
for d in itk.DIMS:
for c in combinations:
parameters = []
name = ""
for t in c:
parameters.append(itk.Image[t, d])
name += "I" + t.short_name + str(d)
self.add_template(name, tuple(parameters))
class __templated_class_and_parameters__:
"""Inner class used to store the pair class-template parameters ready
to instantiate.
"""
def __init__(self, l_templated_class, l_template_parameters) -> None:
self.__templated_class__ = l_templated_class
self.__template_parameters__ = l_template_parameters
if "check_template_parameters" in dir(l_templated_class.__cls__):
l_templated_class.__cls__.check_template_parameters(
l_template_parameters
)
def New(self, *args, **kargs):
"""A New() method to mimic the ITK default behavior, even if the
class doesn't provide any New() method.
"""
kargs["template_parameters"] = self.__template_parameters__
if "New" in dir(self.__templated_class__.__cls__):
obj = self.__templated_class__.__cls__.New(*args, **kargs)
else:
obj = self.__templated_class__.__cls__(*args, **kargs)
setattr(obj, "__template_parameters__", self.__template_parameters__)
setattr(obj, "__templated_class__", self.__templated_class__)
return obj
def __call__(self, *args, **kargs):
return self.New(*args, **kargs)
def keys(self):
return self.__templates__.keys()
def values(self):
return list(self.__templates__.values())
def items(self):
return list(self.__templates__.items())
# everything after this comment is for dict interface
# and is a copy/paste from DictMixin
# only methods to edit dictionary are not there
    def __iter__(self):
        yield from self.keys()
    def has_key(self, key: str):
        return key in self.__templates__
    def __contains__(self, key: str):
        return key in self.__templates__
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        return self.__templates__.get(key, default)
def __len__(self):
return len(self.keys())
class pipeline:
"""A convenient class to store the reference to the filters of a pipeline
With this class, a method can create a pipeline of several filters and
return it without losing the references to the filters in this pipeline.
    The pipeline object acts almost like a filter (it has a GetOutput() method)
and thus can be simply integrated in another pipeline.
"""
def __init__(self, *args, **kargs) -> None:
self.clear()
self.input = None
self.filters: List[Any] = []
set_inputs(self, args, kargs)
def connect(self, l_filter) -> None:
"""Connect a new l_filter to the pipeline
The output of the first l_filter will be used as the input of this
one and the l_filter passed as parameter will be added to the list
"""
if self.GetOutput() is not None:
set_inputs(l_filter, [self.GetOutput()])
self.append(l_filter)
def append(self, l_filter) -> None:
"""Add a new l_filter to the pipeline
The new l_filter will not be connected. The user must connect it.
"""
self.filters.append(l_filter)
def clear(self) -> None:
"""Clear the filter list"""
self.filters = []
def GetOutput(self, l_index: int = 0):
"""Return the output of the pipeline
If another output is needed, use
pipeline.filters[-1].GetAnotherOutput() instead of this method,
subclass pipeline to implement another GetOutput() method, or use
expose()
"""
if len(self.filters) == 0:
return self.GetInput()
else:
l_filter = self.filters[-1]
if hasattr(l_filter, "__getitem__"):
return l_filter[l_index]
try:
return l_filter.GetOutput(l_index)
except Exception:
if l_index == 0:
return l_filter.GetOutput()
else:
raise ValueError("Index can only be 0 on that object")
def GetNumberOfOutputs(self) -> int:
"""Return the number of outputs"""
if len(self.filters) == 0:
return 1
else:
return self.filters[-1].GetNumberOfOutputs()
    def SetInput(self, l_input) -> None:
        """Set the input of the pipeline"""
        if len(self.filters) != 0:
            set_inputs(self.filters[0], [l_input])
        self.input = l_input
def GetInput(self):
"""Get the input of the pipeline"""
return self.input
def Update(self):
"""Update the pipeline"""
if len(self.filters) > 0:
return self.filters[-1].Update()
def UpdateLargestPossibleRegion(self):
"""Update the pipeline"""
if len(self.filters) > 0:
return self.filters[-1].UpdateLargestPossibleRegion()
def UpdateOutputInformation(self) -> None:
if "UpdateOutputInformation" in dir(self.filters[-1]):
self.filters[-1].UpdateOutputInformation()
else:
self.Update()
def __len__(self):
return self.GetNumberOfOutputs()
def __getitem__(self, item):
return self.GetOutput(item)
def __call__(self, *args, **kargs):
set_inputs(self, args, kargs)
self.UpdateLargestPossibleRegion()
return self
def expose(self, name: str, new_name: Optional[str] = None, position: int = -1):
"""Expose an attribute from a filter of the mini-pipeline.
Once called, the pipeline instance has a new Set/Get set of methods to
access directly the corresponding method of one of the filter of the
pipeline.
Ex: p.expose( "Radius" )
p.SetRadius( 5 )
p.GetRadius( 5 )
By default, the attribute usable on the pipeline instance has the same
name than the one of the filter, but it can be changed by providing a
value to new_name.
The last filter of the pipeline is used by default, but another one may
be used by giving its position.
Ex: p.expose("Radius", "SmoothingNeighborhood", 2)
p.GetSmoothingNeighborhood()
"""
if new_name is None:
new_name = name
src = self.filters[position]
ok: bool = False
set_name: str = "Set" + name
if set_name in dir(src):
setattr(self, "Set" + new_name, getattr(src, set_name))
ok = True
get_name = "Get" + name
if get_name in dir(src):
setattr(self, "Get" + new_name, getattr(src, get_name))
ok = True
if not ok:
raise RuntimeError(f"No attribute {name} at position {position}.")
class auto_pipeline(pipeline):
current = None
def __init__(self, *args, **kargs) -> None:
pipeline.__init__(self, *args, **kargs)
self.Start()
def Start(self) -> None:
auto_pipeline.current = self
@staticmethod
def Stop() -> None:
auto_pipeline.current = None
def down_cast(obj: "itkt.LightObject"):
"""Down cast an itk.LightObject (or a object of a subclass) to its most
specialized type.
"""
import itk
from itk.support.template_class import itkTemplate
class_name: str = obj.GetNameOfClass()
t = getattr(itk, class_name)
if isinstance(t, itkTemplate):
for c in t.values():
try:
return c.cast(obj)
except Exception:
# fail silently for now
pass
raise RuntimeError(f"Can't downcast to a specialization of {class_name}")
else:
return t.cast(obj)
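# Hedged usage sketch (not part of the original module; commented out):
# transformread() above uses down_cast() to turn base-class transforms read
# from disk into their concrete types, e.g.
#
#   base = transform_list.pop()       # itk.TransformBaseTemplate[itk.D]
#   concrete = itk.down_cast(base)    # e.g. an itk.AffineTransform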
def attribute_list(inputobject, name: str):
"""Returns a list of the specified attributes for the objects in the image.
    inputobject: the input LabelImage
    name: the attribute name
"""
import itk
img = itk.output(inputobject)
relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
img, Attribute=name, ReverseOrdering=True, InPlace=False
)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l_list: List[Any] = []
    # required because range is overloaded in this module
    from builtins import range
for i in range(1, r.GetNumberOfLabelObjects() + 1):
l_list.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
return l_list
def attributes_list(inputObject, names: List[str]):
"""Returns a list of the specified attributes for the objects in the image.
    inputObject: the input LabelImage
    names: the attribute names
"""
import itk
img = itk.output(inputObject)
relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
img, Attribute=names[0], ReverseOrdering=True, InPlace=False
)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l_list: List[Any] = []
# required because range is overloaded in this module
from builtins import range
for i in range(1, r.GetNumberOfLabelObjects() + 1):
attrs = []
for name in names:
attrs.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
l_list.append(tuple(attrs))
return l_list
def attribute_dict(inputobject, name: str):
"""Returns a dict with the attribute values in keys and a list of the
corresponding objects in value
    inputobject: the input LabelImage
    name: the name of the attribute
"""
import itk
img = itk.output(inputobject)
relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
img, Attribute=name, ReverseOrdering=True, InPlace=False
)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
d = {}
# required because range is overloaded in this module
from builtins import range
for i in range(1, r.GetNumberOfLabelObjects() + 1):
lo = r.GetLabelObject(i)
v = lo.__getattribute__("Get" + name)()
l_list = d.get(v, [])
l_list.append(lo)
d[v] = l_list
return d
def number_of_objects(image_or_filter) -> int:
"""Returns the number of objets in the image.
img: the input LabelImage
"""
import itk
image_or_filter.UpdateLargestPossibleRegion()
img = itk.output(image_or_filter)
return img.GetNumberOfLabelObjects()
def ipython_kw_matches(text: str):
"""Match named ITK object's named parameters"""
import IPython
import itk
import re
import inspect
from itk.support import template_class
regexp = re.compile(
r"""
'.*?' | # single quoted strings or
".*?" | # double quoted strings or
\w+ | # identifier
\S # other characters
""",
re.VERBOSE | re.DOTALL,
)
ip = IPython.get_ipython()
if "." in text: # a parameter cannot be dotted
return []
# 1. Find the nearest identifier that comes before an unclosed
# parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
if ip.Completer.readline:
text_until_cursor = ip.Completer.readline.get_line_buffer()[
: ip.Completer.readline.get_endidx()
]
else:
# IPython >= 5.0.0, which is based on the Python Prompt Toolkit
text_until_cursor = ip.Completer.text_until_cursor
tokens = regexp.findall(text_until_cursor)
tokens.reverse()
iter_tokens = iter(tokens)
open_par = 0
for token in iter_tokens:
if token == ")":
open_par -= 1
elif token == "(":
open_par += 1
if open_par > 0:
# found the last unclosed parenthesis
break
else:
return []
# 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
ids = []
is_id = re.compile(r"\w+$").match
while True:
try:
            ids.append(next(iter_tokens))
            if not is_id(ids[-1]):
                ids.pop()
                break
            if next(iter_tokens) != ".":
break
except StopIteration:
break
# lookup the candidate callable matches either using global_matches
# or attr_matches for dotted names
if len(ids) == 1:
callable_matches = ip.Completer.global_matches(ids[0])
else:
callable_matches = ip.Completer.attr_matches(".".join(ids[::-1]))
arg_matches = []
for callable_match in callable_matches:
        # drop the .New at the end, so we can search in the class members
if callable_match.endswith(".New"):
callable_match = callable_match[:-4]
elif not re.findall("([A-Z])", callable_match): # True if snake case
# Split at the last '.' occurrence
split_name_parts = callable_match.split(".")
namespace = split_name_parts[:-1]
function_name = split_name_parts[-1]
# Find corresponding object name
object_name = _snake_to_camel(function_name)
# Check that this object actually exists
try:
object_callable_match = ".".join(namespace + [object_name])
eval(object_callable_match, ip.Completer.namespace)
# Reconstruct full object name
callable_match = object_callable_match
except AttributeError:
# callable_match is not a snake case function with a
# corresponding object.
pass
try:
l_object = eval(callable_match, ip.Completer.namespace)
if isinstance(l_object, template_class.itkTemplate):
# this is a template - lets grab the first entry to search for
# the methods
l_object = list(l_object.values())[0]
named_args = []
is_in: bool = isinstance(l_object, itk.LightObject)
if is_in or (
inspect.isclass(l_object) and issubclass(l_object, itk.LightObject)
):
named_args = [n[3:] for n in dir(l_object) if n.startswith("Set")]
except Exception as e:
print(e)
continue
for namedArg in named_args:
if namedArg.startswith(text):
arg_matches.append(f"{namedArg}=")
return arg_matches
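# Illustrative registration (a sketch mirroring the module-level hook at the
# bottom of this file):
# >>> import IPython
# >>> ip = IPython.get_ipython()
# >>> ip.Completer.matchers.insert(0, ipython_kw_matches)
# After this, typing e.g. `itk.ImageFileReader.New(FileN<TAB>` in an IPython
# session can complete to `FileName=`.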
def template(cl):
"""Return the template of a class (or of the class of an object) and
its parameters
template() returns a tuple with 2 elements:
- the first one is the itkTemplate object
- the second is a tuple containing the template parameters
"""
from itk.support.template_class import itkTemplateBase
return itkTemplateBase.__template_instantiations_object_to_name__[class_(cl)]
def ctype(s: str) -> "itkt.itkCType":
"""Return the c type corresponding to the string passed in parameter
The string can contain some extra spaces.
see also itkCType
"""
from itk.support.types import itkCType
ret = itkCType.GetCType(" ".join(s.split()))
if ret is None:
raise KeyError(f"Unrecognized C type '{s}'")
return ret
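# Illustrative usage (sketch): extra whitespace in the type name is tolerated
# >>> ctype("  unsigned   char ")  # returns the corresponding itkCType
# An unrecognized name raises KeyError, e.g. ctype("unsigned chair").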
def class_(obj):
"""Return a class from an object
Often in itk, the __class__ is not what the user is expecting.
class_() should do a better job
"""
import inspect
if inspect.isclass(obj):
# obj is already a class !
return obj
else:
return obj.__class__
def python_type(object_ref) -> str:
"""Returns the Python type name of an object
The Python name corresponding to the given instantiated object is printed.
This includes both the Python name and the parameters of the object. A user
can copy and paste the printed value to instantiate a new object of the
same type."""
from itk.support.template_class import itkTemplate
from itk.support.types import itkCType
def in_itk(name):
import itk
# Remove "itk::" and "std::" from template name.
# Only happens for ITK objects.
shortname: str = name.split("::")[-1]
shortname = shortname.split("itk")[-1]
namespace = itk
# A type cannot be part of ITK if its name was not modified above. This
# check avoids having an input of type `list` and return `itk.list` that
# also exists.
likely_itk: bool = shortname != name or name[:3] == "vnl"
if likely_itk and hasattr(namespace, shortname):
return namespace.__name__ + "." + shortname # Prepend name with 'itk.'
else:
return name
def recursive(l_obj, level: int):
try:
type_name, param_list = template(l_obj)
name = in_itk(type_name.__name__)
parameters = []
for t in param_list:
parameters.append(recursive(t, level + 1))
return name + "[" + ",".join(parameters) + "]"
except KeyError:
if isinstance(l_obj, itkCType): # Handles CTypes differently
return "itk." + l_obj.short_name
elif hasattr(l_obj, "__name__"):
# This should be where most ITK types end up.
return in_itk(l_obj.__name__)
elif (
not isinstance(l_obj, type)
and type(l_obj) != itkTemplate
and level != 0
):
# l_obj should actually be considered a value, not a type,
# or it is already an itkTemplate type.
# A value can be an integer that is a template parameter.
# This does not happen at the first level of the recursion
# as it is not possible that this object would be a template
# parameter. Checking the level `0` allows e.g. to find the
# type of an object that is a `list` or an `int`.
return str(l_obj)
else:
return in_itk(type(l_obj).__name__)
return recursive(object_ref, 0)
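# Illustrative usage (a sketch; the exact set of wrapped types depends on the
# build -- itk.Image[itk.F, 3] is assumed to be available):
# >>> import itk
# >>> python_type(itk.Image[itk.F, 3].New())
# 'itk.Image[itk.F,3]'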
class TemplateTypeError(TypeError):
def __init__(self, template_type, input_type):
def tuple_to_string_type(t):
if type(t) == tuple:
return ", ".join(python_type(x) for x in t)
else:
return python_type(t)
import itk
# Special case for ITK readers: Add extra information.
extra_eg: str = ""
if template_type in [
itk.ImageFileReader,
itk.ImageSeriesReader,
itk.MeshFileReader,
]:
extra_eg = """
or
e.g.: image = itk.imread(my_input_filename, itk.F)
"""
python_template_type = python_type(template_type)
python_input_type = tuple_to_string_type(input_type)
type_list = "\n".join([python_type(x[0]) for x in template_type.keys()])
eg_type = ", ".join([python_type(x) for x in list(template_type.keys())[0]])
msg: str = """{template_type} is not wrapped for input type `{input_type}`.
To limit the size of the package, only a limited number of
types are available in ITK Python. To print the supported
types, run the following command in your python environment:
{template_type}.GetTypes()
Possible solutions:
* If you are an application user:
** Convert your input image into a supported format (see below).
** Contact developer to report the issue.
* If you are an application developer, force input images to be
loaded in a supported pixel type.
e.g.: instance = {template_type}[{eg_type}].New(my_input){extra_eg}
* (Advanced) If you are an application developer, build ITK Python yourself and
turn `ON` the corresponding CMake option to wrap the pixel type or image
dimension you need. When configuring ITK with CMake, you can set
`ITK_WRAP_${{type}}` (replace ${{type}} with appropriate pixel type such as
`double`). If you need to support images with 4 or 5 dimensions, you can add
these dimensions to the list of dimensions in the CMake variable
`ITK_WRAP_IMAGE_DIMS`.
Supported input types:
{type_list}
""".format(
template_type=python_template_type,
input_type=python_input_type,
type_list=type_list,
eg_type=eg_type,
extra_eg=extra_eg,
)
TypeError.__init__(self, msg)
# install progress callback and custom completer if we are in ipython
# interpreter
try:
import itkConfig
import IPython
if IPython.get_ipython():
IPython.get_ipython().Completer.matchers.insert(0, ipython_kw_matches)
# some cleanup
del itkConfig, IPython
except (ImportError, AttributeError):
# fail silently
pass
|
BRAINSia/ITK
|
Wrapping/Generators/Python/itk/support/extras.py
|
Python
|
apache-2.0
| 67,567
|
[
"VTK"
] |
90d814650947e82faaebcdbc982019d32ae39c9bad97db275c13af463f588a1d
|
import unittest
from phevaluator.tables import NO_FLUSH_6
from .utils import BaseTestNoFlushTable
class TestNoFlush6Table(BaseTestNoFlushTable):
TOCOMPARE = NO_FLUSH_6
TABLE = [0] * len(TOCOMPARE)
VISIT = [0] * len(TOCOMPARE)
NUM_CARDS = 6
@classmethod
def setUpClass(cls):
super().setUpClass()
def test_noflush6_table(self):
self.assertListEqual(self.TABLE, self.TOCOMPARE)
if __name__ == "__main__":
unittest.main()
|
HenryRLee/PokerHandEvaluator
|
python/tests/table_tests/test_hashtable6.py
|
Python
|
apache-2.0
| 474
|
[
"VisIt"
] |
0dfff4779f31c7c3e9bb867bd1280e6133ee104962febdb167e80dc599ce14a5
|
# $Id$
#
# Copyright (C) 2007 by Greg Landrum
# All rights reserved
#
from rdkit import Chem
from rdkit import Geometry
class SkeletonPoint(object):
location = None
shapeMoments = None
shapeDirs = None
molFeatures = None
featmapFeatures = None
fracVol = 0.0
def __init__(self, *args, **kwargs):
self._initMemberData()
self.location = kwargs.get('location', None)
def _initMemberData(self):
self.shapeMoments = (0.0, ) * 3
self.shapeDirs = [None] * 3
self.molFeatures = []
self.featmapFeatures = []
class ShapeWithSkeleton(object):
grid = None
skelPts = None
def __init__(self, *args, **kwargs):
self._initMemberData()
def _initMemberData(self):
self.skelPts = []
class SubshapeShape(object):
shapes = None # a list of ShapeWithSkeleton objects at multiple resolutions (low to high)
featMap = None
keyFeat = None
def __init__(self, *args, **kwargs):
self._initMemberData()
def _initMemberData(self):
self.shapes = []
def _displaySubshapeSkelPt(viewer, skelPt, cgoNm, color):
viewer.server.sphere(tuple(skelPt.location), .5, color, cgoNm)
if hasattr(skelPt, 'shapeDirs'):
momBeg = skelPt.location - skelPt.shapeDirs[0]
momEnd = skelPt.location + skelPt.shapeDirs[0]
viewer.server.cylinder(tuple(momBeg), tuple(momEnd), .1, color, cgoNm)
def DisplaySubshapeSkeleton(viewer, shape, name, color=(1, 0, 1), colorByOrder=False):
orderColors = ((1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (1, 0, 1), (0, 1, 1))
cgoNm = '%s-skeleton' % name
viewer.server.resetCGO(cgoNm)
for i, pt in enumerate(shape.skelPts):
if colorByOrder:
color = orderColors[i % len(orderColors)]
_displaySubshapeSkelPt(viewer, pt, cgoNm, color)
def DisplaySubshape(viewer, shape, name, showSkelPts=True, color=(1, 0, 1)):
from rdkit import Geometry
import os, tempfile
fName = tempfile.mktemp('.grd')
Geometry.WriteGridToFile(shape.grid, fName)
viewer.server.loadSurface(fName, name, '', 2.5)
if showSkelPts:
DisplaySubshapeSkeleton(viewer, shape, name, color)
# On Windows, the file cannot be deleted if the viewer still has the file open.
# Pause for a moment, to give the viewer a chance, then try again.
try:
os.unlink(fName)
except Exception:
import time
time.sleep(.5)
try:
os.unlink(fName)
except Exception:
# Fall back to the default of letting the system clean up the temporary directory.
pass
|
jandom/rdkit
|
rdkit/Chem/Subshape/SubshapeObjects.py
|
Python
|
bsd-3-clause
| 2,462
|
[
"RDKit"
] |
bd4ea03ae164bc3e2d9859aaa920d799de9833e82029c0168d2a61313c143fc3
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-return-doc, invalid-unary-operand-type
"""Module for builtin continuous pulse functions."""
import functools
from typing import Union, Tuple, Optional
import numpy as np
def constant(times: np.ndarray, amp: complex) -> np.ndarray:
"""Continuous constant pulse.
Args:
times: Times to output pulse for.
amp: Complex pulse amplitude.
"""
return np.full(len(times), amp, dtype=np.complex_)
def zero(times: np.ndarray) -> np.ndarray:
"""Continuous zero pulse.
Args:
times: Times to output pulse for.
"""
return constant(times, 0)
def square(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:
"""Continuous square wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt.
phase: Pulse phase.
"""
x = times/period+phase/np.pi
return amp*(2*(2*np.floor(x) - np.floor(2*x)) + 1).astype(np.complex_)
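# Quick numerical check (an illustrative sketch): with amp=1 and period=4 the
# wave is +1 on the first half-period and -1 on the second.
# >>> import numpy as np
# >>> square(np.array([0., 1., 2., 3.]), amp=1, period=4).real
# array([ 1.,  1., -1., -1.])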
def sawtooth(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:
"""Continuous sawtooth wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt.
phase: Pulse phase.
"""
x = times/period+phase/np.pi
return amp*2*(x-np.floor(1/2+x)).astype(np.complex_)
def triangle(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:
"""Continuous triangle wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt.
phase: Pulse phase.
"""
return amp*(-2*np.abs(sawtooth(times, 1, period, (phase-np.pi/2)/2)) + 1).astype(np.complex_)
def cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
"""Continuous cosine wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt.
phase: Pulse phase.
"""
return amp*np.cos(2*np.pi*freq*times+phase).astype(np.complex_)
def sin(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
"""Continuous cosine wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt.
phase: Pulse phase.
"""
return amp*np.sin(2*np.pi*freq*times+phase).astype(np.complex_)
def _fix_gaussian_width(gaussian_samples, amp: float, center: float, sigma: float,
zeroed_width: Optional[float] = None, rescale_amp: bool = False,
ret_scale_factor: bool = False) -> np.ndarray:
r"""Enforce that the supplied gaussian pulse is zeroed at a specific width.
This is achieved by subtracting $\Omega_g(center \pm zeroed_width/2)$ from all samples.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
zeroed_width: Subtract baseline from gaussian pulses to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a gaussian pulse. If unsupplied,
defaults to $2*(center+1)$ such that the samples are zero at $\Omega_g(-1)$.
rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will
be rescaled so that $\Omega_g(center)-\Omega_g(center\pm zeroed_width/2)=amp$.
ret_scale_factor: Return amplitude scale factor.
"""
if zeroed_width is None:
zeroed_width = 2*(center+1)
zero_offset = gaussian(np.array([-zeroed_width/2]), amp, center, sigma)
gaussian_samples -= zero_offset
amp_scale_factor = 1.
if rescale_amp:
amp_scale_factor = amp/(amp-zero_offset) if amp-zero_offset != 0 else 1.
gaussian_samples *= amp_scale_factor
if ret_scale_factor:
return gaussian_samples, amp_scale_factor
return gaussian_samples
def gaussian(times: np.ndarray, amp: complex, center: float, sigma: float,
zeroed_width: Optional[float] = None, rescale_amp: bool = False,
ret_x: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
r"""Continuous unnormalized gaussian pulse.
Integrated area under curve is $\Omega_g(amp, sigma) = amp \times np.sqrt(2\pi \sigma^2)$
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`. If `zeroed_width` is set pulse amplitude at center
will be $amp-\Omega_g(center\pm zeroed_width/2)$ unless `rescale_amp` is set,
in which case all samples will be rescaled such that the center
amplitude will be `amp`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
zeroed_width: Subtract baseline from gaussian pulses to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a gaussian pulse.
rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will
be rescaled so that $\Omega_g(center)-\Omega_g(center\pm zeroed_width/2)=amp$.
ret_x: Return centered and standard deviation normalized pulse location.
$x=(times-center)/sigma$.
"""
times = np.asarray(times, dtype=np.complex_)
x = (times-center)/sigma
gauss = amp*np.exp(-x**2/2).astype(np.complex_)
if zeroed_width is not None:
gauss = _fix_gaussian_width(gauss, amp=amp, center=center, sigma=sigma,
zeroed_width=zeroed_width, rescale_amp=rescale_amp)
if ret_x:
return gauss, x
return gauss
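# Illustrative usage (sketch): sample a gaussian envelope and inspect shape
# >>> import numpy as np
# >>> times = np.linspace(0, 10, 100)
# >>> samples = gaussian(times, amp=0.5, center=5., sigma=1.5)
# >>> samples.shape
# (100,)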
def gaussian_deriv(times: np.ndarray, amp: complex, center: float, sigma: float,
ret_gaussian: bool = False) -> np.ndarray:
"""Continuous unnormalized gaussian derivative pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
ret_gaussian: Return gaussian with which derivative was taken with.
"""
gauss, x = gaussian(times, amp=amp, center=center, sigma=sigma, ret_x=True)
gauss_deriv = -x/sigma*gauss
if ret_gaussian:
return gauss_deriv, gauss
return gauss_deriv
def sech_fn(x, *args, **kwargs):
r"""Hyperbolic secant function"""
return 1.0 / np.cosh(x, *args, **kwargs)
def sech(times: np.ndarray, amp: complex, center: float, sigma: float,
ret_x: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
r"""Continuous unnormalized sech pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
ret_x: Return centered and standard deviation normalized pulse location.
$x=(times-center)/sigma$.
"""
times = np.asarray(times, dtype=np.complex_)
x = (times-center)/sigma
sech_out = amp*sech_fn(x).astype(np.complex_)
if ret_x:
return sech_out, x
return sech_out
def sech_deriv(times: np.ndarray, amp: complex, center: float, sigma: float,
ret_sech: bool = False) -> np.ndarray:
"""Continuous unnormalized sech derivative pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
ret_sech: Return sech with which derivative was taken with.
"""
sech_out, x = sech(times, amp=amp, center=center, sigma=sigma, ret_x=True)
sech_out_deriv = - sech_out * np.tanh(x) / sigma
if ret_sech:
return sech_out_deriv, sech_out
return sech_out_deriv
def gaussian_square(times: np.ndarray, amp: complex, center: float, width: float,
sigma: float, zeroed_width: Optional[float] = None) -> np.ndarray:
r"""Continuous gaussian square pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude.
center: Center of the square pulse component.
width: Width of the square pulse component.
sigma: Width (standard deviation) of gaussian rise/fall portion of the pulse.
zeroed_width: Subtract baseline of gaussian square pulse
to enforce $\Omega_{square}(center \pm zeroed_width/2)=0$.
"""
square_start = center-width/2
square_stop = center+width/2
if zeroed_width:
zeroed_width = min(width, zeroed_width)
gauss_zeroed_width = zeroed_width-width
else:
gauss_zeroed_width = None
funclist = [functools.partial(gaussian, amp=amp, center=square_start, sigma=sigma,
zeroed_width=gauss_zeroed_width, rescale_amp=True),
functools.partial(gaussian, amp=amp, center=square_stop, sigma=sigma,
zeroed_width=gauss_zeroed_width, rescale_amp=True),
functools.partial(constant, amp=amp)]
condlist = [times <= square_start, times >= square_stop]
return np.piecewise(times.astype(np.complex_), condlist, funclist)
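# Illustrative usage (sketch): a flat top of width 4 centered at t=10 with
# gaussian rise/fall of sigma=1; samples inside the flat top equal `amp`.
# >>> import numpy as np
# >>> times = np.linspace(0, 20, 201)
# >>> samples = gaussian_square(times, amp=1., center=10., width=4., sigma=1.)
# >>> complex(samples[100])  # t = 10, inside the flat top
# (1+0j)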
def drag(times: np.ndarray, amp: complex, center: float, sigma: float, beta: float,
zeroed_width: Optional[float] = None, rescale_amp: bool = False) -> np.ndarray:
r"""Continuous Y-only correction DRAG pulse for standard nonlinear oscillator (SNO) [1].
[1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K.
Analytic control methods for high-fidelity unitary operations
in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011).
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
beta: Y correction amplitude. For the SNO this is $\beta=-\frac{\lambda_1^2}{4\Delta_2}$,
where $\lambda_1$ is the relative coupling strength between the first and second
excited states and $\Delta_2$ is the detuning between the respective excited states.
zeroed_width: Subtract baseline from gaussian pulses to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a gaussian pulse.
rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will
be rescaled so that $\Omega_g(center)-\Omega_g(center\pm zeroed_width/2)=amp$.
"""
gauss_deriv, gauss = gaussian_deriv(times, amp=amp, center=center, sigma=sigma,
ret_gaussian=True)
if zeroed_width is not None:
gauss, scale_factor = _fix_gaussian_width(gauss, amp=amp, center=center, sigma=sigma,
zeroed_width=zeroed_width,
rescale_amp=rescale_amp,
ret_scale_factor=True)
gauss_deriv *= scale_factor
return gauss + 1j*beta*gauss_deriv
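# Quick consistency check (an illustrative sketch): with no zeroed_width the
# imaginary part of the DRAG pulse is beta times the gaussian derivative.
# >>> import numpy as np
# >>> times = np.linspace(0, 10, 101)
# >>> samples = drag(times, amp=0.8, center=5., sigma=1., beta=0.3)
# >>> np.allclose(samples.imag, 0.3 * gaussian_deriv(times, 0.8, 5., 1.))
# True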
|
QISKit/qiskit-sdk-py
|
qiskit/pulse/pulse_lib/continuous.py
|
Python
|
apache-2.0
| 11,951
|
[
"Gaussian"
] |
20b40a131ff71625a8083944d485b3e478345214302db4b997cd597a71e6c6aa
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example highlighting the ability to mix different optimizers in different
layers, or different components of the same layer.
"""
from neon.data import ArrayIterator, load_mnist
from neon.initializers import Gaussian, Constant
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, RMSProp
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
(X_train, y_train), (X_test, y_test), nclass = load_mnist(args.data_dir)
train_set = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
valid_set = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))
# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)
# initialize model
layers = []
layers.append(Affine(nout=100, init=init_norm, bias=Constant(0),
activation=Rectlin()))
layers.append(Affine(nout=10, init=init_norm, bias=Constant(0),
activation=Logistic(shortcut=True),
name='special_linear'))
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
mlp = Model(layers=layers)
# fit and validate
optimizer_one = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
optimizer_two = RMSProp()
# all bias layers and the last linear layer will use
# optimizer_two. all other layers will use optimizer_one.
opt = MultiOptimizer({'default': optimizer_one,
'Bias': optimizer_two,
'special_linear': optimizer_two})
# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
mlp.fit(train_set, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
|
dongjoon-hyun/neon
|
examples/multi_optimizer.py
|
Python
|
apache-2.0
| 2,657
|
[
"Gaussian"
] |
d8db16b96f9a1bdc79a7a5a3ffc9d6c5b428e2032017ca6f65729a97b5d3ac07
|
# Copyright 2013 anthony cantor
# This file is part of pyc.
#
# pyc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyc. If not, see <http://www.gnu.org/licenses/>.
import pyc_vis
from pyc_log import log
import ast
import copy
class ASTVisitor(pyc_vis.Visitor):
def __init__(self):
pyc_vis.Visitor.__init__(self)
self.pass_fields = False
def default_accumulator(self):
return set([])
def default_accumulate(self, current, output):
if output is None:
return current
return current | output
def default(self, node, *args, **kwargs):
result = self.default_accumulator()
if isinstance(node, ast.AST):
result = self.default_accumulate(result, self.default_ast(node, *args, **kwargs))
for (fld, value) in ast.iter_fields(node):
#print "%s => %s" % (fld, value.__class__.__name__)
if isinstance(value, list):
for i in range(0, len(value) ):
if self.pass_fields:
kwargs["field"] = fld + ("[%d]" % i)
result = self.default_accumulate(
result,
pyc_vis.visit(self, value[i], *args, **kwargs)
)
else:
if self.pass_fields:
kwargs["field"] = fld
result = self.default_accumulate(
result,
pyc_vis.visit(self, value, *args, **kwargs)
)
else:
#print "non ast:"
result = self.default_non_ast(node, *args, **kwargs)
return result
class ASTSearcher(ASTVisitor):
def default_ast(self, node, *args, **kwargs):
return set([])
def default_non_ast(self, node, *args, **kwargs):
return set([])
class ASTTxformer(pyc_vis.Visitor):
def __init__(self):
pyc_vis.Visitor.__init__(self)
def default_accumulator(self):
return None
def default_accumulate(self, current, output):
return (output, None)
def default(self, node, *args, **kwargs):
result = self.default_accumulator()
#print "%s" % node.__class__.__name__
new_node = node.__class__()
for field, old_value in ast.iter_fields(node):
#print "%s => %s" % (field, old_value.__class__.__name__)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, ast.AST):
value = pyc_vis.visit(self, value, *args, **kwargs)
(value, result) = self.default_accumulate(result, value)
if value is None:
continue
elif not isinstance(value, ast.AST):
if value.__class__ not in set([list, tuple]):
raise Exception("didnt expect returned value of (%s) %r" % (value.__class__.__name__, value) )
new_values.extend(value)
continue
new_values.append(value)
setattr(new_node, field, new_values)
elif isinstance(old_value, ast.AST):
new_child = pyc_vis.visit(self, old_value, *args, **kwargs)
(new_child, result) = self.default_accumulate(result, new_child)
if new_child is not None:
setattr(new_node, field, new_child)
elif isinstance(old_value, int) \
or isinstance(old_value, str) \
or old_value is None:
setattr(new_node, field, old_value)
else:
raise Exception(
"didnt expect to copy field %r with class %r in node %s" % (
old_value,
old_value.__class__,
ast.dump(node)
)
)
if result is None:
return new_node
else:
return (new_node, result)
def names(node):
class NameFinder(ASTSearcher):
def visit_Name(self, node, *args, **kwargs):
return set([node.id])
v = NameFinder()
#v.log = lambda s: log("NameFinder : %s" % s)
return pyc_vis.walk(v, node)
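# Illustrative usage (a sketch, assuming pyc_vis.walk drives the visitor over
# the tree as elsewhere in this module):
# >>> import ast
# >>> sorted(names(ast.parse("x = y + z")))
# ['x', 'y', 'z']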
|
cantora/pyc
|
pyc_astvisitor.py
|
Python
|
gpl-3.0
| 3,969
|
[
"VisIt"
] |
07788500b67ff8bc247dc38dd334770313494d890ac36f78436a0d5bb422e058
|
import pysam
import sys
import Pyrex
import os
import math
BamFile = sys.argv[1]
samFile = pysam.Samfile(BamFile,"rb")
AllMapped = samFile.mapped
headerOriginal = samFile.header
Range1C = 0
Range2C = 0
Range3C = 0
Range4C = 0
Range5C = 0
count = 0
outfileRange1 = pysam.Samfile(str(BamFile)+"Range1.bam","wb",header=headerOriginal)
outfileRange2 = pysam.Samfile(str(BamFile)+"Range2.bam","wb",header=headerOriginal)
outfileRange3 = pysam.Samfile(str(BamFile)+"Range3.bam","wb",header=headerOriginal)
outfileRange4 = pysam.Samfile(str(BamFile)+"Range4.bam","wb",header=headerOriginal)
outfileRange5 = pysam.Samfile(str(BamFile)+"Range5.bam","wb",header=headerOriginal)
for alignedread in samFile.fetch():
if math.fabs(alignedread.isize) < 30:
outfileRange1.write(alignedread)
Range1C += 1
if (math.fabs(alignedread.isize) >= 30) & (math.fabs(alignedread.isize) < 80):
outfileRange2.write(alignedread)
Range2C += 1
if (math.fabs(alignedread.isize) >= 80) & (math.fabs(alignedread.isize) < 110):
outfileRange3.write(alignedread)
Range3C += 1
if (math.fabs(alignedread.isize) >= 110) & (math.fabs(alignedread.isize) < 140):
outfileRange4.write(alignedread)
Range4C += 1
if (math.fabs(alignedread.isize) >= 140):
outfileRange5.write(alignedread)
Range5C += 1
count += 1
outfileRange1.close()
outfileRange2.close()
outfileRange3.close()
outfileRange4.close()
outfileRange5.close()
pysam.index(str(BamFile)+"Range1.bam")
pysam.index(str(BamFile)+"Range2.bam")
pysam.index(str(BamFile)+"Range3.bam")
pysam.index(str(BamFile)+"Range4.bam")
pysam.index(str(BamFile)+"Range5.bam")
samFile.close()
fileLog = open(str(BamFile)+"_fileLog.log","wb")
fileLog.write(str(Range1C)+"\n"+str(Range2C)+"\n"+str(Range3C)+"\n"+str(Range4C)+"\n"+str(Range5C)+"\n")
fileLog.close()
|
hjanime/mrcchip
|
CountIszie.py
|
Python
|
gpl-2.0
| 1,781
|
[
"pysam"
] |
dfbcdb5b3fedcd4306c3d0ff0f53800d02645c208a1cd5898443ac7a44ed7a82
|
# -*- coding:Utf-8 -*-
import numpy as np
from traits.api import HasTraits, Instance, Array, on_trait_change
from traitsui.api import View, Item, HGroup, Group
from tvtk.api import tvtk
from tvtk.pyface.scene import Scene
from mayavi import mlab
from mayavi.core.api import PipelineBase, Source
from mayavi.core.ui.api import SceneEditor, MayaviScene, MlabSceneModel
class VolumeSlicer(HasTraits):
""" volume slicer
adapted from >http://docs.enthought.com/mayavi/mayavi/auto/example_volume_slicer.html
Examples
--------
>>> x, y, z = np.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
>>> data = np.sin(3*x)/x + 0.05*z**2 + np.cos(3*y)
>>> m = VolumeSlicer(data=data)
>>> m.configure_traits()
"""
# The data to plot
data = Array()
# The 4 views displayed
scene3d = Instance(MlabSceneModel, ())
scene_x = Instance(MlabSceneModel, ())
scene_y = Instance(MlabSceneModel, ())
scene_z = Instance(MlabSceneModel, ())
# The data source
data_src3d = Instance(Source)
# The image plane widgets of the 3D scene
ipw_3d_x = Instance(PipelineBase)
ipw_3d_y = Instance(PipelineBase)
ipw_3d_z = Instance(PipelineBase)
_axis_names = dict(x=0, y=1, z=2)
#---------------------------------------------------------------------------
def __init__(self, **traits):
super(VolumeSlicer, self).__init__(**traits)
# Force the creation of the image_plane_widgets:
self.ipw_3d_x
self.ipw_3d_y
self.ipw_3d_z
#---------------------------------------------------------------------------
# Default values
#---------------------------------------------------------------------------
def _data_src3d_default(self):
return mlab.pipeline.scalar_field(self.data,
figure=self.scene3d.mayavi_scene)
def make_ipw_3d(self, axis_name):
ipw = mlab.pipeline.image_plane_widget(self.data_src3d,
figure=self.scene3d.mayavi_scene,
plane_orientation='%s_axes' % axis_name)
return ipw
def _ipw_3d_x_default(self):
return self.make_ipw_3d('x')
def _ipw_3d_y_default(self):
return self.make_ipw_3d('y')
def _ipw_3d_z_default(self):
return self.make_ipw_3d('z')
#---------------------------------------------------------------------------
# Scene activation callbaks
#---------------------------------------------------------------------------
@on_trait_change('scene3d.activated')
def display_scene3d(self):
outline = mlab.pipeline.outline(self.data_src3d,
figure=self.scene3d.mayavi_scene,
)
self.scene3d.mlab.view(40, 50)
# Interaction properties can only be changed after the scene
# has been created, and thus the interactor exists
for ipw in (self.ipw_3d_x, self.ipw_3d_y, self.ipw_3d_z):
# Turn the interaction off
ipw.ipw.interaction = 0
self.scene3d.scene.background = (0, 0, 0)
# Keep the view always pointing up
self.scene3d.scene.interactor.interactor_style = \
tvtk.InteractorStyleTerrain()
def make_side_view(self, axis_name):
scene = getattr(self, 'scene_%s' % axis_name)
# To avoid copying the data, we take a reference to the
# raw VTK dataset, and pass it on to mlab. Mlab will create
# a Mayavi source from the VTK without copying it.
# We have to specify the figure so that the data gets
# added on the figure we are interested in.
outline = mlab.pipeline.outline(
self.data_src3d.mlab_source.dataset,
figure=scene.mayavi_scene,
)
ipw = mlab.pipeline.image_plane_widget(
outline,
plane_orientation='%s_axes' % axis_name)
setattr(self, 'ipw_%s' % axis_name, ipw)
# Synchronize positions between the corresponding image plane
# widgets on different views.
ipw.ipw.sync_trait('slice_position',
getattr(self, 'ipw_3d_%s'% axis_name).ipw)
# Make left-clicking create a crosshair
ipw.ipw.left_button_action = 0
# Add a callback on the image plane widget interaction to
# move the others
def move_view(obj, evt):
position = obj.GetCurrentCursorPosition()
for other_axis, axis_number in self._axis_names.items():
if other_axis == axis_name:
continue
ipw3d = getattr(self, 'ipw_3d_%s' % other_axis)
ipw3d.ipw.slice_position = position[axis_number]
ipw.ipw.add_observer('InteractionEvent', move_view)
ipw.ipw.add_observer('StartInteractionEvent', move_view)
# Center the image plane widget
ipw.ipw.slice_position = 0.5*self.data.shape[
self._axis_names[axis_name]]
# Position the view for the scene
views = dict(x=( 0, 90),
y=(90, 90),
z=( 0, 0),
)
scene.mlab.view(*views[axis_name])
# 2D interaction: only pan and zoom
scene.scene.interactor.interactor_style = \
tvtk.InteractorStyleImage()
scene.scene.background = (0, 0, 0)
@on_trait_change('scene_x.activated')
def display_scene_x(self):
return self.make_side_view('x')
@on_trait_change('scene_y.activated')
def display_scene_y(self):
return self.make_side_view('y')
@on_trait_change('scene_z.activated')
def display_scene_z(self):
return self.make_side_view('z')
#---------------------------------------------------------------------------
# The layout of the dialog created
#---------------------------------------------------------------------------
view = View(HGroup(
Group(
Item('scene_y',
editor=SceneEditor(scene_class=Scene),
height=250, width=300),
Item('scene_z',
editor=SceneEditor(scene_class=Scene),
height=250, width=300),
show_labels=False,
),
Group(
Item('scene_x',
editor=SceneEditor(scene_class=Scene),
height=250, width=300),
Item('scene3d',
editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300),
show_labels=False,
),
),
resizable=True,
title='Volume Slicer',
)
def savefig(filename,mlabview=[],magnification = 3):
"""
Save mayavi figure
Parameters
----------
filename : str
name of the figure
mlabview : [] | (x, y, z, np.array([xroll, yroll, zroll]))
specify the camera view angle (see mayavi.mlab.view)
magnification : int
resolution of the generated image (see mayavi.mlab.savefig)
"""
import os
path = os.path.dirname(filename)
if not mlabview == []:
mlab.view(mlabview)
if os.path.exists(path):
mlab.savefig(filename+'.png',magnification=magnification )
else:
os.mkdir(path)
mlab.savefig(filename+'.png',magnification=magnification )
mlab.close()
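# Illustrative usage (a sketch; assumes an mlab figure already exists, and
# uses a hypothetical output path):
# >>> from mayavi import mlab
# >>> mlab.test_plot3d()
# >>> savefig('./maya_images/demo', magnification=2)  # writes demo.png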
def inotshow(filename,**kwargs):
""" IPython notebook show saved mayavi file
Parameters
-----------
doc : bool
if doc, image is supposed to be generated for documentation
See IPython.display.Image
"""
import os
defaults = {'mlabview':[],
'magnification':3,
'width':500,
'height':500,
'doc':False
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
mlabview=kwargs.pop('mlabview')
magnification=kwargs.pop('magnification')
doc=kwargs.pop('doc')
if doc:
path = os.path.join('..','maya_images')
else :
path = os.path.join('.','maya_images')
savefig(os.path.join(path,filename),mlabview,magnification)
from IPython.display import Image, display
inb = Image(filename=os.path.join(path, filename + '.png'), **kwargs)
display(inb)
|
buguen/pylayers
|
pylayers/util/mayautil.py
|
Python
|
lgpl-3.0
| 8,758
|
[
"Mayavi",
"VTK"
] |
39fe85407383df715e5a045fff299f880a7f063f05631eb87e378565a79b962b
|
#!/usr/bin/env python
#import argparse
#from glob import glob
#-s test_samples.txt
#-b /mnt/lfs2/hend6746/devils/reference/sarHar1.fa
#-k /mnt/lfs2/hend6746/taz/filtered_plink_files/export_data_150907/seventy.1-2.nodoubletons.noparalogs.noX.plink.oneperlocus.vcf
from os.path import join as jp
from os.path import abspath
import os
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', "--samples", help="Samples.txt file with sample ID.", required=True)
parser.add_argument('-b', "--bwaindex", help="Path to bwa index file.", required=True)
parser.add_argument('-k', "--knownsites", help="Path and fileName of filteredSNP.vcf.", required=True)
args = parser.parse_args()
#args = parser.parse_args('-s samples.txt -r /mnt/lfs2/hend6746/fox_cancer/0rawdata_test -b /mnt/lfs2/hend6746/wolves/reference/canfam31/canfam31.fa'.split())
VERBOSE=False
#Function definitions:
def log(txt, out):
if VERBOSE:
print(txt)
out.write(txt+'\n')
out.flush()
## Read in samples and put them in a list:
samples = []
for l in open(args.samples):
if len(l) > 1:
samples.append(l.split('/')[-1].replace('.fastq.1.gz', '').strip())
# Setup folders and paths variables:
bamFolder = abspath('02-Mapped')
variantFolder = abspath('03-Calls')
PBS_scripts = abspath('BQSR_PBS_scripts')
#rawdataDir = abspath(args.rawdata)
bwaIndex = abspath(args.bwaindex)
knownSites = abspath(args.knownsites)
gatkCall = 'java -jar /opt/modules/biology/gatk/3.5/bin/GenomeAnalysisTK.jar -R %s' % bwaIndex
os.system('mkdir -p %s' % bamFolder)
os.system('mkdir -p %s' % variantFolder)
os.system('mkdir -p %s' % PBS_scripts)
##### Run pipeline ###
for sample in samples:
print "Processing", sample, "....."
# Set up files:
logFile = jp(bamFolder, sample + '_BQSR.log')
logCommands = open(jp(PBS_scripts, sample + '_BQSR_commands.sh'), 'w')
#Setup for qsub
log('#!/bin/bash', logCommands)
log('#PBS -N %s' % sample, logCommands)
log('#PBS -j oe', logCommands)
log('#PBS -o %s_job.log' % sample, logCommands)
log('#PBS -m abe', logCommands)
log('#PBS -M shendri4@gmail.com', logCommands)
log('#PBS -q short', logCommands)
log('#PBS -l mem=100gb', logCommands)
log(". /usr/modules/init/bash", logCommands)
log("module load python/2.7.10", logCommands)
log("module load grc", logCommands)
####################
# BaseQualityRecalibration
# Step 1: First run of BQSR: BaseRecalibrator
####################
cmd = ' '.join([gatkCall, ' -nct 24 ',
' -T BaseRecalibrator ', ' -I ' + jp(bamFolder, sample) + '_markdup.bam', ' -knownSites ' + knownSites,
' -o ' + jp(bamFolder, sample) + '_BQSR.table', '>>', logFile, '2>&1'])
log(cmd, logCommands)
####################
# BaseQualityRecalibration
# Step 2: BaseRecalibrator on recalibrated files
####################
cmd = ' '.join([gatkCall, ' -nct 24 ',
' -T BaseRecalibrator ',
' -I ' + jp(bamFolder, sample) + '_markdup.bam',
' -knownSites ' + knownSites,
' -BQSR ' + jp(bamFolder, sample) + '_BQSR.table',
' -o ' + jp(bamFolder, sample) + '_BQSR_FIXED.table', '>>', logFile, '2>&1'])
log(cmd, logCommands)
####################
# BaseQualityRecalibration
# Step 3: PrintReads
# Apply recalibration table to original bam file
####################
cmd = ' '.join([gatkCall, ' -nct 24 ',
' -T PrintReads ',
' -I ' + jp(bamFolder, sample) + '_markdup.bam',
' -BQSR ' + jp(bamFolder, sample) + '_BQSR_FIXED.table',
' -o ' + jp(bamFolder, sample) + '_markdup_BQSR_FIXED.bam', '>>', logFile, '2>&1'])
log(cmd, logCommands)
logCommands.close()
|
shendri4/devil_wgs
|
BQSR_perInd_Devils.py
|
Python
|
apache-2.0
| 3,647
|
[
"BWA"
] |
4cf1c6c82ed65d70af61387fb85433141170158816c1f333a80f6705b89e121f
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from .utils import deprecated
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be a string or a float')
return s
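# Illustrative usage (a sketch; _cov is a private helper, not public API):
# >>> import numpy as np
# >>> X = np.array([[0., 0.], [1., 1.], [2., 0.]])
# >>> _cov(X)                 # empirical covariance (no shrinkage)
# >>> _cov(X, shrinkage=0.5)  # blend of empirical covariance and scaled identity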
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
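# Illustrative usage (sketch): per-class feature means
# >>> import numpy as np
# >>> _class_means(np.array([[1., 2.], [3., 4.], [5., 6.]]), np.array([0, 0, 1]))
# array([[2., 3.],
#        [5., 6.]])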
def _class_cov(X, y, priors, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
cov = np.zeros(shape=(X.shape[1], X.shape[1]))
for idx, group in enumerate(classes):
Xg = X[y == group, :]
cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage))
return cov
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False), used
only in 'svd' solver.
.. versionadded:: 0.17
tol : float, optional, (default 1.0e-4)
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
evecs /= np.linalg.norm(evecs, axis=0)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Get the maximum number of components
if self.n_components is None:
self._max_components = len(self.classes_) - 1
else:
self._max_components = min(len(self.classes_) - 1,
self.n_components)
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
store_covariance : boolean
If True the covariance matrices are computed and stored in the
`self.covariance_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Attributes
----------
covariance_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariance=False,
store_covariances=None, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariance=False,
tol=1.0e-4, store_covariances=None):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.store_covariance = store_covariance
self.tol = tol
@property
@deprecated("Attribute covariances_ was deprecated in version"
" 0.19 and will be removed in 0.21. Use "
"covariance_ instead")
def covariances_(self):
return self.covariance_
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('The number of classes has to be greater than'
' one; got %d class' % (n_classes))
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
store_covariance = self.store_covariance or self.store_covariances
if self.store_covariances:
warnings.warn("'store_covariances' was renamed to store_covariance"
" in version 0.19 and will be removed in 0.21.",
DeprecationWarning)
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariance:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariance:
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
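# u[k] is log|Sigma_k| (the sum of log-variances along the principal
# axes), so the value returned below is the per-class log-posterior up
# to a constant shared by all classes:
# -0.5 * (squared Mahalanobis distance + log-determinant) + log prior.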
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
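# subtracting the per-row maximum before exponentiating is the usual
# log-sum-exp stabilization: it prevents overflow in np.exp and cancels
# in the normalized ratio computed below.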
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
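# --- Illustrative usage sketch (an editorial addition, not part of the
# original scikit-learn module): a quick smoke test of the QDA machinery
# above, reusing the toy data from the class docstring. ``np`` is already
# imported at the top of this module.
if __name__ == "__main__":
    X_demo = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y_demo = np.array([1, 1, 1, 2, 2, 2])
    clf_demo = QuadraticDiscriminantAnalysis(reg_param=0.1).fit(X_demo, y_demo)
    print(clf_demo.predict([[-0.8, -1]]))        # expected: [1]
    print(clf_demo.predict_proba([[-0.8, -1]]))  # each row sums to 1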
|
BiaDarkia/scikit-learn
|
sklearn/discriminant_analysis.py
|
Python
|
bsd-3-clause
| 27,666
|
[
"Gaussian"
] |
a51095bc8a33f3e0fb24ede77e8c1cb13cbe2776a5037dddd637a535adafaf7c
|
from ase.lattice.cubic import FaceCenteredCubic
from gpaw import GPAW, PW
element = 'Pt'
atoms = FaceCenteredCubic(symbol=element,
size=(2, 2, 2),
directions=[[1, 1, 0],
[-1, 1, 0],
[0, 0, 1]])
del atoms[4]
del atoms[3]
del atoms[2]
h = 0.16
kpts = (8, 8, 4)
ecut = 800
xc1 = 'PBE'
atoms.calc = GPAW(mode=PW(ecut=ecut),
kpts=kpts,
xc=xc1)
ncpus = 8
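# Editorial note (not in the original test script): nothing above triggers
# the SCF cycle itself; with a working GPAW installation it would be driven
# through the standard ASE calculator interface, e.g.
#     energy = atoms.get_potential_energy()
# It is left commented out here because the plane-wave run is expensive and
# is presumably launched by the surrounding test harness.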
|
robwarm/gpaw-symm
|
gpaw/test/big/scf/trouble/pt13.py
|
Python
|
gpl-3.0
| 512
|
[
"ASE",
"GPAW"
] |
0044fd0ef36e5a8db0e9514bc47412f426530e85fe8f3d7f2a3e2ce320cd92e9
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes the ternary conditional operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.py2tf.pyct import templates
from tensorflow.contrib.py2tf.pyct import transformer
class IfExp(transformer.Base):
"""Canonicalizes all IfExp nodes into plain conditionals."""
def visit_IfExp(self, node):
template = """
py2tf_utils.run_cond(test, lambda: body, lambda: orelse)
"""
desugared_ifexp = templates.replace_as_expression(
template, test=node.test, body=node.body, orelse=node.orelse)
return desugared_ifexp
def transform(node, context):
"""Desugar IfExp nodes into plain conditionals.
Args:
node: an AST node to transform
context: a context object
Returns:
new_node: an AST with no IfExp nodes, only conditionals.
"""
node = IfExp(context).visit(node)
return node
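# Illustrative sketch (editorial addition, not part of the converter): the
# template above rewrites a ternary conditional into a runtime call, roughly
#
#     x = 1 if a > 0 else -1
#
# becoming
#
#     x = py2tf_utils.run_cond(a > 0, lambda: 1, lambda: -1)
#
# so that py2tf can later stage the test and both branches uniformly.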
|
Xeralux/tensorflow
|
tensorflow/contrib/py2tf/converters/ifexp.py
|
Python
|
apache-2.0
| 1,621
|
[
"VisIt"
] |
a7bf36ee9d385689d79c735f8d4e94e03e46c8777eb809824008cf75700be0c1
|
# -*- coding: utf-8 -*-
"""Compiles nodes from the parser into Python code."""
from collections import namedtuple
from functools import update_wrapper
from itertools import chain
from keyword import iskeyword as is_python_keyword
from markupsafe import escape
from markupsafe import Markup
from . import nodes
from ._compat import imap
from ._compat import iteritems
from ._compat import izip
from ._compat import NativeStringIO
from ._compat import range_type
from ._compat import string_types
from ._compat import text_type
from .exceptions import TemplateAssertionError
from .idtracking import Symbols
from .idtracking import VAR_LOAD_ALIAS
from .idtracking import VAR_LOAD_PARAMETER
from .idtracking import VAR_LOAD_RESOLVE
from .idtracking import VAR_LOAD_UNDEFINED
from .nodes import EvalContext
from .optimizer import Optimizer
from .utils import concat
from .visitor import NodeVisitor
operators = {
"eq": "==",
"ne": "!=",
"gt": ">",
"gteq": ">=",
"lt": "<",
"lteq": "<=",
"in": "in",
"notin": "not in",
}
# which method do we want to use for dict iteration in generated
# code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, "iteritems"):
dict_item_iter = "iteritems"
else:
dict_item_iter = "items"
code_features = ["division"]
# does this python version support generator stops? (PEP 0479)
try:
exec("from __future__ import generator_stop")
code_features.append("generator_stop")
except SyntaxError:
pass
# does this python version support yield from?
try:
exec("def f(): yield from x()")
except SyntaxError:
supports_yield_from = False
else:
supports_yield_from = True
def optimizeconst(f):
def new_func(self, node, frame, **kwargs):
# Only optimize if the frame is not volatile
if self.optimized and not frame.eval_ctx.volatile:
new_node = self.optimizer.visit(node, frame.eval_ctx)
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
return update_wrapper(new_func, f)
def generate(
node, environment, name, filename, stream=None, defer_init=False, optimized=True
):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError("Can't compile non template nodes")
generator = environment.code_generator_class(
environment, name, filename, stream, defer_init, optimized
)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
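# A hedged usage sketch (editorial addition): given a configured jinja2
# Environment ``env``, the compiled module source for a template can be
# obtained via something like
#     source = generate(env.parse("Hello {{ name }}!"), env, "hello", None)
# where ``env.parse`` returning a ``nodes.Template`` is assumed.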
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
return True
if type(value) in (tuple, list, set, frozenset):
for item in value:
if not has_safe_repr(item):
return False
return True
elif type(value) is dict:
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
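# For example, implied by the checks above:
#     has_safe_repr(42)               -> True
#     has_safe_repr((1, "a", [2.0]))  -> True   (containers checked recursively)
#     has_safe_repr(object())         -> False  (arbitrary objects are unsafe)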
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class MacroRef(object):
def __init__(self, node):
self.node = node
self.accesses_caller = False
self.accesses_kwargs = False
self.accesses_varargs = False
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None, level=None):
self.eval_ctx = eval_ctx
self.symbols = Symbols(parent and parent.symbols or None, level=level)
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# the parent of this frame
self.parent = parent
if parent is not None:
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.symbols = self.symbols.copy()
return rv
def inner(self, isolated=False):
"""Return an inner frame."""
if isolated:
return Frame(self.eval_ctx, level=self.symbols.level + 1)
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
This is only used to implement if-statements.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == "load" and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(
self, environment, name, filename, stream=None, defer_init=False, optimized=True
):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
self.optimized = optimized
if optimized:
self.optimizer = Optimizer(environment)
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# Tracks toplevel assignments
self._assign_stack = []
# Tracks parameter definition blocks
self._param_def_block = []
# Tracks the current context.
self._context_reference_stack = ["context"]
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return "t_%d" % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline("%s = []" % frame.buffer)
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline("if context.eval_ctx.autoescape:")
self.indent()
self.writeline("return Markup(concat(%s))" % frame.buffer)
self.outdent()
self.writeline("else:")
self.indent()
self.writeline("return concat(%s)" % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
self.writeline("return Markup(concat(%s))" % frame.buffer)
return
self.writeline("return concat(%s)" % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline("yield ", node)
else:
self.writeline("%s.append(" % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(")")
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
self.writeline("pass")
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write("\n" * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info, self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(" " * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords, otherwise a syntax
error could occur. They should be given as a python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(", ")
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(", ")
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(", %s=%s" % (key, value))
if node.dyn_args:
self.write(", *")
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(", **dict({")
else:
self.write(", **{")
for kwarg in node.kwargs:
self.write("%r: " % kwarg.key)
self.visit(kwarg.value, frame)
self.write(", ")
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write("%r: %s, " % (key, value))
if node.dyn_kwargs is not None:
self.write("}, **")
self.visit(node.dyn_kwargs, frame)
self.write(")")
else:
self.write("}")
elif node.dyn_kwargs is not None:
self.write(", **")
self.visit(node.dyn_kwargs, frame)
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in "filters", "tests":
mapping = getattr(self, dependency)
for name in sorted(getattr(visitor, dependency)):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline(
"%s = environment.%s[%r]" % (mapping[name], dependency, name)
)
def enter_frame(self, frame):
undefs = []
for target, (action, param) in iteritems(frame.symbols.loads):
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param))
elif action == VAR_LOAD_ALIAS:
self.writeline("%s = %s" % (target, param))
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
raise NotImplementedError("unknown load instruction")
if undefs:
self.writeline("%s = missing" % " = ".join(undefs))
def leave_frame(self, frame, with_python_scope=False):
if not with_python_scope:
undefs = []
for target, _ in iteritems(frame.symbols.loads):
undefs.append(target)
if undefs:
self.writeline("%s = missing" % " = ".join(undefs))
def func(self, name):
if self.environment.is_async:
return "async def %s" % name
return "def %s" % name
def macro_body(self, node, frame):
"""Dump the function def of a macro or call block."""
frame = frame.inner()
frame.symbols.analyze_node(node)
macro_ref = MacroRef(node)
explicit_caller = None
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
if arg.name == "caller":
explicit_caller = idx
if arg.name in ("kwargs", "varargs"):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
if "caller" in undeclared:
# In older Jinja versions there was a bug that allowed caller
# to retain the special behavior even if it was mentioned in
# the argument list. However thankfully this was only really
# working if it was the last argument. So we are explicitly
# checking this now and error out if it is anywhere else in
# the argument list.
if explicit_caller is not None:
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
self.fail(
"When defining macros or call blocks the "
'special "caller" argument must be omitted '
"or be given a default.",
node.lineno,
)
else:
args.append(frame.symbols.declare_parameter("caller"))
macro_ref.accesses_caller = True
if "kwargs" in undeclared and "kwargs" not in skip_special_params:
args.append(frame.symbols.declare_parameter("kwargs"))
macro_ref.accesses_kwargs = True
if "varargs" in undeclared and "varargs" not in skip_special_params:
args.append(frame.symbols.declare_parameter("varargs"))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node)
self.indent()
self.buffer(frame)
self.enter_frame(frame)
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
self.writeline("if %s is missing:" % ref)
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
self.writeline(
"%s = undefined(%r, name=%r)"
% (ref, "parameter %r was not provided" % arg.name, arg.name)
)
else:
self.writeline("%s = " % ref)
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
self.pop_parameter_definitions()
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame, force_unescaped=True)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
return frame, macro_ref
def macro_def(self, macro_ref, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
name = getattr(macro_ref.node, "name", None)
if len(macro_ref.node.args) == 1:
arg_tuple += ","
self.write(
"Macro(environment, macro, %r, (%s), %r, %r, %r, "
"context.eval_ctx.autoescape)"
% (
name,
arg_tuple,
macro_ref.accesses_kwargs,
macro_ref.accesses_varargs,
macro_ref.accesses_caller,
)
)
def position(self, node):
"""Return a human readable position for the node."""
rv = "line %d" % node.lineno
if self.name is not None:
rv += " in " + repr(self.name)
return rv
def dump_local_context(self, frame):
return "{%s}" % ", ".join(
"%r: %s" % (name, target)
for name, target in sorted(iteritems(frame.symbols.dump_stores()))
)
def write_commons(self):
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
self.writeline("resolve = context.resolve_or_missing")
self.writeline("undefined = environment.undefined")
# always use the standard Undefined class for the implicit else of
# conditional expressions
self.writeline("cond_expr_undefined = Undefined")
self.writeline("if 0: yield None")
def push_parameter_definitions(self, frame):
"""Pushes all parameter targets from the given frame into a local
stack that permits tracking of yet to be assigned parameters. In
particular this enables the optimization from `visit_Name` to skip
undefined expressions for parameters in macros as macros can reference
otherwise unbound parameters.
"""
self._param_def_block.append(frame.symbols.dump_param_targets())
def pop_parameter_definitions(self):
"""Pops the current parameter definitions set."""
self._param_def_block.pop()
def mark_parameter_stored(self, target):
"""Marks a parameter in the current parameter definitions as stored.
This will skip the enforced undefined checks.
"""
if self._param_def_block:
self._param_def_block[-1].discard(target)
def push_context_reference(self, target):
self._context_reference_stack.append(target)
def pop_context_reference(self):
self._context_reference_stack.pop()
def get_context_ref(self):
return self._context_reference_stack[-1]
def get_resolve_func(self):
target = self._context_reference_stack[-1]
if target == "context":
return "resolve"
return "%s.resolve" % target
def derive_context(self, frame):
return "%s.derived(%s)" % (
self.get_context_ref(),
self.dump_local_context(frame),
)
def parameter_is_undeclared(self, target):
"""Checks if a given target is an undeclared parameter."""
if not self._param_def_block:
return False
return target in self._param_def_block[-1]
def push_assign_tracking(self):
"""Pushes a new layer for assignment tracking."""
self._assign_stack.append(set())
def pop_assign_tracking(self, frame):
"""Pops the topmost level for assignment tracking and updates the
context variables if necessary.
"""
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
public_names = [x for x in vars if x[:1] != "_"]
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
self.writeline("context.vars[%r] = %s" % (name, ref))
else:
self.writeline("context.vars.update({")
for idx, name in enumerate(vars):
if idx:
self.write(", ")
ref = frame.symbols.ref(name)
self.write("%r: %s" % (name, ref))
self.write("})")
if public_names:
if len(public_names) == 1:
self.writeline("context.exported_vars.add(%r)" % public_names[0])
else:
self.writeline(
"context.exported_vars.update((%s))"
% ", ".join(imap(repr, public_names))
)
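# A concrete sketch of the tracking above (modulo the internal name
# mangling done by frame.symbols.ref): a toplevel ``{% set x = 42 %}``
# ends up emitting roughly
#     x = 42
#     context.vars['x'] = x
#     context.exported_vars.add('x')
# while names starting with an underscore are kept out of exported_vars.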
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, "no root frame allowed"
eval_ctx = EvalContext(self.environment, self.name)
from .runtime import exported
self.writeline("from __future__ import %s" % ", ".join(code_features))
self.writeline("from jinja2.runtime import " + ", ".join(exported))
if self.environment.is_async:
self.writeline(
"from jinja2.asyncsupport import auto_await, "
"auto_aiter, AsyncLoopContext"
)
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ", environment=environment" or ""
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail("block %r defined twice" % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if "." in imp:
module, obj = imp.rsplit(".", 1)
self.writeline("from %s import %s as %s" % (module, obj, alias))
else:
self.writeline("import %s as %s" % (imp, alias))
# add the load name
self.writeline("name = %r" % self.name)
# generate the root render function.
self.writeline(
"%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
)
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
if "self" in find_undeclared(node.body, ("self",)):
ref = frame.symbols.declare_parameter("self")
self.writeline("%s = TemplateReference(context)" % ref)
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
self.writeline("parent_template = None")
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline("if parent_template is not None:")
self.indent()
if supports_yield_from and not self.environment.is_async:
self.writeline("yield from parent_template.root_render_func(context)")
else:
self.writeline(
"%sfor event in parent_template."
"root_render_func(context):"
% (self.environment.is_async and "async " or "")
)
self.indent()
self.writeline("yield event")
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
self.writeline(
"%s(context, missing=missing%s):"
% (self.func("block_" + name), envenv),
block,
1,
)
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
undeclared = find_undeclared(block.body, ("self", "super"))
if "self" in undeclared:
ref = block_frame.symbols.declare_parameter("self")
self.writeline("%s = TemplateReference(context)" % ref)
if "super" in undeclared:
ref = block_frame.symbols.declare_parameter("super")
self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.enter_frame(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
self.writeline(
"blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
extra=1,
)
# add a function that returns the debug info
self.writeline(
"debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
)
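# The net effect of visit_Template is a generated module whose skeleton
# looks roughly like this (a sketch; details vary with async mode and
# inheritance):
#     from __future__ import division
#     from jinja2.runtime import ...      # exported runtime helpers
#     name = 'index.html'
#     def root(context, missing=missing):
#         ...
#     def block_body(context, missing=missing):
#         ...
#     blocks = {'body': block_body}
#     debug_info = '1=8&3=10'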
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 0
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline("if parent_template is None:")
self.indent()
level += 1
if node.scoped:
context = self.derive_context(frame)
else:
context = self.get_context_ref()
if (
supports_yield_from
and not self.environment.is_async
and frame.buffer is None
):
self.writeline(
"yield from context.blocks[%r][0](%s)" % (node.name, context), node
)
else:
loop = self.environment.is_async and "async for" or "for"
self.writeline(
"%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
node,
)
self.indent()
self.simple_write("event", frame)
self.outdent()
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail("cannot use extend from a non top-level scope", node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
# time too, but we prefer not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline("if parent_template is not None:")
self.indent()
self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline("parent_template = environment.get_template(", node)
self.visit(node.template, frame)
self.write(", %r)" % self.name)
self.writeline(
"for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
)
self.indent()
self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.ignore_missing:
self.writeline("try:")
self.indent()
func_name = "get_or_select_template"
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = "get_template"
elif isinstance(node.template.value, (tuple, list)):
func_name = "select_template"
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = "select_template"
self.writeline("template = environment.%s(" % func_name, node)
self.visit(node.template, frame)
self.write(", %r)" % self.name)
if node.ignore_missing:
self.outdent()
self.writeline("except TemplateNotFound:")
self.indent()
self.writeline("pass")
self.outdent()
self.writeline("else:")
self.indent()
skip_event_yield = False
if node.with_context:
loop = self.environment.is_async and "async for" or "for"
self.writeline(
"%s event in template.root_render_func("
"template.new_context(context.get_all(), True, "
"%s)):" % (loop, self.dump_local_context(frame))
)
elif self.environment.is_async:
self.writeline(
"for event in (await "
"template._get_default_module_async())"
"._body_stream:"
)
else:
if supports_yield_from:
self.writeline("yield from template._get_default_module()._body_stream")
skip_event_yield = True
else:
self.writeline(
"for event in template._get_default_module()._body_stream:"
)
if not skip_event_yield:
self.indent()
self.simple_write("event", frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
self.writeline("%s = " % frame.symbols.ref(node.target), node)
if frame.toplevel:
self.write("context.vars[%r] = " % node.target)
if self.environment.is_async:
self.write("await ")
self.write("environment.get_template(")
self.visit(node.template, frame)
self.write(", %r)." % self.name)
if node.with_context:
self.write(
"make_module%s(context.get_all(), True, %s)"
% (
self.environment.is_async and "_async" or "",
self.dump_local_context(frame),
)
)
elif self.environment.is_async:
self.write("_get_default_module_async()")
else:
self.write("_get_default_module()")
if frame.toplevel and not node.target.startswith("_"):
self.writeline("context.exported_vars.discard(%r)" % node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write(
"included_template = %senvironment.get_template("
% (self.environment.is_async and "await " or "")
)
self.visit(node.template, frame)
self.write(", %r)." % self.name)
if node.with_context:
self.write(
"make_module%s(context.get_all(), True, %s)"
% (
self.environment.is_async and "_async" or "",
self.dump_local_context(frame),
)
)
elif self.environment.is_async:
self.write("_get_default_module_async()")
else:
self.write("_get_default_module()")
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline(
"%s = getattr(included_template, "
"%r, missing)" % (frame.symbols.ref(alias), name)
)
self.writeline("if %s is missing:" % frame.symbols.ref(alias))
self.indent()
self.writeline(
"%s = undefined(%r %% "
"included_template.__name__, "
"name=%r)"
% (
frame.symbols.ref(alias),
"the template %%r (imported on %s) does "
"not export the requested name %s"
% (self.position(node), repr(name)),
name,
)
)
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith("_"):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline(
"context.vars[%r] = %s" % (name, frame.symbols.ref(name))
)
else:
self.writeline(
"context.vars.update({%s})"
% ", ".join(
"%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
)
)
if discarded_names:
if len(discarded_names) == 1:
self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
else:
self.writeline(
"context.exported_vars.difference_"
"update((%s))" % ", ".join(imap(repr, discarded_names))
)
def visit_For(self, node, frame):
loop_frame = frame.inner()
test_frame = frame.inner()
else_frame = frame.inner()
# try to figure out if we have an extended loop. An extended loop
# is necessary if the loop is in recursive mode or if the special
# loop variable is accessed in the body.
extended_loop = node.recursive or "loop" in find_undeclared(
node.iter_child_nodes(only=("body",)), ("loop",)
)
loop_ref = None
if extended_loop:
loop_ref = loop_frame.symbols.declare_parameter("loop")
loop_frame.symbols.analyze_node(node, for_branch="body")
if node.else_:
else_frame.symbols.analyze_node(node, for_branch="else")
if node.test:
loop_filter_func = self.temporary_identifier()
test_frame.symbols.analyze_node(node, for_branch="test")
self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
self.indent()
self.enter_frame(test_frame)
self.writeline(self.environment.is_async and "async for " or "for ")
self.visit(node.target, loop_frame)
self.write(" in ")
self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
self.write(":")
self.indent()
self.writeline("if ", node.test)
self.visit(node.test, test_frame)
self.write(":")
self.indent()
self.writeline("yield ")
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
# if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
self.writeline(
"%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
)
self.indent()
self.buffer(loop_frame)
# Use the same buffer for the else frame
else_frame.buffer = loop_frame.buffer
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline("%s = missing" % loop_ref)
for name in node.find_all(nodes.Name):
if name.ctx == "store" and name.name == "loop":
self.fail(
"Can't assign to special loop variable in for-loop target",
name.lineno,
)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline("%s = 1" % iteration_indicator)
self.writeline(self.environment.is_async and "async for " or "for ", node)
self.visit(node.target, loop_frame)
if extended_loop:
if self.environment.is_async:
self.write(", %s in AsyncLoopContext(" % loop_ref)
else:
self.write(", %s in LoopContext(" % loop_ref)
else:
self.write(" in ")
if node.test:
self.write("%s(" % loop_filter_func)
if node.recursive:
self.write("reciter")
else:
if self.environment.is_async and not extended_loop:
self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
self.write(")")
if node.test:
self.write(")")
if node.recursive:
self.write(", undefined, loop_render_func, depth):")
else:
self.write(extended_loop and ", undefined):" or ":")
self.indent()
self.enter_frame(loop_frame)
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline("%s = 0" % iteration_indicator)
self.outdent()
self.leave_frame(
loop_frame, with_python_scope=node.recursive and not node.else_
)
if node.else_:
self.writeline("if %s:" % iteration_indicator)
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
self.leave_frame(else_frame)
self.outdent()
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
if self.environment.is_async:
self.write("await ")
self.write("loop(")
if self.environment.is_async:
self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async:
self.write(")")
self.write(", loop)")
self.end_write(frame)
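# Sketch of the common non-recursive, extended case (editorial note):
# ``{% for x in seq %}...{% endfor %}`` compiles roughly to
#     for x, loop in LoopContext(seq, undefined):
#         ...body...
# with the "loop" helper only materialized when the body actually uses it
# (names are mangled by frame.symbols.ref in the real output).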
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline("if ", node)
self.visit(node.test, if_frame)
self.write(":")
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
for elif_ in node.elif_:
self.writeline("elif ", elif_)
self.visit(elif_.test, if_frame)
self.write(":")
self.indent()
self.blockvisit(elif_.body, if_frame)
self.outdent()
if node.else_:
self.writeline("else:")
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith("_"):
self.write("context.exported_vars.add(%r)" % node.name)
self.writeline("context.vars[%r] = " % node.name)
self.write("%s = " % frame.symbols.ref(node.name))
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node, frame):
call_frame, macro_ref = self.macro_body(node, frame)
self.writeline("caller = ")
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.symbols.analyze_node(node)
self.enter_frame(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.leave_frame(filter_frame)
def visit_With(self, node, frame):
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
for target, expr in izip(node.targets, node.values):
self.newline()
self.visit(target, with_frame)
self.write(" = ")
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
_FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
#: The default finalize function if the environment isn't configured
#: with one. Or if the environment has one, this is called on that
#: function's output for constants.
_default_finalize = text_type
_finalize = None
def _make_finalize(self):
"""Build the finalize function to be used on constants and at
runtime. Cached so it's only created once for all output nodes.
Returns a ``namedtuple`` with the following attributes:
``const``
A function to finalize constant data at compile time.
``src``
Source code to output around nodes to be evaluated at
runtime.
"""
if self._finalize is not None:
return self._finalize
finalize = default = self._default_finalize
src = None
if self.environment.finalize:
src = "environment.finalize("
env_finalize = self.environment.finalize
def finalize(value):
return default(env_finalize(value))
if getattr(env_finalize, "contextfunction", False) is True:
src += "context, "
finalize = None # noqa: F811
elif getattr(env_finalize, "evalcontextfunction", False) is True:
src += "context.eval_ctx, "
finalize = None
elif getattr(env_finalize, "environmentfunction", False) is True:
src += "environment, "
def finalize(value):
return default(env_finalize(self.environment, value))
self._finalize = self._FinalizeInfo(finalize, src)
return self._finalize
def _output_const_repr(self, group):
"""Given a group of constant values converted from ``Output``
child nodes, produce a string to write to the template module
source.
"""
return repr(concat(group))
def _output_child_to_const(self, node, frame, finalize):
"""Try to optimize a child of an ``Output`` node by trying to
convert it to constant, finalized data at compile time.
If :exc:`Impossible` is raised, the node is not constant and
will be evaluated at runtime. Any other exception will also be
evaluated at runtime for easier debugging.
"""
const = node.as_const(frame.eval_ctx)
if frame.eval_ctx.autoescape:
const = escape(const)
# Template data doesn't go through finalize.
if isinstance(node, nodes.TemplateData):
return text_type(const)
return finalize.const(const)
def _output_child_pre(self, node, frame, finalize):
"""Output extra source code before visiting a child of an
``Output`` node.
"""
if frame.eval_ctx.volatile:
self.write("(escape if context.eval_ctx.autoescape else to_string)(")
elif frame.eval_ctx.autoescape:
self.write("escape(")
else:
self.write("to_string(")
if finalize.src is not None:
self.write(finalize.src)
def _output_child_post(self, node, frame, finalize):
"""Output extra source code after visiting a child of an
``Output`` node.
"""
self.write(")")
if finalize.src is not None:
self.write(")")
def visit_Output(self, node, frame):
# If an extends is active, don't render outside a block.
if frame.require_output_check:
# A top-level extends is known to exist at compile time.
if self.has_known_extends:
return
self.writeline("if parent_template is None:")
self.indent()
finalize = self._make_finalize()
body = []
# Evaluate constants at compile time if possible. Each item in
# body will be either a list of static data or a node to be
# evaluated at runtime.
for child in node.nodes:
try:
if not (
# If the finalize function requires runtime context,
# constants can't be evaluated at compile time.
finalize.const
# Unless it's basic template data that won't be
# finalized anyway.
or isinstance(child, nodes.TemplateData)
):
raise nodes.Impossible()
const = self._output_child_to_const(child, frame, finalize)
except (nodes.Impossible, Exception):
# The node was not constant and needs to be evaluated at
# runtime. Or another error was raised, which is easier
# to debug at runtime.
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
if frame.buffer is not None:
if len(body) == 1:
self.writeline("%s.append(" % frame.buffer)
else:
self.writeline("%s.extend((" % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
# A group of constant data to join and output.
val = self._output_const_repr(item)
if frame.buffer is None:
self.writeline("yield " + val)
else:
self.writeline(val + ",")
else:
if frame.buffer is None:
self.writeline("yield ", item)
else:
self.newline(item)
# A node to be evaluated at runtime.
self._output_child_pre(item, frame, finalize)
self.visit(item, frame)
self._output_child_post(item, frame, finalize)
if frame.buffer is not None:
self.write(",")
if frame.buffer is not None:
self.outdent()
self.writeline(")" if len(body) == 1 else "))")
if frame.require_output_check:
self.outdent()
def visit_Assign(self, node, frame):
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
self.write(" = ")
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
def visit_AssignBlock(self, node, frame):
self.push_assign_tracking()
block_frame = frame.inner()
# This is a special case. Since a set block always captures we
# will disable output checks. This way one can use set blocks
# toplevel even in extended templates.
block_frame.require_output_check = False
block_frame.symbols.analyze_node(node)
self.enter_frame(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
if node.filter is not None:
self.visit_Filter(node.filter, block_frame)
else:
self.write("concat(%s)" % block_frame.buffer)
self.write(")")
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == "store" and frame.toplevel:
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
# instruction indicates a parameter which are always defined.
if node.ctx == "load":
load = frame.symbols.find_load(ref)
if not (
load is not None
and load[0] == VAR_LOAD_PARAMETER
and not self.parameter_is_undeclared(ref)
):
self.write(
"(undefined(name=%r) if %s is missing else %s)"
% (node.name, ref, ref)
)
return
self.write(ref)
def visit_NSRef(self, node, frame):
# NSRefs can only be used to store values; since they use the normal
# `foo.bar` notation they will be parsed as a normal attribute access
# when used anywhere but in a `set` context
ref = frame.symbols.ref(node.name)
self.writeline("if not isinstance(%s, Namespace):" % ref)
self.indent()
self.writeline(
"raise TemplateRuntimeError(%r)"
% "cannot assign attribute on non-namespace object"
)
self.outdent()
self.writeline("%s[%r]" % (ref, node.attr))
def visit_Const(self, node, frame):
val = node.as_const(frame.eval_ctx)
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write(
"(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data
)
def visit_Tuple(self, node, frame):
self.write("(")
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item, frame)
self.write(idx == 0 and ",)" or ")")
def visit_List(self, node, frame):
self.write("[")
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item, frame)
self.write("]")
def visit_Dict(self, node, frame):
self.write("{")
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item.key, frame)
self.write(": ")
self.visit(item.value, frame)
self.write("}")
def binop(operator, interceptable=True): # noqa: B902
@optimizeconst
def visitor(self, node, frame):
if (
self.environment.sandboxed
and operator in self.environment.intercepted_binops
):
self.write("environment.call_binop(context, %r, " % operator)
self.visit(node.left, frame)
self.write(", ")
self.visit(node.right, frame)
else:
self.write("(")
self.visit(node.left, frame)
self.write(" %s " % operator)
self.visit(node.right, frame)
self.write(")")
return visitor
def uaop(operator, interceptable=True): # noqa: B902
@optimizeconst
def visitor(self, node, frame):
if (
self.environment.sandboxed
and operator in self.environment.intercepted_unops
):
self.write("environment.call_unop(context, %r, " % operator)
self.visit(node.node, frame)
else:
self.write("(" + operator)
self.visit(node.node, frame)
self.write(")")
return visitor
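# For instance, ``{{ a + b }}`` compiles to ``(a + b)`` in a normal
# environment, while a sandboxed environment with '+' intercepted emits
# ``environment.call_binop(context, '+', a, b)`` instead (sketch; the
# actual operands are the mangled variable references).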
visit_Add = binop("+")
visit_Sub = binop("-")
visit_Mul = binop("*")
visit_Div = binop("/")
visit_FloorDiv = binop("//")
visit_Pow = binop("**")
visit_Mod = binop("%")
visit_And = binop("and", interceptable=False)
visit_Or = binop("or", interceptable=False)
visit_Pos = uaop("+")
visit_Neg = uaop("-")
visit_Not = uaop("not ", interceptable=False)
del binop, uaop
@optimizeconst
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)"
elif frame.eval_ctx.autoescape:
func_name = "markup_join"
else:
func_name = "unicode_join"
self.write("%s((" % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(", ")
self.write("))")
@optimizeconst
def visit_Compare(self, node, frame):
self.write("(")
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
self.write(")")
def visit_Operand(self, node, frame):
self.write(" %s " % operators[node.op])
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node, frame):
if self.environment.is_async:
self.write("(await auto_await(")
self.write("environment.getattr(")
self.visit(node.node, frame)
self.write(", %r)" % node.attr)
if self.environment.is_async:
self.write("))")
@optimizeconst
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write("[")
self.visit(node.arg, frame)
self.write("]")
else:
if self.environment.is_async:
self.write("(await auto_await(")
self.write("environment.getitem(")
self.visit(node.node, frame)
self.write(", ")
self.visit(node.arg, frame)
self.write(")")
if self.environment.is_async:
self.write("))")
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(":")
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(":")
self.visit(node.step, frame)
@optimizeconst
def visit_Filter(self, node, frame):
if self.environment.is_async:
self.write("await auto_await(")
self.write(self.filters[node.name] + "(")
func = self.environment.filters.get(node.name)
if func is None:
self.fail("no filter named %r" % node.name, node.lineno)
if getattr(func, "contextfilter", False) is True:
self.write("context, ")
elif getattr(func, "evalcontextfilter", False) is True:
self.write("context.eval_ctx, ")
elif getattr(func, "environmentfilter", False) is True:
self.write("environment, ")
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write(
"(context.eval_ctx.autoescape and"
" Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer)
)
elif frame.eval_ctx.autoescape:
self.write("Markup(concat(%s))" % frame.buffer)
else:
self.write("concat(%s)" % frame.buffer)
self.signature(node, frame)
self.write(")")
if self.environment.is_async:
self.write(")")
@optimizeconst
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + "(")
if node.name not in self.environment.tests:
self.fail("no test named %r" % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(")")
@optimizeconst
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write(
"cond_expr_undefined(%r)"
% (
"the inline if-"
"expression on %s evaluated to false and "
"no else section was defined." % self.position(node)
)
)
self.write("(")
self.visit(node.expr1, frame)
self.write(" if ")
self.visit(node.test, frame)
self.write(" else ")
write_expr2()
self.write(")")
@optimizeconst
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.is_async:
self.write("await auto_await(")
if self.environment.sandboxed:
self.write("environment.call(context, ")
else:
self.write("context.call(")
self.visit(node.node, frame)
extra_kwargs = forward_caller and {"caller": "caller"} or None
self.signature(node, frame, extra_kwargs)
self.write(")")
if self.environment.is_async:
self.write(")")
def visit_Keyword(self, node, frame):
self.write(node.key + "=")
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write("Markup(")
self.visit(node.expr, frame)
self.write(")")
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write("(context.eval_ctx.autoescape and Markup or identity)(")
self.visit(node.expr, frame)
self.write(")")
def visit_EnvironmentAttribute(self, node, frame):
self.write("environment." + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write("environment.extensions[%r].%s" % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write("context")
def visit_DerivedContextReference(self, node, frame):
self.write(self.derive_context(frame))
def visit_Continue(self, node, frame):
self.writeline("continue", node)
def visit_Break(self, node, frame):
self.writeline("break", node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
def visit_OverlayScope(self, node, frame):
ctx = self.temporary_identifier()
self.writeline("%s = %s" % (ctx, self.derive_context(frame)))
self.writeline("%s.vars = " % ctx)
self.visit(node.context, frame)
self.push_context_reference(ctx)
scope_frame = frame.inner(isolated=True)
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
self.pop_context_reference()
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline("context.eval_ctx.%s = " % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
self.writeline("%s = context.eval_ctx.save()" % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
|
chromium/chromium
|
third_party/jinja2/compiler.py
|
Python
|
bsd-3-clause
| 66,300
|
[
"VisIt"
] |
200147eb3e7710f4e233e23bf65a9f69d8e2bc24b61a68631df5992bdcb003d5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import re
import itertools
import warnings
import logging
import math
import six
import numpy as np
from numpy.linalg import det
from collections import OrderedDict, namedtuple
from hashlib import md5
from monty.io import zopen
from monty.os.path import zpath
from monty.json import MontyDecoder
from enum import Enum
from tabulate import tabulate
import scipy.constants as const
from pymatgen import SETTINGS
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.electronic_structure.core import Magmom
from monty.design_patterns import cached_class
from pymatgen.util.string import str_delimited
from pymatgen.util.io_utils import clean_lines
from monty.json import MSONable
"""
Classes for reading/manipulating/writing VASP input files. All major VASP
input files (INCAR, KPOINTS, POSCAR, POTCAR) are supported.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, " + \
"Vincent L Chevrier, Stephen Dacek"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Jul 16, 2012"
logger = logging.getLogger(__name__)
class Poscar(MSONable):
"""
Object for representing the data in a POSCAR or CONTCAR file.
    Please note that, in the current implementation, most attributes can be
    set directly.
Args:
structure (Structure): Structure object.
comment (str): Optional comment line for POSCAR. Defaults to unit
cell formula of structure. Defaults to None.
selective_dynamics (Nx3 array): bool values for selective dynamics,
where N is number of sites. Defaults to None.
        true_names (bool): Set to False if the names in the POSCAR are not
well-defined and ambiguous. This situation arises commonly in
vasp < 5 where the POSCAR sometimes does not contain element
symbols. Defaults to True.
velocities (Nx3 array): Velocities for the POSCAR. Typically parsed
in MD runs or can be used to initialize velocities.
predictor_corrector (Nx3 array): Predictor corrector for the POSCAR.
Typically parsed in MD runs.
.. attribute:: structure
Associated Structure.
.. attribute:: comment
Optional comment string.
.. attribute:: true_names
        Boolean indicating whether Poscar contains actual real names parsed
from either a POTCAR or the POSCAR itself.
.. attribute:: selective_dynamics
Selective dynamics attribute for each site if available. A Nx3 array of
booleans.
.. attribute:: velocities
Velocities for each site (typically read in from a CONTCAR). A Nx3
array of floats.
.. attribute:: predictor_corrector
Predictor corrector coordinates and derivatives for each site; i.e.
a list of three 1x3 arrays for each site (typically read in from a MD
CONTCAR).
.. attribute:: predictor_corrector_preamble
Predictor corrector preamble contains the predictor-corrector key,
        POTIM, and thermostat parameters that precede the site-specific
        predictor-corrector data in an MD CONTCAR.
.. attribute:: temperature
Temperature of velocity Maxwell-Boltzmann initialization. Initialized
        to -1 (i.e., Maxwell-Boltzmann initialization has not been performed).
"""
def __init__(self, structure, comment=None, selective_dynamics=None,
true_names=True, velocities=None, predictor_corrector=None,
predictor_corrector_preamble=None):
if structure.is_ordered:
site_properties = {}
if selective_dynamics:
site_properties["selective_dynamics"] = selective_dynamics
if velocities:
site_properties["velocities"] = velocities
if predictor_corrector:
site_properties["predictor_corrector"] = predictor_corrector
self.structure = structure.copy(site_properties=site_properties)
self.true_names = true_names
self.comment = structure.formula if comment is None else comment
self.predictor_corrector_preamble = predictor_corrector_preamble
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into POSCAR!")
self.temperature = -1
@property
def velocities(self):
return self.structure.site_properties.get("velocities")
@property
def selective_dynamics(self):
return self.structure.site_properties.get("selective_dynamics")
@property
def predictor_corrector(self):
return self.structure.site_properties.get("predictor_corrector")
@velocities.setter
def velocities(self, velocities):
self.structure.add_site_property("velocities", velocities)
@selective_dynamics.setter
def selective_dynamics(self, selective_dynamics):
self.structure.add_site_property("selective_dynamics",
selective_dynamics)
@predictor_corrector.setter
def predictor_corrector(self, predictor_corrector):
self.structure.add_site_property("predictor_corrector",
predictor_corrector)
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Poscar. Similar to 6th line in
vasp 5+ POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def __setattr__(self, name, value):
if name in ("selective_dynamics", "velocities"):
if value is not None and len(value) > 0:
value = np.array(value)
dim = value.shape
if dim[1] != 3 or dim[0] != len(self.structure):
raise ValueError(name + " array must be same length as" +
" the structure.")
value = value.tolist()
super(Poscar, self).__setattr__(name, value)
@staticmethod
def from_file(filename, check_for_POTCAR=True, read_velocities=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
for f in os.listdir(dirname):
if f == "POTCAR":
try:
potcar = Potcar.from_file(os.path.join(dirname, f))
names = [sym.split("_")[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names
                    except Exception:
names = None
with zopen(filename, "rt") as f:
return Poscar.from_string(f.read(), names,
read_velocities=read_velocities)
@staticmethod
def from_string(data, default_names=None, read_velocities=True):
"""
Reads a Poscar from a string.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If default_names are supplied and valid, it will use those. Usually,
default names comes from an external source, such as a POTCAR in the
same directory.
2. If there are no valid default names but the input file is Vasp5-like
and contains element symbols in the 6th line, the code will use that.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
data (str): String containing Poscar data.
default_names ([str]): Default symbols for the POSCAR file,
usually coming from a POTCAR in the same directory.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
# "^\s*$" doesn't match lines with no whitespace
chunks = re.split(r"\n\s*\n", data.rstrip(), flags=re.MULTILINE)
try:
if chunks[0] == "":
chunks.pop(0)
chunks[0] = "\n" + chunks[0]
except IndexError:
raise ValueError("Empty POSCAR")
# Parse positions
lines = tuple(clean_lines(chunks[0].split("\n"), False))
comment = lines[0]
scale = float(lines[1])
lattice = np.array([[float(i) for i in line.split()]
for line in lines[2:5]])
if scale < 0:
# In vasp, a negative scale factor is treated as a volume. We need
# to translate this to a proper lattice vector scaling.
vol = abs(det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
vasp5_symbols = False
try:
natoms = [int(i) for i in lines[5].split()]
ipos = 6
except ValueError:
vasp5_symbols = True
symbols = lines[5].split()
"""
            Atom symbols and atom counts in a POSCAR written by VASP can
            span multiple lines when atoms of the same type are not grouped
            together and more than 20 groups are defined.
            Example:
Cr16 Fe35 Ni2
1.00000000000000
8.5415010000000002 -0.0077670000000000 -0.0007960000000000
-0.0077730000000000 8.5224019999999996 0.0105580000000000
-0.0007970000000000 0.0105720000000000 8.5356889999999996
Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Ni Fe Cr Fe Cr
Fe Ni Fe Cr Fe
1 1 2 4 2 1 1 1 2 1 1 1 4 1 1 1 5 3 6 1
2 1 3 2 5
Direct
...
"""
nlines_symbols = 1
for nlines_symbols in range(1, 11):
try:
int(lines[5+nlines_symbols].split()[0])
break
except ValueError:
pass
for iline_symbols in range(6, 5+nlines_symbols):
symbols.extend(lines[iline_symbols].split())
natoms = []
iline_natoms_start = 5+nlines_symbols
for iline_natoms in range(iline_natoms_start,
iline_natoms_start+nlines_symbols):
natoms.extend([int(i) for i in lines[iline_natoms].split()])
atomic_symbols = list()
for i in range(len(natoms)):
atomic_symbols.extend([symbols[i]] * natoms[i])
ipos = 5+2*nlines_symbols
postype = lines[ipos].split()[0]
sdynamics = False
# Selective dynamics
if postype[0] in "sS":
sdynamics = True
ipos += 1
postype = lines[ipos].split()[0]
cart = postype[0] in "cCkK"
nsites = sum(natoms)
# If default_names is specified (usually coming from a POTCAR), use
        # them. This is in line with VASP's parsing order: the specified
        # POTCAR is the default used.
if default_names:
try:
atomic_symbols = []
for i in range(len(natoms)):
atomic_symbols.extend([default_names[i]] * natoms[i])
vasp5_symbols = True
except IndexError:
pass
if not vasp5_symbols:
ind = 3 if not sdynamics else 6
try:
# Check if names are appended at the end of the coordinates.
atomic_symbols = [l.split()[ind]
for l in lines[ipos + 1:ipos + 1 + nsites]]
# Ensure symbols are valid elements
if not all([Element.is_valid_symbol(sym)
for sym in atomic_symbols]):
raise ValueError("Non-valid symbols detected.")
vasp5_symbols = True
except (ValueError, IndexError):
# Defaulting to false names.
atomic_symbols = []
for i in range(len(natoms)):
sym = Element.from_Z(i + 1).symbol
atomic_symbols.extend([sym] * natoms[i])
warnings.warn("Elements in POSCAR cannot be determined. "
"Defaulting to false names %s." %
" ".join(atomic_symbols))
# read the atomic coordinates
coords = []
selective_dynamics = list() if sdynamics else None
for i in range(nsites):
toks = lines[ipos + 1 + i].split()
crd_scale = scale if cart else 1
coords.append([float(j) * crd_scale for j in toks[:3]])
if sdynamics:
selective_dynamics.append([tok.upper()[0] == "T"
for tok in toks[3:6]])
struct = Structure(lattice, atomic_symbols, coords,
to_unit_cell=False, validate_proximity=False,
coords_are_cartesian=cart)
if read_velocities:
# Parse velocities if any
velocities = []
if len(chunks) > 1:
for line in chunks[1].strip().split("\n"):
velocities.append([float(tok) for tok in line.split()])
# Parse the predictor-corrector data
predictor_corrector = []
predictor_corrector_preamble = None
if len(chunks) > 2:
lines = chunks[2].strip().split("\n")
# There are 3 sets of 3xN Predictor corrector parameters
# So can't be stored as a single set of "site_property"
# First line in chunk is a key in CONTCAR
# Second line is POTIM
# Third line is the thermostat parameters
predictor_corrector_preamble = (lines[0] + "\n" + lines[1]
+ "\n" + lines[2])
# Rest is three sets of parameters, each set contains
                # x, y, z predictor-corrector parameters for every atom in order.
lines = lines[3:]
for st in range(nsites):
d1 = [float(tok) for tok in lines[st].split()]
d2 = [float(tok) for tok in lines[st+nsites].split()]
d3 = [float(tok) for tok in lines[st+2*nsites].split()]
                    predictor_corrector.append([d1, d2, d3])
else:
velocities = None
predictor_corrector = None
predictor_corrector_preamble = None
return Poscar(struct, comment, selective_dynamics, vasp5_symbols,
velocities=velocities,
predictor_corrector=predictor_corrector,
predictor_corrector_preamble=predictor_corrector_preamble)
def get_string(self, direct=True, vasp4_compatible=False,
significant_figures=6):
"""
Returns a string to be written as a POSCAR file. By default, site
symbols are written, which means compatibility is for vasp >= 5.
Args:
direct (bool): Whether coordinates are output in direct or
cartesian. Defaults to True.
vasp4_compatible (bool): Set to True to omit site symbols on 6th
line to maintain backward vasp 4.x compatibility. Defaults
to False.
significant_figures (int): No. of significant figures to
output all quantities. Defaults to 6. Note that positions are
output in fixed point, while velocities are output in
scientific format.
Returns:
String representation of POSCAR.
"""
        # This corrects for VASP's really annoying bug of crashing on lattices
# which have triple product < 0. We will just invert the lattice
# vectors.
latt = self.structure.lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
if self.true_names and not vasp4_compatible:
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
if self.selective_dynamics:
lines.append("Selective dynamics")
lines.append("direct" if direct else "cartesian")
format_str = "{{:.{0}f}}".format(significant_figures)
for (i, site) in enumerate(self.structure):
coords = site.frac_coords if direct else site.coords
line = " ".join([format_str.format(c) for c in coords])
if self.selective_dynamics is not None:
sd = ["T" if j else "F" for j in self.selective_dynamics[i]]
line += " %s %s %s" % (sd[0], sd[1], sd[2])
line += " " + site.species_string
lines.append(line)
if self.velocities:
try:
lines.append("")
for v in self.velocities:
lines.append(" ".join([format_str.format(i) for i in v]))
            except Exception:
warnings.warn("Velocities are missing or corrupted.")
if self.predictor_corrector:
lines.append("")
if self.predictor_corrector_preamble:
lines.append(self.predictor_corrector_preamble)
pred = np.array(self.predictor_corrector)
for col in range(3):
                    for z in pred[:, col]:
lines.append(" ".join([format_str.format(i) for i in z]))
else:
warnings.warn(
"Preamble information missing or corrupt. "
"Writing Poscar with no predictor corrector data.")
return "\n".join(lines) + "\n"
def __repr__(self):
return self.get_string()
def __str__(self):
"""
String representation of Poscar file.
"""
return self.get_string()
def write_file(self, filename, **kwargs):
"""
Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"true_names": self.true_names,
"selective_dynamics": np.array(
self.selective_dynamics).tolist(),
"velocities": self.velocities,
"predictor_corrector": self.predictor_corrector,
"comment": self.comment}
@classmethod
def from_dict(cls, d):
return Poscar(Structure.from_dict(d["structure"]),
comment=d["comment"],
selective_dynamics=d["selective_dynamics"],
true_names=d["true_names"],
velocities=d.get("velocities", None),
predictor_corrector=d.get("predictor_corrector", None))
def set_temperature(self, temperature):
"""
Initializes the velocities based on Maxwell-Boltzmann distribution.
        Removes linear, but not angular, drift (same as VASP). Scales the
        energies to the exact temperature (microcanonical ensemble).
        Velocities are given in A/fs. This is the VASP default when
        direct/cartesian is not specified (even when positions are given in
        direct coordinates). Overwrites imported velocities, if any.
Args:
temperature (float): Temperature in Kelvin.
"""
# mean 0 variance 1
velocities = np.random.randn(len(self.structure), 3)
# in AMU, (N,1) array
atomic_masses = np.array([site.specie.atomic_mass.to("kg")
for site in self.structure])
dof = 3 * len(self.structure) - 3
# scale velocities due to atomic masses
# mean 0 std proportional to sqrt(1/m)
velocities /= atomic_masses[:, np.newaxis] ** (1 / 2)
# remove linear drift (net momentum)
velocities -= np.average(atomic_masses[:, np.newaxis] * velocities,
axis=0) / np.average(atomic_masses)
# scale velocities to get correct temperature
energy = np.sum(1 / 2 * atomic_masses *
np.sum(velocities ** 2, axis=1))
scale = (temperature * dof / (2 * energy / const.k)) ** (1 / 2)
velocities *= scale * 1e-5 # these are in A/fs
self.temperature = temperature
try:
del self.structure.site_properties["selective_dynamics"]
except KeyError:
pass
try:
del self.structure.site_properties["predictor_corrector"]
except KeyError:
pass
# returns as a list of lists to be consistent with the other
# initializations
self.structure.add_site_property("velocities", velocities.tolist())
class Incar(dict, MSONable):
"""
INCAR object for reading and writing INCAR files. Essentially consists of
a dictionary with some helper functions
"""
def __init__(self, params=None):
"""
Creates an Incar object.
Args:
params (dict): A set of input parameters as a dictionary.
"""
super(Incar, self).__init__()
if params:
if params.get("MAGMOM") and (params.get("LSORBIT") or
params.get("LNONCOLLINEAR")):
val = []
for i in range(len(params["MAGMOM"])//3):
val.append(params["MAGMOM"][i*3:(i+1)*3])
params["MAGMOM"] = val
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Incar. Warns if parameter is not in list of
valid INCAR tags. Also cleans the parameter and val by stripping
leading and trailing white spaces.
"""
super(Incar, self).__setitem__(
key.strip(), Incar.proc_val(key.strip(), val.strip())
if isinstance(val, six.string_types) else val)
def as_dict(self):
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
return Incar({k: v for k, v in d.items() if k not in ("@module",
"@class")})
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \
(self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines],
tablefmt="plain"))
else:
return str_delimited(lines, None, " = ") + "\n"
def __str__(self):
return self.get_string(sort_keys=True, pretty=False)
def write_file(self, filename):
"""
Write Incar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
"""
Reads an Incar object from a file.
Args:
filename (str): Filename for file
Returns:
Incar object
"""
with zopen(filename, "rt") as f:
return Incar.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
"""
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
m = re.match(r'(\w+)\s*=\s*(.*)', line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return Incar(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert INCAR parameters to proper types, e.g.,
integers, floats, lists, etc.
Args:
key: INCAR parameter key
val: Actual value of INCAR parameter.
"""
list_keys = ("LDAUU", "LDAUL", "LDAUJ", "MAGMOM", "DIPOL", "LANGEVIN_GAMMA",
"QUAD_EFG")
bool_keys = ("LDAU", "LWAVE", "LSCALU", "LCHARG", "LPLANE",
"LHFCALC", "ADDGRID", "LSORBIT", "LNONCOLLINEAR")
float_keys = ("EDIFF", "SIGMA", "TIME", "ENCUTFOCK", "HFSCREEN",
"POTIM", "EDIFFG")
int_keys = ("NSW", "NBANDS", "NELMIN", "ISIF", "IBRION", "ISPIN",
"ICHARG", "NELM", "ISMEAR", "NPAR", "LDAUPRINT", "LMAXMIX",
"ENCUT", "NSIM", "NKRED", "NUPDOWN", "ISPIND", "LDAUTYPE")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in list_keys:
output = []
toks = re.findall(
r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?\*?(-?\d+\.?\d*)?", val)
for tok in toks:
if tok[2] and "3" in tok[0]:
output.extend(
[smart_int_or_float(tok[2])] * int(tok[0])
* int(tok[1]))
elif tok[1]:
output.extend([smart_int_or_float(tok[1])] *
int(tok[0]))
else:
output.append(smart_int_or_float(tok[0]))
return output
if key in bool_keys:
m = re.match(r"^\.?([T|F|t|f])[A-Za-z]*\.?", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
                return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", val).group(0))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
# Not in standard keys. We will try a hierarchy of conversions.
try:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
try:
if key not in ("TITEL", "SYSTEM"):
return re.search(r"^-?[0-9]+", val.capitalize()).group(0)
else:
return val.capitalize()
        except Exception:
return val.capitalize()
def diff(self, other):
"""
Diff function for Incar. Compares two Incars and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other (Incar): The other Incar object to compare to.
Returns:
Dict of the following format:
{"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different}
            Note that the parameters are returned as full dictionaries of values.
E.g. {"ISIF":3}
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"INCAR1": v1, "INCAR2": None}
elif v1 != other[k1]:
different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"INCAR1": None, "INCAR2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another INCAR object to this object.
Facilitates the use of "standard" INCARs.
"""
params = {k: v for k, v in self.items()}
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Incars have conflicting values!")
else:
params[k] = v
return Incar(params)
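# A minimal usage sketch with illustrative tag values: proc_val() typing,
# the diff() report, and the "+" merge defined above.
def _example_incar_usage():
    assert Incar.proc_val("NSW", "99") == 99          # int key
    assert Incar.proc_val("LDAU", ".TRUE.") is True   # bool key
    base = Incar.from_string("ENCUT = 520\nISMEAR = 0")
    relax = base + Incar({"NSW": 99, "IBRION": 2})
    report = base.diff(relax)
    assert set(report["Different"]) == {"NSW", "IBRION"}
    return relax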
class Kpoints_supported_modes(Enum):
Automatic = 0
Gamma = 1
Monkhorst = 2
Line_mode = 3
Cartesian = 4
Reciprocal = 5
def __str__(self):
return self.name
@staticmethod
def from_string(s):
c = s.lower()[0]
for m in Kpoints_supported_modes:
if m.name.lower()[0] == c:
return m
raise ValueError("Can't interprete Kpoint mode %s" % s)
class Kpoints(MSONable):
"""
KPOINT reader/writer.
"""
supported_modes = Kpoints_supported_modes
def __init__(self, comment="Default gamma", num_kpts=0,
style=supported_modes.Gamma,
kpts=((1, 1, 1),), kpts_shift=(0, 0, 0),
kpts_weights=None, coord_type=None, labels=None,
tet_number=0, tet_weight=0, tet_connections=None):
"""
Highly flexible constructor for Kpoints object. The flexibility comes
        at the cost of usability, and in general it is recommended that you
        use the default constructor only if you know exactly what you are
        doing and require the flexibility. For most usage cases, the three automatic
schemes can be constructed far more easily using the convenience static
constructors (automatic, gamma_automatic, monkhorst_automatic) and it
is recommended that you use those.
Args:
comment (str): String comment for Kpoints
num_kpts: Following VASP method of defining the KPOINTS file, this
parameter is the number of kpoints specified. If set to 0
(or negative), VASP automatically generates the KPOINTS.
style: Style for generating KPOINTS. Use one of the
Kpoints.supported_modes enum types.
kpts (2D array): 2D array of kpoints. Even when only a single
specification is required, e.g. in the automatic scheme,
the kpts should still be specified as a 2D array. e.g.,
[[20]] or [[2,2,2]].
kpts_shift (3x1 array): Shift for Kpoints.
kpts_weights: Optional weights for kpoints. Weights should be
integers. For explicit kpoints.
coord_type: In line-mode, this variable specifies whether the
Kpoints were given in Cartesian or Reciprocal coordinates.
labels: In line-mode, this should provide a list of labels for
each kpt. It is optional in explicit kpoint mode as comments for
k-points.
tet_number: For explicit kpoints, specifies the number of
tetrahedrons for the tetrahedron method.
tet_weight: For explicit kpoints, specifies the weight for each
tetrahedron for the tetrahedron method.
tet_connections: For explicit kpoints, specifies the connections
of the tetrahedrons for the tetrahedron method.
Format is a list of tuples, [ (sym_weight, [tet_vertices]),
...]
The default behavior of the constructor is for a Gamma centered,
1x1x1 KPOINTS with no shift.
"""
if num_kpts > 0 and (not labels) and (not kpts_weights):
raise ValueError("For explicit or line-mode kpoints, either the "
"labels or kpts_weights must be specified.")
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.style = style
self.coord_type = coord_type
self.kpts_weights = kpts_weights
self.kpts_shift = kpts_shift
self.labels = labels
self.tet_number = tet_number
self.tet_weight = tet_weight
self.tet_connections = tet_connections
@property
def style(self):
return self._style
@style.setter
def style(self, style):
if isinstance(style, six.string_types):
style = Kpoints.supported_modes.from_string(style)
if style in (Kpoints.supported_modes.Automatic,
Kpoints.supported_modes.Gamma,
Kpoints.supported_modes.Monkhorst) and len(self.kpts) > 1:
raise ValueError("For fully automatic or automatic gamma or monk "
"kpoints, only a single line for the number of "
"divisions is allowed.")
self._style = style
@staticmethod
def automatic(subdivisions):
"""
Convenient static constructor for a fully automatic Kpoint grid, with
gamma centered Monkhorst-Pack grids and the number of subdivisions
along each reciprocal lattice vector determined by the scheme in the
VASP manual.
Args:
subdivisions: Parameter determining number of subdivisions along
each reciprocal lattice vector.
Returns:
Kpoints object
"""
return Kpoints("Fully automatic kpoint scheme", 0,
style=Kpoints.supported_modes.Automatic,
kpts=[[subdivisions]])
@staticmethod
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Gamma, kpts=[kpts],
kpts_shift=shift)
@staticmethod
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Monkhorst, kpts=[kpts],
kpts_shift=shift)
@staticmethod
def automatic_density(structure, kppa, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and
Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure (Structure): Input structure
kppa (int): Grid density
force_gamma (bool): Force a gamma centered mesh (default is to
use gamma only for hexagonal cells or odd meshes)
Returns:
Kpoints
"""
comment = "pymatgen 4.7.6+ generated KPOINTS with grid density = " + \
"%.0f / atom" % kppa
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
is_hexagonal = latt.is_hexagonal()
has_odd = any([i % 2 == 1 for i in num_div])
if has_odd or is_hexagonal or force_gamma:
style = Kpoints.supported_modes.Gamma
else:
style = Kpoints.supported_modes.Monkhorst
return Kpoints(comment, 0, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_gamma_density(structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
"""
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
# VASP documentation recommends to use even grids for n <= 8 and odd
# grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = "pymatgen 4.7.6+ generated KPOINTS with grid density = " + \
"{} / atom".format(kppa)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa,
force_gamma=force_gamma)
@staticmethod
def automatic_linemode(divisions, ibz):
"""
        Convenient static constructor for a KPOINTS file in line mode,
        typically used for band-structure calculations along the
        high-symmetry paths of the Brillouin zone.
Args:
divisions: Parameter determining the number of k-points along each
                high-symmetry line.
ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)
Returns:
Kpoints object
"""
kpoints = list()
labels = list()
for path in ibz.kpath["path"]:
kpoints.append(ibz.kpath["kpoints"][path[0]])
labels.append(path[0])
for i in range(1, len(path) - 1):
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[-1]])
labels.append(path[-1])
return Kpoints("Line_mode KPOINTS file",
style=Kpoints.supported_modes.Line_mode,
coord_type="Reciprocal",
kpts=kpoints,
labels=labels,
num_kpts=int(divisions))
@staticmethod
def from_file(filename):
"""
Reads a Kpoints object from a KPOINTS file.
Args:
filename (str): filename to read from.
Returns:
Kpoints object
"""
with zopen(filename, "rt") as f:
return Kpoints.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile(r'^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+'
r'([\d+.\-Ee]+)')
# Automatic gamma and Monk KPOINTS, with optional shift
if style == "g" or style == "m":
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
                    kpts_shift = [float(i) for i in lines[4].split()]
except ValueError:
pass
return Kpoints.gamma_automatic(kpts, kpts_shift) if style == "g" \
else Kpoints.monkhorst_automatic(kpts, kpts_shift)
# Automatic kpoints with basis
if num_kpts <= 0:
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, kpts_shift=kpts_shift)
# Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" \
else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile(r'([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)'
r'\s*!*\s*(.*)')
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append([float(m.group(1)), float(m.group(2)),
float(m.group(3))])
labels.append(m.group(4).strip())
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, coord_type=coord_type, labels=labels)
# Assume explicit KPOINTS if all else fails.
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append((int(toks[0]),
[int(toks[j])
for j in range(1, 5)]))
except IndexError:
pass
return Kpoints(comment=comment, num_kpts=num_kpts,
style=Kpoints.supported_modes[str(style)],
kpts=kpts, kpts_weights=kpts_weights,
tet_number=tet_number, tet_weight=tet_weight,
tet_connections=tet_connections, labels=labels)
def write_file(self, filename):
"""
Write Kpoints to a file.
Args:
filename (str): Filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def __repr__(self):
return self.__str__()
def __str__(self):
lines = [self.comment, str(self.num_kpts), self.style.name]
style = self.style.name.lower()[0]
if style == "l":
lines.append(self.coord_type)
for i in range(len(self.kpts)):
lines.append(" ".join([str(x) for x in self.kpts[i]]))
if style == "l":
lines[-1] += " ! " + self.labels[i]
if i % 2 == 1:
lines[-1] += "\n"
elif self.num_kpts > 0:
if self.labels is not None:
lines[-1] += " %i %s" % (self.kpts_weights[i],
self.labels[i])
else:
lines[-1] += " %i" % (self.kpts_weights[i])
# Print tetrahedron parameters if the number of tetrahedrons > 0
if style not in "lagm" and self.tet_number > 0:
lines.append("Tetrahedron")
lines.append("%d %f" % (self.tet_number, self.tet_weight))
for sym_weight, vertices in self.tet_connections:
lines.append("%d %d %d %d %d" % (sym_weight, vertices[0],
vertices[1], vertices[2],
vertices[3]))
# Print shifts for automatic kpoints types if not zero.
if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
lines.append(" ".join([str(x) for x in self.kpts_shift]))
return "\n".join(lines) + "\n"
def as_dict(self):
"""json friendly dict representation of Kpoints"""
d = {"comment": self.comment, "nkpoints": self.num_kpts,
"generation_style": self.style.name, "kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights, "coord_type": self.coord_type,
"labels": self.labels, "tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
comment = d.get("comment", "")
generation_style = d.get("generation_style")
kpts = d.get("kpoints", [[1, 1, 1]])
kpts_shift = d.get("usershift", [0, 0, 0])
num_kpts = d.get("nkpoints", 0)
return cls(comment=comment, kpts=kpts, style=generation_style,
kpts_shift=kpts_shift, num_kpts=num_kpts,
kpts_weights=d.get("kpts_weights"),
coord_type=d.get("coord_type"),
labels=d.get("labels"), tet_number=d.get("tet_number", 0),
tet_weight=d.get("tet_weight", 0),
tet_connections=d.get("tet_connections"))
def parse_string(s):
return "{}".format(s.strip())
def parse_bool(s):
m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", s)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(s + " should be a boolean type!")
def parse_float(s):
return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", s).group(0))
def parse_int(s):
return int(re.match(r"^-?[0-9]+", s).group(0))
def parse_list(s):
return [float(y) for y in re.split(r"\s+", s.strip()) if not y.isalpha()]
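# A minimal sketch with illustrative inputs: the four helpers above parse
# single POTCAR keyword values, mirroring Incar.proc_val.
def _example_parse_helpers():
    assert parse_bool(".TRUE.") is True
    assert parse_float("269.862 eV") == 269.862
    assert parse_int("-42 ignored") == -42
    assert parse_list(" 0.5 1.0 T 2.0") == [0.5, 1.0, 2.0]
    assert parse_string("  PAW_PBE  ") == "PAW_PBE"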
@cached_class
class PotcarSingle(object):
"""
    Object for a **single** POTCAR. The builder assumes the complete string
    is a single POTCAR; the complete untouched data is kept in "data" as a
    string, together with a dict of parsed keywords.
Args:
data:
Complete and single potcar file as a string.
.. attribute:: data
POTCAR data as a string.
.. attribute:: keywords
Keywords parsed from the POTCAR as a dict. All keywords are also
        accessible as attributes themselves. E.g., potcar.enmax,
potcar.encut, etc.
"""
functional_dir = {"PBE": "POT_GGA_PAW_PBE",
"PBE_52": "POT_GGA_PAW_PBE_52",
"PBE_54": "POT_GGA_PAW_PBE_54",
"LDA": "POT_LDA_PAW",
"LDA_52": "POT_LDA_PAW_52",
"LDA_54": "POT_LDA_PAW_54",
"PW91": "POT_GGA_PAW_PW91",
"LDA_US": "POT_LDA_US",
"PW91_US": "POT_GGA_US_PW91"}
functional_tags = {"pe": {"name": "PBE", "class": "GGA"},
"91": {"name": "PW91", "class": "GGA"},
"rp": {"name": "revPBE", "class": "GGA"},
"am": {"name": "AM05", "class": "GGA"},
"ps": {"name": "PBEsol", "class": "GGA"},
"pw": {"name": "PW86", "class": "GGA"},
"lm": {"name": "Langreth-Mehl-Hu", "class": "GGA"},
"pb": {"name": "Perdew-Becke", "class": "GGA"},
"ca": {"name": "Perdew-Zunger81", "class": "LDA"},
"hl": {"name": "Hedin-Lundquist", "class": "LDA"},
"wi": {"name": "Wigner Interpoloation", "class": "LDA"}}
parse_functions = {"LULTRA": parse_bool,
"LCOR": parse_bool,
"LPAW": parse_bool,
"EATOM": parse_float,
"RPACOR": parse_float,
"POMASS": parse_float,
"ZVAL": parse_float,
"RCORE": parse_float,
"RWIGS": parse_float,
"ENMAX": parse_float,
"ENMIN": parse_float,
"EAUG": parse_float,
"DEXC": parse_float,
"RMAX": parse_float,
"RAUG": parse_float,
"RDEP": parse_float,
"RDEPT": parse_float,
"QCUT": parse_float,
"QGAM": parse_float,
"RCLOC": parse_float,
"IUNSCR": parse_int,
"ICORE": parse_int,
"NDATA": parse_int,
"VRHFIN": parse_string,
"LEXCH": parse_string,
"TITEL": parse_string,
"STEP": parse_list,
"RRKJ": parse_list,
"GGA": parse_list}
Orbital = namedtuple('Orbital', ['n', 'l', 'j', 'E', 'occ'])
Description = namedtuple('OrbitalDescription', ['l', 'E',
'Type', "Rcut",
"Type2", "Rcut2"])
def __init__(self, data):
self.data = data # raw POTCAR as a string
        # VASP parses the header into vasprun.xml, and it can differ from TITEL
self.header = data.split("\n")[0].strip()
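        # NB: "PSCTR-controll" below is deliberately left misspelled; the
        # regex must match the literal text that appears in the POTCAR data.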
search_lines = re.search(r"(?s)(parameters from PSCTR are:"
r".*?END of PSCTR-controll parameters)",
data).group(1)
self.keywords = {}
for key, val in re.findall(r"(\S+)\s*=\s*(.*?)(?=;|$)",
search_lines, flags=re.MULTILINE):
self.keywords[key] = self.parse_functions[key](val)
PSCTR = OrderedDict()
array_search = re.compile(r"(-*[0-9.]+)")
orbitals = []
descriptions = []
atomic_configuration = re.search(r"Atomic configuration\s*\n?"
r"(.*?)Description", search_lines)
if atomic_configuration:
lines = atomic_configuration.group(1).splitlines()
num_entries = re.search(r"([0-9]+)", lines[0]).group(1)
num_entries = int(num_entries)
PSCTR['nentries'] = num_entries
for line in lines[1:]:
orbit = array_search.findall(line)
if orbit:
orbitals.append(self.Orbital(int(orbit[0]),
int(orbit[1]),
float(orbit[2]),
float(orbit[3]),
float(orbit[4])))
PSCTR['Orbitals'] = tuple(orbitals)
description_string = re.search(r"(?s)Description\s*\n"
r"(.*?)Error from kinetic"
r" energy argument \(eV\)",
search_lines)
for line in description_string.group(1).splitlines():
description = array_search.findall(line)
if description:
descriptions.append(self.Description(int(description[0]),
float(description[1]),
int(description[2]),
float(description[3]),
int(description[4]) if
len(description) > 4
else None,
float(description[5]) if
len(description) > 4
else None))
if descriptions:
PSCTR['OrbitalDescriptions'] = tuple(descriptions)
rrkj_kinetic_energy_string = re.search(
r"(?s)Error from kinetic energy argument \(eV\)\s*\n"
r"(.*?)END of PSCTR-controll parameters",
search_lines)
rrkj_array = []
for line in rrkj_kinetic_energy_string.group(1).splitlines():
if "=" not in line:
rrkj_array += parse_list(line.strip('\n'))
if rrkj_array:
PSCTR['RRKJ'] = tuple(rrkj_array)
PSCTR.update(self.keywords)
self.PSCTR = OrderedDict(sorted(PSCTR.items(), key=lambda x: x[0]))
self.hash = self.get_potcar_hash()
def __str__(self):
return self.data + "\n"
@property
def electron_configuration(self):
el = Element.from_Z(self.atomic_no)
full_config = el.full_electronic_structure
nelect = self.nelectrons
config = []
while nelect > 0:
e = full_config.pop(-1)
config.append(e)
nelect -= e[-1]
return config
def write_file(self, filename):
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
with zopen(filename, "rt") as f:
return PotcarSingle(f.read())
@staticmethod
def from_symbol_and_functional(symbol, functional=None):
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
funcdir = PotcarSingle.functional_dir[functional]
d = SETTINGS.get("PMG_VASP_PSP_DIR")
if d is None:
raise ValueError(
"No POTCAR for %s with functional %s found. "
"Please set the PMG_VASP_PSP_DIR environment in "
".pmgrc.yaml, or you may need to set "
"PMG_DEFAULT_FUNCTIONAL to PBE_52 or PBE_54 if you "
"are using newer psps from VASP." % (symbol, functional))
paths_to_try = [os.path.join(d, funcdir, "POTCAR.{}".format(symbol)),
os.path.join(d, funcdir, symbol, "POTCAR")]
for p in paths_to_try:
p = os.path.expanduser(p)
p = zpath(p)
if os.path.exists(p):
return PotcarSingle.from_file(p)
raise IOError("You do not have the right POTCAR with functional " +
"{} and label {} in your VASP_PSP_DIR".format(functional,
symbol))
@property
def symbol(self):
"""
Symbol of POTCAR, e.g., Fe_pv
"""
return self.keywords["TITEL"].split(" ")[1].strip()
@property
def element(self):
"""
Attempt to return the atomic symbol based on the VRHFIN keyword.
"""
element = self.keywords["VRHFIN"].split(":")[0].strip()
#VASP incorrectly gives the element symbol for Xe as "X"
return "Xe" if element == "X" else element
@property
def atomic_no(self):
"""
Attempt to return the atomic number based on the VRHFIN keyword.
"""
return Element(self.element).Z
@property
def nelectrons(self):
return self.zval
@property
def potential_type(self):
if self.lultra:
return "US"
elif self.lpaw:
return "PAW"
else:
return "NC"
@property
def functional(self):
return self.functional_tags.get(self.LEXCH.lower(), {}).get('name')
@property
def functional_class(self):
return self.functional_tags.get(self.LEXCH.lower(), {}).get('class')
def get_potcar_hash(self):
hash_str = ""
for k, v in self.PSCTR.items():
hash_str += "{}".format(k)
if isinstance(v, int):
hash_str += "{}".format(v)
elif isinstance(v, float):
hash_str += "{:.3f}".format(v)
elif isinstance(v, bool):
hash_str += "{}".format(bool)
elif isinstance(v, (tuple, list)):
for item in v:
if isinstance(item, float):
hash_str += "{:.3f}".format(item)
elif isinstance(item, (self.Orbital, self.Description)):
for item_v in item:
if isinstance(item_v, (int, str)):
hash_str += "{}".format(item_v)
elif isinstance(item_v, float):
hash_str += "{:.3f}".format(item_v)
else:
hash_str += "{}".format(item_v) if item_v else ""
else:
hash_str += v.replace(" ", "")
self.hash_str = hash_str
return md5(hash_str.lower().encode('utf-8')).hexdigest()
def __getattr__(self, a):
"""
Delegates attributes to keywords. For example, you can use
potcarsingle.enmax to get the ENMAX of the POTCAR.
For float type properties, they are converted to the correct float. By
default, all energies in eV and all length scales are in Angstroms.
"""
try:
return self.keywords[a.upper()]
        except KeyError:
raise AttributeError(a)
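# A minimal sketch on a synthetic keyword line (not a real POTCAR),
# showing how the keyword regex in __init__ pairs with parse_functions.
def _example_keyword_parse():
    line = "   ENMAX  =  269.862; ENMIN  =  202.397 eV"
    pairs = re.findall(r"(\S+)\s*=\s*(.*?)(?=;|$)", line, flags=re.MULTILINE)
    # -> {'ENMAX': 269.862, 'ENMIN': 202.397}
    return {k: PotcarSingle.parse_functions[k](v.strip()) for k, v in pairs}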
class Potcar(list, MSONable):
"""
Object for reading and writing POTCAR files for calculations. Consists of a
list of PotcarSingle.
Args:
symbols ([str]): Element symbols for POTCAR. This should correspond
to the symbols used by VASP. E.g., "Mg", "Fe_pv", etc.
functional (str): Functional used. To know what functional options
there are, use Potcar.FUNCTIONAL_CHOICES. Note that VASP has
different versions of the same functional. By default, the old
PBE functional is used. If you want the newer ones, use PBE_52 or
PBE_54. Note that if you intend to compare your results with the
Materials Project, you should use the default setting. You can also
override the default by setting PMG_DEFAULT_FUNCTIONAL in your
.pmgrc.yaml.
sym_potcar_map (dict): Allows a user to specify a specific element
symbol to raw POTCAR mapping.
"""
FUNCTIONAL_CHOICES = list(PotcarSingle.functional_dir.keys())
def __init__(self, symbols=None, functional=None, sym_potcar_map=None):
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
super(Potcar, self).__init__()
self.functional = functional
if symbols is not None:
self.set_symbols(symbols, functional, sym_potcar_map)
def as_dict(self):
return {"functional": self.functional, "symbols": self.symbols,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return Potcar(symbols=d["symbols"], functional=d["functional"])
@staticmethod
def from_file(filename):
with zopen(filename, "rt") as reader:
fdata = reader.read()
potcar = Potcar()
potcar_strings = re.compile(r"\n?(\s*.*?End of Dataset)",
re.S).findall(fdata)
functionals = []
for p in potcar_strings:
single = PotcarSingle(p)
potcar.append(single)
functionals.append(single.functional)
if len(set(functionals)) != 1:
raise ValueError("File contains incompatible functionals!")
else:
potcar.functional = functionals[0]
return potcar
def __str__(self):
return "\n".join([str(potcar).strip("\n") for potcar in self]) + "\n"
def write_file(self, filename):
"""
Write Potcar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@property
def symbols(self):
"""
Get the atomic symbols of all the atoms in the POTCAR file.
"""
return [p.symbol for p in self]
@symbols.setter
def symbols(self, symbols):
self.set_symbols(symbols, functional=self.functional)
@property
def spec(self):
"""
Get the atomic symbols and hash of all the atoms in the POTCAR file.
"""
return [{"symbol": p.symbol, "hash": p.get_potcar_hash()} for p in self]
def set_symbols(self, symbols, functional=None,
sym_potcar_map=None):
"""
Initialize the POTCAR from a set of symbols. Currently, the POTCARs can
be fetched from a location specified in .pmgrc.yaml. Use pmg config
to add this setting.
Args:
symbols ([str]): A list of element symbols
functional (str): The functional to use. If None, the setting
PMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is
not set, it will default to PBE.
sym_potcar_map (dict): A map of symbol:raw POTCAR string. If
sym_potcar_map is specified, POTCARs will be generated from
the given map data rather than the config file location.
"""
del self[:]
if sym_potcar_map:
for el in symbols:
self.append(PotcarSingle(sym_potcar_map[el]))
else:
for el in symbols:
p = PotcarSingle.from_symbol_and_functional(el, functional)
self.append(p)
class VaspInput(dict, MSONable):
"""
Class to contain a set of vasp input objects corresponding to a run.
Args:
incar: Incar object.
kpoints: Kpoints object.
poscar: Poscar object.
potcar: Potcar object.
optional_files: Other input files supplied as a dict of {
filename: object}. The object should follow standard pymatgen
conventions in implementing a as_dict() and from_dict method.
"""
def __init__(self, incar, kpoints, poscar, potcar, optional_files=None,
**kwargs):
super(VaspInput, self).__init__(**kwargs)
self.update({'INCAR': incar,
'KPOINTS': kpoints,
'POSCAR': poscar,
'POTCAR': potcar})
if optional_files is not None:
self.update(optional_files)
def __str__(self):
output = []
for k, v in self.items():
output.append(k)
output.append(str(v))
output.append("")
return "\n".join(output)
def as_dict(self):
d = {k: v.as_dict() for k, v in self.items()}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
sub_d = {"optional_files": {}}
for k, v in d.items():
if k in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
sub_d[k.lower()] = dec.process_decoded(v)
elif k not in ["@module", "@class"]:
sub_d["optional_files"][k] = dec.process_decoded(v)
return cls(**sub_d)
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Write VASP input to a directory.
Args:
output_dir (str): Directory to write to. Defaults to current
directory (".").
make_dir_if_not_present (bool): Create the directory if not
present. Defaults to True.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.items():
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
@staticmethod
def from_directory(input_dir, optional_files=None):
"""
Read in a set of VASP input from a directory. Note that only the
standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless
        optional_files is specified.
Args:
input_dir (str): Directory to read VASP input from.
optional_files (dict): Optional files to read in as well as a
dict of {filename: Object type}. Object type must have a
static method from_file.
"""
sub_d = {}
for fname, ftype in [("INCAR", Incar), ("KPOINTS", Kpoints),
("POSCAR", Poscar), ("POTCAR", Potcar)]:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
sub_d["optional_files"] = {}
if optional_files is not None:
for fname, ftype in optional_files.items():
sub_d["optional_files"][fname] = \
ftype.from_file(os.path.join(input_dir, fname))
return VaspInput(**sub_d)
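# A minimal sketch, assuming pymatgen's core classes are importable. No
# real POTCAR is constructed here (POTCAR data is license-restricted), so
# a None placeholder is written in its place.
def _example_vasp_input(output_dir="vasp_demo"):
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    struct = Structure(Lattice.cubic(3.0), ["Fe"], [[0, 0, 0]])
    vinput = VaspInput(Incar({"ENCUT": 520, "ISMEAR": 0}),
                       Kpoints.gamma_automatic((4, 4, 4)),
                       Poscar(struct),
                       potcar=None)  # placeholder; supply a real Potcar
    # Writes INCAR, KPOINTS, POSCAR and a placeholder POTCAR to output_dir.
    vinput.write_input(output_dir)
    return vinput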
|
tallakahath/pymatgen
|
pymatgen/io/vasp/inputs.py
|
Python
|
mit
| 72,033
|
[
"VASP",
"pymatgen"
] |
e50d5f7952dc74ea116f170d4e8d1ed309c5faba2deb340725ae459591e6c026
|
__author__ = 'stephen'
# ===============================================================================
# GLOBAL IMPORTS:
import os, sys
import numpy as np
import argparse
# ===============================================================================
# LOCAL IMPORTS:
#HK_DataMiner_Path = os.path.relpath(os.pardir)
#HK_DataMiner_Path = os.path.abspath("/home/stephen/Dropbox/projects/work-2016.8/hk_dataminer/")
HK_DataMiner_Path = os.path.relpath(os.pardir)
print(HK_DataMiner_Path)
sys.path.append(HK_DataMiner_Path)
from cluster import KCenters
#from lumping import PCCA, PCCA_Standard, SpectralClustering, Ward
from utils import XTCReader, plot_cluster, utils, split_assignments
# ===============================================================================
cli = argparse.ArgumentParser()
cli.add_argument('-t', '--trajListFns', default = 'trajlist',
help='List of trajectory files to read in, separated by spaces.')
cli.add_argument('-a', '--atomListFns', default='atom_indices',
help='List of atom index files to read in, separated by spaces.')
cli.add_argument('-g', '--topology', default='native.pdb', help='topology file.')
cli.add_argument('-o', '--homedir', help='Home dir.', default=".", type=str)
cli.add_argument('-e', '--iext', help='''The file extension of input trajectory
files. Must be a filetype that mdtraj.load() can recognize.''',
default="xtc", type=str)
cli.add_argument('-n', '--n_clusters', help='''n_clusters.''',
default=100, type=int)
cli.add_argument('-m', '--n_macro_states', help='''n_macro_states.''',
default=6, type=int)
cli.add_argument('-s', '--stride', help='stride.',
default=None, type=int)
args = cli.parse_args()
trajlistname = args.trajListFns
atom_indicesname = args.atomListFns
trajext = args.iext
File_TOP = args.topology
homedir = args.homedir
n_clusters = args.n_clusters
n_macro_states = args.n_macro_states
stride = args.stride
# ===========================================================================
# Reading Trajs from XTC files
print("stride:", stride)
trajreader = XTCReader(trajlistname, atom_indicesname, homedir, trajext, File_TOP, nSubSample=stride)
trajs = trajreader.trajs
traj_len = trajreader.traj_len
np.savetxt("./traj_len.txt", traj_len, fmt="%d")
if os.path.isfile("./phi_angles.txt") and os.path.isfile("./psi_angles.txt") is True:
phi_angles = np.loadtxt("./phi_angles.txt", dtype=np.float32)
psi_angles = np.loadtxt("./psi_angles.txt", dtype=np.float32)
phi_psi = np.column_stack((phi_angles, psi_angles))
else:
phi_angles, psi_angles = trajreader.get_phipsi(trajs, psi=[6, 8, 14, 16], phi=[4, 6, 8, 14])
#phi_angles, psi_angles = trajreader.get_phipsi(trajs, psi=[5, 7, 13, 15], phi=[3, 5, 7, 13])
#phi_psi = np.column_stack((phi_angles, psi_angles))
np.savetxt("./phi_angles.txt", phi_angles, fmt="%f")
np.savetxt("./psi_angles.txt", psi_angles, fmt="%f")
#phi_angles, psi_angles = trajreader.get_phipsi(trajs, psi=[6, 8, 14, 16], phi=[4, 6, 8, 14])
#phi_psi=np.column_stack((phi_angles, psi_angles))
# ===========================================================================
# do Clustering using KCenters method
#cluster = KCenters(n_clusters=n_clusters, metric="euclidean", random_state=0)
cluster = KCenters(n_clusters=n_clusters, metric="rmsd", random_state=0)
print(cluster)
#cluster.fit(phi_psi)
cluster.fit(trajs)
labels = cluster.labels_
print(labels)
n_microstates = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_microstates)
cluster_centers_ = cluster.cluster_centers_
# plot micro states
clustering_name = "kcenters_n_" + str(n_microstates)
splited_assignments = split_assignments(labels, traj_len)
#np.savetxt("assignments_"+clustering_name+".txt", labels, fmt="%d")
np.savetxt("assignments_"+clustering_name+".txt", splited_assignments , fmt="%d")
np.savetxt("cluster_centers_"+clustering_name+".txt", cluster_centers_, fmt="%d")
plot_cluster(labels=labels, phi_angles=phi_angles, psi_angles=psi_angles, name=clustering_name)
trajs[cluster_centers_].save("cluster_centers.pdb")
#trajs_sub_atoms[cluster_centers_].save("cluster_centers_sub_atoms.pdb")
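# A minimal standalone sketch (synthetic angles, numpy only) of the same
# savetxt/loadtxt/column_stack round trip the script uses to cache the
# phi/psi angles; the file names are illustrative.
def _example_angle_cache(n_frames=10):
    phi = np.random.uniform(-180.0, 180.0, n_frames).astype(np.float32)
    psi = np.random.uniform(-180.0, 180.0, n_frames).astype(np.float32)
    np.savetxt("./phi_angles.txt", phi, fmt="%f")
    np.savetxt("./psi_angles.txt", psi, fmt="%f")
    cached = np.column_stack((np.loadtxt("./phi_angles.txt", dtype=np.float32),
                              np.loadtxt("./psi_angles.txt", dtype=np.float32)))
    assert cached.shape == (n_frames, 2)
    return cached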
|
stephenliu1989/HK_DataMiner
|
hkdataminer/scripts/test_kcenters.py
|
Python
|
apache-2.0
| 4,271
|
[
"MDTraj"
] |
873889e83ab1de4e3a90a8096052da71caa1b14194eb721c51c2d3dcdf720812
|
#!/usr/bin/env python
# Run as
# ./switzerland/client/webgui/WebGUI.py --fake
# Where --fake optionally specifies the bogus demo
# that can run without a network
import web
import time
import math
import sys
import os
import random
import logging
import getopt
import socket as s
import base64
import switzerland.common.Flow
from switzerland.common.Flow import print_flow_tuple
from switzerland.client.AliceConfig import AliceConfig, alice_options
from xml.dom.minidom import parse, parseString, getDOMImplementation
from os import path
wireshark_available = True
try:
from scapy.all import wireshark
except ImportError:
try:
from scapy import wireshark
except ImportError:
wireshark_available = False
print "WARNING: Wireshark not available through Scapy."
# singleton_webgui is the most important object! All data that persists between
# calls to this web application server is tied to this instance.
singleton_webgui = None
debug_output = False
web_py_server = '0.0.0.0'
web_py_port = '8080'
config_filename = 'config/config.xml'
short_opt_list = 'Fa:w:s:p:i:l:u:L:P:f:b:hqv'
long_opt_list = ['fake', 'webaddr=', 'webport=',
'server=', 'port=', 'interface=', 'ip=', 'help',
'private-ip=', 'public-ip=', 'logfile=', 'pcap-logs=',
'quiet', 'uncertain-time', 'verbose', 'buffer=']
# The line_graph class represents the line graph and all of its data.
class line_graph:
def __init__( self,
canvas_id="cid",
canvas_context="jg",
width=800,
height=400,
graph_xbins=50,
graph_ybins=20):
self.graph_xbins = graph_xbins
self.graph_xbins_actual = graph_xbins
self.graph_ybins = graph_ybins
self.graph_ybins_actual = graph_ybins
self.width = width
self.height = height
# These get set automatically when the data gets processed
self.max_timestamp = None
self.min_timestamp = None
# Get the maximum y value (packets) that we represent in the graph
self.y_hist_max = None
self.x_bin_size = None # in seconds
self.y_bin_size = None # in packets
# how many pixels per bin
self.x_bin_pixels = None
self.y_bin_pixels = None
# blank margin
self.y_margin = 5
self.x_margin = 10
# margin allowance for drawing axes and labels
self.x_axis_margin = 35
self.y_axis_margin = 30
# actual height of graph-- size of image minus the margins
self.graph_height = height - (self.y_axis_margin + 2 * self.y_margin)
self.graph_width = width - (self.x_axis_margin + 2 * self.x_margin)
# JavaScript canvas context name
self.canvas_context = canvas_context
# HTML element ID of canvas element
self.canvas_id = canvas_id
# Cycle through these colors (10 colors) so we have some contrast
# in the lines in the graph
self.draw_colors = ["#ff0000", "#0000ff", "#009933", "#660066",
"#ff6600", "#6699ff", "#ffcc33", "#00cc00", "#cc3300", "#606060"]
# Turn the incoming packet data into a histogram
# The histogram is an array of bins, where each bin contains the total number
# packets for that time segment as well as packet information for each
# dropped, modified, injected packet in that segment
def make_histogram(self, packet_list):
histogram = list()
# Create the empty histogram
for i in range(0,int(self.graph_xbins_actual)):
histogram.append([0,list()])
try:
# This if block will happen if the packet type is
# injected, modified, or dropped
if isinstance(packet_list[0][1], xPacket):
# Handle detailed packet lists
# Count packets into bins
#for packet_ts in [p[0] for p in packet_list]:
for p in packet_list:
packet_ts = p[0]
i = packet_ts - self.min_timestamp
i = int(i/self.x_bin_size)
if i < len(histogram):
# Increase the packet count
histogram[i][0] = histogram[i][0] + 1
# Add the packet detail to the list
histogram[i][1].append(p[1])
else:
if debug_output:
# This data is preserved for the next reload
print "index", i, "out of range"
else:
# Handle total packet (not detailed) list
# This else block will happen if the packet type is just a
# total packet count
packet_list.sort()
updated_packet_list = list()
prev_time = self.min_timestamp
for p in packet_list:
num_packets = int(p[1])
cur_time = p[0]
time_range = cur_time - prev_time
for i in range(num_packets):
new_time = prev_time + i * (time_range/num_packets)
updated_packet_list.append(new_time)
prev_time = cur_time
for packet_ts in updated_packet_list:
i = packet_ts - self.min_timestamp
i = int(i/self.x_bin_size)
if i < len(histogram):
histogram[i][0] = histogram[i][0] + 1
else:
if debug_output:
# This data is preserved for the next reload
print "index", i, "out of range"
except:
# Prints right to console
# TODO: use logging consistent with switzerland-client (?)
print "WARNING: Something is wrong with the incoming packet data."
print "WARNING: Check to make sure that the packet_list is not None."
# Return histogram
return histogram
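# Illustrative note (not in the original code): each bin of the returned
# histogram is a [count, detail_list] pair. With min_timestamp = 0 and
# x_bin_size = 10 seconds, detailed packets at t = 3, 7 and 14 would give
#   histogram[0] == [2, [pkt_a, pkt_b]]
#   histogram[1] == [1, [pkt_c]]
# and every remaining bin stays [0, []].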
# Get maximum y value in histograms, so we can continue to adjust vertical
# size of graph
def get_y_hist_max(self, include_total=True):
all_packcount = list()
for ip in singleton_webgui.packet_data.active_flows:
for ptype in ('total', 'modified', 'injected', 'dropped'):
if self.histograms[ip][ptype] is not None:
all_packcount.extend([p[0] for p in self.histograms[ip][ptype]])
if len(all_packcount) > 0:
self.y_hist_max = max(all_packcount)
else:
self.y_hist_max = 0
# The minimum and maximum timestamps become the range of the x-axis of the
# graph
def get_min_max_time(self):
all_timestamps = list()
for ip in singleton_webgui.packet_data.active_flows:
for ptype in ('dropped', 'injected', 'modified', 'total'):
ts_list = [p[0] for p in singleton_webgui.packet_data.packet_data[ip][ptype]]
# Rather than concatenating ALL the timestamps, we
# only need the mins and maxes.
if len(ts_list) > 0:
all_timestamps.extend((min(ts_list), max(ts_list)))
if len(all_timestamps) > 0:
self.max_timestamp = max(all_timestamps)
self.min_timestamp = min(all_timestamps)
else:
# If we have no data yet, set to safe value of 0
self.max_timestamp = 0
self.min_timestamp = 0
# Get bin size (for x-axis, in seconds) used if all of these were
# plotted on same graph
def get_hist_xbin_size(self):
range_timestamp = self.max_timestamp - self.min_timestamp
(self.graph_xbins_actual, self.x_bin_size) = \
self.get_round_bin_size(range_timestamp, self.graph_xbins)
# Return bin size (in seconds)
return self.x_bin_size
# Round the estimated bin size up to a whole number
def get_round_bin_size(self, value_range, bins):
assert bins > 0
est_bin_size = float(value_range) / float(bins)
if est_bin_size > 0:
actual_bin_size = math.ceil(est_bin_size)
actual_bins = math.ceil(int(value_range) / est_bin_size)
return (actual_bins, actual_bin_size)
return (1, 1) # Empty data set
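# Worked example (illustrative): value_range = 95 seconds of data across 50
# requested bins gives est_bin_size = 1.9, so the actual bin size becomes
# ceil(1.9) = 2 seconds and the actual bin count ceil(95 / 1.9) = 50 bins.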
# Use the data to create JavaScript statements which will be sent to the
# web page and used to create the graph
def make_graph_data(self, name, histogram, point_shape="circle",
color="black"):
if histogram is None or len(histogram) == 0:
return ""
(source_ip, source_port, dest_ip, dest_port, proto,
packet_type, packet_type_long) = self.split_name(name)
assert self.graph_xbins_actual > 0
i = 0
self.x_bin_pixels = int(self.graph_width/self.graph_xbins_actual)
(self.graph_ybins_actual, self.y_bin_size) = \
self.get_round_bin_size(self.y_hist_max, self.graph_ybins)
assert self.y_bin_size > 0
self.y_bin_pixels = int(self.graph_height/self.graph_ybins_actual)
html = "\n"
xhtml = "new Array("
yhtml = "new Array("
for hist_bin in histogram:
# Get x from histogram bin
# Get y from histogram value
x = str(i * (self.x_bin_pixels) + self.x_axis_margin + self.x_margin)
y = hist_bin[0] * self.y_bin_pixels / self.y_bin_size
y = str(self.height - (y + self.y_axis_margin + self.y_margin))
xhtml = xhtml + x + ","
yhtml = yhtml + y + ","
i = i + 1
xhtml = xhtml[:-1]
yhtml = yhtml[:-1]
xhtml = xhtml + ")"
yhtml = yhtml + ")"
indent = " "
html = html + self.canvas_id + "_data['" + name + "']"
html = html + " = new FlowData(\n" + indent
html = html + xhtml + ",\n" + indent + yhtml + ",\n" + indent + self.canvas_context
html = html + ",\n" + indent + "'" + point_shape + "', '" + color + "',\n" + indent + "'" + name + "',\n "
html = html + indent + "'" + source_ip + "', '" + source_port + "',\n "
html = html + indent + "'" + dest_ip+ "', '" + dest_port + "' , '" + proto + "', '" + packet_type + "');\n\n"
# Return JavaScript graph data (for line drawing)
return html
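# Illustrative sketch (hypothetical values) of one generated JavaScript
# statement; the browser-side FlowData constructor is assumed to consume it:
#   cid_data['10_0_0_1_80__10_0_0_2_443_TCP_to'] = new FlowData(
#       new Array(45, 61, 77), new Array(390, 372, 381), jg,
#       'circle', '#ff0000', '10_0_0_1_80__10_0_0_2_443_TCP_to',
#       '10.0.0.1', '80', '10.0.0.2', '443', 'TCP', 'to');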
# Given the flow name format used for hashes (underscores instead of
# periods and colons), turn the flow name back into its constituent parts
def split_name(self, flow_name):
parts = flow_name.replace("__", "_").split("_")
source_ip = ".".join(parts[0:4])
dest_ip = ".".join(parts[5:9])
source_port = parts[4]
dest_port = parts[9]
if len(parts) > 10:
proto = parts[10]
else:
proto = ""
if len(parts) > 11:
packet_type = parts[11]
if packet_type == "mo":
packet_type_long = "modified"
elif packet_type == "in":
packet_type_long = "injected"
elif packet_type == "dr":
packet_type_long = "dropped"
else:
packet_type_long = "total"
else:
packet_type = ""
packet_type_long = ""
return (source_ip, source_port, dest_ip, dest_port, proto, packet_type, packet_type_long)
# Legend for the graph
def make_legend(self):
if singleton_webgui.client_status != 'running':
return "The Switzerland client is stopped."
i = 0
entries = list()
for flow_name in singleton_webgui.packet_data.active_flows:
(source_ip, source_port, dest_ip, dest_port, proto, packet_type, packet_type_long) = self.split_name(flow_name)
# List each flow once only.
entries.append(('''leg_''' + flow_name + '''_to''', source_ip, source_port, dest_ip, dest_port, proto, '''total''' ))
i = i + 1
render = web.template.render('templates')
return render.packet_graph_legend(self.canvas_id,
entries, singleton_webgui.packet_data.visible_flows)
# Return graph HTML to render graph with HTML canvas element
def make_graph(self):
# Update data and active flows
# singleton_webgui object persists between calls to the web application
if singleton_webgui.client_status != 'running':
return "The Switzerland client is stopped."
singleton_webgui.packet_data.update_active_flows()
singleton_webgui.packet_data.update_packet_data()
self.get_min_max_time()
if self.max_timestamp != 0:
# Get bin size for all flows
self.get_hist_xbin_size()
self.histograms = dict()
indent = " "
graph_data_html = indent + "var " + self.canvas_id + "_data = new Array();\n"
flow_names = dict()
# For each flow considered
for flow_name in singleton_webgui.packet_data.active_flows:
# Make a histogram for each packet type
self.histograms[flow_name] = dict()
if singleton_webgui.packet_data.visible_flows.get(flow_name + "_dr") == 'on':
self.histograms[flow_name]['dropped'] = \
self.make_histogram(singleton_webgui.packet_data.packet_data[flow_name]['dropped'])
else:
self.histograms[flow_name]['dropped'] = None
if singleton_webgui.packet_data.visible_flows.get(flow_name + "_in") == 'on':
self.histograms[flow_name]['injected'] = \
self.make_histogram(singleton_webgui.packet_data.packet_data[flow_name]['injected'])
else:
self.histograms[flow_name]['injected'] = None
if singleton_webgui.packet_data.visible_flows.get(flow_name + "_mo") == 'on':
self.histograms[flow_name]['modified'] = \
self.make_histogram(singleton_webgui.packet_data.packet_data[flow_name]['modified'])
else:
self.histograms[flow_name]['modified'] = None
if singleton_webgui.packet_data.visible_flows.get(flow_name + "_to") == 'on':
self.histograms[flow_name]['total'] = \
self.make_histogram(singleton_webgui.packet_data.packet_data[flow_name]['total'])
else:
self.histograms[flow_name]['total'] = None
i = 0
# Save our histogram to the persistent object
singleton_webgui.packet_data.current_histograms = self.histograms
# Get maximum y value (# of packets)
self.get_y_hist_max(True)
for flow_name in singleton_webgui.packet_data.active_flows:
color = self.draw_colors[i % len(self.draw_colors)]
h = self.make_graph_data(flow_name + "_dr", self.histograms[flow_name]['dropped'], "x", color)
graph_data_html = graph_data_html + h
h = self.make_graph_data(flow_name + "_in", self.histograms[flow_name]['injected'], "triangle", color)
graph_data_html = graph_data_html + h
h = self.make_graph_data(flow_name + "_mo", self.histograms[flow_name]['modified'], "square", color)
graph_data_html = graph_data_html + h
h = self.make_graph_data(flow_name + "_to", self.histograms[flow_name]['total'], "total", color)
graph_data_html = graph_data_html + h
flow_names[flow_name] = flow_name
i = i + 1
# Use a temporary dict to pass all of the graph variables to the
# JavaScript function
graph_opts = dict()
graph_opts['canvas_id'] = self.canvas_id
graph_opts['canvas_context'] = self.canvas_context
graph_opts['x_margin'] = self.x_margin
graph_opts['y_margin'] = self.y_margin
graph_opts['x_axis_margin'] = self.x_axis_margin
graph_opts['y_axis_margin'] = self.y_axis_margin
graph_opts['width'] = self.width
graph_opts['height'] = self.height
graph_opts['graph_xbins_actual'] = self.graph_xbins_actual
graph_opts['graph_ybins'] = self.graph_ybins
graph_opts['x_bin_pixels'] = self.x_bin_pixels
graph_opts['x_bin_size'] = self.x_bin_size
graph_opts['y_bin_pixels'] = self.y_bin_pixels
graph_opts['y_bin_size'] = self.y_bin_size
graph_opts['min_timestamp'] = self.min_timestamp
# Finally, plot to canvas
# return html
render = web.template.render('templates')
return render.packet_graph(graph_opts,
graph_data_html, self.canvas_id + "_data")
else:
return "No data yet."
# This function is not used except for debugging
def dump_graph_info(self):
html = ''' <br>
self.graph_height ''' + str(self.graph_height) + '''<br>
self.graph_width ''' + str(self.graph_width) + '''<br>
self.graph_xbins ''' + str(self.graph_xbins) + '''<br>
self.graph_xbins_actual ''' + str(self.graph_xbins_actual) + '''<br>
self.graph_ybins ''' + str(self.graph_ybins) + '''<br>
self.graph_ybins_actual ''' + str(self.graph_ybins_actual) + '''<br>
self.y_hist_max ''' + str(self.y_hist_max) + '''<br>
range ''' + str(self.max_timestamp - self.min_timestamp)+ '''<br>
self.max_timestamp ''' + str(self.max_timestamp)+ '''<br>
self.min_timestamp ''' + str(self.min_timestamp)+ '''<br>
time ''' + str(time.time())+ '''<br>
self.cutoff_time ''' + str(self.cutoff_time)+ '''<br>
self.x_bin_pixels ''' + str(self.x_bin_pixels) + '''<br>
self.x_bin_size ''' + str(self.x_bin_size) + '''<br>
self.y_bin_pixels ''' + str(self.y_bin_pixels) + '''<br>
self.y_bin_size ''' + str(self.y_bin_size) + '''<br>
<br>
'''
return html
# The ajax_server is a web page which receives and responds to ajax
# requests from the web application
class ajax_server:
def GET(self):
webin = web.input()
command = webin.command
print "command", command
render = web.template.render('templates/ajax_response')
if command == 'packetInfo':
return self.packet_info(webin, render)
if command == 'updateGraph':
return self.update_graph(webin, render)
if command == 'updateLegend':
return self.update_legend(webin, render)
if command == 'launchWireshark':
return self.launch_wireshark(webin, render, web)
if command == 'clientServiceControl':
return self.client_service_control(webin, render)
if command == 'getRandomKey':
return self.get_random_key(webin, render)
else:
return("no handler for command " + command)
# Update the graph data from incoming switzerland data and send new
# JavaScript to the browser
def update_graph(self, webin, render):
if singleton_webgui.client_status == 'running':
graph = line_graph()
# Call make_graph FIRST to load data into structures
if graph is not None:
graph_html = graph.make_graph()
singleton_webgui.packet_data.current_graph = graph
return graph_html
else:
return "<p>The Switzerland client is collecting packets. Please wait a few seconds...</p>"
else:
return "<p>The Switzerland client service is stopped.</p>"
# Send a new legend to the browser (important as flows change)
def update_legend(self, webin, render):
if singleton_webgui.client_status == 'running':
if singleton_webgui.packet_data is not None and singleton_webgui.packet_data.current_graph is not None:
legend_html = singleton_webgui.packet_data.current_graph.make_legend()
return legend_html
else:
return "<p>The Switzerland client is collecting packets. Please wait a few seconds...</p>"
else:
return "<p>The Switzerland client service is stopped.</p>"
# Send packet info details to the browser
# TODO: xPacket is not implemented so we have no good data to send.
def packet_info(self, webin, render):
if singleton_webgui.client_status == 'running':
flow_name = webin.flowId
hist_bin = webin.histBinId
flow_name = flow_name[:-3]
modified = singleton_webgui.packet_data.current_histograms[flow_name]['modified'][int(hist_bin)][1]
injected = singleton_webgui.packet_data.current_histograms[flow_name]['injected'][int(hist_bin)][1]
dropped = singleton_webgui.packet_data.current_histograms[flow_name]['dropped'][int(hist_bin)][1]
pi = render.packet_info(modified, injected, dropped)
return pi
else:
return None
# Attempt to launch wireshark using scapy
def launch_wireshark(self, webin, render, web):
packet_type = webin.packetType
random_key = webin.randomKey
#print " Current key: " + singleton_webgui.random_key
#print "Submitted key: " + random_key
#referer = web.ctx.env.get('HTTP_REFERER', 'INVALID')
ip_addr = web.ctx.ip
#print "Referrer: " + referer
#print "IP: " + ip_addr
if ip_addr == '127.0.0.1':
if random_key == singleton_webgui.random_key:
if wireshark_available and singleton_webgui.web_app_config['allow_wireshark']:
flow_name = webin.flowId
hist_bin = webin.histBinId
flow_name = flow_name[:-3]
packet_list = list()
if packet_type == 'dropped':
p_list = [p.raw_data() for p in singleton_webgui.packet_data.current_histograms[flow_name]['dropped'][int(hist_bin)][1]]
packet_list.extend(p_list)
if packet_type == 'injected':
p_list = [p.raw_data() for p in singleton_webgui.packet_data.current_histograms[flow_name]['injected'][int(hist_bin)][1]]
packet_list.extend(p_list)
if packet_type == 'modified':
p_list = [p.raw_data() for p in singleton_webgui.packet_data.current_histograms[flow_name]['modified'][int(hist_bin)][1]]
packet_list.extend(p_list)
if len(packet_list) > 0:
return_value = wireshark(packet_list)
print "Wireshark return value: " + str(return_value)
else:
print "Will not launch wireshark for list of 0 packets."
return_value = "Will not launch wireshark for list of 0 packets."
return return_value
else:
if not wireshark_available:
print "WARNING: Wireshark not available through Scapy."
if not singleton_webgui.web_app_config['allow_wireshark']:
print "Wireshark has been disabled in configuration."
print "WARNING: received a rogue Wireshark start request."
else:
print "WARNING: Random keys do not match. May be a rogue Wireshark start request."
print "If this was a legitimate request, the keys may have been refreshing. Please try again."
else:
print "WARNING: A request not originating at localhost was received to launch Wireshark"
print "Originating IP: " + ip_addr
def client_service_control(self, webin, render):
commandString = webin.commandString
if commandString == 'stop':
singleton_webgui.stopService()
return "<p>stopped</p>"
if commandString == 'start':
singleton_webgui.startService()
return "<p>running</p>"
def get_random_key(self, webin, render):
singleton_webgui.random_key = base64.b64encode(os.urandom(64))
#print "Returning new key: " + singleton_webgui.random_key
return "<key>" + singleton_webgui.random_key + "</key>"
class index:
def GET(self):
return self.main()
def POST(self):
return self.main()
def main(self):
# This is called only when we first call up the page
# After that all changes come through ajax calls (or going to another
# page and coming back)
render = web.template.render('templates', globals={'Flow': switzerland.common.Flow})
menu = render.menu("main")
graph = line_graph()
# Call make_graph FIRST to load data into structures
graph_html = graph.make_graph()
if singleton_webgui.client_status == 'running':
singleton_webgui.packet_data.current_graph = graph
client_info = render.client_info(singleton_webgui.x_alice.get_client_info())
server_info = render.server_info(singleton_webgui.x_alice.get_server_info())
active_flows = render.flow_list(singleton_webgui.packet_data.active_flows)
active_peers = render.peer_list(singleton_webgui.packet_data.active_peers)
else:
client_info = "<p>The Switzerland client service is stopped.</p>"
server_info = client_info
active_flows = client_info
active_peers = client_info
legend = graph.make_legend()
return render.dashboard(
singleton_webgui.client_status,
menu,
client_info,
server_info,
active_flows,
active_peers,
legend,
graph_html,
singleton_webgui.web_app_config['refresh_interval'][0],
singleton_webgui.web_app_config['allow_wireshark'][0],
singleton_webgui.random_key)
# List mutable configuration parameters, allow to change
# TODO: Options are not serialized between invocations of the client
class config:
def GET(self):
return self.main()
def POST(self):
webin = web.input()
message = ""
if webin.form == "frmApplicationOpt":
# Edit web application variables
message = "Changes saved."
try:
singleton_webgui.web_app_config['save_window'][0] = int(webin.save_window)
except:
message = "The save window must be a number of seconds."
try:
singleton_webgui.web_app_config['refresh_interval'][0] = int(webin.refresh_interval)
except:
message = "The refresh interval must be a number of seconds."
try:
singleton_webgui.web_app_config['allow_wireshark'][0]=str2bool(webin.allow_wireshark)
except:
message = "Failed to set the Wireshark variable."
elif webin.form == "frmImmutableOpt":
# These can't be changed on the fly, so they are saved to a temporary structure
# and written to file by save_config
message = "Changes saved. You must restart the Switzerland client for these changes to take effect."
for key in webin.keys():
post_value = webin.get(key)
if len(post_value) > 0 and key != 'form':
singleton_webgui.next_immutable_config[key] = str(post_value)
print "Setting " + key + " to " + post_value
else:
# Edit tweakable variables
message = "Changes saved."
if singleton_webgui.client_status == 'running' and singleton_webgui.x_alice_config is not None:
try:
#lvl = self.LOG_LEVELS.get(webin.log_level, logging.NOTSET)
singleton_webgui.x_alice_config.set_option("log_level", int(webin.log_level))
except:
message = "The log_level must be a valid python logging log level (e.g. logging.DEBUG)"
try:
singleton_webgui.x_alice_config.set_option("seriousness", int(webin.seriousness))
except:
message = "Seriousness must be an integer."
try:
singleton_webgui.x_alice_config.set_option("do_cleaning", str2bool(webin.do_cleaning))
except:
message = "Do cleaning format is invalid."
singleton_webgui.next_tweakable_config["log_level"] = int(webin.log_level)
singleton_webgui.next_tweakable_config["seriousness"] = int(webin.seriousness)
singleton_webgui.next_tweakable_config["do_cleaning"] = str2bool(webin.do_cleaning)
singleton_webgui.save_config(config_filename)
return self.main(message)
def main(self, message=""):
render = web.template.render('templates', globals={'logging': logging})
menu = render.menu("config")
return render.config(menu,
singleton_webgui.x_alice_config,
singleton_webgui.next_tweakable_config,
singleton_webgui.next_immutable_config,
singleton_webgui.web_app_config,
message)
# Packet data persists between web page calls
# Persistent data belongs in this object.
class packet_data:
def __init__(self):
self.packet_data = dict()
self.active_flows = dict()
# Flow visibility is currently handled in the client
# Currently all flows have visibility "on" as far as the
# server is concerned.
self.visible_flows = dict()
self.active_peers = list()
self.current_histograms = None
self.current_graph = None
def init_visible_flows(self):
self.update_active_flows()
for flow_name in self.active_flows:
self.visible_flows[flow_name + "_mo"] = "on"
self.visible_flows[flow_name + "_dr"] = "on"
self.visible_flows[flow_name + "_in"] = "on"
self.visible_flows[flow_name + "_to"] = "on"
def update_active_flows(self):
peers = singleton_webgui.x_alice.get_peers()
for p in peers:
# Only add a peer that is not in the list yet
if s.inet_ntoa(p.ip) not in self.active_peers:
self.active_peers.append(s.inet_ntoa(p.ip))
flows = p.new_flows()
if isinstance(flows, list):
for f in flows:
flow_name = flow_key(f)
if not self.active_flows.get(flow_name):
self.active_flows[flow_name] = f
self.visible_flows[flow_name + "_mo"] = "on"
self.visible_flows[flow_name + "_dr"] = "on"
self.visible_flows[flow_name + "_in"] = "on"
self.visible_flows[flow_name + "_to"] = "on"
print "ADDING", flow_name
del_flows = list()
for f in self.active_flows:
if not self.active_flows[f].is_active():
del_flows.append(f)
for f in del_flows:
del self.active_flows[f]
# If packets are older than cutoff time (usually an hour) delete them
def delete_old_packets(self, packet_list, cutoff_time):
new_packet_list = list()
for packet in packet_list:
if packet[0] > cutoff_time:
new_packet_list.append(packet)
else:
if debug_output:
print "Deleting packet..."
return new_packet_list
def update_packet_data(self):
# For each active flow
for flow_ip in self.active_flows:
# If flow does not exist in dictionary object, add
f = self.active_flows[flow_ip]
if not self.packet_data.get(flow_ip):
self.packet_data[flow_ip] = dict()
self.packet_data[flow_ip]['dropped'] = list()
self.packet_data[flow_ip]['injected'] = list()
self.packet_data[flow_ip]['modified'] = list()
self.packet_data[flow_ip]['total'] = list()
# Each active flow has 4 lists of packets: dropped, injected,
# modified, total count
self.cutoff_time = time.time() - singleton_webgui.web_app_config['save_window'][0]
self.packet_data[flow_ip]['dropped'].extend( \
f.get_new_dropped_packets())
self.packet_data[flow_ip]['injected'].extend( \
f.get_new_injected_packets())
self.packet_data[flow_ip]['modified'].extend( \
f.get_new_modified_packets())
pack_count = f.get_new_packet_count()
self.packet_data[flow_ip]['total'].extend( \
[(time.time(), pack_count) ])
self.packet_data[flow_ip]['dropped'] = \
self.delete_old_packets(
self.packet_data[flow_ip]['dropped'],
self.cutoff_time)
self.packet_data[flow_ip]['injected'] = \
self.delete_old_packets(
self.packet_data[flow_ip]['injected'],
self.cutoff_time)
self.packet_data[flow_ip]['modified'] = \
self.delete_old_packets(
self.packet_data[flow_ip]['modified'],
self.cutoff_time)
self.packet_data[flow_ip]['total'] = \
self.delete_old_packets(
self.packet_data[flow_ip]['total'],
self.cutoff_time)
# Definition of getText from Python documentation
#http://docs.python.org/library/xml.dom.minidom.html
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def make_element(dom, tag, contents):
new_element = dom.createElement(tag)
if contents is None or contents == 'None':
contents = ""
new_element.appendChild(dom.createTextNode(str(contents)))
return new_element
# Turn a flow tuple into a suitable hash key (replace . and : with _)
def flow_key(f):
t = print_flow_tuple(f.flow_tuple)
return str(t[0].replace(".", "_")) + "_" + str(t[1]) + "__" \
+ str(t[2].replace(".", "_")) + "_" + str(t[3]) \
+ "_" + str(t[4])
def optlist_contains(optlist, checklist):
for option, argument in optlist:
if option in checklist:
return True
return False
# str2bool is from Brian Bondy http://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python
def str2bool(s):
return s.lower() in ["yes", "true", "t", "1"]
class WebGUI():
def __init__(self):
self.random_key = base64.b64encode(os.urandom(64))
self.web_app_config = dict()
self.web_app_config['save_window'] = [60 * 60,
"Save window", "Number of seconds to save"]
self.web_app_config['refresh_interval'] = [20,
"Refresh interval", "Number of seconds between refresh"]
self.web_app_config['allow_wireshark'] = [True,
"Allow Wireshark", "Allow the client to launch Wireshark"]
self.urls = (
'/', 'index',
'', 'index',
'/ajax_server', 'ajax_server',
'/ajax_server/', 'ajax_server',
'/config', 'config')
self.client_status = 'stopped'
self.next_immutable_config = dict()
self.next_tweakable_config = dict()
for opt, info in alice_options.items():
default, opt_type, mutable, visible = info
if visible:
if mutable:
self.next_tweakable_config[opt] = default
else:
self.next_immutable_config[opt] = default
def startService(self):
self.x_alice_config = ClientConfig()
self.load_config(config_filename)
self.x_alice = xAlice(self.x_alice_config)
self.packet_data = packet_data()
self.packet_data.init_visible_flows()
self.client_status = 'running'
print "Starting Switzerland client service..."
def stopService(self):
self.x_alice_config = None
self.x_alice = None
self.packet_data = None
self.client_status = 'stopped'
print "Stopping Switzerland client service..."
def main(self):
self.startService()
self.app = web.application(self.urls, globals())
if len(sys.argv) > 1:
alice_opts = sys.argv
sys.argv[1:] = []
# web.py binds to web_py_server:web_py_port; the default 0.0.0.0 listens on
# all interfaces, while 127.0.0.1 would restrict access to localhost only
sys.argv.insert(1, web_py_server + ":" + web_py_port)
self.app.run()
def load_config(self, filename, override_cmdline=False):
if os.path.isfile(filename):
print "Attempting to load configuration file."
print "If this causes problems, delete file config/config.xml to use defaults."
config_dom = parse(filename)
# Immutable
for (option_name, option_type) in self.x_alice_config.immutable_options():
temp = getText(config_dom.getElementsByTagName(option_name)[0].childNodes).strip()
print "Setting " + option_name + ": " + temp
if temp is not None and len(temp) > 0:
if option_type == int:
temp = int(temp)
if option_type == float:
temp = float(temp)
if option_type == bool:
temp = str2bool(temp)
self.x_alice_config.set_option(option_name, temp)
# Mutable
for (option_name, option_type) in self.x_alice_config.tweakable_options():
temp = getText(config_dom.getElementsByTagName(option_name)[0].childNodes).strip()
print "Setting " + option_name + ": " + temp
if temp is not None and len(temp) > 0:
if option_type == int:
temp = int(temp)
if option_type == float:
temp = float(temp)
if option_type == bool:
temp = str2bool(temp)
self.x_alice_config.set_option(option_name, temp)
# Web
for key in self.web_app_config.keys():
temp = getText(config_dom.getElementsByTagName(key)[0].childNodes).strip()
print "Setting " + key + ": " + temp
if key == 'allow_wireshark':
temp = str2bool(temp)
self.web_app_config[key][0] = int(temp)
# optlist, args = getopt.gnu_getopt(sys.argv[1:], short_opt_list,
# long_opt_list)
# newArgList = []
# newArgList.append(sys.argv[0])
# Workaround the fact that we call the Switzerland command line client
# and must transfer some information as command line options
#===================================================================
# if override_cmdline:
# for option, argument in optlist:
# if option in ("-s", "--server", "-i", "--interface"):
# pass # Effectively delete old option
# else:
# newArgList.append(option)
# if argument is not None and len(argument) > 0:
# newArgList.append(argument)
# newArgList.append("--server")
# newArgList.append(self.x_alice_config.get_option('host'))
# newArgList.append("--interface")
# newArgList.append(self.x_alice_config.get_option('interface'))
# else:
# for option, argument in optlist:
# newArgList.append(option)
# if argument is not None and len(argument) > 0:
# newArgList.append(argument)
# if not optlist_contains(optlist, ("-s", "--server")):
# print "appending server" + self.x_alice_config.get_option('host')
# newArgList.append("--server")
# newArgList.append(self.x_alice_config.get_option('host'))
# if not optlist_contains(optlist, ("-i", "--interface")):
# print "appending interface" + self.x_alice_config.get_option('interface')
# newArgList.append("--interface")
# newArgList.append(self.x_alice_config.get_option('interface'))
#
# sys.argv = newArgList
#===================================================================
print "Configuration file loaded."
else:
print "No configuration file. Using default values."
def save_config(self, filename):
impl = getDOMImplementation()
new_config = impl.createDocument(None, "configuration", None)
top_element = new_config.documentElement
new_section = make_element(new_config,
"tweakable_options", "")
top_element.appendChild(new_section)
for option_name in singleton_webgui.next_tweakable_config.keys():
new_child = make_element(new_config,
option_name ,
singleton_webgui.next_tweakable_config[option_name])
new_section.appendChild(new_child)
new_section = make_element(new_config,
"immutable_options", "")
top_element.appendChild(new_section)
for option_name in singleton_webgui.next_immutable_config.keys():
new_child = make_element(new_config,
option_name ,
singleton_webgui.next_immutable_config[option_name])
new_section.appendChild(new_child)
new_section = make_element(new_config,
"web_application", "")
top_element.appendChild(new_section)
for option_name in self.web_app_config.keys():
config_value = self.web_app_config.get(option_name)
config_value = config_value[0]
new_section.appendChild(make_element(new_config,
option_name ,
str(config_value)))
out_file = open(filename,"w")
new_config.writexml(out_file, '', ' ', "\n", "ISO-8859-1")
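# Illustrative sketch (hypothetical values) of the config.xml layout that
# save_config() produces:
#   <configuration>
#     <tweakable_options><log_level>10</log_level>...</tweakable_options>
#     <immutable_options><host>switzerland.example.org</host>...</immutable_options>
#     <web_application><save_window>3600</save_window>
#       <refresh_interval>20</refresh_interval>
#       <allow_wireshark>True</allow_wireshark></web_application>
#   </configuration>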
if __name__ == "__main__":
# We have to change the working directory to the directory of the WebGUI script
# If we don't, the script can't find all of the static and template files
pathname = os.path.dirname(sys.argv[0])
os.chdir(os.path.abspath(pathname))
try:
optlist, args = getopt.gnu_getopt(sys.argv[1:], short_opt_list,
long_opt_list)
except getopt.GetoptError:
AliceConfig().usage()
useFake = False
newArgList = []
newArgList.append(sys.argv[0])
for option, argument in optlist:
if option in ("-F", "--fake"):
useFake = True
elif option in ("-a", "--webaddr"):
web_py_server = argument
elif option in ("-w", "--webport"):
web_py_port = argument
else:
newArgList.append(option)
if argument is not None and len(argument) > 0:
newArgList.append(argument)
sys.argv = newArgList
# Use AliceAPIFake instead of AliceAPI when you have no peers or no internet connection
# It generates somewhat reasonable random data
if useFake:
from switzerland.client.AliceAPIFake import xAlice, ClientConfig, xPeer, xFlow, xPacket
else:
from switzerland.client.AliceAPI import xAlice, ClientConfig, xPeer, xFlow, xPacket
singleton_webgui = WebGUI()
singleton_webgui.main()
|
isislovecruft/switzerland
|
switzerland/client/webgui/WebGUI.py
|
Python
|
gpl-3.0
| 46,962
|
[
"Brian"
] |
ab8951580ea14348a127008ec71b9ad037add7e22f9d00cc7813979cc64b15c8
|
#!/usr/bin/python
# This code is for educational purposes only ;)
# coded by jimmyromanticdevil
# code for the Python tutorial [ Building an Auto Clicker Bot ]
import urllib2
import urllib
import sys
import time
import random
import re
import os
proxylisttext = "proxylist.txt"
useragent = ['Mozilla/4.0 (compatible; MSIE 5.0; SunOS 5.10 sun4u; X11)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.2pre) Gecko/20100207 Ubuntu/9.04 (jaunty) Namoroka/3.6.2pre',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser;',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)',
'Microsoft Internet Explorer/4.0b1 (Windows 95)',
'Opera/8.00 (Windows NT 5.1; U; en)',
'amaya/9.51 libwww/5.4.0',
'Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; ZoomSpider.net bot; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; QihooBot 1.0 qihoobot@qihoo.net)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 5.11 [en]']
referer = ['http://google.com','http://bing.com','http://facebook.com','http://twitter.com','http://www.yahoo.com']
link_invitation = 'http://youtu.be/IDNhBrZGNl0?t=4s&autoplay=1'
def Autoclicker(proxy1):
try:
proxy = proxy1.split(":")
print 'Auto Click Using proxy :',proxy1
proxy_set = urllib2.ProxyHandler({"http" : "%s:%d" % (proxy[0], int(proxy[1]))})
opener = urllib2.build_opener(proxy_set, urllib2.HTTPHandler)
opener.addheaders = [('User-agent', random.choice(useragent)),
('Referer', random.choice(referer))]
urllib2.install_opener(opener)
f = urllib2.urlopen(link_invitation)
if "youtu.be" in f.read():
print "[*] Invitation link was clicked..."
else:
print "[*] Invitation link was not clicked!"
print "[!] Proxy failed"
except:
print "[!] Proxy Error"
def loadproxy():
try:
get_file = open(proxylisttext, "r")
proxylist = get_file.readlines()
proxy = [line.strip() for line in proxylist]
for i in proxy:
Autoclicker(i)
except IOError:
print "\n[-] Error: Check your proxylist path\n"
sys.exit(1)
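# Illustrative proxylist.txt format (hypothetical addresses): one host:port
# entry per line, matching the proxy1.split(":") parsing above, e.g.
#   203.0.113.7:8080
#   198.51.100.23:3128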
def main():
print """
#################################
Simulation Bot Autoclicker
coder : elangoverdosis
http://adf.ly/unZg3
################################
"""
loadproxy()
if __name__ == '__main__':
main()
|
elangoverdosis2/autoclickpython
|
bot.py
|
Python
|
mpl-2.0
| 2,826
|
[
"ADF"
] |
763d8963b4f0411948b69341400eda618ab1675a597336efd536a467311f5a25
|
#!/usr/bin/env yamtbx.python
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
"""
Add saturation flags (SATURATED_PART, SATURATED_ALL) to an mtz file.
Read the MAXC column from INTEGRATE.HKL and compare it with the user-specified overload= value.
For a unique reflection, if at least one (or all) of its observations is saturated, the PART (or ALL) flag is set to True.
Before anything else, read XDS_ASCII.HKL and reject reflections with negative sigma!!
Usage: cctbx.python add_saturation_info_to_mtz.py XDS_ASCII_copy_free.mtz ../INTEGRATE.HKL ../XDS_ASCII.HKL overload=65535 hklout=test2.mtz
"""
master_params_str = """\
hklin = None
.type = path
.help = MTZ file (input)
hklout = None
.type = path
.help = MTZ file (output)
integrate_hkl = None
.type = path
.help = INTEGRATE.HKL file
xds_ascii = None
.type = path
.help = XDS_ASCII.HKL file
overload = None
.type = int
.help = Overload value (65535 for 16 bit)
"""
import iotbx.phil
import iotbx.mtz
from cctbx import miller
from cctbx.array_family import flex
from yamtbx.dataproc.xds import integrate_hkl_as_flex
from yamtbx.dataproc.xds.xds_ascii import XDS_ASCII
def run(params, log_out):
xa = XDS_ASCII(params.xds_ascii, log_out)
rejected_array = miller.array(miller_set=miller.set(crystal_symmetry=xa.symm,
indices=xa.indices,
anomalous_flag=False),
data=xa.sigma_iobs < 0)
xa_zd = miller.array(miller_set=miller.set(crystal_symmetry=xa.symm,
indices=xa.indices,
anomalous_flag=False),
data=xa.zd)
# Read ZCAL, not ZOBS, because ZOBS (and XOBS, YOBS) can be zero (in case unobserved).
integ_data = integrate_hkl_as_flex.reader(params.integrate_hkl, ["MAXC","ZCAL"]).arrays()
maxc_array, integ_zcal = integ_data["MAXC"], integ_data["ZCAL"]
assert integ_zcal.unit_cell().is_similar_to(xa_zd.unit_cell()) # two set of indices should be comparable.
overload_flags = maxc_array.customized_copy(data=maxc_array.data() == params.overload)
print "Overloaded observations in INTEGRATE.HKL:", overload_flags.data().count(True)
print "Rejected (sigma<0) observations in XDS_ASCII.HKL:", rejected_array.data().count(True)
# common_sets() does not work correctly for unmerged data!
rejected_zd = xa_zd.select(rejected_array.data())
#reject_indices = flex.bool([False for i in xrange(overload_flags.size())])
print "making indices..........."
import yamtbx_utils_ext
integ_zcal = integ_zcal.sort(by_value="packed_indices") # Must be sorted before C++ function below!!
reject_indices = yamtbx_utils_ext.make_selection_for_xds_unmerged(rejected_zd.indices(),
rejected_zd.data(),
integ_zcal.indices(),
integ_zcal.data(),
3.)
"""
# This loop is too slow!
for i in xrange(rejected_zd.size()):
sel = integ_zcal.indices() == rejected_zd.indices()[i]
sel &= (integ_zcal.data() - rejected_zd.data()[i]) < 3
reject_indices.set_selected(sel, True)
print i, rejected_zd.size(), sel.count(True)
"""
"""
# This loop is also too slow!
for j in xrange(integ_zcal.size()): # j: INTEGRATE.HKL
if rejected_zd.indices()[i] != integ_zcal.indices()[j]:
continue
if abs(rejected_zd.data()[i] - integ_zcal.data()[j]) < 3: # within 3 frames.. OK?
reject_indices[j] = True
"""
print "Found rejected observations in INTEGRATE.HKL:", reject_indices.count(True)
overload_flags.data().set_selected(reject_indices, False) # Set 'Un-overloaded'
print "Remained overloaded observations:", overload_flags.data().count(True)
overload_flags_partial = overload_flags.map_to_asu().merge_equivalents(incompatible_flags_replacement=True).array()
overload_flags_all = overload_flags.map_to_asu().merge_equivalents(incompatible_flags_replacement=False).array()
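# Illustrative example (not from the original): a unique reflection observed
# three times with MAXC = [65535, 40000, 65535] and overload=65535 merges to
# SATURATED_PART = True (some observations saturated) and
# SATURATED_ALL = False (not all of them are).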
mtz_object = iotbx.mtz.object(params.hklin). \
add_crystal("crystal", "project", overload_flags_all.unit_cell()). \
add_dataset(name="dataset", wavelength=0). \
add_miller_array(miller_array=overload_flags_all, column_root_label="SATURATED_ALL"). \
add_miller_array(miller_array=overload_flags_partial, column_root_label="SATURATED_PART"). \
mtz_object()
mtz_object.write(file_name=params.hklout)
if __name__ == "__main__":
import sys
import os
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
for arg in cmdline.remaining_args:
if params.hklin is None and arg.endswith(".mtz"):
params.hklin = arg
elif params.integrate_hkl is None and "INTEGRATE" in arg:
params.integrate_hkl = arg
elif params.xds_ascii is None and "XDS_ASCII" in arg:
params.xds_ascii = arg
print "Paramters:"
cmdline.work.format(python_object=params).show(out=sys.stdout, prefix=" ")
print
if None in (params.hklin, params.xds_ascii, params.integrate_hkl, params.overload):
print "Missing information!"
sys.exit(1)
if params.hklout is None:
params.hklout = os.path.splitext(os.path.basename(params.hklin))[0] + "_olflag.mtz"
run(params, sys.stdout)
|
keitaroyam/yamtbx
|
yamtbx/dataproc/xds/command_line/add_saturation_info_to_mtz.py
|
Python
|
bsd-3-clause
| 5,858
|
[
"CRYSTAL"
] |
c18d151c8fe6da173fad95bca673dac034d106de873fff9cf2346143ce0e9596
|
"""
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
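# Note on the branches above: scikit-learn stores covariances_ with a shape
# that depends on covariance_type -- full: (n_components, n_features,
# n_features); tied: (n_features, n_features); diag: (n_components,
# n_features); spherical: (n_components,).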
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0))
for cov_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
|
imaculate/scikit-learn
|
examples/mixture/plot_gmm_covariances.py
|
Python
|
bsd-3-clause
| 4,723
|
[
"Gaussian"
] |
0b99ea916c69e2aecc185e35cb71130308d689225feda2e86da8d032de968ec2
|
# This file is part of cclib (http://cclib.sf.net), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
__revision__ = "$Revision$"
import re
import numpy
from . import logfileparser
from . import utils
class Gaussian(logfileparser.Logfile):
"""A Gaussian 98/03 log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(Gaussian, self).__init__(logname="Gaussian", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "Gaussian log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'Gaussian("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of Gaussian labels.
To normalise:
(1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta]
(2) replace any G or U by their lowercase equivalent
>>> sym = Gaussian("dummyfile").normalisesym
>>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG']
>>> map(sym, labels)
['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g']
"""
# note: DLT must come after DLTA
greek = [('SG', 'sigma'), ('PI', 'pi'), ('PHI', 'phi'),
('DLTA', 'delta'), ('DLT', 'delta')]
for k, v in greek:
if label.startswith(k):
tmp = label[len(k):]
label = v
if tmp:
label = v + "." + tmp
ans = label.replace("U", "u").replace("G", "g")
return ans
def before_parsing(self):
# Used to index self.scftargets[].
SCFRMS, SCFMAX, SCFENERGY = list(range(3))
# Flag that indicates whether it has reached the end of a geoopt.
self.optfinished = False
# Flag for identifying Coupled Cluster runs.
self.coupledcluster = False
# Fragment number for counterpoise or fragment guess calculations
# (normally zero).
self.counterpoise = 0
# Flag for identifying ONIOM calculations.
self.oniom = False
def after_parsing(self):
# Correct the percent values in the etsecs in the case of
# a restricted calculation. The following has the
# effect of including each transition twice.
if hasattr(self, "etsecs") and len(self.homos) == 1:
new_etsecs = [[(x[0], x[1], x[2] * numpy.sqrt(2)) for x in etsec]
for etsec in self.etsecs]
self.etsecs = new_etsecs
if hasattr(self, "scanenergies"):
self.scancoords = []
self.scancoords = self.atomcoords
if (hasattr(self, 'enthalpy') and hasattr(self, 'temperature')
and hasattr(self, 'freeenergy')):
# G = H - T*S, so S = (H - G) / T
self.entropy = (self.enthalpy - self.freeenergy)/self.temperature
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
#Extract PES scan data
#Summary of the potential surface scan:
# N A SCF
#---- --------- -----------
# 1 109.0000 -76.43373
# 2 119.0000 -76.43011
# 3 129.0000 -76.42311
# 4 139.0000 -76.41398
# 5 149.0000 -76.40420
# 6 159.0000 -76.39541
# 7 169.0000 -76.38916
# 8 179.0000 -76.38664
# 9 189.0000 -76.38833
# 10 199.0000 -76.39391
# 11 209.0000 -76.40231
#---- --------- -----------
if "Summary of the potential surface scan:" in line:
scanenergies = []
scanparm = []
colmnames = next(inputfile)
hyphens = next(inputfile)
line = next(inputfile)
while line != hyphens:
broken = line.split()
scanenergies.append(float(broken[-1]))
scanparm.append(list(map(float, broken[1:-1])))
line = next(inputfile)
if not hasattr(self, "scanenergies"):
self.scanenergies = []
self.scanenergies = scanenergies
if not hasattr(self, "scanparm"):
self.scanparm = []
self.scanparm = scanparm
if not hasattr(self, "scannames"):
self.scannames = colmnames.split()[1:-1]
#Extract Thermochemistry
#Temperature 298.150 Kelvin. Pressure 1.00000 Atm.
#Zero-point correction= 0.342233 (Hartree/
#Thermal correction to Energy= 0.
#Thermal correction to Enthalpy= 0.
#Thermal correction to Gibbs Free Energy= 0.302940
#Sum of electronic and zero-point Energies= -563.649744
#Sum of electronic and thermal Energies= -563.636699
#Sum of electronic and thermal Enthalpies= -563.635755
#Sum of electronic and thermal Free Energies= -563.689037
if "Sum of electronic and thermal Enthalpies" in line:
if not hasattr(self, 'enthalpy'):
self.enthalpy = float(line.split()[6])
if "Sum of electronic and thermal Free Energies=" in line:
if not hasattr(self, 'freeenergy'):
self.freeenergy = float(line.split()[7])
if line[1:12] == "Temperature":
if not hasattr(self, 'temperature'):
self.temperature = float(line.split()[1])
# Number of atoms.
if line[1:8] == "NAtoms=":
self.updateprogress(inputfile, "Attributes", self.fupdate)
natom = int(line.split()[1])
if not hasattr(self, "natom"):
self.natom = natom
# Catch message about completed optimization.
if line[1:23] == "Optimization completed":
self.optfinished = True
self.optdone = True
# Extract the atomic numbers and coordinates from the input orientation,
# in the event the standard orientation isn't available.
if not self.optfinished and line.find("Input orientation") > -1 or line.find("Z-Matrix orientation") > -1:
# If this is a counterpoise calculation, this output means that
# the supermolecule is now being considered, so we can set:
self.counterpoise = 0
self.updateprogress(inputfile, "Attributes", self.cupdate)
if not hasattr(self, "inputcoords"):
self.inputcoords = []
self.inputatoms = []
hyphens = next(inputfile)
colmNames = next(inputfile)
colmNames = next(inputfile)
hyphens = next(inputfile)
atomcoords = []
line = next(inputfile)
while line != hyphens:
broken = line.split()
self.inputatoms.append(int(broken[1]))
atomcoords.append(list(map(float, broken[3:6])))
line = next(inputfile)
self.inputcoords.append(atomcoords)
if not hasattr(self, "atomnos"):
self.atomnos = numpy.array(self.inputatoms, 'i')
self.natom = len(self.atomnos)
# Extract the atomic masses.
# Typical section:
# Isotopes and Nuclear Properties:
#(Nuclear quadrupole moments (NQMom) in fm**2, nuclear magnetic moments (NMagM)
# in nuclear magnetons)
#
# Atom 1 2 3 4 5 6 7 8 9 10
# IAtWgt= 12 12 12 12 12 1 1 1 12 12
# AtmWgt= 12.0000000 12.0000000 12.0000000 12.0000000 12.0000000 1.0078250 1.0078250 1.0078250 12.0000000 12.0000000
# NucSpn= 0 0 0 0 0 1 1 1 0 0
# AtZEff= -3.6000000 -3.6000000 -3.6000000 -3.6000000 -3.6000000 -1.0000000 -1.0000000 -1.0000000 -3.6000000 -3.6000000
# NQMom= 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
# NMagM= 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 2.7928460 2.7928460 2.7928460 0.0000000 0.0000000
# ... with blank lines dividing blocks of ten, and Leave Link 101 at the end.
# This is generally parsed before coordinates, so atomnos is not defined.
# Note that in Gaussian03 the comments are not there yet and the labels are different.
if line.strip() == "Isotopes and Nuclear Properties:":
if not hasattr(self, "atommasses"):
self.atommasses = []
line = next(inputfile)
while line[1:16] != "Leave Link 101":
if line[1:8] == "AtmWgt=":
self.atommasses.extend(list(map(float,line.split()[1:])))
line = next(inputfile)
# Extract the atomic numbers and coordinates of the atoms.
if not self.optfinished and line.strip() == "Standard orientation:":
self.updateprogress(inputfile, "Attributes", self.cupdate)
# If this is a counterpoise calculation, this output means that
# the supermolecule is now being considered, so we can set:
self.counterpoise = 0
if not hasattr(self, "atomcoords"):
self.atomcoords = []
hyphens = next(inputfile)
colmNames = next(inputfile)
colmNames = next(inputfile)
hyphens = next(inputfile)
atomnos = []
atomcoords = []
line = next(inputfile)
while line != hyphens:
broken = line.split()
atomnos.append(int(broken[1]))
atomcoords.append(list(map(float, broken[-3:])))
line = next(inputfile)
self.atomcoords.append(atomcoords)
if not hasattr(self, "natom"):
self.atomnos = numpy.array(atomnos, 'i')
self.natom = len(self.atomnos)
# make sure atomnos is added for the case where natom has already been set
elif not hasattr(self, "atomnos"):
self.atomnos = numpy.array(atomnos, 'i')
# Find the targets for SCF convergence (QM calcs).
if line[1:44] == 'Requested convergence on RMS density matrix':
if not hasattr(self, "scftargets"):
self.scftargets = []
# The following can happen with ONIOM which are mixed SCF
# and semi-empirical
if isinstance(self.scftargets, numpy.ndarray):
self.scftargets = []
scftargets = []
# The RMS density matrix.
scftargets.append(self.float(line.split('=')[1].split()[0]))
line = next(inputfile)
# The MAX density matrix.
scftargets.append(self.float(line.strip().split('=')[1][:-1]))
line = next(inputfile)
# For G03, there's also the energy (not for G98).
if line[1:10] == "Requested":
scftargets.append(self.float(line.strip().split('=')[1][:-1]))
self.scftargets.append(scftargets)
# Extract SCF convergence information (QM calcs).
if line[1:10] == 'Cycle 1':
if not hasattr(self, "scfvalues"):
self.scfvalues = []
scfvalues = []
line = next(inputfile)
while line.find("SCF Done") == -1:
self.updateprogress(inputfile, "QM convergence", self.fupdate)
if line.find(' E=') == 0:
self.logger.debug(line)
# RMSDP=3.74D-06 MaxDP=7.27D-05 DE=-1.73D-07 OVMax= 3.67D-05
# or
# RMSDP=1.13D-05 MaxDP=1.08D-04 OVMax= 1.66D-04
if line.find(" RMSDP") == 0:
parts = line.split()
newlist = [self.float(x.split('=')[1]) for x in parts[0:2]]
# Default to 1.0 when no DE= field is present on the line.
energy = 1.0
if len(parts) > 4:
energy = parts[2].split('=')[1]
# Handle "DE= -1.73D-07" (space after the '='), where the value
# lands in the next whitespace-separated field.
if energy == "":
energy = self.float(parts[3])
else:
energy = self.float(energy)
if len(self.scftargets[0]) == 3: # Only add the energy if it's a target criterion
newlist.append(energy)
scfvalues.append(newlist)
try:
line = next(inputfile)
# May be interrupted by EOF.
except StopIteration:
break
self.scfvalues.append(scfvalues)
# Extract SCF convergence information (AM1 calcs).
if line[1:4] == 'It=':
self.scftargets = numpy.array([1E-7], "d") # This is the target value for the rms
self.scfvalues = [[]]
line = next(inputfile)
while line.find(" Energy") == -1:
if self.progress:
step = inputfile.tell()
if step != oldstep:
self.progress.update(step, "AM1 Convergence")
oldstep = step
if line[1:4] == "It=":
parts = line.strip().split()
self.scfvalues[0].append(self.float(parts[-1][:-1]))
line = next(inputfile)
# Note: this needs to follow the section where 'SCF Done' is used
# to terminate a loop when extracting SCF convergence information.
if line[1:9] == 'SCF Done' and not self.oniom:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
self.scfenergies.append(utils.convertor(self.float(line.split()[4]), "hartree", "eV"))
# gmagoon 5/27/09: added scfenergies reading for PM3 case
# Example line: " Energy= -0.077520562724 NIter= 14."
# See regression Gaussian03/QVGXLLKOCUKJST-UHFFFAOYAJmult3Fixed.out
if line[1:8] == 'Energy=' and not self.oniom:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
self.scfenergies.append(utils.convertor(self.float(line.split()[1]), "hartree", "eV"))
# cfare 7/11/13: added scfenergies reading for ONIOM case
# Example line: " ONIOM: extrapolated energy = -1381.592818535788"
if line[1:27] == "ONIOM: extrapolated energy":
if not hasattr(self, "scfenergies"):
self.scfenergies = []
self.scfenergies.append(utils.convertor(self.float(line.split()[4]), "hartree", "eV"))
# Total energies after Moller-Plesset corrections.
# Second order correction is always first, so its first occurrence
# triggers creation of mpenergies (list of lists of energies).
# Further MP2 corrections are appended as found.
#
# Example MP2 output line:
# E2 = -0.9505918144D+00 EUMP2 = -0.28670924198852D+03
# Warning! this output line is subtly different for MP3/4/5 runs
if "EUMP2" in line[27:34]:
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
mp2energy = self.float(line.split("=")[2])
self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
# Example MP3 output line:
# E3= -0.10518801D-01 EUMP3= -0.75012800924D+02
if line[34:39] == "EUMP3":
mp3energy = self.float(line.split("=")[2])
self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV"))
# Example MP4 output lines:
# E4(DQ)= -0.31002157D-02 UMP4(DQ)= -0.75015901139D+02
# E4(SDQ)= -0.32127241D-02 UMP4(SDQ)= -0.75016013648D+02
# E4(SDTQ)= -0.32671209D-02 UMP4(SDTQ)= -0.75016068045D+02
# Only the energy for the highest level of substitution is used (SDTQ by default)
if line[34:42] == "UMP4(DQ)":
mp4energy = self.float(line.split("=")[2])
line = next(inputfile)
if line[34:43] == "UMP4(SDQ)":
mp4energy = self.float(line.split("=")[2])
line = next(inputfile)
if line[34:44] == "UMP4(SDTQ)":
mp4energy = self.float(line.split("=")[2])
self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV"))
# Example MP5 output line:
# DEMP5 = -0.11048812312D-02 MP5 = -0.75017172926D+02
if line[29:32] == "MP5":
mp5energy = self.float(line.split("=")[2])
self.mpenergies[-1].append(utils.convertor(mp5energy, "hartree", "eV"))
# Total energies after Coupled Cluster corrections.
# Second order MBPT energies (MP2) are also calculated for these runs,
# but the output is the same as when parsing for mpenergies.
# Read the consecutive correlated energies
# but append only the last one to ccenergies.
# Only the highest-level energy is appended - e.g. CCSD(T), not CCSD.
if line[1:10] == "DE(Corr)=" and line[27:35] == "E(CORR)=":
self.ccenergy = self.float(line.split()[3])
if line[1:10] == "T5(CCSD)=":
line = next(inputfile)
if line[1:9] == "CCSD(T)=":
self.ccenergy = self.float(line.split()[1])
if line[12:53] == "Population analysis using the SCF density":
if hasattr(self, "ccenergy"):
if not hasattr(self, "ccenergies"):
self.ccenergies = []
self.ccenergies.append(utils.convertor(self.ccenergy, "hartree", "eV"))
del self.ccenergy
# Geometry convergence information.
if line[49:59] == 'Converged?':
if not hasattr(self, "geotargets"):
self.geovalues = []
self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0], "d")
newlist = [0]*4
for i in range(4):
line = next(inputfile)
self.logger.debug(line)
parts = line.split()
try:
value = self.float(parts[2])
except ValueError:
self.logger.error("Problem parsing the value for geometry optimisation: %s is not a number." % parts[2])
else:
newlist[i] = value
self.geotargets[i] = self.float(parts[3])
self.geovalues.append(newlist)
# Gradients.
# Read in the cartesian energy gradients (forces) from a block like this:
# -------------------------------------------------------------------
# Center Atomic Forces (Hartrees/Bohr)
# Number Number X Y Z
# -------------------------------------------------------------------
# 1 1 -0.012534744 -0.021754635 -0.008346094
# 2 6 0.018984731 0.032948887 -0.038003451
# 3 1 -0.002133484 -0.006226040 0.023174772
# 4 1 -0.004316502 -0.004968213 0.023174772
# -2 -0.001830728 -0.000743108 -0.000196625
# ------------------------------------------------------------------
#
# The "-2" line is for a dummy atom
#
# When the optimization is done in internal coordinates, Gaussian also
# prints the forces in internal coordinates, which can be derived from
# the above. This block looks like this:
# Variable Old X -DE/DX Delta X Delta X Delta X New X
# (Linear) (Quad) (Total)
# ch 2.05980 0.01260 0.00000 0.01134 0.01134 2.07114
# hch 1.75406 0.09547 0.00000 0.24861 0.24861 2.00267
# hchh 2.09614 0.01261 0.00000 0.16875 0.16875 2.26489
# Item Value Threshold Converged?
if line[37:43] == "Forces":
if not hasattr(self, "grads"):
self.grads = []
header = next(inputfile)
dashes = next(inputfile)
line = next(inputfile)
forces = []
while line != dashes:
broken = line.split()
Fx, Fy, Fz = broken[-3:]
forces.append([float(Fx), float(Fy), float(Fz)])
line = next(inputfile)
self.grads.append(forces)
# Charge and multiplicity.
# If counterpoise correction is used, multiple lines match.
# The first one contains the charge/multiplicity of the whole molecule:
# Charge = 0 Multiplicity = 1 in supermolecule
# Charge = 0 Multiplicity = 1 in fragment 1.
# Charge = 0 Multiplicity = 1 in fragment 2.
if line[1:7] == 'Charge' and line.find("Multiplicity") >= 0:
regex = r".*=(.*)Mul.*=\s*-?(\d+).*"
match = re.match(regex, line)
assert match, "Something unusual about the line: '%s'" % line
if not hasattr(self, "charge"):
self.charge = int(match.groups()[0])
if not hasattr(self, "mult"):
self.mult = int(match.groups()[1])
# Orbital symmetries.
if line[1:20] == 'Orbital symmetries:' and not hasattr(self, "mosyms"):
# For counterpoise fragments, skip these lines.
if self.counterpoise != 0: return
self.updateprogress(inputfile, "MO Symmetries", self.fupdate)
self.mosyms = [[]]
line = next(inputfile)
unres = False
if line.find("Alpha Orbitals") == 1:
unres = True
line = next(inputfile)
i = 0
while len(line) > 18 and line[17] == '(':
if line.find('Virtual') >= 0:
self.homos = numpy.array([i-1], "i") # 'HOMO' indexes the HOMO in the arrays
parts = line[17:].split()
for x in parts:
self.mosyms[0].append(self.normalisesym(x.strip('()')))
i += 1
line = next(inputfile)
if unres:
line = next(inputfile)
# Repeat with beta orbital information
i = 0
self.mosyms.append([])
while len(line) > 18 and line[17] == '(':
if line.find('Virtual')>=0:
# Here we consider beta
# If there was also an alpha virtual orbital,
# we will store two indices in the array
# Otherwise there is no alpha virtual orbital,
# only beta virtual orbitals, and we initialize
# the array with one element. See the regression
# QVGXLLKOCUKJST-UHFFFAOYAJmult3Fixed.out
# donated by Gregory Magoon (gmagoon).
if (hasattr(self, "homos")):
# Extend the array to two elements
# 'HOMO' indexes the HOMO in the arrays
self.homos.resize([2])
self.homos[1] = i-1
else:
# 'HOMO' indexes the HOMO in the arrays
self.homos = numpy.array([i-1], "i")
parts = line[17:].split()
for x in parts:
self.mosyms[1].append(self.normalisesym(x.strip('()')))
i += 1
line = next(inputfile)
# Alpha/Beta electron eigenvalues.
if line[1:6] == "Alpha" and line.find("eigenvalues") >= 0:
# For counterpoise fragments, skip these lines.
if self.counterpoise != 0: return
# For ONIOM calcs, ignore this section in order to bypass assertion failure.
if self.oniom: return
self.updateprogress(inputfile, "Eigenvalues", self.fupdate)
self.moenergies = [[]]
HOMO = -2
while line.find('Alpha') == 1:
if line.split()[1] == "virt." and HOMO == -2:
# If there aren't any symmetries, this is a good way to find the HOMO.
# Also, check for consistency if homos was already parsed.
HOMO = len(self.moenergies[0])-1
if hasattr(self, "homos"):
assert HOMO == self.homos[0]
else:
self.homos = numpy.array([HOMO], "i")
# Convert to floats and append to moenergies, but sometimes Gaussian
# doesn't print correctly so test for ValueError (bug 1756789).
part = line[28:]
i = 0
while i*10+4 < len(part):
s = part[i*10:(i+1)*10]
try:
x = self.float(s)
except ValueError:
x = numpy.nan
self.moenergies[0].append(utils.convertor(x, "hartree", "eV"))
i += 1
line = next(inputfile)
# If, at this point, self.homos is unset, then there were not
# any alpha virtual orbitals
if not hasattr(self, "homos"):
HOMO = len(self.moenergies[0])-1
self.homos = numpy.array([HOMO], "i")
if line.find('Beta') == 2:
self.moenergies.append([])
HOMO = -2
while line.find('Beta') == 2:
if line.split()[1] == "virt." and HOMO == -2:
# If there aren't any symmetries, this is a good way to find the HOMO.
# Also, check for consistency if homos was already parsed.
HOMO = len(self.moenergies[1])-1
if len(self.homos) == 2:
assert HOMO == self.homos[1]
else:
self.homos.resize([2])
self.homos[1] = HOMO
part = line[28:]
i = 0
while i*10+4 < len(part):
x = part[i*10:(i+1)*10]
self.moenergies[1].append(utils.convertor(self.float(x), "hartree", "eV"))
i += 1
line = next(inputfile)
self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
# Gaussian Rev <= B.0.3 (?)
# AO basis set in the form of general basis input:
# 1 0
# S 3 1.00 0.000000000000
# 0.7161683735D+02 0.1543289673D+00
# 0.1304509632D+02 0.5353281423D+00
# 0.3530512160D+01 0.4446345422D+00
# SP 3 1.00 0.000000000000
# 0.2941249355D+01 -0.9996722919D-01 0.1559162750D+00
# 0.6834830964D+00 0.3995128261D+00 0.6076837186D+00
# 0.2222899159D+00 0.7001154689D+00 0.3919573931D+00
if line[1:16] == "AO basis set in":
# For counterpoise fragment calculations, skip these lines.
if self.counterpoise != 0: return
self.gbasis = []
line = next(inputfile)
while line.strip():
gbasis = []
line = next(inputfile)
while line.find("*")<0:
temp = line.split()
symtype = temp[0]
numgau = int(temp[1])
gau = []
for i in range(numgau):
temp = list(map(self.float, next(inputfile).split()))
gau.append(temp)
for i, x in enumerate(symtype):
newgau = [(z[0], z[i+1]) for z in gau]
gbasis.append((x, newgau))
line = next(inputfile) # i.e. "****" or "SP ...."
self.gbasis.append(gbasis)
line = next(inputfile) # i.e. "20 0" or blank line
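# For illustration, the STO-3G carbon block shown in the example above
# would yield a gbasis entry like (values taken from that example):
# [('S', [(71.6168, 0.1543), (13.0451, 0.5353), (3.5305, 0.4446)]),
#  ('S', [(2.9412, -0.0999), (0.6835, 0.3995), (0.2223, 0.7001)]),
#  ('P', [(2.9412, 0.1559), (0.6835, 0.6077), (0.2223, 0.3920)])]
# i.e. the symtype loop splits an SP shell into separate S and P tuples.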
# Start of the IR/Raman frequency section.
# Caution is advised here, as additional frequency blocks
# can be printed by Gaussian (with slightly different formats),
# often doubling the information printed.
# See, for a non-standard example, regression Gaussian98/test_H2.log
if line[1:14] == "Harmonic freq":
self.updateprogress(inputfile, "Frequency Information", self.fupdate)
removeold = False
# The whole block should not have any blank lines.
while line.strip() != "":
# The line with indices
if line[1:15].strip() == "" and line[15:23].strip().isdigit():
freqbase = int(line[15:23])
if freqbase == 1 and hasattr(self, 'vibfreqs'):
# This is a reparse of this information
removeold = True
# Lines with symmetries and symm. indices begin with whitespace.
if line[1:15].strip() == "" and not line[15:23].strip().isdigit():
if not hasattr(self, 'vibsyms'):
self.vibsyms = []
syms = line.split()
self.vibsyms.extend(syms)
if line[1:15] == "Frequencies --":
if not hasattr(self, 'vibfreqs'):
self.vibfreqs = []
if removeold: # This is a reparse, so throw away the old info
if hasattr(self, "vibsyms"):
# We have already parsed the vibsyms so don't throw away!
self.vibsyms = self.vibsyms[-len(line[15:].split()):]
if hasattr(self, "vibirs"):
self.vibirs = []
if hasattr(self, 'vibfreqs'):
self.vibfreqs = []
if hasattr(self, 'vibramans'):
self.vibramans = []
if hasattr(self, 'vibdisps'):
self.vibdisps = []
removeold = False
freqs = [self.float(f) for f in line[15:].split()]
self.vibfreqs.extend(freqs)
if line[1:15] == "IR Inten --":
if not hasattr(self, 'vibirs'):
self.vibirs = []
# irs = [self.float(f) for f in line[15:].split()]
irs = []
for ir in line[15:].split():
try:
irs.append(self.float(ir))
except ValueError:
irs.append(self.float('nan'))
self.vibirs.extend(irs)
if line[1:15] == "Raman Activ --":
if not hasattr(self, 'vibramans'):
self.vibramans = []
ramans = [self.float(f) for f in line[15:].split()]
self.vibramans.extend(ramans)
# Block with displacement should start with this.
if line.strip().split()[0:3] == ["Atom", "AN", "X"]:
if not hasattr(self, 'vibdisps'):
self.vibdisps = []
disps = []
for n in range(self.natom):
line = next(inputfile)
numbers = [float(s) for s in line[10:].split()]
N = len(numbers) // 3
if not disps:
for n in range(N):
disps.append([])
for n in range(N):
disps[n].append(numbers[3*n:3*n+3])
self.vibdisps.extend(disps)
line = next(inputfile)
# Electronic transitions.
if line[1:14] == "Excited State":
if not hasattr(self, "etenergies"):
self.etenergies = []
self.etoscs = []
self.etsyms = []
self.etsecs = []
# Need to deal with lines like:
# (restricted calc)
# Excited State 1: Singlet-BU 5.3351 eV 232.39 nm f=0.1695
# (unrestricted calc) (first excited state is 2!)
# Excited State 2: ?Spin -A 0.1222 eV 10148.75 nm f=0.0000
# (Gaussian 09 ZINDO)
# Excited State 1: Singlet-?Sym 2.5938 eV 478.01 nm f=0.0000 <S**2>=0.000
p = re.compile(":(?P<sym>.*?)(?P<energy>-?\d*\.\d*) eV")
groups = p.search(line).groups()
self.etenergies.append(utils.convertor(self.float(groups[1]), "eV", "cm-1"))
self.etoscs.append(self.float(line.split("f=")[-1].split()[0]))
self.etsyms.append(groups[0].strip())
line = next(inputfile)
p = re.compile("(\d+)")
CIScontrib = []
while line.find(" ->") >= 0: # This is a contribution to the transition
parts = line.split("->")
self.logger.debug(parts)
# Has to deal with lines like:
# 32 -> 38 0.04990
# 35A -> 45A 0.01921
frommoindex = 0 # For restricted or alpha unrestricted
fromMO = parts[0].strip()
if fromMO[-1] == "B":
frommoindex = 1 # For beta unrestricted
fromMO = int(p.match(fromMO).group())-1 # subtract 1 so that it is an index into moenergies
t = parts[1].split()
tomoindex = 0
toMO = t[0]
if toMO[-1] == "B":
tomoindex = 1
toMO = int(p.match(toMO).group())-1 # subtract 1 so that it is an index into moenergies
percent = self.float(t[1])
# For restricted calculations, the percentage will be corrected
# after parsing (see after_parsing() above).
CIScontrib.append([(fromMO, frommoindex), (toMO, tomoindex), percent])
line = next(inputfile)
self.etsecs.append(CIScontrib)
# Circular dichroism data (different for G03 vs G09)
# G03
## <0|r|b> * <b|rxdel|0> (Au), Rotatory Strengths (R) in
## cgs (10**-40 erg-esu-cm/Gauss)
## state X Y Z R(length)
## 1 0.0006 0.0096 -0.0082 -0.4568
## 2 0.0251 -0.0025 0.0002 -5.3846
## 3 0.0168 0.4204 -0.3707 -15.6580
## 4 0.0721 0.9196 -0.9775 -3.3553
# G09
## 1/2[<0|r|b>*<b|rxdel|0> + (<0|rxdel|b>*<b|r|0>)*]
## Rotatory Strengths (R) in cgs (10**-40 erg-esu-cm/Gauss)
## state XX YY ZZ R(length) R(au)
## 1 -0.3893 -6.7546 5.7736 -0.4568 -0.0010
## 2 -17.7437 1.7335 -0.1435 -5.3845 -0.0114
## 3 -11.8655 -297.2604 262.1519 -15.6580 -0.0332
if (line[1:52] == "<0|r|b> * <b|rxdel|0> (Au), Rotatory Strengths (R)" or
line[1:50] == "1/2[<0|r|b>*<b|rxdel|0> + (<0|rxdel|b>*<b|r|0>)*]"):
self.etrotats = []
next(inputfile) # Units
headers = next(inputfile) # Headers
Ncolms = len(headers.split())
line = next(inputfile)
parts = line.strip().split()
while len(parts) == Ncolms:
try:
R = self.float(parts[4])
except ValueError:
# nan or -nan if there is no first excited state
# (for unrestricted calculations)
pass
else:
self.etrotats.append(R)
line = next(inputfile)
parts = line.strip().split()
self.etrotats = numpy.array(self.etrotats, "d")
# Number of basis set functions.
# Has to deal with lines like:
# NBasis = 434 NAE= 97 NBE= 97 NFC= 34 NFV= 0
# and...
# NBasis = 148 MinDer = 0 MaxDer = 0
# Although the former is in every file, it doesn't occur before
# the overlap matrix is printed.
if line[1:7] == "NBasis" or line[4:10] == "NBasis":
# For counterpoise fragment, skip these lines.
if self.counterpoise != 0: return
# For ONIOM calcs, ignore this section in order to bypass assertion failure.
if self.oniom: return
# If nbasis was already parsed, check if it changed.
nbasis = int(line.split('=')[1].split()[0])
if hasattr(self, "nbasis"):
assert nbasis == self.nbasis
else:
self.nbasis = nbasis
# Number of linearly-independent basis functions.
if line[1:7] == "NBsUse":
# For counterpoise fragment, skip these lines.
if self.counterpoise != 0: return
# For ONIOM calcs, ignore this section in order to bypass assertion failure.
if self.oniom: return
nmo = int(line.split('=')[1].split()[0])
if hasattr(self, "nmo"):
assert nmo == self.nmo
else:
self.nmo = nmo
# For AM1 calculations, set nbasis by a second method,
# as nmo may not always be explicitly stated.
if line[7:22] == "basis functions": # the slice spans exactly 15 characters
nbasis = int(line.split()[0])
if hasattr(self, "nbasis"):
assert nbasis == self.nbasis
else:
self.nbasis = nbasis
# Molecular orbital overlap matrix.
# Has to deal with lines such as:
# *** Overlap ***
# ****** Overlap ******
# Note that Gaussian sometimes drops basis functions,
# causing the overlap matrix as parsed below to not be
# symmetric (which is a problem for population analyses, etc.)
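# The block itself is lower-triangular, printed in batches of five
# columns, roughly like this (layout inferred from the parsing loop
# below, not verbatim Gaussian output):
#                 1             2             3
#   1  0.100000D+01
#   2  0.236704D+00  0.100000D+01
#   3  0.000000D+00  0.000000D+00  0.100000D+01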
if line[1:4] == "***" and (line[5:12] == "Overlap"
or line[8:15] == "Overlap"):
# Ensure that this is the main calc and not a fragment
if self.counterpoise != 0: return
self.aooverlaps = numpy.zeros( (self.nbasis, self.nbasis), "d")
# Overlap integrals for basis fn#1 are in aooverlaps[0]
base = 0
colmNames = next(inputfile)
while base < self.nbasis:
self.updateprogress(inputfile, "Overlap", self.fupdate)
for i in range(self.nbasis-base): # Fewer lines this time
line = next(inputfile)
parts = line.split()
for j in range(len(parts)-1): # Some lines are longer than others
k = float(parts[j+1].replace("D", "E"))
self.aooverlaps[base+j, i+base] = k
self.aooverlaps[i+base, base+j] = k
base += 5
colmNames = next(inputfile)
self.aooverlaps = numpy.array(self.aooverlaps, "d")
# Molecular orbital coefficients (mocoeffs).
# Essentially only produced for SCF calculations.
# This is also the place where aonames and atombasis are parsed.
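# The layout assumed by the loop below is roughly (illustrative, not
# verbatim Gaussian output): five MOs per batch, a header line of MO
# indices, a symmetry line, an "Eigenvalues --" line, then one row per
# basis function such as:
#   1 1   O  1S    0.99475  -0.20587   0.00000   0.08246   0.00000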
if line[5:35] == "Molecular Orbital Coefficients" or line[5:41] == "Alpha Molecular Orbital Coefficients" or line[5:40] == "Beta Molecular Orbital Coefficients":
# If counterpoise fragment, return without parsing orbital info
if self.counterpoise != 0: return
# Skip this for ONIOM calcs
if self.oniom: return
if line[5:40] == "Beta Molecular Orbital Coefficients":
beta = True
if self.popregular:
return
# This was continue before refactoring the parsers.
#continue # Not going to extract mocoeffs
# Need to add an extra array to self.mocoeffs
self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d"))
else:
beta = False
self.aonames = []
self.atombasis = []
mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")]
base = 0
self.popregular = False
for base in range(0, self.nmo, 5):
self.updateprogress(inputfile, "Coefficients", self.fupdate)
colmNames = next(inputfile)
if not colmNames.split():
self.logger.warning("Molecular coefficients header found but no coefficients.")
break
if base == 0 and int(colmNames.split()[0]) != 1:
# Implies that this is a POP=REGULAR calculation
# and so, only aonames (not mocoeffs) will be extracted
self.popregular = True
symmetries = next(inputfile)
eigenvalues = next(inputfile)
for i in range(self.nbasis):
line = next(inputfile)
if i == 0:
# Find location of the start of the basis function name
start_of_basis_fn_name = line.find(line.split()[3]) - 1
if base == 0 and not beta: # Just do this the first time 'round
parts = line[:start_of_basis_fn_name].split()
if len(parts) > 1: # New atom
if i > 0:
self.atombasis.append(atombasis)
atombasis = []
atomname = "%s%s" % (parts[2], parts[1])
orbital = line[start_of_basis_fn_name:20].strip()
self.aonames.append("%s_%s" % (atomname, orbital))
atombasis.append(i)
part = line[21:].replace("D", "E").rstrip()
temp = []
for j in range(0, len(part), 10):
temp.append(float(part[j:j+10]))
# Each coefficient field is 10 characters wide, hence integer division.
if beta:
self.mocoeffs[1][base:base + len(part) // 10, i] = temp
else:
mocoeffs[0][base:base + len(part) // 10, i] = temp
if base == 0 and not beta: # Do the last update of atombasis
self.atombasis.append(atombasis)
if self.popregular:
# We now have aonames, so no need to continue
break
if not self.popregular and not beta:
self.mocoeffs = mocoeffs
# Natural Orbital Coefficients (nocoeffs) - alternative for mocoeffs.
# Most extensively formed after CI calculations, but not only.
# Like for mocoeffs, this is also where aonames and atombasis are parsed.
if line[5:33] == "Natural Orbital Coefficients":
self.aonames = []
self.atombasis = []
nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")
base = 0
self.popregular = False
for base in range(0, self.nmo, 5):
self.updateprogress(inputfile, "Coefficients", self.fupdate)
colmNames = next(inputfile)
if base == 0 and int(colmNames.split()[0]) != 1:
# Implies that this is a POP=REGULAR calculation
# and so, only aonames (not mocoeffs) will be extracted
self.popregular = True
# No symmetry line for natural orbitals.
# symmetries = inputfile.next()
eigenvalues = next(inputfile)
for i in range(self.nbasis):
line = next(inputfile)
# Just do this the first time 'round.
if base == 0:
# Changed below from :12 to :11 to deal with Elmar Neumann's example.
parts = line[:11].split()
# New atom.
if len(parts) > 1:
if i > 0:
self.atombasis.append(atombasis)
atombasis = []
atomname = "%s%s" % (parts[2], parts[1])
orbital = line[11:20].strip()
self.aonames.append("%s_%s" % (atomname, orbital))
atombasis.append(i)
part = line[21:].replace("D", "E").rstrip()
temp = []
for j in range(0, len(part), 10):
temp.append(float(part[j:j+10]))
nocoeffs[base:base + len(part) // 10, i] = temp
# Do the last update of atombasis.
if base == 0:
self.atombasis.append(atombasis)
# We now have aonames, so no need to continue.
if self.popregular:
break
if not self.popregular:
self.nocoeffs = nocoeffs
# For FREQ=Anharm, extract anharmonicity constants
if line[1:40] == "X matrix of Anharmonic Constants (cm-1)":
Nvibs = len(self.vibfreqs)
self.vibanharms = numpy.zeros( (Nvibs, Nvibs), "d")
base = 0
colmNames = next(inputfile)
while base < Nvibs:
for i in range(Nvibs-base): # Fewer lines this time
line = next(inputfile)
parts = line.split()
for j in range(len(parts)-1): # Some lines are longer than others
k = float(parts[j+1].replace("D", "E"))
self.vibanharms[base+j, i+base] = k
self.vibanharms[i+base, base+j] = k
base += 5
colmNames = next(inputfile)
# Pseudopotential charges.
if line.find("Pseudopotential Parameters") > -1:
dashes = next(inputfile)
label1 = next(inputfile)
label2 = next(inputfile)
dashes = next(inputfile)
line = next(inputfile)
if line.find("Centers:") < 0:
return
# This was continue before parser refactoring.
# continue
# Needs to handle output like the following:
#
# Center Atomic Valence Angular Power Coordinates
# Number Number Electrons Momentum of R Exponent Coefficient X Y Z
# ===================================================================================================================================
# Centers: 1
# Centers: 16
# Centers: 21 24
# Centers: 99100101102
# 1 44 16 -4.012684 -0.696698 0.006750
# F and up
# 0 554.3796303 -0.05152700
centers = []
while line.find("Centers:") >= 0:
temp = line[10:]
for i in range(0, len(temp)-3, 3):
centers.append(int(temp[i:i+3]))
line = next(inputfile)
centers.sort() # Not always in increasing order
self.coreelectrons = numpy.zeros(self.natom, "i")
for center in centers:
front = line[:10].strip()
while not (front and int(front) == center):
line = next(inputfile)
front = line[:10].strip()
info = line.split()
self.coreelectrons[center-1] = int(info[1]) - int(info[2])
line = next(inputfile)
# This will be printed for counterpoise calculations only.
# To prevent crashing, we need to know which fragment is being considered.
# Other information is also printed in lines that start like this.
if line[1:14] == 'Counterpoise:':
if line[42:50] == "fragment":
self.counterpoise = int(line[51:54])
# This will be printed only during ONIOM calcs; use it to set a flag
# that will allow assertion failures to be bypassed in the code.
if line[1:7] == "ONIOM:":
self.oniom = True
if (line[1:24] == "Mulliken atomic charges" or
line[1:22] == "Lowdin Atomic Charges"):
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
ones = next(inputfile)
charges = []
nline = next(inputfile)
while not "Sum of" in nline:
charges.append(float(nline.split()[2]))
nline = next(inputfile)
if "Mulliken" in line:
self.atomcharges["mulliken"] = charges
else:
self.atomcharges["lowdin"] = charges
if line.strip() == "Natural Population":
line1 = next(inputfile)
line2 = next(inputfile)
if line1.split()[0] == 'Natural' and line2.split()[2] == 'Charge':
dashes = next(inputfile)
charges = []
for i in range(self.natom):
nline = next(inputfile)
charges.append(float(nline.split()[2]))
self.atomcharges["natural"] = charges
if __name__ == "__main__":
import doctest, gaussianparser, sys
if len(sys.argv) == 1:
doctest.testmod(gaussianparser, verbose=False)
if len(sys.argv) >= 2:
parser = gaussianparser.Gaussian(sys.argv[1])
data = parser.parse()
if len(sys.argv) > 2:
for i in range(len(sys.argv[2:])):
if hasattr(data, sys.argv[2 + i]):
print(getattr(data, sys.argv[2 + i]))
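# Example invocation (file name illustrative):
#   python gaussianparser.py water.log scfenergies atomcoords
# parses water.log and prints each requested attribute that was found.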
|
Clyde-fare/cclib_bak
|
src/cclib/parser/gaussianparser.py
|
Python
|
lgpl-2.1
| 54,260
|
[
"Gaussian",
"cclib"
] |
9760cc2cfda72b5924921627515473a91f8ae907a482e6deae2c66e78e1f21b0
|
from django.db import models
from vendor.models import Store
class Driver(models.Model):
id = models.AutoField(primary_key=True)
username = models.CharField(max_length=40, unique=True)
password = models.CharField(max_length=30)
first_name = models.CharField(max_length=30, blank=True)
last_name = models.CharField(max_length=30, blank=True)
email = models.EmailField(max_length=40, unique=True)
phone = models.CharField(max_length=15, unique=True)
street = models.CharField(max_length=100)
city = models.CharField(max_length=40)
state = models.CharField(max_length=2)
zip_code = models.CharField(max_length=5)
def __str__(self):
return str(self.id) + ' - ' + self.username
# StoreSequence stores the store_ids in the order a driver should visit, separated by single spaces.
class StoreSequence(models.Model):
id = models.AutoField(primary_key=True)
seq = models.CharField(max_length=200)
driver = models.OneToOneField(Driver, on_delete=models.PROTECT, null=True)
def __str__(self):
return self.seq
# ConsumerSequence stores the consumer_ids in the order a driver should visit, separated by single spaces.
class ConsumerSequence(models.Model):
id = models.AutoField(primary_key=True)
seq = models.CharField(max_length=200)
driver = models.OneToOneField(Driver, on_delete=models.PROTECT, null=True)
def __str__(self):
return self.seq
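# A minimal sketch (not part of the original app) of decoding the
# space-separated encoding used by both sequence models above;
# seq_to_ids is a hypothetical helper, not an existing model method.
def seq_to_ids(seq):
    """Return the integer ids stored in a StoreSequence/ConsumerSequence."""
    return [int(token) for token in seq.split()]
# e.g. seq_to_ids("12 5 9") == [12, 5, 9]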
|
rayhu-osu/vcube
|
valet/models.py
|
Python
|
mit
| 1,459
|
[
"VisIt"
] |
c44150b6f58d20cf89939922ac5faa61bb325b2f5ff68d63096be282744ee495
|
from ase.transport.calculators import TransportCalculator
import numpy as np
# Aux. function to write paired data columns to a text file.
def write(fname, xs, ys):
fd = open(fname, 'w')
for x, y in zip(xs, ys):
fd.write('%s %s\n' % (x, y))
fd.close()
H_lead = np.zeros([4,4])
# On-site energies are zero
for i in range(4):
H_lead[i,i] = 0.0
# Nearest neighbor hopping is -1.0
for i in range(3):
H_lead[i,i+1] = -1.0
H_lead[i+1,i] = -1.0
# Next-nearest neighbor hopping is 0.2
for i in range(2):
H_lead[i,i+2] = 0.2
H_lead[i+2,i] = 0.2
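# The loops above produce the principal-layer Hamiltonian:
# H_lead = [[ 0.0, -1.0,  0.2,  0.0],
#           [-1.0,  0.0, -1.0,  0.2],
#           [ 0.2, -1.0,  0.0, -1.0],
#           [ 0.0,  0.2, -1.0,  0.0]]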
H_scat = np.zeros([6,6])
# Principal layers on either side of S
H_scat[:2,:2] = H_lead[:2,:2]
H_scat[-2:,-2:] = H_lead[:2,:2]
# Scattering region
H_scat[2,2] = 0.0
H_scat[3,3] = 0.0
H_scat[2,3] = -0.8
H_scat[3,2] = -0.8
# External coupling
H_scat[1,2] = 0.2
H_scat[2,1] = 0.2
H_scat[3,4] = 0.2
H_scat[4,3] = 0.2
energies = np.arange(-3,3,0.02)
tcalc = TransportCalculator(h=H_scat,
h1=H_lead,
eta=0.02,
energies=energies)
T = tcalc.get_transmission()
tcalc.set(pdos=[2, 3])
pdos = tcalc.get_pdos()
tcalc.set(dos=True)
dos = tcalc.get_dos()
write('T.dat',tcalc.energies,T)
write('pdos0.dat', tcalc.energies,pdos[0])
write('pdos1.dat', tcalc.energies,pdos[1])
#subdiagonalize
h_rot, s_rot, eps, u = tcalc.subdiagonalize_bfs([2, 3], apply=True)
T_rot = tcalc.get_transmission()
dos_rot = tcalc.get_dos()
pdos_rot = tcalc.get_pdos()
write('T_rot.dat', tcalc.energies,T_rot)
write('pdos0_rot.dat', tcalc.energies, pdos_rot[0])
write('pdos1_rot.dat', tcalc.energies, pdos_rot[1])
print('Subspace eigenvalues:', eps)
assert sum(abs(eps - (-0.8, 0.8))) < 2.0e-15, 'Subdiagonalization error'
print('Max deviation of T after the rotation:', np.abs(T - T_rot).max())
assert max(abs(T - T_rot)) < 2.0e-15, 'Subdiagonalization error'
#remove coupling
h_cut, s_cut = tcalc.cutcoupling_bfs([2], apply=True)
T_cut = tcalc.get_transmission()
dos_cut = tcalc.get_dos()
pdos_cut = tcalc.get_pdos()
write('T_cut.dat', tcalc.energies, T_cut)
write('pdos0_cut.dat', tcalc.energies,pdos_cut[0])
write('pdos1_cut.dat', tcalc.energies,pdos_cut[1])
|
grhawk/ASE
|
tools/ase/transport/test_transport_calulator.py
|
Python
|
gpl-2.0
| 2,162
|
[
"ASE"
] |
386a186e87aaea9df28dd444a09ff7f034069bce7bfa00c7ce4d7ceab21ac065
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture, mark
from pylada import vasp_program
@fixture
def path():
from os.path import dirname
return dirname(__file__)
@mark.skipif(vasp_program is None, reason="vasp not configured")
def test(tmpdir, path):
from numpy import all, abs
from quantities import kbar, eV, angstrom
from pylada.crystal import Structure
from pylada.vasp import Vasp
from pylada.vasp.relax import Relax
from pylada import default_comm
structure = Structure([[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]], scale=5.43, name='has a name')\
.add_atom(0, 0, 0, "Si")\
.add_atom(0.25, 0.25, 0.25, "Si")
vasp = Vasp()
vasp.kpoints = "Automatic generation\n0\nMonkhorst\n2 2 2\n0 0 0"
vasp.prec = "accurate"
vasp.ediff = 1e-5
vasp.encut = 1
vasp.ismear = "fermi"
vasp.sigma = 0.01
vasp.relaxation = "volume"
vasp.add_specie = "Si", "{0}/pseudos/Si".format(path)
functional = Relax(copy=vasp)
assert abs(functional.ediff - 1e-5) < 1e-8
assert functional.prec == 'Accurate'
result = functional(structure, outdir=str(tmpdir), comm=default_comm,
relaxation="volume ionic cellshape")
assert result.success
assert result.stress.units == kbar and all(abs(result.stress) < 1e0)
assert result.forces.units == eV / angstrom and all(abs(result.forces) < 1e-1)
assert result.total_energy.units == eV and all(
abs(result.total_energy + 10.668652 * eV) < 1e-2)
|
pylada/pylada-light
|
tests/vasp/test_runrelax.py
|
Python
|
gpl-3.0
| 2,647
|
[
"CRYSTAL",
"VASP"
] |
d768355b158513ece8cf67a3c023212f0912c192bc00ef5199e97288a6f7694d
|
"""This module contains the "Viz" objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import hashlib
import logging
import traceback
import uuid
import zlib
from collections import OrderedDict, defaultdict
from itertools import product
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from flask import request
from flask_babel import lazy_gettext as _
from markdown import markdown
import simplejson as json
from six import string_types, PY3
from dateutil import relativedelta as rdelta
from superset import app, utils, cache
from superset.utils import DTTM_ALIAS
config = app.config
stats_logger = config.get('STATS_LOGGER')
class BaseViz(object):
"""All visualizations derive this base class"""
viz_type = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
def __init__(self, datasource, form_data):
if not datasource:
raise Exception("Viz is missing a datasource")
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = self.form_data.get(
'token', 'token_' + uuid.uuid4().hex[:8])
self.metrics = self.form_data.get('metrics') or []
self.groupby = self.form_data.get('groupby') or []
self.status = None
self.error_message = None
def get_df(self, query_obj=None):
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
self.error_msg = ""
self.results = None
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_obj['granularity'])
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
# Transform the timestamp received from the database into a
# pandas-supported datetime format. If no python_date_format is
# specified, the pattern is assumed to be the default ISO date format.
# If the datetime format is unix epoch, the corresponding parsing
# logic is used.
if df is None or df.empty:
self.status = utils.QueryStatus.FAILED
if not self.error_message:
self.error_message = "No data."
return pd.DataFrame()
else:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
df[DTTM_ALIAS] = pd.to_datetime(df[DTTM_ALIAS], utc=False)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df = df.replace([np.inf, -np.inf], np.nan)
df = df.fillna(0)
return df
def get_extra_filters(self):
extra_filters = self.form_data.get('extra_filters', [])
return {f['col']: f['val'] for f in extra_filters}
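# Shape of the contextual filters consumed here and in query_obj below
# (inferred from this code, not from external documentation):
# form_data['extra_filters'] = [
#     {'col': '__from', 'val': '7 days ago'},
#     {'col': '__to', 'val': 'now'},
#     {'col': 'country', 'val': ['US', 'CA']},
# ]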
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
groupby = form_data.get("groupby") or []
metrics = form_data.get("metrics") or []
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
extra_filters = self.get_extra_filters()
granularity = (
form_data.get("granularity") or form_data.get("granularity_sqla")
)
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(
form_data.get("row_limit") or config.get("ROW_LIMIT"))
# __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
since = (
extra_filters.get('__from') or
form_data.get("since") or
config.get("SUPERSET_DEFAULT_SINCE", "1 year ago")
)
from_dttm = utils.parse_human_datetime(since)
now = datetime.now()
if from_dttm > now:
from_dttm = now - (from_dttm - now)
until = extra_filters.get('__to') or form_data.get("until", "now")
to_dttm = utils.parse_human_datetime(until)
if from_dttm > to_dttm:
raise Exception("From date cannot be larger than to date")
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
'where': form_data.get("where", ''),
'having': form_data.get("having", ''),
'having_druid': form_data.get('having_filters', []),
'time_grain_sqla': form_data.get("time_grain_sqla", ''),
'druid_time_origin': form_data.get("druid_time_origin", ''),
}
filters = form_data.get('filters', [])
for col, vals in self.get_extra_filters().items():
if not (col and vals) or col.startswith('__'):
continue
elif col in self.datasource.filterable_column_names:
# Quote values with comma to avoid conflict
filters += [{
'col': col,
'op': 'in',
'val': vals,
}]
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': self.is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': filters,
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'form_data': form_data,
}
return d
@property
def cache_timeout(self):
if self.form_data.get('cache_timeout'):
return int(self.form_data.get('cache_timeout'))
if self.datasource.cache_timeout:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, 'database') and
self.datasource.database.cache_timeout):
return self.datasource.database.cache_timeout
return config.get("CACHE_DEFAULT_TIMEOUT")
def get_json(self, force=False):
return json.dumps(
self.get_payload(force),
default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def cache_key(self):
s = str([(k, self.form_data[k]) for k in sorted(self.form_data.keys())])
return hashlib.md5(s.encode('utf-8')).hexdigest()
def get_payload(self, force=False):
"""Handles caching around the json payload retrieval"""
cache_key = self.cache_key
payload = None
force = force if force else self.form_data.get('force') == 'true'
if not force and cache:
payload = cache.get(cache_key)
if payload:
stats_logger.incr('loaded_from_cache')
is_cached = True
try:
cached_data = zlib.decompress(payload)
if PY3:
cached_data = cached_data.decode('utf-8')
payload = json.loads(cached_data)
except Exception as e:
logging.error("Error reading cache: " +
utils.error_msg_from_exception(e))
payload = None
logging.info("Serving from cache")
if not payload:
stats_logger.incr('loaded_from_source')
data = None
is_cached = False
cache_timeout = self.cache_timeout
stacktrace = None
try:
df = self.get_df()
if not self.error_message:
data = self.get_data(df)
except Exception as e:
logging.exception(e)
if not self.error_message:
self.error_message = str(e)
self.status = utils.QueryStatus.FAILED
data = None
stacktrace = traceback.format_exc()
payload = {
'cache_key': cache_key,
'cache_timeout': cache_timeout,
'data': data,
'error': self.error_message,
'form_data': self.form_data,
'query': self.query,
'status': self.status,
'stacktrace': stacktrace,
}
payload['cached_dttm'] = datetime.utcnow().isoformat().split('.')[0]
logging.info("Caching for the next {} seconds".format(
cache_timeout))
data = self.json_dumps(payload)
if PY3:
data = bytes(data, 'utf-8')
if cache and self.status != utils.QueryStatus.FAILED:
try:
cache.set(
cache_key,
zlib.compress(data),
timeout=cache_timeout)
except Exception as e:
# cache.set can fail if the backend is down, if the key is
# too large, or for other reasons
logging.warning("Could not cache key {}".format(cache_key))
logging.exception(e)
cache.delete(cache_key)
payload['is_cached'] = is_cached
return payload
def json_dumps(self, obj):
return json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content
def get_csv(self):
df = self.get_df()
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, encoding="utf-8")
def get_data(self, df):
return []
@property
def json_data(self):
return json.dumps(self.data)
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def should_be_timeseries(self):
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (
(fd.get('granularity') and fd.get('granularity') != 'all') or
(fd.get('granularity_sqla') and fd.get('time_grain_sqla'))
)
if fd.get('include_time') and not conditions_met:
raise Exception(
"Pick a granularity in the Time section or "
"uncheck 'Include Time'")
return fd.get('include_time')
def query_obj(self):
d = super(TableViz, self).query_obj()
fd = self.form_data
if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
raise Exception(
"Choose either fields to [Group By] and [Metrics] or "
"[Columns], not both")
if fd.get('all_columns'):
d['columns'] = fd.get('all_columns')
d['groupby'] = []
order_by_cols = fd.get('order_by_cols') or []
d['orderby'] = [json.loads(t) for t in order_by_cols]
d['is_timeseries'] = self.should_be_timeseries()
return d
def get_data(self, df):
if not self.should_be_timeseries() and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
return dict(
records=df.to_dict(orient="records"),
columns=list(df.columns),
)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super(PivotTableViz, self).query_obj()
groupby = self.form_data.get('groupby')
columns = self.form_data.get('columns')
metrics = self.form_data.get('metrics')
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise Exception("Please choose at least one \"Group by\" field ")
if not metrics:
raise Exception("Please choose at least one metric")
if (
any(v in groupby for v in columns) or
any(v in columns for v in groupby)):
raise Exception("groupby and columns can't overlap")
d['groupby'] = list(set(groupby) | set(columns))
return d
def get_data(self, df):
if (
self.form_data.get("granularity") == "all" and
DTTM_ALIAS in df):
del df[DTTM_ALIAS]
df = df.pivot_table(
index=self.form_data.get('groupby'),
columns=self.form_data.get('columns'),
values=self.form_data.get('metrics'),
aggfunc=self.form_data.get('pandas_aggfunc'),
margins=True,
)
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep='',
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover").split(" ")),
)
class MarkupViz(BaseViz):
"""Use html or markdown to create a free form widget"""
viz_type = "markup"
verbose_name = _("Markup")
is_timeseries = False
def get_df(self):
# Markup widgets render form-supplied code and never query the
# datasource; return a truthy placeholder instead of a dataframe.
return True
def get_data(self, df):
markup_type = self.form_data.get("markup_type")
code = self.form_data.get("code", '')
if markup_type == "markdown":
code = markdown(code)
return dict(html=code)
class SeparatorViz(MarkupViz):
"""Use to create section headers in a dashboard, similar to `Markup`"""
viz_type = "separator"
verbose_name = _("Separator")
def get_data(self, df):
code = markdown(self.form_data.get("code", ''))
return dict(html=code)
class WordCloudViz(BaseViz):
"""Build a colorful word cloud
Uses the nice library at:
https://github.com/jasondavies/d3-cloud
"""
viz_type = "word_cloud"
verbose_name = _("Word Cloud")
is_timeseries = False
def query_obj(self):
d = super(WordCloudViz, self).query_obj()
d['metrics'] = [self.form_data.get('metric')]
d['groupby'] = [self.form_data.get('series')]
return d
def get_data(self, df):
# Ordering the columns
df = df[[self.form_data.get('series'), self.form_data.get('metric')]]
# Labeling the columns for uniform json schema
df.columns = ['text', 'size']
return df.to_dict(orient="records")
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def _nest(self, metric, df):
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v}
for n, v in zip(df.index, df[metric])]
else:
result = [{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]]
return result
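# Illustrative output of _nest for a two-level groupby (names assumed):
# [{"name": "East", "children": [{"name": "NYC", "value": 42}, ...]},
#  {"name": "West", "children": [{"name": "SF", "value": 17}, ...]}]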
def get_data(self, df):
df = df.set_index(self.form_data.get("groupby"))
chart_data = [{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns]
return chart_data
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = (
'<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>')
is_timeseries = True
def get_data(self, df):
form_data = self.form_data
df.columns = ["timestamp", "metric"]
timestamps = {str(obj["timestamp"].value / 10**9):
obj.get("metric") for obj in df.to_dict("records")}
start = utils.parse_human_datetime(form_data.get("since"))
end = utils.parse_human_datetime(form_data.get("until"))
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
if domain == "year":
range_ = diff_delta.years + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24*60*60) + 1
else:
range_ = diff_secs // (60*60) + 1
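# Worked example (dates assumed): since 2015-01-01 until 2017-01-01
# with domain "month" gives relativedelta(years=2, months=0), so
# range_ = 2 * 12 + 0 + 1 = 25 calendar panels.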
return {
"timestamps": timestamps,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
def query_obj(self):
qry = super(CalHeatmapViz, self).query_obj()
qry["metrics"] = [self.form_data["metric"]]
return qry
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BoxPlotViz(NVD3Viz):
"""Box plot viz from ND3"""
viz_type = "box_plot"
verbose_name = _("Box Plot")
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
label_sep = " - "
chart_data = []
for index_value, row in zip(df.index, df.to_dict(orient="records")):
if isinstance(index_value, tuple):
index_value = label_sep.join(index_value)
boxes = defaultdict(dict)
for (label, key), value in row.items():
if key == "median":
key = "Q2"
boxes[label][key] = value
for label, box in boxes.items():
if len(self.form_data.get("metrics")) > 1:
# need to render data labels with metrics
chart_label = label_sep.join([index_value, label])
else:
chart_label = index_value
chart_data.append({
"label": chart_label,
"values": box,
})
return chart_data
def get_data(self, df):
form_data = self.form_data
df = df.fillna(0)
# conform to NVD3 names
def Q1(series): # need to be named functions - can't use lambdas
return np.percentile(series, 25)
def Q3(series):
return np.percentile(series, 75)
whisker_type = form_data.get('whisker_options')
if whisker_type == "Tukey":
def whisker_high(series):
upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
series = series[series <= upper_outer_lim]
return series[np.abs(series - upper_outer_lim).argmin()]
def whisker_low(series):
lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
# find the closest value above the lower outer limit
series = series[series >= lower_outer_lim]
return series[np.abs(series - lower_outer_lim).argmin()]
elif whisker_type == "Min/max (no outliers)":
def whisker_high(series):
return series.max()
def whisker_low(series):
return series.min()
elif " percentiles" in whisker_type:
low, high = whisker_type.replace(" percentiles", "").split("/")
def whisker_high(series):
return np.percentile(series, int(high))
def whisker_low(series):
return np.percentile(series, int(low))
else:
raise ValueError("Unknown whisker type: {}".format(whisker_type))
def outliers(series):
above = series[series > whisker_high(series)]
below = series[series < whisker_low(series)]
# pandas sometimes doesn't like getting lists back here
return set(above.tolist() + below.tolist())
aggregate = [Q1, np.median, Q3, whisker_high, whisker_low, outliers]
df = df.groupby(form_data.get('groupby')).agg(aggregate)
chart_data = self.to_series(df)
return chart_data
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super(BubbleViz, self).query_obj()
d['groupby'] = [
form_data.get('entity')
]
if form_data.get('series'):
d['groupby'].append(form_data.get('series'))
self.x_metric = form_data.get('x')
self.y_metric = form_data.get('y')
self.z_metric = form_data.get('size')
self.entity = form_data.get('entity')
self.series = form_data.get('series') or self.entity
d['row_limit'] = form_data.get('limit')
d['metrics'] = [
self.z_metric,
self.x_metric,
self.y_metric,
]
if not all(d['metrics'] + [self.entity]):
raise Exception("Pick a metric for x, y and size")
return d
def get_data(self, df):
df['x'] = df[[self.x_metric]]
df['y'] = df[[self.y_metric]]
df['size'] = df[[self.z_metric]]
df['shape'] = 'circle'
df['group'] = df[[self.series]]
series = defaultdict(list)
for row in df.to_dict(orient='records'):
series[row['group']].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({
'key': k,
'values': v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super(BulletViz, self).query_obj()
self.metric = form_data.get('metric')
def as_strings(field):
value = form_data.get(field)
return value.split(',') if value else []
def as_floats(field):
return [float(x) for x in as_strings(field)]
self.ranges = as_floats('ranges')
self.range_labels = as_strings('range_labels')
self.markers = as_floats('markers')
self.marker_labels = as_strings('marker_labels')
self.marker_lines = as_floats('marker_lines')
self.marker_line_labels = as_strings('marker_line_labels')
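# e.g. (values assumed) form_data['ranges'] = "0,60,100" gives
# self.ranges == [0.0, 60.0, 100.0]; a missing field yields []
# rather than raising, via the as_strings fallback above.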
d['metrics'] = [
self.metric,
]
if not self.metric:
raise Exception("Pick a metric to display")
return d
def get_data(self, df):
df = df.fillna(0)
df['metric'] = df[[self.metric]]
values = df['metric'].values
return {
'measures': values.tolist(),
'ranges': self.ranges or [0, values.max() * 1.1],
'rangeLabels': self.range_labels or None,
'markers': self.markers or None,
'markerLabels': self.marker_labels or None,
'markerLines': self.marker_lines or None,
'markerLineLabels': self.marker_line_labels or None,
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self):
d = super(BigNumberViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
raise Exception("Pick a metric!")
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
def get_data(self, df):
form_data = self.form_data
df.sort_values(by=df.columns[0], inplace=True)
compare_lag = form_data.get("compare_lag")
return {
'data': df.values.tolist(),
'compare_lag': compare_lag,
'compare_suffix': form_data.get('compare_suffix', ''),
}
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super(BigNumberTotalViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
raise Exception("Pick a metric!")
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
def get_data(self, df):
form_data = self.form_data
df.sort_values(by=df.columns[0], inplace=True)
return {
'data': df.values.tolist(),
'subheader': form_data.get('subheader', ''),
}
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
cols = []
for col in df.columns:
if col == '':
cols.append('N/A')
elif col is None:
cols.append('NULL')
else:
cols.append(col)
df.columns = cols
series = df.to_dict('series')
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
if isinstance(name, string_types):
series_title = name
else:
name = ["{}".format(s) for s in name]
if len(self.form_data.get('metrics')) > 1:
series_title = ", ".join(name)
else:
series_title = ", ".join(name[1:])
if title_suffix:
series_title += title_suffix
d = {
"key": series_title,
"classed": classed,
"values": [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
}
chart_data.append(d)
return chart_data
def get_data(self, df):
fd = self.form_data
df = df.fillna(0)
if fd.get("granularity") == "all":
raise Exception("Pick a time granularity for your time series")
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
fm = fd.get("resample_fillmethod")
if not fm:
fm = None
how = fd.get("resample_how")
rule = fd.get("resample_rule")
if how and rule:
df = df.resample(rule, how=how, fill_method=fm)
if not fm:
df = df.fillna(0)
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
rolling_periods = fd.get("rolling_periods")
rolling_type = fd.get("rolling_type")
if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
if rolling_type == 'mean':
df = pd.rolling_mean(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'std':
df = pd.rolling_std(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'sum':
df = pd.rolling_sum(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'cumsum':
df = df.cumsum()
num_period_compare = fd.get("num_period_compare")
if num_period_compare:
num_period_compare = int(num_period_compare)
prt = fd.get('period_ratio_type')
if prt and prt == 'growth':
df = (df / df.shift(num_period_compare)) - 1
elif prt and prt == 'value':
df = df - df.shift(num_period_compare)
else:
df = df / df.shift(num_period_compare)
df = df[num_period_compare:]
chart_data = self.to_series(df)
time_compare = fd.get('time_compare')
if time_compare:
query_object = self.query_obj()
delta = utils.parse_human_timedelta(time_compare)
query_object['inner_from_dttm'] = query_object['from_dttm']
query_object['inner_to_dttm'] = query_object['to_dttm']
query_object['from_dttm'] -= delta
query_object['to_dttm'] -= delta
df2 = self.get_df(query_object)
df2[DTTM_ALIAS] += delta
df2 = df2.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
chart_data += self.to_series(
df2, classed='superset', title_suffix="---")
chart_data = sorted(chart_data, key=lambda x: x['key'])
return chart_data
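# Hedged editor's sketch (not part of the original module): the three
# period_ratio_type branches in get_data above reduce to simple shift
# arithmetic. The frame and period count are hypothetical; nothing calls
# this helper, so module behavior is unchanged.
def _example_period_comparison():
    df = pd.DataFrame({'m': [100.0, 110.0, 121.0]})
    n = 1  # num_period_compare
    growth = (df / df.shift(n)) - 1   # 'growth': 0.10 at every step
    value = df - df.shift(n)          # 'value': 10.0, then 11.0
    ratio = df / df.shift(n)          # default: 1.10 at every step
    return growth[n:], value[n:], ratio[n:]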
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self):
d = super(NVD3DualLineViz, self).query_obj()
m1 = self.form_data.get('metric')
m2 = self.form_data.get('metric_2')
d['metrics'] = [m1, m2]
if not m1:
raise Exception("Pick a metric for left axis!")
if not m2:
raise Exception("Pick a metric for right axis!")
if m1 == m2:
raise Exception("Please choose different metrics"
" on left and right axis")
return d
def to_series(self, df, classed=''):
cols = []
for col in df.columns:
if col == '':
cols.append('N/A')
elif col is None:
cols.append('NULL')
else:
cols.append(col)
df.columns = cols
series = df.to_dict('series')
chart_data = []
metrics = [
self.form_data.get('metric'),
self.form_data.get('metric_2')
]
for i, m in enumerate(metrics):
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
"yAxis": i+1,
"type": "line"
}
chart_data.append(d)
return chart_data
def get_data(self, df):
fd = self.form_data
df = df.fillna(0)
if self.form_data.get("granularity") == "all":
raise Exception("Pick a time granularity for your time series")
metric = fd.get('metric')
metric_2 = fd.get('metric_2')
df = df.pivot_table(
index=DTTM_ALIAS,
values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = 'compare'
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
class DistributionPieViz(NVD3Viz):
"""Annoy visualization snobs with this controversial pie chart"""
viz_type = "pie"
verbose_name = _("Distribution - NVD3 - Pie Chart")
is_timeseries = False
def get_data(self, df):
df = df.pivot_table(
index=self.groupby,
values=[self.metrics[0]])
df.sort_values(by=self.metrics[0], ascending=False, inplace=True)
df = df.reset_index()
df.columns = ['x', 'y']
return df.to_dict(orient="records")
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self):
"""Returns the query object for this visualization"""
d = super(HistogramViz, self).query_obj()
d['row_limit'] = self.form_data.get(
'row_limit', int(config.get('VIZ_ROW_LIMIT')))
numeric_column = self.form_data.get('all_columns_x')
if numeric_column is None:
raise Exception("Must have one numeric column specified")
d['columns'] = [numeric_column]
return d
def get_data(self, df):
"""Returns the chart data"""
chart_data = df[df.columns[0]].values.tolist()
return chart_data
class DistributionBarViz(DistributionPieViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self):
d = super(DistributionBarViz, self).query_obj() # noqa
fd = self.form_data
gb = fd.get('groupby') or []
cols = fd.get('columns') or []
d['groupby'] = set(gb + cols)
if len(d['groupby']) < len(gb) + len(cols):
raise Exception("Can't have overlap between Series and Breakdowns")
if not self.metrics:
raise Exception("Pick at least one metric")
if not self.groupby:
raise Exception("Pick at least one field for [Series]")
return d
def get_data(self, df):
fd = self.form_data
row = df.groupby(self.groupby).sum()[self.metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
columns = fd.get('columns') or []
pt = df.pivot_table(
index=self.groupby,
columns=columns,
values=self.metrics)
if fd.get("contribution"):
pt = pt.fillna(0)
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
chart_data = []
for name, ys in pt.iteritems():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, string_types):
series_title = name
elif len(self.metrics) > 1:
series_title = ", ".join(name)
else:
                series_title = ", ".join(str(s) for s in name[1:])
values = []
for i, v in ys.iteritems():
x = i
if isinstance(x, (tuple, list)):
x = ', '.join([str(s) for s in x])
else:
x = str(x)
values.append({
'x': x,
'y': v,
})
d = {
"key": series_title,
"values": values,
}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
'Kerry Rodden '
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>')
def get_data(self, df):
# if m1 == m2 duplicate the metric column
cols = self.form_data.get('groupby')
metric = self.form_data.get('metric')
secondary_metric = self.form_data.get('secondary_metric')
if metric == secondary_metric:
ndf = df
            # The duplicated metric in the query yields two identical
            # columns, renamed here to m1/m2 (no extra list nesting).
            ndf.columns = cols + ['m1', 'm2']
else:
cols += [
self.form_data['metric'], self.form_data['secondary_metric']]
ndf = df[cols]
return json.loads(ndf.to_json(orient="values")) # TODO fix this nonsense
def query_obj(self):
qry = super(SunburstViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric'], self.form_data['secondary_metric']]
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self):
qry = super(SankeyViz, self).query_obj()
if len(qry['groupby']) != 2:
raise Exception("Pick exactly 2 columns as [Source / Target]")
qry['metrics'] = [
self.form_data['metric']]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
recs = df.to_dict(orient='records')
hierarchy = defaultdict(set)
for row in recs:
hierarchy[row['source']].add(row['target'])
def find_cycle(g):
"""Whether there's a cycle in a directed graph"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
for v in g:
cycle = visit(v)
if cycle:
return cycle
cycle = find_cycle(hierarchy)
if cycle:
raise Exception(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}".format(cycle))
return recs
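# Hedged editor's sketch (not part of the original module): the nested
# find_cycle above does a depth-first walk over the adjacency dict and
# reports the first edge that closes a loop. The graph is hypothetical.
def _example_find_cycle():
    hierarchy = {'a': {'b'}, 'b': {'c'}, 'c': {'a'}}  # a -> b -> c -> a
    path = set()

    def visit(vertex):
        path.add(vertex)
        for neighbour in hierarchy.get(vertex, ()):
            if neighbour in path or visit(neighbour):
                return (vertex, neighbour)
        path.remove(vertex)

    for vertex in hierarchy:
        cycle = visit(vertex)
        if cycle:
            return cycle  # e.g. ('a', 'b'), an edge on the loop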
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self):
qry = super(DirectedForceViz, self).query_obj()
if len(self.form_data['groupby']) != 2:
raise Exception("Pick exactly 2 columns to 'Group By'")
qry['metrics'] = [self.form_data['metric']]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
return df.to_dict(orient='records')
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
verbose_name = _("Directed Force Layout")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self):
qry = super(ChordViz, self).query_obj()
fd = self.form_data
qry['groupby'] = [fd.get('groupby'), fd.get('columns')]
qry['metrics'] = [fd.get('metric')]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
        # Preparing the symmetrical matrix that d3.chord expects
nodes = list(set(df['source']) | set(df['target']))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
return {
'nodes': list(nodes),
'matrix': m,
}
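# Hedged editor's sketch (not part of the original module): the symmetrical
# matrix built in ChordViz.get_data above, on a hypothetical two-node frame.
def _example_chord_matrix():
    nodes = ['a', 'b']
    flows = [('a', 'b', 5), ('b', 'a', 2)]  # (source, target, value)
    matrix = {pair: 0 for pair in product(nodes, nodes)}
    for source, target, value in flows:
        matrix[(source, target)] = value
    # [[0, 2], [5, 0]]: entry [i][j] is the flow from nodes[j] to nodes[i].
    return [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]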
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
    credits = 'From bl.ocks.org by john-guerra'
def query_obj(self):
qry = super(CountryMapViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric']]
qry['groupby'] = [self.form_data['entity']]
return qry
def get_data(self, df):
from superset.data import countries
fd = self.form_data
cols = [fd.get('entity')]
metric = fd.get('metric')
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ['country_id', 'metric']
d = df.to_dict(orient='records')
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self):
qry = super(WorldMapViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric'], self.form_data['secondary_metric']]
qry['groupby'] = [self.form_data['entity']]
return qry
def get_data(self, df):
from superset.data import countries
fd = self.form_data
cols = [fd.get('entity')]
metric = fd.get('metric')
secondary_metric = fd.get('secondary_metric')
if metric == secondary_metric:
ndf = df[cols]
# df[metric] will be a DataFrame
# because there are duplicate column names
ndf['m1'] = df[metric].iloc[:, 0]
ndf['m2'] = ndf['m1']
else:
cols += [metric, secondary_metric]
ndf = df[cols]
df = ndf
df.columns = ['country', 'm1', 'm2']
d = df.to_dict(orient='records')
for row in d:
country = None
if isinstance(row['country'], string_types):
country = countries.get(
fd.get('country_fieldtype'), row['country'])
if country:
row['country'] = country['cca3']
row['latitude'] = country['lat']
row['longitude'] = country['lng']
row['name'] = country['name']
else:
row['country'] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
def query_obj(self):
qry = super(FilterBoxViz, self).query_obj()
groupby = self.form_data.get('groupby')
if len(groupby) < 1 and not self.form_data.get('date_filter'):
raise Exception("Pick at least one filter field")
qry['metrics'] = [
self.form_data['metric']]
return qry
def get_data(self, df):
qry = self.query_obj()
filters = [g for g in self.form_data['groupby']]
d = {}
for flt in filters:
qry['groupby'] = [flt]
df = super(FilterBoxViz, self).get_df(qry)
d[flt] = [{
'id': row[0],
'text': row[0],
'filter': flt,
'metric': row[1]}
for row in df.itertuples(index=False)
]
return d
class IFrameViz(BaseViz):
"""You can squeeze just about anything in this iFrame component"""
viz_type = "iframe"
verbose_name = _("iFrame")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def get_df(self):
return None
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
'Syntagmatic\'s library</a>')
is_timeseries = False
def query_obj(self):
d = super(ParallelCoordinatesViz, self).query_obj()
fd = self.form_data
d['metrics'] = copy.copy(fd.get('metrics'))
second = fd.get('secondary_metric')
if second not in d['metrics']:
d['metrics'] += [second]
d['groupby'] = [fd.get('series')]
return d
def get_data(self, df):
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
'bl.ocks.org</a>')
def query_obj(self):
d = super(HeatmapViz, self).query_obj()
fd = self.form_data
d['metrics'] = [fd.get('metric')]
d['groupby'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
return d
def get_data(self, df):
fd = self.form_data
x = fd.get('all_columns_x')
y = fd.get('all_columns_y')
v = fd.get('metric')
if x == y:
df.columns = ['x', 'y', 'v']
else:
df = df[[x, y, v]]
df.columns = ['x', 'y', 'v']
norm = fd.get('normalize_across')
overall = False
if norm == 'heatmap':
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df['perc'] = (
gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min()))
)
if overall:
v = df.v
min_ = v.min()
df['perc'] = (v - min_) / (v.max() - min_)
return df.to_dict(orient="records")
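# Hedged editor's sketch (not part of the original module): the min-max
# rescaling used for the 'perc' column above, on hypothetical values.
def _example_heatmap_normalization():
    v = pd.Series([2.0, 4.0, 10.0])
    min_ = v.min()
    return (v - min_) / (v.max() - min_)  # -> 0.0, 0.25, 1.0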
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
'd3-horizon-chart</a>')
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = (
'<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>')
def query_obj(self):
d = super(MapboxViz, self).query_obj()
fd = self.form_data
label_col = fd.get('mapbox_label')
if not fd.get('groupby'):
d['columns'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise Exception(
"Must have a [Group By] column to have 'count' as the [Label]")
d['columns'].append(label_col[0])
if fd.get('point_radius') != 'Auto':
d['columns'].append(fd.get('point_radius'))
d['columns'] = list(set(d['columns']))
else:
# Ensuring columns chosen are all in group by
if (label_col and len(label_col) >= 1 and
label_col[0] != "count" and
label_col[0] not in fd.get('groupby')):
raise Exception(
"Choice of [Label] must be present in [Group By]")
if (fd.get("point_radius") != "Auto" and
fd.get("point_radius") not in fd.get('groupby')):
raise Exception(
"Choice of [Point Radius] must be present in [Group By]")
if (fd.get('all_columns_x') not in fd.get('groupby') or
fd.get('all_columns_y') not in fd.get('groupby')):
raise Exception(
"[Longitude] and [Latitude] columns must be present in [Group By]")
return d
def get_data(self, df):
fd = self.form_data
label_col = fd.get('mapbox_label')
custom_metric = label_col and len(label_col) >= 1
metric_col = [None] * len(df.index)
if custom_metric:
if label_col[0] == fd.get('all_columns_x'):
metric_col = df[fd.get('all_columns_x')]
elif label_col[0] == fd.get('all_columns_y'):
metric_col = df[fd.get('all_columns_y')]
else:
metric_col = df[label_col[0]]
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")])
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {
"metric": metric,
"radius": point_radius,
},
"geometry": {
"type": "Point",
"coordinates": [lon, lat],
}
}
for lon, lat, metric, point_radius
in zip(
df[fd.get('all_columns_x')],
df[fd.get('all_columns_y')],
metric_col, point_radius_col)
]
}
return {
"geoJSON": geo_json,
"customMetric": custom_metric,
"mapboxApiKey": config.get('MAPBOX_API_KEY'),
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"viewportLongitude": fd.get("viewport_longitude"),
"viewportLatitude": fd.get("viewport_latitude"),
"viewportZoom": fd.get("viewport_zoom"),
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
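# Hedged editor's sketch (not part of the original module): the geoJSON
# FeatureCollection assembled in MapboxViz.get_data above, here with
# hypothetical coordinates and metrics and no point radius.
def _example_geojson_features():
    lons, lats, metrics = [-122.4, -73.9], [37.8, 40.7], [10, 20]
    return {
        "type": "FeatureCollection",
        "features": [
            {
                "type": "Feature",
                "properties": {"metric": metric, "radius": None},
                "geometry": {"type": "Point", "coordinates": [lon, lat]},
            }
            for lon, lat, metric in zip(lons, lats, metrics)
        ],
    }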
viz_types_list = [
TableViz,
PivotTableViz,
NVD3TimeSeriesViz,
NVD3DualLineViz,
NVD3CompareTimeSeriesViz,
NVD3TimeSeriesStackedViz,
NVD3TimeSeriesBarViz,
DistributionBarViz,
DistributionPieViz,
BubbleViz,
BulletViz,
MarkupViz,
WordCloudViz,
BigNumberViz,
BigNumberTotalViz,
SunburstViz,
DirectedForceViz,
SankeyViz,
CountryMapViz,
ChordViz,
WorldMapViz,
FilterBoxViz,
IFrameViz,
ParallelCoordinatesViz,
HeatmapViz,
BoxPlotViz,
TreemapViz,
CalHeatmapViz,
HorizonViz,
MapboxViz,
HistogramViz,
SeparatorViz,
]
viz_types = OrderedDict([(v.viz_type, v) for v in viz_types_list
if v.viz_type not in config.get('VIZ_TYPE_BLACKLIST')])
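# Hedged editor's sketch (not part of the original module): viz classes are
# registered by their viz_type key, so a slice's form_data resolves to a
# class with a plain dict lookup ('line' assumes it is not blacklisted).
def _example_viz_lookup():
    return viz_types.get('line')  # -> NVD3TimeSeriesViz, or None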
| asdf2014/superset | superset/viz.py | Python | apache-2.0 | 52,742 | ["VisIt"] | c8ebf1dc716d37c98fbade0552305ac6fafdce5afe177d0ed4efe720fd16879a |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
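# Hedged editor's sketch (not part of the generated script): partition keeps
# both buckets in input order and returns the "true" bucket first.
def _example_partition():
    evens, odds = partition(lambda n: n % 2 == 0, [1, 2, 3, 4, 5])
    return evens, odds  # -> ([2, 4], [1, 3, 5])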
class apigatewayCallTransformer(cst.CSTTransformer):
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
'create_api': ('parent', 'api_id', 'api', ),
'create_api_config': ('parent', 'api_config_id', 'api_config', ),
'create_gateway': ('parent', 'gateway_id', 'gateway', ),
'delete_api': ('name', ),
'delete_api_config': ('name', ),
'delete_gateway': ('name', ),
'get_api': ('name', ),
'get_api_config': ('name', 'view', ),
'get_gateway': ('name', ),
'list_api_configs': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
'list_apis': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
'list_gateways': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
'update_api': ('api', 'update_mask', ),
'update_api_config': ('api_config', 'update_mask', ),
'update_gateway': ('gateway', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
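# Hedged editor's sketch (not part of the generated script): feeding a
# hypothetical call through the transformer folds the positional argument
# into a single 'request' dict, while control parameters pass through.
# 'get_api' maps to ('name',) in METHOD_TO_PARAMS above.
def _example_transform():
    src = "client.get_api('projects/p/locations/l/apis/a', timeout=30.0)\n"
    updated = cst.parse_module(src).visit(apigatewayCallTransformer())
    # Roughly: client.get_api(request={'name': 'projects/p/...'}, timeout=30.0)
    return updated.code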
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=apigatewayCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the apigateway client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| googleapis/python-api-gateway | scripts/fixup_apigateway_v1_keywords.py | Python | apache-2.0 | 6,756 | ["VisIt"] | cfa11b3ba86dff58a77bbd8fda6d9b27d58e78b53f09b1626524e00489b5df52 |
|
from functools import reduce
from operator import add
from os.path import dirname, join
import arviz as az
import numpy as np
import pandas as pd
import pytest
from formulae import design_matrices
from bambi.models import Model
from bambi.terms import Term, GroupSpecificTerm
from bambi.priors import Prior
@pytest.fixture(scope="module")
def data_numeric_xy():
data = pd.DataFrame(
{
"y": np.random.normal(size=100),
"x": np.random.normal(size=100),
}
)
return data
@pytest.fixture(scope="module")
def diabetes_data():
data_dir = join(dirname(__file__), "data")
data = pd.read_csv(join(data_dir, "diabetes.txt"), sep="\t")
data["age_grp"] = 0
data.loc[data["AGE"] > 40, "age_grp"] = 1
data.loc[data["AGE"] > 60, "age_grp"] = 2
return data
@pytest.fixture(scope="module")
def crossed_data():
"""
Group specific effects:
10 subjects, 12 items, 5 sites
Subjects crossed with items, nested in sites
Items crossed with sites
Common effects:
A continuous predictor, a numeric dummy, and a three-level category
(levels a,b,c)
Structure:
Subjects nested in dummy (e.g., gender), crossed with threecats
Items crossed with dummy, nested in threecats
Sites partially crossed with dummy (4/5 see a single dummy, 1/5 sees both
dummies)
Sites crossed with threecats
"""
data_dir = join(dirname(__file__), "data")
data = pd.read_csv(join(data_dir, "crossed_random.csv"))
return data
def test_term_init(diabetes_data):
design = design_matrices("BMI", diabetes_data)
term_info = design.common.terms_info["BMI"]
term = Term("BMI", term_info, diabetes_data["BMI"])
# Test that all defaults are properly initialized
assert term.name == "BMI"
assert not term.categorical
assert not term.group_specific
assert term.levels is not None
assert term.data.shape == (442,)
def test_distribute_group_specific_effect_over(diabetes_data):
# 163 unique levels of BMI in diabetes_data
# With intercept
model = Model("BP ~ (C(age_grp)|BMI)", diabetes_data)
model.build()
# Treatment encoding because of the intercept
lvls = sorted(list(diabetes_data["age_grp"].unique()))[1:]
assert "C(age_grp)|BMI" in model.terms
assert "1|BMI" in model.terms
assert model.terms["C(age_grp)|BMI"].pymc_coords["C(age_grp)_coord_group_expr"] == lvls
# This is equal to the sub-matrix of Z that corresponds to this term.
# 442 is the number of observations. 163 the number of groups.
# 2 is the number of levels of the categorical variable 'C(age_grp)' after removing
# the reference level. Then the number of columns is 326 = 163 * 2.
assert model.terms["C(age_grp)|BMI"].data.shape == (442, 326)
# Without intercept. Reference level is not removed.
model = Model("BP ~ (0 + C(age_grp)|BMI)", diabetes_data)
model.build()
assert "C(age_grp)|BMI" in model.terms
assert not "1|BMI" in model.terms
assert model.terms["C(age_grp)|BMI"].data.shape == (442, 489)
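# Hedged editor's sketch (not collected by pytest): the column counts asserted
# above are just (number of BMI groups) x (levels of C(age_grp) retained).
def _sketch_group_specific_column_counts():
    n_groups = 163  # unique BMI values in diabetes_data
    assert n_groups * 2 == 326  # intercept present: reference level dropped
    assert n_groups * 3 == 489  # no intercept: all three levels kept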
def test_model_init_from_filename():
data_dir = join(dirname(__file__), "data")
filename = join(data_dir, "diabetes.txt")
model = Model("BP ~ BMI", filename)
assert isinstance(model.data, pd.DataFrame)
assert model.data.shape == (442, 11)
assert "BMI" in model.data.columns
def test_model_init_bad_data():
with pytest.raises(ValueError):
Model("y ~ x", {"x": 1})
def test_model_categorical_argument():
data = pd.DataFrame(
{
"y": np.random.normal(size=100),
"x": np.random.randint(2, size=100),
"z": np.random.randint(2, size=100),
}
)
model = Model("y ~ 0 + x", data, categorical="x")
assert model.terms["x"].categorical
model = Model("y ~ 0 + x*z", data, categorical=["x", "z"])
assert model.terms["x"].categorical
assert model.terms["z"].categorical
assert model.terms["x:z"].categorical
def test_model_no_response():
with pytest.raises(ValueError):
Model("x", pd.DataFrame({"x": [1]}))
def test_model_taylor_value(data_numeric_xy):
Model("y ~ x", data=data_numeric_xy, taylor=5)
def test_model_alternative_scaler(data_numeric_xy):
Model("y ~ x", data=data_numeric_xy, automatic_priors="mle")
def test_model_term_names_property(diabetes_data):
model = Model("BMI ~ age_grp + BP + S1", diabetes_data)
assert model.term_names == ["Intercept", "age_grp", "BP", "S1"]
def test_model_term_names_property_interaction(crossed_data):
crossed_data["fourcats"] = sum([[x] * 10 for x in ["a", "b", "c", "d"]], list()) * 3
model = Model("Y ~ threecats*fourcats", crossed_data)
assert model.term_names == ["Intercept", "threecats", "fourcats", "threecats:fourcats"]
def test_model_terms_levels_interaction(crossed_data):
crossed_data["fourcats"] = sum([[x] * 10 for x in ["a", "b", "c", "d"]], list()) * 3
model = Model("Y ~ threecats*fourcats", crossed_data)
assert model.terms["threecats:fourcats"].levels == [
"threecats[b]:fourcats[b]",
"threecats[b]:fourcats[c]",
"threecats[b]:fourcats[d]",
"threecats[c]:fourcats[b]",
"threecats[c]:fourcats[c]",
"threecats[c]:fourcats[d]",
]
def test_model_terms_levels():
data = pd.DataFrame(
{
"y": np.random.normal(size=50),
"x": np.random.normal(size=50),
"z": reduce(add, [[f"Group {x}"] * 10 for x in ["1", "2", "3", "1", "2"]]),
"time": list(range(1, 11)) * 5,
"subject": reduce(add, [[f"Subject {x}"] * 10 for x in range(1, 6)]),
}
)
model = Model("y ~ x + z + time + (time|subject)", data)
assert model.terms["z"].levels == ["z[Group 2]", "z[Group 3]"]
assert model.terms["1|subject"].groups == [f"Subject {x}" for x in range(1, 6)]
assert model.terms["time|subject"].groups == [f"Subject {x}" for x in range(1, 6)]
def test_model_term_classes():
data = pd.DataFrame(
{
"y": np.random.normal(size=50),
"x": np.random.normal(size=50),
"s": ["s1"] * 25 + ["s2"] * 25,
"g": np.random.choice(["a", "b", "c"], size=50),
}
)
model = Model("y ~ x*g + (x|s)", data)
assert isinstance(model.terms["x"], Term)
assert isinstance(model.terms["g"], Term)
assert isinstance(model.terms["x:g"], Term)
assert isinstance(model.terms["1|s"], GroupSpecificTerm)
assert isinstance(model.terms["x|s"], GroupSpecificTerm)
# Also check 'categorical' attribute is right
assert model.terms["g"].categorical
def test_one_shot_formula_fit(diabetes_data):
model = Model("S3 ~ S1 + S2", diabetes_data)
model.fit(draws=50)
named_vars = model.backend.model.named_vars
targets = ["S3", "S1", "Intercept"]
assert len(set(named_vars.keys()) & set(targets)) == 3
def test_categorical_term():
data = pd.DataFrame(
{
"y": np.random.normal(size=6),
"x1": np.random.normal(size=6),
"x2": [1, 1, 0, 0, 1, 1],
"g1": ["a"] * 3 + ["b"] * 3,
"g2": ["x", "x", "z", "z", "y", "y"],
}
)
model = Model("y ~ x1 + x2 + g1 + (g1|g2) + (x2|g2)", data)
fitted = model.fit(draws=10)
df = az.summary(fitted)
names = [
"Intercept",
"x1",
"x2",
"g1[b]",
"1|g2_sigma",
"1|g2[x]",
"1|g2[y]",
"1|g2[z]",
"g1|g2_sigma[b]",
"g1|g2[b, x]",
"g1|g2[b, y]",
"g1|g2[b, z]",
"x2|g2_sigma",
"x2|g2[x]",
"x2|g2[y]",
"x2|g2[z]",
"y_sigma",
]
assert list(df.index) == names
def test_omit_offsets_false():
data = pd.DataFrame(
{
"y": np.random.normal(size=100),
"x1": np.random.normal(size=100),
"g1": ["a"] * 50 + ["b"] * 50,
}
)
model = Model("y ~ x1 + (x1|g1)", data)
fitted = model.fit(omit_offsets=False)
offsets = [var for var in fitted.posterior.var() if var.endswith("_offset")]
assert offsets == ["1|g1_offset", "x1|g1_offset"]
def test_omit_offsets_true():
data = pd.DataFrame(
{
"y": np.random.normal(size=100),
"x1": np.random.normal(size=100),
"g1": ["a"] * 50 + ["b"] * 50,
}
)
model = Model("y ~ x1 + (x1|g1)", data)
fitted = model.fit(omit_offsets=True)
offsets = [var for var in fitted.posterior.var() if var.endswith("_offset")]
assert not offsets
def test_hyperprior_on_common_effect():
data = pd.DataFrame(
{
"y": np.random.normal(size=100),
"x1": np.random.normal(size=100),
"g1": ["a"] * 50 + ["b"] * 50,
}
)
slope = Prior("Normal", mu=0, sd=Prior("HalfCauchy", beta=2))
priors = {"x1": slope}
with pytest.raises(ValueError):
Model("y ~ x1 + (x1|g1)", data, priors=priors)
priors = {"common": slope}
with pytest.raises(ValueError):
Model("y ~ x1 + (x1|g1)", data, priors=priors)
def test_sparse_fails():
data = pd.DataFrame(
{
"y": np.random.normal(size=4),
"x1": np.random.normal(size=4),
"x2": np.random.normal(size=4),
"x3": np.random.normal(size=4),
"x4": np.random.normal(size=4),
}
)
with pytest.raises(ValueError, match="Design matrix for common effects is not full-rank"):
Model("y ~ x1 + x2 + x3 + x4", data, automatic_priors="mle")
data = pd.DataFrame(
{
"y": np.random.normal(size=4),
"g1": ["a", "b", "c", "d"],
"g2": ["a", "b", "c", "d"],
}
)
with pytest.raises(ValueError, match="Design matrix for common effects is not full-rank"):
Model("y ~ g1 + g2", data, automatic_priors="mle")
@pytest.mark.parametrize(
"family",
[
"gaussian",
"negativebinomial",
"bernoulli",
"poisson",
"gamma",
"vonmises",
"wald",
],
)
def test_automatic_priors(family):
"""Test that automatic priors work correctly"""
obs = pd.DataFrame([0], columns=["x"])
Model("x ~ 0", obs, family=family)
def test_links():
data = pd.DataFrame(
{
"g": np.random.choice([0, 1], size=100),
"y": np.random.randint(3, 10, size=100),
"x": np.random.randint(3, 10, size=100),
}
)
FAMILIES = {
"bernoulli": ["identity", "logit", "probit", "cloglog"],
"beta": ["identity", "logit", "probit", "cloglog"],
"gamma": ["identity", "inverse", "log"],
"gaussian": ["identity", "log", "inverse"],
"negativebinomial": ["identity", "log", "cloglog"],
"poisson": ["identity", "log"],
"vonmises": ["identity", "tan_2"],
"wald": ["inverse", "inverse_squared", "identity", "log"],
}
for family, links in FAMILIES.items():
for link in links:
if family == "bernoulli":
Model("g ~ x", data, family=family, link=link)
else:
Model("y ~ x", data, family=family, link=link)
def test_bad_links():
"""Passes names of links that are not suitable for the family."""
data = pd.DataFrame(
{
"g": np.random.choice([0, 1], size=100),
"y": np.random.randint(3, 10, size=100),
"x": np.random.randint(3, 10, size=100),
}
)
FAMILIES = {
"bernoulli": ["inverse", "inverse_squared", "log"],
"beta": ["inverse", "inverse_squared", "log"],
"gamma": ["logit", "probit", "cloglog"],
"gaussian": ["logit", "probit", "cloglog"],
"negativebinomial": ["logit", "probit", "inverse", "inverse_squared"],
"poisson": ["logit", "probit", "cloglog", "inverse", "inverse_squared"],
"vonmises": ["logit", "probit", "cloglog"],
"wald": ["logit", "probit", "cloglog"],
}
for family, links in FAMILIES.items():
for link in links:
with pytest.raises(ValueError):
if family == "bernoulli":
formula = "g ~ x"
else:
formula = "y ~ x"
Model(formula, data, family=family, link=link)
def test_constant_terms():
data = pd.DataFrame(
{
"y": np.random.normal(size=10),
"x": np.random.choice([1], size=10),
"z": np.random.choice(["A"], size=10),
}
)
with pytest.raises(ValueError):
Model("y ~ 0 + x", data)
with pytest.raises(ValueError):
Model("y ~ 0 + z", data)
def test_1d_group_specific():
data = pd.DataFrame(
{
"y": np.random.normal(size=40),
"x": np.random.choice(["A", "B"], size=40),
"g": ["A", "B", "C", "D"] * 10,
}
)
    # Since 1|g is already present, x|g contributes a single column.
    # We need to ensure the x|g contribution is of shape (40,), not (40, 1).
    # The term's own shape still returns (40, 1), so we check the model mean
    # (mu) instead; the difference is that .squeeze() is applied after creation.
model = Model("y ~ (x|g)", data)
model.build()
assert model.backend.mu.tag.test_value.shape == (40,)
| bambinos/bambi | bambi/tests/test_model_construction.py | Python | mit | 13,347 | ["Gaussian"] | 1ba72f9c00938041b5c86850afe34d2e449ab76caeead3073719d95b7f0f917b |
|
"""IO methods for radar data from MYRORSS or MRMS.
MYRORSS = Multi-year Reanalysis of Remotely Sensed Storms
MRMS = Multi-radar Multi-sensor
"""
import os
import glob
import warnings
import numpy
import pandas
from netCDF4 import Dataset
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import myrorss_and_mrms_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
NW_GRID_POINT_LAT_COLUMN_ORIG = 'Latitude'
NW_GRID_POINT_LNG_COLUMN_ORIG = 'Longitude'
LAT_SPACING_COLUMN_ORIG = 'LatGridSpacing'
LNG_SPACING_COLUMN_ORIG = 'LonGridSpacing'
NUM_LAT_COLUMN_ORIG = 'Lat'
NUM_LNG_COLUMN_ORIG = 'Lon'
NUM_PIXELS_COLUMN_ORIG = 'pixel'
HEIGHT_COLUMN_ORIG = 'Height'
UNIX_TIME_COLUMN_ORIG = 'Time'
FIELD_NAME_COLUMN_ORIG = 'TypeName'
SENTINEL_VALUE_COLUMNS_ORIG = ['MissingData', 'RangeFolded']
GRID_ROW_COLUMN = 'grid_row'
GRID_COLUMN_COLUMN = 'grid_column'
NUM_GRID_CELL_COLUMN = 'num_grid_cells'
GRID_ROW_COLUMN_ORIG = 'pixel_x'
GRID_COLUMN_COLUMN_ORIG = 'pixel_y'
NUM_GRID_CELL_COLUMN_ORIG = 'pixel_count'
TIME_FORMAT_SECONDS = '%Y%m%d-%H%M%S'
TIME_FORMAT_MINUTES = '%Y%m%d-%H%M'
TIME_FORMAT_FOR_LOG_MESSAGES = '%Y-%m-%d-%H%M%S'
TIME_FORMAT_SECONDS_REGEX = (
'[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]-[0-2][0-9][0-5][0-9][0-5][0-9]')
MINUTES_TO_SECONDS = 60
METRES_TO_KM = 1e-3
SENTINEL_TOLERANCE = 10.
LATLNG_MULTIPLE_DEG = 1e-4
DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC = 240
DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC = 180
ZIPPED_FILE_EXTENSION = '.gz'
UNZIPPED_FILE_EXTENSION = '.netcdf'
AZIMUTHAL_SHEAR_FIELD_NAMES = [
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME]
RADAR_FILE_NAMES_KEY = 'radar_file_name_matrix'
UNIQUE_TIMES_KEY = 'unique_times_unix_sec'
SPC_DATES_AT_UNIQUE_TIMES_KEY = 'spc_dates_at_unique_times_unix_sec'
FIELD_NAME_BY_PAIR_KEY = 'field_name_by_pair'
HEIGHT_BY_PAIR_KEY = 'height_by_pair_m_asl'
def _get_pathless_raw_file_pattern(unix_time_sec):
"""Generates glob pattern for pathless name of raw file.
    This method truncates the valid time to the minute and allows the file
    to be either zipped or unzipped.
The pattern generated by this method is meant for input to `glob.glob`.
This method is the "pattern" version of _get_pathless_raw_file_name.
:param unix_time_sec: Valid time.
:return: pathless_raw_file_pattern: Pathless glob pattern for raw file.
"""
return '{0:s}*{1:s}*'.format(
time_conversion.unix_sec_to_string(unix_time_sec, TIME_FORMAT_MINUTES),
UNZIPPED_FILE_EXTENSION
)
def _get_pathless_raw_file_name(unix_time_sec, zipped=True):
"""Generates pathless name for raw file.
:param unix_time_sec: Valid time.
:param zipped: Boolean flag. If True, will generate name for zipped file.
If False, will generate name for unzipped file.
:return: pathless_raw_file_name: Pathless name for raw file.
"""
if zipped:
return '{0:s}{1:s}{2:s}'.format(
time_conversion.unix_sec_to_string(
unix_time_sec, TIME_FORMAT_SECONDS),
UNZIPPED_FILE_EXTENSION,
ZIPPED_FILE_EXTENSION
)
return '{0:s}{1:s}'.format(
time_conversion.unix_sec_to_string(unix_time_sec, TIME_FORMAT_SECONDS),
UNZIPPED_FILE_EXTENSION
)
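def _example_pathless_names():
    """Hedged editor's sketch (not part of the original module).

    Shows the names generated above for a hypothetical valid time of
    0000 UTC 1 Jan 2018 (1514764800 Unix seconds).
    """
    return (
        _get_pathless_raw_file_name(1514764800, zipped=True),
        _get_pathless_raw_file_name(1514764800, zipped=False),
    )  # -> ('20180101-000000.netcdf.gz', '20180101-000000.netcdf')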
def _remove_sentinels_from_sparse_grid(
sparse_grid_table, field_name, sentinel_values):
"""Removes sentinel values from sparse grid.
:param sparse_grid_table: pandas DataFrame with columns produced by
`read_data_from_sparse_grid_file`.
:param field_name: Name of radar field in GewitterGefahr format.
:param sentinel_values: 1-D numpy array of sentinel values.
:return: sparse_grid_table: Same as input, except that rows with a sentinel
value are removed.
"""
num_rows = len(sparse_grid_table.index)
sentinel_flags = numpy.full(num_rows, False, dtype=bool)
for this_sentinel_value in sentinel_values:
these_sentinel_flags = numpy.isclose(
sparse_grid_table[field_name].values, this_sentinel_value,
atol=SENTINEL_TOLERANCE)
sentinel_flags = numpy.logical_or(sentinel_flags, these_sentinel_flags)
sentinel_indices = numpy.where(sentinel_flags)[0]
return sparse_grid_table.drop(
sparse_grid_table.index[sentinel_indices], axis=0, inplace=False)
def _remove_sentinels_from_full_grid(field_matrix, sentinel_values):
"""Removes sentinel values from full grid.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
:param field_matrix: M-by-N numpy array with radar field.
:param sentinel_values: 1-D numpy array of sentinel values.
:return: field_matrix: Same as input, except that sentinel values are
replaced with NaN.
"""
num_grid_rows = field_matrix.shape[0]
num_grid_columns = field_matrix.shape[1]
num_grid_points = num_grid_rows * num_grid_columns
field_matrix = numpy.reshape(field_matrix, num_grid_points)
sentinel_flags = numpy.full(num_grid_points, False, dtype=bool)
for this_sentinel_value in sentinel_values:
these_sentinel_flags = numpy.isclose(
field_matrix, this_sentinel_value, atol=SENTINEL_TOLERANCE)
sentinel_flags = numpy.logical_or(sentinel_flags, these_sentinel_flags)
sentinel_indices = numpy.where(sentinel_flags)[0]
field_matrix[sentinel_indices] = numpy.nan
return numpy.reshape(field_matrix, (num_grid_rows, num_grid_columns))
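def _example_remove_sentinels():
    """Hedged editor's sketch (not part of the original module).

    Applies the sentinel removal above to a hypothetical 2-by-2 grid. Both
    flagged values fall within SENTINEL_TOLERANCE (10.) of the sentinel, so
    the result is [[35., nan], [nan, 40.]].
    """
    field_matrix = numpy.array([[35., -99900.], [-99901., 40.]])
    return _remove_sentinels_from_full_grid(
        field_matrix, sentinel_values=numpy.array([-99900.]))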
def get_relative_dir_for_raw_files(field_name, data_source, height_m_asl=None):
"""Generates relative path for raw files.
:param field_name: Name of radar field in GewitterGefahr format.
:param data_source: Data source (string).
:param height_m_asl: Radar height (metres above sea level).
:return: relative_directory_name: Relative path for raw files.
"""
if field_name == radar_utils.REFL_NAME:
radar_utils.check_heights(
data_source=data_source, heights_m_asl=numpy.array([height_m_asl]),
field_name=radar_utils.REFL_NAME)
else:
height_m_asl = radar_utils.get_valid_heights(
data_source=data_source, field_name=field_name)[0]
return '{0:s}/{1:05.2f}'.format(
radar_utils.field_name_new_to_orig(
field_name=field_name, data_source_name=data_source),
float(height_m_asl) * METRES_TO_KM
)
def find_raw_file(
unix_time_sec, spc_date_string, field_name, data_source,
top_directory_name, height_m_asl=None, raise_error_if_missing=True):
"""Finds raw file.
File should contain one field at one time step (e.g., MESH at 123502 UTC,
reflectivity at 500 m above sea level and 123502 UTC).
:param unix_time_sec: Valid time.
:param spc_date_string: SPC date (format "yyyymmdd").
:param field_name: Name of radar field in GewitterGefahr format.
:param data_source: Data source (string).
:param top_directory_name: Name of top-level directory with raw files.
:param height_m_asl: Radar height (metres above sea level).
:param raise_error_if_missing: Boolean flag. If True and file is missing,
this method will raise an error. If False and file is missing, will
return *expected* path to raw file.
:return: raw_file_name: Path to raw file.
:raises: ValueError: if raise_error_if_missing = True and file is missing.
"""
# Error-checking.
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_is_string(top_directory_name)
error_checking.assert_is_boolean(raise_error_if_missing)
relative_directory_name = get_relative_dir_for_raw_files(
field_name=field_name, height_m_asl=height_m_asl,
data_source=data_source)
directory_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_directory_name, spc_date_string[:4], spc_date_string,
relative_directory_name
)
pathless_file_name = _get_pathless_raw_file_name(unix_time_sec, zipped=True)
raw_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(raw_file_name):
pathless_file_name = _get_pathless_raw_file_name(
unix_time_sec, zipped=False)
raw_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(raw_file_name):
raise ValueError(
'Cannot find raw file. Expected at: "{0:s}"'.format(raw_file_name)
)
return raw_file_name
def raw_file_name_to_time(raw_file_name):
"""Parses time from file name.
:param raw_file_name: Path to raw file.
:return: unix_time_sec: Valid time.
"""
error_checking.assert_is_string(raw_file_name)
_, time_string = os.path.split(raw_file_name)
time_string = time_string.replace(ZIPPED_FILE_EXTENSION, '').replace(
UNZIPPED_FILE_EXTENSION, '')
return time_conversion.string_to_unix_sec(time_string, TIME_FORMAT_SECONDS)
def find_raw_file_inexact_time(
desired_time_unix_sec, spc_date_string, field_name, data_source,
top_directory_name, height_m_asl=None, max_time_offset_sec=None,
raise_error_if_missing=False):
"""Finds raw file at inexact time.
If you know the exact valid time, use `find_raw_file`.
:param desired_time_unix_sec: Desired valid time.
:param spc_date_string: SPC date (format "yyyymmdd").
:param field_name: Field name in GewitterGefahr format.
:param data_source: Data source (string).
:param top_directory_name: Name of top-level directory with raw files.
:param height_m_asl: Radar height (metres above sea level).
:param max_time_offset_sec: Maximum offset between actual and desired valid
time.
For example, if `desired_time_unix_sec` is 162933 UTC 5 Jan 2018 and
`max_time_offset_sec` = 60, this method will look for az-shear at valid
times from 162833...163033 UTC 5 Jan 2018.
If None, this defaults to `DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC` for
azimuthal-shear fields and `DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC` for
all other fields.
:param raise_error_if_missing: Boolean flag. If no file is found and
raise_error_if_missing = True, this method will error out. If no file
is found and raise_error_if_missing = False, will return None.
:return: raw_file_name: Path to raw file.
:raises: ValueError: if no file is found and raise_error_if_missing = True.
"""
# Error-checking.
error_checking.assert_is_integer(desired_time_unix_sec)
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_is_boolean(raise_error_if_missing)
radar_utils.check_field_name(field_name)
if max_time_offset_sec is None:
if field_name in AZIMUTHAL_SHEAR_FIELD_NAMES:
max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC
else:
max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC
error_checking.assert_is_integer(max_time_offset_sec)
error_checking.assert_is_greater(max_time_offset_sec, 0)
    first_allowed_minute_unix_sec = int(rounder.floor_to_nearest(
        float(desired_time_unix_sec - max_time_offset_sec),
        MINUTES_TO_SECONDS))
    last_allowed_minute_unix_sec = int(rounder.floor_to_nearest(
        float(desired_time_unix_sec + max_time_offset_sec),
        MINUTES_TO_SECONDS))
allowed_minutes_unix_sec = time_periods.range_and_interval_to_list(
start_time_unix_sec=first_allowed_minute_unix_sec,
end_time_unix_sec=last_allowed_minute_unix_sec,
time_interval_sec=MINUTES_TO_SECONDS, include_endpoint=True).astype(int)
relative_directory_name = get_relative_dir_for_raw_files(
field_name=field_name, data_source=data_source,
height_m_asl=height_m_asl)
raw_file_names = []
for this_time_unix_sec in allowed_minutes_unix_sec:
this_pathless_file_pattern = _get_pathless_raw_file_pattern(
this_time_unix_sec)
this_file_pattern = '{0:s}/{1:s}/{2:s}/{3:s}/{4:s}'.format(
top_directory_name, spc_date_string[:4], spc_date_string,
relative_directory_name, this_pathless_file_pattern
)
raw_file_names += glob.glob(this_file_pattern)
file_times_unix_sec = []
for this_raw_file_name in raw_file_names:
file_times_unix_sec.append(raw_file_name_to_time(this_raw_file_name))
if len(file_times_unix_sec):
file_times_unix_sec = numpy.array(file_times_unix_sec)
time_differences_sec = numpy.absolute(
file_times_unix_sec - desired_time_unix_sec)
nearest_index = numpy.argmin(time_differences_sec)
min_time_diff_sec = time_differences_sec[nearest_index]
else:
min_time_diff_sec = numpy.inf
if min_time_diff_sec > max_time_offset_sec:
if raise_error_if_missing:
desired_time_string = time_conversion.unix_sec_to_string(
desired_time_unix_sec, TIME_FORMAT_FOR_LOG_MESSAGES)
error_string = (
'Could not find "{0:s}" file within {1:d} seconds of {2:s}.'
).format(field_name, max_time_offset_sec, desired_time_string)
raise ValueError(error_string)
return None
return raw_file_names[nearest_index]
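def _example_allowed_minutes():
    """Hedged editor's sketch (not part of the original module).

    Shows the minute window scanned above for a hypothetical desired time of
    00:01:30 UTC 1 Jan 2018 (1514764890 Unix seconds) and a 60-second offset:
    floor-rounding yields 0000 and 0002 UTC as the window edges, so glob
    patterns are generated for 0000, 0001 and 0002 UTC.
    """
    desired_time_unix_sec = 1514764890
    max_time_offset_sec = 60
    first = int(rounder.floor_to_nearest(
        float(desired_time_unix_sec - max_time_offset_sec),
        MINUTES_TO_SECONDS))  # 1514764800
    last = int(rounder.floor_to_nearest(
        float(desired_time_unix_sec + max_time_offset_sec),
        MINUTES_TO_SECONDS))  # 1514764920
    return first, last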
def find_raw_files_one_spc_date(
spc_date_string, field_name, data_source, top_directory_name,
height_m_asl=None, raise_error_if_missing=True):
"""Finds raw files for one field and one SPC date.
:param spc_date_string: SPC date (format "yyyymmdd").
:param field_name: Name of radar field in GewitterGefahr format.
:param data_source: Data source (string).
:param top_directory_name: Name of top-level directory with raw files.
:param height_m_asl: Radar height (metres above sea level).
:param raise_error_if_missing: Boolean flag. If True and no files are
found, will raise error.
:return: raw_file_names: 1-D list of paths to raw files.
:raises: ValueError: if raise_error_if_missing = True and no files are
found.
"""
error_checking.assert_is_boolean(raise_error_if_missing)
example_time_unix_sec = time_conversion.spc_date_string_to_unix_sec(
spc_date_string)
example_file_name = find_raw_file(
unix_time_sec=example_time_unix_sec, spc_date_string=spc_date_string,
field_name=field_name, data_source=data_source,
top_directory_name=top_directory_name, height_m_asl=height_m_asl,
raise_error_if_missing=False)
example_directory_name, example_pathless_file_name = os.path.split(
example_file_name)
example_time_string = time_conversion.unix_sec_to_string(
example_time_unix_sec, TIME_FORMAT_SECONDS)
pathless_file_pattern = example_pathless_file_name.replace(
example_time_string, TIME_FORMAT_SECONDS_REGEX)
pathless_file_pattern = pathless_file_pattern.replace(
ZIPPED_FILE_EXTENSION, '*')
raw_file_pattern = '{0:s}/{1:s}'.format(
example_directory_name, pathless_file_pattern)
raw_file_names = glob.glob(raw_file_pattern)
if raise_error_if_missing and not raw_file_names:
error_string = (
'Could not find any files with the following pattern: {0:s}'
).format(raw_file_pattern)
raise ValueError(error_string)
return raw_file_names
def find_many_raw_files(
desired_times_unix_sec, spc_date_strings, data_source, field_names,
top_directory_name, reflectivity_heights_m_asl=None,
max_time_offset_for_az_shear_sec=
DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC,
max_time_offset_for_non_shear_sec=
DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC):
"""Finds raw file for each field/height pair and time step.
N = number of input times
T = number of unique input times
F = number of field/height pairs
:param desired_times_unix_sec: length-N numpy array with desired valid
times.
:param spc_date_strings: length-N list of corresponding SPC dates (format
"yyyymmdd").
:param data_source: Data source ("myrorss" or "mrms").
:param field_names: 1-D list of field names.
:param top_directory_name: Name of top-level directory with radar data from
the given source.
:param reflectivity_heights_m_asl: 1-D numpy array of heights (metres above
sea level) for the field "reflectivity_dbz". If "reflectivity_dbz" is
not in `field_names`, leave this as None.
:param max_time_offset_for_az_shear_sec: Max time offset (between desired
and actual valid time) for azimuthal-shear fields.
:param max_time_offset_for_non_shear_sec: Max time offset (between desired
and actual valid time) for non-azimuthal-shear fields.
:return: file_dictionary: Dictionary with the following keys.
file_dictionary['radar_file_name_matrix']: T-by-F numpy array of paths to
raw files.
file_dictionary['unique_times_unix_sec']: length-T numpy array of unique
valid times.
file_dictionary['spc_date_strings_for_unique_times']: length-T numpy array
of corresponding SPC dates.
file_dictionary['field_name_by_pair']: length-F list of field names.
file_dictionary['height_by_pair_m_asl']: length-F numpy array of heights
(metres above sea level).
"""
field_name_by_pair, height_by_pair_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_pairs(
field_names=field_names, data_source=data_source,
refl_heights_m_asl=reflectivity_heights_m_asl)
)
num_fields = len(field_name_by_pair)
error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)
error_checking.assert_is_numpy_array(
desired_times_unix_sec, num_dimensions=1)
num_times = len(desired_times_unix_sec)
error_checking.assert_is_string_list(spc_date_strings)
error_checking.assert_is_numpy_array(
numpy.array(spc_date_strings),
exact_dimensions=numpy.array([num_times]))
spc_dates_unix_sec = numpy.array(
[time_conversion.spc_date_string_to_unix_sec(s)
for s in spc_date_strings])
time_matrix = numpy.hstack((
numpy.reshape(desired_times_unix_sec, (num_times, 1)),
numpy.reshape(spc_dates_unix_sec, (num_times, 1))
))
unique_time_matrix = numpy.vstack(
{tuple(this_row) for this_row in time_matrix}
).astype(int)
unique_times_unix_sec = unique_time_matrix[:, 0]
spc_dates_at_unique_times_unix_sec = unique_time_matrix[:, 1]
sort_indices = numpy.argsort(unique_times_unix_sec)
unique_times_unix_sec = unique_times_unix_sec[sort_indices]
spc_dates_at_unique_times_unix_sec = spc_dates_at_unique_times_unix_sec[
sort_indices]
num_unique_times = len(unique_times_unix_sec)
radar_file_name_matrix = numpy.full(
(num_unique_times, num_fields), '', dtype=object)
for i in range(num_unique_times):
this_spc_date_string = time_conversion.time_to_spc_date_string(
spc_dates_at_unique_times_unix_sec[i])
for j in range(num_fields):
if field_name_by_pair[j] in AZIMUTHAL_SHEAR_FIELD_NAMES:
this_max_time_offset_sec = max_time_offset_for_az_shear_sec
this_raise_error_flag = False
else:
this_max_time_offset_sec = max_time_offset_for_non_shear_sec
this_raise_error_flag = True
if this_max_time_offset_sec == 0:
radar_file_name_matrix[i, j] = find_raw_file(
unix_time_sec=unique_times_unix_sec[i],
spc_date_string=this_spc_date_string,
field_name=field_name_by_pair[j], data_source=data_source,
top_directory_name=top_directory_name,
height_m_asl=height_by_pair_m_asl[j],
raise_error_if_missing=this_raise_error_flag)
else:
radar_file_name_matrix[i, j] = find_raw_file_inexact_time(
desired_time_unix_sec=unique_times_unix_sec[i],
spc_date_string=this_spc_date_string,
field_name=field_name_by_pair[j], data_source=data_source,
top_directory_name=top_directory_name,
height_m_asl=height_by_pair_m_asl[j],
max_time_offset_sec=this_max_time_offset_sec,
raise_error_if_missing=this_raise_error_flag)
if radar_file_name_matrix[i, j] is None:
this_time_string = time_conversion.unix_sec_to_string(
unique_times_unix_sec[i], TIME_FORMAT_FOR_LOG_MESSAGES)
warning_string = (
'Cannot find file for "{0:s}" at {1:d} metres ASL and '
'{2:s}.'
).format(
field_name_by_pair[j], int(height_by_pair_m_asl[j]),
this_time_string
)
warnings.warn(warning_string)
return {
RADAR_FILE_NAMES_KEY: radar_file_name_matrix,
UNIQUE_TIMES_KEY: unique_times_unix_sec,
SPC_DATES_AT_UNIQUE_TIMES_KEY: spc_dates_at_unique_times_unix_sec,
FIELD_NAME_BY_PAIR_KEY: field_name_by_pair,
HEIGHT_BY_PAIR_KEY: numpy.round(height_by_pair_m_asl).astype(int)
}
def read_metadata_from_raw_file(
netcdf_file_name, data_source, raise_error_if_fails=True):
"""Reads metadata from raw (either MYRORSS or MRMS) file.
This file should contain one radar field at one height and valid time.
:param netcdf_file_name: Path to input file.
:param data_source: Data source (string).
:param raise_error_if_fails: Boolean flag. If True and file cannot be read,
this method will raise an error. If False and file cannot be read, will
return None.
:return: metadata_dict: Dictionary with the following keys.
metadata_dict['nw_grid_point_lat_deg']: Latitude (deg N) of northwesternmost
grid point.
metadata_dict['nw_grid_point_lng_deg']: Longitude (deg E) of
northwesternmost grid point.
metadata_dict['lat_spacing_deg']: Spacing (deg N) between meridionally
adjacent grid points.
metadata_dict['lng_spacing_deg']: Spacing (deg E) between zonally adjacent
grid points.
metadata_dict['num_lat_in_grid']: Number of rows (unique grid-point
latitudes).
metadata_dict['num_lng_in_grid']: Number of columns (unique grid-point
longitudes).
    metadata_dict['height_m_asl']: Radar height (metres above sea level).
metadata_dict['unix_time_sec']: Valid time.
metadata_dict['field_name']: Name of radar field in GewitterGefahr format.
metadata_dict['field_name_orig']: Name of radar field in original (either
MYRORSS or MRMS) format.
metadata_dict['sentinel_values']: 1-D numpy array of sentinel values.
"""
error_checking.assert_file_exists(netcdf_file_name)
netcdf_dataset = netcdf_io.open_netcdf(
netcdf_file_name, raise_error_if_fails)
if netcdf_dataset is None:
return None
field_name_orig = str(getattr(netcdf_dataset, FIELD_NAME_COLUMN_ORIG))
metadata_dict = {
radar_utils.NW_GRID_POINT_LAT_COLUMN:
getattr(netcdf_dataset, NW_GRID_POINT_LAT_COLUMN_ORIG),
radar_utils.NW_GRID_POINT_LNG_COLUMN:
lng_conversion.convert_lng_positive_in_west(
getattr(netcdf_dataset, NW_GRID_POINT_LNG_COLUMN_ORIG),
allow_nan=False),
radar_utils.LAT_SPACING_COLUMN:
getattr(netcdf_dataset, LAT_SPACING_COLUMN_ORIG),
radar_utils.LNG_SPACING_COLUMN:
getattr(netcdf_dataset, LNG_SPACING_COLUMN_ORIG),
radar_utils.NUM_LAT_COLUMN:
netcdf_dataset.dimensions[NUM_LAT_COLUMN_ORIG].size + 1,
radar_utils.NUM_LNG_COLUMN:
netcdf_dataset.dimensions[NUM_LNG_COLUMN_ORIG].size + 1,
radar_utils.HEIGHT_COLUMN:
getattr(netcdf_dataset, HEIGHT_COLUMN_ORIG),
radar_utils.UNIX_TIME_COLUMN:
getattr(netcdf_dataset, UNIX_TIME_COLUMN_ORIG),
FIELD_NAME_COLUMN_ORIG: field_name_orig,
radar_utils.FIELD_NAME_COLUMN: radar_utils.field_name_orig_to_new(
field_name_orig=field_name_orig, data_source_name=data_source)
}
latitude_spacing_deg = metadata_dict[radar_utils.LAT_SPACING_COLUMN]
longitude_spacing_deg = metadata_dict[radar_utils.LNG_SPACING_COLUMN]
# TODO(thunderhoser): The following "if" condition is a hack. The purpose
# is to change grid corners only for actual MYRORSS data, not GridRad data
# in MYRORSS format.
if latitude_spacing_deg < 0.011 and longitude_spacing_deg < 0.011:
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] = (
rounder.floor_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
metadata_dict[radar_utils.LAT_SPACING_COLUMN]))
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN] = (
rounder.ceiling_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
metadata_dict[radar_utils.LNG_SPACING_COLUMN]))
sentinel_values = []
for this_column in SENTINEL_VALUE_COLUMNS_ORIG:
sentinel_values.append(getattr(netcdf_dataset, this_column))
metadata_dict.update({
radar_utils.SENTINEL_VALUE_COLUMN: numpy.array(sentinel_values)})
netcdf_dataset.close()
return metadata_dict
def read_data_from_sparse_grid_file(
netcdf_file_name, field_name_orig, data_source, sentinel_values,
raise_error_if_fails=True):
"""Reads sparse radar grid from raw (either MYRORSS or MRMS) file.
This file should contain one radar field at one height and valid time.
:param netcdf_file_name: Path to input file.
:param field_name_orig: Name of radar field in original (either MYRORSS or
MRMS) format.
:param data_source: Data source (string).
:param sentinel_values: 1-D numpy array of sentinel values.
:param raise_error_if_fails: Boolean flag. If True and file cannot be read,
this method will raise an error. If False and file cannot be read, will
return None.
:return: sparse_grid_table: pandas DataFrame with the following columns.
Each row corresponds to one grid point.
sparse_grid_table.grid_row: Row index.
sparse_grid_table.grid_column: Column index.
sparse_grid_table.<field_name>: Radar measurement (column name is produced
by _field_name_orig_to_new).
    sparse_grid_table.num_grid_cells: Number of consecutive grid points with the
        same radar measurement.  Counting is row-major (rightward along the
        row, then down to the next row if necessary).
"""
error_checking.assert_file_exists(netcdf_file_name)
error_checking.assert_is_numpy_array_without_nan(sentinel_values)
error_checking.assert_is_numpy_array(sentinel_values, num_dimensions=1)
netcdf_dataset = netcdf_io.open_netcdf(
netcdf_file_name, raise_error_if_fails)
if netcdf_dataset is None:
return None
field_name = radar_utils.field_name_orig_to_new(
field_name_orig=field_name_orig, data_source_name=data_source)
num_values = len(netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG])
if num_values == 0:
sparse_grid_dict = {
GRID_ROW_COLUMN: numpy.array([], dtype=int),
GRID_COLUMN_COLUMN: numpy.array([], dtype=int),
NUM_GRID_CELL_COLUMN: numpy.array([], dtype=int),
field_name: numpy.array([])}
else:
sparse_grid_dict = {
GRID_ROW_COLUMN: netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:],
GRID_COLUMN_COLUMN:
netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:],
NUM_GRID_CELL_COLUMN:
netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:],
field_name: netcdf_dataset.variables[field_name_orig][:]}
netcdf_dataset.close()
sparse_grid_table = pandas.DataFrame.from_dict(sparse_grid_dict)
return _remove_sentinels_from_sparse_grid(
sparse_grid_table, field_name=field_name,
sentinel_values=sentinel_values)
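# Usage sketch (not part of the original module): the two readers above are
# typically chained, metadata first, then the sparse grid keyed by the
# original field name.  The file path below is hypothetical.
def _demo_read_sparse_grid(netcdf_file_name='/data/myrorss/example.netcdf'):
    metadata_dict = read_metadata_from_raw_file(
        netcdf_file_name, data_source=radar_utils.MYRORSS_SOURCE_ID)
    sparse_grid_table = read_data_from_sparse_grid_file(
        netcdf_file_name,
        field_name_orig=metadata_dict[FIELD_NAME_COLUMN_ORIG],
        data_source=radar_utils.MYRORSS_SOURCE_ID,
        sentinel_values=metadata_dict[radar_utils.SENTINEL_VALUE_COLUMN])
    return metadata_dict, sparse_grid_table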
def read_data_from_full_grid_file(
netcdf_file_name, metadata_dict, raise_error_if_fails=True):
"""Reads full radar grid from raw (either MYRORSS or MRMS) file.
This file should contain one radar field at one height and valid time.
:param netcdf_file_name: Path to input file.
:param metadata_dict: Dictionary created by `read_metadata_from_raw_file`.
:param raise_error_if_fails: Boolean flag. If True and file cannot be read,
this method will raise an error. If False and file cannot be read, will
return None for all output vars.
:return: field_matrix: M-by-N numpy array with radar field. Latitude
increases while moving up each column, and longitude increases while
moving right along each row.
:return: grid_point_latitudes_deg: length-M numpy array of grid-point
latitudes (deg N). This array is monotonically decreasing.
:return: grid_point_longitudes_deg: length-N numpy array of grid-point
longitudes (deg E). This array is monotonically increasing.
"""
error_checking.assert_file_exists(netcdf_file_name)
netcdf_dataset = netcdf_io.open_netcdf(
netcdf_file_name, raise_error_if_fails)
if netcdf_dataset is None:
return None, None, None
    # read the data into memory before the dataset is closed below
    field_matrix = netcdf_dataset.variables[
        metadata_dict[FIELD_NAME_COLUMN_ORIG]][:]
netcdf_dataset.close()
min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
metadata_dict[radar_utils.LAT_SPACING_COLUMN] * (
metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
grid_point_latitudes_deg, grid_point_longitudes_deg = (
grids.get_latlng_grid_points(
min_latitude_deg=min_latitude_deg,
min_longitude_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))
field_matrix = _remove_sentinels_from_full_grid(
field_matrix, metadata_dict[radar_utils.SENTINEL_VALUE_COLUMN])
return (numpy.flipud(field_matrix), grid_point_latitudes_deg[::-1],
grid_point_longitudes_deg)
def write_field_to_myrorss_file(
field_matrix, netcdf_file_name, field_name, metadata_dict,
height_m_asl=None):
"""Writes field to MYRORSS-formatted file.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
:param field_matrix: M-by-N numpy array with one radar variable at one time.
Latitude should increase down each column, and longitude should increase
to the right along each row.
:param netcdf_file_name: Path to output file.
:param field_name: Name of radar field in GewitterGefahr format.
:param metadata_dict: Dictionary created by either
`gridrad_io.read_metadata_from_full_grid_file` or
`read_metadata_from_raw_file`.
:param height_m_asl: Height of radar field (metres above sea level).
"""
if field_name == radar_utils.REFL_NAME:
field_to_heights_dict_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
field_names=[field_name],
data_source=radar_utils.MYRORSS_SOURCE_ID,
refl_heights_m_asl=numpy.array([height_m_asl])))
else:
field_to_heights_dict_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
field_names=[field_name],
data_source=radar_utils.MYRORSS_SOURCE_ID))
field_name = list(field_to_heights_dict_m_asl.keys())[0]
radar_height_m_asl = field_to_heights_dict_m_asl[field_name][0]
if field_name in radar_utils.ECHO_TOP_NAMES:
field_matrix = METRES_TO_KM * field_matrix
field_name_myrorss = radar_utils.field_name_new_to_orig(
field_name=field_name, data_source_name=radar_utils.MYRORSS_SOURCE_ID)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
netcdf_dataset = Dataset(
netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
netcdf_dataset.setncattr(
FIELD_NAME_COLUMN_ORIG, field_name_myrorss)
netcdf_dataset.setncattr('DataType', 'SparseLatLonGrid')
netcdf_dataset.setncattr(
NW_GRID_POINT_LAT_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
NW_GRID_POINT_LNG_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
HEIGHT_COLUMN_ORIG,
METRES_TO_KM * numpy.float(radar_height_m_asl))
netcdf_dataset.setncattr(
UNIX_TIME_COLUMN_ORIG,
numpy.int32(metadata_dict[radar_utils.UNIX_TIME_COLUMN]))
netcdf_dataset.setncattr('FractionalTime', 0.)
netcdf_dataset.setncattr('attributes', ' ColorMap SubType Unit')
netcdf_dataset.setncattr('ColorMap-unit', 'dimensionless')
netcdf_dataset.setncattr('ColorMap-value', '')
netcdf_dataset.setncattr('SubType-unit', 'dimensionless')
netcdf_dataset.setncattr('SubType-value', numpy.float(radar_height_m_asl))
netcdf_dataset.setncattr('Unit-unit', 'dimensionless')
netcdf_dataset.setncattr('Unit-value', 'dimensionless')
netcdf_dataset.setncattr(
LAT_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.LAT_SPACING_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
LNG_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.LNG_SPACING_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
SENTINEL_VALUE_COLUMNS_ORIG[0], numpy.double(-99000.))
netcdf_dataset.setncattr(
SENTINEL_VALUE_COLUMNS_ORIG[1], numpy.double(-99001.))
min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
metadata_dict[radar_utils.LAT_SPACING_COLUMN] *
(metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
unique_grid_point_lats_deg, unique_grid_point_lngs_deg = (
grids.get_latlng_grid_points(
min_latitude_deg=min_latitude_deg,
min_longitude_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))
num_grid_rows = len(unique_grid_point_lats_deg)
num_grid_columns = len(unique_grid_point_lngs_deg)
field_vector = numpy.reshape(field_matrix, num_grid_rows * num_grid_columns)
grid_point_lat_matrix, grid_point_lng_matrix = (
grids.latlng_vectors_to_matrices(
unique_grid_point_lats_deg, unique_grid_point_lngs_deg))
grid_point_lat_vector = numpy.reshape(
grid_point_lat_matrix, num_grid_rows * num_grid_columns)
grid_point_lng_vector = numpy.reshape(
grid_point_lng_matrix, num_grid_rows * num_grid_columns)
real_value_indices = numpy.where(numpy.invert(numpy.isnan(field_vector)))[0]
netcdf_dataset.createDimension(
NUM_LAT_COLUMN_ORIG, num_grid_rows - 1)
netcdf_dataset.createDimension(
NUM_LNG_COLUMN_ORIG, num_grid_columns - 1)
netcdf_dataset.createDimension(
NUM_PIXELS_COLUMN_ORIG, len(real_value_indices))
row_index_vector, column_index_vector = radar_utils.latlng_to_rowcol(
grid_point_lat_vector, grid_point_lng_vector,
nw_grid_point_lat_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
nw_grid_point_lng_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN])
netcdf_dataset.createVariable(
field_name_myrorss, numpy.single, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.createVariable(
GRID_ROW_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.createVariable(
GRID_COLUMN_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.createVariable(
NUM_GRID_CELL_COLUMN_ORIG, numpy.int32, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.variables[field_name_myrorss].setncattr(
'BackgroundValue', numpy.int32(-99900))
netcdf_dataset.variables[field_name_myrorss].setncattr(
'units', 'dimensionless')
netcdf_dataset.variables[field_name_myrorss].setncattr(
'NumValidRuns', numpy.int32(len(real_value_indices)))
netcdf_dataset.variables[field_name_myrorss][:] = field_vector[
real_value_indices]
netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:] = (
row_index_vector[real_value_indices])
netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:] = (
column_index_vector[real_value_indices])
netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:] = (
numpy.full(len(real_value_indices), 1, dtype=int))
netcdf_dataset.close()
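# Usage sketch (not part of the original module; paths are hypothetical).
# `read_data_from_full_grid_file` returns the grid with latitude increasing
# upward, while the writer above expects latitude increasing downward, so the
# matrix is flipped back before writing.
def _demo_write_myrorss_file(input_file_name='/data/input.netcdf',
                             output_file_name='/data/output.netcdf'):
    metadata_dict = read_metadata_from_raw_file(
        input_file_name, data_source=radar_utils.MYRORSS_SOURCE_ID)
    field_matrix, _, _ = read_data_from_full_grid_file(
        input_file_name, metadata_dict)
    write_field_to_myrorss_file(
        field_matrix=numpy.flipud(field_matrix),
        netcdf_file_name=output_file_name,
        field_name=metadata_dict[radar_utils.FIELD_NAME_COLUMN],
        metadata_dict=metadata_dict,
        height_m_asl=metadata_dict[radar_utils.HEIGHT_COLUMN])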
|
thunderhoser/GewitterGefahr
|
gewittergefahr/gg_io/myrorss_and_mrms_io.py
|
Python
|
mit
| 38,214
|
[
"NetCDF"
] |
4d21b0fb9729d972650a61729c8ea3dafc6bfd2f1233cf9a30da3f9a1da8a752
|
import re
import pymol
import cmd
import setting
import parsing
import threading
from cmd import DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, \
is_list, is_string, is_error
QuietException = parsing.QuietException
_prefix = "_tmp_editor"
tmp_wild = _prefix + "*"
tmp_editor = _prefix + "0"
tmp_connect = _prefix + "_con"
tmp_domain = _prefix + "_dom"
tmp1 = _prefix + "1"
tmp2 = _prefix + "2"
tmp3 = _prefix + "3"
tmp4 = _prefix + "4"
# routines to assist in molecular editing
def attach_fragment(selection,fragment,hydrogen,anchor,_self=cmd):
if not selection in _self.get_names("selections"):
if fragment in _self.get_names("objects"):
print " Error: an object with than name already exists"
raise QuietException
else:
_self.fragment(fragment)
if _self.get_setting_legacy("auto_remove_hydrogens"):
_self.remove("(hydro and %s)"%fragment)
else:
_self.fragment(fragment,tmp_editor)
if _self.count_atoms("((%s) and elem h)"%selection,quiet=1):
_self.fuse("(%s and id %d)"%(tmp_editor,hydrogen),"(pk1)",1)
if _self.get_setting_legacy("auto_remove_hydrogens"):
_self.remove("(hydro and pkmol)")
else:
_self.remove("(%s and id %d)"%(tmp_editor,hydrogen))
_self.fuse("(%s and id %d)"%(tmp_editor,anchor),"(pk1)",1)
if _self.get_setting_legacy("auto_remove_hydrogens"):
_self.remove("(hydro and pkmol)")
_self.delete(tmp_editor)
def combine_fragment(selection,fragment,hydrogen,anchor,_self=cmd):
if selection in _self.get_names("selections"):
_self.fragment(fragment,tmp_editor)
if _self.count_atoms("((%s) and elem h)"%selection,quiet=1):
_self.fuse("(%s and id %d)"%(tmp_editor,hydrogen),"(pk1)",3)
if _self.get_setting_legacy("auto_remove_hydrogens"):
_self.remove("(hydro and pkmol)")
else:
_self.fuse("(%s and id %d)"%(tmp_editor,anchor),"(pk1)",3)
if _self.get_setting_legacy("auto_remove_hydrogens"):
_self.remove("(hydro and pkmol)")
_self.delete(tmp_editor)
#from time import time as ___time
#___total = 0.0
#___seg1 = 0.0
#___seg2 = 0.0
#___seg3 = 0.0
#___pass = 0
#___last = ___time()
def attach_amino_acid(selection,amino_acid,center=0,animate=-1,object="",hydro=-1,ss=-1,_self=cmd):
'''
ARGUMENTS
selection = str: named selection of single N or C atom
amino_acid = str: fragment name to load from fragment library
center = bool: center on new terminus (pk1)
animate = int: animate centering
object = str: name of new object (if selection is none)
hydro = int (-1/0/1): keep hydrogens
ss = int: Secondary structure 1=alpha helix, 2=antiparallel beta, 3=parallel beta, 4=flat
'''
# global ___total, ___seg1, ___seg2, ___seg3, ___pass, ___last
# ___mark0 = ___time()
# ___mark1 = ___time()
# ___mark2 = ___time()
# ___entry = ___time()
r = DEFAULT_SUCCESS
ss = int(ss)
center = int(center)
if hydro<0:
hydro = not int(_self.get_setting_legacy("auto_remove_hydrogens"))
if (selection not in _self.get_names('all')):
if object == "":
object = amino_acid
# create new object
if amino_acid in _self.get_names("objects"):
print "Error: an object with than name already exists"
raise QuietException
r = _self.fragment(amino_acid,object)
if not hydro:
_self.remove("(hydro and %s)"%object)
if _self.count_atoms("((%s) and name c)"%object):
_self.edit("((%s) and name c)"%object)
elif _self.count_atoms("((%s) and name n)"%object):
_self.edit("((%s) and name n)"%object)
elif _self.select(tmp_connect,"(%s) & name N,C"%selection) != 1:
print "Error: invalid connection point: must be one atom, name N or C."
_self.delete(tmp_wild)
raise QuietException
elif amino_acid in ["nhh","nme"] and _self.select(tmp_connect,"(%s) & name C"%selection) != 1:
print "Error: invalid connection point: must be C for residue '%s'"%(amino_acid)
_self.delete(tmp_wild)
raise QuietException
elif amino_acid in ["ace"] and _self.select(tmp_connect,"(%s) & name N"%selection) != 1:
print "Error: invalid connection point: must be N for residue '%s'"%(amino_acid)
_self.delete(tmp_wild)
raise QuietException
else:
if ss<0:
ss = int(_self.get_setting_legacy("secondary_structure"))
if ss:
if ss==1: # helix
phi=-57.0
psi=-47.0
elif ss==2: # antipara-beta
phi=-139.0
psi=135.0
elif ss==3: # para-beta
phi=-119.0
psi=113.0
else:
phi=180.0
psi=180.0
_self.fragment(amino_acid,tmp_editor)
if _self.count_atoms("elem n",domain=tmp_connect):
tmp = [ None ]
_self.iterate(tmp_connect,"tmp[0]=resv", space={ 'tmp' : tmp })
tmp[0] = str(tmp[0]-1) # counting down
_self.alter(tmp_editor,"resi=tmp[0]",space={ 'tmp' : tmp})
_self.fuse("(%s and name C)"%(tmp_editor),tmp_connect,2)
_self.select(tmp_domain, "byresi (pk1 | pk2)")
if not hydro:
_self.remove("(pkmol and hydro)")
if ((_self.select(tmp1,"?pk1",domain=tmp_domain)==1) and
(_self.select(tmp2,"?pk2",domain=tmp_domain)==1)):
if ((_self.select(tmp3,"(name ca,ch3 & nbr. ?pk1)",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name ca,ch3 & nbr. ?pk2)",domain=tmp_domain)==1)):
_self.set_dihedral(tmp4,tmp2,tmp1,tmp3,180.0)
                _self.set_geometry(tmp2,3,3) # make nitrogen planar
if hydro:
_self.h_fix(tmp2) # fix hydrogen position
if ss:
if amino_acid[0:3]!='pro':
if ((_self.select(tmp4,
"((!r;pro) & name c & nbr. (name ca & nbr. "+tmp2+"))",
domain=tmp_domain)==1) and
(_self.select(tmp3,
"((!r;pro) & name ca & nbr. "+tmp2+")",
domain=tmp_domain)==1)):
_self.set_dihedral( # PHI
tmp4, # C
tmp3, # CA
tmp2, # N
tmp1, # C
phi)
if ((_self.select(tmp4,"(name n & nbr. (name ca & nbr. "+tmp1+"))",
domain=tmp_domain)==1) and
(_self.select(tmp3,"(name ca & nbr. "+tmp1+")",domain=tmp_domain)==1)):
_self.set_dihedral( # PSI (n-1)
tmp2, # N
tmp1, # C
tmp3, # CA
tmp4, # N
psi)
sele = ("(name N & (byres nbr. %s) &! (byres %s))"% (tmp_connect,tmp_connect))
if _self.select(tmp1,sele,domain=tmp_domain):
_self.edit(tmp1)
if center:
_self.center(tmp1,animate=animate)
elif _self.count_atoms("elem c",domain=tmp_connect): # forward
tmp = [ None ]
_self.iterate(tmp_connect,"tmp[0]=resv", space={ 'tmp' : tmp })
tmp[0] = str(tmp[0]+1) # counting up
_self.alter(tmp_editor,"resi=tmp[0]",space={ 'tmp' : tmp})
_self.fuse("(%s and name N)"%tmp_editor,tmp_connect,2)
_self.select(tmp_domain, "byresi (pk1 | pk2)")
if not hydro:
_self.remove("(pkmol and hydro)")
if (( _self.select(tmp1,"?pk1",domain=tmp_domain)==1) and
( _self.select(tmp2,"?pk2",domain=tmp_domain)==1)):
# ___mark1 = ___time()
if ((_self.select(tmp3,"(name ca,ch3 & nbr. ?pk1)",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name ca,ch3 & nbr. ?pk2)",domain=tmp_domain)==1)):
_self.set_dihedral(tmp4,tmp2,tmp1,tmp3,180.0)
_self.set_geometry("pk1",3,3) # make nitrogen planer
if hydro:
_self.h_fix("pk1") # fix hydrogen position
if ss:
if hydro and amino_acid[0:3]=='nhh': # fix amide hydrogens
if ((_self.select(tmp3,"(name h1 & nbr. "+tmp1+")",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name o & nbr. "+tmp2+")",domain=tmp_domain)==1)):
_self.set_dihedral(
tmp4, # O
tmp2, # C
tmp1, # N
tmp3, # H1
180)
if amino_acid[0:3]!='pro':
if ((_self.select(tmp3,"(name ca & nbr. "+tmp1+")",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name c & nbr. (name ca & nbr. "+tmp1+"))",domain=tmp_domain)==1)):
_self.set_dihedral( # PHI
tmp2, # C
tmp1, # N
tmp3, # CA
tmp4, # C
phi)
if ((_self.select(tmp3,"(name ca & nbr. "+tmp2+")",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name n & nbr. (name ca & nbr. "+tmp2+"))",domain=tmp_domain)==1)):
_self.set_dihedral( # PSI (n-1)
tmp4, # N
tmp3, # CA
tmp2, # C
tmp1, # N
psi)
# ___mark2 = ___time()
sele = ("(name C & (byres nbr. %s) & !(byres %s))"% (tmp_connect,tmp_connect))
if _self.select(tmp1,sele,domain=tmp_domain):
_self.edit(tmp1)
if center:
_self.center(tmp1,animate=animate)
else:
_self.unpick()
elif _self.count_atoms("((%s) and elem h)"%selection):
print "Error: please pick a nitrogen or carbonyl carbon to grow from."
_self.delete(tmp_wild)
raise QuietException
else:
print "Error: unable to attach fragment."
_self.delete(tmp_wild)
raise QuietException
_self.delete(tmp_wild)
# ___exit = ___time()
# ___seg1 = ___seg1 + ___mark1 - ___entry
# ___seg2 = ___seg2 + ___mark2 - ___mark1
# ___seg3 = ___seg3 + ___exit - ___mark2
# ___total = ___total + ___exit - ___entry
# ___pass = ___pass + 1
# print "%0.3f %0.3f %0.3f / %0.3f + %0.3f + %0.3f = %0.3f vs %0.3f"%(___seg1/___total,___seg2/___total,___seg3/___total,
# ___seg1/___pass, ___seg2/___pass, ___seg3/___pass,
# ___total/___pass, (___time()-___last) - (___exit - ___entry))
# ___last = ___time()
return r
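# Usage sketch (not part of the original file): grow one residue onto a
# picked terminus inside a live PyMOL session.  Object and residue names
# below are hypothetical.
#
#     cmd.edit("prot and chain A and resi 10 and name C")  # sets pk1
#     attach_amino_acid("pk1", "ala", center=1, ss=1)      # helical phi/psi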
_aa_codes = {
'A' : 'ala',
'B' : 'ace',
'C' : 'cys',
'D' : 'asp',
'E' : 'glu',
'F' : 'phe',
'G' : 'gly',
'H' : 'his',
'I' : 'ile',
'K' : 'lys',
'L' : 'leu',
'M' : 'met',
'N' : 'asn',
'P' : 'pro',
'Q' : 'gln',
'R' : 'arg',
'S' : 'ser',
'T' : 'thr',
'V' : 'val',
'W' : 'trp',
'Y' : 'tyr',
'Z' : 'nme',
}
_fab_codes = {
'peptide' : _aa_codes,
}
_pure_number = re.compile("[0-9]+")
def _fab(input,name,mode,resi,chain,segi,state,dir,hydro,ss,quiet,_self=cmd):
r = DEFAULT_ERROR
code = _fab_codes.get(mode,None)
quiet = int(quiet)
resi = int(resi)
state = int(state)
dir = int(dir)
hydro = int(hydro)
if hydro < 0:
hydro = not _self.get_setting_boolean("auto_remove_hydrogens")
seq_len = 0
if (mode == 'peptide') and is_string(input):
# '123/ ADC B/234/ AFCD' to [ '123/','A','D','C','B/234/','F','C','D' ]
frags = input.split()
input = []
for frag in frags:
if '/' in frag:
input.append(frag)
else:
seq_len = seq_len + len(frag)
input.extend(list(frag))
input.append("/") # breaks chain
if name == None:
name = _self.get_unused_name("obj")
# if mode in [ 'smiles' ]: # small molecule (FUTURE)
# from chempy.champ import Champ
# ch = Champ()
# ch.insert_pattern_string(input)
if mode in [ 'peptide' ]: # polymers
if (seq_len>99) and not quiet:
print " Generating a %d residue peptide from sequence..."%seq_len
input.reverse()
sequence = input
if code != None:
while len(sequence):
while len(sequence) and '/' in sequence[-1]:
part = sequence.pop().split('/')
if len(part)>1:
if len(part[-2]):
resi = int(part[-2])
if len(part)>2:
chain = part[-3]
if len(part)>3:
segi = part[-4]
if len(sequence) and not _self.count_atoms("?pk1"): # new polymer segment
tmp_obj = _self.get_unused_name()
first = sequence.pop()
_self.fragment(code[first], tmp_obj)
if not hydro:
                        _self.remove(tmp_obj + ' and hydro')
_self.alter(tmp_obj,'resi="""%s""";chain="""%s""";segi="""%s"""'%(resi,chain,segi))
_self.create(name,tmp_obj+" or ?"+name,1,1,zoom=0)
tmp_sel = _self.get_unused_name()
if mode == 'peptide':
if dir>0:
_self.select(tmp_sel,"name c and "+tmp_obj)
resi = resi + 1
else:
_self.select(tmp_sel,"name n and "+tmp_obj)
resi = resi - 1
_self.edit(name+" in "+tmp_sel) # set the editor's pk1 selection
_self.delete(tmp_sel+" "+tmp_obj)
if mode == 'peptide':
while len(sequence):
if '/' in sequence[-1]:
_self.unpick() # break chain at this point
break
if not _self.count_atoms("?pk1"):
break
else:
attach_amino_acid("pk1",code[sequence.pop()],animate=0,ss=ss,hydro=hydro,_self=_self)
if dir>0:
resi = resi + 1
else:
resi = resi - 1
if not len(sequence):
r = DEFAULT_SUCCESS
def fab(input,name=None,mode='peptide',resi=1,chain='',segi='',state=-1,
dir=1,hydro=-1,ss=0,async=-1,quiet=1,_self=cmd):
if async<1:
r = _fab(input,name,mode,resi,chain,segi,
state,dir,hydro,ss,quiet,_self)
else:
fab_thread = threading.Thread(target=_fab, args=(input,name,mode,
resi,chain,
segi,state,dir,
hydro,ss,quiet,_self))
fab_thread.setDaemon(1)
fab_thread.start()
r = DEFAULT_SUCCESS
return r
def build_peptide(sequence,_self=cmd): # legacy
for aa in sequence:
attach_amino_acid("pk1",_aa_codes[aa])
|
gratefulfrog/lib
|
python/pymol/editor.py
|
Python
|
gpl-2.0
| 16,247
|
[
"ChemPy",
"PyMOL"
] |
c5b444eedf1c810e6ce040d9205809cb891c9460dd1d5b6ba49795573846c28f
|
# http://curtis.schlak.com/2012/01/04/python-visitor-pattern-helper.html
# http://curtis.schlak.com/2013/06/20/follow-up-to-python-visitor-pattern.html
# visit.py
# Updated 2013-06-20 to fix bug on line 41
import inspect
__all__ = ['on', 'when']
def on(param_name):
def f(fn):
dispatcher = Dispatcher(param_name, fn)
return dispatcher
return f
def when(param_type):
def f(fn):
frame = inspect.currentframe().f_back
dispatcher = frame.f_locals[fn.func_name]
if not isinstance(dispatcher, Dispatcher):
dispatcher = dispatcher.dispatcher
dispatcher.add_target(param_type, fn)
def ff(*args, **kw):
return dispatcher(*args, **kw)
ff.dispatcher = dispatcher
return ff
return f
class Dispatcher(object):
def __init__(self, param_name, fn):
frame = inspect.currentframe().f_back.f_back
top_level = frame.f_locals == frame.f_globals
self.param_index = inspect.getargspec(fn).args.index(param_name)
self.param_name = param_name
self.targets = {}
def __call__(self, *args, **kw):
typ = args[self.param_index].__class__
d = self.targets.get(typ)
if d is not None:
return d(*args, **kw)
else:
issub = issubclass
t = self.targets
ks = t.iterkeys()
return [t[k](*args, **kw) for k in ks if issub(typ, k)]
def add_target(self, typ, target):
self.targets[typ] = target
class BaseNode(object):
def accept(self, visitor):
return visitor.visit(self)
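# Minimal usage sketch (not part of the original file).  The @on-decorated
# body is only a placeholder; each @when registers a concrete target, and
# Dispatcher.__call__ picks one by the runtime class of `node`.
class PrintVisitor(object):
    @on('node')
    def visit(self, node):
        pass  # never called directly; dispatch happens in Dispatcher.__call__
    @when(BaseNode)
    def visit(self, node):
        return 'base node'
    @when(int)
    def visit(self, node):
        return 'int %d' % node
# PrintVisitor().visit(3) returns 'int 3';
# BaseNode().accept(PrintVisitor()) returns 'base node'.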
|
plum-umd/java-sketch
|
lib/visit.py
|
Python
|
mit
| 1,481
|
[
"VisIt"
] |
5a6c1d8598f27e29904b35e1c639cccca7431a8f9b19eef3ec218e4fb1884172
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import numpy, time, cPickle, gzip, sys, os, copy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import logging
class MixtureDensityOutputLayer(object):
def __init__(self, rng, input, n_in, n_out, n_component, var_floor):
self.input = input
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out*n_component))
self.W_mu = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W_mu', borrow=True)
self.W_sigma = theano.shared(value=numpy.asarray(W_value.copy(), dtype=theano.config.floatX), name='W_sigma', borrow=True)
W_mix_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_component))
self.W_mix = theano.shared(value=numpy.asarray(W_mix_value, dtype=theano.config.floatX), name='W_mix', borrow=True)
self.mu = T.dot(self.input, self.W_mu) # assume linear output for mean vectors
#self.sigma = T.nnet.softplus(T.dot(self.input, self.W_sigma)) # + 0.0001
self.sigma = T.exp(T.dot(self.input, self.W_sigma)) # Zen et al. 2014
self.sigma = T.maximum(var_floor, self.sigma) # hard variance flooring
# note: sigma contains variances, so var_floor=0.01 means that
# the lowest possible standard deviation is 0.1
self.mix = T.nnet.softmax(T.dot(self.input, self.W_mix))
self.delta_W_mu = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_mu')
self.delta_W_sigma = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_sigma')
self.delta_W_mix = theano.shared(value = numpy.zeros((n_in, n_component),
dtype=theano.config.floatX), name='delta_W_mix')
self.params = [self.W_mu, self.W_sigma, self.W_mix]
self.delta_params = [self.delta_W_mu, self.delta_W_sigma, self.delta_W_mix]
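# Sketch of the criterion this layer is normally trained with (not part of
# the original file): negative log-likelihood of targets `y` under a mixture
# of `n_component` diagonal Gaussians built from mu/sigma/mix above.  `y` is
# an (n_examples, n_out) matrix; remember that sigma holds variances.
def mdn_negative_log_likelihood(mdn_layer, y, n_out, n_component):
    two_pi = 2.0 * numpy.pi
    component_log_probs = []
    for k in xrange(n_component):
        mu_k = mdn_layer.mu[:, k * n_out:(k + 1) * n_out]
        var_k = mdn_layer.sigma[:, k * n_out:(k + 1) * n_out]
        # log N(y | mu_k, diag(var_k)) per example
        log_gauss = -0.5 * T.sum(
            T.log(two_pi * var_k) + ((y - mu_k) ** 2) / var_k, axis=1)
        component_log_probs.append(T.log(mdn_layer.mix[:, k]) + log_gauss)
    stacked = T.concatenate(
        [lp.dimshuffle(0, 'x') for lp in component_log_probs], axis=1)
    # log-sum-exp over components for numerical stability
    max_log = T.max(stacked, axis=1, keepdims=True)
    log_prob = max_log.flatten() + T.log(
        T.sum(T.exp(stacked - max_log), axis=1))
    return -T.mean(log_prob)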
class LinearLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None):
n_in = int(n_in) # ensure sizes have integer type
n_out = int(n_out)# ensure sizes have integer type
self.input = input
        # initialise the weights W from a scaled Gaussian if not provided
if W is None:
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out))
W = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.input, self.W) + self.b
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class SigmoidLayer(object):
def __init__(self, rng, x, n_in, n_out, W = None, b = None, activation = T.tanh, p=0.0, training=0):
n_in = int(n_in) # ensure sizes have integer type
n_out = int(n_out)# ensure sizes have integer type
self.x = x
        if p > 0.0:
            if training==1:
                srng = RandomStreams(seed=123456)
                # p is the dropout rate, so keep each unit with prob (1 - p),
                # matching the (1-p) test-time scaling below
                self.x = T.switch(srng.binomial(size=x.shape, p=1.0-p), x, 0)
            else:
                self.x = (1-p) * x
        # initialise the weights W from a scaled Gaussian if not provided
if W is None:
W_value = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_value,
name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.x, self.W) + self.b
self.output = activation(self.output)
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class GeneralLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None, activation = 'linear'):
self.input = input
self.n_in = n_in
self.n_out = n_out
logger = logging.getLogger('general_layer')
        # randomly initialise the weights, scaled by 1/sqrt(n_in) as advised
        # in 'Neural Networks: Tricks of the Trade'
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if activation == 'sigmoid':
self.output = T.nnet.sigmoid(lin_output)
elif activation == 'tanh':
self.output = T.tanh(lin_output)
elif activation == 'linear':
self.output = lin_output
elif activation == 'ReLU': ## rectifier linear unit
self.output = T.maximum(0.0, lin_output)
        elif activation == 'ReSU': ## rectifier smooth unit (softplus)
            self.output = T.log(1.0 + T.exp(lin_output))
        else:
            logger.critical('the input activation function: %s is not supported right now. Please modify layers.py to support' % (activation))
            raise ValueError('unsupported activation: %s' % activation)
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
errors = T.mean(T.sum((self.output-y)**2, axis=1))
return errors
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh, do_maxout = False, pool_size = 1,
do_pnorm = False, pnorm_order = 1):
""" Class for hidden layer """
self.input = input
self.n_in = n_in
self.n_out = n_out
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if do_maxout == True:
self.last_start = n_out - pool_size
self.tmp_output = lin_output[:,0:self.last_start+1:pool_size]
for i in range(1, pool_size):
cur = lin_output[:,i:self.last_start+i+1:pool_size]
self.tmp_output = T.maximum(cur, self.tmp_output)
self.output = activation(self.tmp_output)
elif do_pnorm == True:
self.last_start = n_out - pool_size
self.tmp_output = abs(lin_output[:,0:self.last_start+1:pool_size]) ** pnorm_order
for i in range(1, pool_size):
cur = abs(lin_output[:,i:self.last_start+i+1:pool_size]) ** pnorm_order
self.tmp_output = self.tmp_output + cur
self.tmp_output = self.tmp_output ** (1.0 / pnorm_order)
self.output = activation(self.tmp_output)
else:
self.output = (lin_output if activation is None
else activation(lin_output))
# self.output = self.rectifier_linear(lin_output)
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def rectifier_linear(self, x):
x = T.maximum(0.0, x)
return x
def rectifier_smooth(self, x):
        x = T.log(1.0 + T.exp(x))  # softplus; use symbolic ops, not numpy
return x
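    # Pooling sketch (explanatory note, not in the original file): with
    # n_out=6 and pool_size=2 the maxout branch above takes the strided
    # column slices [0::2] and [1::2] and maxes them element-wise, pairing
    # linear units (0,1), (2,3), (4,5) into 3 pooled outputs; the p-norm
    # branch combines each pair as (|a|**p + |b|**p)**(1/p) instead.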
class SplitHiddenLayer(object):
'''
The nin x nout matrix is vertically split into 2 portions which can be updated
independently.
n_in1 -- by convention, use this part for subword contexts
n_in2 -- by convention, use this part for word projections
Bias is not split in any way.
'''
def __init__(self, rng, input, n_in1, n_in2, n_out, W1=None, W2=None, b=None,
activation=T.tanh, do_maxout = False, pool_size = 1,
do_pnorm = False, pnorm_order = 1):
""" Class for hidden layer """
self.input = input
#self.n_in = n_in
self.n_out = n_out
if W1 is None:
W1_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in1),
size=(n_in1, n_out)), dtype=theano.config.floatX)
W1 = theano.shared(value=W1_values, name='W1', borrow=True)
if W2 is None:
            W2_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in2),
                size=(n_in2, n_out)), dtype=theano.config.floatX)
W2 = theano.shared(value=W2_values, name='W2', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W1 = W1
self.W2 = W2
self.b = b
self.delta_W1 = theano.shared(value = numpy.zeros((n_in1,n_out),
dtype=theano.config.floatX), name='delta_W1')
self.delta_W2 = theano.shared(value = numpy.zeros((n_in2,n_out),
dtype=theano.config.floatX), name='delta_W2')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, T.concatenate([self.W1, self.W2])) + self.b
if do_maxout == True:
self.last_start = n_out - pool_size
self.tmp_output = lin_output[:,0:self.last_start+1:pool_size]
for i in range(1, pool_size):
cur = lin_output[:,i:self.last_start+i+1:pool_size]
self.tmp_output = T.maximum(cur, self.tmp_output)
self.output = activation(self.tmp_output)
elif do_pnorm == True:
self.last_start = n_out - pool_size
self.tmp_output = abs(lin_output[:,0:self.last_start+1:pool_size]) ** pnorm_order
for i in range(1, pool_size):
cur = abs(lin_output[:,i:self.last_start+i+1:pool_size]) ** pnorm_order
self.tmp_output = self.tmp_output + cur
self.tmp_output = self.tmp_output ** (1.0 / pnorm_order)
self.output = activation(self.tmp_output)
else:
self.output = (lin_output if activation is None
else activation(lin_output))
# self.output = self.rectifier_linear(lin_output)
# parameters of the model
self.params = [self.W1, self.W2, self.b]
self.delta_params = [self.delta_W1, self.delta_W2, self.delta_b]
def rectifier_linear(self, x):
x = T.maximum(0.0, x)
return x
def rectifier_smooth(self, x):
        x = T.log(1.0 + T.exp(x))  # softplus; use symbolic ops, not numpy
return x
class TokenProjectionLayer(object):
'''
    A single projection, not shared.  Merging of outputs with non-projected inputs is handled elsewhere.
'''
def __init__(self, rng, input, projection_insize, projection_outsize, initial_projection_distrib='gaussian'):
self.input = input
self.params = []
self.delta_params = []
#self.n_in = n_in
self.projection_insize = projection_insize
self.projection_outsize = projection_outsize
if initial_projection_distrib == 'gaussian':
W_values = numpy.asarray(rng.normal(0.0, 0.1,
size=(projection_insize, projection_outsize)),
dtype=theano.config.floatX)
elif initial_projection_distrib == 'uniform':
# W_values = numpy.asarray(rng.uniform(low=-0.02, high=0.02,
W_values = numpy.asarray(rng.uniform(low=0.0, high=1.0,
size=(projection_insize, projection_outsize)),
dtype=theano.config.floatX)
elif initial_projection_distrib == 'zeros':
W_values = numpy.zeros((projection_insize, projection_outsize),
dtype=theano.config.floatX)
elif initial_projection_distrib == '4mix':
            ## TODO -- generalise to other n_modes and higher dimensional CVs
means = [(-0.5, -0.5), (0.5, 0.5), (0.5, -0.5), (-0.5, 0.5)]
var = (0.1, 0.1)
W_prelim = []
for mean in means:
W_prelim.append(
numpy.asarray(rng.normal(mean, var,
size=(projection_insize / len(means), projection_outsize)),
dtype=theano.config.floatX)
)
W_values = numpy.vstack(W_prelim)
rng.shuffle(W_values)
else:
            sys.exit( 'initial_projection_distrib must be one of: gaussian, uniform, zeros, 4mix' )
W = theano.shared(value=W_values, name='W', borrow=True)
delta_W = theano.shared(value = numpy.zeros((projection_insize, projection_outsize),
dtype=theano.config.floatX), name='delta_W')
self.params.append(W)
self.delta_params.append(delta_W)
self.output = T.dot(self.input, W)
class dA(object):
def __init__(self, theano_rng = None, input = None,
n_visible= None, n_hidden= None, W = None, bhid = None,
bvis = None, activation=None, firstlayer = 1, variance = None ):
self.n_visible = n_visible
self.n_hidden = n_hidden
        if not W:
            # theano_rng draws are symbolic, so use a concrete numpy
            # RandomState here; scale by 1/sqrt(n_visible) (the original
            # used an undefined n_in)
            numpy_rng = numpy.random.RandomState(123)
            initial_W = numpy.asarray(numpy_rng.normal(0.0, 1.0/numpy.sqrt(n_visible),
                         size=(n_visible, n_hidden)), dtype=theano.config.floatX)
            W = theano.shared(value = initial_W, name ='W')
#initial_W = numpy.asarray( numpy_rng.uniform(
# low = -4*numpy.sqrt(6./(n_hidden+n_visible)),
# high = 4*numpy.sqrt(6./(n_hidden+n_visible)),
# size = (n_visible, n_hidden)),
# dtype = theano.config.floatX)
if not bvis:
bvis = theano.shared(value = numpy.zeros(n_visible,
dtype = theano.config.floatX))
if not bhid:
bhid = theano.shared(value = numpy.zeros(n_hidden,
dtype = theano.config.floatX), name ='b')
self.W = W
self.b = bhid
self.b_prime = bvis
self.W_prime = self.W.T
self.theano_rng = theano_rng
self.activation = activation
if input == None :
self.x = T.dmatrix(name = 'input')
else:
self.x = input
self.params = [self.W, self.b, self.b_prime]
# first layer, use Gaussian noise
self.firstlayer = firstlayer
if self.firstlayer == 1 :
if variance == None :
self.var = T.vector(name = 'input')
else :
self.var = variance
else :
self.var = None
def apply_activation(self, lin_output, activation):
if activation == 'SIGMOID':
final_output = T.nnet.sigmoid(lin_output)
elif activation == 'TANH':
final_output = T.tanh(lin_output)
elif activation == 'LINEAR':
final_output = lin_output
elif activation == 'ReLU': ## rectifier linear unit
final_output = T.maximum(0.0, lin_output)
        elif activation == 'ReSU': ## rectifier smooth unit (softplus)
            final_output = T.log(1.0 + T.exp(lin_output))
        else:
            # note: dA defines no self.logger, so use the logging module
            logging.critical('the input activation function: %s is not supported right now. Please modify layers.py to support' % (activation))
            raise ValueError('unsupported activation: %s' % activation)
return final_output
def get_corrupted_input(self, input, corruption_level):
if self.firstlayer == 0 :
return self.theano_rng.binomial(
size = input.shape,
n = 1,
p = 1 - corruption_level,
dtype=theano.config.floatX) * input
else :
noise = self.theano_rng.normal( size = input.shape,
dtype = theano.config.floatX)
denoises = noise * self.var * corruption_level
return input+denoises
def get_hidden_values(self, input):
return self.apply_activation((T.dot(input, self.W) + self.b), self.activation)
def get_reconstructed_input(self, hidden):
if self.firstlayer == 1 :
return T.dot(hidden, self.W_prime) + self.b_prime
else :
return self.apply_activation((T.dot(hidden, self.W_prime) + self.b_prime), self.activation)
def get_cost_updates(self, corruption_level, learning_rate):
#if corruption_level == 0:
# tilde_x = self.x
#else:
# tilde_x = self.get_corrupted_input(self.x, corruption_level)
tilde_x = self.x
y = self.get_hidden_values(tilde_x)
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1 )
cost = T.mean(L) / 2
gparams = T.grad(cost, self.params)
updates = {}
for param, gparam in zip(self.params, gparams):
updates[param] = param - learning_rate*gparam
return (cost, updates)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
def get_test_cost(self, corruption_level):
""" This function computes the cost and the updates for one trainng
step of the dA """
# tilde_x = self.get_corrupted_input(self.x, corruption_level, 0.5)
y = self.get_hidden_values( self.x )
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1)
cost = T.mean(L)
return cost
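# Usage sketch (not part of the original file): wire one dA up for minibatch
# pre-training with plain SGD.  `data_x` must be a theano shared matrix of
# training frames; the batch size and learning rate here are illustrative.
def _demo_pretrain_dA(data_x, n_visible=100, n_hidden=50, batch_size=10):
    theano_rng = RandomStreams(seed=123)
    x = T.matrix('x')
    layer = dA(theano_rng=theano_rng, input=x, n_visible=n_visible,
               n_hidden=n_hidden, activation='SIGMOID', firstlayer=0)
    cost, updates = layer.get_cost_updates(corruption_level=0.0,
                                           learning_rate=0.01)
    index = T.lscalar('index')
    train_fn = theano.function(
        [index], cost, updates=updates,
        givens={x: data_x[index * batch_size:(index + 1) * batch_size]})
    return train_fn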
|
ashmanmode/TTSDNNRepo
|
src/layers/layers.py
|
Python
|
apache-2.0
| 23,616
|
[
"Gaussian"
] |
945689fc7b6ed502cf405fd714f6468feb3fff42bc3bd3ff6e3ce7e04d04b8b8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".paperstyle")
#-------------------------------------------------------------------------
#
# Page orientation
#
#-------------------------------------------------------------------------
PAPER_PORTRAIT = 0
PAPER_LANDSCAPE = 1
#------------------------------------------------------------------------
#
# PaperSize
#
#------------------------------------------------------------------------
class PaperSize(object):
"""
Defines the dimensions of a sheet of paper. All dimensions are in
centimeters.
"""
def __init__(self, name, height, width):
"""
        Create a new paper size.
:param name: name of the new style
:param height: page height in centimeters
:param width: page width in centimeters
"""
self.name = name
self.height = height
self.width = width
if self.name == 'Letter':
self.trans_pname = _('paper size|Letter')
elif self.name == 'Legal':
self.trans_pname = _('paper size|Legal')
elif self.name == 'Custom Size':
self.trans_pname = _('Custom Size')
else:
self.trans_pname = None
def get_name(self):
"Return the name of the paper style"
return self.name
def get_height(self):
"Return the page height in cm"
return self.height
def set_height(self, height):
"Set the page height in cm"
self.height = height
def get_width(self):
"Return the page width in cm"
return self.width
def set_width(self, width):
"Set the page width in cm"
self.width = width
def get_height_inches(self):
"Return the page height in inches"
return self.height / 2.54
def get_width_inches(self):
"Return the page width in inches"
return self.width / 2.54
#------------------------------------------------------------------------
#
# PaperStyle
#
#------------------------------------------------------------------------
class PaperStyle(object):
"""
Define the various options for a sheet of paper.
"""
def __init__(self, size, orientation,
lmargin=2.54, rmargin=2.54, tmargin=2.54, bmargin=2.54):
"""
Create a new paper style.
:param size: size of the new style
:type size: :class:`.PaperSize`
:param orientation: page orientation
:type orientation: PAPER_PORTRAIT or PAPER_LANDSCAPE
"""
self.__orientation = orientation
if orientation == PAPER_PORTRAIT:
self.__size = PaperSize(size.get_name(),
size.get_height(),
size.get_width())
else:
self.__size = PaperSize(size.get_name(),
size.get_width(),
size.get_height())
self.__lmargin = lmargin
self.__rmargin = rmargin
self.__tmargin = tmargin
self.__bmargin = bmargin
def get_size(self):
"""
Return the size of the paper.
:returns: object indicating the paper size
:rtype: :class:`.PaperSize`
"""
return self.__size
def get_orientation(self):
"""
Return the orientation of the page.
        :returns: PAPER_PORTRAIT or PAPER_LANDSCAPE
:rtype: int
"""
return self.__orientation
def get_usable_width(self):
"""
Return the width of the page area in centimeters.
The value is the page width less the margins.
"""
return self.__size.get_width() - (self.__rmargin + self.__lmargin)
def get_usable_height(self):
"""
Return the height of the page area in centimeters.
The value is the page height less the margins.
"""
return self.__size.get_height() - (self.__tmargin + self.__bmargin)
def get_right_margin(self):
"""
Return the right margin.
:returns: Right margin in centimeters
:rtype: float
"""
return self.__rmargin
def get_left_margin(self):
"""
Return the left margin.
:returns: Left margin in centimeters
:rtype: float
"""
return self.__lmargin
def get_top_margin(self):
"""
Return the top margin.
:returns: Top margin in centimeters
:rtype: float
"""
return self.__tmargin
def get_bottom_margin(self):
"""
Return the bottom margin.
:returns: Bottom margin in centimeters
:rtype: float
"""
return self.__bmargin
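# Usage sketch (not part of the original file): an A4 sheet in landscape and
# its usable area with the default one-inch (2.54 cm) margins.
#
#     a4 = PaperSize("A4", 29.7, 21.0)          # height, width in cm
#     style = PaperStyle(a4, PAPER_LANDSCAPE)   # swaps height and width
#     style.get_usable_width()   # 29.7 - 2 * 2.54 = 24.62 cm
#     style.get_usable_height()  # 21.0 - 2 * 2.54 = 15.92 cm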
|
pmghalvorsen/gramps_branch
|
gramps/gen/plug/docgen/paperstyle.py
|
Python
|
gpl-2.0
| 6,428
|
[
"Brian"
] |
3ebbd4d3302414b62f1318265283748b372bbaa6898de7586b16a3a2b99790ca
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
A row of four artificial neurons (an ANN)
to recognize smiley faces as follows:
- 1: Happy :)
- 2: Sad :(
- 3: Mischievous >)
- 4: Mad >(
Generate pydoc by running :
`$ pydoc -w faces`
"""
__author__ = 'Benjamin Sientzoff'
__date__ = '11 December 2015'
__version__ = '1.0'
__license__ = "GNU GENERAL PUBLIC LICENSE V.2, June 1991"
# useful for stderr output
#from __future__ import print_function
import neuron
from neuron import Neuron
import sys
from random import shuffle
class ANN :
"""
    An Artificial Neural Network to recognize faces
"""
def __init__( self ) :
self.__ann__ = [Neuron(400) for i in range( 4 )]
def __perform__( self, image ) :
"""
Use the ANN to recognize an image
:param image: the image to recognize
        :return: index of the most activated neuron
"""
res = [i for i in range( 4 )]
for i in range( 4 ) :
res[i] = self.__ann__[i].g( image )
return max( enumerate(res), key=( lambda x : x[1] ) )[0]
def train( self, training_set, answers, learning_rate=.005 ) :
"""
Train an Artificial Neural Network
:param training_set: the inputs for the training
:param answers: the desired outputs for the training set
:param learning_rate: Learning rate for the training (default 0.005)
"""
# for each inputs
for key in training_set :
# get the neuron supposed to be activated
right_neuron = answers[key] - 1
# get the most activated neuron
activated_neuron = self.__perform__( training_set[key] )
# if the right neuron is not activated
if right_neuron != activated_neuron :
# for each neuron
for i in range( 4 ) :
                    # compute the wanted output
searched_output = 0. + int(i == right_neuron)
# adjust ann sensitivity according to the error
self.__ann__[i].learn( training_set[key], searched_output, learning_rate )
def recognize( self, faces ) :
"""
Recognize faces
:param faces: the test set to be used on the ANN
        :return: dictionary mapping each face name to the recognized class
"""
res = {}
# for each images
for face in faces :
# recognize face and store the result in a dictionary
res[face] = self.__perform__( faces[face] ) + 1
# return the result
return res
def read_images( test_file_name ) :
"""
    Create a dictionary of images read from a given file
:param test_file_name: File name which stores the images
:return: dictionary image_name -> image
"""
    # open the file
faces_f = open( test_file_name, 'r' )
# start read the file
line = faces_f.readline()
# create a dictionary
images = {}
# initiate a name for the first image
#img_name = "imageX"
    # while there are lines in the file
while line:
# skip comments
if line.startswith( "#" ) :
line = faces_f.readline()
        # if the line starts with a capital I
if line.startswith( 'I' ) :
# create a new entry for the dictionary
img_name = line.replace( '\n', '' )
# then the next lines are the image grey pixels value
line = faces_f.readline()
# convert string values on the line to integers
image_row = [int( x ) for x in line.split()]
            # initialise a matrix, flattened into a vector, to put the image in
img = [0 for i in range( len( image_row ) * len( image_row ) )]
# for each row of the image
for row in range( len( image_row ) ) :
# convert string values on the line to integers
image_row = [int( x ) for x in line.split()]
# for each pixels on the row
for colum in range( len( image_row ) ) :
                    # convert the value to float, scale it into a suitable
                    # range, and store it in the matrix
img[row * len( image_row ) + colum] = float(image_row[colum])/32.
# the next line is the next row of the image
line = faces_f.readline()
# then link the read image name with the image itself
images[img_name] = img
# read the next image
line = faces_f.readline()
    # return the dictionary mapping image names to their representations
return images
def read_facit( facit_file_name ) :
"""
Get the answer of a test from a file and store it in a dictionary
:param facit_file_name: name of the file containing answer of the training set
:return: dictionary storing for each images the answer
"""
# open the file
facit_f = open( facit_file_name, 'r' )
# initiate a dictionary
facit = {}
# for each line
for line in facit_f :
# if the line contains a image name and the answer
if line.startswith( "I" ) :
# get the words on the line
words = line.split()
# create an entry in the dictionary with the image name
# and convert to integer the answer associated
facit[words[0]] = int( words[1] )
# return the dictionary
return facit
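# Usage sketch (not part of the original file; file names are hypothetical):
#
#     images = read_images("training.txt")
#     facit = read_facit("training-facit.txt")
#     ann = ANN()
#     ann.train(images, facit)
#     results = ann.recognize(read_images("test.txt"))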
def compare_images_key( key1, key2 ) :
"""
    Compare two image-set keys by their numeric suffix
    :param key1: first key to compare
    :param key2: second key to compare
    :return: negative, zero or positive integer giving the order of key1 and key2
"""
return int( key1[5:] ) - int( key2[5:] )
def cmp_to_key( comparator ):
"""
Convert a cmp= function into a key= function
:param comparator: function to compare keys
"""
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return comparator(self.obj, other.obj) < 0
def __gt__(self, other):
return comparator(self.obj, other.obj) > 0
def __eq__(self, other):
return comparator(self.obj, other.obj) == 0
def __le__(self, other):
return comparator(self.obj, other.obj) <= 0
def __ge__(self, other):
return comparator(self.obj, other.obj) >= 0
def __ne__(self, other):
return comparator(self.obj, other.obj) != 0
return K
if __name__ == "__main__" :
# define a help message
help = "\nUsage :\n $ python faces.py train facit test\n train: the training set\n facit: the training solution\n test : file for test\n"
# require 3 arguments
if 4 == len( sys.argv ) :
# get the images for the training set
training_images = read_images( sys.argv[1] )
# get the answers for the training set
facit = read_facit( sys.argv[2] )
training_keys = [key for key in training_images]
# compute size of training subsets
tst_end = len( training_keys )
        # train on approx. 80% of the images and test on the 20% remaining
tst_start = int( tst_end * .80 )
# put training subsets in an array
training_subset = [{}, {}]
training_subset[0] = { key : training_images[key] for key in training_keys[:tst_start] }
training_subset[1] = { key : training_images[key] for key in training_keys[tst_start:tst_end] }
# create the ANN
ann = ANN()
error_rate = 100.
prev_error = 0.
print( "# training phase" )
# while the error rate is high
while error_rate > 20. :
# train the network for a subset
ann.train( training_subset[0], facit )
sum_error, sum_total = 0., 0.
# test the performance
res_test = ann.recognize( training_subset[1] )
for face in res_test :
sum_total += 1.
if int(facit[face]) != int(res_test[face]) :
sum_error += 1.
# update error rate
if sum_total > 0. :
error = (sum_error / sum_total) * 100
else :
error = 0
print( "# Should never happen" )
if prev_error != error :
prev_error = error
res_test = ann.recognize( training_subset[1] )
sum_error, sum_total = 0., 0.
for face in res_test :
sum_total += 1.
if int(facit[face]) != int(res_test[face]) :
sum_error += 1.
error_rate = (sum_error / sum_total) * 100.
print( str( "# error rate : " + str(error_rate) ) )
# shuffle the training sets
shuffle( training_keys )
training_subset[0] = {key : training_images[key] for key in training_keys[:tst_start]}
training_subset[1] = {key : training_images[key] for key in training_keys[tst_start:tst_end]}
# get the images for the test
test_images = read_images( sys.argv[3] )
        # recognize faces
print( "# recognize phase" )
final = ann.recognize( test_images )
sorted_keys = sorted( final, key=cmp_to_key( compare_images_key ) )
        # display the results in the same order as the input
for key in sorted_keys :
print( str( key + ' \t' + str(final[key]) ) )
else :
print( help )
|
blasterbug/SmileANN
|
faces.py
|
Python
|
gpl-2.0
| 9,564
|
[
"NEURON"
] |
c109a08f0d15a74bd673f5af19dfe7fa7b1973ee7fb512ec4f0df214c3253cd7
|
../../../../../../../share/pyshared/orca/scripts/apps/planner/script.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/planner/script.py
|
Python
|
gpl-3.0
| 71
|
[
"ORCA"
] |
af76dd540d95a333d2bf069070fe5a0c60fbbc9d4a189ea5602d0886229c341d
|
#! /bin/usr/env python
# D.J. Bennett
# 26/05/2014
"""
Tests for download tools.
"""
import unittest
import pickle
import logging
import os
import pglt.tools.download_tools as dtools
# DIRS
working_dir = os.path.dirname(__file__)
# DUMMIES
# expected terms and term search results
t1_term = '(txid1[PORGN]) OR txid2[PORGN] AND (("name1"[GENE]) \
OR "name2"[GENE]) NOT predicted[TI] NOT genome[TI] NOT unverified[TI]'
t2_term = '(txid1[PORGN]) OR txid2[PORGN] AND (("name1"[TI]) \
OR "name2"[TI]) NOT predicted[TI] NOT genome[TI] NOT unverified[TI]'
t3_term = '(txid1[PORGN]) OR txid2[PORGN] AND (("name1") \
OR "name2") NOT predicted[TI] NOT shotgun[TI] NOT scaffold[TI] \
NOT assembly[TI] NOT unverified[TI]'
t1_search_res = {'Count': 0}
t2_search_res = {'Count': 2, 'IdList': ['seq1', 'seq2']}
t3_search_res = {'Count': 3, 'IdList': ['seq1', 'seq2', 'seq3']}
outgroup_res = {'Count': 3, 'IdList': ['seq4', 'seq5', 'seq6']}
# Example seqrecord for findgeneinseq
with open(os.path.join(working_dir, 'data', "test_findgeneinseq_examplesequence\
.p"), "r") as file:
sequence = pickle.load(file)
# Dummy seq records for download
class dummy_Seq(object):
def __init__(self):
pass
def __str__(self):
# Just for parsing
return "A" * 500
class dummy_SeqRecord(object):
def __init__(self, description, length=500):
self.description = description
self.length = length
self.seq = dummy_Seq()
self.features = None
def __len__(self):
return self.length
seq1 = dummy_SeqRecord(description="A sequence of NAME1")
seq2 = dummy_SeqRecord(description="A sequence of NAME2")
seq3 = [dummy_SeqRecord(description="A sequence of NAME3"),
dummy_SeqRecord(description="A sequence of NAME4"),
dummy_SeqRecord(description="A sequence of NAME5"),
dummy_SeqRecord(description="A sequence of NAME1")]
# Sequences -- just Ts and Fs -- for testing filter
sequences = [True for i in range(80)]
sequences.extend([False for i in range(20)])
# Dependent stubs
def dummy_eSearch(term, logger, retStart=0, retMax=1, usehistory="n",
db="nucleotide"):
if term == t1_term:
return t1_search_res
if term == t2_term:
return t2_search_res
if term == t3_term:
return t3_search_res
else:
return outgroup_res
def dummy_eFetch(ncbi_id, logger, db="nucleotide"):
if ncbi_id == 'seq1':
return seq1
elif ncbi_id == 'seq2':
return seq2
elif ncbi_id == 'seq3':
return seq3
else:
# return all as list
return [seq1, seq2, seq3]
def dummy_blast(query, subj, minoverlap, logger, wd, threads):
# should return bools and positions
# pretend they've matched from 0-100 base positions
return query, [0, 100]
def dummy_checkAlignment(alignment, maxgaps, minoverlap, minlen, logger):
return alignment
# downloader init variables
taxids = ['1', '2']
gene_names = ['name1', 'name2']
nseqs = 2
thoroughness = 3
maxpn = 0.1
votesize = 3
maxgaps = 0.01
minoverlap = 200
maxtrys = 100
maxlen = 2000
minlen = 300
# dictionary variables
namesdict = {"species1": {'txids': ['1', '2']}, 'outgroup': {'txids': ['4']}}
allrankids = [1, 2, 3]
genedict = {'gene1': {'taxid': '3', 'names': ['name1', 'name2'],
'type': 'deep'}}
class DownloadTestSuite(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger()
self.wd = os.getcwd()
self.true_eSearch = dtools.etools.eSearch
self.true_eFetch = dtools.etools.eFetch
self.true_blast = dtools.atools.blast
self.true_checkAlignment = dtools.atools.checkAlignment
dtools.etools.eSearch = dummy_eSearch
dtools.etools.eFetch = dummy_eFetch
dtools.atools.blast = dummy_blast
dtools.atools.checkAlignment = dummy_checkAlignment
# mock Downloader instance
self.downloader = dtools.Downloader(gene_names=gene_names,
nseqs=nseqs,
thoroughness=thoroughness,
maxpn=maxpn, votesize=votesize,
maxtrys=maxtrys,
minoverlap=minoverlap,
maxlen=maxlen, minlen=minlen,
logger=self.logger, wd=self.wd)
# expected search terms at different thoroughnesses
self.t1_term = t1_term
self.t2_term = t2_term
self.t3_term = t3_term
self.seqids = ['seq1', 'seq2', 'seq3']
self.seq1 = seq1
self.seq2 = seq2
self.seq3 = seq3
self.sequences = sequences
self.taxids = taxids
self.record = sequence
self.namesdict = namesdict
self.allrankids = allrankids
self.genedict = genedict
def tearDown(self):
# repatch
dtools.etools.eSearch = self.true_eSearch
dtools.etools.eFetch = self.true_eFetch
dtools.atools.blast = self.true_blast
dtools.atools.checkAlignment = self.true_checkAlignment
def test_downloader_private_buildsearchterm_thoroughness1(self):
res = self.downloader._buildSearchTerm(self.taxids, 1)
self.assertEqual(res, self.t1_term)
def test_downloader_private_buildsearchterm_thoroughness2(self):
res = self.downloader._buildSearchTerm(self.taxids, 2)
self.assertEqual(res, self.t2_term)
def test_downloader_private_buildsearchterm_thoroughness3(self):
res = self.downloader._buildSearchTerm(self.taxids, 3)
self.assertEqual(res, self.t3_term)
def test_downloader_private_search(self):
# expect to only find 2, 1 and 0 sequences
# it should search until it finds two sequences (nseqs = 2),
# and then on the next search after raising its thoroughness
# it should find the last sequence. Searching again will
# find no more.
res1 = self.downloader._search(self.taxids)
res2 = self.downloader._search(self.taxids)
res3 = self.downloader._search(self.taxids)
self.assertEqual([len(res1), len(res2), len(res3)], [2, 1, 0])
def test_downloader_private_filter(self):
        # _filter weeds the Falses out of sequences: res_filtered should
        # keep the 80 Trues and res_downloaded the 20 Falses
        # (self.sequences is 80 Ts followed by 20 Fs)
sequences = self.sequences[:]
res_filtered, res_downloaded = self.downloader._filter(sequences)
self.assertEqual(len(res_filtered), 80)
self.assertEqual(len(res_downloaded), 20)
def test_downloader_private_findgeneinseq(self):
# change gene names for test
gene_names = self.downloader.gene_names
self.downloader.gene_names = ['COI']
res = self.downloader._findGeneInSeq(self.record)
self.downloader.gene_names = gene_names
# I know that the COI sequence is 1545bp (5350..6894)
# (http://www.ncbi.nlm.nih.gov/nuccore/AM711897.1)
self.assertEqual(len(res), 1545)
def test_downloader_private_parse(self):
# seq3, a list of SeqRecords of which the third is the right
# size and contains no Ns
res = self.downloader._parse(self.seq3)
self.assertIsNotNone(res)
def test_downloader_private_download(self):
res = self.downloader._download(self.seqids)
self.assertEqual(len(res), 3)
def test_downloader_run(self):
# reset thoroughness and deja_vues
self.downloader.thoroughness = 1
self.downloader.deja_vues = []
res = self.downloader.run(self.taxids)
self.assertEqual(len(res), 3)
def test_findbestgenes(self):
res = dtools.findBestGenes(self.namesdict, self.genedict, 3,
self.allrankids, logger=self.logger,
minnseq=1, target=1, minnspp=0)
self.assertEqual(res[0], 'gene1')
def test_get_clusters(self):
# make a gene_sequences: [(name, sequence), ...]
# should return 80 sequences
names = ['sp1', 'sp2', 'sp3', 'sp4', 'sp5', 'sp6', 'sp7', 'sp8', 'sp9',
'sp10']*10
gene_sequences = zip(names, self.sequences)
res = dtools.getClusters(gene_sequences, 0.5, self.logger, self.wd)
self.assertEqual(len(res[0]), 80)
if __name__ == '__main__':
unittest.main()
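# An equivalent sketch (not part of pG-lt) of the save/patch/restore done in
# setUp and tearDown above, written with mock.patch.object, which restores
# the original attribute automatically (assuming the `mock` package):
#
#   from mock import patch
#
#   class DownloadWithMockTestSuite(unittest.TestCase):
#       def test_search_with_patched_esearch(self):
#           with patch.object(dtools.etools, 'eSearch', dummy_eSearch):
#               pass  # dtools sees the stub here; restored on exit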
|
DomBennett/pG-lt
|
tests/test_tools_download.py
|
Python
|
gpl-2.0
| 8,457
|
[
"BLAST"
] |
50a06fa172c6739cc4a99661cd358ba1c80b8ecef46e7291d28cdb6180856f0d
|
from Firefly import logging
from Firefly.components.zwave.device_types.water_sensor import ZwaveWaterSensor
from Firefly.const import SENSOR_DRY, WATER
ALARM = 'alarm'
BATTERY = 'battery'
TITLE = 'DSB45 Aeotec Water Sensor'
COMMANDS = []
REQUESTS = [ALARM, BATTERY, WATER]
INITIAL_VALUES = {
'_alarm': False,
'_battery': -1,
'_water': SENSOR_DRY
}
def Setup(firefly, package, **kwargs):
logging.message('Entering %s setup' % TITLE)
sensor = DSB45(firefly, package, **kwargs)
firefly.install_component(sensor)
return sensor.id
class DSB45(ZwaveWaterSensor):
def __init__(self, firefly, package, **kwargs):
initial_values = INITIAL_VALUES
if kwargs.get('initial_values') is not None:
initial_values_updated = INITIAL_VALUES.copy()
initial_values_updated.update(kwargs.get('initial_values'))
initial_values = initial_values_updated
kwargs.update({
'initial_values': initial_values,
'commands': COMMANDS,
'requests': REQUESTS
})
super().__init__(firefly, package, TITLE, **kwargs)
def update_device_config(self, **kwargs):
# TODO: Pull these out into config values
"""
        Updates the device to the desired config params. This will be useful for building new default device configs.
Args:
**kwargs ():
"""
# Config Ref:
# https://github.com/OpenZWave/open-zwave/blob/master/config/aeotec/dsb45.xml
wake_up_on_power_on_idx = 2
wake_up_on_power_on = 1
# self.node.set_config_param(wake_up_on_power_on_idx, wake_up_on_power_on)
report_to_send_idx = 121
report_to_send_val = 4113 # OZW Ideal Setting: 0x1011 (Battery, Sensor Binary Report, Alarm)
report_to_send = (report_to_send_idx, report_to_send_val)
successful = self.verify_set_zwave_params([report_to_send])
self._update_try_count += 1
self._config_updated = successful
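# A short aside (not part of the Firefly module): the report value 4113 is
# exactly the bitmask 0x1011 cited in the comment above, i.e. bits 0, 4 and
# 12 set, matching the three reports listed there.
assert 4113 == 0x1011 == (1 << 0) | (1 << 4) | (1 << 12)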
|
Firefly-Automation/Firefly
|
Firefly/components/zwave/aeotec/dsb45_water_sensor.py
|
Python
|
apache-2.0
| 1,889
|
[
"Firefly"
] |
b56bb50d1f9c6d298c57e59030c3804c700c061581a6ae9fe5b6f07b77214e21
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from six.moves import map
from six.moves import zip
import math
import itertools
import logging
import time
from monty.json import MSONable
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.analysis.ewald import EwaldSummation, EwaldMinimizer
"""
This module defines site transformations which transforms a structure into
another structure. Site transformations differ from standard transformations
in that they operate in a site-specific manner.
All transformations should inherit the AbstractTransformation ABC.
"""
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Sep 23, 2011"
class InsertSitesTransformation(AbstractTransformation):
"""
This transformation substitutes certain sites with certain species.
Args:
species: A list of species. e.g., ["Li", "Fe"]
coords: A list of coords corresponding to those species. e.g.,
[[0,0,0],[0.5,0.5,0.5]].
coords_are_cartesian (bool): Set to True if coords are given in
cartesian coords. Defaults to False.
validate_proximity (bool): Set to False if you do not wish to ensure
that added sites are not too close to other sites. Defaults to True.
"""
def __init__(self, species, coords, coords_are_cartesian=False,
validate_proximity=True):
if len(species) != len(coords):
raise ValueError("Species and coords must be the same length!")
self.species = species
self.coords = coords
self.coords_are_cartesian = coords_are_cartesian
self.validate_proximity = validate_proximity
def apply_transformation(self, structure):
s = structure.copy()
for i, sp in enumerate(self.species):
s.insert(i, sp, self.coords[i],
coords_are_cartesian=self.coords_are_cartesian,
validate_proximity=self.validate_proximity)
return s.get_sorted_structure()
def __str__(self):
return "InsertSiteTransformation : " + \
"species {}, coords {}".format(self.species, self.coords)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class ReplaceSiteSpeciesTransformation(AbstractTransformation):
"""
This transformation substitutes certain sites with certain species.
Args:
indices_species_map: A dict containing the species mapping in
int-string pairs. E.g., { 1:"Na"} or {2:"Mn2+"}. Multiple
substitutions can be done. Overloaded to accept sp_and_occu
dictionary. E.g. {1: {"Ge":0.75, "C":0.25} }, which
substitutes a single species with multiple species to generate a
disordered structure.
"""
def __init__(self, indices_species_map):
self.indices_species_map = indices_species_map
def apply_transformation(self, structure):
s = structure.copy()
for i, sp in self.indices_species_map.items():
s[int(i)] = sp
return s
def __str__(self):
return "ReplaceSiteSpeciesTransformation :" + \
", ".join(["{}->{}".format(k, v) + v for k, v in
self.indices_species_map.items()])
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class RemoveSitesTransformation(AbstractTransformation):
"""
Remove certain sites in a structure.
Args:
indices_to_remove: List of indices to remove. E.g., [0, 1, 2]
"""
def __init__(self, indices_to_remove):
self.indices_to_remove = indices_to_remove
def apply_transformation(self, structure):
s = structure.copy()
s.remove_sites(self.indices_to_remove)
return s
def __str__(self):
return "RemoveSitesTransformation :" + ", ".join(
map(str, self.indices_to_remove))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class TranslateSitesTransformation(AbstractTransformation):
"""
This class translates a set of sites by a certain vector.
Args:
indices_to_move: The indices of the sites to move
translation_vector: Vector to move the sites. If a list of list or numpy
array of shape, (len(indices_to_move), 3), is provided then each
translation vector is applied to the corresponding site in the
indices_to_move.
vector_in_frac_coords: Set to True if the translation vector is in
fractional coordinates, and False if it is in cartesian
coordinations. Defaults to True.
"""
def __init__(self, indices_to_move, translation_vector,
vector_in_frac_coords=True):
self.indices_to_move = indices_to_move
self.translation_vector = np.array(translation_vector)
self.vector_in_frac_coords = vector_in_frac_coords
def apply_transformation(self, structure):
s = structure.copy()
if self.translation_vector.shape == (len(self.indices_to_move), 3):
for i, idx in enumerate(self.indices_to_move):
s.translate_sites(idx, self.translation_vector[i],
self.vector_in_frac_coords)
else:
s.translate_sites(self.indices_to_move, self.translation_vector,
self.vector_in_frac_coords)
return s
def __str__(self):
return "TranslateSitesTransformation for indices " + \
"{}, vect {} and vect_in_frac_coords = {}".format(
self.indices_to_move, self.translation_vector,
self.vector_in_frac_coords)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return TranslateSitesTransformation(
self.indices_to_move, -self.translation_vector,
self.vector_in_frac_coords)
@property
def is_one_to_many(self):
return False
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = MSONable.as_dict(self)
d["translation_vector"] = self.translation_vector.tolist()
return d
class PartialRemoveSitesTransformation(AbstractTransformation):
"""
Remove fraction of specie from a structure.
Requires an oxidation state decorated structure for ewald sum to be
computed.
Args:
indices:
A list of list of indices.
e.g. [[0, 1], [2, 3, 4, 5]]
fractions:
The corresponding fractions to remove. Must be same length as
indices. e.g., [0.5, 0.25]
algo:
This parameter allows you to choose the algorithm to perform
ordering. Use one of PartialRemoveSpecieTransformation.ALGO_*
variables to set the algo.
Given that the solution to selecting the right removals is NP-hard, there
are several algorithms provided with varying degrees of accuracy and speed.
The options are as follows:
ALGO_FAST:
This is a highly optimized algorithm to quickly go through the search
tree. It is guaranteed to find the optimal solution, but will return
only a single lowest energy structure. Typically, you will want to use
this.
ALGO_COMPLETE:
The complete algo ensures that you get all symmetrically distinct
orderings, ranked by the estimated Ewald energy. But this can be an
extremely time-consuming process if the number of possible orderings is
very large. Use this if you really want all possible orderings. If you
want just the lowest energy ordering, ALGO_FAST is accurate and faster.
ALGO_BEST_FIRST:
        This algorithm is for ordering really large cells that defeat even
        ALGO_FAST. For example, if you have 48 sites of which you want to
        remove 16, the number of possible orderings is around
        2 x 10^12. ALGO_BEST_FIRST short-circuits the entire search tree by
removing the highest energy site first, then followed by the next
highest energy site, and so on. It is guaranteed to find a solution
in a reasonable time, but it is also likely to be highly inaccurate.
ALGO_ENUMERATE:
This algorithm uses the EnumerateStructureTransformation to perform
ordering. This algo returns *complete* orderings up to a single unit
cell size. It is more robust than the ALGO_COMPLETE, but requires
Gus Hart's enumlib to be installed.
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
ALGO_ENUMERATE = 3
def __init__(self, indices, fractions, algo=ALGO_COMPLETE):
self.indices = indices
self.fractions = fractions
self.algo = algo
self.logger = logging.getLogger(self.__class__.__name__)
def best_first_ordering(self, structure, num_remove_dict):
self.logger.debug("Performing best first ordering")
starttime = time.time()
self.logger.debug("Performing initial ewald sum...")
ewaldsum = EwaldSummation(structure)
self.logger.debug("Ewald sum took {} seconds."
.format(time.time() - starttime))
starttime = time.time()
ematrix = ewaldsum.total_energy_matrix
to_delete = []
totalremovals = sum(num_remove_dict.values())
removed = {k: 0 for k in num_remove_dict.keys()}
for i in range(totalremovals):
maxindex = None
maxe = float("-inf")
maxindices = None
for indices in num_remove_dict.keys():
if removed[indices] < num_remove_dict[indices]:
for ind in indices:
if ind not in to_delete:
                        # row sum + column sum (the Ewald matrix is
                        # symmetric) minus the double-counted diagonal
                        energy = sum(ematrix[:, ind]) + \
                            sum(ematrix[ind, :]) - ematrix[ind, ind]
if energy > maxe:
maxindex = ind
maxe = energy
maxindices = indices
removed[maxindices] += 1
to_delete.append(maxindex)
ematrix[:, maxindex] = 0
ematrix[maxindex, :] = 0
s = structure.copy()
s.remove_sites(to_delete)
self.logger.debug("Minimizing Ewald took {} seconds."
.format(time.time() - starttime))
return [{"energy": sum(sum(ematrix)),
"structure": s.get_sorted_structure()}]
def complete_ordering(self, structure, num_remove_dict):
self.logger.debug("Performing complete ordering...")
all_structures = []
symprec = 0.2
s = SpacegroupAnalyzer(structure, symprec=symprec)
self.logger.debug("Symmetry of structure is determined to be {}."
.format(s.get_space_group_symbol()))
sg = s.get_space_group_operations()
tested_sites = []
starttime = time.time()
self.logger.debug("Performing initial ewald sum...")
ewaldsum = EwaldSummation(structure)
self.logger.debug("Ewald sum took {} seconds."
.format(time.time() - starttime))
starttime = time.time()
allcombis = []
for ind, num in num_remove_dict.items():
allcombis.append(itertools.combinations(ind, num))
count = 0
for allindices in itertools.product(*allcombis):
sites_to_remove = []
indices_list = []
for indices in allindices:
sites_to_remove.extend([structure[i] for i in indices])
indices_list.extend(indices)
s_new = structure.copy()
s_new.remove_sites(indices_list)
energy = ewaldsum.compute_partial_energy(indices_list)
already_tested = False
for i, tsites in enumerate(tested_sites):
tenergy = all_structures[i]["energy"]
if abs((energy - tenergy) / len(s_new)) < 1e-5 and \
sg.are_symmetrically_equivalent(sites_to_remove,
tsites,
symm_prec=symprec):
already_tested = True
if not already_tested:
tested_sites.append(sites_to_remove)
all_structures.append({"structure": s_new, "energy": energy})
count += 1
if count % 10 == 0:
timenow = time.time()
self.logger.debug("{} structures, {:.2f} seconds."
.format(count, timenow - starttime))
self.logger.debug("Average time per combi = {} seconds"
.format((timenow - starttime) / count))
self.logger.debug("{} symmetrically distinct structures found."
.format(len(all_structures)))
self.logger.debug("Total symmetrically distinct structures found = {}"
.format(len(all_structures)))
all_structures = sorted(all_structures, key=lambda s: s["energy"])
return all_structures
def fast_ordering(self, structure, num_remove_dict, num_to_return=1):
"""
This method uses the matrix form of ewaldsum to calculate the ewald
sums of the potential structures. This is on the order of 4 orders of
magnitude faster when there are large numbers of permutations to
consider. There are further optimizations possible (doing a smarter
    search of permutations for example), but this won't make a difference
until the number of permutations is on the order of 30,000.
"""
self.logger.debug("Performing fast ordering")
starttime = time.time()
self.logger.debug("Performing initial ewald sum...")
ewaldmatrix = EwaldSummation(structure).total_energy_matrix
self.logger.debug("Ewald sum took {} seconds."
.format(time.time() - starttime))
starttime = time.time()
m_list = []
for indices, num in num_remove_dict.items():
m_list.append([0, num, list(indices), None])
self.logger.debug("Calling EwaldMinimizer...")
minimizer = EwaldMinimizer(ewaldmatrix, m_list, num_to_return,
PartialRemoveSitesTransformation.ALGO_FAST)
self.logger.debug("Minimizing Ewald took {} seconds."
.format(time.time() - starttime))
all_structures = []
lowest_energy = minimizer.output_lists[0][0]
num_atoms = sum(structure.composition.values())
for output in minimizer.output_lists:
s = structure.copy()
del_indices = []
for manipulation in output[1]:
if manipulation[1] is None:
del_indices.append(manipulation[0])
else:
s.replace(manipulation[0], manipulation[1])
s.remove_sites(del_indices)
struct = s.get_sorted_structure()
all_structures.append(
{"energy": output[0],
"energy_above_minimum": (output[0] - lowest_energy)
/ num_atoms,
"structure": struct})
return all_structures
def enumerate_ordering(self, structure):
# Generate the disordered structure first.
s = structure.copy()
for indices, fraction in zip(self.indices, self.fractions):
for ind in indices:
new_sp = {sp: occu * fraction
for sp, occu
in structure[ind].species_and_occu.items()}
s[ind] = new_sp
# Perform enumeration
from pymatgen.transformations.advanced_transformations import \
EnumerateStructureTransformation
trans = EnumerateStructureTransformation()
return trans.apply_transformation(s, 10000)
def apply_transformation(self, structure, return_ranked_list=False):
"""
Apply the transformation.
Args:
structure: input structure
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
            Depending on return_ranked_list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure": ..., "other_arguments"};
the key "transformation" is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
num_remove_dict = {}
total_combis = 0
for indices, frac in zip(self.indices, self.fractions):
num_to_remove = len(indices) * frac
if abs(num_to_remove - int(round(num_to_remove))) > 1e-3:
raise ValueError("Fraction to remove must be consistent with "
"integer amounts in structure.")
else:
num_to_remove = int(round(num_to_remove))
num_remove_dict[tuple(indices)] = num_to_remove
n = len(indices)
total_combis += int(round(math.factorial(n) /
math.factorial(num_to_remove) /
math.factorial(n - num_to_remove)))
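            # i.e. the binomial coefficient C(n, k) = n! / (k! * (n - k)!),
            # the number of ways to pick which k sites to remove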
self.logger.debug("Total combinations = {}".format(total_combis))
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
num_to_return = max(1, num_to_return)
self.logger.debug("Will return {} best structures."
.format(num_to_return))
if self.algo == PartialRemoveSitesTransformation.ALGO_FAST:
all_structures = self.fast_ordering(structure, num_remove_dict,
num_to_return)
elif self.algo == PartialRemoveSitesTransformation.ALGO_COMPLETE:
all_structures = self.complete_ordering(structure, num_remove_dict)
elif self.algo == PartialRemoveSitesTransformation.ALGO_BEST_FIRST:
all_structures = self.best_first_ordering(structure,
num_remove_dict)
elif self.algo == PartialRemoveSitesTransformation.ALGO_ENUMERATE:
all_structures = self.enumerate_ordering(structure)
else:
raise ValueError("Invalid algo.")
opt_s = all_structures[0]["structure"]
return opt_s if not return_ranked_list \
else all_structures[0:num_to_return]
def __str__(self):
return "PartialRemoveSitesTransformation : Indices and fraction" + \
" to remove = {}, ALGO = {}".format(self.indices, self.algo)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
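# A hedged usage sketch (not part of the original module; `struct` is a
# hypothetical oxidation-state-decorated Structure with Li on sites 0-3):
#
#   trans = PartialRemoveSitesTransformation(
#       [[0, 1, 2, 3]], [0.5],
#       algo=PartialRemoveSitesTransformation.ALGO_FAST)
#   best = trans.apply_transformation(struct)      # single lowest-energy structure
#   top5 = trans.apply_transformation(struct, 5)   # ranked list of dicts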
class AddSitePropertyTransformation(AbstractTransformation):
"""
Simple transformation to add site properties to a given structure
"""
def __init__(self, site_properties):
"""
Args:
site_properties (dict): site properties to be added to a structure
"""
self.site_properties = site_properties
def apply_transformation(self, structure):
"""
apply the transformation
Args:
structure (Structure): structure to add site properties to
"""
new_structure = structure.copy()
for prop in self.site_properties.keys():
new_structure.add_site_property(prop, self.site_properties[prop])
return new_structure
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
|
matk86/pymatgen
|
pymatgen/transformations/site_transformations.py
|
Python
|
mit
| 20,996
|
[
"pymatgen"
] |
f08b06b9a4fecbe690181cfda7c102ca0820bbf85e5016010968b27a4b3d381c
|
"""
Test courseware search
"""
import os
import json
from ...pages.common.logout import LogoutPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware_search import CoursewareSearchPage
from ...pages.lms.course_nav import CourseNavPage
from ...fixtures.course import XBlockFixtureDesc
from ..helpers import create_user_partition_json
from xmodule.partitions.partitions import Group
from nose.plugins.attrib import attr
from ..studio.base_studio_test import ContainerBase
from ...pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
@attr('shard_1')
class SplitTestCoursewareSearchTest(ContainerBase):
"""
Test courseware search on Split Test Module.
"""
USERNAME = 'STUDENT_TESTER'
EMAIL = 'student101@example.com'
TEST_INDEX_FILENAME = "test_root/index_file.dat"
def setUp(self, is_staff=True):
"""
Create search page and course content to search
"""
# create test file in which index for this test will live
with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
json.dump({}, index_file)
super(SplitTestCoursewareSearchTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.courseware_search_page = CoursewareSearchPage(self.browser, self.course_id)
self.course_navigation_page = CourseNavPage(self.browser)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self._add_and_configure_split_test()
self._studio_reindex()
def tearDown(self):
super(SplitTestCoursewareSearchTest, self).tearDown()
os.remove(self.TEST_INDEX_FILENAME)
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
LogoutPage(self.browser).visit()
StudioAutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def _studio_reindex(self):
"""
Reindex course content on studio course page
"""
self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
self.course_outline.visit()
self.course_outline.start_reindex()
self.course_outline.wait_for_ajax()
def _add_and_configure_split_test(self):
"""
Add a split test and a configuration to a test course fixture
"""
        # Create new group configurations
# pylint: disable=W0212
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
create_user_partition_json(
456,
"Name 2",
"Description 2.",
[Group("2", "Group C"), Group("3", "Group D")]
),
],
},
})
# Add a split test module to the 'Test Unit' vertical in the course tree
split_test_1 = XBlockFixtureDesc('split_test', 'Test Content Experiment 1', metadata={'user_partition_id': 0})
split_test_1_parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[1]
self.course_fixture.create_xblock(split_test_1_parent_vertical.locator, split_test_1)
# Add a split test module to the 'Test 2 Unit' vertical in the course tree
split_test_2 = XBlockFixtureDesc('split_test', 'Test Content Experiment 2', metadata={'user_partition_id': 456})
split_test_2_parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[2]
self.course_fixture.create_xblock(split_test_2_parent_vertical.locator, split_test_2)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
course_fixture.add_advanced_settings({
u"advanced_modules": {"value": ["split_test"]},
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Content Section').add_children(
XBlockFixtureDesc('sequential', 'Content Subsection').add_children(
XBlockFixtureDesc('vertical', 'Content Unit').add_children(
XBlockFixtureDesc('html', 'VISIBLETOALLCONTENT', data='<html>VISIBLETOALLCONTENT</html>')
)
)
),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
),
XBlockFixtureDesc('chapter', 'X Section').add_children(
XBlockFixtureDesc('sequential', 'X Subsection').add_children(
XBlockFixtureDesc('vertical', 'X Unit')
)
),
)
self.test_1_breadcrumb = "Test Section \xe2\x96\xb8 Test Subsection \xe2\x96\xb8 Test Unit".decode("utf-8")
self.test_2_breadcrumb = "X Section \xe2\x96\xb8 X Subsection \xe2\x96\xb8 X Unit".decode("utf-8")
def test_page_existence(self):
"""
Make sure that the page is accessible.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_search_page.visit()
def test_search_for_experiment_content_user_not_assigned(self):
"""
Test user can't search for experiment content if not assigned to a group.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_search_page.visit()
self.courseware_search_page.search_for_term("Group")
assert "Sorry, no results were found." in self.courseware_search_page.search_results.html[0]
def test_search_for_experiment_content_user_assigned_to_one_group(self):
"""
        Test that a user can search for experiment content restricted to
        their group when assigned to just one experiment group.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_search_page.visit()
self.course_navigation_page.go_to_section("Test Section", "Test Subsection")
self.courseware_search_page.search_for_term("Group")
assert "1 result" in self.courseware_search_page.search_results.html[0]
assert self.test_1_breadcrumb in self.courseware_search_page.search_results.html[0]
assert self.test_2_breadcrumb not in self.courseware_search_page.search_results.html[0]
|
B-MOOC/edx-platform
|
common/test/acceptance/tests/lms/test_lms_split_test_courseware_search.py
|
Python
|
agpl-3.0
| 6,940
|
[
"VisIt"
] |
e6633c73690146dd9882a12e939eb1ac0b227bc18e54c4a8ad0d4c008868da75
|
"""
-- Policy Network for decision making [more general]
"""
from nmt_uni import *
from layers import _p
import os
import time, datetime
import cPickle as pkl
# hyper params
TINY = 1e-7
PI = numpy.pi
E = numpy.e
A = 0.2
B = 1
class Controller(object):
def __init__(self, trng,
options,
n_in=None, n_out=None,
recurrent=False, id=None):
self.WORK = options['workspace']
self.trng = trng
self.options = options
self.recurrent = recurrent
self.type = options.get('type', 'categorical')
self.n_hidden = 128
self.n_in = n_in
self.n_out = n_out
if self.options.get('layernorm', True):
self.rec = 'lngru'
else:
self.rec = 'gru'
if not n_in:
self.n_in = options['readout_dim']
if not n_out:
if self.type == 'categorical':
self.n_out = 2 # initially it is a WAIT/COMMIT action.
elif self.type == 'gaussian':
self.n_out = 100
else:
raise NotImplementedError
# build the policy network
print 'parameter initialization'
params = OrderedDict()
if not self.recurrent:
print 'building a feedforward controller'
params = get_layer('ff')[0](options, params, prefix='policy_net_in',
nin=self.n_in, nout=self.n_hidden)
else:
print 'building a recurrent controller'
params = get_layer(self.rec)[0](options, params, prefix='policy_net_in',
nin=self.n_in, dim=self.n_hidden)
params = get_layer('ff')[0](options, params, prefix='policy_net_out',
nin=self.n_hidden,
nout=self.n_out if self.type == 'categorical' else self.n_out * 2)
# bias the forget probability
# if self.n_out == 3:
# params[_p('policy_net_out', 'b')][-1] = -2
# for the baseline network.
params_b = OrderedDict()
# using a scalar baseline [**]
# params_b['b0'] = numpy.array(numpy.random.rand() * 0.0, dtype='float32')
# using a MLP as a baseline
params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_in',
nin=self.n_in, nout=128)
params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_out',
nin=128, nout=1)
if id is not None:
print 'reload the saved model: {}'.format(id)
params = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params)
params_b = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params_b)
else:
id = datetime.datetime.fromtimestamp(time.time()).strftime('%y%m%d-%H%M%S')
print 'start from a new model: {}'.format(id)
self.id = id
self.model = self.WORK + '.policy/{}-{}'.format(id, self.options['base'])
# theano shared params
tparams = init_tparams(params)
tparams_b = init_tparams(params_b)
self.tparams = tparams
self.tparams_b = tparams_b
# build the policy network
self.build_sampler(options=options)
self.build_discriminator(options=options)
print 'policy network'
for p in params:
print p, params[p].shape
def build_batchnorm(self, observation, mask=None):
raise NotImplementedError
def build_sampler(self, options):
# ==================================================================================== #
# Build Action function: samplers
# ==================================================================================== #
observation = tensor.matrix('observation', dtype='float32') # batch_size x readout_dim (seq_steps=1)
prev_hidden = tensor.matrix('p_hidden', dtype='float32')
if not self.recurrent:
hiddens = get_layer('ff')[1](self.tparams, observation,
options, prefix='policy_net_in',
activ='tanh')
else:
hiddens = get_layer(self.rec)[1](self.tparams, observation,
options, prefix='policy_net_in', mask=None,
one_step=True, _init_state=prev_hidden)[0]
act_inps = [observation, prev_hidden]
if self.type == 'categorical':
act_prob = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='softmax') # batch_size x n_out
act_prob2 = tensor.clip(act_prob, TINY, 1 - TINY)
# compiling the sampling function for action
# action = self.trng.binomial(size=act_prop.shape, p=act_prop)
action = self.trng.multinomial(pvals=act_prob).argmax(1) # 0, 1, ...
print 'build action sampling function [Discrete]'
self.f_action = theano.function(act_inps, [action, act_prob, hiddens, act_prob2],
on_unused_input='ignore') # action/dist/hiddens
elif self.type == 'gaussian':
_temp = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='linear'
) # batch_size x n_out
mean, log_std = _temp[:, :self.n_out], _temp[:, self.n_out:]
mean, log_std = -A * tanh(mean), -B-relu(log_std)
action0 = self.trng.normal(size=mean.shape, dtype='float32')
action = action0 * tensor.exp(log_std) + mean
print 'build action sampling function [Gaussian]'
self.f_action = theano.function(act_inps, [action, mean, log_std, hiddens],
on_unused_input='ignore') # action/dist/hiddens
else:
raise NotImplementedError
def build_discriminator(self, options):
# ==================================================================================== #
# Build Action Discriminator
# ==================================================================================== #
observations = tensor.tensor3('observations', dtype='float32')
mask = tensor.matrix('mask', dtype='float32')
if self.type == 'categorical':
actions = tensor.matrix('actions', dtype='int64')
elif self.type == 'gaussian':
actions = tensor.tensor3('actions', dtype='float32')
else:
raise NotImplementedError
if not self.recurrent:
hiddens = get_layer('ff')[1](self.tparams, observations,
options, prefix='policy_net_in',
activ='tanh')
else:
hiddens = get_layer(self.rec)[1](self.tparams, observations,
options, prefix='policy_net_in', mask=mask)[0]
act_inputs = [observations, mask]
if self.type == 'categorical':
act_probs = get_layer('ff')[1](self.tparams, hiddens, options, prefix='policy_net_out',
activ='softmax') # seq_steps x batch_size x n_out
act_probs = tensor.clip(act_probs, TINY, 1 - TINY)
            print 'build action distribution'
self.f_probs = theano.function(act_inputs, act_probs,
on_unused_input='ignore') # get the action probabilities
elif self.type == 'gaussian':
_temps = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='linear'
) # batch_size x n_out
means, log_stds = _temps[:, :, :self.n_out], _temps[:, :, self.n_out:]
means, log_stds = -A * tanh(means), -B-relu(log_stds)
act_probs = [means, log_stds]
print 'build Gaussian PDF'
self.f_pdf = theano.function(act_inputs, [means, log_stds],
on_unused_input='ignore') # get the action probabilities
else:
raise NotImplementedError
# ==================================================================================== #
# Build Baseline Network (Input-dependent Value Function) & Advantages
# ==================================================================================== #
print 'setup the advantages & baseline network'
reward = tensor.matrix('reward') # seq_steps x batch_size :: rewards for each steps
# baseline is estimated with a 2-layer neural network.
hiddens_b = get_layer('ff')[1](self.tparams_b, observations, options,
prefix='baseline_net_in',
activ='tanh')
baseline = get_layer('ff')[1](self.tparams_b, hiddens_b, options,
prefix='baseline_net_out',
activ='linear')[:, :, 0] # seq_steps x batch_size or batch_size
advantages = self.build_advantages(act_inputs, reward, baseline, normalize=True)
# ==================================================================================== #
# Build Policy Gradient (here we provide two options)
# ==================================================================================== #
if self.options['updater'] == 'REINFORCE':
            print 'build REINFORCE.'
self.build_reinforce(act_inputs, act_probs, actions, advantages)
elif self.options['updater'] == 'TRPO':
print 'build TRPO'
self.build_trpo(act_inputs, act_probs, actions, advantages)
else:
raise NotImplementedError
# ==================================================================================== #
# Controller Actions
# ==================================================================================== #
def random(self, states, p=0.5):
live_k = states.shape[0]
return (numpy.random.random(live_k) > p).astype('int64'), \
numpy.ones(live_k) * p
def action(self, states, prevhidden):
return self.f_action(states, prevhidden)
def init_hidden(self, n_samples=1):
return numpy.zeros((n_samples, self.n_hidden), dtype='float32')
def init_action(self, n_samples=1):
states0 = numpy.zeros((n_samples, self.n_in), dtype='float32')
return self.f_action(states0, self.init_hidden(n_samples))
def get_learner(self):
if self.options['updater'] == 'REINFORCE':
return self.run_reinforce
elif self.options['updater'] == 'TRPO':
return self.run_trpo
else:
raise NotImplementedError
@staticmethod
def kl(prob0, prob1):
p1 = (prob0 + TINY) / (prob1 + TINY)
# p2 = (1 - prob0 + TINY) / (1 - prob1 + TINY)
return tensor.sum(prob0 * tensor.log(p1), axis=-1)
@staticmethod
def _grab_prob(probs, X):
assert probs.ndim == 3
batch_size = probs.shape[1]
max_len = probs.shape[0]
vocab_size = probs.shape[2]
probs = probs.reshape((batch_size * max_len, vocab_size))
return probs[tensor.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape) # advanced indexing
def cross(self, probs, actions):
# return tensor.log(probs) * actions + tensor.log(1 - probs) * (1 - actions)
return self._grab_prob(tensor.log(probs), actions)
def build_advantages(self, act_inputs, reward, baseline, normalize=True):
# TODO: maybe we need a discount factor gamma for advantages.
# TODO: we can also rewrite advantages with value functions (GAE)
# Advantages and Normalization the return
reward_adv = reward - baseline
mask = act_inputs[1]
if normalize:
reward_mean = tensor.sum(mask * reward_adv) / tensor.sum(mask)
reward_mean2 = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
reward_std = tensor.sqrt(tensor.maximum(reward_mean2 - reward_mean ** 2, TINY)) + TINY
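            # i.e. a masked estimate of the std: sqrt(E[x^2] - E[x]^2),
            # floored at TINY to keep the division below safe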
# reward_std = tensor.maximum(reward_std, 1)
reward_c = reward_adv - reward_mean # independent mean
advantages = reward_c / reward_std
else:
advantages = reward_adv
print 'build advantages and baseline gradient'
L = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
dL = tensor.grad(L, wrt=itemlist(self.tparams_b))
lr = tensor.scalar(name='lr')
inps_b = act_inputs + [reward]
oups_b = [L, advantages]
f_adv, f_update_b = adam(lr, self.tparams_b, dL, inps_b, oups_b)
self.f_adv = f_adv
self.f_update_b = f_update_b
return advantages
# ===================================================================
    # Policy Gradient: REINFORCE with Adam
# ===================================================================
def build_reinforce(self, act_inputs, act_probs, actions, advantages):
mask = act_inputs[1]
if self.type == 'categorical':
negEntropy = tensor.sum(tensor.log(act_probs) * act_probs, axis=-1)
logLikelihood = self.cross(act_probs, actions)
elif self.type == 'gaussian':
means, log_stds = act_probs
negEntropy = -tensor.sum(log_stds + tensor.log(tensor.sqrt(2 * PI * E)), axis=-1)
actions0 = (actions - means) / tensor.exp(log_stds)
logLikelihood = -tensor.sum(log_stds, axis=-1) - \
0.5 * tensor.sum(tensor.sqr(actions0), axis=-1) - \
0.5 * means.shape[-1] * tensor.log(2 * PI)
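            # In equation form (a reading aid, not original code), with
            # sigma = exp(log_std) and d = means.shape[-1]:
            #   log N(a; mu, sigma) = -sum_i log(sigma_i)
            #                         - 0.5 * sum_i ((a_i - mu_i) / sigma_i)^2
            #                         - 0.5 * d * log(2 * pi)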
else:
raise NotImplementedError
# tensor.log(act_probs) * actions + tensor.log(1 - act_probs) * (1 - actions)
H = tensor.sum(mask * negEntropy, axis=0).mean() * 0.001 # penalty
J = tensor.sum(mask * -logLikelihood * advantages, axis=0).mean() + H
dJ = grad_clip(tensor.grad(J, wrt=itemlist(self.tparams)))
print 'build REINFORCE optimizer'
lr = tensor.scalar(name='lr')
inps = act_inputs + [actions, advantages]
outps = [J, H]
if self.type == 'gaussian':
outps += [actions0.mean(), actions.mean()]
f_cost, f_update = adam(lr, self.tparams, dJ, inps, outps)
self.f_cost = f_cost
self.f_update = f_update
print 'done'
def run_reinforce(self, act_inputs, actions, reward, update=True, lr=0.0002):
# sub baseline
inps_adv = act_inputs + [reward]
L, advantages = self.f_adv(*inps_adv)
inps_reinfoce = act_inputs + [actions, advantages]
if self.type == 'gaussian':
J, H, m, s = self.f_cost(*inps_reinfoce)
info = {'J': J, 'G_norm': H, 'B_loss': L, 'Adv': advantages.mean(), 'm': m, 's': s}
else:
J, H = self.f_cost(*inps_reinfoce)
info = {'J': J, 'Entropy': H, 'B_loss': L, 'Adv': advantages.mean()}
info['advantages'] = advantages
if update: # update the parameters
self.f_update_b(lr)
self.f_update(lr)
return info
# ==================================================================================== #
# Trust Region Policy Optimization
# ==================================================================================== #
def build_trpo(self, act_inputs, act_probs, actions, advantages):
assert self.type == 'categorical', 'in this stage not support TRPO'
# probability distribution
mask = act_inputs[1]
probs = act_probs
probs_old = tensor.matrix(dtype='float32')
logp = self.cross(probs, actions)
logp_old = self.cross(probs_old, actions)
# policy gradient
J = tensor.sum(mask * -tensor.exp(logp - logp_old) * advantages, axis=0).mean()
dJ = flatgrad(J, self.tparams)
probs_fix = theano.gradient.disconnected_grad(probs)
kl_fix = tensor.sum(mask * self.kl(probs_fix, probs), axis=0).mean()
kl_grads = tensor.grad(kl_fix, wrt=itemlist(self.tparams))
ftangents = tensor.fvector(name='flat_tan')
shapes = [self.tparams[var].get_value(borrow=True).shape for var in self.tparams]
start = 0
tangents = []
for shape in shapes:
size = numpy.prod(shape)
tangents.append(tensor.reshape(ftangents[start:start + size], shape))
start += size
gvp = tensor.add(*[tensor.sum(g * t) for (g, t) in zipsame(kl_grads, tangents)])
        # Fisher-vector product
fvp = flatgrad(gvp, self.tparams)
entropy = tensor.sum(mask * -self.cross(probs, probs), axis=0).mean()
kl = tensor.sum(mask * self.kl(probs_old, probs), axis=0).mean()
print 'compile the functions'
inps = act_inputs + [actions, advantages, probs_old]
loss = [J, kl, entropy]
self.f_pg = theano.function(inps, dJ)
self.f_loss = theano.function(inps, loss)
self.f_fisher = theano.function([ftangents] + inps, fvp, on_unused_input='ignore')
# get/set flatten params
print 'compling flat updater'
self.get_flat = theano.function([], tensor.concatenate([self.tparams[v].flatten() for v in self.tparams]))
theta = tensor.vector()
start = 0
updates = []
for v in self.tparams:
p = self.tparams[v]
shape = p.shape
size = tensor.prod(shape)
updates.append((p, theta[start:start + size].reshape(shape)))
start += size
self.set_flat = theano.function([theta], [], updates=updates)
def run_trpo(self, act_inputs, actions, reward,
update=True, cg_damping=1e-3, max_kl=1e-2, lr=0.0002):
# sub baseline
inps_adv = act_inputs + [reward]
L, advantages = self.f_adv(*inps_adv)
self.f_update_b(lr)
# get current action distributions
probs = self.f_probs(*act_inputs)
inps = act_inputs + [actions, advantages, probs]
thprev = self.get_flat()
def fisher_vector_product(p):
return self.f_fisher(p, *inps) + cg_damping * p
g = self.f_pg(*inps)
losses_before = self.f_loss(*inps)
if numpy.allclose(g, 0):
print 'zero gradient, not updating'
else:
stepdir = self.cg(fisher_vector_product, -g)
shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
lm = numpy.sqrt(shs / max_kl)
print "\nlagrange multiplier:", lm, "gnorm:", numpy.linalg.norm(g)
fullstep = stepdir / lm
neggdotstepdir = -g.dot(stepdir)
def loss(th):
self.set_flat(th)
return self.f_loss(*inps)[0]
print 'do line search'
success, theta = self.linesearch(loss, thprev, fullstep, neggdotstepdir / lm)
print "success", success
self.set_flat(theta)
losses_after = self.f_loss(*inps)
info = OrderedDict()
for (lname, lbefore, lafter) in zipsame(['J', 'KL', 'entropy'], losses_before, losses_after):
info[lname + "_before"] = lbefore
info[lname + "_after"] = lafter
# add the baseline loss into full information
info['B_loss'] = L
return info
@staticmethod
def linesearch(f, x, fullstep, expected_improve_rate, max_backtracks=10, accept_ratio=.1):
"""
Backtracking linesearch, where expected_improve_rate is the slope dy/dx at the initial point
"""
fval = f(x)
print "fval before", fval
for (_n_backtracks, stepfrac) in enumerate(.5 ** numpy.arange(max_backtracks)):
xnew = x + stepfrac * fullstep
newfval = f(xnew)
actual_improve = fval - newfval
expected_improve = expected_improve_rate * stepfrac
ratio = actual_improve / expected_improve
print "a/e/r", actual_improve, expected_improve, ratio
if ratio > accept_ratio and actual_improve > 0:
print "fval after", newfval
return True, xnew
return False, x
@staticmethod
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
        Conjugate Gradient
"""
p = b.copy()
r = b.copy()
x = numpy.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print titlestr % ("iter", "residual norm", "soln norm")
for i in xrange(cg_iters):
if callback is not None:
callback(x)
if verbose: print fmtstr % (i, rdotr, numpy.linalg.norm(x))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v * p
r -= v * z
newrdotr = r.dot(r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print fmtstr % (i + 1, rdotr, numpy.linalg.norm(x))
return x
# ====================================================================== #
# Save & Load
# ====================================================================== #
def save(self, history, it):
_params = OrderedDict()
_params = unzip(self.tparams, _params)
_params = unzip(self.tparams_b, _params)
print 'save the policy network >> {}'.format(self.model)
numpy.savez('%s.current' % (self.model),
history=history,
it=it,
**_params)
numpy.savez('{}.iter={}'.format(self.model, it),
history=history,
it=it,
**_params)
def load(self):
if os.path.exists(self.model):
print 'loading from the existing model (current)'
rmodel = numpy.load(self.model)
history = rmodel['history']
it = rmodel['it']
self.params = load_params(rmodel, self.params)
self.params_b = load_params(rmodel, self.params_b)
self.tparams = init_tparams(self.params)
self.tparams_b = init_tparams(self.params_b)
print 'the dataset need to go over {} lines'.format(it)
return history, it
else:
return [], -1
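# A self-contained sanity check (not part of the original file) for the
# conjugate-gradient helper above: for a symmetric positive-definite A,
# cg should return x with A.dot(x) close to b.
#
#   import numpy
#   A = numpy.array([[4., 1.], [1., 3.]])
#   b = numpy.array([1., 2.])
#   x = Controller.cg(lambda p: A.dot(p), b)
#   assert numpy.allclose(A.dot(x), b)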
|
nyu-dl/dl4mt-simul-trans
|
policy.py
|
Python
|
bsd-3-clause
| 23,644
|
[
"Gaussian"
] |
41c543f78fd49e54729dea809183d7436040842db2b3871af9dbfa26461ab896
|
# -*- coding: utf-8 -*-
#
#Created on Fri Apr 14 13:37:08 2017
#
#author: Elina Thibeau-Sutre
#
from .base import BaseMixture
from .base import _log_normal_matrix
from .kmeans import dist_matrix
from megamix.batch.initializations import initialization_plus_plus
from megamix.batch.initializations import initialization_k_means
import numpy as np
from scipy.misc import logsumexp
import scipy
class VariationalGaussianMixture(BaseMixture):
"""
Variational Bayesian Estimation of a Gaussian Mixture
This class allows to infer an approximate posterior distribution over the
parameters of a Gaussian mixture distribution.
The weights distribution is a Dirichlet distribution with parameter alpha
(see Bishop's book p474-486)
Parameters
----------
n_components : int, defaults to 1.
Number of clusters used.
init : str, defaults to 'kmeans'.
Method used in order to perform the initialization,
must be in ['random', 'plus', 'AF_KMC', 'kmeans', 'GMM'].
reg_covar : float, defaults to 1e-6
In order to avoid null covariances this float is added to the diagonal
of covariance matrices.
type_init : str, defaults to 'resp'.
The algorithm is initialized using this data (responsibilities if 'resp'
or means, covariances and weights if 'mcw').
Other parameters
----------------
alpha_0 : float, Optional | defaults to None.
The prior parameter on the weight distribution (Dirichlet).
A high value of alpha_0 will lead to equal weights, while a low value
will allow some clusters to shrink and disappear. Must be greater than 0.
If None, the value is set to 1/n_components
beta_0 : float, Optional | defaults to None.
The precision prior on the mean distribution (Gaussian).
Must be greater than 0.
If None, the value is set to 1.0
nu_0 : float, Optional | defaults to None.
The prior of the number of degrees of freedom on the covariance
distributions (Wishart). Must be greater or equal to dim.
If None, the value is set to dim
means_prior : array (dim,), Optional | defaults to None
The prior value to compute the value of the means.
If None, the value is set to the mean of points_data
cov_wishart_prior : type depends on covariance_type, Optional | defaults to None
If covariance_type is 'full' type must be array (dim,dim)
If covariance_type is 'spherical' type must be float
The prior value to compute the value of the precisions.
If None, the value is set to the covariance of points_data
Attributes
----------
name : str
The name of the method : 'VBGMM'
alpha : array of floats (n_components,)
Contains the parameters governing the weight distribution (Dirichlet)
beta : array of floats (n_components,)
        Contains coefficients which are multiplied with the precision matrices
to form the precision matrix on the Gaussian distribution of the means.
nu : array of floats (n_components,)
Contains the number of degrees of freedom on the distribution of
covariance matrices.
_inv_prec : array of floats (n_components,dim,dim)
Contains the equivalent of the matrix W described in Bishop's book. It
is proportional to cov.
_log_det_inv_prec : array of floats (n_components,)
Contains the logarithm of the determinant of W matrices.
cov : array of floats (n_components,dim,dim)
Contains the computed covariance matrices of the mixture.
means : array of floats (n_components,dim)
Contains the computed means of the mixture.
log_weights : array of floats (n_components,)
Contains the logarithm of weights of each cluster.
iter : int
The number of iterations computed with the method fit()
convergence_criterion_data : array of floats (iter,)
Stores the value of the convergence criterion computed with data
on which the model is fitted.
convergence_criterion_test : array of floats (iter,) | if _early_stopping only
Stores the value of the convergence criterion computed with test data
if it exists.
_is_initialized : bool
Ensures that the method _initialize() has been used before using other
methods such as score() or predict_log_assignements().
Raises
------
ValueError : if the parameters are inconsistent, for example if the cluster number is negative, init_type is not in ['resp','mcw']...
References
----------
'Pattern Recognition and Machine Learning', Bishop
"""
def __init__(self, n_components=1,alpha_0=None,beta_0=None,
nu_0=None,means_prior=None,cov_wishart_prior=None,
reg_covar=1e-6,kappa=1.0,n_jobs=1,window=1):
super(VariationalGaussianMixture, self).__init__()
self.name = 'VBGMM'
self.n_components = n_components
self.covariance_type = "full"
self.reg_covar = reg_covar
self.init = 'usual'
self.alpha_0 = alpha_0
self.beta_0 = beta_0
self.nu_0 = nu_0
self._means_prior = means_prior
self._inv_prec_prior = cov_wishart_prior
self.kappa = kappa
self.window = window
self.n_jobs = n_jobs
self._is_initialized = False
self.iter = 0
self.convergence_criterion_data = []
self.convergence_criterion_test = []
self._check_common_parameters()
def _initialize_cov(self,points):
n_points,dim = points.shape
assignements = np.zeros((n_points,self.n_components))
M = dist_matrix(points,self.means)
for i in range(n_points):
index_min = np.argmin(M[i]) #the cluster number of the ith point is index_min
if (isinstance(index_min,np.int64)):
assignements[i][index_min] = 1
else: #Happens when two points are equally distant from a cluster mean
assignements[i][index_min[0]] = 1
S = np.zeros((self.n_components,dim,dim))
for i in range(self.n_components):
# diff = point.reshape(dim) - X[i]
diff = points - self.means[i]
diff_weighted = diff * assignements[:,i:i+1]
S[i] = np.dot(diff_weighted.T,diff)
S[i].flat[::dim+1] += self.reg_covar
S /= n_points
self.cov = S * self.n_components
def _initialize_weights(self,points):
n_points,_ = points.shape
log_prob = _log_normal_matrix(points,self.means,self.cov_chol,
self.covariance_type,self.n_jobs)
log_prob_norm = logsumexp(log_prob, axis=1)
log_resp = log_prob - log_prob_norm[:,np.newaxis]
self.log_weights = logsumexp(log_resp,axis=0) + np.log(n_points)
def initialize(self,points,init_choice='plus',n_init=1):
"""
This method initializes the Gaussian Mixture by setting the values of
the means, covariances and weights.
Parameters
----------
        points : an array (n_points,dim)
            Data on which the model is fitted.
        init_choice : str | defaults to 'plus'
            Initialization used for the means ('plus' or 'kmeans').
        n_init : int | defaults to 1
            Number of initializations tried; the one with the lowest
            distortion is kept.
"""
n_points,dim = points.shape
self._check_prior_parameters(points)
if self.init == 'usual':
dist_min = np.inf
for i in range(n_init):
if init_choice == 'plus':
means,dist = initialization_plus_plus(self.n_components,points,info=True)
elif init_choice == 'kmeans':
means,_,dist = initialization_k_means(self.n_components,points,info=True)
if dist < dist_min:
dist_min = dist
self.means = means
self._initialize_cov(points)
# Computation of self.cov_chol
self.cov_chol = np.empty(self.cov.shape)
for i in range(self.n_components):
self.cov_chol[i],inf = scipy.linalg.lapack.dpotrf(self.cov[i],lower=True)
if not self.init == 'read_and_init':
self._initialize_weights(points)
self.iter = n_points + 1
# Hyperparameters
weights = np.exp(self.log_weights)
self.alpha = self.alpha_0 + weights
self.beta = self.beta_0 + weights
self.nu = self.nu_0 + weights
# Sufficient statistics
self.N = weights/n_points
self.X = self.means * self.N[:,np.newaxis]
self.S = self.cov * self.N[:,np.newaxis,np.newaxis]
# Matrix W
self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis]
self.iter = n_points + 1
self._is_initialized = True
return self.n_components
def _step_E(self, points):
"""
        In this step the algorithm evaluates the responsibilities of each point in each cluster.
Parameters
----------
points : an array (n_points,dim)
Returns
-------
log_resp: an array (n_points,n_components)
an array containing the logarithm of the responsibilities.
log_prob_norm : an array (n_points,)
logarithm of the probability of each sample in points
"""
n_points,dim = points.shape
log_gaussian = _log_normal_matrix(points,self.means,self.cov_chol,self.covariance_type,self.n_jobs)
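        # The next lines assemble (a sketch of the correspondence) Bishop's
        # variational E-step terms (PRML ch. 10): a digamma sum standing in
        # for E[log det(precision)] plus a dim/beta correction for the
        # uncertainty in the means.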
digamma_sum = np.sum(scipy.special.psi(.5 * (self.nu - np.arange(0, dim)[:,np.newaxis])),0)
log_lambda = digamma_sum + dim * np.log(2) + dim/self.beta
log_prob = self.log_weights + log_gaussian + 0.5 * (log_lambda - dim * np.log(self.nu))
log_prob_norm = logsumexp(log_prob, axis=1)
log_resp = log_prob - log_prob_norm[:,np.newaxis]
return log_prob_norm,log_resp
def _estimate_wishart_full(self,N,X,S):
"""
        This method computes the new value of _inv_prec with the given parameters
(in the case of full covariances)
Parameters
----------
N : an array (n_components,)
the empirical weights
X_barre: an array (n_components,dim)
the empirical means
S: an array (n_components,dim,dim)
the empirical covariances
"""
for i in range(self.n_components):
diff = X[i] - self._means_prior
product = self.beta_0 * N[i]/self.beta[i] * np.outer(diff,diff)
self._inv_prec[i] = self._inv_prec_prior + N[i] * S[i] + product
def _estimate_wishart_spherical(self,N,X,S):
"""
        This method computes the new value of _inv_prec with the given parameters
(in the case of spherical covariances)
Parameters
----------
N : an array (n_components,)
the empirical weights
        X : an array (n_components,dim)
the empirical means
S: an array (n_components,dim,dim)
the empirical covariances
"""
for i in range(self.n_components):
diff = X[i] - self._means_prior
product = self.beta_0 * N[i] / self.beta[i] * np.mean(np.square(diff), 1)
self._inv_prec[i] = self._inv_prec_prior + N[i] * S[i] + product
# To test
def _step_M(self):
"""
In this step the algorithm updates the values of the parameters (means, covariances,
alpha, beta, nu).
"""
_,dim = self.X.shape
weights = self.N * self.iter
#Parameters update
self.alpha = self.alpha_0 + weights
self.beta = self.beta_0 + weights
self.nu = self.nu_0 + weights
# Weights update
self.log_weights = scipy.special.psi(self.alpha) - scipy.special.psi(np.sum(self.alpha))
# Means update
means = self.X / self.N[:,np.newaxis]
self.means = (self.beta_0 * self._means_prior + weights[:,np.newaxis] * means) / self.beta[:, np.newaxis]
# Covariance update
cov = self.S / self.N[:,np.newaxis,np.newaxis]
if self.covariance_type=="full":
self._estimate_wishart_full(weights,means,cov)
self.cov = self._inv_prec / self.nu[:,np.newaxis,np.newaxis]
for i in range(self.n_components):
self.cov_chol[i],err = scipy.linalg.lapack.dpotrf(self.cov[i],lower=True)
if err:
raise ValueError('Error while computing the cholesky factorization')
elif self.covariance_type=="spherical":
self._estimate_wishart_spherical(weights,means,cov)
self.cov = self._inv_prec / self.nu
self.cov_chol = np.sqrt(self.cov)
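    # Illustrative sketch (comment only, not original code): dpotrf returns the
    # Cholesky factor together with a LAPACK status flag, which is why the full
    # branch above checks err. For example:
    #
    #   A = np.array([[4.0, 2.0], [2.0, 3.0]])
    #   L, info = scipy.linalg.lapack.dpotrf(A, lower=True)
    #   assert info == 0 and np.allclose(np.tril(L).dot(np.tril(L).T), A)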
def _sufficient_statistics(self,points,log_resp):
n_points,dim = points.shape
resp = np.exp(log_resp)
# New sufficient statistics
N = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
N /= n_points
X = np.dot(resp.T,points)
X /= n_points
S = np.zeros((self.n_components,dim,dim))
for i in range(self.n_components):
# diff = point.reshape(dim) - X[i]
diff = points - self.means[i]
diff_weighted = diff * resp[:,i:i+1]
S[i] = np.dot(diff_weighted.T,diff)
S[i].flat[::dim+1] += self.reg_covar
S /= n_points
# Sufficient statistics update
gamma = 1/((self.iter + n_points//2)**self.kappa)
self.N = (1-gamma)*self.N + gamma*N
self.X = (1-gamma)*self.X + gamma*X
self.S = (1-gamma)*self.S + gamma*S
# def _convergence_criterion_simplified(self,points,log_resp,log_prob_norm):
# """
# Compute the lower bound of the likelihood using the simplified Bishop's
# book formula. Can only be used with data which fits the model.
#
#
# Parameters
# ----------
# points : an array (n_points,dim)
#
# log_resp: an array (n_points,n_components)
# an array containing the logarithm of the responsibilities.
#
# log_prob_norm : an array (n_points,)
# logarithm of the probability of each sample in points
#
# Returns
# -------
# result : float
# the lower bound of the likelihood
#
# """
#
# resp = np.exp(log_resp)
# n_points,dim = points.shape
#
# prec = np.linalg.inv(self._inv_prec)
# prec_prior = np.linalg.inv(self._inv_prec_prior)
#
# lower_bound = np.zeros(self.n_components)
#
# for i in range(self.n_components):
#
# lower_bound[i] = _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i])
#
# resp_i = resp[:,i:i+1]
# log_resp_i = log_resp[:,i:i+1]
#
# lower_bound[i] -= np.sum(resp_i*log_resp_i)
# lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i]))
#
# result = np.sum(lower_bound)
# result += _log_C(self.alpha_0 * np.ones(self.n_components)) - _log_C(self.alpha)
# result -= n_points * dim * 0.5 * np.log(2*np.pi)
#
# return result
#
# def _convergence_criterion(self,points,log_resp,log_prob_norm):
# """
# Compute the lower bound of the likelihood using the Bishop's book formula.
# The formula cannot be simplified (as it is done in scikit-learn) as we also
# use it to calculate the lower bound of test points, in this case no
# simplification can be done.
#
#
# Parameters
# ----------
# points : an array (n_points,dim)
#
# log_resp: an array (n_points,n_components)
# an array containing the logarithm of the responsibilities.
#
# log_prob_norm : an array (n_points,)
# logarithm of the probability of each sample in points
#
# Returns
# -------
# result : float
# the lower bound of the likelihood
#
# """
#
# resp = np.exp(log_resp)
# n_points,dim = points.shape
#
# # Convenient statistics
# N = np.exp(logsumexp(log_resp,axis=0)) + 10*np.finfo(resp.dtype).eps #Array (n_components,)
# X_barre = np.tile(1/N, (dim,1)).T * np.dot(resp.T,points) #Array (n_components,dim)
# S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs)
#
# prec = np.linalg.inv(self._inv_prec)
# prec_prior = np.linalg.inv(self._inv_prec_prior)
#
# lower_bound = np.zeros(self.n_components)
#
# for i in range(self.n_components):
#
# digamma_sum = np.sum(scipy.special.psi(.5 * (self.nu[i] - np.arange(0, dim)[:,np.newaxis])),0)
# log_det_prec_i = digamma_sum + dim * np.log(2) - self._log_det_inv_prec[i] #/!\ Inverse
#
# #First line
# lower_bound[i] = log_det_prec_i - dim/self.beta[i] - self.nu[i]*np.trace(np.dot(S[i],prec[i]))
# diff = X_barre[i] - self.means[i]
# lower_bound[i] += -self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T))
# lower_bound[i] *= 0.5 * N[i]
#
# #Second line
# lower_bound[i] += (self.alpha_0 - self.alpha[i]) * self.log_weights[i]
# lower_bound[i] += _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i])
#
# resp_i = resp[:,i:i+1]
# log_resp_i = log_resp[:,i:i+1]
#
# lower_bound[i] += np.sum(resp_i) * self.log_weights[i] - np.sum(resp_i*log_resp_i)
# lower_bound[i] += 0.5 * (self.nu_0 - self.nu[i]) * log_det_prec_i
# lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i]))
# lower_bound[i] += dim*0.5*(1 - self.beta_0/self.beta[i] + self.nu[i])
#
# #Third line without the last term which is not summed
# diff = self.means[i] - self._means_prior
# lower_bound[i] += -0.5*self.beta_0*self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T))
# lower_bound[i] += -0.5*self.nu[i]*np.trace(np.dot(self._inv_prec_prior,prec[i]))
#
# result = np.sum(lower_bound)
# result += _log_C(self.alpha_0 * np.ones(self.n_components))- _log_C(self.alpha)
# result -= n_points * dim * 0.5 * np.log(2*np.pi)
#
# return result
def _get_parameters(self):
return (self.log_weights, self.means, self.cov,
self.alpha, self.beta, self.nu)
def _set_parameters(self, params,verbose=True):
(self.log_weights, self.means, self.cov,
self.alpha, self.beta, self.nu )= params
# Matrix W
self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis]
self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec))
if self.n_components != len(self.means) and verbose:
print('The number of components changed')
self.n_components = len(self.means)
def _limiting_model(self,points):
n_points,dim = points.shape
log_resp = self.predict_log_resp(points)
_,n_components = log_resp.shape
exist = np.zeros(n_components)
for i in range(n_points):
for j in range(n_components):
if np.argmax(log_resp[i])==j:
exist[j] = 1
idx_existing = np.where(exist==1)
log_weights = self.log_weights[idx_existing]
means = self.means[idx_existing]
cov = self.cov[idx_existing]
alpha = self.alpha[idx_existing]
beta = self.beta[idx_existing]
nu = self.nu[idx_existing]
params = (log_weights, means, cov,
alpha, beta, nu)
return params
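# Illustrative usage sketch (appended for exposition; the import path and class
# name below are assumptions inferred from this file's location, not a verified
# API):
#
#   import numpy as np
#   from megamix.online import VariationalGaussianMixture
#   points = np.random.randn(1000, 2)
#   gm = VariationalGaussianMixture(n_components=3)
#   gm.initialize(points)   # k-means++-style initialization as defined above
#   gm.fit(points)          # online E-step / M-step passes over the data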
|
14thibea/megamix
|
megamix/online/VBGMM.py
|
Python
|
apache-2.0
| 20,937
|
[
"Gaussian"
] |
e876076f8c38f6d72ee02b29db79632790f2f59db680aa25e660dba33f1052f5
|
from sklearn.preprocessing import *
import pandas as pd
from sys import argv
file_path = argv[1]
verbose = argv[2] == "true"
scaler = argv[3]
#####################################################################################
# Scaler Options
# (https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html)
# standard = StandardScaler
# minmax = MinMaxScaler
# maxabs = MaxAbsScaler
# power = PowerTransformer
# quantnorm = QuantileTransformer w/ normal output
# quantunif = QuantileTransformer w/ uniform output
# normalizer = Normalizer
# robust = RobustScaler
#####################################################################################
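# A dict-based dispatch is a compact alternative to the if/elif chain below
# (illustrative sketch only; the two QuantileTransformer variants would still
# need their output_distribution keyword):
#
#   SCALERS = {'standard': StandardScaler, 'minmax': MinMaxScaler,
#              'maxabs': MaxAbsScaler, 'power': PowerTransformer,
#              'normalizer': Normalizer, 'robust': RobustScaler}
#   transformer = SCALERS.get(scaler, RobustScaler)()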
def scale_data(file_path, scaler, verbose):
scaler = scaler.lower()
data = pd.read_csv(file_path, sep='\t', index_col=0)
index = data.index
# Ignore columns that don't contain numerical values
non_num_columns = []
dtypes = data.columns.to_series().groupby(data.dtypes).groups
dtypes = {k.name: list(v) for k, v in dtypes.items()}
for dtype in dtypes.keys():
if 'float' not in dtype and 'int' not in dtype:
non_num_columns += dtypes[dtype]
if verbose and len(non_num_columns) > 0:
print('The following columns do not contain numerical values and will not be scaled:')
print('\t', ', '.join(sorted(list(set(non_num_columns)))))
# Add the class column, which should never be scaled
if "Class" in data:
non_num_columns.append('Class')
# # Ignore columns that contain numbers, but where fewer than half are distinct values
# categorical_columns = []
# num_unique = data.columns.to_series().groupby(data.nunique()).groups
# num_unique = {k: list(v) for k, v in num_unique.items()}
#
# for num in num_unique.keys():
# if num < len(index) * 0.5:
# categorical_columns += num_unique[num]
# if verbose and len(categorical_columns) > 0:
# print('In the following columns, fewer than half are distinct values, so they will not be scaled:')
# print('\t', ', '.join(categorical_columns))
#
# excluded_columns = list(set(non_num_columns + categorical_columns))
excluded_data = data[non_num_columns]
scaled_data = data.drop(non_num_columns, axis=1)
scaled_columns = scaled_data.columns
if len(scaled_columns) > 0:
if scaler == 'standard':
if verbose:
print("Applying the standard scaler (mean of zero, unit variance)")
transformer = StandardScaler()
elif scaler == 'minmax':
if verbose:
print("Applying the min-max scaler")
transformer = MinMaxScaler()
elif scaler == 'maxabs':
if verbose:
print("Applying the max absolute scaler")
transformer = MaxAbsScaler()
elif scaler == 'power':
if verbose:
print("Applying the power transformer for scaling")
transformer = PowerTransformer()
elif scaler == 'quantnorm':
if verbose:
print('Quantile scaling to the normal (Gaussian) distribution')
transformer = QuantileTransformer(output_distribution='normal')
elif scaler == 'quantunif':
if verbose:
print('Quantile scaling to the uniform distribution')
transformer = QuantileTransformer(output_distribution='uniform')
elif scaler == 'normalizer':
if verbose:
print('Scaling using the normalizer approach')
transformer = Normalizer()
else:
if verbose:
print('Scaling using the robust approach')
transformer = RobustScaler()
scaled_data = transformer.fit_transform(scaled_data)
scaled_data = pd.DataFrame(scaled_data, index=index, columns=scaled_columns)
data = pd.concat([scaled_data, excluded_data], axis=1, join='inner')
if verbose:
print('Saving scaled version of data to {}'.format(file_path))
data.to_csv(file_path, index_label='', sep='\t', compression="gzip")
else:
if verbose:
print('No columns to be scaled, no action was performed')
scale_data(file_path, scaler, verbose)
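# Usage sketch (illustrative; assumes this script is saved as Scale.py and that
# the input is a tab-separated file with an index column, as read above):
#
#   python Scale.py data.tsv true standard   # verbose, StandardScaler
#   python Scale.py data.tsv false robust    # quiet, RobustScaler (the default branch)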
|
srp33/ShinyLearner
|
scripts/Scale.py
|
Python
|
mit
| 4,259
|
[
"Gaussian"
] |
f34d1f4420e1e6084d6dcb9535390aaffd977e67a7b15fda9740305910356a2e
|
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
red= (1,0,0)
green= (0,1,0)
blue= (0,0,1)
cyan= (0,1,1)
yellow= (1,1,0)
pink = ( float(255)/255,float(192)/255,float(203)/255)
grey = ( float(127)/255,float(127)/255,float(127)/255)
orange = ( float(255)/255,float(165)/255,float(0)/255)
#OCType = Enum('black', 'grey', 'white')
OCTMax = 8
def buildOCTree(volume, nodecenter=cam.Point(0,0,0), level=0):
# build octree of volume, return root node
node = OCTNode( level, center = nodecenter , type = 1, childlist=None)
flags = []
for n in xrange(0,9): # test all points
flags.append( volume.isInside( node.nodePoint(n) ) )
if (sum(flags) == 0): # nothing is inside
node.type = 0
#print "nothing inside!"
return node
if (sum(flags) == 9): # everything is inside
node.type = 2
#print "all inside!"
return node
if level== OCTMax: # reached max levels
return node #OCTNode(level, center= nodecenter, type = 2, childlist = None)
# have to subdivide:
childs = []
child_centers = []
for n in xrange(1,9):
child_center = node.childCenter(n)
childs.append( buildOCTree( volume , nodecenter = child_center, level= level+1) )
node.setChildren(childs)
return node
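# Illustrative sketch (comment only, not original code): building and walking a
# tree over the sphere volume defined further below:
#
#   tree = buildOCTree(Volume())      # recurses until OCTMax or a uniform node
#   leaves = []
#   searchOCTree(tree, leaves)        # collects the leaf nodes
#   print len(leaves), "leaf nodes"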
def searchOCTree(node, list):
# return list of nodes in the whole tree starting at node
if node.children is not None:
for chi in node.children:
searchOCTree(chi, list)
else:
list.append(node)
class Volume():
def __init__(self):
self.center = cam.Point(0,0,0)
self.radius = 0.45
def isInside(self, point):
p = point - self.center
if p.norm() < self.radius:
return 1
else:
return 0
class OCTNode():
    def __init__(self, level=0, center=cam.Point(0,0,0), type = None, childlist=None):  # None avoids a shared mutable default
self.level = level
self.center = cam.Point(center)
self.scale = float(1) / (2**level)
self.children = childlist
self.type = type
def setChildren(self, list):
self.children = list
def posDir(self, index):
if index==0:
return cam.Point(0,0,0)
if index==1:
return cam.Point(1,1,1)
if index==2:
return cam.Point(-1,1,1)
if index==3:
return cam.Point(1,-1,1)
if index==4:
return cam.Point(1,1,-1)
if index==5:
return cam.Point(1,-1,-1)
if index==6:
return cam.Point(-1,1,-1)
if index==7:
return cam.Point(-1,-1,1)
if index==8:
return cam.Point(-1,-1,-1)
def nodePoint(self, index):
return self.center + 1.0 * self.scale * self.posDir(index)
def childCenter(self, index):
return self.center + 0.5 * self.scale * self.posDir(index)
def nodeColor(oct):
offset = 2
n = oct.level-offset
return (float(n)/(OCTMax-offset), float(OCTMax-offset - n)/(OCTMax-offset), 0)
def addNodes(myscreen, oct):
if oct.type == 1:
return # don't draw intermediate nodes
p = []
for n in xrange(1,9):
p1 = oct.nodePoint(n)
p.append(p1)
lines = []
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[1].x,p[1].y,p[1].z)) )
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[2].x,p[2].y,p[2].z)) )
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[2].x,p[2].y,p[2].z),p2=(p[4].x,p[4].y,p[4].z)) )
lines.append ( camvtk.Line(p1=(p[1].x,p[1].y,p[1].z),p2=(p[5].x,p[5].y,p[5].z)) )
lines.append ( camvtk.Line(p1=(p[1].x,p[1].y,p[1].z),p2=(p[6].x,p[6].y,p[6].z)) )
lines.append ( camvtk.Line(p1=(p[2].x,p[2].y,p[2].z),p2=(p[6].x,p[6].y,p[6].z)) )
lines.append ( camvtk.Line(p1=(p[6].x,p[6].y,p[6].z),p2=(p[7].x,p[7].y,p[7].z)) )
lines.append ( camvtk.Line(p1=(p[4].x,p[4].y,p[4].z),p2=(p[7].x,p[7].y,p[7].z)) )
lines.append ( camvtk.Line(p1=(p[4].x,p[4].y,p[4].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[5].x,p[5].y,p[5].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[5].x,p[5].y,p[5].z),p2=(p[7].x,p[7].y,p[7].z)) )
if oct.type == 0:
color = camvtk.grey
if oct.type == 1:
color = camvtk.green
if oct.type == 2:
color = nodeColor(oct)
for li in lines:
li.SetColor( color )
if oct.type==0:
li.SetOpacity(0.2)
myscreen.addActor(li)
if __name__ == "__main__":
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(5, 3, 2)
myscreen.camera.SetFocalPoint(0,0, 0)
xar = camvtk.Arrow(color=red, rotXYZ=(0,0,0))
#myscreen.addActor(xar)
yar = camvtk.Arrow(color=green, rotXYZ=(0,0,90))
#myscreen.addActor(yar)
zar = camvtk.Arrow(color=blue, rotXYZ=(0,-90,0))
#myscreen.addActor(zar)
oct = OCTNode(level=0)
testvol = Volume()
print "building tree...",
tree = buildOCTree(testvol)
print "done."
print tree
list =[]
searchOCTree(tree, list)
print len(list), " nodes in tree"
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
t2.SetPos( (myscreen.width-200, 30) )
myscreen.addActor( t2)
n = 0
for node in list:
addNodes(myscreen, node)
if (n%50) == 0:
nodetext = "Nodes: %5i" % (n)
t2.SetText(nodetext)
t.SetText("OpenCAMLib 10.03-beta " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.render()
myscreen.camera.Azimuth( 3 )
print "frame %i of %i" % (n, len(list))
w2if.Modified()
lwr.SetFileName("frames/oct"+ ('%05d' % n)+".png")
#lwr.Write()
n = n +1
#time.sleep(0.1)
print "done!"
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
davidwusea/opencamlib
|
src/attic/oct_test1.py
|
Python
|
gpl-3.0
| 6,424
|
[
"VTK"
] |
ffa498a892eef6551298b1c7e2d3920069836df12bd25a796dfa5ede1211c170
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.spatial import cKDTree
from pandas import DataFrame
from .preprocessing import bandpass, scale_to_gamut, scalefactor_to_gamut
from .utils import record_meta, print_update, validate_tuple
from .masks import (binary_mask, N_binary_mask, r_squared_mask,
x_squared_masks, cosmask, sinmask)
from .uncertainty import _static_error, measure_noise
import trackpy # to get trackpy.__version__
from .try_numba import NUMBA_AVAILABLE
from .feature_numba import (_numba_refine_2D, _numba_refine_2D_c,
_numba_refine_2D_c_a, _numba_refine_3D)
def percentile_threshold(image, percentile):
"""Find grayscale threshold based on distribution in image."""
not_black = image[np.nonzero(image)]
if len(not_black) == 0:
return np.nan
return np.percentile(not_black, percentile)
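# Illustrative sketch (comment only, not original code): only nonzero pixels
# enter the percentile, so a black background does not drag the threshold down:
#
#   img = np.array([[0, 0, 10], [0, 20, 30]])
#   percentile_threshold(img, 50)   # -> 20.0, the median of [10, 20, 30]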
def local_maxima(image, radius, percentile=64, margin=None):
"""Find local maxima whose brightness is above a given percentile.
Parameters
----------
radius : integer definition of "local" in "local maxima"
percentile : chooses minimum grayscale value for a local maximum
margin : zone of exclusion at edges of image. Defaults to radius.
A smarter value is set by locate().
"""
if margin is None:
margin = radius
ndim = image.ndim
# Compute a threshold based on percentile.
threshold = percentile_threshold(image, percentile)
if np.isnan(threshold):
warnings.warn("Image is completely black.", UserWarning)
return np.empty((0, ndim))
# The intersection of the image with its dilation gives local maxima.
if not np.issubdtype(image.dtype, np.integer):
raise TypeError("Perform dilation on exact (i.e., integer) data.")
footprint = binary_mask(radius, ndim)
dilation = ndimage.grey_dilation(image, footprint=footprint,
mode='constant')
maxima = np.vstack(np.where((image == dilation) & (image > threshold))).T
if not np.size(maxima) > 0:
warnings.warn("Image contains no local maxima.", UserWarning)
return np.empty((0, ndim))
# Do not accept peaks near the edges.
shape = np.array(image.shape)
near_edge = np.any((maxima < margin) | (maxima > (shape - margin - 1)), 1)
maxima = maxima[~near_edge]
if not np.size(maxima) > 0:
warnings.warn("All local maxima were in the margins.", UserWarning)
    # Return coords as a numpy array shaped so it can be passed directly
# to the DataFrame constructor.
return maxima
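# Illustrative sketch (comment only, not original code; assumes binary_mask
# accepts a scalar radius, as this function's docstring suggests): a pixel is a
# local maximum exactly when it equals the grey dilation of the image there:
#
#   img = np.array([[1, 2, 1], [2, 9, 2], [1, 2, 1]], dtype=np.uint8)
#   fp = binary_mask(1, 2)
#   dil = ndimage.grey_dilation(img, footprint=fp, mode='constant')
#   np.vstack(np.where(img == dil)).T   # -> array([[1, 1]]), the 9 at center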
def estimate_mass(image, radius, coord):
"Compute the total brightness in the neighborhood of a local maximum."
square = [slice(c - rad, c + rad + 1) for c, rad in zip(coord, radius)]
neighborhood = binary_mask(radius, image.ndim)*image[square]
return np.sum(neighborhood)
def estimate_size(image, radius, coord, estimated_mass):
"Compute the total brightness in the neighborhood of a local maximum."
square = [slice(c - rad, c + rad + 1) for c, rad in zip(coord, radius)]
neighborhood = binary_mask(radius, image.ndim)*image[square]
Rg = np.sqrt(np.sum(r_squared_mask(radius, image.ndim) * neighborhood) /
estimated_mass)
return Rg
def _safe_center_of_mass(x, radius, grids):
normalizer = x.sum()
if normalizer == 0: # avoid divide-by-zero errors
return np.array(radius)
return np.array([(x * grids[dim]).sum() / normalizer
for dim in range(x.ndim)])
def refine(raw_image, image, radius, coords, separation=0, max_iterations=10,
engine='auto', characterize=True, walkthrough=False):
"""Find the center of mass of a bright feature starting from an estimate.
Characterize the neighborhood of a local maximum, and iteratively
hone in on its center-of-brightness. Return its coordinates, integrated
brightness, size (Rg), eccentricity (0=circular), and signal strength.
Parameters
----------
raw_image : array (any dimensions)
used for final characterization
image : array (any dimension)
processed image, used for locating center of mass
coord : array
estimated position
max_iterations : integer
max number of loops to refine the center of mass, default 10
characterize : boolean, True by default
Compute and return mass, size, eccentricity, signal.
walkthrough : boolean, False by default
Print the offset on each loop and display final neighborhood image.
engine : {'python', 'numba'}
Numba is faster if available, but it cannot do walkthrough.
"""
# ensure that radius is tuple of integers, for direct calls to refine()
radius = validate_tuple(radius, image.ndim)
# Main loop will be performed in separate function.
if engine == 'auto':
if NUMBA_AVAILABLE and image.ndim in [2, 3]:
engine = 'numba'
else:
engine = 'python'
if engine == 'python':
coords = np.array(coords) # a copy, will not modify in place
results = _refine(raw_image, image, radius, coords, max_iterations,
characterize, walkthrough)
elif engine == 'numba':
if not NUMBA_AVAILABLE:
warnings.warn("numba could not be imported. Without it, the "
"'numba' engine runs very slow. Use the 'python' "
"engine or install numba.", UserWarning)
if image.ndim not in [2, 3]:
raise NotImplementedError("The numba engine only supports 2D or 3D "
"images. You can extend it if you feel "
"like a hero.")
if walkthrough:
raise ValueError("walkthrough is not availabe in the numba engine")
# Do some extra prep in pure Python that can't be done in numba.
coords = np.array(coords, dtype=np.float64)
N = coords.shape[0]
mask = binary_mask(radius, image.ndim)
if image.ndim == 3:
if characterize:
if np.all(radius[1:] == radius[:-1]):
results_columns = 8
else:
results_columns = 10
else:
results_columns = 4
r2_mask = r_squared_mask(radius, image.ndim)[mask]
x2_masks = x_squared_masks(radius, image.ndim)
z2_mask = image.ndim * x2_masks[0][mask]
y2_mask = image.ndim * x2_masks[1][mask]
x2_mask = image.ndim * x2_masks[2][mask]
results = np.empty((N, results_columns), dtype=np.float64)
maskZ, maskY, maskX = np.asarray(np.asarray(mask.nonzero()),
dtype=np.int16)
_numba_refine_3D(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], radius[2], coords, N,
int(max_iterations), characterize,
image.shape[0], image.shape[1], image.shape[2],
maskZ, maskY, maskX, maskX.shape[0],
r2_mask, z2_mask, y2_mask, x2_mask, results)
elif not characterize:
mask_coordsY, mask_coordsX = np.asarray(mask.nonzero(), dtype=np.int16)
results = np.empty((N, 3), dtype=np.float64)
_numba_refine_2D(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], coords, N,
int(max_iterations),
image.shape[0], image.shape[1],
mask_coordsY, mask_coordsX, mask_coordsY.shape[0],
results)
elif radius[0] == radius[1]:
mask_coordsY, mask_coordsX = np.asarray(mask.nonzero(), dtype=np.int16)
results = np.empty((N, 7), dtype=np.float64)
r2_mask = r_squared_mask(radius, image.ndim)[mask]
cmask = cosmask(radius)[mask]
smask = sinmask(radius)[mask]
_numba_refine_2D_c(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], coords, N,
int(max_iterations),
image.shape[0], image.shape[1],
mask_coordsY, mask_coordsX, mask_coordsY.shape[0],
r2_mask, cmask, smask, results)
else:
mask_coordsY, mask_coordsX = np.asarray(mask.nonzero(), dtype=np.int16)
results = np.empty((N, 8), dtype=np.float64)
x2_masks = x_squared_masks(radius, image.ndim)
y2_mask = image.ndim * x2_masks[0][mask]
x2_mask = image.ndim * x2_masks[1][mask]
cmask = cosmask(radius)[mask]
smask = sinmask(radius)[mask]
_numba_refine_2D_c_a(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], coords, N,
int(max_iterations),
image.shape[0], image.shape[1],
mask_coordsY, mask_coordsX, mask_coordsY.shape[0],
y2_mask, x2_mask, cmask, smask, results)
else:
raise ValueError("Available engines are 'python' and 'numba'")
# Flat peaks return multiple nearby maxima. Eliminate duplicates.
if np.all(np.greater(separation, 0)):
mass_index = image.ndim # i.e., index of the 'mass' column
while True:
# Rescale positions, so that pairs are identified below a distance
# of 1. Do so every iteration (room for improvement?)
positions = results[:, :mass_index]/list(reversed(separation))
mass = results[:, mass_index]
duplicates = cKDTree(positions, 30).query_pairs(1)
if len(duplicates) == 0:
break
to_drop = []
for pair in duplicates:
# Drop the dimmer one.
if np.equal(*mass.take(pair, 0)):
# Rare corner case: a tie!
# Break ties by sorting by sum of coordinates, to avoid
# any randomness resulting from cKDTree returning a set.
dimmer = np.argsort(np.sum(positions.take(pair, 0), 1))[0]
else:
dimmer = np.argmin(mass.take(pair, 0))
to_drop.append(pair[dimmer])
results = np.delete(results, to_drop, 0)
return results
# (This is pure Python. A numba variant follows below.)
def _refine(raw_image, image, radius, coords, max_iterations,
characterize, walkthrough):
SHIFT_THRESH = 0.6
GOOD_ENOUGH_THRESH = 0.005
ndim = image.ndim
isotropic = np.all(radius[1:] == radius[:-1])
mask = binary_mask(radius, ndim)
slices = [[slice(c - rad, c + rad + 1) for c, rad in zip(coord, radius)]
for coord in coords]
# Declare arrays that we will fill iteratively through loop.
N = coords.shape[0]
final_coords = np.empty_like(coords, dtype=np.float64)
mass = np.empty(N, dtype=np.float64)
raw_mass = np.empty(N, dtype=np.float64)
if characterize:
if isotropic:
Rg = np.empty(N, dtype=np.float64)
else:
Rg = np.empty((N, len(radius)), dtype=np.float64)
ecc = np.empty(N, dtype=np.float64)
signal = np.empty(N, dtype=np.float64)
ogrid = np.ogrid[[slice(0, i) for i in mask.shape]] # for center of mass
ogrid = [g.astype(float) for g in ogrid]
for feat in range(N):
coord = coords[feat]
# Define the circular neighborhood of (x, y).
rect = slices[feat]
neighborhood = mask*image[rect]
cm_n = _safe_center_of_mass(neighborhood, radius, ogrid)
cm_i = cm_n - radius + coord # image coords
allow_moves = True
for iteration in range(max_iterations):
off_center = cm_n - radius
if walkthrough:
print_update(off_center)
if np.all(np.abs(off_center) < GOOD_ENOUGH_THRESH):
break # Accurate enough.
# If we're off by more than half a pixel in any direction, move.
elif np.any(np.abs(off_center) > SHIFT_THRESH) & allow_moves:
# In here, coord is an integer.
new_coord = coord
new_coord[off_center > SHIFT_THRESH] += 1
new_coord[off_center < -SHIFT_THRESH] -= 1
# Don't move outside the image!
upper_bound = np.array(image.shape) - 1 - radius
new_coord = np.clip(new_coord, radius, upper_bound).astype(int)
# Update slice to shifted position.
rect = [slice(c - rad, c + rad + 1)
for c, rad in zip(new_coord, radius)]
neighborhood = mask*image[rect]
# If we're off by less than half a pixel, interpolate.
else:
# Here, coord is a float. We are off the grid.
neighborhood = ndimage.shift(neighborhood, -off_center,
order=2, mode='constant', cval=0)
new_coord = coord + off_center
# Disallow any whole-pixels moves on future iterations.
allow_moves = False
cm_n = _safe_center_of_mass(neighborhood, radius, ogrid) # neighborhood
cm_i = cm_n - radius + new_coord # image coords
coord = new_coord
# matplotlib and ndimage have opposite conventions for xy <-> yx.
final_coords[feat] = cm_i[..., ::-1]
if walkthrough:
import matplotlib.pyplot as plt
plt.imshow(neighborhood)
# Characterize the neighborhood of our final centroid.
mass[feat] = neighborhood.sum()
if not characterize:
continue # short-circuit loop
if isotropic:
Rg[feat] = np.sqrt(np.sum(r_squared_mask(radius, ndim) *
neighborhood) / mass[feat])
else:
Rg[feat] = np.sqrt(ndim * np.sum(x_squared_masks(radius, ndim) *
neighborhood,
axis=tuple(range(1, ndim + 1))) /
mass[feat])[::-1] # change order yx -> xy
# I only know how to measure eccentricity in 2D.
if ndim == 2:
ecc[feat] = np.sqrt(np.sum(neighborhood*cosmask(radius))**2 +
np.sum(neighborhood*sinmask(radius))**2)
ecc[feat] /= (mass[feat] - neighborhood[radius] + 1e-6)
else:
ecc[feat] = np.nan
signal[feat] = neighborhood.max() # based on bandpassed image
raw_neighborhood = mask*raw_image[rect]
raw_mass[feat] = raw_neighborhood.sum() # based on raw image
if not characterize:
return np.column_stack([final_coords, mass])
else:
return np.column_stack([final_coords, mass, Rg, ecc, signal, raw_mass])
def locate(raw_image, diameter, minmass=100., maxsize=None, separation=None,
noise_size=1, smoothing_size=None, threshold=None, invert=False,
percentile=64, topn=None, preprocess=True, max_iterations=10,
filter_before=True, filter_after=True,
characterize=True, engine='auto'):
"""Locate Gaussian-like blobs of some approximate size in an image.
Preprocess the image by performing a band pass and a threshold.
Locate all peaks of brightness, characterize the neighborhoods of the peaks
    and take only those with given total brightness ("mass"). Finally,
refine the positions of each peak.
Parameters
----------
image : image array (any dimensions)
diameter : feature size in px
This may be a single number or a tuple giving the feature's
extent in each dimension, useful when the dimensions do not have
equal resolution (e.g. confocal microscopy). The tuple order is the
same as the image shape, conventionally (z, y, x) or (y, x). The
number(s) must be odd integers. When in doubt, round up.
minmass : minimum integrated brightness
Default is 100, but a good value is often much higher. This is a
        crucial parameter for eliminating spurious features.
maxsize : maximum radius-of-gyration of brightness, default None
separation : feature separation, in pixels
Default is diameter + 1. May be a tuple, see diameter for details.
noise_size : width of Gaussian blurring kernel, in pixels
Default is 1. May be a tuple, see diameter for details.
smoothing_size : size of boxcar smoothing, in pixels
Default is diameter. May be a tuple, see diameter for details.
threshold : Clip bandpass result below this value.
Default None, passed through to bandpass.
invert : Set to True if features are darker than background. False by
default.
percentile : Features must have a peak brighter than pixels in this
percentile. This helps eliminate spurious peaks.
topn : Return only the N brightest features above minmass.
If None (default), return all features above minmass.
Returns
-------
DataFrame([x, y, mass, size, ecc, signal])
where mass means total integrated brightness of the blob,
size means the radius of gyration of its Gaussian-like profile,
and ecc is its eccentricity (0 is circular).
Other Parameters
----------------
    preprocess : Set to False to turn off bandpass preprocessing.
max_iterations : integer
max number of loops to refine the center of mass, default 10
filter_before : boolean
Use minmass (and maxsize, if set) to eliminate spurious features
based on their estimated mass and size before refining position.
True by default for performance.
filter_after : boolean
Use final characterizations of mass and size to eliminate spurious
features. True by default.
characterize : boolean
Compute "extras": eccentricity, signal, ep. True by default.
engine : {'auto', 'python', 'numba'}
See Also
--------
batch : performs location on many images in batch
Notes
-----
Locate works with a coordinate system that has its origin at the center of
pixel (0, 0). In almost all cases this will be the topleft pixel: the
y-axis is pointing downwards.
This is an implementation of the Crocker-Grier centroid-finding algorithm.
[1]_
References
----------
.. [1] Crocker, J.C., Grier, D.G. http://dx.doi.org/10.1006/jcis.1996.0217
"""
# Validate parameters and set defaults.
raw_image = np.squeeze(raw_image)
shape = raw_image.shape
ndim = len(shape)
diameter = validate_tuple(diameter, ndim)
diameter = tuple([int(x) for x in diameter])
if not np.all([x & 1 for x in diameter]):
raise ValueError("Feature diameter must be an odd integer. Round up.")
radius = tuple([x//2 for x in diameter])
isotropic = np.all(radius[1:] == radius[:-1])
if (not isotropic) and (maxsize is not None):
raise ValueError("Filtering by size is not available for anisotropic "
"features.")
if separation is None:
separation = tuple([x + 1 for x in diameter])
else:
separation = validate_tuple(separation, ndim)
if smoothing_size is None:
smoothing_size = diameter
else:
smoothing_size = validate_tuple(smoothing_size, ndim)
noise_size = validate_tuple(noise_size, ndim)
# Check whether the image looks suspiciously like a color image.
if 3 in shape or 4 in shape:
dim = raw_image.ndim
warnings.warn("I am interpreting the image as {0}-dimensional. "
"If it is actually a {1}-dimensional color image, "
"convert it to grayscale first.".format(dim, dim-1))
if preprocess:
if invert:
# It is tempting to do this in place, but if it is called multiple
# times on the same image, chaos reigns.
if np.issubdtype(raw_image.dtype, np.integer):
max_value = np.iinfo(raw_image.dtype).max
raw_image = raw_image ^ max_value
else:
# To avoid degrading performance, assume gamut is zero to one.
# Have you ever encountered an image of unnormalized floats?
raw_image = 1 - raw_image
image = bandpass(raw_image, noise_size, smoothing_size, threshold)
else:
image = raw_image.copy()
# Coerce the image into integer type. Rescale to fill dynamic range.
if np.issubdtype(raw_image.dtype, np.integer):
dtype = raw_image.dtype
else:
dtype = np.uint8
scale_factor = scalefactor_to_gamut(image, dtype)
image = scale_to_gamut(image, dtype, scale_factor)
# Set up a DataFrame for the final results.
if image.ndim < 4:
coord_columns = ['x', 'y', 'z'][:image.ndim]
else:
        coord_columns = ['x' + str(i) for i in range(image.ndim)]
MASS_COLUMN_INDEX = len(coord_columns)
columns = coord_columns + ['mass']
if characterize:
if isotropic:
SIZE_COLUMN_INDEX = len(columns)
columns += ['size']
else:
SIZE_COLUMN_INDEX = range(len(columns),
len(columns) + len(coord_columns))
columns += ['size_' + cc for cc in coord_columns]
SIGNAL_COLUMN_INDEX = len(columns) + 1
columns += ['ecc', 'signal', 'raw_mass']
if isotropic and np.all(noise_size[1:] == noise_size[:-1]):
columns += ['ep']
else:
columns += ['ep_' + cc for cc in coord_columns]
# Find local maxima.
# Define zone of exclusion at edges of image, avoiding
# - Features with incomplete image data ("radius")
# - Extended particles that cannot be explored during subpixel
# refinement ("separation")
# - Invalid output of the bandpass step ("smoothing_size")
margin = tuple([max(rad, sep // 2 - 1, sm // 2) for (rad, sep, sm) in
zip(radius, separation, smoothing_size)])
coords = local_maxima(image, radius, percentile, margin)
count_maxima = coords.shape[0]
if count_maxima == 0:
return DataFrame(columns=columns)
# Proactively filter based on estimated mass/size before
# refining positions.
if filter_before:
approx_mass = np.empty(count_maxima) # initialize to avoid appending
for i in range(count_maxima):
approx_mass[i] = estimate_mass(image, radius, coords[i])
condition = approx_mass > minmass * scale_factor
if maxsize is not None:
approx_size = np.empty(count_maxima)
for i in range(count_maxima):
approx_size[i] = estimate_size(image, radius, coords[i],
approx_mass[i])
condition &= approx_size < maxsize
coords = coords[condition]
count_qualified = coords.shape[0]
if count_qualified == 0:
warnings.warn("No maxima survived mass- and size-based prefiltering.")
return DataFrame(columns=columns)
# Refine their locations and characterize mass, size, etc.
refined_coords = refine(raw_image, image, radius, coords, separation,
max_iterations, engine, characterize)
    # mass and signal values have to be corrected due to the rescaling;
    # raw_mass was obtained from the raw image; size and ecc are scale-independent
refined_coords[:, MASS_COLUMN_INDEX] *= 1. / scale_factor
if characterize:
refined_coords[:, SIGNAL_COLUMN_INDEX] *= 1. / scale_factor
# Filter again, using final ("exact") mass -- and size, if set.
exact_mass = refined_coords[:, MASS_COLUMN_INDEX]
if filter_after:
condition = exact_mass > minmass
if maxsize is not None:
exact_size = refined_coords[:, SIZE_COLUMN_INDEX]
condition &= exact_size < maxsize
refined_coords = refined_coords[condition]
exact_mass = exact_mass[condition] # used below by topn
count_qualified = refined_coords.shape[0]
if count_qualified == 0:
warnings.warn("No maxima survived mass- and size-based filtering.")
return DataFrame(columns=columns)
if topn is not None and count_qualified > topn:
if topn == 1:
# special case for high performance and correct shape
refined_coords = refined_coords[np.argmax(exact_mass)]
refined_coords = refined_coords.reshape(1, -1)
else:
refined_coords = refined_coords[np.argsort(exact_mass)][-topn:]
# Estimate the uncertainty in position using signal (measured in refine)
# and noise (measured here below).
if characterize:
if preprocess: # reuse processed image to increase performance
black_level, noise = measure_noise(raw_image, diameter,
threshold, image)
else:
black_level, noise = measure_noise(raw_image, diameter, threshold)
Npx = N_binary_mask(radius, ndim)
mass = refined_coords[:, SIGNAL_COLUMN_INDEX + 1] - Npx * black_level
ep = _static_error(mass, noise, radius[::-1], noise_size[::-1])
refined_coords = np.column_stack([refined_coords, ep])
f = DataFrame(refined_coords, columns=columns)
# If this is a pims Frame object, it has a frame number.
# Tag it on; this is helpful for parallelization.
if hasattr(raw_image, 'frame_no') and raw_image.frame_no is not None:
f['frame'] = raw_image.frame_no
return f
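# Illustrative usage sketch (a minimal example; assumes the package exposes this
# function as trackpy.locate, as the trackpy import above suggests):
#
#   import trackpy as tp
#   f = tp.locate(frame, diameter=11, minmass=200)
#   f[['x', 'y', 'mass']].head()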
def batch(frames, diameter, minmass=100, maxsize=None, separation=None,
noise_size=1, smoothing_size=None, threshold=None, invert=False,
percentile=64, topn=None, preprocess=True, max_iterations=10,
filter_before=True, filter_after=True,
characterize=True, engine='auto',
output=None, meta=True):
"""Locate Gaussian-like blobs of some approximate size in a set of images.
Preprocess the image by performing a band pass and a threshold.
Locate all peaks of brightness, characterize the neighborhoods of the peaks
    and take only those with given total brightness ("mass"). Finally,
refine the positions of each peak.
Parameters
----------
frames : list (or iterable) of images
diameter : feature size in px
This may be a single number or a tuple giving the feature's
extent in each dimension, useful when the dimensions do not have
equal resolution (e.g. confocal microscopy). The tuple order is the
same as the image shape, conventionally (z, y, x) or (y, x). The
number(s) must be odd integers. When in doubt, round up.
minmass : minimum integrated brightness
Default is 100, but a good value is often much higher. This is a
        crucial parameter for eliminating spurious features.
maxsize : maximum radius-of-gyration of brightness, default None
separation : feature separation, in pixels
Default is diameter + 1. May be a tuple, see diameter for details.
noise_size : width of Gaussian blurring kernel, in pixels
Default is 1. May be a tuple, see diameter for details.
smoothing_size : size of boxcar smoothing, in pixels
Default is diameter. May be a tuple, see diameter for details.
threshold : Clip bandpass result below this value.
Default None, passed through to bandpass.
invert : Set to True if features are darker than background. False by
default.
percentile : Features must have a peak brighter than pixels in this
percentile. This helps eliminate spurious peaks.
topn : Return only the N brightest features above minmass.
If None (default), return all features above minmass.
Returns
-------
DataFrame([x, y, mass, size, ecc, signal])
where mass means total integrated brightness of the blob,
size means the radius of gyration of its Gaussian-like profile,
and ecc is its eccentricity (0 is circular).
Other Parameters
----------------
preprocess : Set to False to turn off bandpass preprocessing.
max_iterations : integer
max number of loops to refine the center of mass, default 10
filter_before : boolean
Use minmass (and maxsize, if set) to eliminate spurious features
based on their estimated mass and size before refining position.
True by default for performance.
filter_after : boolean
        Use final characterizations of mass and size to eliminate spurious
features. True by default.
characterize : boolean
Compute "extras": eccentricity, signal, ep. True by default.
engine : {'auto', 'python', 'numba'}
output : {None, trackpy.PandasHDFStore, SomeCustomClass}
If None, return all results as one big DataFrame. Otherwise, pass
results from each frame, one at a time, to the write() method
of whatever class is specified here.
meta : By default, a YAML (plain text) log file is saved in the current
        directory. You can specify a different filepath, or set False to
        disable logging.
See Also
--------
locate : performs location on a single image
Notes
-----
This is an implementation of the Crocker-Grier centroid-finding algorithm.
[1]_
Locate works with a coordinate system that has its origin at the center of
pixel (0, 0). In almost all cases this will be the topleft pixel: the
y-axis is pointing downwards.
References
----------
.. [1] Crocker, J.C., Grier, D.G. http://dx.doi.org/10.1006/jcis.1996.0217
"""
# Gather meta information and save as YAML in current directory.
timestamp = pd.datetime.utcnow().strftime('%Y-%m-%d-%H%M%S')
try:
source = frames.filename
    except AttributeError:
source = None
meta_info = dict(timestamp=timestamp,
trackpy_version=trackpy.__version__,
source=source, diameter=diameter, minmass=minmass,
maxsize=maxsize, separation=separation,
noise_size=noise_size, smoothing_size=smoothing_size,
invert=invert, percentile=percentile, topn=topn,
preprocess=preprocess, max_iterations=max_iterations,
filter_before=filter_before, filter_after=filter_after)
if meta:
if isinstance(meta, str):
filename = meta
else:
filename = 'feature_log_%s.yml' % timestamp
record_meta(meta_info, filename)
all_features = []
for i, image in enumerate(frames):
features = locate(image, diameter, minmass, maxsize, separation,
noise_size, smoothing_size, threshold, invert,
percentile, topn, preprocess, max_iterations,
filter_before, filter_after, characterize,
engine)
if hasattr(image, 'frame_no') and image.frame_no is not None:
frame_no = image.frame_no
# If this works, locate created a 'frame' column.
else:
frame_no = i
features['frame'] = i # just counting iterations
message = "Frame %d: %d features" % (frame_no, len(features))
print_update(message)
if len(features) == 0:
continue
if output is None:
all_features.append(features)
else:
output.put(features)
if output is None:
if len(all_features) > 0:
return pd.concat(all_features).reset_index(drop=True)
else: # return empty DataFrame
warnings.warn("No maxima found in any frame.")
return pd.DataFrame(columns=list(features.columns) + ['frame'])
else:
return output
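# Illustrative usage sketch (same assumptions as the locate example above; pims
# is the image-reading companion package commonly used with trackpy):
#
#   import pims
#   import trackpy as tp
#   frames = pims.ImageSequence('images/*.png')
#   f = tp.batch(frames, diameter=11, minmass=200)   # one DataFrame, all frames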
|
daniorerio/trackpy
|
trackpy/feature.py
|
Python
|
bsd-3-clause
| 32,400
|
[
"Gaussian"
] |
208797f777e34b22d86761c2d25954ae3a40e24261b329e199308ea26f1873f6
|
#-------------------------------------------------------------------------------
# Cloud-COPASI
# Copyright (c) 2013 Edward Kent.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#-------------------------------------------------------------------------------
from cloud_copasi.web_interface.task_plugins.base import BaseTask, BaseTaskForm
from cloud_copasi.web_interface.models import Task, CondorJob, CondorPool
from cloud_copasi.web_interface.models import Subtask
from django.forms import Form
from django import forms
from cloud_copasi import settings
from cloud_copasi.copasi.model import CopasiModel
from cloud_copasi.web_interface.task_plugins.plugins.raw_mode.copasi_model import RWCopasiModel
import os, math
import logging
from django.http.response import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from cloud_copasi.condor import condor_spec
from cloud_copasi.web_interface.task_plugins import load_balancing
from string import Template
import re
import datetime
from django.utils.timezone import now
log = logging.getLogger(__name__)
os.environ['HOME'] = settings.STORAGE_DIR #This needs to be set to a writable directory
import matplotlib
matplotlib.use('Agg') #Use this so matplotlib can be used on a headless server. Otherwise requires DISPLAY env variable to be set.
import matplotlib.pyplot as plt
from matplotlib.pyplot import annotate
internal_type = ('raw_mode', 'Raw mode')
class TaskForm(BaseTaskForm):
#Any extra fields for the task submission form
repeats = forms.IntegerField(required=True, min_value=1, help_text='The number of repeats to perform')
optional_data = forms.FileField(required=False, label='Optional data files', help_text='Select either a single data file, or if more than one data file is required, upload a .zip file containing multiple data files')
raw_mode_args = forms.RegexField(max_length=128, regex=re.compile(r'.*(\-\-save \$new_filename).*(\$filename).*$'), label='COPASI arguments', help_text='Arguments to add when running COPASI. Must contain <b>$filename</b> and <b>--save $new_filename</b> as arguments', widget=forms.TextInput(attrs={'size':'40'}), required=True, initial='--nologo --home . --save $new_filename $filename') #TODO: update this regex so that it won't match certain characters, e.g. ';','|', '&' etc (though perhaps this isn't necessary)
class TaskPlugin(BaseTask):
subtasks = 3
def __init__(self, task):
self.repeats = task.get_custom_field('repeats')
self.data_files = task.get_custom_field('data_files')
self.raw_mode_args = task.get_custom_field('raw_mode_args')
super(TaskPlugin, self).__init__(task)
self.copasi_model = RWCopasiModel(os.path.join(self.task.directory, self.task.original_model))
def validate(self):
#TODO:Abstract this to a new COPASI class in this plugin package
return self.copasi_model.is_valid('RW')
def initialize_subtasks(self):
#Create new subtask objects, and save them
#The main module
self.create_new_subtask('main')
#And a subtask to process any results
self.create_new_subtask('process', local=True)
self.task.result_view = False
self.task.result_download = False
self.task.save()
def prepare_subtask(self, index):
"""Prepare the indexed subtask"""
if index == 1:
return self.process_main_subtask()
elif index == 2:
return self.process_results_subtask()
else:
raise Exception('No subtasks remaining')
def process_main_subtask(self):
subtask = self.get_subtask(1)
#If no load balancing step required:
model_files, output_files = self.copasi_model.prepare_rw_jobs(self.repeats)
self.task.set_custom_field('output_files', output_files)
model_count = len(model_files)
self.task.set_custom_field('model_count', model_count)
self.task.save()
condor_pool = self.task.condor_pool
condor_job_file = self.copasi_model.prepare_rw_condor_job(condor_pool.pool_type, condor_pool.address, len(model_files), self.raw_mode_args, self.data_files, output_files, rank='0')
log.debug('Prepared copasi files %s'%model_files)
log.debug('Prepared condor job %s' %condor_job_file)
subtask.spec_file = condor_job_file
subtask.status = 'ready'
subtask.set_custom_field('job_output', '') # Job output is potentially >1 file. Currently can't check for this, so leave blank
subtask.save()
return subtask
def process_results_subtask(self):
subtask=self.get_subtask(2)
assert isinstance(subtask, Subtask)
subtask.start_time = now()
#Go through and collate the results
#This is reasonably computationally simple, so we run locally
directory = self.task.directory
output_files = self.task.get_custom_field('output_files')
model_count = self.task.get_custom_field('model_count')
collated_output_files = []
#Collate the output files back into their original name
for output_filename in output_files:
try:
output_file = open(os.path.join(directory, output_filename), 'w')
for partial_output in ['%d_%s' % (i, output_filename) for i in range(model_count)]:
partial_output_file = open(os.path.join(directory, partial_output), 'r')
for line in partial_output_file:
output_file.write(line)
partial_output_file.close()
output_file.close()
collated_output_files.append(output_filename)
            except Exception, e:
                raise e
self.task.set_custom_field('collated_output_files', collated_output_files)
if len(collated_output_files) > 0:
self.task.result_view=True
self.task.save()
subtask.status = 'finished'
subtask.finish_time = now()
subtask.set_run_time(time_delta=subtask.finish_time-subtask.start_time)
subtask.save()
return subtask
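    # Illustrative sketch (comment only, not original code): the collation loop
    # above stitches the per-job partials back into their original filenames:
    #
    #   ['%d_%s' % (i, 'output.txt') for i in range(3)]
    #   # -> ['0_output.txt', '1_output.txt', '2_output.txt']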
#===========================================================================
# Results download code. No results view page for this task
#===========================================================================
def get_results_view_template_name(self, request):
"""Return a string with the HTML code to be used in the task results view page
"""
#Get the name of the page we're displaying. If not specified, assume main
page_name = request.GET.get('name', 'main')
if page_name == 'main':
return self.get_template_name('results_view')
def get_results_view_data(self, request):
#Get the name of the page we're displaying. If not specified, assume main
page_name = request.GET.get('name', 'main')
if page_name == 'main':
collated_output_files = self.task.get_custom_field('collated_output_files')
output = {'output_files': collated_output_files}
return output
def get_results_download_data(self, request):
filename = request.GET.get('name')
        if not filename in self.task.get_custom_field('collated_output_files'):
            request.session['errors'] = [('Cannot Return Output', 'Output file not recognized')]
            return HttpResponseRedirect(reverse_lazy('task_details', kwargs={'task_id':self.task.id}))
full_filename = os.path.join(self.task.directory, filename)
if not os.path.isfile(full_filename):
request.session['errors'] = [('Cannot Return Output', 'There was an internal error processing the results file')]
return HttpResponseRedirect(reverse_lazy('task_details', kwargs={'task_id':self.task.id}))
result_file = open(full_filename, 'r')
response = HttpResponse(result_file, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=%s' % (filename.replace(' ', '_'))
response['Content-Length'] = os.path.getsize(full_filename)
return response
|
edkent/cloud-copasi
|
cloud_copasi/web_interface/task_plugins/plugins/raw_mode/plugin.py
|
Python
|
gpl-3.0
| 8,902
|
[
"COPASI"
] |
742813c08a3e39e7c2d2675d462432cad1fd8b035723d7e509259d99317b2059
|
import random
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, as_int
from sympy.core.function import count_ops
from sympy.core.decorators import call_highest_priority
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.simplify import simplify as _simplify
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
from sympy.matrices.matrices import (MatrixBase,
ShapeError, a2idx, classof)
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class DenseMatrix(MatrixBase):
is_MatrixExpr = False
_op_priority = 10.01
_class_priority = 4
def __getitem__(self, key):
"""Return portion of self defined by key. If the key involves a slice
then a list will be returned (if key is a single slice) or a matrix
(if key was a tuple involving a slice).
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix([
... [1, 2 + I],
... [3, 4 ]])
If the key is a tuple that doesn't involve a slice then that element
is returned:
>>> m[1, 0]
3
When a tuple key involves a slice, a matrix is returned. Here, the
first column is selected (all rows, column 0):
>>> m[:, 0]
Matrix([
[1],
[3]])
If the slice is not a tuple then it selects from the underlying
list of elements that are arranged in row order and a list is
returned if a slice is involved:
>>> m[0]
1
>>> m[::2]
[1, 3]
"""
if type(key) is tuple:
i, j = key
if type(i) is slice or type(j) is slice:
return self.submatrix(key)
else:
i, j = self.key2ij(key)
return self._mat[i*self.cols + j]
else:
# row-wise decomposition of matrix
if type(key) is slice:
return self._mat[key]
return self._mat[a2idx(key)]
def __setitem__(self, key, value):
raise NotImplementedError()
def __hash__(self):
# issue 880 suggests that there should be no hash for a mutable
# object...but at least we aren't caching the result
return hash((type(self).__name__,) + (self.shape, tuple(self._mat)))
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
for i in range(self.rows):
for j in range(i + 1, self.cols):
if self[i, j] or self[j, i]:
return False
return True
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return [self._mat[i: i + self.cols]
for i in range(0, len(self), self.cols)]
def row(self, i, f=None):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
row_op
row_swap
row_del
row_join
row_insert
"""
if f is None:
return self[i, :]
SymPyDeprecationWarning(
feature="calling .row(i, f)",
useinstead=".row_op(i, f)",
deprecated_since_version="0.7.2",
).warn()
self.row_op(i, f)
def col(self, j, f=None):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
col_op
col_swap
col_del
col_join
col_insert
"""
if f is None:
return self[:, j]
SymPyDeprecationWarning(
feature="calling .col(j, f)",
useinstead=".col_op(j, f)",
deprecated_since_version="0.7.2",
).warn()
self.col_op(j, f)
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = 0
for i in range(self.cols):
trace += self._mat[i*self.cols + i]
return trace
def _eval_determinant(self):
return self.det()
def _eval_transpose(self):
"""Matrix transposition.
Examples
========
>>> from sympy import Matrix, I
>>> m=Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m.transpose()
Matrix([
[ 1, 3],
[2 + I, 4]])
>>> m.T == m.transpose()
True
See Also
========
conjugate: By-element conjugation
"""
a = []
for i in range(self.cols):
a.extend(self._mat[i::self.cols])
return self._new(self.cols, self.rows, a)
def _eval_conjugate(self):
"""By-element conjugation.
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
out = self._new(self.rows, self.cols,
lambda i, j: self[i, j].conjugate())
return out
def _eval_adjoint(self):
return self.T.C
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using the method indicated (default
is Gauss elimination).
kwargs
======
method : ('GE', 'LU', or 'ADJ')
iszerofunc
try_block_diag
Notes
=====
According to the ``method`` keyword, it calls the appropriate method:
GE .... inverse_GE(); default
LU .... inverse_LU()
ADJ ... inverse_ADJ()
According to the ``try_block_diag`` keyword, it will try to form block
diagonal matrices using the method get_diag_blocks(), invert these
individually, and then reconstruct the full inverse matrix.
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
pivoting. In difficult cases a custom zero detection function can
        be provided by setting the ``iszerofunc`` argument to a function that
should return True if its argument is zero. The ADJ routine computes
the determinant and uses that to detect singular matrices in addition
to testing for zeros on the diagonal.
See Also
========
inverse_LU
inverse_GE
inverse_ADJ
"""
from sympy.matrices import diag
method = kwargs.get('method', 'GE')
iszerofunc = kwargs.get('iszerofunc', _iszero)
if kwargs.get('try_block_diag', False):
blocks = self.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return diag(*r)
M = self.as_mutable()
if method == "GE":
rv = M.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
rv = M.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
rv = M.inverse_ADJ(iszerofunc=iszerofunc)
else:
# make sure to add an invertibility check (as in inverse_LU)
# if a new method is added.
raise ValueError("Inversion method unrecognized")
return self._new(rv)
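    # Illustrative usage sketch (comment added for this edit, not part of the
    # original source): the three methods agree on an invertible matrix.
    # >>> from sympy import Matrix
    # >>> M = Matrix([[1, 2], [3, 4]])
    # >>> M.inv(method='GE') == M.inv(method='LU') == M.inv(method='ADJ')
    # True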
def equals(self, other, failing_expression=False):
"""Applies ``equals`` to corresponding elements of the matrices,
trying to prove that the elements are equivalent, returning True
if they are, False if any pair is not, and None (or the first
failing expression if failing_expression is True) if it cannot
be decided if the expressions are equivalent or not. This is, in
general, an expensive operation.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x
>>> from sympy import cos
>>> A = Matrix([x*(x - 1), 0])
>>> B = Matrix([x**2 - x, 0])
>>> A == B
False
>>> A.simplify() == B.simplify()
True
>>> A.equals(B)
True
>>> A.equals(2)
False
See Also
========
sympy.core.expr.equals
"""
try:
if self.shape != other.shape:
return False
rv = True
for i in range(self.rows):
for j in range(self.cols):
ans = self[i, j].equals(other[i, j], failing_expression)
if ans is False:
return False
elif ans is not True and rv is True:
rv = ans
return rv
except AttributeError:
return False
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, Matrix):
return self._mat == other._mat
elif isinstance(other, MatrixBase):
                return self._mat == Matrix(other)._mat
            return False
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def _cholesky(self):
"""Helper function of cholesky.
Without the error checks.
To be used privately. """
L = zeros(self.rows, self.rows)
for i in range(self.rows):
for j in range(i):
L[i, j] = (1 / L[j, j])*(self[i, j] -
sum(L[i, k]*L[j, k] for k in range(j)))
L[i, i] = sqrt(self[i, i] -
sum(L[i, k]**2 for k in range(i)))
return self._new(L)
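    # Illustrative check (comment added for this edit; plain SymPy assumed):
    # the public ``cholesky`` wrapper drives this helper, and L*L.T
    # reproduces the input exactly.
    # >>> from sympy import Matrix
    # >>> A = Matrix([[25, 15, -5], [15, 18, 0], [-5, 0, 11]])
    # >>> L = A.cholesky()
    # >>> L * L.T == A
    # True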
def _LDLdecomposition(self):
"""Helper function of LDLdecomposition.
Without the error checks.
To be used privately.
"""
D = zeros(self.rows, self.rows)
L = eye(self.rows)
for i in range(self.rows):
for j in range(i):
L[i, j] = (1 / D[j, j])*(self[i, j] - sum(
L[i, k]*L[j, k]*D[k, k] for k in range(j)))
D[i, i] = self[i, i] - sum(L[i, k]**2*D[k, k]
for k in range(i))
return self._new(L), self._new(D)
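    # Illustrative check (comment added for this edit): the public
    # ``LDLdecomposition`` wrapper returns factors with L*D*L.T equal to
    # the original matrix.
    # >>> from sympy import Matrix
    # >>> A = Matrix([[25, 15, -5], [15, 18, 0], [-5, 0, 11]])
    # >>> L, D = A.LDLdecomposition()
    # >>> L * D * L.T == A
    # True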
def _lower_triangular_solve(self, rhs):
"""Helper function of function lower_triangular_solve.
Without the error checks.
To be used privately.
"""
X = zeros(self.rows, rhs.cols)
for j in range(rhs.cols):
for i in range(self.rows):
if self[i, i] == 0:
                    raise ValueError("Matrix must be non-singular.")
X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
for k in range(i))) / self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Helper function of function upper_triangular_solve.
Without the error checks, to be used privately. """
X = zeros(self.rows, rhs.cols)
for j in range(rhs.cols):
for i in reversed(range(self.rows)):
if self[i, i] == 0:
raise ValueError("Matrix must be non-singular.")
X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
for k in range(i + 1, self.rows))) / self[i, i]
return self._new(X)
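    # Illustrative sketch (comment added for this edit): the public wrappers
    # perform the shape and triangularity checks before calling these helpers.
    # >>> from sympy import Matrix
    # >>> L = Matrix([[1, 0], [2, 1]])
    # >>> L.lower_triangular_solve(Matrix([1, 4]))
    # Matrix([
    # [1],
    # [2]])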
def _diagonal_solve(self, rhs):
"""Helper function of function diagonal_solve,
without the error checks, to be used privately.
"""
return self._new(rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / self[i, i])
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self._new(self.rows, self.cols, map(f, self._mat))
return out
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self._mat[i*cols + j])
def as_mutable(self):
"""Returns a mutable version of this matrix
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return Matrix(self)
def as_immutable(self):
"""Returns an Immutable version of this Matrix
"""
from immutable import ImmutableMatrix as cls
if self.rows:
return cls._new(self.tolist())
return cls._new(0, self.cols, [])
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
if is_sequence(r):
SymPyDeprecationWarning(
feature="The syntax zeros([%i, %i])" % tuple(r),
useinstead="zeros(%i, %i)." % tuple(r),
issue=3381, deprecated_since_version="0.7.2",
).warn()
r, c = r
else:
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls._new(r, c, [cls._sympify(0)]*r*c)
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
mat = [cls._sympify(0)]*n*n
mat[::n + 1] = [cls._sympify(1)]*n
return cls._new(n, n, mat)
############################
# Mutable matrix operators #
############################
@call_highest_priority('__radd__')
def __add__(self, other):
return super(DenseMatrix, self).__add__(_force_mutable(other))
@call_highest_priority('__add__')
def __radd__(self, other):
return super(DenseMatrix, self).__radd__(_force_mutable(other))
@call_highest_priority('__rsub__')
def __sub__(self, other):
return super(DenseMatrix, self).__sub__(_force_mutable(other))
@call_highest_priority('__sub__')
def __rsub__(self, other):
return super(DenseMatrix, self).__rsub__(_force_mutable(other))
@call_highest_priority('__rmul__')
def __mul__(self, other):
return super(DenseMatrix, self).__mul__(_force_mutable(other))
@call_highest_priority('__mul__')
def __rmul__(self, other):
return super(DenseMatrix, self).__rmul__(_force_mutable(other))
@call_highest_priority('__div__')
def __div__(self, other):
return super(DenseMatrix, self).__div__(_force_mutable(other))
@call_highest_priority('__truediv__')
def __truediv__(self, other):
return super(DenseMatrix, self).__truediv__(_force_mutable(other))
@call_highest_priority('__rpow__')
def __pow__(self, other):
return super(DenseMatrix, self).__pow__(other)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
def _force_mutable(x):
"""Return a matrix as a Matrix, otherwise return x."""
if getattr(x, 'is_Matrix', False):
return x.as_mutable()
elif isinstance(x, Basic):
return x
elif hasattr(x, '__array__'):
a = x.__array__()
if len(a.shape) == 0:
return sympify(a)
return Matrix(x)
return x
class MutableDenseMatrix(DenseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
self = object.__new__(cls)
self.rows = rows
self.cols = cols
self._mat = list(flat_list) # create a shallow copy
return self
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
def as_mutable(self):
return self.copy()
def __setitem__(self, key, value):
"""
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
self._mat[i*self.cols + j] = value
def copyin_matrix(self, key, value):
"""Copy in values from a matrix into the given bounds.
Parameters
==========
key : slice
The section of this matrix to replace.
value : Matrix
The matrix to copy values from.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> M = Matrix([[0, 1], [2, 3], [4, 5]])
>>> I = eye(3)
>>> I[:3, :2] = M
>>> I
Matrix([
[0, 1, 0],
[2, 3, 0],
[4, 5, 1]])
>>> I[0, 1] = M
>>> I
Matrix([
[0, 0, 1],
[2, 2, 3],
[4, 4, 5]])
See Also
========
copyin_list
"""
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(filldedent("The Matrix `value` doesn't have the "
"same dimensions "
"as the in sub-Matrix given by `key`."))
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
def copyin_list(self, key, value):
"""Copy in elements from a list.
Parameters
==========
key : slice
The section of this matrix to replace.
value : iterable
The iterable to copy values from.
Examples
========
>>> from sympy.matrices import eye
>>> I = eye(3)
>>> I[:2, 0] = [1, 2] # col
>>> I
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
>>> I[1, :2] = [[3, 4]]
>>> I
Matrix([
[1, 0, 0],
[3, 4, 0],
[0, 0, 1]])
See Also
========
copyin_matrix
"""
if not is_sequence(value):
raise TypeError("`value` must be an ordered iterable, not %s." % type(value))
return self.copyin_matrix(key, Matrix(value))
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
row_op
col_op
"""
i0 = i*self.cols
k0 = k*self.cols
ri = self._mat[i0: i0 + self.cols]
rk = self._mat[k0: k0 + self.cols]
self._mat[i0: i0 + self.cols] = [ f(x, y) for x, y in zip(ri, rk) ]
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
zip_row_op
col_op
"""
i0 = i*self.cols
ri = self._mat[i0: i0 + self.cols]
self._mat[i0: i0 + self.cols] = [ f(x, j) for x, j in zip(ri, range(self.cols)) ]
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i).
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[1, 2, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
col
row_op
"""
self._mat[j::self.cols] = map(lambda t: f(*t),
zip(self._mat[j::self.cols], range(self.rows)))
def row_swap(self, i, j):
"""Swap the two given rows of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[0, 1], [1, 0]])
>>> M
Matrix([
[0, 1],
[1, 0]])
>>> M.row_swap(0, 1)
>>> M
Matrix([
[1, 0],
[0, 1]])
See Also
========
row
col_swap
"""
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def col_swap(self, i, j):
"""Swap the two given columns of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[1, 0], [1, 0]])
>>> M
Matrix([
[1, 0],
[1, 0]])
>>> M.col_swap(0, 1)
>>> M
Matrix([
[0, 1],
[0, 1]])
See Also
========
col
row_swap
"""
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def row_del(self, i):
"""Delete the given row.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_del(1)
>>> M
Matrix([
[1, 0, 0],
[0, 0, 1]])
See Also
========
row
col_del
"""
self._mat = self._mat[:i*self.cols] + self._mat[(i + 1)*self.cols:]
self.rows -= 1
def col_del(self, i):
"""Delete the given column.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_del(1)
>>> M
Matrix([
[1, 0],
[0, 0],
[0, 1]])
See Also
========
col
row_del
"""
for j in range(self.rows - 1, -1, -1):
del self._mat[i + j*self.cols]
self.cols -= 1
# Utility functions
def simplify(self, ratio=1.7, measure=count_ops):
"""Applies simplify to the elements of a matrix in place.
This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure))
See Also
========
sympy.simplify.simplify.simplify
"""
for i in range(len(self._mat)):
self._mat[i] = _simplify(self._mat[i], ratio=ratio,
measure=measure)
def fill(self, value):
"""Fill the matrix with the scalar value.
See Also
========
zeros
ones
"""
self._mat = [value]*len(self)
MutableMatrix = Matrix = MutableDenseMatrix
###########
# Numpy Utility Functions:
# list2numpy, matrix2numpy, symarray, rot_axis[123]
###########
def list2numpy(l): # pragma: no cover
"""Converts python list of SymPy expressions to a NumPy array.
See Also
========
matrix2numpy
"""
from numpy import empty
a = empty(len(l), dtype=object)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m): # pragma: no cover
"""Converts SymPy's matrix to a NumPy array.
See Also
========
list2numpy
"""
from numpy import empty
a = empty(m.shape, dtype=object)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
@doctest_depends_on(modules=('numpy',))
def symarray(prefix, shape): # pragma: no cover
"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named ``prefix_i1_i2_``... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as SymPy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
Examples
--------
These doctests require numpy.
>>> from sympy import symarray
>>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>>> a = symarray('', 3)
>>> b = symarray('', 3)
>>> a[0] is b[0]
True
>>> a = symarray('a', 3)
>>> b = symarray('b', 3)
>>> a[0] is b[0]
False
Creating symarrays with a prefix:
>>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>>> symarray('a', (2, 3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>>> symarray('a', (2, 3, 2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
"""
from numpy import empty, ndindex
arr = empty(shape, dtype=object)
for index in ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))))
return arr
def rot_axis3(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 3-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis3
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis3(theta)
Matrix([
[ 1/2, sqrt(3)/2, 0],
[-sqrt(3)/2, 1/2, 0],
[ 0, 0, 1]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis3(pi/2)
Matrix([
[ 0, 1, 0],
[-1, 0, 0],
[ 0, 0, 1]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, st, 0),
(-st, ct, 0),
(0, 0, 1))
return Matrix(lil)
def rot_axis2(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 2-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis2
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis2(theta)
Matrix([
[ 1/2, 0, -sqrt(3)/2],
[ 0, 1, 0],
[sqrt(3)/2, 0, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis2(pi/2)
Matrix([
[0, 0, -1],
[0, 1, 0],
[1, 0, 0]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, 0, -st),
(0, 1, 0),
(st, 0, ct))
return Matrix(lil)
def rot_axis1(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 1-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis1
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis1(theta)
Matrix([
[1, 0, 0],
[0, 1/2, sqrt(3)/2],
[0, -sqrt(3)/2, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis1(pi/2)
Matrix([
[1, 0, 0],
[0, 0, 1],
[0, -1, 0]])
See Also
========
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((1, 0, 0),
(0, ct, st),
(0, -st, ct))
return Matrix(lil)
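# Illustrative check (comment added for this edit): rotation matrices are
# orthogonal, so R*R.T simplifies to the identity for every axis.
# >>> from sympy import symbols, simplify, eye
# >>> t = symbols('t')
# >>> all((R(t)*R(t).T).applyfunc(simplify) == eye(3)
# ...     for R in (rot_axis1, rot_axis2, rot_axis3))
# True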
###############
# Functions
###############
def matrix_add(A, B):
SymPyDeprecationWarning(
feature="matrix_add(A, B)",
useinstead="A + B",
deprecated_since_version="0.7.2",
).warn()
return A + B
def matrix_multiply(A, B):
SymPyDeprecationWarning(
feature="matrix_multiply(A, B)",
useinstead="A*B",
deprecated_since_version="0.7.2",
).warn()
return A*B
def matrix_multiply_elementwise(A, B):
"""Return the Hadamard product (elementwise product) of A and B
>>> from sympy.matrices import matrix_multiply_elementwise
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> matrix_multiply_elementwise(A, B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
__mul__
"""
if A.shape != B.shape:
        raise ShapeError("Matrices A and B must have the same shape.")
shape = A.shape
return classof(A, B)._new(shape[0], shape[1],
lambda i, j: A[i, j]*B[i, j])
def ones(r, c=None):
"""Returns a matrix of ones with ``r`` rows and ``c`` columns;
if ``c`` is omitted a square matrix will be returned.
See Also
========
zeros
eye
diag
"""
from dense import Matrix
if is_sequence(r):
SymPyDeprecationWarning(
feature="The syntax ones([%i, %i])" % tuple(r),
useinstead="ones(%i, %i)." % tuple(r),
issue=3381, deprecated_since_version="0.7.2",
).warn()
r, c = r
else:
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return Matrix(r, c, [S.One]*r*c)
def zeros(r, c=None, cls=None):
"""Returns a matrix of zeros with ``r`` rows and ``c`` columns;
if ``c`` is omitted a square matrix will be returned.
See Also
========
ones
eye
diag
"""
if cls is None:
from dense import Matrix as cls
return cls.zeros(r, c)
def eye(n, cls=None):
"""Create square identity matrix n x n
See Also
========
diag
zeros
ones
"""
if cls is None:
from sympy.matrices import Matrix as cls
return cls.eye(n)
def diag(*values, **kwargs):
"""Create a sparse, diagonal matrix from a list of diagonal values.
Notes
=====
When arguments are matrices they are fitted in resultant matrix.
The returned matrix is a mutable, dense matrix. To make it a different
type, send the desired class for keyword ``cls``.
Examples
========
>>> from sympy.matrices import diag, Matrix, ones
>>> diag(1, 2, 3)
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> diag(*[1, 2, 3])
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
The diagonal elements can be matrices; diagonal filling will
continue on the diagonal from the last element of the matrix:
>>> from sympy.abc import x, y, z
>>> a = Matrix([x, y, z])
>>> b = Matrix([[1, 2], [3, 4]])
>>> c = Matrix([[5, 6]])
>>> diag(a, 7, b, c)
Matrix([
[x, 0, 0, 0, 0, 0],
[y, 0, 0, 0, 0, 0],
[z, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0],
[0, 0, 3, 4, 0, 0],
[0, 0, 0, 0, 5, 6]])
When diagonal elements are lists, they will be treated as arguments
to Matrix:
>>> diag([1, 2, 3], 4)
Matrix([
[1, 0],
[2, 0],
[3, 0],
[0, 4]])
>>> diag([[1, 2, 3]], 4)
Matrix([
[1, 2, 3, 0],
[0, 0, 0, 4]])
A given band off the diagonal can be made by padding with a
vertical or horizontal "kerning" vector:
>>> hpad = ones(0, 2)
>>> vpad = ones(2, 0)
>>> diag(vpad, 1, 2, 3, hpad) + diag(hpad, 4, 5, 6, vpad)
Matrix([
[0, 0, 4, 0, 0],
[0, 0, 0, 5, 0],
[1, 0, 0, 0, 6],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0]])
The type is mutable by default but can be made immutable by setting
the ``mutable`` flag to False:
>>> type(diag(1))
<class 'sympy.matrices.dense.MutableDenseMatrix'>
>>> from sympy.matrices import ImmutableMatrix
>>> type(diag(1, cls=ImmutableMatrix))
<class 'sympy.matrices.immutable.ImmutableMatrix'>
See Also
========
eye
"""
from sparse import MutableSparseMatrix
cls = kwargs.pop('cls', None)
if cls is None:
from dense import Matrix as cls
if kwargs:
raise ValueError('unrecognized keyword%s: %s' % (
's' if len(kwargs) > 1 else '',
', '.join(kwargs.keys())))
rows = 0
cols = 0
values = list(values)
for i in range(len(values)):
m = values[i]
if isinstance(m, MatrixBase):
rows += m.rows
cols += m.cols
elif is_sequence(m):
m = values[i] = Matrix(m)
rows += m.rows
cols += m.cols
else:
rows += 1
cols += 1
res = MutableSparseMatrix.zeros(rows, cols)
i_row = 0
i_col = 0
for m in values:
if isinstance(m, MatrixBase):
res[i_row:i_row + m.rows, i_col:i_col + m.cols] = m
i_row += m.rows
i_col += m.cols
else:
res[i_row, i_col] = m
i_row += 1
i_col += 1
return cls._new(res)
def jordan_cell(eigenval, n):
"""
Create matrix of Jordan cell kind:
Examples
========
>>> from sympy.matrices import jordan_cell
>>> from sympy.abc import x
>>> jordan_cell(x, 4)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
"""
n = as_int(n)
out = zeros(n)
for i in range(n - 1):
out[i, i] = eigenval
out[i, i + 1] = S.One
out[n - 1, n - 1] = eigenval
return out
def hessian(f, varlist, constraints=()):
    """Compute the Hessian matrix for a function ``f`` wrt the parameters in
    ``varlist``, which may be given as a sequence or a row/column vector. A
    list of constraints may optionally be given.
Examples
========
>>> from sympy import Function, hessian, pprint
>>> from sympy.abc import x, y
>>> f = Function('f')(x, y)
>>> g1 = Function('g')(x, y)
>>> g2 = x**2 + 3*y
>>> pprint(hessian(f, (x, y), [g1, g2]))
[ d d ]
[ 0 0 --(g(x, y)) --(g(x, y)) ]
[ dx dy ]
[ ]
[ 0 0 2*x 3 ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 2*x ---(f(x, y)) -----(f(x, y))]
[dx 2 dy dx ]
[ dx ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 3 -----(f(x, y)) ---(f(x, y)) ]
[dy dy dx 2 ]
[ dy ]
References
==========
http://en.wikipedia.org/wiki/Hessian_matrix
See Also
========
sympy.matrices.mutable.Matrix.jacobian
wronskian
"""
    # f is an expression representing a function of the variables in varlist;
    # a regular (dense) Matrix is returned
if isinstance(varlist, MatrixBase):
if 1 not in varlist.shape:
raise ShapeError("`varlist` must be a column or row vector.")
if varlist.cols == 1:
varlist = varlist.T
varlist = varlist.tolist()[0]
if is_sequence(varlist):
n = len(varlist)
if not n:
raise ShapeError("`len(varlist)` must not be zero.")
else:
raise ValueError("Improper variable list in hessian function")
    if not callable(getattr(f, 'diff', None)):
        # check differentiability
        raise ValueError("Function `f` (%s) is not differentiable" % f)
m = len(constraints)
N = m + n
out = zeros(N)
for k, g in enumerate(constraints):
        if not callable(getattr(g, 'diff', None)):
            # check differentiability
            raise ValueError("Function `g` (%s) is not differentiable" % g)
for i in range(n):
out[k, i + m] = g.diff(varlist[i])
for i in range(n):
for j in range(i, n):
out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])
for i in range(N):
for j in range(i + 1, N):
out[j, i] = out[i, j]
return out
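# Illustrative check (comment added for this edit; plain SymPy assumed):
# an unconstrained Hessian is just the matrix of second derivatives.
# >>> from sympy import hessian
# >>> from sympy.abc import x, y
# >>> hessian(x**2*y, (x, y))
# Matrix([
# [2*y, 2*x],
# [2*x,   0]])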
def GramSchmidt(vlist, orthog=False):
"""
Apply the Gram-Schmidt process to a set of vectors.
see: http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
"""
out = []
m = len(vlist)
for i in range(m):
tmp = vlist[i]
for j in range(i):
tmp -= vlist[i].project(out[j])
if not tmp.values():
raise ValueError(
"GramSchmidt: vector set not linearly independent")
out.append(tmp)
if orthog:
for i in range(len(out)):
out[i] = out[i].normalized()
return out
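# Illustrative check (comment added for this edit): with ``orthog=True`` the
# returned vectors are orthonormal, so distinct vectors have zero dot product.
# >>> from sympy import Matrix
# >>> u, v = GramSchmidt([Matrix([1, 1]), Matrix([1, 0])], orthog=True)
# >>> u.dot(v)
# 0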
def wronskian(functions, var, method='bareis'):
"""
    Compute the Wronskian of a list of functions
::
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1, ..., fn) = | . . . . |
| . . . . |
| (n) (n) (n) |
| D (f1) D (f2) ... D (fn) |
see: http://en.wikipedia.org/wiki/Wronskian
See Also
========
sympy.matrices.mutable.Matrix.jacobian
hessian
"""
from dense import Matrix
for index in range(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return 1
W = Matrix(n, n, lambda i, j: functions[i].diff(var, j))
return W.det(method)
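# Illustrative check (comment added for this edit): for polynomial inputs the
# Wronskian determinant is immediate; row i holds the derivatives of f_i.
# >>> from sympy.abc import x
# >>> wronskian([1, x, x**2], x)
# 2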
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
denoted as C(a, b, ..., z), do not vanish for n = 0.
Casoratian is defined by k x k determinant::
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis:
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
from dense import Matrix
seqs = map(sympify, seqs)
if not zero:
f = lambda i, j: seqs[j].subs(n, n + i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
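# Illustrative check (comment added for this edit): a nonzero Casoratian at
# n = 0 certifies linear independence of the sequences.
# >>> from sympy import Symbol
# >>> n = Symbol('n', integer=True)
# >>> casoratian([1, 2**n], n)
# 1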
def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False, percent=100):
"""Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted
the matrix will be square. If ``symmetric`` is True the matrix must be
square. If ``percent`` is less than 100 then only approximately the given
percentage of elements will be non-zero.
Examples
========
>>> from sympy.matrices import randMatrix
>>> randMatrix(3) # doctest:+SKIP
[25, 45, 27]
[44, 54, 9]
[23, 96, 46]
>>> randMatrix(3, 2) # doctest:+SKIP
[87, 29]
[23, 37]
[90, 26]
>>> randMatrix(3, 3, 0, 2) # doctest:+SKIP
[0, 2, 0]
[2, 0, 1]
[0, 0, 1]
>>> randMatrix(3, symmetric=True) # doctest:+SKIP
[85, 26, 29]
[26, 71, 43]
[29, 43, 57]
>>> A = randMatrix(3, seed=1)
>>> B = randMatrix(3, seed=2)
>>> A == B # doctest:+SKIP
False
>>> A == randMatrix(3, seed=1)
True
>>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP
[0, 68, 43]
[0, 68, 0]
[0, 91, 34]
"""
if c is None:
c = r
if seed is None:
prng = random.Random() # use system time
else:
prng = random.Random(seed)
if symmetric and r != c:
raise ValueError(
'For symmetric matrices, r must equal c, but %i != %i' % (r, c))
if not symmetric:
m = Matrix._new(r, c, lambda i, j: prng.randint(min, max))
else:
m = zeros(r)
for i in range(r):
for j in range(i, r):
m[i, j] = prng.randint(min, max)
for i in range(r):
for j in range(i):
m[i, j] = m[j, i]
    if percent == 100:
        return m
    else:
        # ``percent`` is the fraction of non-zero entries, so zero out the
        # complementary fraction (note: shuffling does not preserve symmetry).
        z = int(r*c*(100 - percent) // 100)
        m._mat[:z] = [S.Zero]*z
        prng.shuffle(m._mat)
    return m
|
lidavidm/mathics-heroku
|
venv/lib/python2.7/site-packages/sympy/matrices/dense.py
|
Python
|
gpl-3.0
| 43,924
|
[
"DIRAC"
] |
3e661954ac1ffb76bd7f7c2157eb86d4509af9756eba5ee12426c54e00c88240
|
#
# Copyright (C) 2003-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for fingerprinting
"""
import unittest
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
class TestCase(unittest.TestCase):
def test1(self):
# FIX: test HashAtom
pass
def test2(self):
# FIX: test HashBond
pass
def test3(self):
# FIX: test HashPath
pass
def test4(self):
""" check containing mols, no Hs, no valence """
tgts = [('CCC(O)C(=O)O', ('CCC', 'OCC', 'OCC=O', 'OCCO', 'CCCC', 'OC=O', 'CC(O)C')), ]
for smi, matches in tgts:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 9192, 4, 0)
_ = fp1.GetOnBits()
for match in matches:
m2 = Chem.MolFromSmiles(match)
fp2 = Chem.RDKFingerprint(m2, 2, 7, 9192, 4, 0)
v1, _ = DataStructs.OnBitProjSimilarity(fp2, fp1)
        self.assertAlmostEqual(v1, 1, msg='substruct %s not properly contained in %s' % (match, smi))
def test5(self):
""" check containing mols, use Hs, no valence """
tgts = [('CCC(O)C(=O)O', ('O[CH-][CH2-]', 'O[CH-][C-]=O')), ]
for smi, matches in tgts:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 9192, 4, 1)
_ = fp1.GetOnBits()
for match in matches:
m2 = Chem.MolFromSmiles(match)
fp2 = Chem.RDKFingerprint(m2, 2, 7, 9192, 4, 1)
v1, _ = DataStructs.OnBitProjSimilarity(fp2, fp1)
        self.assertAlmostEqual(v1, 1, msg='substruct %s not properly contained in %s' % (match, smi))
def test6(self):
""" check that the bits in a signature of size N which has been folded in half
are the same as those in a signature of size N/2 """
smis = ['CCC(O)C(=O)O', 'c1ccccc1', 'C1CCCCC1', 'C1NCCCC1', 'CNCNCNC']
for smi in smis:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 4096)
fp2 = DataStructs.FoldFingerprint(fp1, 2)
fp3 = Chem.RDKFingerprint(m, 2, 7, 2048)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
fp2 = DataStructs.FoldFingerprint(fp2, 2)
fp3 = Chem.RDKFingerprint(m, 2, 7, 1024)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
fp2 = DataStructs.FoldFingerprint(fp1, 4)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
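  # Illustrative sketch (comment added for this edit, not an original test):
  # the folding identity exercised above, shown in isolation.
  # >>> m = Chem.MolFromSmiles('c1ccccc1')
  # >>> fp = Chem.RDKFingerprint(m, 2, 7, 4096)
  # >>> DataStructs.FoldFingerprint(fp, 2).GetNumBits()
  # 2048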
def testGithub1747(self):
""" test github #1747: deprecated apply() function causes GetRDKFingerprint
to fail in Python 3 """
fp = FingerprintMols.GetRDKFingerprint(Chem.MolFromSmiles('CCO'))
self.assertNotEqual(0,fp.GetNumOnBits())
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
greglandrum/rdkit
|
rdkit/Chem/Fingerprints/UnitTestFingerprints.py
|
Python
|
bsd-3-clause
| 2,918
|
[
"RDKit"
] |
525d31dc830d137a7514c1eea04cff9dc003256f9eff5ad37106290b88438e84
|
#
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import struct
import numpy
from gnuradio import gru
#import crc
import gnuradio.digital.crc as crc
from ctypes import *
#lib = "/root/libfec.so" # when submitted to grid
lib = "./libfec.so"
Decoder = cdll.LoadLibrary(lib)
Encoder = cdll.LoadLibrary(lib)
#cclib = "./_cat_cccodec3.so"
#ccDecoder = cdll.LoadLibrary(cclib)
#ccEncoder = cdll.LoadLibrary(cclib)
#MEMLEN = 8
#RATEINV = 3
#RSLEN = 1670
#CCLEN = (RSLEN + MEMLEN)*RATEINV
def conv_packed_binary_string_to_1_0_string(s):
"""
'\xAF' --> '10101111'
"""
r = []
for ch in s:
x = ord(ch)
for i in range(7,-1,-1):
t = (x >> i) & 0x1
r.append(t)
return ''.join(map(lambda x: chr(x + ord('0')), r))
def conv_1_0_string_to_packed_binary_string(s):
"""
'10101111' -> ('\xAF', False)
Basically the inverse of conv_packed_binary_string_to_1_0_string,
but also returns a flag indicating if we had to pad with leading zeros
to get to a multiple of 8.
"""
if not is_1_0_string(s):
raise ValueError, "Input must be a string containing only 0's and 1's"
# pad to multiple of 8
padded = False
rem = len(s) % 8
if rem != 0:
npad = 8 - rem
s = '0' * npad + s
padded = True
assert len(s) % 8 == 0
r = []
i = 0
while i < len(s):
t = 0
for j in range(8):
t = (t << 1) | (ord(s[i + j]) - ord('0'))
r.append(chr(t))
i += 8
return (''.join(r), padded)
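# Illustrative checks (comments added for this edit):
# >>> conv_packed_binary_string_to_1_0_string('\xAF')
# '10101111'
# >>> conv_1_0_string_to_packed_binary_string('1010')  # padded to 8 bits
# ('\n', True)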
default_access_code = \
conv_packed_binary_string_to_1_0_string('\xAC\xDD\xA4\xE2\xF2\x8C\x20\xFC')
preamble = \
conv_packed_binary_string_to_1_0_string('\xA4\xF2')
def is_1_0_string(s):
if not isinstance(s, str):
return False
for ch in s:
if not ch in ('0', '1'):
return False
return True
def string_to_hex_list(s):
return map(lambda x: hex(ord(x)), s)
def whiten(s, o):
sa = numpy.fromstring(s, numpy.uint8)
z = sa ^ random_mask_vec8[o:len(sa)+o]
return z.tostring()
def dewhiten(s, o):
return whiten(s, o) # self inverse
def make_header(payload_len, whitener_offset=0):
# Upper nibble is offset, lower 12 bits is len
val = ((whitener_offset & 0xf) << 12) | (payload_len & 0x0fff)
#print "offset =", whitener_offset, " len =", payload_len, " val=", val
return struct.pack('!HH', val, val)
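# Illustrative check (comment added for this edit): the header is the same
# 16-bit value packed twice, so it round-trips with the matching format.
# >>> import struct
# >>> v1, v2 = struct.unpack('!HH', make_header(100))
# >>> v1 == v2, v1 & 0x0fff, (v1 >> 12) & 0xf
# (True, 100, 0)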
def make_packet(payload, samples_per_symbol, bits_per_symbol,
access_code=default_access_code, pad_for_usrp=True,
whitener_offset=0, whitening=True):
"""
Build a packet, given access code, payload, and whitener offset
@param payload: packet payload, len [0, 4096]
@param samples_per_symbol: samples per symbol (needed for padding calculation)
@type samples_per_symbol: int
@param bits_per_symbol: (needed for padding calculation)
@type bits_per_symbol: int
@param access_code: string of ascii 0's and 1's
    @param whitener_offset: offset into whitener string to use [0, 16)
Packet will have access code at the beginning, followed by length, payload
and finally CRC-32.
"""
if not is_1_0_string(access_code):
raise ValueError, "access_code must be a string containing only 0's and 1's (%r)" % (access_code,)
    if not (0 <= whitener_offset < 16):
        raise ValueError, "whitener_offset must be between 0 and 15, inclusive (%i)" % (whitener_offset,)
(packed_access_code, padded) = conv_1_0_string_to_packed_binary_string(access_code)
(packed_preamble, ignore) = conv_1_0_string_to_packed_binary_string(preamble)
payload_with_crc = crc.gen_and_append_crc32(payload)
#print "outbound crc =", string_to_hex_list(payload_with_crc[-4:])
########################
# Error control code here
nb = len(payload_with_crc)
cl = Encoder.cat_codelength(nb)
#print nb, cl
codedpayload = payload_with_crc + payload_with_crc[0:cl-nb]
Encoder.cat_encode(codedpayload,nb)
#ccnb = len(codedpayload);
#print "Input to CC Enc has length ", ccnb
#cccl = ccEncoder.cc3_codelength(ccnb,RSLEN)
#cccodedpayload = codedpayload + codedpayload[0: cccl-ccnb]
#cccodedpayload = codedpayload + ('x' * (cccl-ccnb))
#ccEncoder.cc3_encode(cccodedpayload, ccnb, RSLEN)
######################
# uncoded
#L = len(payload_with_crc)
# coded with RS only
L = len(codedpayload)
# Xu: When coded with RS + CC
#L = len(cccodedpayload)
MAXLEN = len(random_mask_tuple)
# Commented by Xu, 2014-2-9
#if L > MAXLEN:
# raise ValueError, "len(payload) must be in [0, %d]" % (MAXLEN,)
# Xu: Never whitening
'''
if whitening:
pkt = ''.join((packed_preamble, packed_access_code, make_header(L, whitener_offset),
whiten(payload_with_crc, whitener_offset), '\x55'))
else:
pkt = ''.join((packed_preamble, packed_access_code, make_header(L, whitener_offset),
(payload_with_crc), '\x55'))
'''
# coded
pkt = ''.join((packed_preamble, packed_access_code, make_header(L, whitener_offset),
(codedpayload), '\x55'))
if pad_for_usrp:
pkt = pkt + (_npadding_bytes(len(pkt), int(samples_per_symbol), bits_per_symbol) * '\x55')
#print "make_packet: len(pkt) =", len(pkt)
return pkt
def _npadding_bytes(pkt_byte_len, samples_per_symbol, bits_per_symbol):
"""
Generate sufficient padding such that each packet ultimately ends
up being a multiple of 512 bytes when sent across the USB. We
send 4-byte samples across the USB (16-bit I and 16-bit Q), thus
we want to pad so that after modulation the resulting packet
is a multiple of 128 samples.
    @param pkt_byte_len: len in bytes of packet, not including padding.
@param samples_per_symbol: samples per bit (1 bit / symbolwidth GMSK)
@type samples_per_symbol: int
@param bits_per_symbol: bits per symbol (log2(modulation order))
@type bits_per_symbol: int
@returns number of bytes of padding to append.
"""
modulus = 128
byte_modulus = gru.lcm(modulus/8, samples_per_symbol) * bits_per_symbol / samples_per_symbol
r = pkt_byte_len % byte_modulus
if r == 0:
return 0
return byte_modulus - r
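# Worked example (comment added for this edit): with samples_per_symbol=2 and
# bits_per_symbol=1, byte_modulus = lcm(16, 2)*1/2 = 8, so a 20-byte packet
# needs 8 - (20 % 8) = 4 padding bytes.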
def unmake_packet(whitened_payload_with_crc, whitener_offset=0, dewhitening=True):
"""
Return (ok, payload)
@param whitened_payload_with_crc: string
"""
# Xu: Never dewhitening
'''
if dewhitening:
payload_with_crc = dewhiten(whitened_payload_with_crc, whitener_offset)
else:
payload_with_crc = (whitened_payload_with_crc)
'''
payload_with_crc = (whitened_payload_with_crc)
###############################
# Decoding here
#rx_pkt = payload_with_crc
#cccl = len(rx_pkt)
#ccnb = ccDecoder.cc3_nbytes(cccl, CCLEN)
#ccDecoder.cc3_decode(rx_pkt,cccl, CCLEN)
#rsbitstr = rx_pkt[0:ccnb]
rsbitstr = payload_with_crc
cl = len(rsbitstr)
nb = Decoder.cat_nbytes( cl )
Decoder.cat_decode(rsbitstr, cl)
ok, payload = crc.check_crc32(rsbitstr[0:nb])
##########################################################
# uncoded
#ok, payload = crc.check_crc32(payload_with_crc)
if 0:
print "payload_with_crc =", string_to_hex_list(payload_with_crc)
print "ok = %r, len(payload) = %d" % (ok, len(payload))
print "payload =", string_to_hex_list(payload)
return ok, payload
# FYI, this PN code is the output of a 15-bit LFSR
random_mask_tuple = (
255, 63, 0, 16, 0, 12, 0, 5, 192, 3, 16, 1, 204, 0, 85, 192,
63, 16, 16, 12, 12, 5, 197, 195, 19, 17, 205, 204, 85, 149, 255, 47,
0, 28, 0, 9, 192, 6, 208, 2, 220, 1, 153, 192, 106, 208, 47, 28,
28, 9, 201, 198, 214, 210, 222, 221, 152, 89, 170, 186, 255, 51, 0, 21,
192, 15, 16, 4, 12, 3, 69, 193, 243, 16, 69, 204, 51, 21, 213, 207,
31, 20, 8, 15, 70, 132, 50, 227, 85, 137, 255, 38, 192, 26, 208, 11,
28, 7, 73, 194, 182, 209, 182, 220, 118, 217, 230, 218, 202, 219, 23, 27,
78, 139, 116, 103, 103, 106, 170, 175, 63, 60, 16, 17, 204, 12, 85, 197,
255, 19, 0, 13, 192, 5, 144, 3, 44, 1, 221, 192, 89, 144, 58, 236,
19, 13, 205, 197, 149, 147, 47, 45, 220, 29, 153, 201, 170, 214, 255, 30,
192, 8, 80, 6, 188, 2, 241, 193, 132, 80, 99, 124, 41, 225, 222, 200,
88, 86, 186, 190, 243, 48, 69, 212, 51, 31, 85, 200, 63, 22, 144, 14,
236, 4, 77, 195, 117, 145, 231, 44, 74, 157, 247, 41, 134, 158, 226, 232,
73, 142, 182, 228, 118, 203, 102, 215, 106, 222, 175, 24, 124, 10, 161, 199,
56, 82, 146, 189, 173, 177, 189, 180, 113, 183, 100, 118, 171, 102, 255, 106,
192, 47, 16, 28, 12, 9, 197, 198, 211, 18, 221, 205, 153, 149, 170, 239,
63, 12, 16, 5, 204, 3, 21, 193, 207, 16, 84, 12, 63, 69, 208, 51,
28, 21, 201, 207, 22, 212, 14, 223, 68, 88, 51, 122, 149, 227, 47, 9,
220, 6, 217, 194, 218, 209, 155, 28, 107, 73, 239, 118, 204, 38, 213, 218,
223, 27, 24, 11, 74, 135, 119, 34, 166, 153, 186, 234, 243, 15, 5, 196,
3, 19, 65, 205, 240, 85, 132, 63, 35, 80, 25, 252, 10, 193, 199, 16,
82, 140, 61, 165, 209, 187, 28, 115, 73, 229, 246, 203, 6, 215, 66, 222,
177, 152, 116, 106, 167, 111, 58, 172, 19, 61, 205, 209, 149, 156, 111, 41,
236, 30, 205, 200, 85, 150, 191, 46, 240, 28, 68, 9, 243, 70, 197, 242,
211, 5, 157, 195, 41, 145, 222, 236, 88, 77, 250, 181, 131, 55, 33, 214,
152, 94, 234, 184, 79, 50, 180, 21, 183, 79, 54, 180, 22, 247, 78, 198,
180, 82, 247, 125, 134, 161, 162, 248, 121, 130, 162, 225, 185, 136, 114, 230,
165, 138, 251, 39, 3, 90, 129, 251, 32, 67, 88, 49, 250, 148, 67, 47,
113, 220, 36, 89, 219, 122, 219, 99, 27, 105, 203, 110, 215, 108, 94, 173,
248, 125, 130, 161, 161, 184, 120, 114, 162, 165, 185, 187, 50, 243, 85, 133,
255, 35, 0, 25, 192, 10, 208, 7, 28, 2, 137, 193, 166, 208, 122, 220,
35, 25, 217, 202, 218, 215, 27, 30, 139, 72, 103, 118, 170, 166, 255, 58,
192, 19, 16, 13, 204, 5, 149, 195, 47, 17, 220, 12, 89, 197, 250, 211,
3, 29, 193, 201, 144, 86, 236, 62, 205, 208, 85, 156, 63, 41, 208, 30,
220, 8, 89, 198, 186, 210, 243, 29, 133, 201, 163, 22, 249, 206, 194, 212,
81, 159, 124, 104, 33, 238, 152, 76, 106, 181, 239, 55, 12, 22, 133, 206,
227, 20, 73, 207, 118, 212, 38, 223, 90, 216, 59, 26, 147, 75, 45, 247,
93, 134, 185, 162, 242, 249, 133, 130, 227, 33, 137, 216, 102, 218, 170, 219,
63, 27, 80, 11, 124, 7, 97, 194, 168, 81, 190, 188, 112, 113, 228, 36,
75, 91, 119, 123, 102, 163, 106, 249, 239, 2, 204, 1, 149, 192, 111, 16,
44, 12, 29, 197, 201, 147, 22, 237, 206, 205, 148, 85, 175, 127, 60, 32,
17, 216, 12, 90, 133, 251, 35, 3, 89, 193, 250, 208, 67, 28, 49, 201,
212, 86, 223, 126, 216, 32, 90, 152, 59, 42, 147, 95, 45, 248, 29, 130,
137, 161, 166, 248, 122, 194, 163, 17, 185, 204, 114, 213, 229, 159, 11, 40,
7, 94, 130, 184, 97, 178, 168, 117, 190, 167, 48, 122, 148, 35, 47, 89,
220, 58, 217, 211, 26, 221, 203, 25, 151, 74, 238, 183, 12, 118, 133, 230,
227, 10, 201, 199, 22, 210, 142, 221, 164, 89, 187, 122, 243, 99, 5, 233,
195, 14, 209, 196, 92, 83, 121, 253, 226, 193, 137, 144, 102, 236, 42, 205,
223, 21, 152, 15, 42, 132, 31, 35, 72, 25, 246, 138, 198, 231, 18, 202,
141, 151, 37, 174, 155, 60, 107, 81, 239, 124, 76, 33, 245, 216, 71, 26,
178, 139, 53, 167, 87, 58, 190, 147, 48, 109, 212, 45, 159, 93, 168, 57,
190, 146, 240, 109, 132, 45, 163, 93, 185, 249, 178, 194, 245, 145, 135, 44,
98, 157, 233, 169, 142, 254, 228, 64, 75, 112, 55, 100, 22, 171, 78, 255,
116, 64, 39, 112, 26, 164, 11, 59, 71, 83, 114, 189, 229, 177, 139, 52,
103, 87, 106, 190, 175, 48, 124, 20, 33, 207, 88, 84, 58, 191, 83, 48,
61, 212, 17, 159, 76, 104, 53, 238, 151, 12, 110, 133, 236, 99, 13, 233,
197, 142, 211, 36, 93, 219, 121, 155, 98, 235, 105, 143, 110, 228, 44, 75,
93, 247, 121, 134, 162, 226, 249, 137, 130, 230, 225, 138, 200, 103, 22, 170,
142, 255, 36, 64, 27, 112, 11, 100, 7, 107, 66, 175, 113, 188, 36, 113,
219, 100, 91, 107, 123, 111, 99, 108, 41, 237, 222, 205, 152, 85, 170, 191,
63, 48, 16, 20, 12, 15, 69, 196, 51, 19, 85, 205, 255, 21, 128, 15,
32, 4, 24, 3, 74, 129, 247, 32, 70, 152, 50, 234, 149, 143, 47, 36,
28, 27, 73, 203, 118, 215, 102, 222, 170, 216, 127, 26, 160, 11, 56, 7,
82, 130, 189, 161, 177, 184, 116, 114, 167, 101, 186, 171, 51, 63, 85, 208,
63, 28, 16, 9, 204, 6, 213, 194, 223, 17, 152, 12, 106, 133, 239, 35,
12, 25, 197, 202, 211, 23, 29, 206, 137, 148, 102, 239, 106, 204, 47, 21,
220, 15, 25, 196, 10, 211, 71, 29, 242, 137, 133, 166, 227, 58, 201, 211,
22, 221, 206, 217, 148, 90, 239, 123, 12, 35, 69, 217, 243, 26, 197, 203,
19, 23, 77, 206, 181, 148, 119, 47, 102, 156, 42, 233, 223, 14, 216, 4,
90, 131, 123, 33, 227, 88, 73, 250, 182, 195, 54, 209, 214, 220, 94, 217,
248, 90, 194, 187, 17, 179, 76, 117, 245, 231, 7, 10, 130, 135, 33, 162,
152, 121, 170, 162, 255, 57, 128, 18, 224, 13, 136, 5, 166, 131, 58, 225,
211, 8, 93, 198, 185, 146, 242, 237, 133, 141, 163, 37, 185, 219, 50, 219,
85, 155, 127, 43, 96, 31, 104, 8, 46, 134, 156, 98, 233, 233, 142, 206,
228, 84, 75, 127, 119, 96, 38, 168, 26, 254, 139, 0, 103, 64, 42, 176,
31, 52, 8, 23, 70, 142, 178, 228, 117, 139, 103, 39, 106, 154, 175, 43,
60, 31, 81, 200, 60, 86, 145, 254, 236, 64, 77, 240, 53, 132, 23, 35,
78, 153, 244, 106, 199, 111, 18, 172, 13, 189, 197, 177, 147, 52, 109, 215,
109, 158, 173, 168, 125, 190, 161, 176, 120, 116, 34, 167, 89, 186, 186, 243,
51, 5, 213, 195, 31, 17, 200, 12, 86, 133, 254, 227, 0, 73, 192, 54,
208, 22, 220, 14, 217, 196, 90, 211, 123, 29, 227, 73, 137, 246, 230, 198,
202, 210, 215, 29, 158, 137, 168, 102, 254, 170, 192, 127, 16, 32, 12, 24,
5, 202, 131, 23, 33, 206, 152, 84, 106, 191, 111, 48, 44, 20, 29, 207,
73, 148, 54, 239, 86, 204, 62, 213, 208, 95, 28, 56, 9, 210, 134, 221,
162, 217, 185, 154, 242, 235, 5, 143, 67, 36, 49, 219, 84, 91, 127, 123,
96, 35, 104, 25, 238, 138, 204, 103, 21, 234, 143, 15, 36, 4, 27, 67,
75, 113, 247, 100, 70, 171, 114, 255, 101, 128, 43, 32, 31, 88, 8, 58,
134, 147, 34, 237, 217, 141, 154, 229, 171, 11, 63, 71, 80, 50, 188, 21,
177, 207, 52, 84, 23, 127, 78, 160, 52, 120, 23, 98, 142, 169, 164, 126,
251, 96, 67, 104, 49, 238, 148, 76, 111, 117, 236, 39, 13, 218, 133, 155,
35, 43, 89, 223, 122, 216, 35, 26, 153, 203, 42, 215, 95, 30, 184, 8,
114, 134, 165, 162, 251, 57, 131, 82, 225, 253, 136, 65, 166, 176, 122, 244,
35, 7, 89, 194, 186, 209, 179, 28, 117, 201, 231, 22, 202, 142, 215, 36,
94, 155, 120, 107, 98, 175, 105, 188, 46, 241, 220, 68, 89, 243, 122, 197,
227, 19, 9, 205, 198, 213, 146, 223, 45, 152, 29, 170, 137, 191, 38, 240,
26, 196, 11, 19, 71, 77, 242, 181, 133, 183, 35, 54, 153, 214, 234, 222,
207, 24, 84, 10, 191, 71, 48, 50, 148, 21, 175, 79, 60, 52, 17, 215,
76, 94, 181, 248, 119, 2, 166, 129, 186, 224, 115, 8, 37, 198, 155, 18,
235, 77, 143, 117, 164, 39, 59, 90, 147, 123, 45, 227, 93, 137, 249, 166,
194, 250, 209, 131, 28, 97, 201, 232, 86, 206, 190, 212, 112, 95, 100, 56,
43, 82, 159, 125, 168, 33, 190, 152, 112, 106, 164, 47, 59, 92, 19, 121,
205, 226, 213, 137, 159, 38, 232, 26, 206, 139, 20, 103, 79, 106, 180, 47,
55, 92, 22, 185, 206, 242, 212, 69, 159, 115, 40, 37, 222, 155, 24, 107,
74, 175, 119, 60, 38, 145, 218, 236, 91, 13, 251, 69, 131, 115, 33, 229,
216, 75, 26, 183, 75, 54, 183, 86, 246, 190, 198, 240, 82, 196, 61, 147,
81, 173, 252, 125, 129, 225, 160, 72, 120, 54, 162, 150, 249, 174, 194, 252,
81, 129, 252, 96, 65, 232, 48, 78, 148, 52, 111, 87, 108, 62, 173, 208,
125, 156, 33, 169, 216, 126, 218, 160, 91, 56, 59, 82, 147, 125, 173, 225,
189, 136, 113, 166, 164, 122, 251, 99, 3, 105, 193, 238, 208, 76, 92, 53,
249, 215, 2, 222, 129, 152, 96, 106, 168, 47, 62, 156, 16, 105, 204, 46,
213, 220, 95, 25, 248, 10, 194, 135, 17, 162, 140, 121, 165, 226, 251, 9,
131, 70, 225, 242, 200, 69, 150, 179, 46, 245, 220, 71, 25, 242, 138, 197,
167, 19, 58, 141, 211, 37, 157, 219, 41, 155, 94, 235, 120, 79, 98, 180,
41, 183, 94, 246, 184, 70, 242, 178, 197, 181, 147, 55, 45, 214, 157, 158,
233, 168, 78, 254, 180, 64, 119, 112, 38, 164, 26, 251, 75, 3, 119, 65,
230, 176, 74, 244, 55, 7, 86, 130, 190, 225, 176, 72, 116, 54, 167, 86,
250, 190, 195, 48, 81, 212, 60, 95, 81, 248, 60, 66, 145, 241, 172, 68,
125, 243, 97, 133, 232, 99, 14, 169, 196, 126, 211, 96, 93, 232, 57, 142,
146, 228, 109, 139, 109, 167, 109, 186, 173, 179, 61, 181, 209, 183, 28, 118,
137, 230, 230, 202, 202, 215, 23, 30, 142, 136, 100, 102, 171, 106, 255, 111,
0, 44, 0, 29, 192, 9, 144, 6, 236, 2, 205, 193, 149, 144, 111, 44,
44, 29, 221, 201, 153, 150, 234, 238, 207, 12, 84, 5, 255, 67, 0, 49,
192, 20, 80, 15, 124, 4, 33, 195, 88, 81, 250, 188, 67, 49, 241, 212,
68, 95, 115, 120, 37, 226, 155, 9, 171, 70, 255, 114, 192, 37, 144, 27,
44, 11, 93, 199, 121, 146, 162, 237, 185, 141, 178, 229, 181, 139, 55, 39,
86, 154, 190, 235, 48, 79, 84, 52, 63, 87, 80, 62, 188, 16, 113, 204,
36, 85, 219, 127, 27, 96, 11, 104, 7, 110, 130, 172, 97, 189, 232, 113,
142, 164, 100, 123, 107, 99, 111, 105, 236, 46, 205, 220, 85, 153, 255, 42,
192, 31, 16, 8, 12, 6, 133, 194, 227, 17, 137, 204, 102, 213, 234, 223,
15, 24, 4, 10, 131, 71, 33, 242, 152, 69, 170, 179, 63, 53, 208, 23,
28, 14, 137, 196, 102, 211, 106, 221, 239, 25, 140, 10, 229, 199, 11, 18,
135, 77, 162, 181, 185, 183, 50, 246, 149, 134, 239, 34, 204, 25, 149, 202,
239, 23, 12, 14, 133, 196, 99, 19, 105, 205, 238, 213, 140, 95, 37, 248,
27, 2, 139, 65, 167, 112, 122, 164, 35, 59, 89, 211, 122, 221, 227, 25,
137, 202, 230, 215, 10, 222, 135, 24, 98, 138, 169, 167, 62, 250, 144, 67,
44, 49, 221, 212, 89, 159, 122, 232, 35, 14, 153, 196, 106, 211, 111, 29,
236, 9, 141, 198, 229, 146, 203, 45, 151, 93, 174, 185, 188, 114, 241, 229,
132, 75, 35, 119, 89, 230, 186, 202, 243, 23, 5, 206, 131, 20, 97, 207,
104, 84, 46, 191, 92, 112, 57, 228, 18, 203, 77, 151, 117, 174, 167, 60,
122, 145, 227, 44, 73, 221, 246, 217, 134, 218, 226, 219, 9, 155, 70, 235,
114, 207, 101, 148, 43, 47, 95, 92, 56, 57, 210, 146, 221, 173, 153, 189,
170, 241, 191, 4, 112, 3, 100, 1, 235, 64, 79, 112, 52, 36, 23, 91,
78, 187, 116, 115, 103, 101, 234, 171, 15, 63, 68, 16, 51, 76, 21, 245,
207, 7, 20, 2, 143, 65, 164, 48, 123, 84, 35, 127, 89, 224, 58, 200,
19, 22, 141, 206, 229, 148, 75, 47, 119, 92, 38, 185, 218, 242, 219, 5,
155, 67, 43, 113, 223, 100, 88, 43, 122, 159, 99, 40, 41, 222, 158, 216,
104, 90, 174, 187, 60, 115, 81, 229, 252, 75, 1, 247, 64, 70, 176, 50,
244, 21, 135, 79, 34, 180, 25, 183, 74, 246, 183, 6, 246, 130, 198, 225,
146, 200, 109, 150, 173, 174, 253, 188, 65, 177, 240, 116, 68, 39, 115, 90,
165, 251, 59, 3, 83, 65, 253, 240, 65, 132, 48, 99, 84, 41, 255, 94,
192, 56, 80, 18, 188, 13, 177, 197, 180, 83, 55, 125, 214, 161, 158, 248,
104, 66, 174, 177, 188, 116, 113, 231, 100, 74, 171, 119, 63, 102, 144, 42,
236, 31, 13, 200, 5, 150, 131, 46, 225, 220, 72, 89, 246, 186, 198, 243,
18, 197, 205, 147, 21, 173, 207, 61, 148, 17, 175, 76, 124, 53, 225, 215,
8, 94, 134, 184, 98, 242, 169, 133, 190, 227, 48, 73, 212, 54, 223, 86,
216, 62, 218, 144, 91, 44, 59, 93, 211, 121, 157, 226, 233, 137, 142, 230,
228, 74, 203, 119, 23, 102, 142, 170, 228, 127, 11, 96, 7, 104, 2, 174,
129, 188, 96, 113, 232, 36, 78, 155, 116, 107, 103, 111, 106, 172, 47, 61,
220, 17, 153, 204, 106, 213, 239, 31, 12, 8, 5, 198, 131, 18, 225, 205,
136, 85, 166, 191, 58, 240, 19, 4, 13, 195, 69, 145, 243, 44, 69, 221,
243, 25, 133, 202, 227, 23, 9, 206, 134, 212, 98, 223, 105, 152, 46, 234,
156, 79, 41, 244, 30, 199, 72, 82, 182, 189, 182, 241, 182, 196, 118, 211,
102, 221, 234, 217, 143, 26, 228, 11, 11, 71, 71, 114, 178, 165, 181, 187,
55, 51, 86, 149, 254, 239, 0, 76, 0, 53, 192, 23, 16, 14, 140, 4,
101, 195, 107, 17, 239, 76, 76, 53, 245, 215, 7, 30, 130, 136, 97, 166,
168, 122, 254, 163, 0, 121, 192, 34, 208, 25, 156, 10, 233, 199, 14, 210,
132, 93, 163, 121, 185, 226, 242, 201, 133, 150, 227, 46, 201, 220, 86, 217,
254, 218, 192, 91, 16, 59, 76, 19, 117, 205, 231, 21, 138, 143, 39, 36,
26, 155, 75, 43, 119, 95, 102, 184, 42, 242, 159, 5, 168, 3, 62, 129,
208, 96, 92, 40, 57, 222, 146, 216, 109, 154, 173, 171, 61, 191, 81, 176,
60, 116, 17, 231, 76, 74, 181, 247, 55, 6, 150, 130, 238, 225, 140, 72,
101, 246, 171, 6, 255, 66, 192, 49, 144, 20, 108, 15, 109, 196, 45, 147,
93, 173, 249, 189, 130, 241, 161, 132, 120, 99, 98, 169, 233, 190, 206, 240,
84, 68, 63, 115, 80, 37, 252, 27, 1, 203, 64, 87, 112, 62, 164, 16,
123, 76, 35, 117, 217, 231, 26, 202, 139, 23, 39, 78, 154, 180, 107, 55,
111, 86, 172, 62, 253, 208, 65, 156, 48, 105, 212, 46, 223, 92, 88, 57,
250, 146, 195, 45, 145, 221, 172, 89, 189, 250, 241, 131, 4, 97, 195, 104,
81, 238, 188, 76, 113, 245, 228, 71, 11, 114, 135, 101, 162, 171, 57, 191,
82, 240, 61, 132, 17, 163, 76, 121, 245, 226, 199, 9, 146, 134, 237, 162,
205, 185, 149, 178, 239, 53, 140, 23, 37, 206, 155, 20, 107, 79, 111, 116,
44, 39, 93, 218, 185, 155, 50, 235, 85, 143, 127, 36, 32, 27, 88, 11,
122, 135, 99, 34, 169, 217, 190, 218, 240, 91, 4, 59, 67, 83, 113, 253,
228, 65, 139, 112, 103, 100, 42, 171, 95, 63, 120, 16, 34, 140, 25, 165,
202, 251, 23, 3, 78, 129, 244, 96, 71, 104, 50, 174, 149, 188, 111, 49,
236, 20, 77, 207, 117, 148, 39, 47, 90, 156, 59, 41, 211, 94, 221, 248,
89, 130, 186, 225, 179, 8, 117, 198, 167, 18, 250, 141, 131, 37, 161, 219,
56, 91, 82, 187, 125, 179, 97, 181, 232, 119, 14, 166, 132, 122, 227, 99,
9, 233, 198, 206, 210, 212, 93, 159, 121, 168, 34, 254, 153, 128, 106, 224,
47, 8, 28, 6, 137, 194, 230, 209, 138, 220, 103, 25, 234, 138, 207, 39,
20, 26, 143, 75, 36, 55, 91, 86, 187, 126, 243, 96, 69, 232, 51, 14,
149, 196, 111, 19, 108, 13, 237, 197, 141, 147, 37, 173, 219, 61, 155, 81,
171, 124, 127, 97, 224, 40, 72, 30, 182, 136, 118, 230, 166, 202, 250, 215,
3, 30, 129, 200, 96, 86, 168, 62, 254, 144, 64, 108, 48, 45, 212, 29,
159, 73, 168, 54, 254, 150, 192, 110, 208, 44, 92, 29, 249, 201, 130, 214,
225, 158, 200, 104, 86, 174, 190, 252, 112, 65, 228, 48, 75, 84, 55, 127,
86, 160, 62, 248, 16, 66, 140, 49, 165, 212, 123, 31, 99, 72, 41, 246,
158, 198, 232, 82, 206, 189, 148, 113, 175, 100, 124, 43, 97, 223, 104, 88,
46, 186, 156, 115, 41, 229, 222, 203, 24, 87, 74, 190, 183, 48, 118, 148,
38, 239, 90, 204, 59, 21, 211, 79, 29, 244, 9, 135, 70, 226, 178, 201,
181, 150, 247, 46, 198, 156, 82, 233, 253, 142, 193, 164, 80, 123, 124, 35,
97, 217, 232, 90, 206, 187, 20, 115, 79, 101, 244, 43, 7, 95, 66, 184,
49, 178, 148, 117, 175, 103, 60, 42, 145, 223, 44, 88, 29, 250, 137, 131,
38, 225, 218, 200, 91, 22, 187, 78, 243, 116, 69, 231, 115, 10, 165, 199,
59, 18, 147, 77, 173, 245, 189, 135, 49, 162, 148, 121, 175, 98, 252, 41,
129, 222, 224, 88, 72, 58, 182, 147, 54, 237, 214, 205, 158, 213, 168, 95,
62, 184, 16, 114, 140, 37, 165, 219, 59, 27, 83, 75, 125, 247, 97, 134,
168, 98, 254, 169, 128, 126, 224, 32, 72, 24, 54, 138, 150, 231, 46, 202,
156, 87, 41, 254, 158, 192, 104, 80, 46, 188, 28, 113, 201, 228, 86, 203,
126, 215, 96, 94, 168, 56, 126, 146, 160, 109, 184, 45, 178, 157, 181, 169,
183, 62, 246, 144, 70, 236, 50, 205, 213, 149, 159, 47, 40, 28, 30, 137,
200, 102, 214, 170, 222, 255, 24, 64, 10, 176, 7, 52, 2, 151, 65, 174,
176, 124, 116, 33, 231, 88, 74, 186, 183, 51, 54, 149, 214, 239, 30, 204,
8, 85, 198, 191, 18, 240, 13, 132, 5, 163, 67, 57, 241, 210, 196, 93,
147, 121, 173, 226, 253, 137, 129, 166, 224, 122, 200, 35, 22, 153, 206, 234,
212, 79, 31, 116, 8, 39, 70, 154, 178, 235, 53, 143, 87, 36, 62, 155,
80, 107, 124, 47, 97, 220, 40, 89, 222, 186, 216, 115, 26, 165, 203, 59,
23, 83, 78, 189, 244, 113, 135, 100, 98, 171, 105, 191, 110, 240, 44, 68,
29, 243, 73, 133, 246, 227, 6, 201, 194, 214, 209, 158, 220, 104, 89, 238,
186, 204, 115, 21, 229, 207, 11, 20, 7, 79, 66, 180, 49, 183, 84, 118,
191, 102, 240, 42, 196, 31, 19, 72, 13, 246, 133, 134, 227, 34, 201, 217,
150, 218, 238, 219, 12, 91, 69, 251, 115, 3, 101, 193, 235, 16, 79, 76,
52, 53, 215, 87, 30, 190, 136, 112, 102, 164, 42, 251, 95, 3, 120, 1,
226, 128, 73, 160, 54, 248, 22, 194, 142, 209, 164, 92, 123, 121, 227, 98,
201, 233, 150, 206, 238, 212, 76, 95, 117, 248, 39, 2, 154, 129, 171, 32,
127, 88, 32, 58, 152, 19, 42, 141, 223, 37, 152, 27, 42, 139, 95, 39,
120, 26, 162, 139, 57, 167, 82, 250, 189, 131, 49, 161, 212, 120, 95, 98,
184, 41, 178, 158, 245, 168, 71, 62, 178, 144, 117, 172, 39, 61, 218, 145,
155, 44, 107, 93, 239, 121, 140, 34, 229, 217, 139, 26, 231, 75, 10, 183,
71, 54, 178, 150, 245, 174, 199, 60, 82, 145, 253, 172, 65, 189, 240, 113,
132, 36, 99, 91, 105, 251, 110, 195, 108, 81, 237, 252, 77, 129, 245, 160,
71, 56, 50, 146, 149, 173, 175, 61, 188, 17, 177, 204, 116, 85, 231, 127,
10, 160, 7, 56, 2, 146, 129, 173, 160, 125, 184, 33, 178, 152, 117, 170,
167, 63, 58, 144, 19, 44, 13, 221, 197, 153, 147, 42, 237, 223, 13, 152,
5, 170, 131, 63, 33, 208, 24, 92, 10, 185, 199, 50, 210, 149, 157, 175,
41, 188, 30, 241, 200, 68, 86, 179, 126, 245, 224, 71, 8, 50, 134, 149,
162, 239, 57, 140, 18, 229, 205, 139, 21, 167, 79, 58, 180, 19, 55, 77,
214, 181, 158, 247, 40, 70, 158, 178, 232, 117, 142, 167, 36, 122, 155, 99,
43, 105, 223, 110, 216, 44, 90, 157, 251, 41, 131, 94, 225, 248, 72, 66,
182, 177, 182, 244, 118, 199, 102, 210, 170, 221, 191, 25, 176, 10, 244, 7,
7, 66, 130, 177, 161, 180, 120, 119, 98, 166, 169, 186, 254, 243, 0, 69,
192, 51, 16, 21, 204, 15, 21, 196, 15, 19, 68, 13, 243, 69, 133, 243,
35, 5, 217, 195, 26, 209, 203, 28, 87, 73, 254, 182, 192, 118, 208, 38,
220, 26, 217, 203, 26, 215, 75, 30, 183, 72, 118, 182, 166, 246, 250, 198,
195, 18, 209, 205, 156, 85, 169, 255, 62, 192, 16, 80, 12, 60, 5, 209,
195, 28, 81, 201, 252, 86, 193, 254, 208, 64, 92, 48, 57, 212, 18, 223,
77, 152, 53, 170, 151, 63, 46, 144, 28, 108, 9, 237, 198, 205, 146, 213,
173, 159, 61, 168, 17, 190, 140, 112, 101, 228, 43, 11, 95, 71, 120, 50,
162, 149, 185, 175, 50, 252, 21, 129, 207, 32, 84, 24, 63, 74, 144, 55,
44, 22, 157, 206, 233, 148, 78, 239, 116, 76, 39, 117, 218, 167, 27, 58,
139, 83, 39, 125, 218, 161, 155, 56, 107, 82, 175, 125, 188, 33, 177, 216,
116, 90, 167, 123, 58, 163, 83, 57, 253, 210, 193, 157, 144, 105, 172, 46,
253, 220, 65, 153, 240, 106, 196, 47, 19, 92, 13, 249, 197, 130, 211, 33,
157, 216, 105, 154, 174, 235, 60, 79, 81, 244, 60, 71, 81, 242, 188, 69,
177, 243, 52, 69, 215, 115, 30, 165, 200, 123, 22, 163, 78, 249, 244, 66,
199, 113, 146, 164, 109, 187, 109, 179, 109, 181, 237, 183, 13, 182, 133, 182,
227, 54, 201, 214, 214, 222, 222, 216, 88, 90, 186, 187, 51, 51, 255, 63 )
random_mask_vec8 = numpy.array(random_mask_tuple, numpy.uint8)
|
tyc85/nwsdr-3.6.3-dsc
|
dsc-gmsk-rs/cat_packet_utils.py
|
Python
|
gpl-3.0
| 29,805
|
[
"cclib"
] |
389d68ae80335eb116e2f5688fdf363ff7ca72ac3a1995bee7b63b5a4fbb39d2
|
"""Module for calculating electron-phonon couplings.
Electron-phonon interaction::
__
\ l + +
H = ) g c c ( a + a ),
el-ph /_ ij i j l l
l,ij
where the electron phonon coupling is given by::
______
l / hbar ___
g = /------- < i | \ / V * e | j > .
ij \/ 2 M w 'u eff l
l
Here, l denotes the vibrational mode, w_l and e_l is the frequency and
mass-scaled polarization vector, respectively, M is an effective mass, i, j are
electronic state indices and nabla_u denotes the gradient wrt atomic
displacements. The implementation supports calculations of the el-ph coupling
in both finite and periodic systems, i.e. expressed in a basis of molecular
orbitals or Bloch states.
The implementation is based on finite-difference calculations of the atomic
gradients of the effective potential expressed on a real-space grid. The el-ph
couplings are obtained from LCAO representations of the atomic gradients of the
effective potential and the electronic states.
In PAW the matrix elements of the derivative of the effective potential
are given by the sum of the following contributions::

    < i | dV_{eff}/du | j > = < i | d\tilde{V}/du | j >

        + \sum_{a,i'j'} < i | \tilde{p}^a_{i'} > (d\Delta H^a_{i'j'}/du) < \tilde{p}^a_{j'} | j >

        + \sum_{a,i'j'} < i | d\tilde{p}^a_{i'}/du > \Delta H^a_{i'j'} < \tilde{p}^a_{j'} | j >

        + \sum_{a,i'j'} < i | \tilde{p}^a_{i'} > \Delta H^a_{i'j'} < d\tilde{p}^a_{j'}/du | j >
where the first term is the derivative of the potential (Hartree + XC) and the
last three terms originate from the PAW (pseudopotential) part of the effective
DFT Hamiltonian.
"""
import sys
import cPickle as pickle
from math import pi
from os.path import isfile
import numpy as np
import numpy.fft as fft
import numpy.linalg as la
import ase.units as units
from ase.phonons import Displacement, Phonons
from ase.parallel import rank, barrier
from gpaw.utilities import unpack2
from gpaw.utilities.tools import tri2full
from gpaw.utilities.timing import StepTimer, nulltimer, Timer
from gpaw.lcao.overlap import ManySiteDictionaryWrapper, \
TwoCenterIntegralCalculator
from gpaw.lcao.tightbinding import TightBinding
from gpaw.kpt_descriptor import KPointDescriptor
class ElectronPhononCoupling(Displacement):
"""Class for calculating the electron-phonon coupling in an LCAO basis.
The derivative of the effective potential wrt atomic displacements is
obtained from a finite difference approximation to the derivative by doing
a self-consistent calculation for atomic displacements in the +/-
directions. These calculations are carried out in the ``run`` member
function.
The subsequent calculation of the coupling matrix in the basis of atomic
orbitals (or Bloch-sums hereof for periodic systems) is handled by the
``calculate_matrix`` member function.
"""
def __init__(self, atoms, calc=None, supercell=(1, 1, 1), name='elph',
delta=0.01, phonons=None):
"""Initialize with base class args and kwargs.
Parameters
----------
atoms: Atoms object
The atoms to work on.
calc: Calculator
Calculator for the supercell calculation.
supercell: tuple
Size of supercell given by the number of repetitions (l, m, n) of
the small unit cell in each direction.
name: str
Name to use for files (default: 'elph').
delta: float
Magnitude of displacements.
phonons: Phonons object
If provided, the ``__call__`` member function of the ``Phonons``
class in ASE will be called in the ``__call__`` member function of
this class.
"""
# Init base class and make the center cell in the supercell the
# reference cell
Displacement.__init__(self, atoms, calc=calc, supercell=supercell,
name=name, delta=delta, refcell='center')
# Store ``Phonons`` object in attribute
self.phonons = phonons
# Log
self.set_log()
# LCAO calculator
self.calc_lcao = None
# Supercell matrix
self.g_xNNMM = None
def __call__(self, atoms_N):
"""Extract effective potential and projector coefficients."""
# Do calculation
atoms_N.get_potential_energy()
# Call phonons class if provided
if self.phonons is not None:
forces = self.phonons.__call__(atoms_N)
# Get calculator
calc = atoms_N.get_calculator()
# Effective potential (in Hartree) and projector coefficients
Vt_G = calc.hamiltonian.vt_sG[0]
Vt_G = calc.wfs.gd.collect(Vt_G, broadcast=True)
dH_asp = calc.hamiltonian.dH_asp
setups = calc.wfs.setups
nspins = calc.wfs.nspins
gd_comm = calc.wfs.gd.comm
dH_all_asp = {}
for a, setup in enumerate(setups):
ni = setup.ni
nii = ni * (ni + 1) // 2
dH_tmp_sp = np.zeros((nspins, nii))
if a in dH_asp:
dH_tmp_sp[:] = dH_asp[a]
gd_comm.sum(dH_tmp_sp)
dH_all_asp[a] = dH_tmp_sp
return Vt_G, dH_all_asp
def set_lcao_calculator(self, calc):
"""Set LCAO calculator for the calculation of the supercell matrix."""
# Add parameter checks here
# - check that gamma
# - check that no symmetries are used
# - ...
parameters = calc.input_parameters
assert parameters['mode'] == 'lcao', "LCAO mode required."
assert parameters['usesymm'] is not True, "Symmetries not supported."
self.calc_lcao = calc
def set_basis_info(self, *args):
"""Store lcao basis info for atoms in reference cell in attribute.
Parameters
----------
args: tuple
If the LCAO calculator is not available (e.g. if the supercell is
loaded from file), the ``load_supercell_matrix`` member function
provides the required info as arguments.
"""
if len(args) == 0:
calc = self.calc_lcao
setups = calc.wfs.setups
bfs = calc.wfs.basis_functions
nao_a = [setups[a].nao for a in range(len(self.atoms))]
M_a = [bfs.M_a[a] for a in range(len(self.atoms))]
else:
M_a = args[0]
nao_a = args[1]
self.basis_info = {'M_a': M_a,
'niAO_a': nao_a} # XXX niAO -> nao
def set_log(self, log=None):
"""Set output log."""
if log is None:
self.timer = nulltimer
elif log == '-':
self.timer = StepTimer(name='elph')
else:
self.timer = StepTimer(name='elph', out=open(log, 'w'))
def calculate_supercell_matrix(self, dump=0, name=None, filter=None,
include_pseudo=True, atoms=None):
"""Calculate matrix elements of the el-ph coupling in the LCAO basis.
This function calculates the matrix elements between LCAOs and local
atomic gradients of the effective potential. The matrix elements are
calculated for the supercell used to obtain finite-difference
approximations to the derivatives of the effective potential with respect to
atomic displacements.
Parameters
----------
dump: int
Dump supercell matrix to pickle file (default: 0).
0: Supercell matrix not saved
1: Supercell matrix saved in a single pickle file.
2: Dump matrix for different gradients in separate files. Useful
for large systems where the total array gets too large for a
single pickle file.
name: string
User specified name of the generated pickle file(s). If not
provided, the string in the ``name`` attribute is used.
filter: str
Fourier filter atomic gradients of the effective potential. The
specified components (``normal`` or ``umklapp``) are removed
(default: None).
include_pseudo: bool
Include the contribution from the pseudopotential in the atomic
gradients. If ``False``, only the gradient of the effective
potential is included (default: True).
atoms: Atoms object
Calculate supercell for an ``Atoms`` object different from the one
provided in the ``__init__`` method (WARNING, NOT working!).
"""
assert self.calc_lcao is not None, "Set LCAO calculator"
# Supercell atoms
if atoms is None:
atoms_N = self.atoms * self.N_c
else:
atoms_N = atoms
# Initialize calculator if required and extract useful quantities
calc = self.calc_lcao
if not hasattr(calc.wfs, 'S_qMM'):
calc.initialize(atoms_N)
calc.initialize_positions(atoms_N)
self.set_basis_info()
basis = calc.input_parameters['basis']
# Extract useful objects from the calculator
wfs = calc.wfs
gd = calc.wfs.gd
kd = calc.wfs.kd
kpt_u = wfs.kpt_u
setups = wfs.setups
nao = setups.nao
bfs = wfs.basis_functions
dtype = wfs.dtype
spin = 0 # XXX
# If gamma calculation, overlap with neighboring cell cannot be removed
if kd.gamma:
print "WARNING: Gamma-point calculation."
else:
# Bloch to real-space converter
tb = TightBinding(atoms_N, calc)
self.timer.write_now("Calculating supercell matrix")
self.timer.write_now("Calculating real-space gradients")
# Calculate finite-difference gradients (in Hartree / Bohr)
V1t_xG, dH1_xasp = self.calculate_gradient()
self.timer.write_now("Finished real-space gradients")
# Fourier filter the atomic gradients of the effective potential
if filter is not None:
self.timer.write_now("Fourier filtering gradients")
V1_xG = V1t_xG.copy()
self.fourier_filter(V1t_xG, components=filter)
self.timer.write_now("Finished Fourier filtering")
# For the contribution from the derivative of the projectors
dP_aqvMi = self.calculate_dP_aqvMi(wfs)
# Equilibrium atomic Hamiltonian matrix (projector coefficients)
dH_asp = pickle.load(open(self.name + '.eq.pckl'))[1]
# Check that the grid is the same as in the calculator
assert np.all(V1t_xG.shape[-3:] == (gd.N_c + gd.pbc_c - 1)), \
"Mismatch in grids."
# Calculate < i k | grad H | j k >, i.e. matrix elements in Bloch basis
# List for supercell matrices;
g_xNNMM = []
self.timer.write_now("Calculating gradient of PAW Hamiltonian")
# Do each cartesian component separately
for i, a in enumerate(self.indices):
for v in range(3):
# Corresponding array index
x = 3 * i + v
V1t_G = V1t_xG[x]
self.timer.write_now("%s-gradient of atom %u" %
(['x','y','z'][v], a))
# Array for different k-point components
g_qMM = np.zeros((len(kpt_u), nao, nao), dtype)
# 1) Gradient of effective potential
self.timer.write_now("Starting gradient of effective potential")
for kpt in kpt_u:
# Matrix elements
geff_MM = np.zeros((nao, nao), dtype)
bfs.calculate_potential_matrix(V1t_G, geff_MM, q=kpt.q)
tri2full(geff_MM, 'L')
# Insert in array
g_qMM[kpt.q] += geff_MM
self.timer.write_now("Finished gradient of effective potential")
if include_pseudo:
self.timer.write_now("Starting gradient of pseudo part")
# 2) Gradient of non-local part (projectors)
self.timer.write_now("Starting gradient of dH^a")
P_aqMi = calc.wfs.P_aqMi
# 2a) dH^a part has contributions from all other atoms
for kpt in kpt_u:
# Matrix elements
gp_MM = np.zeros((nao, nao), dtype)
dH1_asp = dH1_xasp[x]
for a_, dH1_sp in dH1_asp.items():
dH1_ii = unpack2(dH1_sp[spin])
gp_MM += np.dot(P_aqMi[a_][kpt.q], np.dot(dH1_ii,
P_aqMi[a_][kpt.q].T.conjugate()))
g_qMM[kpt.q] += gp_MM
self.timer.write_now("Finished gradient of dH^a")
self.timer.write_now("Starting gradient of projectors")
# 2b) dP^a part has only contributions from the same atoms
dP_qvMi = dP_aqvMi[a]
dH_ii = unpack2(dH_asp[a][spin])
for kpt in kpt_u:
#XXX Sort out the sign here; conclusion -> sign = +1 !
P1HP_MM = +1 * np.dot(dP_qvMi[kpt.q][v], np.dot(dH_ii,
P_aqMi[a][kpt.q].T.conjugate()))
# Matrix elements
gp_MM = P1HP_MM + P1HP_MM.T.conjugate()
g_qMM[kpt.q] += gp_MM
self.timer.write_now("Finished gradient of projectors")
self.timer.write_now("Finished gradient of pseudo part")
# Extract R_c=(0, 0, 0) block by Fourier transforming
if kd.gamma or kd.N_c is None:
g_MM = g_qMM[0]
else:
# Convert to array
g_MM = tb.bloch_to_real_space(g_qMM, R_c=(0, 0, 0))[0]
# Reshape to global unit cell indices
N = np.prod(self.N_c)
# Number of basis function in the primitive cell
assert (nao % N) == 0, "Alarm ...!"
nao_cell = nao / N
g_NMNM = g_MM.reshape((N, nao_cell, N, nao_cell))
g_NNMM = g_NMNM.swapaxes(1, 2).copy()
self.timer.write_now("Finished supercell matrix")
if dump != 2:
g_xNNMM.append(g_NNMM)
else:
if name is not None:
fname = '%s.supercell_matrix_x_%2.2u.%s.pckl' % (name, x, basis)
else:
fname = self.name + \
'.supercell_matrix_x_%2.2u.%s.pckl' % (x, basis)
if kd.comm.rank == 0:
fd = open(fname, 'w')
M_a = self.basis_info['M_a']
nao_a = self.basis_info['niAO_a']
pickle.dump((g_NNMM, M_a, nao_a), fd, 2)
fd.close()
self.timer.write_now("Finished gradient of PAW Hamiltonian")
if dump != 2:
# Collect gradients in one array
self.g_xNNMM = np.array(g_xNNMM)
# Dump to pickle file using binary mode together with basis info
if dump and kd.comm.rank == 0:
if name is not None:
fname = '%s.supercell_matrix.%s.pckl' % (name, basis)
else:
fname = self.name + '.supercell_matrix.%s.pckl' % basis
fd = open(fname, 'w')
M_a = self.basis_info['M_a']
nao_a = self.basis_info['niAO_a']
pickle.dump((self.g_xNNMM, M_a, nao_a), fd, 2)
fd.close()
def load_supercell_matrix(self, basis=None, name=None, multiple=False):
"""Load supercell matrix from pickle file.
Parameters
----------
basis: string
String specifying the LCAO basis used to calculate the supercell
matrix, e.g. 'dz(dzp)'.
name: string
User specified name of the pickle file.
multiple: bool
Load each derivative from individual files.
"""
assert (basis is not None) or (name is not None), \
"Provide basis or name."
if self.g_xNNMM is not None:
self.g_xNNMM = None
if not multiple:
# File name
if name is not None:
fname = name
else:
fname = self.name + '.supercell_matrix.%s.pckl' % basis
fd = open(fname)
self.g_xNNMM, M_a, nao_a = pickle.load(fd)
fd.close()
else:
g_xNNMM = []
for x in range(len(self.indices)*3):
if name is not None:
fname = name
else:
fname = self.name + \
'.supercell_matrix_x_%2.2u.%s.pckl' % (x, basis)
fd = open(fname, 'r')
g_NNMM, M_a, nao_a = pickle.load(fd)
fd.close()
g_xNNMM.append(g_NNMM)
self.g_xNNMM = np.array(g_xNNMM)
self.set_basis_info(M_a, nao_a)
def apply_cutoff(self, cutmax=None, cutmin=None):
"""Zero matrix element inside/beyond the specified cutoffs.
Parameters
----------
cutmax: float
Zero matrix elements for basis functions with a distance to the
atomic gradient that is larger than the cutoff.
cutmin: float
Zero matrix elements where both basis functions have distances to
the atomic gradient that is smaller than the cutoff.
"""
if cutmax is not None:
cutmax = float(cutmax)
if cutmin is not None:
cutmin = float(cutmin)
# Reference to supercell matrix attribute
g_xNNMM = self.g_xNNMM
# Number of atoms and primitive cells
N_atoms = len(self.indices)
N = np.prod(self.N_c)
nao = g_xNNMM.shape[-1]
# Reshape array
g_avNNMM = g_xNNMM.reshape(N_atoms, 3, N, N, nao, nao)
# Make slices for orbitals on atoms
M_a = self.basis_info['M_a']
nao_a = self.basis_info['niAO_a']
slice_a = []
for a in range(len(self.atoms)):
start = M_a[a]
stop = start + nao_a[a]
s = slice(start, stop)
slice_a.append(s)
# Lattice vectors
R_cN = self.lattice_vectors()
# Unit cell vectors
cell_vc = self.atoms.cell.transpose()
# Atomic positions in reference cell
pos_av = self.atoms.get_positions()
# Create a mask array to zero the relevant matrix elements
if cutmin is not None:
mask_avNNMM = np.zeros(g_avNNMM.shape, dtype=bool)
# Zero elements where one of the basis orbitals has a distance to atoms
# (atomic gradients) in the reference cell larger than the cutoff
for n in range(N):
# Lattice vector to cell
R_v = np.dot(cell_vc, R_cN[:, n])
# Atomic positions in cell
posn_av = pos_av + R_v
for i, a in enumerate(self.indices):
# Atomic distances wrt to the position of the gradient
dist_a = np.sqrt(np.sum((pos_av[a] - posn_av)**2, axis=-1))
if cutmax is not None:
# Atom indices where the distance is larger than the max
# cutoff
j_a = np.where(dist_a > cutmax)[0]
# Zero elements
for j in j_a:
g_avNNMM[a, :, n, :, slice_a[j], :] = 0.0
g_avNNMM[a, :, :, n, :, slice_a[j]] = 0.0
if cutmin is not None:
# Atom indices where the distance is larger than the min
# cutoff
j_a = np.where(dist_a > cutmin)[0]
# Update mask to keep elements where one LCAO is outside
# the min cutoff
for j in j_a:
mask_avNNMM[a, :, n, :, slice_a[j], :] = True
mask_avNNMM[a, :, :, n, :, slice_a[j]] = True
# Zero elements where both LCAOs are located within the min cutoff
if cutmin is not None:
g_avNNMM[~mask_avNNMM] = 0.0
def lcao_matrix(self, u_l, omega_l):
"""Calculate the el-ph coupling in the electronic LCAO basis.
For now, only works for Gamma-point phonons.
Parameters
----------
u_l: ndarray
Mass-scaled polarization vectors (in units of 1 / sqrt(amu)) of the
phonons.
omega_l: ndarray
Vibrational frequencies in eV.
"""
# Supercell matrix (Hartree / Bohr)
assert self.g_xNNMM is not None, "Load supercell matrix."
assert self.g_xNNMM.shape[1:3] == (1, 1)
g_xMM = self.g_xNNMM[:, 0, 0, :, :]
# Number of atomic orbitals
nao = g_xMM.shape[-1]
# Number of phonon modes
nmodes = u_l.shape[0]
#
u_lx = u_l.reshape(nmodes, 3 * len(self.atoms))
g_lMM = np.dot(u_lx, g_xMM.transpose(1, 0, 2))
# Multiply prefactor sqrt(hbar / 2 * M * omega) in units of Bohr
amu = units._amu # atomic mass unit
me = units._me # electron mass
g_lMM /= np.sqrt(2 * amu / me / units.Hartree * \
omega_l[:, np.newaxis, np.newaxis])
# Convert to eV
g_lMM *= units.Hartree
return g_lMM
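    # Unit bookkeeping sketch (added note): with M in amu and omega_l in eV,
    # the prefactor sqrt(hbar / (2 M omega_l)) expressed in atomic units is
    # 1 / sqrt(2 * (amu/me) * (omega_l/Hartree)) Bohr, which is exactly the
    # denominator applied above before the final Hartree -> eV conversion.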
def bloch_matrix(self, kpts, qpts, c_kn, u_ql, omega_ql=None, kpts_from=None):
"""Calculate el-ph coupling in the Bloch basis for the electrons.
This function calculates the electron-phonon coupling between the
specified Bloch states, i.e.::
    g^{mnl}_{kq} = \sqrt{\hbar / (2 M \omega_{ql})} < m, k+q | e_{ql} \cdot \nabla V_q | n, k >
In case the ``omega_ql`` keyword argument is not given, the bare matrix
element (in units of eV / Ang) without the sqrt prefactor is returned.
Parameters
----------
kpts: ndarray or tuple.
k-vectors of the Bloch states. When a tuple of integers is given, a
Monkhorst-Pack grid with the specified number of k-points along the
directions of the reciprocal lattice vectors is generated.
qpts: ndarray or tuple.
q-vectors of the phonons.
c_kn: ndarray
Expansion coefficients for the Bloch states. The ordering must be
the same as in the ``kpts`` argument.
u_ql: ndarray
Mass-scaled polarization vectors (in units of 1 / sqrt(amu)) of the
phonons. Again, the ordering must be the same as in the
corresponding ``qpts`` argument.
omega_ql: ndarray
Vibrational frequencies in eV.
kpts_from: list of ints or int
Calculate only the matrix element for the k-vectors specified by
their index in the ``kpts`` argument (default: all).
In short, phonon frequencies and mode vectors must be given in ASE units.
"""
assert self.g_xNNMM is not None, "Load supercell matrix."
assert len(c_kn.shape) == 3
assert len(u_ql.shape) == 4
if omega_ql is not None:
assert np.all(u_ql.shape[:2] == omega_ql.shape[:2])
# Translate k-points into the first BZ (required by the ``find_k_plus_q``
# member function of the ``KPointDescriptor``).
if isinstance(kpts, np.ndarray):
assert kpts.shape[1] == 3, "kpts_kc array must be given"
# XXX This does not seem to cause problems!
kpts -= kpts.round()
# Use the KPointDescriptor to keep track of the k and q-vectors
kd_kpts = KPointDescriptor(kpts)
kd_qpts = KPointDescriptor(qpts)
# Check that number of k- and q-points agree with the number of Bloch
# functions and polarization vectors
assert kd_kpts.nbzkpts == len(c_kn)
assert kd_qpts.nbzkpts == len(u_ql)
# Include all k-points by default
if kpts_from is None:
kpts_kc = kd_kpts.bzk_kc
kpts_k = range(kd_kpts.nbzkpts)
else:
kpts_kc = kd_kpts.bzk_kc[kpts_from]
if isinstance(kpts_from, int):
kpts_k = list([kpts_from])
else:
kpts_k = list(kpts_from)
# Supercell matrix (real matrix in Hartree / Bohr)
g_xNNMM = self.g_xNNMM
# Number of phonon modes and electronic bands
nmodes = u_ql.shape[1]
nbands = c_kn.shape[1]
# Number of atoms displacements and basis functions
ndisp = np.prod(u_ql.shape[2:])
assert ndisp == (3 * len(self.indices))
nao = c_kn.shape[2]
assert ndisp == g_xNNMM.shape[0]
assert nao == g_xNNMM.shape[-1]
# Lattice vectors
R_cN = self.lattice_vectors()
# Number of unit cells in the supercell
N = np.prod(self.N_c)
# Allocate array for couplings
g_qklnn = np.zeros((kd_qpts.nbzkpts, len(kpts_kc), nmodes,
nbands, nbands), dtype=complex)
self.timer.write_now("Calculating coupling matrix elements")
for q, q_c in enumerate(kd_qpts.bzk_kc):
# Find indices of k+q for the k-points
kplusq_k = kd_kpts.find_k_plus_q(q_c, kpts_k=kpts_k)
# Here, ``i`` is counting from 0 and ``k`` is the global index of
# the k-point
for i, (k, k_c) in enumerate(zip(kpts_k, kpts_kc)):
# Check the wave vectors (adapted to the ``KPointDescriptor`` class)
kplusq_c = k_c + q_c
kplusq_c -= kplusq_c.round()
assert np.allclose(kplusq_c, kd_kpts.bzk_kc[kplusq_k[i]] ), \
(i, k, k_c, q_c, kd_kpts.bzk_kc[kplusq_k[i]])
# Allocate array
g_xMM = np.zeros((ndisp, nao, nao), dtype=complex)
# Multiply phase factors
for m in range(N):
for n in range(N):
Rm_c = R_cN[:, m]
Rn_c = R_cN[:, n]
phase = np.exp(2.j * pi * (np.dot(k_c, Rm_c - Rn_c)
+ np.dot(q_c, Rm_c)))
# Sum contributions from different cells
g_xMM += g_xNNMM[:, m, n, :, :] * phase
# LCAO coefficient for Bloch states
ck_nM = c_kn[k]
ckplusq_nM = c_kn[kplusq_k[i]]
# Mass scaled polarization vectors
u_lx = u_ql[q].reshape(nmodes, 3 * len(self.atoms))
g_nxn = np.dot(ckplusq_nM.conj(), np.dot(g_xMM, ck_nM.T))
g_lnn = np.dot(u_lx, g_nxn)
# Insert value
g_qklnn[q, i] = g_lnn
# XXX Temp
if np.all(q_c == 0.0):
# These should be real
print g_qklnn[q].imag.min(), g_qklnn[q].imag.max()
self.timer.write_now("Finished calculation of coupling matrix elements")
# Return the bare matrix element if frequencies are not given
if omega_ql is None:
# Convert to eV / Ang
g_qklnn *= units.Hartree / units.Bohr
else:
# Multiply prefactor sqrt(hbar / 2 * M * omega) in units of Bohr
amu = units._amu # atomic mass unit
me = units._me # electron mass
g_qklnn /= np.sqrt(2 * amu / me / units.Hartree * \
omega_ql[:, np.newaxis, :, np.newaxis, np.newaxis])
# Convert to eV
g_qklnn *= units.Hartree
# Return couplings in eV (or eV / Ang)
return g_qklnn
def fourier_filter(self, V1t_xG, components='normal', criteria=1):
"""Fourier filter atomic gradients of the effective potential.
Parameters
----------
V1t_xG: ndarray
Array representation of atomic gradients of the effective potential
in the supercell grid.
components: str
Fourier components to filter out (``normal`` or ``umklapp``).
criteria: int
    Criterion used to locate q-points inside the Brillouin zone:
    0 works for general cells; 1 (default) assumes a 2D hexagonal
    lattice.
"""
assert components in ['normal', 'umklapp']
# Grid shape
shape = V1t_xG.shape[-3:]
# Primitive unit cells in Bohr/Bohr^-1
cell_cv = self.atoms.get_cell() / units.Bohr
reci_vc = 2 * pi * la.inv(cell_cv)
norm_c = np.sqrt(np.sum(reci_vc**2, axis=0))
# Periodic BC array
pbc_c = np.array(self.atoms.get_pbc(), dtype=bool)
# Supercell atoms and cell
atoms_N = self.atoms * self.N_c
supercell_cv = atoms_N.get_cell() / units.Bohr
# q-grid in units of the grid spacing (FFT ordering)
q_cG = np.indices(shape).reshape(3, -1)
q_c = np.array(shape)[:, np.newaxis]
q_cG += q_c // 2
q_cG %= q_c
q_cG -= q_c // 2
# Locate q-points inside the Brillouin zone
if criteria == 0:
# Works for all cases
# Grid spacing in direction of reciprocal lattice vectors
h_c = np.sqrt(np.sum((2 * pi * la.inv(supercell_cv))**2, axis=0))
# XXX Why does a "*=" operation on q_cG not work here ??
q1_cG = q_cG * h_c[:, np.newaxis] / (norm_c[:, np.newaxis] / 2)
mask_G = np.ones(np.prod(shape), dtype=bool)
for i, pbc in enumerate(pbc_c):
if not pbc:
continue
mask_G &= (-1. < q1_cG[i]) & (q1_cG[i] <= 1.)
else:
# 2D hexagonal lattice
# Projection of q-points onto the periodic directions. Only in
# these directions do normal and umklapp processes make sense.
q_vG = np.dot(q_cG[pbc_c].T,
2 * pi * la.inv(supercell_cv).T[pbc_c]).T.copy()
# Parametrize the BZ boundary in terms of the angle theta
theta_G = np.arctan2(q_vG[1], q_vG[0]) % (pi / 3)
phi_G = pi / 6 - np.abs(theta_G)
qmax_G = norm_c[0] / 2 / np.cos(phi_G)
norm_G = np.sqrt(np.sum(q_vG**2, axis=0))
# Includes point on BZ boundary with +1e-2
mask_G = (norm_G <= qmax_G + 1e-2) # & (q_vG[1] < (norm_c[0] / 2 - 1e-3))
if components != 'normal':
mask_G = ~mask_G
# Reshape to grid shape
mask_G.shape = shape
for V1t_G in V1t_xG:
# Fourier transform atomic gradient
V1tq_G = fft.fftn(V1t_G)
# Zero normal/umklapp components
V1tq_G[mask_G] = 0.0
# Fourier transform back
V1t_G[:] = fft.ifftn(V1tq_G).real
def calculate_gradient(self):
"""Calculate gradient of effective potential and projector coefs.
This function loads the generated pickle files and calculates
finite-difference derivatives.
"""
# Array and dict for finite difference derivatives
V1t_xG = []
dH1_xasp = []
x = 0
for a in self.indices:
for v in range(3):
name = '%s.%d%s' % (self.name, a, 'xyz'[v])
# Potential and atomic density matrix for atomic displacement
try:
Vtm_G, dHm_asp = pickle.load(open(name + '-.pckl'))
Vtp_G, dHp_asp = pickle.load(open(name + '+.pckl'))
except (IOError, EOFError):
raise IOError, "%s(-/+).pckl" % name
# FD derivatives in Hartree / Bohr
V1t_G = (Vtp_G - Vtm_G) / (2 * self.delta / units.Bohr)
V1t_xG.append(V1t_G)
dH1_asp = {}
for atom in dHm_asp.keys():
dH1_asp[atom] = (dHp_asp[atom] - dHm_asp[atom]) / \
(2 * self.delta / units.Bohr)
dH1_xasp.append(dH1_asp)
x += 1
return np.array(V1t_xG), dH1_xasp
def calculate_dP_aqvMi(self, wfs):
"""Overlap between LCAO basis functions and gradient of projectors.
Only the gradient wrt the atomic positions in the reference cell is
computed.
"""
nao = wfs.setups.nao
nq = len(wfs.ibzk_qc)
atoms = [self.atoms[i] for i in self.indices]
# Derivatives in reference cell
dP_aqvMi = {}
for atom, setup in zip(atoms, wfs.setups):
a = atom.index
dP_aqvMi[a] = np.zeros((nq, 3, nao, setup.ni), wfs.dtype)
# Calculate overlap between basis function and gradient of projectors
# NOTE: the derivative is calculated wrt the atomic position and not
# the real-space coordinate
calc = TwoCenterIntegralCalculator(wfs.ibzk_qc, derivative=True)
expansions = ManySiteDictionaryWrapper(wfs.tci.P_expansions, dP_aqvMi)
calc.calculate(wfs.tci.atompairs, [expansions], [dP_aqvMi])
# Extract derivatives in the reference unit cell
# dP_aqvMi = {}
# for atom in self.atoms:
# dP_aqvMi[atom.index] = dPall_aqvMi[atom.index]
return dP_aqvMi
|
robwarm/gpaw-symm
|
gpaw/elph/electronphonon.py
|
Python
|
gpl-3.0
| 35,302
|
[
"ASE",
"GPAW"
] |
822459ade4df98155bf3db6e1386e82d6bd8a4b940e851b20b1619bbd4079722
|
###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from mendeley import Mendeley
import json
from operator import itemgetter
MENDELEY_PAPERS_FILE = "mendeley_papers.json"
def test():
client_id = input("Input your app ID for Mendeley API: \n")
client_secret = input("Input your app secret for Mendeley API: \n")
redirect_uri = "https://isb-cgc.appspot.com/"
# These values should match the ones supplied when registering your application.
mendeley = Mendeley(client_id, client_secret=client_secret, redirect_uri=redirect_uri)
auth = mendeley.start_implicit_grant_flow()
# The user needs to visit this URL, and log in to Mendeley.
login_url = auth.get_login_url()
print("Go to this link to log in: \n" + login_url)
# After logging in, the user will be redirected to a URL, auth_response.
auth_response = input("Copy the redirect link here: \n")
auth_response = auth_response.rstrip()
# print("** Response is: " + auth_response)
session = auth.authenticate(auth_response)
# print(session.token['access_token'])
# List all groups I have access to
groups = session.groups.iter()
isb_cgc_papers = {}
i = 1
for g in groups:
print("[{}] {}".format(i, g.name))
i = i + 1
if g.name == 'ISB-CGC':
target_group = session.groups.get(g.id)
docs = target_group.documents.iter()
for doc in docs:
isb_cgc_papers.update({doc.title: doc.id})
# Let the user choose a group
selected_index = int(input('Select group to get paper from: '))
i = 1
group_id = ''
groups = session.groups.iter()
for g in groups:
if i == selected_index:
group_id = g.id
break
i = i + 1
if group_id == '':
quit()
# Get all the documents in the group
target_group = session.groups.get(group_id)
docs = target_group.documents.iter()
# Write documents to json
json_papers = {}
papers_array = []
for doc in docs:
this_paper = {}
this_paper['title'] = doc.title
if doc.title in isb_cgc_papers:
this_paper['id'] = isb_cgc_papers.get(doc.title)
else:
this_paper['id'] = doc.id
if doc.identifiers:
identifiers = ""
iden_len = len(doc.identifiers)
i = 0
for key, value in doc.identifiers.items():
identifiers += "{}: {}".format(key, value)
if i != iden_len - 1:
identifiers += ", "
i = i + 1
this_paper['identifiers'] = identifiers
else:
this_paper['identifiers'] = doc.identifiers
if doc.authors:
author_names = ""
count = 0
for author in doc.authors[:-1]:
if count >= 3:
break
count += 1
author_names += "{} {}, ".format(author.first_name, author.last_name)
if count < 3:
author_names += "{} {}".format(doc.authors[-1].first_name, doc.authors[-1].last_name)
else:
author_names += "et al."
this_paper['authors'] = author_names
else:
this_paper['authors'] = doc.authors
this_paper['source'] = doc.source
this_paper['type'] = doc.type
this_paper['year'] = doc.year if doc.year else 0
if doc.keywords:
keywords = ""
for keyword in doc.keywords[:-1]:
keywords += "{}, ".format(keyword)
keywords += doc.keywords[-1]
this_paper['keywords'] = keywords
else:
this_paper['keywords'] = doc.keywords
papers_array.append(this_paper)
# Sort the list by year in reverse order (oldest last)
papers_array.sort(key=itemgetter('year'), reverse=True)
json_papers['papers'] = papers_array
# Write JSON file locally for upload to the GCP bucket
f = open(MENDELEY_PAPERS_FILE, 'w')
json.dump(json_papers, f, indent=4)
f.close()
print(str(len(papers_array)) + " papers generated, please upload to GCP bucket to update: "
"webapp-static-files-isb-cgc-dev/static/publications/mendeley_papers.json")
if __name__ == "__main__":
test()
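# Usage sketch (added note; invocation path assumed from the repo layout):
#
#     python scripts/get_mendeley_papers.py
#
# Paste the app ID/secret and the redirect URL when prompted; the script
# then writes mendeley_papers.json for manual upload to the GCP bucket
# named in the final print statement.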
|
isb-cgc/ISB-CGC-Webapp
|
scripts/get_mendeley_papers.py
|
Python
|
apache-2.0
| 4,930
|
[
"VisIt"
] |
5aac311a292cc406f77ba57fd64309a2ab7f99da1f71209f3944587561701a12
|
"""Create Common Workflow Language (CWL) runnable files and tools from a world object.
"""
from __future__ import print_function
import collections
import copy
import dateutil
import functools
import json
import math
import operator
import os
import tarfile
import requests
import six
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import defs, workflow
from bcbio.distributed import objectstore, resources
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import alignment
from functools import reduce
INTEGRATION_MAP = {"keep:": "arvados", "s3:": "s3", "sbg:": "sbgenomics",
"dx:": "dnanexus", "gs:": "gs"}
def from_world(world, run_info_file, integrations=None, add_container_tag=None):
base = utils.splitext_plus(os.path.basename(run_info_file))[0]
out_dir = utils.safe_makedir("%s-workflow" % (base))
out_file = os.path.join(out_dir, "main-%s.cwl" % (base))
samples = [xs[0] for xs in world] # unpack world data objects
analyses = list(set([x["analysis"] for x in samples]))
assert len(analyses) == 1, "Only support writing CWL for a single analysis type: %s" % analyses
try:
workflow_fn = defs.workflows[analyses[0].lower()]
except KeyError:
raise NotImplementedError("Unsupported CWL analysis type: %s" % analyses[0])
prep_cwl(samples, workflow_fn, out_dir, out_file, integrations, add_container_tag=add_container_tag)
def _cwl_workflow_template(inputs, top_level=False):
"""Retrieve CWL inputs shared amongst different workflows.
"""
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []}
def _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
cur_remotes, no_files):
"""Retrieve disk usage estimates as CWL ResourceRequirement and hint.
Disk specification for temporary files and outputs.
Also optionally includes disk input estimates as a custom hint for
platforms which need to stage these and don't pre-estimate these when
allocating machine sizes.
"""
tmp_disk, out_disk, in_disk = 0, 0, 0
if file_estimates:
if disk:
for key, multiplier in disk.items():
if key in file_estimates:
out_disk += int(multiplier * file_estimates[key])
for inp in inputs:
scale = 2.0 if inp.get("type") == "array" else 1.0
# Allocating all samples, could remove for `to_rec` when we ensure we
# don't have to stage. Currently dnanexus stages everything, so we need to account for it.
if parallel in ["multi-combined", "multi-batch"] and "dnanexus" in cur_remotes:
scale *= (len(samples))
if workflow.is_cwl_record(inp):
for f in _get_record_fields(inp):
if f["name"] in file_estimates:
in_disk += file_estimates[f["name"]] * scale
elif inp["id"] in file_estimates:
in_disk += file_estimates[inp["id"]] * scale
# Round total estimates to integer, assign extra half to temp space
# It's not entirely clear how different runners interpret this
tmp_disk = int(math.ceil(out_disk * 0.5))
out_disk = int(math.ceil(out_disk))
bcbio_docker_disk = (10 if cur_remotes else 1) * 1024 # Minimum requirements for bcbio Docker image
disk_hint = {"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk}
# Skip input disk for steps which require only transformation (and thus no staging)
if no_files:
in_disk = 0
# Avoid accidentally flagging as no staging if we don't know sizes of expected inputs
elif in_disk == 0:
in_disk = 1
input_hint = {"class": "dx:InputResourceRequirement", "indirMin": int(math.ceil(in_disk))}
return disk_hint, input_hint
def _add_current_quay_tag(repo, container_tags):
"""Lookup the current quay tag for the repository, adding to repo string.
Enables generation of CWL explicitly tied to revisions.
"""
if ':' in repo:
return repo, container_tags
try:
latest_tag = container_tags[repo]
except KeyError:
repo_id = repo[repo.find('/') + 1:]
tags = requests.request("GET", "https://quay.io/api/v1/repository/" + repo_id).json()["tags"]
latest_tag = None
latest_modified = None
for tag, info in tags.items():
if latest_tag:
if (dateutil.parser.parse(info['last_modified']) > dateutil.parser.parse(latest_modified)
and tag != 'latest'):
latest_modified = info['last_modified']
latest_tag = tag
else:
latest_modified = info['last_modified']
latest_tag = tag
container_tags[repo] = str(latest_tag)
latest_pull = repo + ':' + str(latest_tag)
return latest_pull, container_tags
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
file_estimates, disk, step_cores, samples, cur_remotes, no_files,
container_tags=None):
out_file = os.path.join(step_dir, "%s.cwl" % name)
resource_cores, mem_gb_per_core = resources.cpu_and_memory((programs or []) + ["default"], samples)
cores = min([step_cores, resource_cores]) if step_cores else resource_cores
mem_mb_total = int(mem_gb_per_core * cores * 1024)
cwl_res = {"class": "ResourceRequirement", "coresMin": cores, "ramMin": mem_mb_total}
disk_hint, input_hint = _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
cur_remotes, no_files)
cwl_res.update(disk_hint)
docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
if container_tags is not None:
docker_image, container_tags = _add_current_quay_tag(docker_image, container_tags)
docker = {"class": "DockerRequirement", "dockerPull": docker_image, "dockerImageId": docker_image}
out = {"class": "CommandLineTool",
"cwlVersion": "v1.0",
"baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
"requirements": [],
"hints": [docker, cwl_res, input_hint],
"arguments": [],
"inputs": [],
"outputs": []}
if programs:
def resolve_package(p):
out = {}
parts = p.split("=")
if len(parts) == 2:
out["package"] = parts[0]
out["version"] = [parts[1]]
else:
out["package"] = p
out["specs"] = ["https://anaconda.org/bioconda/%s" % out["package"]]
return out
out["hints"].append({"class": "SoftwareRequirement",
"packages": [resolve_package(p) for p in programs]})
# GATK requires networking for setting up log4j logging, use arvados extension
if any(p.startswith(("gatk", "sentieon")) for p in programs):
out["hints"] += [{"class": "arv:APIRequirement"}]
# Multi-process methods that read heavily from BAM files need extra keep cache for Arvados
if name in ["pipeline_summary", "variantcall_batch_region", "detect_sv"]:
out["hints"] += [{"class": "arv:RuntimeConstraints", "keep_cache": 4096}]
def add_to_namespaces(k, v, out):
if "$namespaces" not in out:
out["$namespaces"] = {}
out["$namespaces"][k] = v
return out
if any(h.get("class", "").startswith("arv:") for h in out["hints"]):
out = add_to_namespaces("arv", "http://arvados.org/cwl#", out)
if any(h.get("class", "").startswith("dx") for h in out["hints"]):
out = add_to_namespaces("dx", "https://www.dnanexus.com/cwl#", out)
# Use JSON for inputs, rather than command line arguments
# Correctly handles multiple values and batching across CWL runners
use_commandline_args = False
out["requirements"] += [{"class": "InlineJavascriptRequirement"},
{"class": "InitialWorkDirRequirement",
"listing": [{"entryname": "cwl.inputs.json",
"entry": "$(JSON.stringify(inputs))"}]}]
out["arguments"] += [{"position": 0, "valueFrom":
"sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"},
"sentinel_parallel=%s" % parallel,
"sentinel_outputs=%s" % ",".join([_get_sentinel_val(v) for v in outputs]),
"sentinel_inputs=%s" % ",".join(["%s:%s" %
(workflow.get_base_id(v["id"]),
"record" if workflow.is_cwl_record(v) else "var")
for v in inputs]),
"run_number=0"]
out = _add_inputs_to_tool(inputs, out, parallel, use_commandline_args)
out = _add_outputs_to_tool(outputs, out)
_tool_to_file(out, out_file)
return os.path.join("steps", os.path.basename(out_file))
def _write_expressiontool(step_dir, name, inputs, outputs, expression, parallel):
"""Create an ExpressionTool output for the given inputs
"""
out_file = os.path.join(step_dir, "%s.cwl" % name)
out = {"class": "ExpressionTool",
"cwlVersion": "v1.0",
"requirements": [{"class": "InlineJavascriptRequirement"}],
"inputs": [],
"outputs": [],
"expression": expression}
out = _add_inputs_to_tool(inputs, out, parallel)
out = _add_outputs_to_tool(outputs, out)
_tool_to_file(out, out_file)
return os.path.join("steps", os.path.basename(out_file))
def _add_outputs_to_tool(outputs, tool):
for outp in outputs:
outp_tool = copy.deepcopy(outp)
outp_tool = _clean_record(outp_tool)
outp_tool["id"] = workflow.get_base_id(outp["id"])
tool["outputs"].append(outp_tool)
return tool
def _add_inputs_to_tool(inputs, tool, parallel, use_commandline_args=False):
for i, inp in enumerate(inputs):
base_id = workflow.get_base_id(inp["id"])
inp_tool = copy.deepcopy(inp)
inp_tool["id"] = base_id
if inp.get("wf_duplicate"):
inp_tool["id"] += "_toolinput"
for attr in ["source", "valueFrom", "wf_duplicate"]:
inp_tool.pop(attr, None)
# Ensure records and workflow inputs get scattered
if (_is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel) and
(workflow.is_cwl_record(inp) or inp["wf_duplicate"])):
inp_tool = workflow._flatten_nested_input(inp_tool)
if use_commandline_args:
inp_binding = {"prefix": "%s=" % base_id,
"separate": False, "itemSeparator": ";;", "position": i}
inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
else:
inp_binding = None
inp_tool = _place_secondary_files(inp_tool, inp_binding)
inp_tool = _clean_record(inp_tool)
tool["inputs"].append(inp_tool)
return tool
def _tool_to_file(tool, out_file):
with open(out_file, "w") as out_handle:
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.add_representer(str, str_presenter)
yaml.dump(tool, out_handle, default_flow_style=False, allow_unicode=False)
return out_file
def _clean_record(rec):
"""Remove secondary files from record fields, which are currently not supported.
To be removed later when secondaryFiles added to records.
"""
if workflow.is_cwl_record(rec):
def _clean_fields(d):
if isinstance(d, dict):
if "fields" in d:
out = []
for f in d["fields"]:
f = utils.deepish_copy(f)
f.pop("secondaryFiles", None)
out.append(f)
d["fields"] = out
return d
else:
out = {}
for k, v in d.items():
out[k] = _clean_fields(v)
return out
else:
return d
return _clean_fields(rec)
else:
return rec
def _get_record_fields(d):
"""Get field names from a potentially nested record.
"""
if isinstance(d, dict):
if "fields" in d:
return d["fields"]
else:
for v in d.values():
fields = _get_record_fields(v)
if fields:
return fields
def _get_sentinel_val(v):
"""Retrieve expected sentinel value for an output, expanding records.
"""
out = workflow.get_base_id(v["id"])
if workflow.is_cwl_record(v):
out += ":%s" % ";".join([x["name"] for x in _get_record_fields(v)])
return out
def _place_input_binding(inp_tool, inp_binding, parallel):
"""Check nesting of variables to determine where to place the input binding.
We want to allow having multiple files together (like fasta_indices), combined
with the itemSeparator, but also support having multiple samples where we pass
things independently.
"""
if (parallel in ["multi-combined", "multi-batch", "batch-split", "batch-parallel",
"batch-merge", "batch-single"] and
tz.get_in(["type", "type"], inp_tool) == "array"):
inp_tool["type"]["inputBinding"] = inp_binding
else:
inp_tool["inputBinding"] = inp_binding
return inp_tool
def _place_secondary_files(inp_tool, inp_binding=None):
"""Put secondaryFiles at the level of the File item to ensure indexes get passed.
"""
def _is_file(val):
return (val == "File" or (isinstance(val, (list, tuple)) and
("File" in val or any(isinstance(x, dict) and _is_file(val)) for x in val)))
secondary_files = inp_tool.pop("secondaryFiles", None)
if secondary_files:
key = []
while (not _is_file(tz.get_in(key + ["type"], inp_tool))
and not _is_file(tz.get_in(key + ["items"], inp_tool))
and not _is_file(tz.get_in(key + ["items", "items"], inp_tool))):
key.append("type")
if tz.get_in(key, inp_tool):
inp_tool["secondaryFiles"] = secondary_files
elif inp_binding:
nested_inp_binding = copy.deepcopy(inp_binding)
nested_inp_binding["prefix"] = "ignore="
nested_inp_binding["secondaryFiles"] = secondary_files
inp_tool = tz.update_in(inp_tool, key, lambda x: nested_inp_binding)
return inp_tool
def _is_scatter_parallel(parallel):
return parallel.endswith("-parallel")
def _do_scatter_var(v, parallel):
"""Logic for scattering a variable.
"""
# For batches, scatter records only at the top level (double nested)
if parallel.startswith("batch") and workflow.is_cwl_record(v):
return (tz.get_in(["type", "type"], v) == "array" and
tz.get_in(["type", "type", "type"], v) == "array")
# Otherwise, scatter arrays
else:
return (tz.get_in(["type", "type"], v) == "array")
def _step_template(name, run_file, inputs, outputs, parallel, step_parallelism, scatter=None):
"""Templating function for writing a step to avoid repeating namespaces.
"""
scatter_inputs = []
sinputs = []
for inp in inputs:
step_inp = {"id": workflow.get_base_id(inp["id"]), "source": inp["id"]}
if inp.get("wf_duplicate"):
step_inp["id"] += "_toolinput"
for attr in ["source", "valueFrom"]:
if attr in inp:
step_inp[attr] = inp[attr]
sinputs.append(step_inp)
# An initial parallel scatter and multiple chained parallel sample scatters
if (parallel == "multi-parallel" and
(not step_parallelism or
step_parallelism.get(workflow.get_step_prefix(inp["id"])) == "multi-parallel")):
scatter_inputs.append(step_inp["id"])
# scatter on inputs from previous processes that have been arrayed
elif (_is_scatter_parallel(parallel) and (_do_scatter_var(inp, parallel)
or (scatter and inp["id"] in scatter))):
scatter_inputs.append(step_inp["id"])
out = {"run": run_file,
"id": name,
"in": sinputs,
"out": [{"id": workflow.get_base_id(output["id"])} for output in outputs]}
if _is_scatter_parallel(parallel):
assert scatter_inputs, "Did not find items to scatter on: %s" % name
out.update({"scatterMethod": "dotproduct",
"scatter": scatter_inputs})
return out
def _get_cur_remotes(path):
"""Retrieve remote references defined in the CWL.
"""
cur_remotes = set([])
if isinstance(path, (list, tuple)):
for v in path:
cur_remotes |= _get_cur_remotes(v)
elif isinstance(path, dict):
for v in path.values():
cur_remotes |= _get_cur_remotes(v)
elif path and isinstance(path, six.string_types):
if path.startswith(tuple(INTEGRATION_MAP.keys())):
cur_remotes.add(INTEGRATION_MAP.get(path.split(":")[0] + ":"))
return cur_remotes
def prep_cwl(samples, workflow_fn, out_dir, out_file, integrations=None,
add_container_tag=None):
"""Output a CWL description with sub-workflows and steps.
"""
if add_container_tag is None:
container_tags = None
elif add_container_tag.lower() == "quay_lookup":
container_tags = {}
else:
container_tags = collections.defaultdict(lambda: add_container_tag)
step_dir = utils.safe_makedir(os.path.join(out_dir, "steps"))
get_retriever = GetRetriever(integrations, samples)
variables, keyvals = _flatten_samples(samples, out_file, get_retriever)
cur_remotes = _get_cur_remotes(keyvals)
file_estimates = _calc_input_estimates(keyvals, get_retriever)
out = _cwl_workflow_template(variables)
parent_wfs = []
step_parallelism = {}
steps, wfoutputs = workflow_fn(samples)
used_inputs = set([])
for cur in workflow.generate(variables, steps, wfoutputs):
if cur[0] == "step":
_, name, parallel, inputs, outputs, image, programs, disk, cores, no_files = cur
step_file = _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
file_estimates, disk, cores, samples, cur_remotes, no_files, container_tags)
out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel, step_parallelism))
used_inputs |= set(x["id"] for x in inputs)
elif cur[0] == "expressiontool":
_, name, inputs, outputs, expression, parallel = cur
step_file = _write_expressiontool(step_dir, name, inputs, outputs, expression, parallel)
out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel, step_parallelism))
used_inputs |= set(x["id"] for x in inputs)
elif cur[0] == "upload":
for output in cur[1]:
wf_output = copy.deepcopy(output)
if "outputSource" not in wf_output:
wf_output["outputSource"] = wf_output.pop("source")
wf_output = _clean_record(wf_output)
# Avoid input/output naming clashes
if wf_output["id"] in used_inputs:
wf_output["id"] = "%s_out" % wf_output["id"]
out["outputs"].append(wf_output)
elif cur[0] == "wf_start":
parent_wfs.append(out)
out = _cwl_workflow_template(cur[1])
elif cur[0] == "wf_finish":
_, name, parallel, inputs, outputs, scatter = cur
wf_out_file = "wf-%s.cwl" % name
with open(os.path.join(out_dir, wf_out_file), "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
out = parent_wfs.pop(-1)
out["steps"].append(_step_template(name, wf_out_file, inputs, outputs, parallel,
step_parallelism, scatter))
used_inputs |= set(x["id"] for x in inputs)
else:
raise ValueError("Unexpected workflow value %s" % str(cur))
step_parallelism[name] = parallel
with open(out_file, "w") as out_handle:
out["inputs"] = [x for x in out["inputs"] if x["id"] in used_inputs]
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
sample_json = "%s-samples.json" % utils.splitext_plus(out_file)[0]
out_clean = _clean_final_outputs(copy.deepcopy({k: v for k, v in keyvals.items() if k in used_inputs}),
get_retriever)
with open(sample_json, "w") as out_handle:
json.dump(out_clean, out_handle, sort_keys=True, indent=4, separators=(',', ': '))
return out_file, sample_json
def _flatten_samples(samples, base_file, get_retriever):
"""Create a flattened JSON representation of data from the bcbio world map.
"""
flat_data = []
for data in samples:
data["reference"] = _indexes_to_secondary_files(data["reference"], data["genome_build"])
cur_flat = {}
for key_path in [["analysis"], ["description"], ["rgnames"], ["config", "algorithm"],
["metadata"], ["genome_build"], ["resources"],
["files"], ["reference"], ["genome_resources"], ["vrn_file"]]:
cur_key = "__".join(key_path)
for flat_key, flat_val in _to_cwldata(cur_key, tz.get_in(key_path, data), get_retriever):
cur_flat[flat_key] = flat_val
flat_data.append(cur_flat)
out = {}
for key in sorted(list(set(reduce(operator.add, [list(d.keys()) for d in flat_data])))):
# Periods in keys cause issues with WDL and some CWL implementations
clean_key = key.replace(".", "_")
out[clean_key] = []
for cur_flat in flat_data:
out[clean_key].append(cur_flat.get(key))
# special case for back-compatibility with fasta specifications -- yuck
if "reference__fasta__base" not in out and "reference__fasta" in out:
out["reference__fasta__base"] = out["reference__fasta"]
del out["reference__fasta"]
return _samplejson_to_inputs(out), out
def _indexes_to_secondary_files(gresources, genome_build):
"""Convert a list of genome indexes into a single file plus secondary files.
This ensures that all indices are staged together in a single directory.
"""
out = {}
for refname, val in gresources.items():
if isinstance(val, dict) and "indexes" in val:
# list of indexes -- aligners
if len(val.keys()) == 1:
indexes = sorted(val["indexes"])
if len(indexes) == 0:
if refname not in alignment.allow_noindices():
raise ValueError("Did not find indexes for %s: %s" % (refname, val))
elif len(indexes) == 1:
val = {"indexes": indexes[0]}
else:
val = {"indexes": {"base": indexes[0], "indexes": indexes[1:]}}
# directory plus indexes -- snpEff
elif "base" in val and os.path.isdir(val["base"]) and len(val["indexes"]) > 0:
indexes = val["indexes"]
val = {"base": indexes[0], "indexes": indexes[1:]}
elif isinstance(val, dict) and genome_build in val:
val = _indexes_to_secondary_files(val, genome_build)
out[refname] = val
return out
def _add_suppl_info(inp, val):
"""Add supplementary information to inputs from file values.
"""
inp["type"] = _get_avro_type(val)
secondary = _get_secondary_files(val)
if secondary:
inp["secondaryFiles"] = secondary
return inp
def _get_secondary_files(val):
"""Retrieve associated secondary files.
Normalizes input values into definitions of available secondary files.
Requires indices to be present in all files, since declared CWL secondary
files are not optional. So if we have a mix of BAM (bai) and fastq (gbi) we
ignore the existing indices and will have to regenerate during processing.
"""
out = []
if isinstance(val, (tuple, list)):
s_counts = collections.defaultdict(int)
for x in val:
for s in _get_secondary_files(x):
s_counts[s] += 1
for s, count in s_counts.items():
if s and s not in out and count == len([x for x in val if x]):
out.append(s)
elif isinstance(val, dict) and (val.get("class") == "File" or "File" in val.get("class")):
if "secondaryFiles" in val:
for sf in [x["path"] for x in val["secondaryFiles"]]:
rext = _get_relative_ext(val["path"], sf)
if rext and rext not in out:
out.append(rext)
return out
def _get_relative_ext(of, sf):
"""Retrieve relative extension given the original and secondary files.
"""
def half_finished_trim(orig, prefix):
return (os.path.basename(prefix).count(".") > 0 and
os.path.basename(orig).count(".") == os.path.basename(prefix).count("."))
# Handle remote files
if of.find(":") > 0:
of = os.path.basename(of.split(":")[-1])
if sf.find(":") > 0:
sf = os.path.basename(sf.split(":")[-1])
prefix = os.path.commonprefix([sf, of])
while prefix.endswith(".") or (half_finished_trim(sf, prefix) and half_finished_trim(of, prefix)):
prefix = prefix[:-1]
exts_to_remove = of.replace(prefix, "")
ext_to_add = sf.replace(prefix, "")
# Return extensions relative to original
if not exts_to_remove or exts_to_remove.startswith("."):
return str("^" * exts_to_remove.count(".") + ext_to_add)
else:
raise ValueError("No cross platform way to reference complex extension: %s %s" % (sf, of))
def _get_avro_type(val):
"""Infer avro type for the current input.
"""
if isinstance(val, dict):
assert val.get("class") == "File" or "File" in val.get("class")
return "File"
elif isinstance(val, (tuple, list)):
types = []
for ctype in [_get_avro_type(v) for v in val]:
if isinstance(ctype, dict):
nested_types = [x["items"] for x in types if isinstance(x, dict)]
if ctype["items"] not in nested_types:
if isinstance(ctype["items"], (list, tuple)):
for t in ctype["items"]:
if t not in types:
types.append(t)
else:
if ctype not in types:
types.append(ctype)
elif isinstance(ctype, (list, tuple)):
for x in ctype:
if x not in types:
types.append(x)
elif ctype not in types:
types.append(ctype)
# handle empty types, allow null
if len(types) == 0:
types = ["null"]
# empty lists
if isinstance(val, (list, tuple)) and len(val) == 0:
types.append({"type": "array", "items": ["null"]})
types = _avoid_duplicate_arrays(types)
# Avoid empty null only arrays which confuse some runners
if len(types) == 1 and types[0] == "null":
types.append("string")
return {"type": "array", "items": (types[0] if len(types) == 1 else types)}
elif val is None:
return ["null"]
# encode booleans as string True/False and unencode on other side
elif isinstance(val, bool) or isinstance(val, six.string_types) and val.lower() in ["true", "false", "none"]:
return ["string", "null", "boolean"]
elif isinstance(val, int):
return "long"
elif isinstance(val, float):
return "double"
else:
return "string"
def _avoid_duplicate_arrays(types):
"""Collapse arrays when we have multiple types.
"""
arrays = [t for t in types if isinstance(t, dict) and t["type"] == "array"]
others = [t for t in types if not (isinstance(t, dict) and t["type"] == "array")]
if arrays:
items = set([])
for t in arrays:
if isinstance(t["items"], (list, tuple)):
items |= set(t["items"])
else:
items.add(t["items"])
if len(items) == 1:
items = items.pop()
else:
items = sorted(list(items))
arrays = [{"type": "array", "items": items}]
return others + arrays
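# Example of the collapse performed above (added; hypothetical input):
#     [{"type": "array", "items": "File"}, {"type": "array", "items": "null"}]
# becomes
#     [{"type": "array", "items": ["File", "null"]}]
# so downstream CWL type unions stay as a single array declaration.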
def _samplejson_to_inputs(svals):
"""Convert sample output into inputs for CWL configuration files, with types.
"""
out = []
for key, val in svals.items():
out.append(_add_suppl_info({"id": "%s" % key}, val))
return out
def _to_cwldata(key, val, get_retriever):
"""Convert nested dictionary into CWL data, flatening and marking up files.
Moves file objects to the top level, enabling insertion in CWL inputs/outputs.
"""
out = []
if isinstance(val, dict):
if len(val) == 2 and "base" in val and "indexes" in val:
if len(val["indexes"]) > 0 and val["base"] == val["indexes"][0]:
out.append(("%s__indexes" % key, _item_to_cwldata(val["base"], get_retriever)))
else:
out.append((key, _to_cwlfile_with_indexes(val, get_retriever)))
# Dump shared nested keys like resources as a JSON string
elif key in workflow.ALWAYS_AVAILABLE or key in workflow.STRING_DICT:
out.append((key, _item_to_cwldata(json.dumps(val), get_retriever)))
elif key in workflow.FLAT_DICT:
flat = []
for k, vs in val.items():
if not isinstance(vs, (list, tuple)):
vs = [vs]
for v in vs:
flat.append("%s:%s" % (k, v))
out.append((key, _item_to_cwldata(flat, get_retriever)))
else:
remain_val = {}
for nkey, nval in val.items():
cur_nkey = "%s__%s" % (key, nkey)
cwl_nval = _item_to_cwldata(nval, get_retriever)
if isinstance(cwl_nval, dict):
out.extend(_to_cwldata(cur_nkey, nval, get_retriever))
elif key in workflow.ALWAYS_AVAILABLE:
remain_val[nkey] = nval
else:
out.append((cur_nkey, cwl_nval))
if remain_val:
out.append((key, json.dumps(remain_val, sort_keys=True, separators=(',', ':'))))
else:
out.append((key, _item_to_cwldata(val, get_retriever)))
return out
def _remove_remote_prefix(f):
"""Remove any remote references to allow object lookups by file paths.
"""
return f.split(":")[-1].split("/", 1)[1] if objectstore.is_remote(f) else f
def _index_blacklist(xs):
blacklist = []
return [x for x in xs if not any([x.find(b) >=0 for b in blacklist])]
def _to_cwlfile_with_indexes(val, get_retriever):
"""Convert reads with ready to go indexes into the right CWL object.
Identifies the top level directory and creates a tarball, avoiding
trying to handle complex secondary setups which are not cross platform.
Skips doing this for reference files and standard setups like bwa, which
take up too much time and space to unpack multiple times.
"""
val["indexes"] = _index_blacklist(val["indexes"])
tval = {"base": _remove_remote_prefix(val["base"]),
"indexes": [_remove_remote_prefix(f) for f in val["indexes"]]}
# Standard named set of indices, like bwa
# Do not include snpEff, which we need to isolate inside a nested directory
# hisat2 indices also do not localize cleanly due to complicated naming
cp_dir, cp_base = os.path.split(os.path.commonprefix([tval["base"]] + tval["indexes"]))
if (cp_base and cp_dir == os.path.dirname(tval["base"]) and
not ("/snpeff/" in cp_dir or "/hisat2" in cp_dir)):
return _item_to_cwldata(val["base"], get_retriever, val["indexes"])
else:
dirname = os.path.dirname(tval["base"])
assert all([x.startswith(dirname) for x in tval["indexes"]])
return {"class": "File", "path": directory_tarball(dirname)}
def _add_secondary_if_exists(secondary, out, get_retriever):
"""Add secondary files only if present locally or remotely.
"""
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out
def _item_to_cwldata(x, get_retriever, indexes=None):
""""Markup an item with CWL specific metadata.
"""
if isinstance(x, (list, tuple)):
return [_item_to_cwldata(subx, get_retriever) for subx in x]
elif (x and isinstance(x, six.string_types) and
(((os.path.isfile(x) or os.path.isdir(x)) and os.path.exists(x)) or
objectstore.is_remote(x))):
if _file_local_or_remote(x, get_retriever):
out = {"class": "File", "path": x}
if indexes:
out = _add_secondary_if_exists(indexes, out, get_retriever)
elif x.endswith(".bam"):
out = _add_secondary_if_exists([x + ".bai"], out, get_retriever)
elif x.endswith(".cram"):
out = _add_secondary_if_exists([x + ".crai"], out, get_retriever)
elif x.endswith((".vcf.gz", ".bed.gz")):
out = _add_secondary_if_exists([x + ".tbi"], out, get_retriever)
elif x.endswith(".fa"):
out = _add_secondary_if_exists([x + ".fai", os.path.splitext(x)[0] + ".dict"], out, get_retriever)
elif x.endswith(".fa.gz"):
out = _add_secondary_if_exists([x + ".fai", x + ".gzi", x.replace(".fa.gz", "") + ".dict"],
out, get_retriever)
elif x.endswith(".fq.gz") or x.endswith(".fastq.gz"):
out = _add_secondary_if_exists([x + ".gbi"], out, get_retriever)
elif x.endswith(".gtf"):
out = _add_secondary_if_exists([x + ".db"], out, get_retriever)
else:
out = {"class": "File", "path": directory_tarball(x)}
return out
elif isinstance(x, bool):
return str(x)
else:
return x
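# Hedged usage sketch (paths are hypothetical and assumed to exist): a BAM
# file picks up its index as a CWL secondaryFile,
#   _item_to_cwldata("/data/sample.bam", get_retriever)
#   -> {"class": "File", "path": "/data/sample.bam",
#       "secondaryFiles": [{"class": "File", "path": "/data/sample.bam.bai"}]}
# provided sample.bam.bai is present locally or via a remote integration.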
def _file_local_or_remote(f, get_retriever):
"""Check for presence of a local or remote file.
"""
if os.path.exists(f):
return f
integration, config = get_retriever.integration_and_config(f)
if integration:
return integration.file_exists(f, config)
def directory_tarball(dirname):
"""Create a tarball of a complex directory, avoiding complex secondaryFiles.
Complex secondary files do not work on multiple platforms and are not portable
to WDL, so for now we create a tarball that workers will unpack.
"""
assert os.path.isdir(dirname), dirname
base_dir, tarball_dir = os.path.split(dirname)
while not os.path.exists(os.path.join(base_dir, "seq")) and base_dir and base_dir != "/":
base_dir, extra_tarball = os.path.split(base_dir)
tarball_dir = os.path.join(extra_tarball, tarball_dir)
if base_dir == "/" and not os.path.exists(os.path.join(base_dir, "seq")):
raise ValueError("Did not find relative directory to create tarball for %s" % dirname)
tarball = os.path.join(base_dir, "%s-wf.tar.gz" % (tarball_dir.replace(os.path.sep, "--")))
if not utils.file_exists(tarball):
print("Preparing CWL input tarball: %s" % tarball)
with file_transaction({}, tarball) as tx_tarball:
with utils.chdir(base_dir):
with tarfile.open(tx_tarball, "w:gz") as tar:
tar.add(tarball_dir)
return tarball
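# Minimal layout sketch (hypothetical paths): the tarball is anchored at the
# first parent directory that has a sibling "seq" directory,
#   directory_tarball("/ref/hg38/rnaseq")   # assumes /ref/seq exists
#   -> "/ref/hg38--rnaseq-wf.tar.gz"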
def _clean_final_outputs(keyvals, get_retriever):
def clean_path(get_retriever, x):
integration, config = get_retriever.integration_and_config(x)
if integration:
return integration.clean_file(x, config)
else:
return x
keyvals = _adjust_files(keyvals, functools.partial(clean_path, get_retriever))
return keyvals
def _adjust_items(xs, adjust_fn):
if isinstance(xs, (list, tuple)):
return [_adjust_items(x, adjust_fn) for x in xs]
elif isinstance(xs, dict):
out = {}
for k, v in xs.items():
out[k] = _adjust_items(v, adjust_fn)
return out
else:
return adjust_fn(xs)
def _adjust_files(xs, adjust_fn):
"""Walk over key/value, tuples applying adjust_fn to files.
"""
if isinstance(xs, dict):
if "path" in xs:
out = {}
out["path"] = adjust_fn(xs["path"])
for k, vs in xs.items():
if k != "path":
out[k] = _adjust_files(vs, adjust_fn)
return out
else:
out = {}
for k, vs in xs.items():
out[k] = _adjust_files(vs, adjust_fn)
return out
elif isinstance(xs, (list, tuple)):
return [_adjust_files(x, adjust_fn) for x in xs]
else:
return xs
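# Small worked example (hypothetical data): only values stored under a "path"
# key are rewritten, everything else passes through unchanged,
#   _adjust_files({"ref": {"path": "s3:bucket/x.fa", "class": "File"}},
#                 lambda p: p.split(":")[-1])
#   -> {"ref": {"path": "bucket/x.fa", "class": "File"}}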
def _calc_input_estimates(keyvals, get_retriever):
"""Calculate estimations of input file sizes for disk usage approximation.
    These are currently dominated by fastq/BAM sizes, so estimate based on that.
"""
out = {}
for key, val in keyvals.items():
size = _calc_file_size(val, 0, get_retriever)
if size:
out[key] = size
return out
def _calc_file_size(val, depth, get_retriever):
if isinstance(val, (list, tuple)):
sizes = [_calc_file_size(x, depth + 1, get_retriever) for x in val]
sizes = [x for x in sizes if x]
if sizes:
            # At the top level take the biggest item, otherwise sum all files together
return max(sizes) if depth == 0 else sum(sizes)
elif isinstance(val, dict) and "path" in val:
return _get_file_size(val["path"], get_retriever)
return None
class GetRetriever:
    """Look up remote file retrievers and their configuration by path prefix."""
def __init__(self, integrations, samples):
self._integrations = integrations
self._samples = samples
def integration_and_config(self, path):
"""Get a retriever and configuration for the given file path.
"""
if path.startswith(tuple(INTEGRATION_MAP.keys())):
key = INTEGRATION_MAP[path.split(":")[0] + ":"]
integration = self._integrations.get(key)
config = {}
for sample in self._samples:
config = tz.get_in(["config", key], sample)
if config:
break
return integration, config
return None, None
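# Hedged note (INTEGRATION_MAP is defined elsewhere in this module): paths
# carrying a known remote prefix, e.g. something like "keep:...", dispatch to
# the matching retriever plus the first per-sample config found; plain local
# paths fall through to (None, None).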
def _get_file_size(path, get_retriever):
"""Return file size in megabytes, including querying remote integrations
"""
integration, config = get_retriever.integration_and_config(path)
if integration:
return integration.file_size(path, config)
elif os.path.exists(path):
return os.path.getsize(path) / (1024.0 * 1024.0)
|
a113n/bcbio-nextgen
|
bcbio/cwl/create.py
|
Python
|
mit
| 40,458
|
[
"BWA",
"Bioconda"
] |
b0ca9494824c67f22d3d0038148e67097431e60f87dd3c9b199203e33c9329d2
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import copy
import os
import unittest
import warnings
from shutil import which
from monty.serialization import loadfn
from pymatgen.analysis.graphs import (
MoleculeGraph,
MolGraphSplitError,
PeriodicSite,
StructureGraph,
)
from pymatgen.analysis.local_env import (
CovalentBondNN,
CutOffDictNN,
MinimumDistanceNN,
MinimumOKeeffeNN,
OpenBabelNN,
VoronoiNN,
)
from pymatgen.command_line.critic2_caller import Critic2Analysis
from pymatgen.core import Lattice, Molecule, Site, Structure
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.testing import PymatgenTest
try:
from openbabel import openbabel as ob
except ImportError:
ob = None
import networkx as nx
import networkx.algorithms.isomorphism as iso
__author__ = "Matthew Horton, Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Beta"
__date__ = "August 2017"
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
molecule_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules")
class StructureGraphTest(PymatgenTest):
def setUp(self):
self.maxDiff = None
# trivial example, simple square lattice for testing
structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H"], [[0, 0, 0]])
self.square_sg = StructureGraph.with_empty_graph(structure, edge_weight_name="", edge_weight_units="")
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
# TODO: decorating still fails because the structure graph gives a CN of 8 for this square lattice
# self.square_sg.decorate_structure_with_ce_info()
# body-centered square lattice for testing
structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]])
self.bc_square_sg = StructureGraph.with_empty_graph(structure, edge_weight_name="", edge_weight_units="")
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, 0, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(-1, -1, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
# body-centered square lattice for testing
        # directions reversed, should be equivalent to bc_square
structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]])
self.bc_square_sg_r = StructureGraph.with_empty_graph(structure, edge_weight_name="", edge_weight_units="")
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
self.bc_square_sg_r.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, 0, 0))
self.bc_square_sg_r.add_edge(1, 0, from_jimage=(-1, 0, 0), to_jimage=(0, 0, 0))
self.bc_square_sg_r.add_edge(1, 0, from_jimage=(-1, -1, 0), to_jimage=(0, 0, 0))
self.bc_square_sg_r.add_edge(1, 0, from_jimage=(0, -1, 0), to_jimage=(0, 0, 0))
# MoS2 example, structure graph obtained from critic2
# (not ground state, from mp-1023924, single layer)
stdout_file = os.path.join(
PymatgenTest.TEST_FILES_DIR,
"critic2/MoS2_critic2_stdout.txt",
)
with open(stdout_file) as f:
reference_stdout = f.read()
self.structure = Structure.from_file(
os.path.join(
PymatgenTest.TEST_FILES_DIR,
"critic2/MoS2.cif",
)
)
c2o = Critic2Analysis(self.structure, reference_stdout)
self.mos2_sg = c2o.structure_graph(include_critical_points=False)
latt = Lattice.cubic(4.17)
species = ["Ni", "O"]
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
self.NiO = Structure.from_spacegroup(225, latt, species, coords).get_primitive_structure()
# BCC example.
self.bcc = Structure(Lattice.cubic(5.0), ["He", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]])
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_inappropriate_construction(self):
# Check inappropriate strategy
with self.assertRaises(ValueError):
StructureGraph.with_local_env_strategy(self.NiO, CovalentBondNN())
def test_properties(self):
self.assertEqual(self.mos2_sg.name, "bonds")
self.assertEqual(self.mos2_sg.edge_weight_name, "bond_length")
self.assertEqual(self.mos2_sg.edge_weight_unit, "Å")
self.assertEqual(self.mos2_sg.get_coordination_of_site(0), 6)
self.assertEqual(len(self.mos2_sg.get_connected_sites(0)), 6)
self.assertTrue(isinstance(self.mos2_sg.get_connected_sites(0)[0].site, PeriodicSite))
self.assertEqual(str(self.mos2_sg.get_connected_sites(0)[0].site.specie), "S")
self.assertAlmostEqual(
self.mos2_sg.get_connected_sites(0, jimage=(0, 0, 100))[0].site.frac_coords[2],
100.303027,
)
# these two graphs should be equivalent
for n in range(len(self.bc_square_sg)):
self.assertEqual(
self.bc_square_sg.get_coordination_of_site(n),
self.bc_square_sg_r.get_coordination_of_site(n),
)
# test we're not getting duplicate connected sites
# thanks to Jack D. Sundberg for reporting this bug
# known example where this bug occurred due to edge weights not being
# bit-for-bit identical in otherwise identical edges
nacl_lattice = Lattice(
[
[3.48543625, 0.0, 2.01231756],
[1.16181208, 3.28610081, 2.01231756],
[0.0, 0.0, 4.02463512],
]
)
nacl = Structure(nacl_lattice, ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
nacl_graph = StructureGraph.with_local_env_strategy(nacl, CutOffDictNN({("Cl", "Cl"): 5.0}))
self.assertEqual(len(nacl_graph.get_connected_sites(1)), 12)
self.assertEqual(len(nacl_graph.graph.get_edge_data(1, 1)), 6)
def test_set_node_attributes(self):
self.square_sg.set_node_attributes()
specie = nx.get_node_attributes(self.square_sg.graph, "specie")
coords = nx.get_node_attributes(self.square_sg.graph, "coords")
properties = nx.get_node_attributes(self.square_sg.graph, "properties")
for i in range(len(self.square_sg.structure)):
self.assertEqual(str(specie[i]), str(self.square_sg.structure[i].specie))
self.assertEqual(coords[i][0], self.square_sg.structure[i].coords[0])
self.assertEqual(coords[i][1], self.square_sg.structure[i].coords[1])
self.assertEqual(coords[i][2], self.square_sg.structure[i].coords[2])
self.assertEqual(properties[i], self.square_sg.structure[i].properties)
def test_edge_editing(self):
square = copy.deepcopy(self.square_sg)
square.alter_edge(
0,
0,
to_jimage=(1, 0, 0),
new_weight=0.0,
new_edge_properties={"foo": "bar"},
)
new_edge = square.graph.get_edge_data(0, 0)[0]
self.assertEqual(new_edge["weight"], 0.0)
self.assertEqual(new_edge["foo"], "bar")
square.break_edge(0, 0, to_jimage=(1, 0, 0))
self.assertEqual(len(square.graph.get_edge_data(0, 0)), 1)
def test_insert_remove(self):
struct_copy = copy.deepcopy(self.square_sg.structure)
square_copy = copy.deepcopy(self.square_sg)
# Ensure that insert_node appropriately wraps Structure.insert()
struct_copy.insert(1, "O", [0.5, 0.5, 0.5])
square_copy.insert_node(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(struct_copy, square_copy.structure)
# Test that removal is also equivalent between Structure and StructureGraph.structure
struct_copy.remove_sites([1])
square_copy.remove_nodes([1])
self.assertEqual(struct_copy, square_copy.structure)
square_copy.insert_node(
1,
"O",
[0.5, 0.5, 0.5],
edges=[{"from_index": 1, "to_index": 0, "to_jimage": (0, 0, 0)}],
)
self.assertEqual(square_copy.get_coordination_of_site(1), 1)
# Test that StructureGraph.graph is correctly updated
square_copy.insert_node(
1,
"H",
[0.5, 0.5, 0.75],
edges=[{"from_index": 1, "to_index": 2, "to_jimage": (0, 0, 0)}],
)
square_copy.remove_nodes([1])
self.assertEqual(square_copy.graph.number_of_nodes(), 2)
self.assertEqual(square_copy.graph.number_of_edges(), 3)
def test_substitute(self):
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li2O.cif"))
molecule = FunctionalGroups["methyl"]
structure_copy = copy.deepcopy(structure)
structure_copy_graph = copy.deepcopy(structure)
sg = StructureGraph.with_local_env_strategy(structure, MinimumDistanceNN())
sg_copy = copy.deepcopy(sg)
# Ensure that strings and molecules lead to equivalent substitutions
sg.substitute_group(1, molecule, MinimumDistanceNN)
sg_copy.substitute_group(1, "methyl", MinimumDistanceNN)
self.assertEqual(sg, sg_copy)
# Ensure that the underlying structure has been modified as expected
structure_copy.substitute(1, "methyl")
self.assertEqual(structure_copy, sg.structure)
# Test inclusion of graph dictionary
graph_dict = {
(0, 1): {"weight": 0.5},
(0, 2): {"weight": 0.5},
(0, 3): {"weight": 0.5},
}
sg_with_graph = StructureGraph.with_local_env_strategy(structure_copy_graph, MinimumDistanceNN())
sg_with_graph.substitute_group(1, "methyl", MinimumDistanceNN, graph_dict=graph_dict)
edge = sg_with_graph.graph.get_edge_data(11, 13)[0]
self.assertEqual(edge["weight"], 0.5)
def test_auto_image_detection(self):
sg = StructureGraph.with_empty_graph(self.structure)
sg.add_edge(0, 0)
self.assertEqual(len(list(sg.graph.edges(data=True))), 3)
def test_str(self):
square_sg_str_ref = """Structure Graph
Structure:
Full Formula (H1)
Reduced Formula: H2
abc : 5.000000 5.000000 50.000000
angles: 90.000000 90.000000 90.000000
Sites (1)
# SP a b c
--- ---- --- --- ---
0 H 0 0 0
Graph: bonds
from to to_image
---- ---- ------------
0 0 (1, 0, 0)
0 0 (-1, 0, 0)
0 0 (0, 1, 0)
0 0 (0, -1, 0)
"""
mos2_sg_str_ref = """Structure Graph
Structure:
Full Formula (Mo1 S2)
Reduced Formula: MoS2
abc : 3.190316 3.190315 17.439502
angles: 90.000000 90.000000 120.000006
Sites (3)
# SP a b c
--- ---- -------- -------- --------
0 Mo 0.333333 0.666667 0.213295
1 S 0.666667 0.333333 0.303027
2 S 0.666667 0.333333 0.123562
Graph: bonds
from to to_image bond_length (A)
---- ---- ------------ ------------------
0 1 (-1, 0, 0) 2.417e+00
0 1 (0, 0, 0) 2.417e+00
0 1 (0, 1, 0) 2.417e+00
0 2 (0, 1, 0) 2.417e+00
0 2 (-1, 0, 0) 2.417e+00
0 2 (0, 0, 0) 2.417e+00
"""
# don't care about testing Py 2.7 unicode support,
# change Å to A
self.mos2_sg.graph.graph["edge_weight_units"] = "A"
self.assertStrContentEqual(str(self.square_sg), square_sg_str_ref)
self.assertStrContentEqual(str(self.mos2_sg), mos2_sg_str_ref)
def test_mul(self):
square_sg_mul = self.square_sg * (2, 1, 1)
square_sg_mul_ref_str = """Structure Graph
Structure:
Full Formula (H2)
Reduced Formula: H2
abc : 10.000000 5.000000 50.000000
angles: 90.000000 90.000000 90.000000
Sites (2)
# SP a b c
--- ---- --- --- ---
0 H 0 0 0
1 H 0.5 0 -0
Graph: bonds
from to to_image
---- ---- ------------
0 0 (0, 1, 0)
0 0 (0, -1, 0)
0 1 (0, 0, 0)
0 1 (-1, 0, 0)
1 1 (0, 1, 0)
1 1 (0, -1, 0)
"""
square_sg_mul_actual_str = str(square_sg_mul)
# only testing bonds portion,
# the c frac_coord of the second H can vary from
# 0 to -0 depending on machine precision
square_sg_mul_ref_str = "\n".join(square_sg_mul_ref_str.splitlines()[11:])
square_sg_mul_actual_str = "\n".join(square_sg_mul_actual_str.splitlines()[11:])
self.assertStrContentEqual(square_sg_mul_actual_str, square_sg_mul_ref_str)
# test sequential multiplication
sq_sg_1 = self.square_sg * (2, 2, 1)
sq_sg_1 = sq_sg_1 * (2, 2, 1)
sq_sg_2 = self.square_sg * (4, 4, 1)
self.assertEqual(sq_sg_1.graph.number_of_edges(), sq_sg_2.graph.number_of_edges())
# TODO: the below test still gives 8 != 4
# self.assertEqual(self.square_sg.get_coordination_of_site(0), 4)
mos2_sg_mul = self.mos2_sg * (3, 3, 1)
for idx in mos2_sg_mul.structure.indices_from_symbol("Mo"):
self.assertEqual(mos2_sg_mul.get_coordination_of_site(idx), 6)
mos2_sg_premul = StructureGraph.with_local_env_strategy(self.structure * (3, 3, 1), MinimumDistanceNN())
self.assertTrue(mos2_sg_mul == mos2_sg_premul)
# test 3D Structure
nio_sg = StructureGraph.with_local_env_strategy(self.NiO, MinimumDistanceNN())
nio_sg = nio_sg * 3
for n in range(len(nio_sg)):
self.assertEqual(nio_sg.get_coordination_of_site(n), 6)
@unittest.skipIf(not (which("neato") and which("fdp")), "graphviz executables not present")
def test_draw(self):
# draw MoS2 graph
self.mos2_sg.draw_graph_to_file("MoS2_single.pdf", image_labels=True, hide_image_edges=False)
mos2_sg = self.mos2_sg * (9, 9, 1)
mos2_sg.draw_graph_to_file("MoS2.pdf", algo="neato")
# draw MoS2 graph that's been successively multiplied
mos2_sg_2 = self.mos2_sg * (3, 3, 1)
mos2_sg_2 = mos2_sg_2 * (3, 3, 1)
mos2_sg_2.draw_graph_to_file("MoS2_twice_mul.pdf", algo="neato", hide_image_edges=True)
# draw MoS2 graph that's generated from a pre-multiplied Structure
mos2_sg_premul = StructureGraph.with_local_env_strategy(self.structure * (3, 3, 1), MinimumDistanceNN())
mos2_sg_premul.draw_graph_to_file("MoS2_premul.pdf", algo="neato", hide_image_edges=True)
# draw graph for a square lattice
self.square_sg.draw_graph_to_file("square_single.pdf", hide_image_edges=False)
square_sg = self.square_sg * (5, 5, 1)
square_sg.draw_graph_to_file("square.pdf", algo="neato", image_labels=True, node_labels=False)
# draw graph for a body-centered square lattice
self.bc_square_sg.draw_graph_to_file("bc_square_single.pdf", hide_image_edges=False)
bc_square_sg = self.bc_square_sg * (9, 9, 1)
bc_square_sg.draw_graph_to_file("bc_square.pdf", algo="neato", image_labels=False)
# draw graph for a body-centered square lattice defined in an alternative way
self.bc_square_sg_r.draw_graph_to_file("bc_square_r_single.pdf", hide_image_edges=False)
bc_square_sg_r = self.bc_square_sg_r * (9, 9, 1)
bc_square_sg_r.draw_graph_to_file("bc_square_r.pdf", algo="neato", image_labels=False)
# delete generated test files
test_files = (
"bc_square_r_single.pdf",
"bc_square_r.pdf",
"bc_square_single.pdf",
"bc_square.pdf",
"MoS2_premul.pdf",
"MoS2_single.pdf",
"MoS2_twice_mul.pdf",
"MoS2.pdf",
"square_single.pdf",
"square.pdf",
)
for test_file in test_files:
os.remove(test_file)
def test_to_from_dict(self):
d = self.mos2_sg.as_dict()
sg = StructureGraph.from_dict(d)
d2 = sg.as_dict()
self.assertDictEqual(d, d2)
def test_from_local_env_and_equality_and_diff(self):
nn = MinimumDistanceNN()
sg = StructureGraph.with_local_env_strategy(self.structure, nn)
self.assertEqual(sg.graph.number_of_edges(), 6)
nn2 = MinimumOKeeffeNN()
sg2 = StructureGraph.with_local_env_strategy(self.structure, nn2)
self.assertTrue(sg == sg2)
self.assertTrue(sg == self.mos2_sg)
# TODO: find better test case where graphs are different
diff = sg.diff(sg2)
self.assertEqual(diff["dist"], 0)
self.assertEqual(self.square_sg.get_coordination_of_site(0), 2)
def test_from_edges(self):
edges = {
(0, 0, (0, 0, 0), (1, 0, 0)): None,
(0, 0, (0, 0, 0), (-1, 0, 0)): None,
(0, 0, (0, 0, 0), (0, 1, 0)): None,
(0, 0, (0, 0, 0), (0, -1, 0)): None,
}
structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H"], [[0, 0, 0]])
sg = StructureGraph.with_edges(structure, edges)
self.assertEqual(sg, self.square_sg)
def test_extract_molecules(self):
structure_file = os.path.join(
PymatgenTest.TEST_FILES_DIR,
"H6PbCI3N_mp-977013_symmetrized.cif",
)
s = Structure.from_file(structure_file)
nn = MinimumDistanceNN()
sg = StructureGraph.with_local_env_strategy(s, nn)
molecules = sg.get_subgraphs_as_molecules()
self.assertEqual(molecules[0].composition.formula, "H3 C1")
self.assertEqual(len(molecules), 1)
molecules = self.mos2_sg.get_subgraphs_as_molecules()
self.assertEqual(len(molecules), 0)
def test_types_and_weights_of_connections(self):
types = self.mos2_sg.types_and_weights_of_connections
self.assertEqual(len(types["Mo-S"]), 6)
self.assertAlmostEqual(types["Mo-S"][0], 2.416931678417331)
def test_weight_statistics(self):
weight_statistics = self.mos2_sg.weight_statistics
self.assertEqual(len(weight_statistics["all_weights"]), 6)
self.assertAlmostEqual(weight_statistics["min"], 2.4169314100201875)
self.assertAlmostEqual(weight_statistics["variance"], 0)
def test_types_of_coordination_environments(self):
types = self.mos2_sg.types_of_coordination_environments()
self.assertListEqual(types, ["Mo-S(6)", "S-Mo(3)"])
types_anonymous = self.mos2_sg.types_of_coordination_environments(anonymous=True)
self.assertListEqual(types_anonymous, ["A-B(3)", "A-B(6)"])
def test_no_duplicate_hops(self):
test_structure_dict = {
"@module": "pymatgen.core.structure",
"@class": "Structure",
"charge": None,
"lattice": {"matrix": [[2.990355, -5.149042, 0.0], [2.990355, 5.149042, 0.0], [0.0, 0.0, 24.51998]]},
"sites": [
{"species": [{"element": "Ba", "occu": 1}], "abc": [0.005572, 0.994428, 0.151095], "properties": {}},
],
}
test_structure = Structure.from_dict(test_structure_dict)
nn = MinimumDistanceNN(cutoff=6, get_all_sites=True)
sg = StructureGraph.with_local_env_strategy(test_structure, nn)
self.assertEqual(sg.graph.number_of_edges(), 3)
class MoleculeGraphTest(unittest.TestCase):
def setUp(self):
cyclohexene = Molecule.from_file(
os.path.join(
PymatgenTest.TEST_FILES_DIR,
"graphs/cyclohexene.xyz",
)
)
self.cyclohexene = MoleculeGraph.with_empty_graph(
cyclohexene, edge_weight_name="strength", edge_weight_units=""
)
self.cyclohexene.add_edge(0, 1, weight=1.0)
self.cyclohexene.add_edge(1, 2, weight=1.0)
self.cyclohexene.add_edge(2, 3, weight=2.0)
self.cyclohexene.add_edge(3, 4, weight=1.0)
self.cyclohexene.add_edge(4, 5, weight=1.0)
self.cyclohexene.add_edge(5, 0, weight=1.0)
self.cyclohexene.add_edge(0, 6, weight=1.0)
self.cyclohexene.add_edge(0, 7, weight=1.0)
self.cyclohexene.add_edge(1, 8, weight=1.0)
self.cyclohexene.add_edge(1, 9, weight=1.0)
self.cyclohexene.add_edge(2, 10, weight=1.0)
self.cyclohexene.add_edge(3, 11, weight=1.0)
self.cyclohexene.add_edge(4, 12, weight=1.0)
self.cyclohexene.add_edge(4, 13, weight=1.0)
self.cyclohexene.add_edge(5, 14, weight=1.0)
self.cyclohexene.add_edge(5, 15, weight=1.0)
butadiene = Molecule.from_file(
os.path.join(
PymatgenTest.TEST_FILES_DIR,
"graphs/butadiene.xyz",
)
)
self.butadiene = MoleculeGraph.with_empty_graph(butadiene, edge_weight_name="strength", edge_weight_units="")
self.butadiene.add_edge(0, 1, weight=2.0)
self.butadiene.add_edge(1, 2, weight=1.0)
self.butadiene.add_edge(2, 3, weight=2.0)
self.butadiene.add_edge(0, 4, weight=1.0)
self.butadiene.add_edge(0, 5, weight=1.0)
self.butadiene.add_edge(1, 6, weight=1.0)
self.butadiene.add_edge(2, 7, weight=1.0)
self.butadiene.add_edge(3, 8, weight=1.0)
self.butadiene.add_edge(3, 9, weight=1.0)
ethylene = Molecule.from_file(
os.path.join(
PymatgenTest.TEST_FILES_DIR,
"graphs/ethylene.xyz",
)
)
self.ethylene = MoleculeGraph.with_empty_graph(ethylene, edge_weight_name="strength", edge_weight_units="")
self.ethylene.add_edge(0, 1, weight=2.0)
self.ethylene.add_edge(0, 2, weight=1.0)
self.ethylene.add_edge(0, 3, weight=1.0)
self.ethylene.add_edge(1, 4, weight=1.0)
self.ethylene.add_edge(1, 5, weight=1.0)
self.pc = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "graphs", "PC.xyz"))
self.pc_edges = [
[5, 10],
[5, 12],
[5, 11],
[5, 3],
[3, 7],
[3, 4],
[3, 0],
[4, 8],
[4, 9],
[4, 1],
[6, 1],
[6, 0],
[6, 2],
]
self.pc_frag1 = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "graphs", "PC_frag1.xyz"))
self.pc_frag1_edges = [[0, 2], [4, 2], [2, 1], [1, 3]]
self.tfsi = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "graphs", "TFSI.xyz"))
self.tfsi_edges = (
[14, 1],
[1, 4],
[1, 5],
[1, 7],
[7, 11],
[7, 12],
[7, 13],
[14, 0],
[0, 2],
[0, 3],
[0, 6],
[6, 8],
[6, 9],
[6, 10],
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
del self.ethylene
del self.butadiene
del self.cyclohexene
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_construction(self):
edges_frag = {(e[0], e[1]): {"weight": 1.0} for e in self.pc_frag1_edges}
mol_graph = MoleculeGraph.with_edges(self.pc_frag1, edges_frag)
# dumpfn(mol_graph.as_dict(), os.path.join(module_dir,"pc_frag1_mg.json"))
ref_mol_graph = loadfn(os.path.join(module_dir, "pc_frag1_mg.json"))
self.assertEqual(mol_graph, ref_mol_graph)
self.assertEqual(mol_graph.graph.adj, ref_mol_graph.graph.adj)
for node in mol_graph.graph.nodes:
self.assertEqual(
mol_graph.graph.nodes[node]["specie"],
ref_mol_graph.graph.nodes[node]["specie"],
)
for ii in range(3):
self.assertEqual(
mol_graph.graph.nodes[node]["coords"][ii],
ref_mol_graph.graph.nodes[node]["coords"][ii],
)
edges_pc = {(e[0], e[1]): {"weight": 1.0} for e in self.pc_edges}
mol_graph = MoleculeGraph.with_edges(self.pc, edges_pc)
# dumpfn(mol_graph.as_dict(), os.path.join(module_dir,"pc_mg.json"))
ref_mol_graph = loadfn(os.path.join(module_dir, "pc_mg.json"))
self.assertEqual(mol_graph, ref_mol_graph)
self.assertEqual(mol_graph.graph.adj, ref_mol_graph.graph.adj)
for node in mol_graph.graph:
self.assertEqual(
mol_graph.graph.nodes[node]["specie"],
ref_mol_graph.graph.nodes[node]["specie"],
)
for ii in range(3):
self.assertEqual(
mol_graph.graph.nodes[node]["coords"][ii],
ref_mol_graph.graph.nodes[node]["coords"][ii],
)
mol_graph_edges = MoleculeGraph.with_edges(self.pc, edges=edges_pc)
mol_graph_strat = MoleculeGraph.with_local_env_strategy(self.pc, OpenBabelNN())
self.assertTrue(mol_graph_edges.isomorphic_to(mol_graph_strat))
# Check inappropriate strategy
with self.assertRaises(ValueError):
MoleculeGraph.with_local_env_strategy(self.pc, VoronoiNN())
def test_properties(self):
self.assertEqual(self.cyclohexene.name, "bonds")
self.assertEqual(self.cyclohexene.edge_weight_name, "strength")
self.assertEqual(self.cyclohexene.edge_weight_unit, "")
self.assertEqual(self.cyclohexene.get_coordination_of_site(0), 4)
self.assertEqual(self.cyclohexene.get_coordination_of_site(2), 3)
self.assertEqual(self.cyclohexene.get_coordination_of_site(15), 1)
self.assertEqual(len(self.cyclohexene.get_connected_sites(0)), 4)
self.assertTrue(isinstance(self.cyclohexene.get_connected_sites(0)[0].site, Site))
self.assertEqual(str(self.cyclohexene.get_connected_sites(0)[0].site.specie), "H")
def test_set_node_attributes(self):
self.ethylene.set_node_attributes()
specie = nx.get_node_attributes(self.ethylene.graph, "specie")
coords = nx.get_node_attributes(self.ethylene.graph, "coords")
properties = nx.get_node_attributes(self.ethylene.graph, "properties")
for i in range(len(self.ethylene.molecule)):
self.assertEqual(str(specie[i]), str(self.ethylene.molecule[i].specie))
self.assertEqual(coords[i][0], self.ethylene.molecule[i].coords[0])
self.assertEqual(coords[i][1], self.ethylene.molecule[i].coords[1])
self.assertEqual(coords[i][2], self.ethylene.molecule[i].coords[2])
self.assertEqual(properties[i], self.ethylene.molecule[i].properties)
def test_coordination(self):
molecule = Molecule(["C", "C"], [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
mg = MoleculeGraph.with_empty_graph(molecule)
self.assertEqual(mg.get_coordination_of_site(0), 0)
self.assertEqual(self.cyclohexene.get_coordination_of_site(0), 4)
def test_edge_editing(self):
self.cyclohexene.alter_edge(0, 1, new_weight=0.0, new_edge_properties={"foo": "bar"})
new_edge = self.cyclohexene.graph.get_edge_data(0, 1)[0]
self.assertEqual(new_edge["weight"], 0.0)
self.assertEqual(new_edge["foo"], "bar")
self.cyclohexene.break_edge(0, 1)
self.assertTrue(self.cyclohexene.graph.get_edge_data(0, 1) is None)
# Replace the now-broken edge
self.cyclohexene.add_edge(0, 1, weight=1.0)
def test_insert_remove(self):
mol_copy = copy.deepcopy(self.ethylene.molecule)
eth_copy = copy.deepcopy(self.ethylene)
# Ensure that insert_node appropriately wraps Molecule.insert()
mol_copy.insert(1, "O", [0.5, 0.5, 0.5])
eth_copy.insert_node(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(mol_copy, eth_copy.molecule)
# Test that removal is also equivalent between Molecule and MoleculeGraph.molecule
mol_copy.remove_sites([1])
eth_copy.remove_nodes([1])
self.assertEqual(mol_copy, eth_copy.molecule)
eth_copy.insert_node(
1,
"O",
[0.5, 0.5, 0.5],
edges=[{"from_index": 1, "to_index": 2}, {"from_index": 1, "to_index": 3}],
)
self.assertEqual(eth_copy.get_coordination_of_site(1), 2)
# Test that MoleculeGraph.graph is correctly updated
eth_copy.remove_nodes([1, 2])
self.assertEqual(eth_copy.graph.number_of_nodes(), 5)
self.assertEqual(eth_copy.graph.number_of_edges(), 2)
def test_get_disconnected(self):
disconnected = Molecule(
["C", "H", "H", "H", "H", "He"],
[
[0.0000, 0.0000, 0.0000],
[-0.3633, -0.5138, -0.8900],
[1.0900, 0.0000, 0.0000],
[-0.3633, 1.0277, 0.0000],
[-0.3633, -0.5138, -0.8900],
[5.0000, 5.0000, 5.0000],
],
)
no_he = Molecule(
["C", "H", "H", "H", "H"],
[
[0.0000, 0.0000, 0.0000],
[-0.3633, -0.5138, -0.8900],
[1.0900, 0.0000, 0.0000],
[-0.3633, 1.0277, 0.0000],
[-0.3633, -0.5138, -0.8900],
],
)
just_he = Molecule(["He"], [[5.0000, 5.0000, 5.0000]])
dis_mg = MoleculeGraph.with_empty_graph(disconnected)
dis_mg.add_edge(0, 1)
dis_mg.add_edge(0, 2)
dis_mg.add_edge(0, 3)
dis_mg.add_edge(0, 4)
fragments = dis_mg.get_disconnected_fragments()
self.assertEqual(len(fragments), 2)
self.assertEqual(fragments[0].molecule, no_he)
self.assertEqual(fragments[1].molecule, just_he)
con_mg = MoleculeGraph.with_empty_graph(no_he)
con_mg.add_edge(0, 1)
con_mg.add_edge(0, 2)
con_mg.add_edge(0, 3)
con_mg.add_edge(0, 4)
fragments = con_mg.get_disconnected_fragments()
self.assertEqual(len(fragments), 1)
def test_split(self):
bonds = [(0, 1), (4, 5)]
alterations = {
(2, 3): {"weight": 1.0},
(0, 5): {"weight": 2.0},
(1, 2): {"weight": 2.0},
(3, 4): {"weight": 2.0},
}
# Perform retro-Diels-Alder reaction - turn product into reactants
reactants = self.cyclohexene.split_molecule_subgraphs(bonds, allow_reverse=True, alterations=alterations)
self.assertTrue(isinstance(reactants, list))
reactants = sorted(reactants, key=len)
# After alterations, reactants should be ethylene and butadiene
self.assertEqual(reactants[0], self.ethylene)
self.assertEqual(reactants[1], self.butadiene)
with self.assertRaises(MolGraphSplitError):
self.cyclohexene.split_molecule_subgraphs([(0, 1)])
# Test naive charge redistribution
hydroxide = Molecule(["O", "H"], [[0, 0, 0], [0.5, 0.5, 0.5]], charge=-1)
oh_mg = MoleculeGraph.with_empty_graph(hydroxide)
oh_mg.add_edge(0, 1)
new_mgs = oh_mg.split_molecule_subgraphs([(0, 1)])
for mg in new_mgs:
if str(mg.molecule[0].specie) == "O":
self.assertEqual(mg.molecule.charge, -1)
else:
self.assertEqual(mg.molecule.charge, 0)
        # Test that remapping of nodes to atoms works
diff_species = Molecule(
["C", "I", "Cl", "Br", "F"],
[
[0.8314, -0.2682, -0.9102],
[1.3076, 1.3425, -2.2038],
[-0.8429, -0.7410, -1.1554],
[1.9841, -1.7636, -1.2953],
[1.0098, 0.1231, 0.3916],
],
)
diff_spec_mg = MoleculeGraph.with_empty_graph(diff_species)
diff_spec_mg.add_edge(0, 1)
diff_spec_mg.add_edge(0, 2)
diff_spec_mg.add_edge(0, 3)
diff_spec_mg.add_edge(0, 4)
for i in range(1, 5):
bond = (0, i)
split_mgs = diff_spec_mg.split_molecule_subgraphs([bond])
for split_mg in split_mgs:
species = nx.get_node_attributes(split_mg.graph, "specie")
for j in range(len(split_mg.graph.nodes)):
atom = split_mg.molecule[j]
self.assertEqual(species[j], str(atom.specie))
def test_build_unique_fragments(self):
edges = {(e[0], e[1]): None for e in self.pc_edges}
mol_graph = MoleculeGraph.with_edges(self.pc, edges)
unique_fragment_dict = mol_graph.build_unique_fragments()
unique_fragments = []
for key in unique_fragment_dict:
for fragment in unique_fragment_dict[key]:
unique_fragments.append(fragment)
self.assertEqual(len(unique_fragments), 295)
nm = iso.categorical_node_match("specie", "ERROR")
for ii in range(295):
# Test that each fragment is unique
for jj in range(ii + 1, 295):
self.assertFalse(
nx.is_isomorphic(
unique_fragments[ii].graph,
unique_fragments[jj].graph,
node_match=nm,
)
)
# Test that each fragment correctly maps between Molecule and graph
self.assertEqual(
len(unique_fragments[ii].molecule),
len(unique_fragments[ii].graph.nodes),
)
species = nx.get_node_attributes(unique_fragments[ii].graph, "specie")
coords = nx.get_node_attributes(unique_fragments[ii].graph, "coords")
mol = unique_fragments[ii].molecule
for ss, site in enumerate(mol):
self.assertEqual(str(species[ss]), str(site.specie))
self.assertEqual(coords[ss][0], site.coords[0])
self.assertEqual(coords[ss][1], site.coords[1])
self.assertEqual(coords[ss][2], site.coords[2])
# Test that each fragment is connected
self.assertTrue(nx.is_connected(unique_fragments[ii].graph.to_undirected()))
def test_find_rings(self):
rings = self.cyclohexene.find_rings(including=[0])
self.assertEqual(sorted(rings[0]), [(0, 5), (1, 0), (2, 1), (3, 2), (4, 3), (5, 4)])
no_rings = self.butadiene.find_rings()
self.assertEqual(no_rings, [])
def test_isomorphic(self):
ethylene = Molecule.from_file(
os.path.join(
PymatgenTest.TEST_FILES_DIR,
"graphs/ethylene.xyz",
)
)
# switch carbons
ethylene[0], ethylene[1] = ethylene[1], ethylene[0]
eth_copy = MoleculeGraph.with_edges(
ethylene,
{
(0, 1): {"weight": 2},
(1, 2): {"weight": 1},
(1, 3): {"weight": 1},
(0, 4): {"weight": 1},
(0, 5): {"weight": 1},
},
)
# If they are equal, they must also be isomorphic
eth_copy = copy.deepcopy(self.ethylene)
self.assertTrue(self.ethylene.isomorphic_to(eth_copy))
self.assertFalse(self.butadiene.isomorphic_to(self.ethylene))
def test_substitute(self):
molecule = FunctionalGroups["methyl"]
molgraph = MoleculeGraph.with_edges(
molecule,
{(0, 1): {"weight": 1}, (0, 2): {"weight": 1}, (0, 3): {"weight": 1}},
)
eth_mol = copy.deepcopy(self.ethylene)
eth_str = copy.deepcopy(self.ethylene)
# Ensure that strings and molecules lead to equivalent substitutions
eth_mol.substitute_group(5, molecule, MinimumDistanceNN)
eth_str.substitute_group(5, "methyl", MinimumDistanceNN)
self.assertEqual(eth_mol, eth_str)
graph_dict = {
(0, 1): {"weight": 1.0},
(0, 2): {"weight": 1.0},
(0, 3): {"weight": 1.0},
}
eth_mg = copy.deepcopy(self.ethylene)
eth_graph = copy.deepcopy(self.ethylene)
# Check that MoleculeGraph input is handled properly
eth_graph.substitute_group(5, molecule, MinimumDistanceNN, graph_dict=graph_dict)
eth_mg.substitute_group(5, molgraph, MinimumDistanceNN)
self.assertEqual(eth_graph.graph.get_edge_data(5, 6)[0]["weight"], 1.0)
self.assertEqual(eth_mg, eth_graph)
def test_replace(self):
eth_copy_sub = copy.deepcopy(self.ethylene)
eth_copy_repl = copy.deepcopy(self.ethylene)
# First, perform a substitution as above
eth_copy_sub.substitute_group(5, "methyl", MinimumDistanceNN)
eth_copy_repl.replace_group(5, "methyl", MinimumDistanceNN)
# Test that replacement on a terminal atom is equivalent to substitution
self.assertEqual(eth_copy_repl.molecule, eth_copy_sub.molecule)
self.assertEqual(eth_copy_repl, eth_copy_sub)
# Methyl carbon should have coordination 4
self.assertEqual(eth_copy_repl.get_coordination_of_site(5), 4)
# Now swap one functional group for another
eth_copy_repl.replace_group(5, "amine", MinimumDistanceNN)
self.assertEqual(
["C", "C", "H", "H", "H", "N", "H", "H"],
[str(s) for s in eth_copy_repl.molecule.species],
)
self.assertEqual(len(eth_copy_repl.graph.nodes), 8)
# Amine nitrogen should have coordination 3
self.assertEqual(eth_copy_repl.get_coordination_of_site(5), 3)
def test_as_from_dict(self):
d = self.cyclohexene.as_dict()
mg = MoleculeGraph.from_dict(d)
d2 = mg.as_dict()
self.assertEqual(str(d), str(d2))
if __name__ == "__main__":
unittest.main()
|
materialsproject/pymatgen
|
pymatgen/analysis/tests/test_graphs.py
|
Python
|
mit
| 38,777
|
[
"pymatgen"
] |
ad63ba51d46b16c6a3d4ae10c5e91f493c6c3945ebbdf0ab970895d2ec6fd858
|
# $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the EState fingerprinting
validation values are from the paper (JCICS _35_ 1039-1045 (1995))
"""
import unittest
import numpy
from io import StringIO
from rdkit import Chem
from rdkit.Chem.EState import Fingerprinter
class TestCase(unittest.TestCase):
def _validate(self, vals, tol=1e-2, show=False):
for smi, c, v in vals:
mol = Chem.MolFromSmiles(smi)
counts, vals = Fingerprinter.FingerprintMol(mol)
counts = counts[numpy.nonzero(counts)]
vals = vals[numpy.nonzero(vals)]
if show:
print(counts)
print(vals)
assert len(c) == len(counts), 'bad count len for smiles: %s' % (smi)
assert len(v) == len(vals), 'bad val len for smiles: %s' % (smi)
c = numpy.array(c)
assert max(abs(c - counts)) < tol, 'bad count for SMILES: %s' % (smi)
v = numpy.array(v)
assert max(abs(v - vals)) < tol, 'bad val for SMILES: %s' % (smi)
def test1_molecules(self):
data = [
('c1[nH]cnc1CC(N)C(O)=O', [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[0.26, 3.12, -0.86, -1.01, 0.67, 5.25, 2.71, 3.84, 8.42, 10.26]),
('NCCc1ccc(O)c(O)c1', [2, 3, 3, 1, 2], [1.26, 4.71, 0.75, 5.30, 17.97]),
]
self._validate(data, show=False)
def test_exampleCode(self):
# We make sure that the example code runs
from rdkit.TestRunner import redirect_stdout
f = StringIO()
with redirect_stdout(f):
Fingerprinter._exampleCode()
s = f.getvalue()
self.assertIn('NCCc1ccc(O)c(O)c1', s)
if __name__ == '__main__':
unittest.main()
|
greglandrum/rdkit
|
rdkit/Chem/EState/UnitTestFingerprints.py
|
Python
|
bsd-3-clause
| 2,008
|
[
"RDKit"
] |
80444ffa7ad625e5907149992caa6aa4208eeedcd2709d78f19b661e72f462b6
|
# $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" Various bits and pieces for calculating Molecular descriptors
"""
from rdkit import RDConfig
from rdkit.ML.Descriptors import Descriptors
from rdkit.Chem import Descriptors as DescriptorsMod
from rdkit.RDLogger import logger
logger = logger()
import re
class MolecularDescriptorCalculator(Descriptors.DescriptorCalculator):
""" used for calculating descriptors for molecules
"""
def __init__(self, simpleList, *args, **kwargs):
""" Constructor
**Arguments**
- simpleList: list of simple descriptors to be calculated
(see below for format)
**Note**
- format of simpleList:
a list of strings which are functions in the rdkit.Chem.Descriptors module
"""
self.simpleList = tuple(simpleList)
self.descriptorNames = tuple(self.simpleList)
self.compoundList = None
self._findVersions()
def _findVersions(self):
""" returns a tuple of the versions of the descriptor calculators
"""
self.descriptorVersions = []
for nm in self.simpleList:
vers = 'N/A'
if hasattr(DescriptorsMod, nm):
fn = getattr(DescriptorsMod, nm)
if hasattr(fn, 'version'):
vers = fn.version
self.descriptorVersions.append(vers)
def SaveState(self, fileName):
""" Writes this calculator off to a file so that it can be easily loaded later
**Arguments**
- fileName: the name of the file to be written
"""
from rdkit.six.moves import cPickle
try:
f = open(fileName, 'wb+')
except Exception:
logger.error('cannot open output file %s for writing' % (fileName))
return
cPickle.dump(self, f)
f.close()
def CalcDescriptors(self, mol, *args, **kwargs):
""" calculates all descriptors for a given molecule
**Arguments**
- mol: the molecule to be used
**Returns**
a tuple of all descriptor values
"""
res = [-666] * len(self.simpleList)
for i, nm in enumerate(self.simpleList):
fn = getattr(DescriptorsMod, nm, lambda x: 777)
try:
res[i] = fn(mol)
except Exception:
import traceback
traceback.print_exc()
return tuple(res)
def GetDescriptorNames(self):
""" returns a tuple of the names of the descriptors this calculator generates
"""
return self.descriptorNames
def GetDescriptorSummaries(self):
""" returns a tuple of summaries for the descriptors this calculator generates
"""
res = []
for nm in self.simpleList:
fn = getattr(DescriptorsMod, nm, lambda x: 777)
if hasattr(fn, '__doc__') and fn.__doc__:
doc = fn.__doc__.split('\n\n')[0].strip()
        doc = re.sub(r' *\n *', ' ', doc)
else:
doc = 'N/A'
res.append(doc)
return res
def GetDescriptorFuncs(self):
""" returns a tuple of the functions used to generate this calculator's descriptors
"""
res = []
for nm in self.simpleList:
fn = getattr(DescriptorsMod, nm, lambda x: 777)
res.append(fn)
return tuple(res)
def GetDescriptorVersions(self):
""" returns a tuple of the versions of the descriptor calculators
"""
return tuple(self.descriptorVersions)
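# Hedged usage sketch (not part of the original module; descriptor names are
# assumed to exist in rdkit.Chem.Descriptors):
#   from rdkit import Chem
#   calc = MolecularDescriptorCalculator(['MolWt', 'MolLogP'])
#   mol = Chem.MolFromSmiles('CCO')
#   calc.CalcDescriptors(mol)   # -> roughly (46.07, -0.0014)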
|
jandom/rdkit
|
rdkit/ML/Descriptors/MoleculeDescriptors.py
|
Python
|
bsd-3-clause
| 3,308
|
[
"RDKit"
] |
381e5735df359799187442f988b9c010b951679c04c2391de0f39935218c9d84
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyVcfKit(PythonPackage):
"""VCF-kit is a command-line based collection of utilities for performing
analysis on Variant Call Format (VCF) files."""
homepage = "https://github.com/AndersenLab/VCF-kit"
url = "https://github.com/AndersenLab/VCF-kit/archive/0.1.6.tar.gz"
version('0.1.6', sha256='4865414ac9dc6996c0baeefadf1d528c28e6d0c3cc3dbdc28a2cdc6e06212428')
depends_on('python@2.7:2.8', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-awesome-slugify', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-cython@0.24.1:', type='build')
depends_on('py-cyvcf2@0.6.5:', type=('build', 'run'))
depends_on('py-docopt', type=('build', 'run'))
depends_on('py-biopython', type=('build', 'run'))
depends_on('py-yahmm@1.1.2', type=('build', 'run'))
depends_on('py-clint', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('py-networkx@1.11', type=('build', 'run'))
depends_on('py-intervaltree@2.1.0', type=('build', 'run'))
depends_on('py-tabulate', type=('build', 'run'))
depends_on('py-jinja2', type=('build', 'run'))
depends_on('bwa@0.7.12:', type='run')
depends_on('samtools@1.3:', type='run')
depends_on('bcftools@1.3:', type='run')
depends_on('blast-plus@2.2.31:', type='run')
depends_on('muscle@3.8.31:', type='run')
depends_on('primer3', type='run')
depends_on('vcftools', type='run')
|
rspavel/spack
|
var/spack/repos/builtin/packages/py-vcf-kit/package.py
|
Python
|
lgpl-2.1
| 1,832
|
[
"BLAST",
"BWA",
"Biopython"
] |
bc54ca154e4d4434206aac9521f208df08e5938013f9624f4741fc3b6ecbdcc9
|
#
from env import ABSPATH
import os
import json
from neuronjs import Neuron
dependency_tree = {}
dependency_file = os.path.normpath(
os.path.join(ABSPATH, './test/fixtures/dependency.json')
)
try:
dependency_json = open(dependency_file).read()
dependency_tree = json.loads(dependency_json)
except Exception as e:
print(e)
version = dependency_tree.get('_version')
# unset `dependency_file` which might leak the file structure of server
dependency_file = None
def resolve(module_ids):
if type(module_ids) is not list:
return _resolve(module_ids)
module_ids = [
_resolve(i).replace('/', '~')
for i in module_ids
]
return '/concat' + ','.join(module_ids)
def _resolve(module_id):
return '/mod' + '/' + module_id.replace('@', '/')
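# Worked example of the resolvers above (module ids are hypothetical):
#   _resolve('a@1.0.0/a.js')                  -> '/mod/a/1.0.0/a.js'
#   resolve(['a@1.0.0/a.js', 'b@2.0.0/b.js'])
#   -> '/concat~mod~a~1.0.0~a.js,~mod~b~2.0.0~b.js'
# (note the leading '/' of each resolved id is also rewritten to '~')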
neuron = Neuron(
version=version,
dependency_tree=dependency_tree,
resolve=resolve,
debug=False,
js_config={
'path': "'http://a.com/mod'"
}
)
neuron.facade('home/a.js', {
'a': 1
})
neuron.facade('home/b.js')
# neuron.css('home/style.css')
# neuron.css('b/style.css', 'c/style.css')
neuron.combo('home', 'b')
neuron.analyze()
neuron.analyze()
neuron.analyze()
# -> <link rel="" href="//s1.xhscdn.com/">
print(neuron.output_scripts())
print(neuron.output_config())
print(neuron.output_facades())
print(neuron.output_css())
print(neuron.src('c/style.css'))
|
neuron-js/pyneuron
|
test/all.py
|
Python
|
mit
| 1,346
|
[
"NEURON"
] |
e31efcaa382a55f406ce033d327e3447cc6d5cbd028a7c325db281ce698e8c1d
|
import IMP
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.pmi
import sys
import os.path
import IMP.display
import numpy as np
import math
import matplotlib.pyplot as plt
import time
colors = [(1,.4,.4),(.4,.4,1),(.4,1,.4),(1,.4,1),(.4,1,1),(1,.7,.4),(1,.4,.7)]
class MassSpecSystem:
def __init__(self,model,fname):
self.model = model
self.fname = fname
self.chains = []
self.ccs = []
self.radii = []
self.ccs_radii = []
self.distances = []
self.raw_connect = []
self.node_labels = []
self.node_structure = []
self.composite = []
self.distance_restraints = []
self.connectivity_restraint = []
self.nparticles = 0
self.nrestraints = 0
self.size = 0
self.ds = []
self.idx = []
self.rigid_bodies = []
self.ptypes = []
self.restraints = []
self.max_score = 100
self.distance_force_constant = 10
self.connectivity_force_constant = 10
self.refcoords = []
self.sympairs = [(0,1),(0,3),(1,3)]
self.symres = []
def convert_node_names_node_indices(self, child):
child_index = []
for i in range(len(child)):
child_index.append(self.chains.index(child[i]))
return child_index
def get_node(self, node):
tmp = node.split("=")[1].split(" ")
tmp = list(filter(None,tmp))
        if len(tmp) != 1:
#print(list(tmp)[0])
child1 = tmp[0].replace(","," ").strip().replace("["," ").replace("]"," ").strip().split()
child1 = self.convert_node_names_node_indices(child1)
parent = tmp[1].strip()
        if len(tmp) == 1:
#print(tmp[0])
child1 = tmp[0].replace(","," ").strip().replace("["," ").replace("]"," ").strip().split()
child1 = self.convert_node_names_node_indices(child1)
parent = None
return [child1, parent]
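    # Sketch of the node syntax this parser expects (hypothetical labels,
    # inferred from the string handling above; actual restraint files are
    # not shown here):
    #   "N1 = [A,B] N0"  -> children [A, B] (as chain indices), parent "N0"
    #   "N0 = [A,B,C]"   -> root node, parent None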
def parse_raw_connectivity(self):
node_labels = []
node_structure = []
for i in range(len(self.raw_connect)):
node_labels.append(self.raw_connect[i].split("=")[0].strip())
node_structure.append(self.get_node(self.raw_connect[i]))
if (len(node_labels) == len(node_structure)):
self.node_labels = node_labels
self.node_structure = node_structure
else:
#print("error while reading tree")
sys.exit(0)
def read_restraints(self):
refcoords = []
if not os.path.exists(self.fname):
#print("%s does not exist"%(self.fname))
sys.exit(0)
else:
f = open(self.fname, 'r')
lines = f.readlines()
counter = 0
for line in lines:
if not line.startswith('##CCS'):
if not line.startswith('##DISTANCE'):
if not line.startswith('##CONNECT'):
if not line.startswith('##REF'):
                                if (counter == 0):
                                    self.ccs_radii.append(line)
                                if (counter == 1):
                                    self.distances.append(line)
                                if (counter == 2):
                                    self.raw_connect.append(line)
                                if (counter == 3):
refcoords.append(line)
else:
counter = 3
else:
counter = 2
else:
counter = 1
f.close()
# loop over
for i in range(len(self.ccs_radii)):
            if i == 0:
self.composite.append(self.ccs_radii[i].split(",")[0].strip())
self.composite.append(self.ccs_radii[i].split(",")[1].strip())
self.composite.append(self.ccs_radii[i].split(",")[2].strip())
else:
chain = self.ccs_radii[i].replace('"','').split(",")[0].strip()
                if (len(chain) == 1):
self.chains.append(chain)
self.ccs.append(self.ccs_radii[i].split(",")[1].strip())
self.radii.append(self.ccs_radii[i].split(",")[2].strip())
self.nparticles = len(self.chains)
self.nrestraints = len(self.distances)
self.size = float(self.composite[2])
#print("SELFSIZE",self.size)
# get connectivity information from restraint file
self.parse_raw_connectivity()
#print(self.refcoords)
for item in refcoords:
            item = item.translate(str.maketrans("", "", "'[]()\n"))
item = item.split(":")[1]
item = item.replace("[","").replace("]","")
F = np.fromstring(item,sep = " ")
self.refcoords.append(F)
def setup_system(self):
self.bb = IMP.algebra.BoundingBox3D(IMP.algebra.Vector3D(0,0,0),(IMP.algebra.Vector3D(self.size*2, self.size*2, self.size*2)))
#print(self.bb)
self.ps = [IMP.Particle(self.model) for x in range(self.nparticles)]
self.idx = self.model.get_particle_indexes()
self.rs = [IMP.pmi.Resolution.setup_particle(self.model, x, 300) for x in self.idx]
for i in range(self.nparticles):
init_coors = IMP.algebra.get_random_vector_in(self.bb)
#print(init_coors)
self.ds.append(IMP.core.XYZR.setup_particle(self.model, self.idx[i], IMP.algebra.Sphere3D(IMP.algebra.Vector3D(init_coors[0], init_coors[1], init_coors[2]), float(self.radii[i]))))
#print(self.ds)
self.rigid_bodies.append(IMP.core.RigidBody.setup_particle(self.model, self.ds[i],IMP.algebra.ReferenceFrame3D()))
#print("149")
self.rigid_bodies[i].set_coordinates(IMP.algebra.Vector3D(init_coors[0], init_coors[1], init_coors[2]))
#print("151")
self.rigid_bodies[i].set_coordinates_are_optimized(True)
#print("153")
IMP.atom.Mass.setup_particle(self.model, self.idx[i], 1.0)
#print("155")
    def setup_symmetry_restraint(self):
        """Constrain particles 8-15 and 16-23 to be C3 symmetry copies of
        particles 0-7, rotated by 2*pi/3 and 4*pi/3 about the z axis."""
        for base in range(8):
            for i, sp in enumerate([self.ds[base + 8], self.ds[base + 16]]):
                IMP.core.Reference.setup_particle(sp, self.ds[base])
                tr = IMP.algebra.Transformation3D(
                    IMP.algebra.get_rotation_about_axis(
                        IMP.algebra.get_basis_vector_3d(2), np.pi * (2 / 3) * (i + 1)),
                    IMP.algebra.Vector3D(0, 0, 0))
                sm = IMP.core.TransformationSymmetry(tr)
                c = IMP.core.SingletonConstraint(sm, None, sp)
                self.symres.append(c)
        #print("SELF.SYMRES ", self.symres)
def setup_distance_restraints(self, d_res = 0):
tmp_distance_restraints = self.distances[d_res]
        tmp_distance_restraints = tmp_distance_restraints.translate(str.maketrans("", "", "'[]()"))
tmp_distance_restraints = tmp_distance_restraints.replace("\n","").split(";")
tmp_distance_restraints = list(filter(None, tmp_distance_restraints))
n_distance_restraints = len(tmp_distance_restraints)
for i in range(n_distance_restraints):
distance_restraint = tmp_distance_restraints[i].replace(",","").replace("[","").replace("]","").replace("'",'').split()
distance_restraint = list(filter(None, distance_restraint))
#dref.append(",".join(distance_restraint))
if distance_restraint:
pi = int(self.chains.index(distance_restraint[0]))
pj = int(self.chains.index(distance_restraint[1]))
rij = float(distance_restraint[2].replace("]",""))
rij = rij - float(self.radii[pi]) - float(self.radii[pj])
#print(rij)
self.distance_restraints.append(IMP.atom.create_distance_restraint(self.rigid_bodies[pi], self.rigid_bodies[pj], rij, self.distance_force_constant))
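    # Note (inferred from the computation above): distances in the restraint
    # file are treated as center-to-center measurements, so both particle
    # radii are subtracted to obtain the surface separation used as the
    # harmonic distance restraint target.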
def setup_connectivity_restraints(self):
# Create MS connectivity restraint
#hw = IMP.core.HarmonicWell((-18.25,27.5), self.connectivity_force_constant)
#hw = IMP.core.HarmonicWell((16,58), 100)# range for ALL protein complexes
hw = IMP.core.HarmonicWell((20,46), 100)
ss = IMP.core.DistancePairScore(hw)
self.connectivity_restraint = IMP.core.MSConnectivityRestraint(self.model, ss)
self.connectivity_restraint.set_maximum_score(0)
# Connectivity taken from the composite information -- set chain ID -- read in from the restraint file
#print("COMPOSITE ", self.composite)
for i in range(len(self.composite[0])):
index_from_chain = self.chains.index(self.composite[0][i])
self.ptypes.append(self.connectivity_restraint.add_type([self.ds[index_from_chain]]))
#print(self.ds[index_from_chain].get_radius())
# Connectivity taken from the composite information -- set chain ID -- read in from the restraint file
for i in range(len(self.node_labels)):
node = self.node_structure[i]
node_label = self.node_structure[i][0]
node_parent = self.node_structure[i][1]
if node_parent is None:
self.connectivity_restraint.add_composite(node_label)
else:
self.connectivity_restraint.add_composite(node_label, self.node_labels.index(node_parent))
def collect_restraints(self):
restraints = []
for i in self.distance_restraints:
restraints.append(i)
restraints.append(self.connectivity_restraint)
self.restraints = restraints
def setup_restraints(self, d_res = 0):
#print("SETUP RESTRAINTS")
self.distance_restraints = []
self.connectivity_restraint = []
self.setup_distance_restraints(d_res = d_res)
self.setup_symmetry_restraint()
self.setup_connectivity_restraints()
self.collect_restraints()
#print(self.connectivity_restraint.get_connected_pairs())
#print(self.connectivity_restraint.get_pair_score())
class MassSpecDynamics:
    """MC sampling options class."""
def __init__(self, system, scoring_function, initial_temperature = 1000, final_temperature = 100, mc_cool_cycles = 500, mc_cool_steps = 5000, mc_cycles = 1000, mc_steps = 1000, optimization_cycles = 10):
self.system = system
self.scoring_function = scoring_function
self.initial_temperature = initial_temperature
self.final_temperature = final_temperature
self.mc_cool_cycles = mc_cool_cycles
self.mc_cool_steps = mc_cool_steps
self.mc_cycles = mc_cycles
self.mc_steps = mc_steps
self.optimization_cycles = optimization_cycles
self.movers = []
self.optimizer = IMP.core.MonteCarlo(self.system.model)
self.print_annealing = True
def get_coordinates_xyz(self, header, output):
#""" writes out coordinates in xyz format
#"""
output.write("%s\n" % len(self.system.ds))
output.write("%s\n" % header)
for index, particle in enumerate(self.system.ds):
outi = "C"+str(index)+" "+str(particle.get_coordinates()[0])+" "+str(particle.get_coordinates()[1])+" "+str(particle.get_coordinates()[2])
output.write("%s\n" % outi)
def writepym(self, fname):
w = open(fname, 'w')
w.write('from pymol.cgo import *'+ '\n')
w.write('from pymol import cmd'+ '\n')
w.write('from pymol.vfont import plain' + '\n' + 'data={}' + '\n' + "curdata=[]" + '\n')
for index, particle in enumerate(self.system.ds):
w.write("k='Protein" + str(index) + " geometry'" +'\n'+ "if not k in data.keys():" +'\n'+" data[k]=[]"+'\n'+'curdata=['+'\n'+'COLOR,' + str(colors[index][0])+","+str(colors[index][1])+","+ str(colors[index][2])+"," + '\n' + 'SPHERE,'+ str(particle.get_coordinates()[0])+ ','+ str(particle.get_coordinates()[1])+',' + str(particle.get_coordinates()[2])+','+ str(particle.get_radius()) +'\n')
w.write("]"+"\n"+"k='Protein" + str(index) + " geometry'" + '\n' + "if k in data.keys():" + "\n" + " data[k]= data[k]+curdata"+'\n'+"else:" +'\n' +" data[k]= curdata"+"\n")
w.write("for k in data.keys():" + "\n" + " cmd.load_cgo(data[k], k, 1)" +"\n"+ "data= {}")
w.close()
def initialize_MC(self):
#""" initialize MC optimizer
#"""
# Initialize Monte Carlo sampler
self.optimizer.set_return_best(True)
self.optimizer.set_score_threshold(self.system.max_score*2)
self.optimizer.set_scoring_function(self.scoring_function)
self.movers = []
self.print_annealing = False
# Accumulate the movers needed by the Monte Carlo sampler
for rbd in self.system.rigid_bodies:
self.movers.append(IMP.core.RigidBodyMover(rbd, 1, 2))
# Add movers to the Monte Carlo sampler
self.optimizer.add_movers(self.movers)
def run_MC(self, i, name):
"""Optimize the scoring function using Monte Carlo sampling."""
y = 0
# Setup MC optimizer
self.initialize_MC()
self.optimizer.set_return_best(False)
coords = []
scores = []
for mc in range(self.optimization_cycles): ## RANDOMIZE COORDINATES
for particle in self.system.ds:
init_coors = IMP.algebra.get_random_vector_in(self.system.bb)
particle.set_coordinates(init_coors)
if y != 0:
# use a loop variable distinct from the run-index argument i
for mi in [0,1,2,3,8,9,10,11,16,17,18,19]:
#self.system.ds[mi].set_coordinates_are_optimized(False)
self.optimizer.add_mover(self.movers[mi])
cycle = "annealing"
T = self.initial_temperature
print("INIT TEMP", T)
start = time.time()
score = []
temp = []
#for cc in range(self.mc_cool_cycles*7):
print(self.optimizer.get_movers())
for cc in range(self.mc_cool_cycles*10):
#for cc in range(500): ## DO THE ANNEALING MC
T = 0.999*T
self.optimizer.set_kt(T)
self.optimizer.optimize(self.mc_cool_steps)
score.append(self.scoring_function.evaluate(False))
temp.append(T)
#plt.clf()
#plt.plot(temp, score)
#plt.ylim([0,100000])
#plt.show()
print("FINAL TEMP,", T)
T = 400
stop = time.time()
print("annealing time", stop - start)
print(y)
#for particle in self.system.ds[np.array([0,1,2,3])]:
# again avoid clobbering the run-index argument i, which is used below
for mi in [0,1,2,3,8,9,10,11,16,17,18,19]:
#self.system.ds[mi].set_coordinates_are_optimized(False)
self.optimizer.remove_mover(self.movers[mi])
#init_coors = IMP.algebra.get_random_vector_in(self.system.bb)
#particle.set_coordinates_are_optimized(False)
print(self.optimizer.get_movers())
#for c in range(self.mc_cycles): ## DO THE CONSTANT TEMP MC
start = time.time()
score = []
step = []
for c in range(100000): ## DO THE CONSTANT TEMP MC
self.optimizer.set_kt(T)
#self.optimizer.optimize(self.mc_steps)
self.optimizer.optimize(1000000)
if c%100 == 0: ## WRITE LIST TO AN ARRAY EVERY 100TH CYCLE
score.append(self.scoring_function.evaluate(False))
step.append(c)
y+=1
frame_coords = []  # avoid shadowing the builtin list and the run index i
for idx, particle in enumerate(self.system.ds):
frame_coords.append(particle.get_coordinates())
if type(coords) is np.ndarray:
coords = coords.tolist()
if type(scores) is np.ndarray:
scores = scores.tolist()
coords.append(frame_coords)
#print(frame_coords)
scores.append(self.scoring_function.evaluate(False))
stop = time.time()
print("MC Time", stop - start)
coords = np.array(coords)
scores = np.array(scores)
#np.save(name.replace(".pdb1_restraints.txt","")+ "_" + str("%02d" % i) + "_" +"coordfile.npy",coords)
np.save("coordfile.npy",coords) ## ALL THE STRUCTURES
np.save(name.replace(".pdb1_restraints.txt","")+ "_" + str("%02d" % i) + "_" +"scorefile.npy",scores)
|
jEschweiler/Urease
|
urease_software/IMMSonly_restraints/sampler.py
|
Python
|
gpl-3.0
| 17,964
|
[
"PyMOL"
] |
429315b9028c9fc7245c190c10cfdbe658eb069f3da7d73d614a8a7451042aed
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
1736-1750
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
# Import the tamkin library.
from __future__ import print_function
from tamkin import *
from molmod import *
# Load the gaussian data.
molecule = load_molecule_g03fchk("gaussian.fchk")
print(molecule.numbers)
# Perform the normal mode analysis
nma = NMA(molecule)
# Construct a partition function object with the typical gas phase contributions.
pf = PartFun(nma, [ExtTrans(), ExtRot(6)])
# 6 is the rotational symmetry number.
# Write some general information about the molecule and the partition function
# to a file.
pf.write_to_file("partfun.txt")
print(pf.free_energy(300)/(kjmol))
# Write an extensive overview of the thermodynamic properties to a file:
ta = ThermoAnalysis(pf, [300,400,500,600])
ta.write_to_file("thermo.csv")
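# A small follow-up sketch reusing only calls already made above: tabulate
# the free energy (in kJ/mol) over the same temperature grid that was passed
# to ThermoAnalysis.
for T in [300, 400, 500, 600]:
    print(T, pf.free_energy(T) / kjmol)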
|
molmod/tamkin
|
tamkin/examples/001_ethane/thermo.py
|
Python
|
gpl-3.0
| 2,326
|
[
"Gaussian"
] |
268879b38f7fe6043a7a0ed96eaee66de2bba42466fb94c9e7f17b68e65362bb
|
"""A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
import shutil
import uuid
from tornado import web
from IPython.html.services.notebooks.nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd
from IPython.utils import tz
from IPython.html.utils import is_hidden, to_os_path
from IPython.utils.tz import utcnow, tzUTC
METADATA_NBNAME = 'x-object-meta-nbname'
METADATA_CHK_ID = 'x-object-meta-checkpoint-id'
METADATA_LAST_MODIFIED = 'x-object-meta-nb-last-modified'
METADATA_NB_ID = 'x-object-meta-notebook-id'
DATE_FORMAT = "%X-%x"
NB_DNEXIST_ERR = 'Notebook does not exist: {}'
NB_SAVE_UNK_ERR = 'Unexpected error while saving notebook: {}'
NB_DEL_UNK_ERR = 'Unexpected error while deleting notebook: {}'
CHK_SAVE_UNK_ERR = 'Unexpected error while saving checkpoint: {}'
MAX_HISTORY_SIZE = 15
def sort_key(item):
"""Case-insensitive sorting."""
return item['name'].lower()
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
user_agent = "bookstore v{version}".format(version='1.0.0')
container_name = Unicode('notebooks', config=True,
help='Container name for notebooks.')
save_script = Bool(False, config=True,
help="""Automatically create a Python script when saving the notebook.
For easier use of import, %run and %load across notebooks, a
<notebook-name>.py script will be created next to any
<notebook-name>.ipynb on each save. This can also be set with the
short `--script` flag.
"""
)
notebook_dir = Unicode(getcwd(), config=True)
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.notebook_dir = os.path.abspath(new)
return
if not os.path.exists(new) or not os.path.isdir(new):
raise TraitError("notebook dir %r is not a directory" % new)
checkpoint_dir = Unicode('.ipynb_checkpoints', config=True,
help="""The directory name in which to keep notebook checkpoints
This is a path relative to the notebook's own directory.
By default, it is .ipynb_checkpoints
"""
)
def _copy(self, src, dest):
"""copy src to dest
like shutil.copy2, but log errors in copystat
"""
shutil.copyfile(src, dest)
try:
shutil.copystat(src, dest)
except OSError as e:
self.log.debug("copystat on %s failed", dest, exc_info=True)
def get_notebook_names(self, path=''):
"""List all notebook names in the notebook dir and path."""
path = path.strip('/')
if not os.path.isdir(self._get_os_path(path=path)):
raise web.HTTPError(404, 'Directory not found: ' + path)
names = glob.glob(self._get_os_path('*'+self.filename_ext, path))
names = [os.path.basename(name)
for name in names]
return names
def path_exists(self, path):
"""Does the API-style path (directory) actually exist?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.notebook_dir)
def _get_os_path(self, name=None, path=''):
"""Given a notebook name and a URL path, return its file system
path.
Parameters
----------
name : string
The name of a notebook file with the .ipynb extension
path : string
The relative URL path (with '/' as separator) to the named
notebook.
Returns
-------
path : string
A file system path that combines notebook_dir (location where
the server started), the relative path, and the filename, joined
with the current operating system's path separator.
"""
if name is not None:
path = path + '/' + name
return to_os_path(path, self.notebook_dir)
def notebook_exists(self, name, path=''):
"""Returns a True if the notebook exists. Else, returns False.
Parameters
----------
name : string
The name of the notebook you are checking.
path : string
The relative path to the notebook (with '/' as separator)
Returns
-------
bool
"""
path = path.strip('/')
nbpath = self._get_os_path(name, path=path)
return os.path.isfile(nbpath)
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def list_dirs(self, path):
"""List the directories for a given API style path."""
path = path.strip('/')
os_path = self._get_os_path('', path)
if not os.path.isdir(os_path):
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
elif is_hidden(os_path, self.notebook_dir):
self.log.info("Refusing to serve hidden directory, via 404 Error")
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
dir_names = os.listdir(os_path)
dirs = []
for name in dir_names:
os_path = self._get_os_path(name, path)
if os.path.isdir(os_path) and not is_hidden(os_path, self.notebook_dir)\
and self.should_list(name):
try:
model = self.get_dir_model(name, path)
except IOError:
# skip directories whose model cannot be read instead of
# appending a stale or unbound model
continue
dirs.append(model)
dirs = sorted(dirs, key=sort_key)
return dirs
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def get_dir_model(self, name, path=''):
"""Get the directory model given a directory name and its API style path"""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isdir(os_path):
raise IOError('directory does not exist: %r' % os_path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'directory'
return model
def list_notebooks(self, path):
"""Returns a list of dictionaries that are the standard model
for all notebooks in the relative 'path'.
Parameters
----------
path : str
the URL path that describes the relative path for the
listed notebooks
Returns
-------
notebooks : list of dicts
a list of the notebook models without 'content'
"""
path = path.strip('/')
notebook_names = self.get_notebook_names(path)
notebooks = [self.get_notebook(name, path, content=False)
for name in notebook_names if self.should_list(name)]
notebooks = sorted(notebooks, key=sort_key)
return notebooks
def get_notebook(self, name, path='', content=True):
""" Takes a path and name for a notebook and returns its model
Parameters
----------
name : str
the name of the notebook
path : str
the URL path that describes the relative path for
the notebook
Returns
-------
model : dict
the notebook model. If contents=True, returns the 'contents'
dict in the model as well.
"""
path = path.strip('/')
if not self.notebook_exists(name=name, path=path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % name)
os_path = self._get_os_path(name, path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'notebook'
if content:
with io.open(os_path, 'r', encoding='utf-8') as f:
try:
nb = current.read(f, u'json')
except Exception as e:
raise web.HTTPError(400, u"Unreadable Notebook: %s %s" % (os_path, e))
self.mark_trusted_cells(nb, name, path)
model['content'] = nb
return model
def save_notebook(self, model, name='', path=''):
"""Save the notebook model and return the model with no content."""
path = path.strip('/')
if 'content' not in model:
raise web.HTTPError(400, u'No notebook JSON data provided')
# One checkpoint should always exist
if self.notebook_exists(name, path) and not self.list_checkpoints(name, path):
self.create_checkpoint(name, path)
new_path = model.get('path', path).strip('/')
new_name = model.get('name', name)
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
# Save the notebook file
os_path = self._get_os_path(new_name, new_path)
nb = current.to_notebook_json(model['content'])
self.check_and_sign(nb, new_name, new_path)
if 'name' in nb['metadata']:
nb['metadata']['name'] = u''
try:
self.log.debug("Autosaving notebook %s", os_path)
with io.open(os_path, 'w', encoding='utf-8') as f:
current.write(nb, f, u'json')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s %s' % (os_path, e))
# Save .py script as well
if self.save_script:
py_path = os.path.splitext(os_path)[0] + '.py'
self.log.debug("Writing script %s", py_path)
try:
with io.open(py_path, 'w', encoding='utf-8') as f:
current.write(nb, f, u'py')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s %s' % (py_path, e))
model = self.get_notebook(new_name, new_path, content=False)
return model
def update_notebook(self, model, name, path=''):
"""Update the notebook's path and/or name"""
path = path.strip('/')
new_name = model.get('name', name)
new_path = model.get('path', path).strip('/')
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
model = self.get_notebook(new_name, new_path, content=False)
return model
def delete_notebook(self, name, path=''):
"""Delete notebook by name and path."""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isfile(os_path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % os_path)
# clear checkpoints
for checkpoint in self.list_checkpoints(name, path):
checkpoint_id = checkpoint['id']
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if os.path.isfile(cp_path):
self.log.debug("Unlinking checkpoint %s", cp_path)
os.unlink(cp_path)
self.log.debug("Unlinking notebook %s", os_path)
os.unlink(os_path)
def rename_notebook(self, old_name, old_path, new_name, new_path):
"""Rename a notebook."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_name == old_name and new_path == old_path:
return
new_os_path = self._get_os_path(new_name, new_path)
old_os_path = self._get_os_path(old_name, old_path)
# Should we proceed with the move?
if os.path.isfile(new_os_path):
raise web.HTTPError(409, u'Notebook with name already exists: %s' % new_os_path)
if self.save_script:
old_py_path = os.path.splitext(old_os_path)[0] + '.py'
new_py_path = os.path.splitext(new_os_path)[0] + '.py'
if os.path.isfile(new_py_path):
raise web.HTTPError(409, u'Python script with name already exists: %s' % new_py_path)
# Move the notebook file
try:
shutil.move(old_os_path, new_os_path)
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming notebook: %s %s' % (old_os_path, e))
# Move the checkpoints
old_checkpoints = self.list_checkpoints(old_name, old_path)
for cp in old_checkpoints:
checkpoint_id = cp['id']
old_cp_path = self.get_checkpoint_path(checkpoint_id, old_name, old_path)
new_cp_path = self.get_checkpoint_path(checkpoint_id, new_name, new_path)
if os.path.isfile(old_cp_path):
self.log.debug("Renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
shutil.move(old_cp_path, new_cp_path)
# Move the .py script
if self.save_script:
shutil.move(old_py_path, new_py_path)
# Checkpoint-related utilities
def get_checkpoints_home(self, path=''):
"""find the home path to the checkpoints"""
path = path.strip('/')
os_path = os.path.join(self._get_os_path(path=path), self.checkpoint_dir)
if not os.path.exists(os_path):
os.mkdir(os_path)
return os_path
def get_checkpoint_path(self, checkpoint_id, name, path=''):
"""find the path to a checkpoint"""
path = path.strip('/')
basename, _ = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=self.filename_ext,
)
os_path = self._get_os_path(path=path)
cp_dir = os.path.join(os_path, self.checkpoint_dir)
if not os.path.exists(cp_dir):
os.mkdir(cp_dir)
cp_path = os.path.join(cp_dir, filename)
return cp_path
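# Filename pattern sketch (standalone illustration; the values are
# hypothetical): "{name}-{checkpoint_id}{ext}".format(name='analysis',
# checkpoint_id='checkpoint', ext='.ipynb') yields
# 'analysis-checkpoint.ipynb', which is the pattern list_checkpoints()
# globs for below.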
def get_checkpoint_model(self, checkpoint_id, name, path=''):
"""construct the info dict for a given checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
stats = os.stat(cp_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id = checkpoint_id,
last_modified = last_modified,
)
return info
# public checkpoint API
def new_checkpoint_id(self):
"""Generate a new checkpoint_id and store its mapping."""
return unicode(uuid.uuid4())
def create_checkpoint(self, name, path=''):
"""Create a checkpoint from the current state of a notebook"""
path = path.strip('/')
nb_path = self._get_os_path(name, path)
# only the one checkpoint ID:
checkpoint_id = self.new_checkpoint_id()
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
self.log.debug("creating checkpoint for notebook %s", name)
# get_checkpoint_path() above already ensures the checkpoint directory
# exists; creating self.checkpoint_dir here would wrongly be relative to
# the current working directory.
self._copy(nb_path, cp_path)
# return the checkpoint info
return self.get_checkpoint_model(checkpoint_id, name, path)
def list_checkpoints(self, name, path=''):
"""list the checkpoints for a given notebook
This notebook manager currently only supports one checkpoint per notebook.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.get_checkpoint_path(checkpoint_id, name, path)
self.log.debug('name=%s', self.get_checkpoints_home(path))
basename, _ = os.path.splitext(name)
cp_path = os.path.join(self.get_checkpoints_home(path), basename + '-*' + self.filename_ext)
os_paths = glob.glob(cp_path)
os_paths.sort(key=os.path.getmtime, reverse=True)
if not os_paths:
return []
models = []
for p in os_paths:
base_name, _ = os.path.splitext(p)
checkpoint_id = base_name.split(basename + '-')[1]
models.append(self.get_checkpoint_model(checkpoint_id, name, path))
self.log.debug('models=%s', len(models[:MAX_HISTORY_SIZE]))
return models[:MAX_HISTORY_SIZE]
def restore_checkpoint(self, checkpoint_id, name, path=''):
"""restore a notebook to a checkpointed state"""
path = path.strip('/')
self.log.info("restoring Notebook %s from checkpoint %s", name, checkpoint_id)
nb_path = self._get_os_path(name, path)
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
self.log.debug("checkpoint file does not exist: %s", cp_path)
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)
)
# ensure notebook is readable (never restore from an unreadable notebook)
with io.open(cp_path, 'r', encoding='utf-8') as f:
current.read(f, u'json')
self._copy(cp_path, nb_path)
self.log.debug("copying %s -> %s", cp_path, nb_path)
def delete_checkpoint(self, checkpoint_id, name, path=''):
"""delete a notebook's checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s/%s-%s' % (path, name, checkpoint_id)
)
self.log.debug("unlinking %s", cp_path)
os.unlink(cp_path)
def info_string(self):
return "Serving notebooks from local directory: %s" % self.notebook_dir
|
wusung/ipython-notebook-store
|
bookstore/filenotebookmanager.py
|
Python
|
apache-2.0
| 20,614
|
[
"Brian"
] |
2ead2999bf99454326079a07011d5867482830f99f3ca78874f4ab3302fe4c76
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-job-kill
# Author : Stuart Paterson
########################################################################
"""
Issue a kill signal to a running DIRAC job
Usage:
dirac-wms-job-kill [options] ... JobID ...
Arguments:
JobID: DIRAC Job ID
Example:
$ dirac-wms-job-kill 1918
Killed job 1918
.. Note::
- jobs will not disappear from JobDB until JobCleaningAgent has deleted them
- jobs will be deleted "immediately" if they are in the status 'Deleted'
- USER jobs will be deleted after a grace period if they are in status Killed, Failed, Done
What happens when you hit the "kill job" button
- if the job is in status 'Running', 'Matched', 'Stalled' it will be properly killed, and then its
status will be marked as 'Killed'
- otherwise, it will be marked directly as 'Killed'.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp(exitCode=1)
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
result = Dirac().killJob(parseArguments(args))
if result['OK']:
print('Killed jobs %s' % ','.join([str(j) for j in result['Value']]))
exitCode = 0
else:
print('ERROR', result['Message'])
exitCode = 2
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
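# Hedged API sketch of the same operation without the CLI wrapper (assumes a
# configured DIRAC client; the job ID is illustrative):
# from DIRAC.Interfaces.API.Dirac import Dirac
# result = Dirac().killJob(1918)
# if not result['OK']:
#     print('ERROR', result['Message'])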
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_wms_job_kill.py
|
Python
|
gpl-3.0
| 1,677
|
[
"DIRAC"
] |
b83d1243bef27406ecfda1ce28d844e44f1be6b0f900561860f89a42660f5177
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Bowtie, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_Bowtie(ConfigureMake):
"""
Support for building Bowtie (ultrafast and sensitive read alignment)
"""
def configure_step(self):
"""
Set compilers in buildopts, there is no configure script.
"""
self.cfg.update('buildopts', 'CC="%s" CPP="%s"' % (os.getenv('CC'), os.getenv('CXX')))
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
try:
os.makedirs(destdir)
for filename in ['bowtie-build', 'bowtie', 'bowtie-inspect']:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except (IOError, OSError), err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)
def sanity_check_step(self):
"""Custom sanity check for Bowtie."""
custom_paths = {
'files': ['bin/bowtie', 'bin/bowtie-build', 'bin/bowtie-inspect'],
'dirs': []
}
super(EB_Bowtie, self).sanity_check_step(custom_paths=custom_paths)
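# Illustrative easyconfig sketch that would drive this easyblock (all field
# values are placeholders, not a tested configuration):
# name = 'Bowtie'
# version = '1.0.0'
# homepage = 'http://bowtie-bio.sourceforge.net/'
# description = "Bowtie, an ultrafast, memory-efficient short read aligner."
# toolchain = {'name': 'goolf', 'version': '1.4.10'}
# sources = ['%(namelower)s-%(version)s-src.zip']
# moduleclass = 'bio'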
|
ULHPC/modules
|
easybuild/easybuild-easyblocks/easybuild/easyblocks/b/bowtie.py
|
Python
|
mit
| 2,657
|
[
"Bowtie"
] |
408c50d58fe91110ac635339c4cee8304d146ed69ad471f6cf160b4b809202d2
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
import numpy as np
print("""
=======================================================
= slice_input.py =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# System parameters
#############################################################
box_l = 10.0
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.cell_system.skin = 0.4
system.cell_system.max_num_cells = 2744
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
# Particle setup
#############################################################
n_part = 10
id_list = np.arange(n_part)
pos_list = np.random.random((n_part,3)) * system.box_l
type_list = np.ones(n_part, dtype=int)
system.part.add(id=id_list, pos=pos_list, type=type_list)
print("TYPE\n%s"%system.part[:].type)
system.part[0:2].type=[3,3]
print("TYPE_NEW\n%s"%system.part[:].type)
print("POS\n%s"%system.part[:].pos)
system.part[:5].pos=[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5]]
print("POS_NEW\n%s"%system.part[:].pos)
print("V\n%s"%system.part[:].v)
system.part[:2].v=[[1,2,3],[2,3,4]]
print("V_NEW\n%s"%system.part[:].v)
print("F\n%s"%system.part[:].f)
system.part[:2].f=[[3,4,5],[4,5,6]]
print("F_NEW\n%s"%system.part[:].f)
if "MASS" in code_info.features():
print("MASS\n%s"%system.part[:].mass)
system.part[:2].mass=[2,3]
print("MASS_NEW\n%s"%system.part[:].mass)
if "ELECTROSTATICS" in code_info.features():
print("Q\n%s"%system.part[:].q)
system.part[::2].q=np.ones(n_part/2)
system.part[1::2].q=-np.ones(n_part/2)
print("Q_NEW\n%s"%system.part[:].q)
|
richter-t/espresso
|
samples/python/slice_input.py
|
Python
|
gpl-3.0
| 2,860
|
[
"ESPResSo"
] |
d36244c20f5f06d6df54c0598e1befda9855aa0af1acfdc09d1a79ddb92e7be5
|
"""
call the python built-in compile to convert a python program to an ast structure, then convert that structure
to a simplified structure using the classes in parsetree.py
"""
from parsetree import *
import _ast
import os
from os.path import split, splitext, join
import codecs
import sys
from jslib import pystorm_modules
loaded_modules = set()
global initial_module_dir
initial_module_dir = ''
class FrontendException(Exception):
def __init__(self,detail,lineno):
self.detail = detail
self.lineno = lineno
class FrontendNotHandledException(FrontendException):
def __init__(self,detail,lineno):
FrontendException.__init__(self,detail,lineno)
def __str__(self):
return "Cannot convert python code at line "+str(self.lineno)+": "+ self.detail
class FrontendInternalException(FrontendException):
def __init__(self,detail,lineno):
FrontendException.__init__(self,detail,lineno)
def __str__(self):
return "Python parser internal error "+str(self.lineno)+": "+ self.details
class GenVisitor(object):
def __init__(self,module_name,module_path,module_namespace):
self.uops = { 'Not':'not', 'UAdd':'+', 'USub':'-', 'Invert':'~' }
self.bops = { 'Eq':"==", 'NotEq':'!=', 'Mult':"*", 'Sub':"-", 'Lt':'<', 'LtE':'<=', 'Gt':'>', 'GtE':'>=', 'Add':'+', 'Mod':'%', 'And':'and', 'Or':'or', 'Div':'/', 'Pow':'**', 'In':'in', 'NotIn':'not in', 'RShift':'>>', 'LShift':'<<', 'BitOr':'|', 'BitXor':'^', 'BitAnd':'&', 'FloorDiv':'//' }
self.aops = { 'Add':'+=', 'Sub':'-=', 'Mult':'*=', 'Div':'/=', 'Mod':'%=', 'Pow':'**=', 'RShift':'>>=', 'LShift':'<<=', 'BitOr':'|=', 'BitXor':'^=', 'BitAnd':'&=', 'FloorDiv':'//=' }
self.module = None
self.module_name = module_name
self.module_path = module_path
self.module_namespace = module_namespace
self.scope = []
def parse(self,contents):
ast = compile(contents, '<string>', 'exec', _ast.PyCF_ONLY_AST)
return self.visit(ast)
def visit(self,ast):
if ast == None:
return None
name = ast.__class__.__name__
if hasattr(self,name):
return getattr(self,name)(ast)
else:
raise FrontendNotHandledException("No handler for AST object: "+name,ast.lineno)
def pushScope(self,scope):
return self.scope.append(scope)
def popScope(self):
self.scope.pop()
def getInnerScope(self):
return self.scope[len(self.scope)-1]
def isGlobal(self,name):
return self.getInnerScope().isGlobal(name)
def addGlobal(self,name):
self.getInnerScope().addGlobal(name)
# expressions
def Attribute(self,ast):
obj = self.visit(ast.value)
attr = ast.attr
return AttributeLookup(obj,attr)
def BinOp(self,ast):
op = self.bops[ast.op.__class__.__name__]
lhs = self.visit(ast.left)
rhs = self.visit(ast.right)
return BinaryOp(op,lhs,rhs)
def BoolOp(self,ast):
op = self.bops[ast.op.__class__.__name__]
values = ast.values
assert(len(values)>1)
for idx in xrange(0,len(values)):
values[idx] = self.visit(values[idx])
expr = None
start = len(values)-2
while start >= 0:
if expr == None:
expr = BinaryOp(op,values[start],values[start+1])
else:
expr = BinaryOp(op,values[start],expr)
start -= 1
return expr
def Call(self,ast):
args = []
kwargs = {}
for keyword in ast.keywords:
key = VarName(keyword.arg)
kwargs[key] = self.visit(keyword.value)
for a in xrange(0,len(ast.args)):
arg = self.visit(ast.args[a])
args.append(arg)
if ast.func.__class__.__name__ == 'Attribute':
target = self.visit(ast.func.value)
return MethodCall(target,ast.func.attr,args,kwargs)
fname = ast.func.id
return FunctionCall(fname,args,kwargs)
def Compare(self,ast):
result = None
lhs = self.visit(ast.left)
for index in xrange(0,len(ast.ops)):
op = self.bops[ast.ops[index].__class__.__name__]
rhs = self.visit(ast.comparators[index])
clause = BinaryOp(str(op),lhs,rhs)
if result == None:
result = clause
else:
result = BinaryOp("and",result,clause)
lhs = rhs
return result
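# What Compare() produces for a chained comparison, sketched as plain data
# (standalone illustration of the lowering implemented above):
# "a < b < c"  becomes  BinaryOp("and", BinaryOp("<", a, b), BinaryOp("<", b, c))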
def comprehension(self, ast):
t = self.visit(ast.target)
i = self.visit(ast.iter)
cond = None
for e in ast.ifs:
target = self.visit(e.left)
# print str(e.__dict__)
for index in xrange(0,len(e.ops)):
c = self.visit(e.comparators[index])
op = e.ops[index].__class__.__name__
if op in self.bops:
op = self.bops[op]
else:
raise FrontendNotHandledException('Binary Operation:'+op,ast.lineno)
bop = BinaryOp(op,target,c)
if cond:
cond = BinaryOp('and',cond,bop)
else:
cond = bop
return ListComprehensionGenerator(t,i,cond)
def Dict(self, ast):
keyvals = []
for idx in xrange(0,len(ast.keys)):
key = ast.keys[idx]
value = ast.values[idx]
keyvals.append((self.visit(key),self.visit(value)))
return DictionaryValue(keyvals)
def Expr(self, ast):
expr = self.visit(ast.value)
return [ExpressionStatement(expr)]
def GeneratorExp(self, ast):
# it seems that generator expressions can be handled
# in the same way as list comprehensions!
return self.ListComp(ast)
def Index(self,ast):
return self.visit(ast.value)
def Lambda(self, ast):
args = []
for a in ast.args.args:
args.append(a.id)
body = self.visit(ast.body)
return Lambda(args,body)
def List(self,ast):
elements = []
for e in ast.elts:
elements.append(self.visit(e))
return ListValue(elements)
def ListComp(self, ast):
e = self.visit(ast.elt)
generators = []
for g in ast.generators:
generators.append(self.visit(g))
return ListComprehension(e,generators)
def Name(self,ast):
if ast.id == 'True':
return Literal(True)
elif ast.id == 'False':
return Literal(False)
elif ast.id == 'None':
return Literal(None)
return VarName(ast.id)
def Num(self,ast):
return Literal(ast.n)
def Slice(self,ast):
return (self.visit(ast.lower),self.visit(ast.upper),self.visit(ast.step))
def Str(self,ast):
return Literal(ast.s)
def Subscript(self,ast):
op = "[]"
arg = self.visit(ast.value)
index = self.visit(ast.slice)
if isinstance(index,tuple):
# slice operation
return SliceOp(arg,index)
else:
return BinaryOp(op,arg,index)
def Tuple(self,ast):
return self.List(ast)
def UnaryOp(self,ast):
op = self.uops[ast.op.__class__.__name__]
arg = self.visit(ast.operand)
return UniOp(op,arg)
# Blocks and statements
def makeBlock(self,code):
if not (isinstance(code,Block)):
return Block([code])
else:
return code
def Assign(self, ast):
if len(ast.targets) != 1:
raise FrontendNotHandledException('Multiple targets in assignment',ast.lineno)
target = self.visit(ast.targets[0])
expr = self.visit(ast.value)
adef = AssignmentStatement(target, expr)
return [adef]
def AugAssign(self, ast):
target = self.visit(ast.target)
op = self.aops[ast.op.__class__.__name__]
expr = self.visit(ast.value)
adef = AugmentedAssignmentStatement(target, op, expr)
return [adef]
def Break(self,ast):
bdef = BreakStatement()
return [bdef]
def ClassDef(self,ast):
name = ast.name
subclasses = []
bases = []
for base in ast.bases:
bases.append(base.id)
memberfns = []
staticvars = []
constructor = None
cdef = ClassDefinitionStatement(name,self.module)
self.pushScope(cdef)
for stmt in ast.body:
stmts = self.visit(stmt)
for b in stmts:
if isinstance(b,FunctionDefinitionStatement):
if b.fname == '__init__':
constructor = b
else:
memberfns.append(b)
elif isinstance(b,ClassDefinitionStatement):
b.setParentClass(cdef)
subclasses.append(b)
elif isinstance(b,AssignmentStatement):
staticvars.append((b.target,b.expr))
elif isinstance(b,EmptyStatement):
pass
elif isinstance(b,ExpressionStatement):
pass # perhaps a docstring? FIXME - need to check
else:
raise FrontendNotHandledException("class contents except for member variables, classes and functions",ast.lineno)
cdef.configure(bases,constructor,memberfns,staticvars,subclasses)
self.module.addClassMountPoint(name,cdef.getClassNamespace())
self.popScope()
return [cdef]+subclasses
def Continue(self,ast):
cdef = ContinueStatement()
return [cdef]
def Delete(self, d):
dels = []
for target in d.targets:
dels.append(DeleteStatement(self.visit(target)))
return dels
def excepthandler(self, ast):
etype = None
if ast.type:
etype = ast.type.id
ename = None
if ast.name:
ename = ast.name.id
body = self.Statements(ast.body)
edef = ExceptionHandlerStatement(etype,ename,body)
return edef
def For(self, ast):
target = self.visit(ast.target)
loopexpr = self.visit(ast.iter)
body = self.Statements(ast.body)
if isinstance(loopexpr,FunctionCall):
if loopexpr.fname == "xrange" or loopexpr.fname == 'range':
print loopexpr.fname, loopexpr.args
if len(loopexpr.args)==1:
lwb = Literal(0)
upb = loopexpr.args[0]
else:
# two- and three-argument range/xrange both carry explicit bounds;
# the original elif left lwb/upb unbound in the three-argument case
lwb = loopexpr.args[0]
upb = loopexpr.args[1]
if len(loopexpr.args)==3:
step = loopexpr.args[2]
else:
step = Literal(1)
fdef = ForStatement(target,lwb,upb,step,body)
return [fdef]
fdef = ForInStatement(target,loopexpr,body)
return [fdef]
def FunctionDef(self, ast):
decorators = set()
for decorator in ast.decorator_list:
decorators.add(decorator.id)
fname = ast.name
argnames = []
argdefaults = []
for d in ast.args.defaults:
argdefaults.append(self.visit(d))
for a in ast.args.args:
argnames.append(a.id)
vararg = None
kwarg = None
if ast.args.vararg:
vararg = ast.args.vararg
if ast.args.kwarg:
kwarg = ast.args.kwarg
fdef = FunctionDefinitionStatement(fname)
self.pushScope(fdef)
body = self.Statements(ast.body)
fdef.configure(decorators,argnames,argdefaults,vararg,kwarg,body)
self.popScope()
return [fdef]
def Global(self, ast):
globs = []
for name in ast.names:
globs.append(GlobalStatement(name))
self.addGlobal(name)
return globs
def If(self,ast):
tests = []
cond = self.visit(ast.test)
block = self.Statements(ast.body)
tests.append((cond,block))
elseblock = None
if ast.orelse:
elseblock = self.Statements(ast.orelse)
idef = IfStatement(tests,elseblock)
return [idef]
def Import(self,ast):
modules = []
for name in ast.names:
(modpath,namespace) = self.locateModule(name.name,name.asname)
code = None
mname = name.name
if name.asname and name.asname != "":
mname = name.asname
self.module.addModuleMountPoint(mname,namespace)
global loaded_modules
if namespace not in loaded_modules:
#try:
code = pyread(modpath,(name.name,modpath,namespace,[]))
#except Exception, ex:
# raise ex
modules.append(code)
loaded_modules.add(namespace)
return modules
def ImportFrom(self,ast):
if ast.module == "__future__":
return []
importall = False
if len(ast.names)==1 and ast.names[0].name=='*':
importall = True
namespace = self.module_namespace
(modpath,namespace) = self.locateModule(ast.module,'')
if importall == False:
for name in ast.names:
aliasedname = name.name
if name.asname and name.asname != "":
aliasedname = name.asname
unaliasedname = namespace + "." + name.name
self.module.aliases.append((aliasedname,unaliasedname))
else:
namespace = self.module_namespace
modules = []
global loaded_modules
if namespace not in loaded_modules:
code = None
try:
code = pyread(modpath,(ast.module,modpath,namespace))
except Exception, ex:
# cannot find file, see if there is a "module equivalent"
if importall:
for modname in pystorm_modules:
if ast.module.endswith(modname):
jspath = join("modules","javascript",pystorm_modules[modname]+".js")
jsfile = open(jspath,"r")
jscode = jsfile.read()
return [Verbatim(jscode)]
# not found on the import path either; re-raise the original error
raise ex
modules.append(code)
loaded_modules.add(namespace)
return modules
def locateModule(self,module,asname):
modpath = module
modpath = modpath.replace(".","/")
modpath += ".py"
(sourcedir,sourcefile) = split(self.module_path)
modpath = join(sourcedir,modpath)
namespace = ''
# first see if the module is located relative to the calling module, but only
# if the calling module is the initial module (or in the same directory)
if sourcedir != initial_module_dir:
try:
testf = open(modpath,"r")
ns = module
if asname and asname != "":
ns = asname
namespace = self.module_namespace
if namespace.rfind(".") != -1:
namespace = namespace[:namespace.rfind(".")]
if namespace != "":
namespace += "."
namespace += ns
return (modpath,namespace)
except:
pass
if asname and asname != "":
namespace = asname
else:
namespace = module
# also search the current PYTHONPATH
for path in sys.path:
modpath = path+"/"+module+".py"
if os.path.exists(modpath):
print (modpath, namespace)
return (modpath, namespace)
# assume that the module is relative to the top level module
modpath = module
modpath = modpath.replace(".","/")
modpath += ".py"
modpath = join(initial_module_dir,modpath)
return (modpath,namespace)
def Module(self,ast):
m = Module(self.module_name,self.module_path,self.module_namespace)
self.module = m
self.pushScope(m)
modulebody = self.Statements(ast.body)
m.configure(modulebody)
self.popScope()
return m
def Pass(self,ast):
edef = EmptyStatement()
return [edef]
def Print(self,ast):
pvalues = []
for value in ast.values:
pvalue = self.visit(value)
pvalues.append(pvalue)
pdef = PrintStatement(pvalues)
return [pdef]
def Raise(self, r):
rtype = None
robj = None
if r.type:
rtype = self.visit(r.type)
if r.inst:
robj = self.visit(r.inst)
rdef = RaiseStatement(rtype,robj)
return [rdef]
def Return(self,ast):
rvalue = self.visit(ast.value)
rdef = ReturnStatement(rvalue)
return [rdef]
def Statements(self,ast):
statements = []
skip = False
for s in ast:
stmts = []
try:
stmts = self.visit(s)
except:
if not skip:
raise
else:
stmts = []
for stmt in stmts:
if stmt:
if isinstance(stmt,ExpressionStatement):
expr = stmt.expr
if isinstance(expr,Literal):
val = expr.value
if isinstance(val,str):
if val.startswith('pystorm-verbatim:'):
statements.append(Verbatim(val[len('pystorm-verbatim:'):]))
continue
if val.startswith('pystorm-skip-begin'):
skip = True
continue
if val.startswith('pystorm-skip-end'):
skip = False
continue
if not skip:
statements.append(stmt)
block = Block(statements)
return block
def TryFinally(self, ast):
block = self.Statements(ast.body)
handlers = []
if isinstance(block,Block) and len(block.statements)==1:
stmt = block.statements[0]
if isinstance(stmt,TryStatement):
block = stmt.body
handlers = stmt.handlers
finalblock = self.Statements(ast.finalbody)
# fixme check for else
tdef = TryStatement(block,handlers,finalblock)
return [tdef]
def TryExcept(self, ast):
block = self.Statements(ast.body)
handlers = []
for h in ast.handlers:
handlers.append(self.visit(h))
# fixme check for else
tdef = TryStatement(block,handlers,None)
return [tdef]
def ExceptHandler(self, ast):
ast.body = self.Statements(ast.body)
return ast
def While(self, ast):
cond = self.visit(ast.test)
body = self.Statements(ast.body)
wdef = WhileStatement(cond,body)
return [wdef]
def With(self,ast):
raise FrontEndNotHandledException("with clause not supported",ast.lineno)
def Yield(self,ast):
raise FrontEndNotHandledException("yield clause not supported",ast.lineno)
def pyread(path,module=None):
# print "// parsing:"+path
if module == None:
module = ("__main__",path,"")
(mdir,mfile) = split(path)
global initial_module_dir
initial_module_dir = mdir
global loaded_modules
loaded_modules = set()
file = codecs.open( path, "r", "utf-8" )
contents = file.read()
gv = GenVisitor(module[0],module[1],module[2])
return gv.parse(contents)
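# Hypothetical driver sketch (the source path is illustrative; the returned
# object is a parsetree.Module built by GenVisitor):
# module_tree = pyread("examples/hello.py")
# print(module_tree)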
|
treeform/pystorm
|
pyfrontend.py
|
Python
|
mit
| 20,299
|
[
"VisIt"
] |
a036863a969794eb9b2342e8047f4f1ece189685b5c5924120d8179e5a7467b6
|
"""
Module for recoding matrices.
date: 25/08/2016
"""
import sys
import os
import types
import subprocess
import re
import copy
import warnings
from Bio.Data import CodonTable
from p4 import Alignment
from p4 import func
from p4 import var
from p4 import P4Error
from p4 import read
from p4.code_utils import \
codon_position, \
codon_position_is_degenerate, \
codon_slice_has_aas, \
codon_slice_is_constant_aa, \
codon_slice_is_degenerate, \
codons_from_triplet_slice, \
degenerate_codon_slice, \
getBiopythonCode, nuc2val, \
recode_sequence, \
reduce_by_or, \
val2nuc, \
Code, R, Y
def formatwarning(message, category, filename, lineno, line):
return "%s:%s: %s:%s" % (filename, lineno, category.__name__, message)
warnings.formatwarning = formatwarning
CAT = "".join
# This method uses a generalized codon size, but this is not the case of everything in this module.
def getCodonPositionMask(self, pos, codon_size=3):
"""This method returns a mask corresponding to sites at *pos*-th codon position.
*codon_size* can be set to specify that the codons have a different size than 3."""
def pos_mask(i):
if (i + codon_size - pos) % codon_size == 0:
return "1"
else:
return "0"
return CAT([pos_mask(i+1) for i in range(self.length)])
Alignment.getCodonPositionMask = getCodonPositionMask
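# Worked example of the mask logic above (standalone; does not need p4):
# with pos=3 and codon_size=3, every third site is flagged.
# def pos_mask(i, pos=3, codon_size=3):
#     return "1" if (i + codon_size - pos) % codon_size == 0 else "0"
# print("".join(pos_mask(i + 1) for i in range(9)))  # -> '001001001'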
def getDegenerateSitesMask(self, transl_table=1, code=None, all_3rd_positions=False):
"""This method returns a mask corresponding to sites contributing to codon degeneracy.
This is intended to be used for submatrix extraction using the noLRSall3 method,
using :meth:`Alignment.getSubsetUsingMask` (with the option *inverse=True*, to get
the degeneracy-free sites).
If *all_3rd_positions* is set to True, then the mask includes all 3rd codon positions
regardless of their effective contribution to codon degeneracy.
The matrix is expected to start at a first codon position and stop at a third
codon position.
*transl_table* is an integer used to determine under which genetic code
the codons are to be interpreted. The default value of 1 corresponds to the
standard genetic code. Other values can be found in p4.GeneticCode.py
Alternatively, the genetic code can be provided directly, using a
dictionnary *code* whose keys are codons, and the values are the
corresponding amino-acids. All triplets present in the matrix should also
be present in the code dictionnary, except triplets of indels. Codons and
the corresponding amino-acids are expected to be in lower case.
If such a code is provided, the value of *transl_table* is ignored.
The name of this method noLRSall3 comes from its effect in the case of the
standard genetic code: it discards the sites participating in first
position degeneracy for leucine (L) and arginine (R), first and second
position degeneracy for serine (S), as well as all third codon positions
where degeneracy is observed (or all of them if *all_3rd_positions* is True).
Depending on the genetic code used, the type of amino-acid affected could
be different.
The goal of the submatrix extraction using the produced mask is to remove
the sites that could have been affected by composition bias: mutations
within a set of synonymous codons are more likely to favour the codons that
conform to the general nucleotide composition. However, one could argue
that this bias is less likely to have played when the observed codons
differ by more than one nucleotide and at least a non-synonymous mutation
has to occur to bridge the gap. With the standard genetic code, this occurs
for serine codons. Indeed, the minimal mutation paths connecting the
serine AGY and TCN codon categories are
AGY (serine) <-> TGY (cysteine) <-> TCY (serine)
and
AGY (serine) <-> ACY (threonine) <-> TCY (serine)
The current implementation (as of june 2012) does not check that a
mutational path between synonymous codons exists, that consists only in
synonymous point mutations. This may be considered as a bug, because you
may not want AGY and TCN (or other similar cases that could occur with
different genetic codes) to be considered as a single degeneracy continuum.
"""
gm = ["Alignment.getDegenerateSitesMask()"]
if code is None:
#code = GeneticCode(transl_table).code
# Use the generalized Code class defined in code_utils.py
code = Code(transl_table).code
n_codons = self.length / 3
mask = ""
# Loop over the successive triplets of sites.
for c in range(n_codons):
# 3 alignment slices. One for each codon position.
slices = [self.sequenceSlice((3 * c) + pos-1) for pos in [1, 2, 3]]
# The different codons found for the current triplet of sites.
codons = set([codon.lower() for codon in ["%s%s%s" % nnn for nnn in zip(
slices[0], slices[1], slices[2])]])
# These are not Codon instances, this probably doesn't deal properly with ambiguity codes.
# Record the amino-acids coded at the 3 nucleotides site, and the codons used for this aa.
aas_codons = {}
for codon in codons:
# Determine the corresponding amino-acid.
if codon == '---':
aa = '-'
elif code.has_key(codon):
aa = code[codon]
elif 'n' in codon:
# This is a simplification. Some "degenerate" codons
# can still code an unambiguous amino-acid.
aa = 'x'
else:
gm.append("Codon %s is not defined in the chosen code "
"or translation table." % codon)
gm.append("%s" % str(code))
raise P4Error(gm)
# Record the codon used for the aa.
if aas_codons.has_key(aa):
aas_codons[aa].append(codon)
else:
aas_codons[aa] = [codon]
# Determine which positions in the triplet are degenerate.
codon_mask = [False, False, False]
# Loop over the recorded amino-acids.
for aa in aas_codons.keys():
if len(aas_codons[aa]) > 1:
# Several codons have been found at this triplet for the amino-acid aa.
# For each position, count the number of different nucleotides
# present in the used codons.
degeneracy = [len(set([cod[0] for cod in aas_codons[aa]])),
len(set([cod[1] for cod in aas_codons[aa]])),
len(set([cod[2] for cod in aas_codons[aa]]))]
if all_3rd_positions:
# Put a position in the mask if it is already in the mask
# or if it is degenerate, or if it is a 3rd position.
codon_mask = [codon_mask[pos-1] or (degeneracy[pos-1] > 1)
for pos in [1, 2]] + [True]
else:
# Put a position in the mask if it is already in the mask
# or if it is degenerate.
codon_mask = [codon_mask[pos-1] or (degeneracy[pos-1] > 1)
for pos in [1, 2, 3]]
if all(codon_mask):
# All positions of the triplet have been found to contribute to
# some codon degeneracy somewhere in the alignment.
# There is no need to search further.
break
# Append the codon mask to the mask.
mask += CAT(map(lambda b: "1" if b else "0", codon_mask))
return mask
Alignment.getDegenerateSitesMask = getDegenerateSitesMask
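# Hedged usage sketch (method names are taken from the docstring above; the
# alignment file is hypothetical; read() and var come from the p4 imports):
# read('genes.nex')
# a = var.alignments[0]
# mask = a.getDegenerateSitesMask(transl_table=1)
# degeneracy_free = a.getSubsetUsingMask(mask, inverse=True)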
def pseudoTranslate(self, transl_table=1, out_type="standard", code=None):
"""Returns a pseudo protein alignment from *self*, a DNA alignment.
The result is of datatype standard instead of protein, which allows
the use of special recodings, like distinguishing between two types
of serines, like in :meth:`Alignment.recode23aa()`.
*self* is translated using :attribute:`Code(transl_table).code`.
Alternatively, the genetic code can be provided through the parameter *code*.
If such a code is provided, the value of *transl_table* is ignored.
The parameter *code* can take two types of values:
1) It can be a string naming the code to use, as defined in Biopython's
`CodonTable.unambiguous_dna_by_name.keys()`
2) It can be a dictionnary *code* whose keys are codons, and the values are
the corresponding amino-acids. All triplets present in the matrix should
also be present in the code dictionnary, except triplets of indels. Codons
and the corresponding amino-acids are expected to be in lower case.
It may be possible to use a code based on another codon length as 3,
but this has not been tested as of June 2012.
At the moment, we can only do translations where the sequences are phased
with the coding frame, ie the first sequence position is the first position
of the codon, and the last sequence position should be a last codon position.
The default behaviour is to use translation table 1, that is the standard genetic code.
Other available translation tables, this week::
if transl_table == 1: # standard
elif transl_table == 2: # vertebrate mito
elif transl_table == 4: # Mold, Protozoan,
# and Coelenterate Mitochondrial Code
# and the Mycoplasma/Spiroplasma Code
elif transl_table == 5: # invertebrate mito
elif transl_table == 9: # echinoderm mito
and now 6, 10, 11, 12, 13, 14, 21.
(These are found in p4.GeneticCode.py or in :class:`Code`)
*transl_table* may also be provided as text consisting of blank-separated elements.
Each element consists of n characters, where n is the number of defined codons.
The first element lists the coded (pseudo-)amino-acids.
The second elements describes whether a codon can be a start codon ('M') or not ('-').
The other elements correspond to the (pseudo-)nucleotides at the successive codon positions.
Example::
FFJJZZZZYY**CC*WBBBBPPPPHHQQUUUUIIIMTTTTNNKKXXOOVVVVAAAADDEEGGGG
---M---------------M------------MMMM---------------M------------
TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
"""
gm = ['p4.alignment_recoding.pseudoTranslate()']
if self.dataType != 'dna':
gm.append("Self should be a DNA alignment")
raise P4Error(gm)
if code is None:
#from GeneticCode import Code
code = Code(transl_table, in_type="dna", out_type=out_type).code
codelength = Code(transl_table).codelength
else:
if isinstance(code, types.StringType):
code = getBiopythonCode(code) # defined in code_utils.py
# We assume that the "codons" have all the same length,
# and we look at the first codon in the dictionary to know this length.
codelength = len(code.keys()[0])
# We use standard type, because, depending on the code used to make the translation,
# we may get something that contains symbols not corresponding to normal amino-acids.
out_type = "standard"
if self.length % codelength != 0:
gm.append("The length of self should be a multiple of %i" % codelength)
raise P4Error(gm)
ali = self.dupe()
ali.dataType = out_type
ali.length = self.length / codelength
ali.symbols = CAT(sorted(set(code.values())))
ali.equates = {}
ali.dim = len(ali.symbols)
ali.nexusSets = None
ali.parts = []
ali.excludeDelete = None
for seq in ali.sequences:
# Initialize an all-gap sequence.
seq.sequence = ['-'] * ali.length
seq.dataType = out_type
for i in range(len(self.sequences)):
# the original sequence
dnaSeq = self.sequences[i].sequence
# the future pseudo-translation
pseudoProtSeq = ali.sequences[i].sequence
for j in range(ali.length):
theCodon = dnaSeq[(j * codelength):((j+1) * codelength)]
if code.has_key(theCodon):
pseudoProtSeq[j] = code[theCodon]
elif theCodon == '-' * codelength:
# full indel
pseudoProtSeq[j] = '-'
elif theCodon.count('-'):
# partial indel
gm.append(" seq %i, position %4i, dnaSeq %4i, codon '%s' is incomplete" % (
i, j, (j*codelength), theCodon))
raise P4Error(gm)
else:
# Should we use a CodonTranslationError (defined in code_utils.py) here ?
gm.append(" seq %i position %4i, dnaSeq %4i, codon '%s' is not a known codon" % (
i, j, (j*codelength), theCodon))
raise P4Error(gm)
for seq in ali.sequences:
# Convert from list to string.
#s.sequence = string.join(s.sequence, '')
seq.sequence = CAT(seq.sequence)
#print s.sequence
return ali
Alignment.pseudoTranslate = pseudoTranslate
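# Usage sketch (hypothetical file name; in p4, read() loads alignments into
# var.alignments):
#     read("myGene.nex")
#     dnaAli = var.alignments[0]
#     protAli = dnaAli.pseudoTranslate(transl_table=1)  # standard genetic code
#     protAli.writeNexus("myGene_translated.nex")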
def recode23aa(self):
"""
This method gives a pseudo-translation of *self* where leucine, arginine and
serine are coded differently depending on the codon category
(CTN -> B, TTR -> J, AGR -> O, CGN -> U, AGY -> X, TCN -> Z)
The original letters R, S and L are not used.
Current implementation is based on the plastid/bacteria genetic code.
The results should be valid also if the standard genetic code is assumed.
"""
table = """FFJJZZZZYY**CC*WBBBBPPPPHHQQUUUUIIIMTTTTNNKKXXOOVVVVAAAADDEEGGGG
---M---------------M------------MMMM---------------M------------
TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
"""
return self.pseudoTranslate(transl_table=table)
Alignment.recode23aa = recode23aa
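# Usage sketch (assumes dnaAli is an in-frame DNA Alignment); the result is a
# protein-like alignment over 23 states, with e.g. leucine split into 'b'
# (CTN) and 'j' (TTR):
#     recoded = dnaAli.recode23aa()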
def triplet_slice(self, pos):
"""This method returns a list of 3 successive sequence slices from *self*,
starting at position *pos*."""
return [self.sequenceSlice(pos+i) for i in range(3)]
Alignment.triplet_slice = triplet_slice
# generalization of triplet_slice, not tested
def nuplet_slice(self, pos, n=3):
"""This method returns a list of n successive sequence slices from *self*,
starting at position *pos*."""
return [self.sequenceSlice(pos+i) for i in range(n)]
Alignment.nuplet_slice = nuplet_slice
def iter_codon_slices(self, code):
"""This method iterates over columns of codons of *self*."""
for i in xrange(0, self.length, 3):
triple = self.triplet_slice(i)
yield codons_from_triplet_slice(triple, code)
Alignment.iter_codon_slices = iter_codon_slices
# generalization, not tested
#def iter_codon_slices(self, code, n=3):
# """This method iterates over columns of codons of *self*."""
# for i in xrange(0, self.length, n):
# nuple = self.nuplet_slice(i, n)
# yield codons_from_nuplet_slice(nuple, code)
#Alignment.iter_codon_slices = iter_codon_slices
def getHasAAsMask(self, aas, transl_table=1, code=None):
"""
This method returns a mask corresponding to the triplets of sites where
at least one amino-acid present in *aas* is coded. This is intended
to be used for submatrix extraction using :meth:`Alignment.getSubsetUsingMask`
(with the option *inverse=True*, to get the sites with none of the amino-acids
present in *aas*).
The matrix is expected to start at a first codon position and stop at a third
codon position.
*transl_table* is an integer used to determine under which genetic code
the codons are to be interpreted. The default value of 1 corresponds to the
standard genetic code. Other values can be found in p4.GeneticCode.py
Alternatively, the genetic code can be provided through the parameter *code*.
If such a code is provided, the value of *transl_table* is ignored.
The parameter *code* can take two types of values:
1) It can be a string naming the code to use, as defined in Biopython's
`CodonTable.unambiguous_dna_by_name.keys()`
2) It can be a dictionary *code* whose keys are codons, and the values are
the corresponding amino-acids. All triplets present in the matrix should
also be present in the code dictionary, except triplets of indels. Codons
and the corresponding amino-acids are expected to be in lower case.
"""
gm = ["Alignment.getHasAAsMask()"]
if code is None:
code = Code(transl_table).code
elif isinstance(code, types.StringType):
code = getBiopythonCode(code)
else:
msg = "code must be a dictionary, or a string naming the code in Biopython."
assert isinstance(code, dict), msg
return CAT(map(
lambda c_slice: "111" if codon_slice_has_aas(c_slice, aas) else "000",
self.iter_codon_slices(code)))
Alignment.getHasAAsMask = getHasAAsMask
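# Usage sketch (hypothetical names; amino-acids given as lower-case one-letter
# codes, as required above):
#     mask = dnaAli.getHasAAsMask(['s'], transl_table=1)
#     noSerAli = dnaAli.getSubsetUsingMask(mask, inverse=True)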
def getConstantAAMask(self, transl_table=1, code=None, restrict_to=[]):
"""
This method returns a mask corresponding to the triplets of sites where
only one amino-acid is coded. This is intended to be used for submatrix
extraction using :meth:`Alignment.getSubsetUsingMask` (with the option
*inverse=True*, to get the non-constant sites).
If *restrict_to* is not empty, only those sites that are constant and code
for one of the amino-acids the one-letter code of which is in *restrict_to*
will be considered constant.
The matrix is expected to start at a first codon position and stop at a third
codon position.
*transl_table* is an integer used to determine under which genetic code
the codons are to be interpreted. The default value of 1 corresponds to the
standard genetic code. Other values can be found in p4.GeneticCode.py
Alternatively, the genetic code can be provided through the parameter *code*.
If such a code is provided, the value of *transl_table* is ignored.
The parameter *code* can take two types of values:
1) It can be a string naming the code to use, as defined in Biopython's
`CodonTable.unambiguous_dna_by_name.keys()`
2) It can be a dictionary *code* whose keys are codons, and the values are
the corresponding amino-acids. All triplets present in the matrix should
also be present in the code dictionary, except triplets of indels. Codons
and the corresponding amino-acids are expected to be in lower case.
"""
gm = ["Alignment.getConstantAAMask()"]
if code is None:
code = Code(transl_table).code
elif isinstance(code, types.StringType):
code = getBiopythonCode(code) # defined in code_utils.py
else:
msg = "code must be a dictionary, or a string naming the code in Biopython."
assert isinstance(code, dict), msg
return CAT(
map(lambda c_slice: "111" if codon_slice_is_constant_aa(c_slice, restrict_to) else "000",
self.iter_codon_slices(code)))
Alignment.getConstantAAMask = getConstantAAMask
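# Usage sketch: keep only the columns that are variable at the amino-acid level.
#     mask = dnaAli.getConstantAAMask(transl_table=1)
#     variableAli = dnaAli.getSubsetUsingMask(mask, inverse=True)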
def getDegenerateCodonsMask(self, transl_table=1, code=None, restrict_to=[], ignore=[]):
"""
This method returns a mask corresponding to the triplets of sites where
degeneracy has been observed. This is intended to be used for submatrix
extraction using :meth:`Alignment.getSubsetUsingMask` (with the option
*inverse=True*, to get the sites with no degenerate codons).
If *restrict_to* is not empty, only those amino-acids whose one-letter code
is in *restrict_to* are considered.
If *ignore* is not empty, only those amino-acids whose one-letter code
is not in *ignore* are considered.
The matrix is expected to start at a first codon position and stop at a third
codon position.
*transl_table* is an integer used to determine under which genetic code
the codons are to be interpreted. The default value of 1 corresponds to the
standard genetic code. Other values can be found in p4.GeneticCode.py
Alternatively, the genetic code can be provided through the parameter *code*.
If such a code is provided, the value of *transl_table* is ignored.
The parameter *code* can take two types of values:
1) It can be a string naming the code to use, as defined in Biopython's
`CodonTable.unambiguous_dna_by_name.keys()`
2) It can be a dictionary *code* whose keys are codons, and the values are
the corresponding amino-acids. All triplets present in the matrix should
also be present in the code dictionary, except triplets of indels. Codons
and the corresponding amino-acids are expected to be in lower case.
"""
gm = ["Alignment.getDegenerateCodonsMask()"]
if code is None:
#code = GeneticCode(transl_table).code
code = Code(transl_table).code
elif isinstance(code, types.StringType):
code = getBiopythonCode(code) # defined in code_utils.py
else:
msg = "code must be a dictionary, or a string naming the code in Biopython."
assert isinstance(code, dict), msg
# Several alternative implementations were tried here to compare execution
# speed (generator expressions, explicit loops over triplets, map/lambda
# variants); they all produce the same mask as the expression below.
return CAT(map(
lambda c_slice: "111" if codon_slice_is_degenerate(
c_slice, restrict_to, ignore) else "000",
self.iter_codon_slices(code)))
Alignment.getDegenerateCodonsMask = getDegenerateCodonsMask
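# Usage sketch: remove the codon columns where synonymous variation is observed.
#     mask = dnaAli.getDegenerateCodonsMask(transl_table=1)
#     nonDegenAli = dnaAli.getSubsetUsingMask(mask, inverse=True)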
def getDegenerateSiteMaskForPos(self, pos, transl_table=1, code=None, restrict_to=[], ignore=[]):
"""
This method returns a mask corresponding to the sites where degeneracy has
been observed if they correspond to a *pos*-th codon position.
This is intended to be used for submatrix extraction using
:meth:`Alignment.getSubsetUsingMask` (with the option *inverse=True*, to
get the degeneracy-free sites).
If *restrict_to* is not empty, only those amino-acids whose one-letter code
is in *restrict_to* are considered.
If *ignore* is not empty, only those amino-acids whose one-letter code
is not in *ignore* are considered.
The matrix is expected to start at a first codon position and stop at a third
codon position.
*transl_table* is an integer used to determine under which genetic code
the codons are to be interpreted. The default value of 1 corresponds to the
standard genetic code. Other values can be found in p4.GeneticCode.py
Alternatively, the genetic code can be provided through the parameter *code*.
If such a code is provided, the value of *transl_table* is ignored.
The parameter *code* can take two types of values:
1) It can be a string naming the code to use, as defined in Biopython's
`CodonTable.unambiguous_dna_by_name.keys()`
2) It can be a dictionary *code* whose keys are codons, and the values are
the corresponding amino-acids. All triplets present in the matrix should
also be present in the code dictionary, except triplets of indels. Codons
and the corresponding amino-acids are expected to be in lower case.
"""
gm = ["Alignment.getDegenerateSiteMaskForPos()"]
if code is None:
code = Code(transl_table).code
elif isinstance(code, types.StringType):
code = getBiopythonCode(code) # defined in code_utils.py
else:
msg = "code must be a dictionary, or a string naming the code in Biopython."
assert isinstance(code, dict), msg
def pos_mask(i):
if i == pos:
return "1"
else:
return "0"
def triplet_mask(selected):
if selected:
return CAT(map(pos_mask, [1, 2, 3]))
else:
return "000"
# Iterate over the slices, find the triplets that will be included in the mask
# (those where degeneracy occurs), generate the corresponding mask portions,
# and join the mask portions to make the matrix mask.
return CAT(map(
triplet_mask, [codon_position_is_degenerate(
cod_slice, pos, restrict_to, ignore) for cod_slice in self.iter_codon_slices(code)]))
Alignment.getDegenerateSiteMaskForPos = getDegenerateSiteMaskForPos
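# Usage sketch: mask only the third codon positions of the degenerate columns.
#     mask3 = dnaAli.getDegenerateSiteMaskForPos(3, transl_table=1)
#     cleanedAli = dnaAli.getSubsetUsingMask(mask3, inverse=True)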
def degenerate(self, code="Standard", positions=[1, 2, 3], restrict_to=[], ignore=[], sub_code=None):
"""
This method returns a copy of *self* where the codons are replaced with degenerate versions.
If *restrict_to* is not empty, only those codons that code amino-acids listed in *restrict_to*
will be degenerated.
If *ignore* is not empty, those codons that code amino-acids listed in *ignore*
will not be degenerated.
*positions* determines which codon positions are degenerated. By default, whole
codons are degenerated (where there is degeneracy, of course).
*code* is the Biopython name of the genetic code under which degeneracy has to be interpreted
or a dictionary converting from codons to amino-acids (all in lower case).
Default is to use the standard genetic code. Possible values for *code* are:
%s
*sub_code*, if provided, should be a dictionary associating amino-acids to codons
(all in lower case). For the purpose of defining degeneracy groups, the codons present
in *sub_code* will be considered as coding for the amino-acid defined there instead of
the one defined by *code*. This can be used for instance to keep two distinct types of
serine codons, with degeneracy only within each type. The codons still count as coding
their original amino-acid with respect to the *restrict_to* and *ignore* options.
""" % "\n".join(sorted(CodonTable.unambiguous_dna_by_name.keys()))
if isinstance(code, types.StringType):
code = getBiopythonCode(code) # defined in code_utils.py
else:
msg = "code must be a dictionary, or a string naming the code in Biopython."
assert isinstance(code, dict), msg
# codons belonging to different sub-groups of codons for one amino-acid
# can be considered as coding different amino-acids
# (sub-amino-acids of the "normal" amino-acid, for instance two types of serine)
if sub_code is None:
sub_code = {}
else:
assert isinstance(sub_code, dict), "sub_code must be a dictionary."
sub_code = copy.copy(sub_code) # otherwise there are side effects:
# the content of sub_code can be modified
# in the calling context
if any([sub_aa in code.values() for sub_aa in sub_code.values()]):
msg = CAT(["Note that at least one sub-amino-acid provided in sub_code ",
"is identical to an amino-acid provided by the chosen genetic code.\n",
"The sub-amino-acids are:\n%s\n" % ", ".join(
[str(aa) for aa in sub_code.values()])])
warnings.warn(msg)
# Ensure the amino-acids are in lowercase.
restrict_to = set([aa.lower() for aa in restrict_to])
ignored_aas = set([aa.lower() for aa in ignore])
# Find the groups of synonymous codons.
# The keys are amino-acids, the values are lists of codons that code the amino-acid.
aas_codons = {}
for codon in code.keys():
aa = code[codon]
if not sub_code.has_key(codon):
sub_code[codon] = aa # sub_aa will be the same as aa
sub_aa = sub_code[codon]
# Only consider codons that are compatible with the restriction rule, if there is one.
if (len(restrict_to) == 0 or aa.lower() in restrict_to) and not (aa.lower() in ignored_aas) :
#if aas_codons.has_key(aa):
if aas_codons.has_key(sub_aa):
#aas_codons[aa].append(codon)
aas_codons[sub_aa].append(codon)
else:
#aas_codons[aa] = [codon]
aas_codons[sub_aa] = [codon]
# Build a conversion dictionary.
# The keys are the codons, the values their degenerate replacements.
cod2degen = {}
for codons in aas_codons.values():
# Compute degeneracy values at the 3 positions
# The degenerate value at a position is the binary union
# of the values of the nucleotides found at that position.
# nuc2val and reduce_by_or are defined in code_utils.py
degen1 = reduce_by_or([nuc2val[cod[0]] for cod in codons])
degen2 = reduce_by_or([nuc2val[cod[1]] for cod in codons])
degen3 = reduce_by_or([nuc2val[cod[2]] for cod in codons])
# Compute the string representation of the resulting degenerate codon.
# val2nuc is defined in code_utils.py
degenerate_codon = val2nuc[degen1] + val2nuc[degen2] + val2nuc[degen3]
# Associate this representation to all the synonymous codons it represents.
for cod in codons:
cod2degen[cod.lower()] = degenerate_codon.lower()
# If restrict_to is not empty, it is likely that not all codons
# are present in cod2degen, but the code_utils.recode_sequence function
# will just keep those codons as is.
# Make a copy of self.
ali = self.dupe()
for seq in ali.sequences:
# Recode the sequence using the conversion dictionary built previously.
# recode_sequence is defined in code_utils.py
seq.sequence = recode_sequence(seq.sequence, cod2degen, positions, code=code)
return ali
Alignment.degenerate = degenerate
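# Usage sketch (hypothetical sub-amino-acid labels, all lower case): degenerate
# only third codon positions while keeping the two serine codon families
# distinct through sub_code.
#     degenAli = dnaAli.degenerate(
#         code="Standard", positions=[3],
#         sub_code={"tct": "z1", "tcc": "z1", "tca": "z1", "tcg": "z1",
#                   "agt": "z2", "agc": "z2"})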
def recodeRY(self, positions=[1, 2, 3]):
"""
This method returns a copy of *self* where purines are replaced with IUPAC ambiguity code R
and pyrimidines are replaced with IUPAC ambiguity code Y.
*positions* determines which codon positions are recoded. By default, all three
codon positions are recoded.
"""
recode_table = {}
# Make a copy of self.
ali = self.dupe()
for seq in ali.sequences:
new_seq = []
pos = 0
while pos < len(seq.sequence):
# codon_position is defined in code_utils.py
if codon_position(pos + 1) in positions:
letter = seq.sequence[pos]
if not recode_table.has_key(letter):
# nuc2val, R and Y are defined in code_utils.py
val = nuc2val[letter]
if (val & R) and (val & Y):
# letter is an ambiguity code representing both purines and pyrimidines.
recode_table[letter] = "n"
elif val & R:
recode_table[letter] = "r"
elif val & Y:
recode_table[letter] = "y"
else:
msg = "Letter %s should be the code for a gap ('-')." % letter
assert letter == "-", msg
recode_table[letter] = "-"
new_seq.append(recode_table[letter])
else:
# For consistency, all characters are re-written in lower case.
new_seq.append(seq.sequence[pos].lower())
pos += 1
seq.sequence = CAT(new_seq)
return ali
Alignment.recodeRY = recodeRY
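# Usage sketch: RY-recode only the third codon positions.
#     ryAli = dnaAli.recodeRY(positions=[3])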
def degenerateByCodonColumn(self, code="Standard", restrict_to=[]):
"""
This method returns a copy of *self* where codons coding for the same
amino-acid in a given column of the matrix are replaced by their union
(i.e. degenerated), contrary to `degenerate` which does this regardless
of the codons present, just using the degeneracy observed in the genetic code.
If *restrict_to* is not empty, only those codons that code amino-acids listed
in *restrict_to* will be degenerated.
*code* is the Biopython name of the genetic code under which degeneracy has
to be interpreted or a dictionary converting from codons to amino-acids.
Default is to use the standard genetic code. Possible values for *code* are:
%s
""" % "\n".join(sorted(CodonTable.unambiguous_dna_by_name.keys()))
if isinstance(code, types.StringType):
code = getBiopythonCode(code) # defined in code_utils.py
else:
msg = "code must be a dictionary, or a string naming the code in Biopython."
assert isinstance(code, dict), msg
# Ensure the amino-acids are in lowercase.
restrict_to = set([aa.lower() for aa in restrict_to])
# The matrix will be rebuilt column-wise,
# and then the sequences will be rebuilt from these columns of codons.
new_slices = []
for cod_slice in iter_codon_slices(self, code):
new_slices.append(degenerate_codon_slice(cod_slice, restrict_to))
# Make a copy of self.
ali = self.dupe()
# Loop over the sequences.
for i in range(self.nTax):  # nTax sequences; nChar would be the number of sites
ali.sequences[i].sequence = CAT([str(cod_slice[i]) for cod_slice in new_slices])
return ali
Alignment.degenerateByCodonColumn = degenerateByCodonColumn
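# Usage sketch: only degeneracy actually observed within each codon column is
# applied, in contrast with degenerate() above.
#     colDegenAli = dnaAli.degenerateByCodonColumn(code="Standard")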
def indelizeCodons(self, aas, code="Standard"):
"""
This method returns a copy of *self* where the codons corresponding to
amino-acids listed in *aas* are replaced by indels.
*code* is the Biopython name of the genetic code under which degeneracy has to be interpreted
or a dictionary converting from codons to amino-acids.
Default is to use the standard genetic code. Possible values for *code* are:
%s
""" % "\n".join(sorted(CodonTable.unambiguous_dna_by_name.keys()))
if isinstance(code, types.StringType):
code = getBiopythonCode(code) # defined in code_utils.py
else:
msg = "code must be a dictionary, or a string naming the code in Biopython."
assert isinstance(code, dict), msg
# Ensure the amino-acids are in lowercase.
aas = set([aa.lower() for aa in aas])
# Build a conversion dictionary.
# The keys are the codons, the values their "indelized" replacements.
cod2indels = {}
for codon in code.keys():
if code[codon] in aas:
cod2indels[codon] = "---"
else:
cod2indels[codon] = codon
# Make a copy of self.
ali = self.dupe()
for seq in ali.sequences:
# Recode the sequence using the conversion dictionary built previously.
# recode_sequence is defined in code_utils.py
seq.sequence = recode_sequence(seq.sequence, cod2indels, code=code)
return ali
Alignment.indelizeCodons = indelizeCodons
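# Usage sketch: replace all tryptophan codons by gaps.
#     noTrpAli = dnaAli.indelizeCodons(['w'], code="Standard")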
def blend_matrices(*matrices):
"""
This function returns an alignment made by taking columns in turn from the
matrices provided as arguments, one column at a time, cycling between the
matrices and starting with the first one. All matrices should have the same length,
and this length should be a multiple of the number of matrices.
They should also have the same taxa, and the taxa should be in the same order
in the different matrices.
"""
mat_len = matrices[0].nChar
msg = "All matrices should have the same length."
assert all([mat.nChar == mat_len for mat in matrices[1:]]), msg
msg = "The length of the matrices should be a multiple of the number of matrices to blend."
assert mat_len % len(matrices) == 0, msg
# Start with a copy of the first matrix.
ali = matrices[0].dupe()
for i in range(ali.nTax):
ali.sequences[i].sequence = blend_sequences([m.sequences[i].sequence for m in matrices])
return ali
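# Usage sketch: interleave two matrices of equal length and identical taxa.
#     blended = blend_matrices(ali1, ali2)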
def blend_sequences(sequences):
    """This function returns a string made by taking characters in turn from the
    strings in the list *sequences*. These strings should all have the same length,
    and this length should be a multiple of the number of strings."""
    n_seq = len(sequences)
    return CAT([sequences[i % n_seq][i] for i in range(len(sequences[-1]))])
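# For example, blend_sequences(["abcd", "ABCD"]) returns "aBcD".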
def treeFinderMAPAnalysis(alignment, groups,
gamma=True, invariant=True, bootstrap=False,
nreplicates=100,
remove_files=False, run_analysis=True, verbose=False):
"""
Uses TreeFinder to estimate a Maximum Likelihood tree using the MAP
substitution model for grouped amino-acids.
- *alignment*: p4 alignment object of original (un-recoded) protein data from
which the "groups" are derived
- *groups*: list of grouped amino-acids, possibly resulting from
:meth:`Alignment.getKosiolAISGroups()` or :meth:`Alignment.getMinmaxChiSqGroups()`
- *gamma*: include gamma distribution of among-site rate variation
- *bootstrap*: run bootstrap analysis
- *nreplicates*: number of bootstrap replicates
- *invariant*: include a proportion of invariant sites
- *run_analysis*: run the analysis if TreeFinder in $PATH, else just write the
control file
- *remove_files*: remove analysis files. Only available if run_analysis=True
"""
gm = ["p4.alignment_recoding.treeFinderMAPAnalysis()"]
if not isinstance(alignment, Alignment):
msg = "alignment must be a Alignment object"
gm.append(msg)
raise P4Error(gm)
if alignment.dataType != "protein":
msg = "alignment should be the original protein data from" + \
"which the groups were defined. Doing nothing."
gm.append(msg)
raise P4Error(gm)
for param in [gamma, invariant, bootstrap,
remove_files, run_analysis, verbose]:
if not isinstance(param, types.BooleanType):
msg = "%s value must be either True or False" % param
gm.append(msg)
raise P4Error(gm)
if not isinstance(nreplicates, types.IntType):
msg = "nreplictes must be an integer"
gm.append(msg)
raise P4Error(gm)
if run_analysis:
if not func.which2("tf"):
msg = "tf (treefinder) is not in your $PATH" + \
"Cannot run analysis"
gm.append(msg)
raise P4Error(gm)
datafile_name = "tf_data.phy"
#tf commands
tls = """ReconstructPhylogeny[
"%(datafile)s",
SubstitutionModel->MAP[%(map)s][Optimum,Optimum]%(ifH)s,
WithEdgeSupport->%(bootstrap)s%(nreplicates)s
],
"%(outfile)s",SaveReport"""
od = {}
od["datafile"] = datafile_name
if gamma:
if invariant:
od["ifH"] = ":GI[Optimum]"
else:
od["ifH"] = ":G[Optimum]"
else:
if invariant:
od["ifH"] = ":I[Optimum]"
else:
od["ifH"] = ""
if bootstrap:
od["bootstrap"] = "True"
od["nreplicates"] = ",NReplicates->%i" % nreplicates
else:
od["bootstrap"] = "False"
od["nreplicates"] = ""
od["outfile"] = "tf_reconstruction.output"
od["map"] = ",".join(['"%s"' % i for i in [group.upper() for group in groups]])
if run_analysis:
#Write data file
alignment.writePhylip(datafile_name)
#Write control file
tl_file = "tf_control.tl"
fh = open(tl_file, "w")
fh.write(tls % od)
fh.close()
if verbose:
direct = subprocess.STDOUT
else:
direct = open("/dev/null", "w")
child = subprocess.Popen("tf tf_control.tl", stderr=direct, shell=True)
if verbose:
print "Running TreeFinder, this could take some time...",
sys.stdout.flush()
child.communicate()
if verbose:
print "done."
sys.stdout.flush()
# This doesn't seem to work, why?
#while child.poll() is None:
# time.sleep(60)
# if verbose:
# sys.stdout.write(".")
# sys.stdout.flush()
if child.returncode != 0:
msg = "TreeFinder returned error code %s"
gm.append(msg % (child.returncode))
raise P4Error(gm)
fh = open(od["outfile"], "r")
line = fh.readlines()[1]
fh.close()
rd = {}
#Likelihood
rd["Likelihood"] = float(line[line.index("Likelihood->")+12:line.index(",")])
#Tree
ts = line[line.index("Phylogeny->")+11:line.index("SubstitutionModel->")-1]
rd["Phylogeny"] = ts
#SubstitutionModel
sm = line[line.index("SubstitutionModel->")+19:line.index("OSubstitutionModel->")-1]
rd["SubstitutionModel"] = sm
#OSubstitutionModel
osm = line[line.index("OSubstitutionModel->")+20:line.index("OEdgeOptimizationOff->")-1]
rd["OSubstitutionModel"] = osm
#NSites
ns = line[line.index("NSites->")+8:line.index("NParameters->")-1]
rd["Nsites"] = int(ns)
#NParameters
np = line[line.index("NParameters->")+13:line.index("AIC->")-1]
rd["NParameters"] = int(np)
#AIC
rd["AIC"] = float(line[line.index("AIC->")+5:line.index("AICc->")-1])
#AICc->
rd["AICc"] = float(line[line.index("AICc->")+6:line.index("HQ->")-1])
#HQ
rd["HQ"] = float(line[line.index("HQ->")+4:line.index("BIC->")-1])
#BIC
rd["BIC"] = float(line[line.index("BIC->")+5:line.index("Checksum->")-1])
#LikelihoodTime
lt = line[line.index("LikelihoodTime->")+16:line.index("LikelihoodMemory->")-1]
rd["LikelihoodTime"] = float(lt)
#LikelihoodMemory
lm = line[line.index("LikelihoodMemory->")+18:-3]
rd["LikelihoodMemory"] = int(lm)
#Make a tree object
tree = rd["Phylogeny"].replace("{", "(")
tree = tree.replace("}", ")")
tree = tree.replace("\"", "")
tree = tree + ";"
if bootstrap:
#Tree viewer has the brlen before bootstrap value plus an extra colon
# turn "xxx):0.00001:87.999,yyy" into "xxx)87.999:0.00001,yyy"
patt = re.compile(r"\):([0-9]+\.[0-9e-]+):([0-9]+\.[0-9e-]*)")
repl = r")\2:\1"
tree = re.sub(patt, repl, tree)
origw = var.warnReadNoFile
var.warnReadNoFile = False
read(tree)
var.warnReadNoFile = origw
result_tree = var.trees.pop()
if bootstrap:
# Format support values as whole-number percentages
for node in result_tree.iterInternalsNoRoot():
node.name = "%2.f" % float(node.name)
if remove_files:
os.remove("tf_control.tl")
os.remove("tf_data.phy")
os.remove("tf_reconstruction.output")
if verbose:
print "\n"
result_tree.draw()
print "\nLikelihood: %.4f\n" % rd["Likelihood"]
return result_tree, rd
else:
print tls % od
return (None, None)
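# Usage sketch (hypothetical objects; `groups` as returned e.g. by
# Alignment.getKosiolAISGroups() or Alignment.getMinmaxChiSqGroups()):
#     tree, report = treeFinderMAPAnalysis(protAli, groups, bootstrap=True,
#                                          nreplicates=100, verbose=True)
#     print report["Likelihood"]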
|
blaiseli/p4-phylogenetics
|
p4/alignment_recoding.py
|
Python
|
gpl-2.0
| 46,750
|
[
"Biopython"
] |
f3ac5350efe09dc5ff52ef296cc1f2030e677455f4258c13585d5b4b5f269451
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.view.controls import editors
class LabelDelegate(CustomDelegate):
__metaclass__ = DocumentationMetaclass
editor = editors.LabelEditor
def paint(self, painter, option, index):
painter.save()
self.drawBackground(painter, option, index)
checked = index.model().data(index, Qt.EditRole).toBool()
background_color = QtGui.QColor(index.model().data(index, Qt.BackgroundRole))
if option.state & QtGui.QStyle.State_Selected:
painter.fillRect(option.rect, option.palette.highlight())
elif not self.editable:
painter.fillRect(option.rect, option.palette.window())
else:
painter.fillRect(option.rect, background_color)
# drawControl() expects a QStyleOption, not a bool; build a checkbox
# style option reflecting the checked state (minimal fix: the original
# code passed the bool directly, which PyQt4 rejects with a TypeError).
check_option = QtGui.QStyleOptionButton()
check_option.rect = option.rect
check_option.palette = option.palette
if checked:
    check_option.state = option.state | QtGui.QStyle.State_On
else:
    check_option.state = option.state | QtGui.QStyle.State_Off
QtGui.QApplication.style().drawControl(QtGui.QStyle.CE_CheckBox,
                                       check_option,
                                       painter)
painter.restore()
|
kurtraschke/camelot
|
camelot/view/controls/delegates/labeldelegate.py
|
Python
|
gpl-2.0
| 2,178
|
[
"VisIt"
] |
7dbdaab260c29eb871ba533fe1bdd6f69a0269dfeb8999514f3b9c3bcfc94c84
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf import df
from pyscf.cc import ccsd
from pyscf.cc import _ccsd
from pyscf import __config__
MEMORYMIN = getattr(__config__, 'cc_ccsd_memorymin', 2000)
class RCCSD(ccsd.CCSD):
def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
ccsd.CCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
if getattr(mf, 'with_df', None):
self.with_df = mf.with_df
else:
self.with_df = df.DF(mf.mol)
self.with_df.auxbasis = df.make_auxbasis(mf.mol, mp2fit=True)
self._keys.update(['with_df'])
def reset(self, mol=None):
self.with_df.reset(mol)
return ccsd.CCSD.reset(self, mol)
def ao2mo(self, mo_coeff=None):
return _make_df_eris(self, mo_coeff)
def _add_vvvv(self, t1, t2, eris, out=None, with_ovvv=False, t2sym=None):
assert(not self.direct)
return ccsd.CCSD._add_vvvv(self, t1, t2, eris, out, with_ovvv, t2sym)
def _contract_vvvv_t2(mycc, mol, vvL, t2, out=None, verbose=None):
'''Ht2 = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)
Args:
    vvL : array-like, shape (nvir_pair, naux)
        Density-fitted three-center factors for the virtual-virtual block;
        blocks of the vvvv integrals are rebuilt from vvL on the fly.
'''
_dgemm = lib.numpy_helper._dgemm
time0 = logger.process_clock(), logger.perf_counter()
log = logger.new_logger(mol, verbose)
naux = vvL.shape[-1]
nvira, nvirb = t2.shape[-2:]
x2 = t2.reshape(-1,nvira,nvirb)
nocc2 = x2.shape[0]
nvir2 = nvira * nvirb
Ht2 = numpy.ndarray(x2.shape, buffer=out)
Ht2[:] = 0
max_memory = max(MEMORYMIN, mycc.max_memory - lib.current_memory()[0])
def contract_blk_(eri, i0, i1, j0, j1):
ic = i1 - i0
jc = j1 - j0
#:Ht2[:,j0:j1] += numpy.einsum('xef,efab->xab', x2[:,i0:i1], eri)
_dgemm('N', 'N', nocc2, jc*nvirb, ic*nvirb,
x2.reshape(-1,nvir2), eri.reshape(-1,jc*nvirb),
Ht2.reshape(-1,nvir2), 1, 1, i0*nvirb, 0, j0*nvirb)
if i0 > j0:
#:Ht2[:,i0:i1] += numpy.einsum('xef,abef->xab', x2[:,j0:j1], eri)
_dgemm('N', 'T', nocc2, ic*nvirb, jc*nvirb,
x2.reshape(-1,nvir2), eri.reshape(-1,jc*nvirb),
Ht2.reshape(-1,nvir2), 1, 1, j0*nvirb, 0, i0*nvirb)
#TODO: check if vvL can be entirely loaded into memory
nvir_pair = nvirb * (nvirb+1) // 2
dmax = numpy.sqrt(max_memory*.7e6/8/nvirb**2/2)
dmax = int(min((nvira+3)//4, max(ccsd.BLKMIN, dmax)))
vvblk = (max_memory*1e6/8 - dmax**2*(nvirb**2*1.5+naux))/naux
vvblk = int(min((nvira+3)//4, max(ccsd.BLKMIN, vvblk/naux)))
eribuf = numpy.empty((dmax,dmax,nvir_pair))
loadbuf = numpy.empty((dmax,dmax,nvirb,nvirb))
tril2sq = lib.square_mat_in_trilu_indices(nvira)
for i0, i1 in lib.prange(0, nvira, dmax):
off0 = i0*(i0+1)//2
off1 = i1*(i1+1)//2
vvL0 = _cp(vvL[off0:off1])
for j0, j1 in lib.prange(0, i1, dmax):
ijL = vvL0[tril2sq[i0:i1,j0:j1] - off0].reshape(-1,naux)
eri = numpy.ndarray(((i1-i0)*(j1-j0),nvir_pair), buffer=eribuf)
for p0, p1 in lib.prange(0, nvir_pair, vvblk):
vvL1 = _cp(vvL[p0:p1])
eri[:,p0:p1] = lib.ddot(ijL, vvL1.T)
vvL1 = None
tmp = numpy.ndarray((i1-i0,nvirb,j1-j0,nvirb), buffer=loadbuf)
_ccsd.libcc.CCload_eri(tmp.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*4)(i0, i1, j0, j1),
ctypes.c_int(nvirb))
contract_blk_(tmp, i0, i1, j0, j1)
time0 = log.timer_debug1('vvvv [%d:%d,%d:%d]'%(i0,i1,j0,j1), *time0)
return Ht2.reshape(t2.shape)
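# A dense reference for the blocked contraction above, practical only for very
# small systems (assumes a hypothetical full `vvvv` array instead of the
# streamed vvL factors):
#     Ht2_ref = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)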
class _ChemistsERIs(ccsd._ChemistsERIs):
def _contract_vvvv_t2(self, mycc, t2, direct=False, out=None, verbose=None):
assert(not direct)
return _contract_vvvv_t2(mycc, self.mol, self.vvL, t2, out, verbose)
def _make_df_eris(cc, mo_coeff=None):
eris = _ChemistsERIs()
eris._common_init_(cc, mo_coeff)
nocc = eris.nocc
nmo = eris.fock.shape[0]
nvir = nmo - nocc
nvir_pair = nvir*(nvir+1)//2
with_df = cc.with_df
naux = eris.naux = with_df.get_naoaux()
eris.feri = lib.H5TmpFile()
eris.oooo = eris.feri.create_dataset('oooo', (nocc,nocc,nocc,nocc), 'f8')
eris.ovoo = eris.feri.create_dataset('ovoo', (nocc,nvir,nocc,nocc), 'f8', chunks=(nocc,1,nocc,nocc))
eris.ovov = eris.feri.create_dataset('ovov', (nocc,nvir,nocc,nvir), 'f8', chunks=(nocc,1,nocc,nvir))
eris.ovvo = eris.feri.create_dataset('ovvo', (nocc,nvir,nvir,nocc), 'f8', chunks=(nocc,1,nvir,nocc))
eris.oovv = eris.feri.create_dataset('oovv', (nocc,nocc,nvir,nvir), 'f8', chunks=(nocc,nocc,1,nvir))
# nrow ~ 4e9/8/blockdim to ensure hdf5 chunk < 4GB
chunks = (min(nvir_pair,int(4e8/with_df.blockdim)), min(naux,with_df.blockdim))
eris.vvL = eris.feri.create_dataset('vvL', (nvir_pair,naux), 'f8', chunks=chunks)
Loo = numpy.empty((naux,nocc,nocc))
Lov = numpy.empty((naux,nocc,nvir))
mo = numpy.asarray(eris.mo_coeff, order='F')
ijslice = (0, nmo, 0, nmo)
p1 = 0
Lpq = None
for k, eri1 in enumerate(with_df.loop()):
Lpq = _ao2mo.nr_e2(eri1, mo, ijslice, aosym='s2', mosym='s1', out=Lpq)
p0, p1 = p1, p1 + Lpq.shape[0]
Lpq = Lpq.reshape(p1-p0,nmo,nmo)
Loo[p0:p1] = Lpq[:,:nocc,:nocc]
Lov[p0:p1] = Lpq[:,:nocc,nocc:]
Lvv = lib.pack_tril(Lpq[:,nocc:,nocc:])
eris.vvL[:,p0:p1] = Lvv.T
Lpq = Lvv = None
Loo = Loo.reshape(naux,nocc**2)
#Lvo = Lov.transpose(0,2,1).reshape(naux,nvir*nocc)
Lov = Lov.reshape(naux,nocc*nvir)
eris.oooo[:] = lib.ddot(Loo.T, Loo).reshape(nocc,nocc,nocc,nocc)
eris.ovoo[:] = lib.ddot(Lov.T, Loo).reshape(nocc,nvir,nocc,nocc)
ovov = lib.ddot(Lov.T, Lov).reshape(nocc,nvir,nocc,nvir)
eris.ovov[:] = ovov
eris.ovvo[:] = ovov.transpose(0,1,3,2)
ovov = None
mem_now = lib.current_memory()[0]
max_memory = max(0, cc.max_memory - mem_now)
blksize = max(ccsd.BLKMIN, int((max_memory*.9e6/8-nocc**2*nvir_pair)/(nocc**2+naux)))
oovv_tril = numpy.empty((nocc*nocc,nvir_pair))
for p0, p1 in lib.prange(0, nvir_pair, blksize):
oovv_tril[:,p0:p1] = lib.ddot(Loo.T, _cp(eris.vvL[p0:p1]).T)
eris.oovv[:] = lib.unpack_tril(oovv_tril).reshape(nocc,nocc,nvir,nvir)
oovv_tril = Loo = None
Lov = Lov.reshape(naux,nocc,nvir)
vblk = max(nocc, int((max_memory*.15e6/8)/(nocc*nvir_pair)))
vvblk = int(min(nvir_pair, 4e8/nocc, max(4, (max_memory*.8e6/8)/(vblk*nocc+naux))))
eris.ovvv = eris.feri.create_dataset('ovvv', (nocc,nvir,nvir_pair), 'f8',
chunks=(nocc,1,vvblk))
for q0, q1 in lib.prange(0, nvir_pair, vvblk):
vvL = _cp(eris.vvL[q0:q1])
for p0, p1 in lib.prange(0, nvir, vblk):
tmpLov = _cp(Lov[:,:,p0:p1]).reshape(naux,-1)
eris.ovvv[:,p0:p1,q0:q1] = lib.ddot(tmpLov.T, vvL.T).reshape(nocc,p1-p0,q1-q0)
vvL = None
return eris
def _cp(a):
return numpy.array(a, copy=False, order='C')
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = 'cc-pvdz'
mol.build()
mf = scf.RHF(mol).density_fit('weigend').run()
mycc = RCCSD(mf).run()
print(mycc.e_corr - -0.21337100025961622)
print("IP energies... (right eigenvector)")
part = None
e,v = mycc.ipccsd(nroots=3, partition=part)
print(e)
print(e[0] - 0.43364287418576897)
print(e[1] - 0.5188001071775572 )
print(e[2] - 0.67851590275796392)
print("IP energies... (left eigenvector)")
e,lv = mycc.ipccsd(nroots=3,left=True,partition=part)
print(e)
print(e[0] - 0.43364286531878882)
print(e[1] - 0.51879999865136994)
print(e[2] - 0.67851587320495355)
print("EA energies... (right eigenvector)")
e,v = mycc.eaccsd(nroots=3, partition=part)
print(e)
print(e[0] - 0.16730125785810035)
print(e[1] - 0.23999823045518162)
print(e[2] - 0.50960183439619933)
print("EA energies... (left eigenvector)")
e,lv = mycc.eaccsd(nroots=3, left=True, partition=part)
print(e)
print(e[0] - 0.16730137808538076)
print(e[1] - 0.23999845448276602)
print(e[2] - 0.50960182130968001)
e, v = mycc.eeccsd(nroots=4)
print(e[0] - 0.27575637238275519)
print(e[1] - 0.27575637238275519)
print(e[2] - 0.27575637238275519)
print(e[3] - 0.30068967373840394)
|
sunqm/pyscf
|
pyscf/cc/dfccsd.py
|
Python
|
apache-2.0
| 9,438
|
[
"PySCF"
] |
9406b503ded1dafc5014a939498ad9ba6b2af97bd78295776e667342c7b012ba
|
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
"""Simple protein analysis.
Example,
X = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGTRDRSDQHIQLQLSAESVGEVYIKSTETGQYLAMDTSGLLYGSQTPSEECLFLERLEENHYNTYTSKKHAEKNWFVGLKKNGSCKRGPRTHYGQKAILFLPLPV")
print X.count_amino_acids()
print X.get_amino_acids_percent()
print X.molecular_weight()
print X.aromaticity()
print X.instability_index()
print X.flexibility()
print X.isoelectric_point()
print X.secondary_structure_fraction()
print X.protein_scale(ProtParamData.kd, 9, 0.4)
"""
import sys
import ProtParamData, IsoelectricPoint
from ProtParamData import kd # Added by Iddo to enable the gravy method
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
#from BioModule import
class ProteinAnalysis:
"""Class containing methods for protein analysis.
The class init method takes only one argument, the protein sequence as a
string and builds a sequence object using the Bio.Seq module. This is done
just to make sure the sequence is a protein sequence and not anything else.
"""
def __init__(self, ProtSequence):
if ProtSequence.islower():
self.sequence = Seq(ProtSequence.upper(), IUPAC.protein)
else:
self.sequence = Seq(ProtSequence, IUPAC.protein)
self.amino_acids_content = None
self.amino_acids_percent = None
self.length = len(self.sequence)
def count_amino_acids(self):
"""Count standard amino acids, returns a dict.
Simply counts the number of times an amino acid is repeated in the protein
sequence. Returns a dictionary {AminoAcid:Number} and also stores the
dictionary in self.amino_acids_content.
"""
ProtDic = dict([ (k, 0) for k in IUPACData.protein_letters])
for i in ProtDic:
ProtDic[i]=self.sequence.count(i)
self.amino_acids_content = ProtDic
return ProtDic
def get_amino_acids_percent(self):
"""Calculate the amino acid content in percents.
The same as count_amino_acids only returns the Number in percentage of
entire sequence. Returns a dictionary and stores the dictionary in
self.amino_acids_content_percent.
input is the dictionary from CountAA.
output is a dictionary with AA as keys.
"""
if not self.amino_acids_content:
self.count_amino_acids()
PercentAA = {}
for i in self.amino_acids_content:
if self.amino_acids_content[i] > 0:
PercentAA[i]=self.amino_acids_content[i]/float(self.length)
else:
PercentAA[i] = 0
self.amino_acids_percent = PercentAA
return PercentAA
def molecular_weight(self):
"""Calculate MW from Protein sequence"""
# make local dictionary for speed
MwDict = {}
# remove a molecule of water from the amino acid weight.
for i in IUPACData.protein_weights:
MwDict[i] = IUPACData.protein_weights[i] - 18.02
MW = 18.02 # add just one water molecule for the whole sequence.
for i in self.sequence:
MW += MwDict[i]
return MW
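# Worked example (hedged round numbers; IUPACData.protein_weights['G'] is
# about 75.07): for the dipeptide "GG",
#     MW = 18.02 + 2 * (75.07 - 18.02) ~ 132.11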
def aromaticity(self):
"""Calculate the aromaticity according to Lobry, 1994.
Calculates the aromaticity value of a protein according to Lobry, 1994.
It is simply the relative frequency of Phe+Trp+Tyr.
"""
if not self.amino_acids_percent:
self.get_amino_acids_percent()
Arom= self.amino_acids_percent['Y']+self.amino_acids_percent['W']+self.amino_acids_percent['F']
return Arom
def instability_index(self):
"""Calculate the instability index according to Guruprasad et al 1990.
Implementation of the method of Guruprasad et al. 1990 to test a
protein for stability. Any value above 40 means the protein is unstable
(has a short half life).
See: Guruprasad K., Reddy B.V.B., Pandit M.W.
Protein Engineering 4:155-161(1990).
"""
#make the dictionary local for speed.
DIWV=ProtParamData.DIWV.copy()
score=0.0
for i in range(self.length - 1):
DiPeptide=DIWV[self.sequence[i]][self.sequence[i+1]]
score += DiPeptide
return (10.0/self.length) * score
def flexibility(self):
"""Calculate the flexibility according to Vihinen, 1994.
There is no argument to change the window size because the parameters are
specific to a window of 9. The parameters used are optimized for determining the flexibility.
"""
Flex = ProtParamData.Flex.copy()
Window=9
Weights=[0.25,0.4375,0.625,0.8125,1]
List=[]
for i in range(self.length - Window):
SubSeq=self.sequence[i:i+Window]
score = 0.0
for j in range(Window//2):
score += (Flex[SubSeq[j]]+Flex[SubSeq[Window-j-1]]) * Weights[j]
score += Flex[SubSeq[Window//2+1]]
List.append(score/5.25)
return List
def gravy(self):
"""Calculate the gravy according to Kyte and Doolittle."""
ProtGravy=0.0
for i in self.sequence:
ProtGravy += kd[i]
return ProtGravy/self.length
# this method is used to make a list of relative weight of the
# window edges compared to the window center. The weights are linear.
# it actually generates half a list. For a window of size 9 and edge 0.4
# you get a list of [0.4, 0.55, 0.7, 0.85].
def _weight_list(self, window, edge):
    unit = ((1.0-edge)/(window-1))*2
    weights = [0.0]*(window//2)
    for i in range(window//2):
        weights[i] = edge + unit * i
    return weights
# The weight list returns only one tail. If the list should be [0.4,0.7,1.0,0.7,0.4]
# what you actually get from _weights_list is [0.4,0.7]. The correct calculation is done
# in the loop.
def protein_scale(self, ParamDict, Window, Edge=1.0):
"""Compute a profile by any amino acid scale.
An amino acid scale is defined by a numerical value assigned to each type of
amino acid. The most frequently used scales are the hydrophobicity or
hydrophilicity scales and the secondary structure conformational parameters
scales, but many other scales exist which are based on different chemical and
physical properties of the amino acids. You can set several parameters that
control the computation of a scale profile, such as the window size and the
window edge relative weight value. WindowSize: The window size is the length
of the interval to use for the profile computation. For a window size n, we
use the i- ( n-1)/2 neighboring residues on each side of residue it compute
the score for residue i. The score for residue is the sum of the scale values
for these amino acids, optionally weighted according to their position in the
window. Edge: The central amino acid of the window always has a weight of 1.
By default, the amino acids at the remaining window positions have the same
weight, but you can make the residue at the center of the window have a
larger weight than the others by setting the edge value for the residues at
the beginning and end of the interval to a value between 0 and 1. For
instance, for Edge=0.4 and a window size of 5 the weights will be: 0.4, 0.7,
1.0, 0.7, 0.4. The method returns a list of values which can be plotted to
view the change along a protein sequence. Many scales exist. Just add your
favorites to the ProtParamData modules.
Similar to expasy's ProtScale: http://www.expasy.org/cgi-bin/protscale.pl
"""
# generate the weights
weight = self._weight_list(Window,Edge)
scores = []
# the score in each Window is divided by the sum of weights
sum_of_weights = 0.0
for i in weight: sum_of_weights += i
# since the weight list is one sided:
sum_of_weights = sum_of_weights*2+1
for i in range(self.length-Window+1):
subsequence = self.sequence[i:i+Window]
score = 0.0
for j in range(Window//2):
# walk from the outside of the Window towards the middle.
# Iddo: try/except clauses added to avoid raising an exception on a non-standard amino acid
try:
score += weight[j] * ParamDict[subsequence[j]] + weight[j] * ParamDict[subsequence[Window-j-1]]
except KeyError:
sys.stderr.write('warning: %s or %s is not a standard amino acid.\n' %
(subsequence[j],subsequence[Window-j-1]))
# Now add the middle value, which always has a weight of 1.
if subsequence[Window//2] in ParamDict:
score += ParamDict[subsequence[Window//2]]
else:
sys.stderr.write('warning: %s is not a standard amino acid.\n' % (subsequence[Window//2]))
scores.append(score/sum_of_weights)
return scores
def isoelectric_point(self):
"""Calculate the isoelectric point.
This method uses the module IsoelectricPoint to calculate the pI of a protein.
"""
if not self.amino_acids_content:
self.count_amino_acids()
X = IsoelectricPoint.IsoelectricPoint(self.sequence, self.amino_acids_content)
return X.pi()
def secondary_structure_fraction(self):
"""Calculate fraction of helix, turn and sheet.
This method returns a list of the fraction of amino acids which tend
to be in Helix, Turn or Sheet.
Amino acids in helix: V, I, Y, F, W, L.
Amino acids in Turn: N, P, G, S.
Amino acids in sheet: E, M, A, L.
Returns a tuple of three floats (Helix, Turn, Sheet).
"""
if not self.amino_acids_percent:
self.get_amino_acids_percent()
Helix = self.amino_acids_percent['V'] + self.amino_acids_percent['I'] + self.amino_acids_percent['Y'] + self.amino_acids_percent['F'] + self.amino_acids_percent['W'] + self.amino_acids_percent['L']
Turn = self.amino_acids_percent['N'] + self.amino_acids_percent['P'] + self.amino_acids_percent['G'] + self.amino_acids_percent['S']
Sheet = self.amino_acids_percent['E'] + self.amino_acids_percent['M'] + self.amino_acids_percent['A'] + self.amino_acids_percent['L']
return Helix, Turn, Sheet
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/SeqUtils/ProtParam.py
|
Python
|
gpl-2.0
| 10,842
|
[
"Biopython"
] |
8f1fe2f1284c3fa7cc9c002502e25ca93d98c69407b78bfcbf2c7eb6d38f8502
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.StereoCapableWindowOn()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
reader = vtk.vtkGenericEnSightReader()
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/elements6-bin.case")
reader.UpdateInformation()
reader.GetOutputInformation(0).Set(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP(), 0.1)
geom = vtk.vtkGeometryFilter()
geom.SetInputConnection(reader.GetOutputPort())
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.ColorByArrayComponent("pointTensors",0)
mapper.SetScalarRange(0,300)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign our actor to the renderer
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
renWin.Render()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/IO/EnSight/Testing/Python/EnSight6ElementsBin.py
|
Python
|
gpl-3.0
| 1,233
|
[
"VTK"
] |
28febc0b530517bf15ef56136c3c2b89da1e6b7aaa724c2b2d99b73debe97c0c
|
import logging
import urllib
from functools import partial
from collections import defaultdict
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access,
get_courses_by_university, sort_by_announcement)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from student.models import UserTestGroup, CourseEnrollment
from student.views import course_from_id, single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
import shoppingcart
from microsite_configuration.middleware import MicrositeConfiguration
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
@ensure_csrf_cookie
@cache_if_anonymous
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses = get_courses(request.user, request.META.get('HTTP_HOST'))
courses = sort_by_announcement(courses)
return render_to_response("courseware/courses.html", {'courses': courses})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = toc_for_course(user, request, course, chapter, section, field_data_cache)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If the xmodule has no position, or it is out of bounds, return the first child.
Returns None only if there are no children at all.
"""
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
pos = 0
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# Something is wrong. Default to first child
child = children[0]
else:
child = None
return child
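# For example, a sequential whose saved position is 3 returns its third
# display item; position is 1-indexed, hence the pos = position - 1 above.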
def redirect_to_course_position(course_module):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id}
chapter = get_current_child(course_module)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.url_name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
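# Illustrative sketch (not part of the original code): for a hypothetical
# course id "MITx/6.002x/2012_Fall", user "alice", and JABBER_DOMAIN
# "jabber.example.com", chat_settings would return roughly:
#
#     {
#         'domain': 'jabber.example.com',
#         'room': 'MITx-6.002x-2012_Fall_class',   # slashes become dashes
#         'username': 'alice@jabber.example.com',
#         'password': 'alice@jabber.example.com',  # see the TODO above
#     }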
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
    If section is not specified, displays the accordion opened to the right chapter.
    If neither chapter nor section is specified, redirects to the user's most recent
    chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User {0} tried to view course {1} but is not enrolled'.format(user, course.location.url()))
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, we should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
            if masq == 'student':  # if staff is masquerading as student, be kinder: don't 404
                log.debug('staff masq as student: no chapter %s', chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
                if masq == 'student':  # if staff is masquerading as student, be kinder: don't 404
                    log.debug('staff masq as student: no section %s', section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module_for_descriptor(request.user,
request,
section_descriptor,
section_field_data_cache,
course_id,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render('student_view')
context['section_title'] = section_descriptor.display_name_with_default
else:
            # section is None, so display a welcome-back message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
            except Exception:
                # Let the exception propagate, relying on global config to
                # at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
"""
    This entry point allows a shorter version of a jump URL, where just the id of
    the element is passed in. It assumes that the id is unique within the course_id namespace.
"""
course_location = CourseDescriptor.id_to_location(course_id)
items = modulestore().get_items(
Location('i4x', course_location.org, course_location.course, None, module_id),
course_id=course_id
)
if len(items) == 0:
raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
format(module_id, course_id, request.META.get("HTTP_REFERER", "")))
if len(items) > 1:
log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
format(module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.url()))
return jump_to(request, course_id, items[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
# Complain if the location isn't valid
try:
location = Location(location)
except InvalidLocationError:
raise Http404("Invalid location")
# Complain if there's not data for this location
try:
(course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(location))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(location))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_id)
elif section is None:
return redirect('courseware_chapter', course_id=course_id, chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position)
@ensure_csrf_cookie
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page
reverifications = fetch_reverify_banner_info(request, course_id)
context = {
'request': request,
'course_id': course_id,
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masq,
'reverifications': reverifications,
}
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
tab = tabs.get_static_tab_by_slug(course, tab_slug)
if tab is None:
raise Http404
contents = tabs.get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/static_tab.html',
{'course': course,
'tab': tab,
'tab_contents': contents,
'staff_access': staff_access, })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/syllabus.html', {'course': course,
'staff_access': staff_access, })
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
if MicrositeConfiguration.get_microsite_configuration_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
):
raise Http404
course = get_course_with_access(request.user, course_id, 'see_exists')
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and
settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):
registration_price = CourseMode.min_course_price_for_currency(course_id,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_id)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=course.id)
# see if we have already filled up all allowed enrollments
is_course_full = CourseEnrollment.is_course_full(course)
return render_to_response('courseware/course_about.html',
{'course': course,
'registered': registered,
'course_target': course_target,
'registration_price': registration_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full})
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
"""
This is the button that gets put into an iframe on the Drupal site
"""
try:
course = get_course_with_access(request.user, course_id, 'see_exists')
except (ValueError, Http404) as e:
# if a course does not exist yet, display a coming
# soon button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_id}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
allow_registration = has_access(request.user, course, 'enroll')
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course(course.id)
return render_to_response(
'courseware/mktg_course_about.html',
{
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
)
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
with grades.manual_transaction():
return _progress(request, course_id, student_id)
def _progress(request, course_id, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, course_id, 'load', depth=None)
staff_access = has_access(request.user, course, 'staff')
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
        # The student (whose progress the instructor requested) did not have access to the course
raise Http404
context = {
'course': course,
'courseware_summary': courseware_summary,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_id)
}
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def fetch_reverify_banner_info(request, course_id):
"""
Fetches needed context variable to display reverification banner in courseware
"""
reverifications = defaultdict(list)
user = request.user
if not user.id:
return reverifications
enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_id)
course = course_from_id(course_id)
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
return reverifications
@login_required
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(course_id=course_id,
module_state_key=location,
student_id=student.id)
except User.DoesNotExist:
return HttpResponse(escape("User {0} does not exist.".format(student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_id
}
return render_to_response('courseware/submission_history.html', context)
|
pku9104038/edx-platform
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 28,526
|
[
"VisIt"
] |
c7513d40a19274cdacab9e45cc6516940db96aa0de48b597c5025c5bad67973a
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Contains basic data type and methods for crystal structures. """
__docformat__ = "restructuredtext en"
__all__ = ['Atom', 'Structure', 'space_group', 'cell_invariants', 'smith_normal_form', 'gruber',
'supercell', 'into_cell', 'into_voronoi', 'zero_centered', 'are_periodic_images',
'HFTransform', 'primitive', 'is_primitive', 'neighbors', 'coordination_shells',
'map_sites', 'iterator', 'specieset', 'transform', 'vasp_ordered', 'which_site']
from .atom import Atom
from .structure import Structure
from ._space_group import space_group, cell_invariants
from .cutilities import smith_normal_form, gruber, supercell
from .utilities import into_cell, into_voronoi, zero_centered, are_periodic_images
from .hart_forcade import HFTransform
from ._primitive import primitive, is_primitive
from ._coordination_shells import coordination_shells, neighbors
from ._map_sites import map_sites
from . import iterator
def transform(structure, rotation, translation=None):
""" Returns a copy of the structure transformed according to affine operation """
from copy import deepcopy
if translation is None and rotation.shape[0] == rotation.shape[1] + 1:
translation = rotation[-1, :]
rotation = rotation[:-1, :]
result = deepcopy(structure)
result.transform(rotation, translation)
return result
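# Illustrative sketch (not part of the original module): the affine operation
# may be passed as a single 4x3 matrix whose first three rows are the rotation
# and whose last row is the translation. The stub class is hypothetical; any
# deep-copyable object with a transform(rotation, translation) method works.
def _example_transform():
    from numpy import array, allclose
    affine = array([[0., -1., 0.],
                    [1., 0., 0.],
                    [0., 0., 1.],
                    [.5, .5, 0.]])  # 90-degree rotation about z, then a shift

    class Stub(object):
        def transform(self, rotation, translation):
            self.rotation, self.translation = rotation, translation

    result = transform(Stub(), affine)
    assert result.rotation.shape == (3, 3)
    assert allclose(result.translation, [.5, .5, 0.])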
def specieset(structure):
""" Returns set of species (without repetitions) in the original order
Especially usefull with VASP since we are sure that the list of species
is always ordered the same way.
"""
    types = [a.type for a in structure]
    seen = set()
    return [x for x in types if not (x in seen or seen.add(x))]
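# Illustrative sketch (not part of the original module): specieset only needs
# objects exposing a ``type`` attribute, so a namedtuple stands in for Atom.
def _example_specieset():
    from collections import namedtuple
    FakeAtom = namedtuple('FakeAtom', ['type'])
    atoms = [FakeAtom('Si'), FakeAtom('O'), FakeAtom('Si'), FakeAtom('O')]
    assert specieset(atoms) == ['Si', 'O']  # first-appearance order, no repeats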
def vasp_ordered(structure, site=False):
    """ Returns a structure with correct VASP order of ions.
        :param structure:
            :class:`Structure` for which to reorder atoms.
        :param site:
            If True and every atom carries a ``site`` attribute, sort by site
            index rather than by species name.
    """
    from copy import deepcopy
    result = deepcopy(structure)
    # count atoms *missing* a site attribute; zero means every atom carries one
    has_site = sum([0 if hasattr(x, 'site') else 1 for x in structure])
    if site and has_site == 0:
        def sortme(atom): return atom.site
    else:
        def sortme(atom): return atom.type.lower()
    result[:] = sorted(structure, key=sortme)
    return result
def which_site(atom, lattice, invcell=None, tolerance=1e-8):
    """ Index of periodically equivalent atom.
        :param atom:
            :py:class:`~cppwrappers.Atom` for which to find periodic equivalent.
        :param lattice:
            :py:class:`~cppwrappers.Structure` defining the periodicity.
        :type lattice:
            :py:class:`~cppwrappers.Structure` or matrix
        :param invcell:
            Inverse of the lattice cell. Computed from ``lattice.cell`` if not
            given.
        :param tolerance:
            Tolerance when comparing periodic images.
        :return: index in list of atoms, or -1 if not found.
    """
from numpy.linalg import inv
if invcell is None:
invcell = inv(lattice.cell)
lattice = [getattr(site, 'pos', site) for site in lattice]
pos = getattr(atom, 'pos', atom)
for i, site in enumerate(lattice):
if are_periodic_images(pos, site, invcell, tolerance):
return i
return -1
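# Illustrative sketch (not part of the original module): when invcell is given
# explicitly, the lattice argument may be a plain list of cartesian positions.
# This assumes are_periodic_images (imported above) treats positions differing
# by integer lattice translations as equivalent.
def _example_which_site():
    from numpy import array, identity
    sites = [array([0., 0., 0.]), array([.5, .5, .5])]
    invcell = identity(3)  # inverse of a unit cube cell
    assert which_site(array([1.5, 1.5, 1.5]), sites, invcell) == 1
    assert which_site(array([.25, 0., 0.]), sites, invcell) == -1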
def _normalize_freeze_cell(freeze, periodicity=3):
""" Transforms freeze parameters into a normalized form.
The normalized form is a list of six boolean where, if True, each of xx,
yy, zz, yz, xy, xz is *frozen*. The other forms allow strings, list of
strings, or the same list of booleans as the output.
If periodicity is 2, then the degrees of freedom are xx, yy.
"""
from numpy import array
if isinstance(freeze, str):
freeze = freeze.split()
if periodicity == 3:
if len(freeze) == 6 \
and all(isinstance(u, bool) or isinstance(u, int) for u in freeze):
return [u == True for u in freeze]
freeze = {u.lower() for u in freeze}
return array(['xx' in freeze,
'yy' in freeze,
'zz' in freeze,
('yz' in freeze or 'zy' in freeze),
('xy' in freeze or 'yx' in freeze),
('xz' in freeze or 'zx' in freeze)])
elif periodicity == 2:
if len(freeze) == 2 \
and all(isinstance(u, bool) or isinstance(u, int) for u in freeze):
return [u == True for u in freeze]
freeze = {u.lower() for u in freeze}
return array(['xx' in freeze, 'yy' in freeze])
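# Illustrative sketch (not part of the original module): the accepted input
# forms all normalize to the same six booleans (xx, yy, zz, yz, xy, xz).
def _example_normalize_freeze_cell():
    expected = [True, False, False, True, False, False]
    assert list(_normalize_freeze_cell('xx yz')) == expected
    assert list(_normalize_freeze_cell(['xx', 'zy'])) == expected
    assert list(_normalize_freeze_cell([1, 0, 0, 1, 0, 0])) == expected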
def _normalize_freeze_atom(freeze):
""" Transforms freeze parameters into a normalized form.
The normalized form is a list of 3 boolean where, if True, each of x, y,
z is *frozen*. The other forms allow strings, list of strings, or the
same list of booleans as the output.
"""
from numpy import array
from .. import error
if hasattr(freeze, '__iter__') and len(freeze) == 3 \
and all(isinstance(u, bool) or isinstance(u, int) for u in freeze):
return [u == True for u in freeze]
elif not hasattr(freeze, 'lower'):
raise error.TypeError('Could not make sense of freeze parameter.')
freeze = freeze.lower()
return array(['x' in freeze, 'y' in freeze, 'z' in freeze])
|
pylada/pylada-light
|
src/pylada/crystal/__init__.py
|
Python
|
gpl-3.0
| 6,379
|
[
"CRYSTAL",
"VASP"
] |
2991bc82008d2e8a4a92b5c90d53978e1031d50295674990ce5c82f3f1fc991c
|
##############################################################################
##############################################################################
# Default settings and helpers for
# Particle filtering
#
# Copyright (c) 2016 Johan Dahlin [ johan.dahlin (at) liu.se ]
# Distributed under the MIT license.
#
##############################################################################
##############################################################################
import numpy as np
import pandas
import os
##############################################################################
# Set default settings if needed
##############################################################################
def setSettings(sm,vers):
#=====================================================================
# Settings for the filter
#=====================================================================
if ( vers == "filter" ):
        if ( sm.xo is not None ):
            sm.genInitialState = False;
        if ( sm.genInitialState is None ):
            print("pf (genInitialState): No initial state given, so assuming known zero state.");
            sm.genInitialState = False;
            sm.xo = 0.0;
        if ( sm.nPart is None ):
            print("pf (nPart): No of particles not given, so defaulting to using N=T=" + str(sm.T) + ".");
            sm.nPart = sm.T;
        if ( sm.resamplingType is None ):
            print("pf (resamplingType): No resampling scheme given, so defaulting to systematic resampling.");
            sm.resamplingType = "systematic";
        if ( sm.resampFactor is None ):
            print("pf (resampFactor): No limit of effective particles given for resampling, so resampling at every iteration.")
            sm.resampFactor = 2.0;
##############################################################################
# Calculate the pdf of a univariate Gaussian
##############################################################################
def uninormpdf(x,mu,sigma):
return 1.0/np.sqrt( 2.0 * np.pi * sigma**2 ) * np.exp( - 0.5 * (x-mu)**2 * sigma**(-2) );
##############################################################################
# Calculate the log-pdf of a univariate Gaussian
##############################################################################
def loguninormpdf(x,mu,sigma):
return -0.5 * np.log( 2.0 * np.pi * sigma**2) - 0.5 * (x-mu)**2 * sigma**(-2);
##############################################################################
# Calculate the log-pdf of a multivariate Gaussian with mean vector mu and covariance matrix S
##############################################################################
def lognormpdf(x,mu,S):
nx = len(S)
norm_coeff = nx * np.log( 2.0 * np.pi ) + np.linalg.slogdet(S)[1]
err = x-mu
numerator = np.dot( np.dot(err,np.linalg.pinv(S)),err.transpose())
return -0.5*(norm_coeff+numerator)
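##############################################################################
# Illustrative sketch (not part of the original module): in one dimension the
# multivariate log-pdf reduces to the univariate one
##############################################################################
def exampleLogpdfConsistency():
    x = np.array([0.3]); mu = np.array([-0.1]); sigma = 0.7;
    S = np.array([[sigma**2]]);
    assert np.isclose(lognormpdf(x, mu, S), loguninormpdf(0.3, -0.1, sigma));
    assert np.isclose(np.log(uninormpdf(0.3, -0.1, sigma)),
                      loguninormpdf(0.3, -0.1, sigma));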
##############################################################################
# Check if a matrix is positive definite by checking that all eigenvalues are positive
##############################################################################
def isPSD(x):
return np.all(np.linalg.eigvals(x) > 0)
##########################################################################
# Helper: Reconstruct the particle trajectories
##########################################################################
def reconstructTrajectories_helper(sm,sys):
xtraj = np.zeros( (sm.nPart,sys.T) );
xtraj[:,sys.T-1] = sm.p[ :, sys.T-1 ];
    # Trace each particle back through its chain of resampled ancestors
for ii in range(0,sm.nPart):
att = ii;
for tt in np.arange(sys.T-2,0,-1):
at = sm.a[att,tt+1];
at = at.astype(int);
xtraj[ii,tt] = sm.p[at,tt];
att = at;
att = att.astype(int);
sm.x = xtraj;
##########################################################################
# Helper: compile the results and write to file
##########################################################################
def writeToFile_helper(sm,fileOutName=None,noLLests=False):
# Compile the results for output for smoother and filter
if hasattr(sm, 'xhats'):
# Smoother
columnlabels = [None]*3;
columnlabels[0] = "xhats"
columnlabels[1] = "xhatf"
columnlabels[2] = "llt"
out = np.hstack((sm.xhats,sm.xhatf,(sm.llt).reshape((sm.T,1))))
else:
# Filter
columnlabels = [None]*2;
columnlabels[0] = "xhatf"
columnlabels[1] = "llt"
out = np.hstack((sm.xhatf,(sm.llt).reshape((sm.T,1))))
# Write out the results to file
fileOut = pandas.DataFrame(out,columns=columnlabels);
    if ( fileOutName is None ):
if hasattr(sm, 'xhats'):
fileOutName = 'results/' + str(sm.filePrefix) + '/state_' + sm.filterType + '_' + sm.smootherType + '_N' + str(sm.nPart) + '.csv';
else:
fileOutName = 'results/' + str(sm.filePrefix) + '/state_' + sm.filterType + '_N' + str(sm.nPart) + '.csv';
ensure_dir(fileOutName);
fileOut.to_csv(fileOutName);
print("writeToFile_helper: wrote results to file: " + fileOutName)
##############################################################################
# Check if dirs for outputs exists, otherwise create them
##############################################################################
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
##############################################################################
##############################################################################
# End of file
##############################################################################
##############################################################################
|
compops/pmmh-correlated2015
|
state/smc_helpers.py
|
Python
|
gpl-3.0
| 5,924
|
[
"Gaussian"
] |
af9fde41d9b9be13b4684c593f9f6f3379131877a5a264635858b0791752144d
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 03 14:53:54 2010
@author: a1185872
"""
import numpy as np
import scipy.signal as sps
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
def padzeros(f,npad=None,padpattern=None):
    """
    padzeros(f) will return the array f padded with zeros to the next power
    of 2 (for faster fft processing) or to length npad if given.
    Inputs:
        f = array to pad
        npad = length to pad to, defaults to the next power of two
        padpattern = value to pad with, default is zero
    Outputs:
        fpad = array f padded to length npad with padpattern
    """
    #make f an array
    f=np.array(f)
    #check dimensions of f
    try:
        n,m=f.shape
    except ValueError:
        n=f.shape[0]
        m=0
    if npad is None:
        power=np.log2(n)
        fpow=np.floor(power)
        if power!=fpow:
            npad=int(2**(fpow+1))
        else:
            npad=int(2**power)
    if m!=0:
        fpad=np.zeros((npad,m),dtype=type(f[0,0]))
        #copy every column of f, not just the last one
        fpad[0:n,:]=f[0:n,:]
        if padpattern is not None:
            fpad[n:npad,:]=padpattern
    else:
        fpad=np.zeros(npad,dtype=type(f[0]))
        fpad[0:n]=f[0:n]
        if padpattern is not None:
            fpad[n:npad]=padpattern
    return fpad
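#Illustrative sketch (not part of the original module): pad a 10-sample
#array to the next power of two
def _example_padzeros():
    f=np.ones(10)
    fpad=padzeros(f)
    print(len(fpad))      #-> 16, the next power of two
    print(fpad[10:])      #-> six zeros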
def sfilter(f,fcutoff=10.,w=10.0,dt=.001):
"""
Will apply a sinc filter of width w to the function f by multipling in
the frequency domain. Returns filtered function
Inputs:
f = array to filter
fcuttoff = cutoff frequency
w = length of filter
dt = sampling time (s)
Outputs:
filtfunc = filtered function
"""
tshift=float(w)/2.
fpad=padzeros(f)
Fpad=np.fft.fft(fpad)
fc=fcutoff
t=np.arange(start=-tshift,stop=tshift,step=dt)
filt=np.zeros(len(fpad))
fs=2*fc*np.sinc(2*t*fc)
norm=sum(fs)
filt[0:len(t)]=fs/norm
Filt=np.fft.fft(filt)
Filtfunc=Fpad*Filt
filtfunc=np.fft.ifft(Filtfunc)
filtfunc=filtfunc[len(t)/2:len(f)+len(t)/2]
return filtfunc
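#Illustrative sketch (not part of the original module): low-pass a two-tone
#signal so the 60 Hz component is suppressed and the 2 Hz component kept.
#The result comes back complex (from the inverse fft), so take .real, and
#the parameters are chosen so the padded length covers len(f)+w/(2*dt).
def _example_sfilter():
    dt=.001
    t=np.arange(0,1.8,dt)                        #1800 samples -> padded to 2048
    sig=np.sin(2*np.pi*2*t)+np.sin(2*np.pi*60*t)
    smooth=sfilter(sig,fcutoff=10.,w=.4,dt=dt).real
    print(len(smooth))                           #-> 1800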
def dctrend(f):
"""
dctrend(f) will remove a dc trend from the function f.
Inputs:
f = array to dctrend
Outputs:
fdc = array f with dc component removed
"""
fdc=sps.detrend(f)
return fdc
def normalizeL2(f):
"""
normalizeL2(f) will return the function f normalized by the L2 norm ->
f/(sqrt(sum(abs(x_i)^2))).
Inputs:
f = array to be normalized
Outputs:
fnorm = array f normalized in L2 sense
"""
f=np.array(f)
fsum=np.sum(np.abs(f))
if fsum==0:
fnorm=f
else:
fnorm=f/np.sqrt(np.sum(np.abs(f)**2))
return fnorm
def decimatef(f,m):
"""
Will decimate a function by the factor m. First an 8th order Cheybechev
type I filter with a cuttoff frequency of .8/m is applied in both
directions to minimize any phase distortion and remove any aliasing. Note
decimation values above 10 will typically result in bad coefficients,
therefore if you decimation is more than 10 just repeat the decimation until
the desired decimation is reached.
Inputs:
f = array to be decimated
m = decimation factor
Outputs:
fdec = array f decimated by factor m
"""
n=len(f)
fdec=sps.resample(f,n/m,window='hanning')
# n=len(f)
# nout=np.ceil(n/m)
# nfilt=8
# rip=.05
#
# #make a cheybeshev1 zero-phase filter with cuttoff frequency of .8/m
# b,a=sps.iirfilter(nfilt,.8/m,rp=rip,btype='low',ftype='cheby1',output='ba')
# ffilt=sps.filtfilt(b,a,f)
# nbeg=n-m*nout
# fdec=np.array([ffilt[ii] for ii in np.arange(start=nbeg,stop=int(n),step=m)])
return fdec
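#Illustrative sketch (not part of the original module): decimate a 1 Hz sine
#sampled at 1 kHz down by a factor of 4 (assumes scipy still accepts the
#'hanning' window name used by decimatef above).
def _example_decimatef():
    x=np.sin(2*np.pi*1.0*np.arange(0,4,.001))    #4000 samples
    xd=decimatef(x,4)
    print(len(xd))                               #-> 1000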
def dwindow(window):
"""
Calculates the derivative of the given window
Input:
window = some sort of window function
Output:
dwin = derivative of window
"""
h=window
nh=len(h)
lh=(nh-1)/2
stepheight=(h[0]+h[-1])/2.
ramp=float((h[-1]-h[0]))/nh
h2=np.zeros(nh+2)
h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)
dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp
dwin[0]=dwin[0]+stepheight
dwin[-1]=dwin[-1]-stepheight
return dwin
def gausswin(winlen,alpha=2.5):
"""
gausswin will compute a gaussian window of length winlen with a variance of
alpha
Inputs:
winlen = length of desired window
alpha = 1/standard deviation of window, ie full width half max of window
Outputs:
gwin = gaussian window
"""
lh=(winlen-1)/2+1-np.remainder(winlen,2)
gt=np.arange(start=-lh,stop=lh+1,step=1)
gwin=np.exp(-.5*(alpha*gt/float(lh))**2)
return gwin
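#Illustrative sketch (not part of the original module): for odd winlen the
#window equals scipy's gaussian window with std=lh/alpha where lh=(winlen-1)/2
#(assumes sps.gaussian is available, as used elsewhere in this module).
def _example_gausswin():
    g1=gausswin(65,alpha=2.5)
    g2=sps.gaussian(65,32/2.5)
    print(np.allclose(g1,g2))                    #-> True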
def wvdas(fx):
"""
wvdas(fx) will compute the analytic signal for WVVD as defined by \
J. M. O' Toole, M. Mesbah, and B. Boashash, (2008), "A New Discrete Analytic\
Signal for Reducing Aliasing in the Discrete Wigner-Ville Distribution", \
IEEE Trans. on Signal Processing,
Inputs:
fx = signal to compute anlytic signal for with length N
Outputs:
fxa = analytic signal of fx with length 2*N
"""
n=len(fx)
#pad the time series with zeros
fxp=padzeros(fx,npad=2*n)
#compute the fourier transform
FX=np.fft.fft(fxp)
#apply analytic signal
FX[1:n-1]=2*FX[1:n-1]
FX[n:]=0
#inverse fourier transform and set anything outside of length n to zero
fxa=np.fft.ifft(FX)
fxa[n:]=0
return fxa
def stft(fx,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10):
"""stft(fx,nh=2**8,tstep=2**7,ng=1,df=1.0) will calculate the spectrogam of
the given function by calculating the fft of a window of length nh at each
time instance with an interval of tstep. The frequency resolution is nfbins
Can compute the cross STFT by inputting fx as [fx1,fx2]
Inputs:
fx = the function to have a spectrogram computed for can be two functions
input as [fx1,fx2]
nh = window length for each time step
tstep = time step between short windows
ng = smoothing window along frequency plane should be odd
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = spectrogram in units of amplitude
tlst = time instance array where each window was calculated
flst = frequency array containing only positive frequencies
"""
#get length of input time series if there is two columns
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm<fn:
fm,fn=fx.shape
except ValueError:
fn=fx.shape[0]
fm=1
    #flatten to a 1-d array of length fn
    fx=fx.reshape(fn)
#make a hanning window to minimize aliazing and Gibbs effect of short time
#windows
h=normalizeL2(np.hanning(nh))
#make a hanning window to smooth in frequency domain
    if ng!=1:
        if np.remainder(ng,2)!=1:
            ng=ng-1
            print 'ng must be odd; forcing ng-1'
        g=normalizeL2(np.hanning(ng))
#make time step list
tlst=np.arange(start=0,stop=fn-nh+1,step=tstep)
#make a frequency list for plotting exporting only positive frequencies
df=float(df)
flst=np.fft.fftfreq(nfbins,1/df)[0:nfbins/2] #get only positive frequencies
#initialize the TFD array
tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')
fa=sps.hilbert(dctrend(fx))
for place,ii in enumerate(tlst):
fxwin=fa[ii:ii+nh]*h
#get only positive frequencies
FXwin=np.fft.fft(padzeros(fxwin,npad=nfbins))[:nfbins/2]
#smooth in frequency plane
        if ng!=1:
            FXwin=np.convolve(padzeros(FXwin,npad=len(FXwin)+ng-1),g,'valid')
#pull out only positive quadrant, flip array for plotting
tfarray[:,place]=FXwin[::-1]
return tfarray,tlst,flst
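#Illustrative sketch (not part of the original module): spectrogram of a
#linear chirp whose instantaneous frequency sweeps 10 Hz to 130 Hz.
def _example_stft():
    df=1000.0
    t=np.arange(0,4,1/df)
    sig=np.cos(2*np.pi*(10*t+15*t**2))           #instantaneous freq 10+30*t Hz
    tfarray,tlst,flst=stft(sig,nh=2**8,tstep=2**6,df=df,nfbins=2**9)
    print(tfarray.shape)                         #-> (256, 59)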
def reassignedstft(fx,nh=2**6-1,tstep=2**5,nfbins=2**10,df=1.0,alpha=4,
threshold=None):
"""
    reassignedstft(fx,nh=2**6-1,tstep=2**5,nfbins=2**10,df=1.0,alpha=4) will
    compute the reassigned spectrogram by estimating the center of gravity of
    the signal and condensing dispersed energy back to that location.
Inputs:
fx = time series to be analyzed
nh = length of gaussian window, should be odd
tstep = time step for each window calculation
nfbins = number of frequency bins to calculate, note result will be
length nfbins/2
df = sampling frequency (Hz)
alpha = reciprocal of full width half max of gaussian window
threshold = threshold value for reassignment
Outputs:
rtfarray = reassigned spectrogram in units of amplitude
        tlst = array of time instances where windows were calculated, for plotting
flst = array of frequencies for plotting
stft = standard spectrogram in units of amplitude
"""
#make sure fx is type array
fx=np.array(fx)
#compute length of fx
nx=len(fx)
#make sure window length is odd
if np.remainder(nh,2)==0:
nh=nh+1
#compute gaussian window
h=gausswin(nh,alpha=alpha)
#h=np.hanning(nh)
lh=(nh-1)/2
#compute ramp window
th=h*np.arange(start=-lh,stop=lh+1,step=1)
#compute derivative of window
dh=dwindow(h)
#make a time list of indexes
tlst=np.arange(start=0,stop=nx,step=tstep)
nt=len(tlst)
#make a frequency list
flst=np.fft.fftfreq(nfbins,1./df)[nfbins/2:]
#initialize some time-frequency arrays
tfr=np.zeros((nfbins,nt),dtype='complex128')
tf2=np.zeros((nfbins,nt),dtype='complex128')
tf3=np.zeros((nfbins,nt),dtype='complex128')
#compute components for reassignment
for ii,tt in enumerate(tlst):
#create a time shift list
tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),
stop=min([np.round(nx/2.),lh,nx-tt-1])+1)
#compute the frequency spots to be calculated
ff=np.remainder(nfbins+tau,nfbins)
xlst=tt+tau
hlst=lh+tau
normh=np.sqrt(np.sum(abs(h[hlst])**2))
tfr[ff,ii]=fx[xlst]*h[hlst].conj()/normh
tf2[ff,ii]=fx[xlst]*th[hlst].conj()/normh
tf3[ff,ii]=fx[xlst]*dh[hlst].conj()/normh
#compute Fourier Transform
spec=np.fft.fft(tfr,axis=0)
spect=np.fft.fft(tf2,axis=0)
specd=np.fft.fft(tf3,axis=0)
#get only positive frequencies
spec=spec[nfbins/2:,:]
spect=spect[nfbins/2:,:]
specd=specd[nfbins/2:,:]
#check to make sure no spurious zeros floating around
szf=np.where(abs(spec)<1.E-6)
spec[szf]=0.0
zerofind=np.nonzero(abs(spec))
twspec=np.zeros((nfbins/2,nt),dtype='float')
dwspec=np.zeros((nfbins/2,nt),dtype='float')
twspec[zerofind]=np.round(np.real(spect[zerofind]/spec[zerofind])/1)
dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specd[zerofind]/spec[zerofind])/
(np.pi))
#compute reassignment
rtfarray=np.zeros_like(spec)
    if threshold is None:
threshold=1.E-4*np.mean(fx[tlst])
for nn in range(nt):
for kk in range(nfbins/2):
if abs(spec[kk,nn])>threshold:
#get center of gravity index in time direction
nhat=int(nn+twspec[kk,nn])
nhat=int(min([max([nhat,1]),nt-1]))
#get center of gravity index in frequency direction
khat=int(kk-dwspec[kk,nn])
khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,
nfbins/2))
#reassign energy
rtfarray[khat,nhat]=rtfarray[khat,nhat]+spec[kk,nn]
#rtfarray[kk,nn]=spec[khat,nhat]
spect[kk,nn]=khat+1j*nhat
else:
spect[kk,nn]=np.inf*(1+1j)
rtfarray[kk,nn]=rtfarray[kk,nn]+spec[kk,nn]
return rtfarray,tlst,flst,spec
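#Illustrative sketch (not part of the original module): reassigning the same
#kind of chirp as in the stft example condenses energy onto a sharp ridge.
def _example_reassignedstft():
    df=1000.0
    t=np.arange(0,2,1/df)
    sig=np.cos(2*np.pi*(10*t+15*t**2))
    rtf,tlst,flst,spec=reassignedstft(sig,nh=2**6-1,tstep=2**5,nfbins=2**10,
                                      df=df)
    print(rtf.shape)                             #-> (512, 63)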
def wvd(fx,nh=2**8-1,tstep=2**5,nfbins=2**10,df=1.0):
"""
wvd(f,nh=2**8-1,tstep=2**5,nfbins=2**10,df=1.0) will calculate the
Wigner-Ville distribution for a function f. Can compute the cross spectra
by inputting fx as [fx1,fx2]
Inputs:
fx = array for which WVD will be calculated, input as [fx1,fx2] for
cross-spectra calculation
nh = window length, needs to be odd so centered on zero
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
Outputs:
tfarray = WVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm>fn:
fm,fn=fx.shape
except ValueError:
fn=len(fx)
fm=1
if fm>1:
print 'computing cross spectra'
#compute the analytic signal of function f and dctrend
fa=wvdas(fx[0])
fb=wvdas(fx[1])
else:
        #compute the analytic signal of function f and dctrend
        #(note: the hilbert-transform estimate below overwrites the wvdas result)
        fa=wvdas(fx)
        fa=sps.hilbert(dctrend(fx))
fb=fa.copy()
fn=len(fa)
#sampling period
df=float(df)
dt=1./df
tau=(nh-1)/2
#create a time array such that the first point is centered on time window
tlst=np.arange(start=0,stop=fn-1,step=tstep,dtype='int')
#create an empty array to put the tf in
tfarray=np.zeros((nfbins,len(tlst)),dtype='complex128')
#create a frequency array with just positive frequencies
flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]
#calculate pseudo WV
for point,nn in enumerate(tlst):
#calculate the smallest timeshift possible
taun=min(nn,tau,fn-nn-1)
#make a timeshift array
taulst=np.arange(start=-taun,stop=taun+1,step=1,dtype='int')
#calculate rectangular windowed correlation function of analytic signal
Rnn=4*np.conjugate(fa[nn-taulst])*fb[nn+taulst]
#calculate fft of windowed correlation function
#FTRnn=np.fft.fft(padzeros(Rnn,npad=nfbins))
#put into tfarray
tfarray[:,point]=padzeros(Rnn,npad=nfbins)[::-1]
#normalize
tfarray=np.fft.fft(tfarray,axis=0)
tfarray=tfarray/nh
return tfarray,tlst,flst
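#Illustrative sketch (not part of the original module): the WVD of a pure
#tone concentrates along a single frequency line.
def _example_wvd():
    df=256.0
    t=np.arange(0,2,1/df)                        #512 samples
    sig=np.cos(2*np.pi*32*t)
    tfarray,tlst,flst=wvd(sig,nh=2**7-1,tstep=2**4,nfbins=2**8,df=df)
    print(tfarray.shape)                         #-> (256, 32)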
def spwvd(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=None,ng=None,sigmat=None,
sigmaf=None):
"""
spwvd(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,ng=2**5-1,sigmat=None,
sigmaf=None)
will calculate the smoothed pseudo Wigner-Ville distribution for a function
fx. smoothed with Gaussians windows to get best localization.
Inputs:
fx = array to estimate spwvd, input as [fx1,fx2] if computing cross
spectra
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
ng = length of time-domain smoothing window (needs to be odd)
nh = length of frequency-domain smoothing window (needs to be odd)
sigmat = std of window h, ie full width half max of gaussian
sigmaf = std of window g, ie full width half max of gaussian
Outputs:
tfarray = SPWVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm>fn:
fm,fn=fx.shape
except ValueError:
fn=len(fx)
fm=1
if fm>1:
print 'computing cross spectra'
#compute the analytic signal of function f and dctrend
fa=wvdas(fx[0])
fb=wvdas(fx[1])
else:
        #compute the analytic signal of function f and dctrend
        #(note: the hilbert-transform estimate below overwrites the wvdas result)
        fa=wvdas(fx)
        fa=sps.hilbert(dctrend(fx))
fb=fa.copy()
print 'Computed Analytic signal'
#sampling period
df=float(df)
dt=1/df
#create normalize windows in time (g) and frequency (h)
#note window length should be odd so that h,g[0]=1,nh>ng
    if nh is None:
        nh=np.floor(fn/2.)
    #make sure the window length is odd
    if np.remainder(nh,2)==0:
        nh=nh+1
    #calculate length for time smoothing window
    if ng is None:
        ng=np.floor(fn/5.)
    if np.remainder(ng,2)==0:
        ng=ng+1
    #calculate standard deviations for gaussian windows
    if sigmat is None:
        sigmah=nh/(6*np.sqrt(2*np.log(2)))
    else:
        sigmah=sigmat
    if sigmaf is None:
        sigmag=ng/(6*np.sqrt(2*np.log(2)))
    else:
        sigmag=sigmaf
nh=int(nh)
ng=int(ng)
print 'nh='+str(nh)+'; ng='+str(ng)
#calculate windows and normalize
h=sps.gaussian(nh,sigmah)
h=h/sum(h)
g=sps.gaussian(ng,sigmag)
g=g/sum(g)
Lh=(nh-1)/2 #midpoint index of window h
Lg=(ng-1)/2 #midpoint index of window g
#create a time array such that the first point is centered on time window
tlst=np.arange(start=0,stop=fn+1,step=tstep,dtype='int')
#create an empty array to put the tf in
#make sure data type is complex
tfarray=np.zeros((nfbins,len(tlst)),dtype='complex128')
#create a frequency array with just positive frequencies
flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]
#calculate pseudo WV
for point,t in enumerate(tlst):
#find the smallest possible time shift
maxtau=min(t+Lg-1,fn-t+Lg,round(nfbins/2),Lh)
#create time lag list
taulst=np.arange(start=-min(Lg,fn-t),stop=min(Lg,t-1)+1,step=1,
dtype='int')
#calculate windowed correlation function of analytic function for
#zero frequency
tfarray[0,point]=sum(2*(g[Lg+taulst]/sum(g[Lg+taulst]))*fa[t-taulst-1]*
np.conjugate(fb[t-taulst-1]))
#calculate tfd by calculating convolution of window and correlation
#function as sum of correlation function over the lag period times the
#window at that point. Calculate symmetrical segments for FFT later
for mm in range(maxtau):
taulst=np.arange(start=-min(Lg,fn-t-mm-1),stop=min(Lg,t-mm-1)+1,
step=1,dtype='int')
#compute positive half
gm=2*(g[Lg+taulst]/sum(g[Lg+taulst]))
Rmm=sum(gm*fa[t+mm-taulst-1]*np.conjugate(fb[t-mm-taulst]))
tfarray[mm,point]=h[Lh+mm-1]*Rmm
#compute negative half
Rmm=sum(gm*fa[t-mm-taulst]*np.conjugate(fb[t+mm-taulst-1]))
tfarray[nfbins-mm-1,point]=h[Lh-mm]*Rmm
mm=round(nfbins/2)
if t<=fn-mm and t>=mm and mm<=Lh:
            #compute the symmetric term at the Nyquist bin
taulst=np.arange(start=-min(Lg,fn-t-mm),stop=min(Lg,fn-t,mm)+1,step=1,
dtype='int')
gm=g[Lg+taulst]/sum(g[Lg+taulst])
tfarray[mm-1,point]=.5*\
(sum(h[Lh+mm]*(gm*fa[t+mm-taulst-1]*
np.conjugate(fb[t-mm-taulst])))+\
sum(h[Lh-mm]*(gm*fa[t-mm-taulst]*
np.conjugate(fb[t+mm-taulst-1]))))
tfarray=np.fft.fft(tfarray,axis=0)
#rotate for plotting purposes so that (t=0,f=0) is at the lower left
tfarray=np.rot90(tfarray.T,1)
return tfarray,tlst,flst
def robustwvd(fx,nh=2**7-1,ng=2**4-1,tstep=2**4,nfbins=2**8,df=1.0,
sigmanh=None,sigmang=None):
"""
robustwvd(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,ng=2**5-1,
sigmanh=None,sigmang=None)
will calculate the smoothed pseudo Wigner-Ville distribution for a function
fx. smoothed with Gaussians windows to get best localization.
Inputs:
fx = array to estimate spwvd, input as [fx1,fx2] if computing cross
spectra
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
ng = length of time-domain smoothing window (needs to be odd)
nh = length of frequency-domain smoothing window (needs to be odd)
sigmanh = std of window h, ie full width half max of gaussian
sigmang = std of window g, ie full width half max of gaussian
Outputs:
tfarray = SPWVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm>fn:
fm,fn=fx.shape
except ValueError:
fn=len(fx)
fm=1
if fm>1:
print 'computing cross spectra'
#compute the analytic signal of function f and dctrend
fa=wvdas(fx[0])
fb=wvdas(fx[1])
else:
        #compute the analytic signal of function f and dctrend
        #(note: the hilbert-transform estimate below overwrites the wvdas result)
        fa=wvdas(fx)
        fa=sps.hilbert(dctrend(fx))
fb=fa.copy()
print 'Computed Analytic signal'
#make sure window length is odd
    if nh is None:
        nh=np.floor(fn/2.)
    #make sure the window length is odd
    if np.remainder(nh,2)==0:
        nh=nh+1
    #calculate length for time smoothing window
    if ng is None:
        ng=np.floor(fn/5.)
    if np.remainder(ng,2)==0:
        ng=ng+1
nh=int(nh)
ng=int(ng)
print 'nh= ',nh
print 'ng= ',ng
dt=1./(df*2.)
#get length of input time series
nfx=len(fa)
#make frequency smoothing window
    if sigmanh is None:
        sigmanh=nh/(5*np.sqrt(2*np.log(2)))
    h=sps.gaussian(nh,sigmanh)
    h=h/sum(h)
    #make a time smoothing window
    if sigmang is None:
        sigmang=ng/(5*np.sqrt(2*np.log(2)))
g=sps.gaussian(ng,sigmang)
mlst=np.arange(start=-nh/2+1,stop=nh/2+1,step=1,dtype='int')
#mlst=np.arange(nh,dtype='int')
tlst=np.arange(start=nh/2,stop=nfx-nh/2,step=tstep)
#make a frequency list for plotting exporting only positive frequencies
flst=np.fft.fftfreq(nfbins,dt)[nfbins/2:]#get only positive frequencies
flst[-1]=0
flstp=np.fft.fftfreq(nfbins,2*dt)[0:nfbins/2]
#create an empty array to put the tf in
tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')
for tpoint,nn in enumerate(tlst):
#calculate windowed correlation function of analytic function
fxwin=h*fa[nn+mlst]*fb[nn-mlst].conj()
for fpoint,mm in enumerate(flst):
fxmed=np.convolve(g,fxwin*np.exp(1j*4*np.pi*mlst*mm*dt),
mode='same')/(nh*ng)
fxmedpoint=np.median(fxmed.real)
if fxmedpoint==0.0:
tfarray[fpoint,tpoint]=1E-10
else:
tfarray[fpoint,tpoint]=fxmedpoint
tfarray=(4.*nh/dt)*tfarray
return tfarray,tlst,flstp
def specwv(fx,tstep=2**5,nfbins=2**10,nhs=2**8,nhwv=2**9-1,ngwv=2**3-1,df=1.0):
"""
specwv(f,tstep=2**5,nfbins=2**10,nh=2**8-1,ng=1,df=1.0) will calculate
the Wigner-Ville distribution mulitplied by the STFT windowed by the common
gaussian window h for a function f.
Inputs:
fx = array to compute the specwv
tstep = time step between windows
nfbins = number of frequencies
nhs = length of time-domain smoothing window for STFT should be even
nhwv = length of time-domain smoothing window for WV (needs to be odd)
ngwv = lenght of frequency-domain smoothing window (needs to be odd)
df = sampling frequency (Hz)
Outputs:
tfarray = SPECWV estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
#calculate stft
pst,tlst,flst=stft(fx,nh=nhs,tstep=tstep,nfbins=nfbins,df=df)
#calculate new time step so WVD and STFT will align
ntstep=len(fx)/(len(tlst)*2.)
#calculate spwvd
pwv,twv,fwv=spwvd(fx,tstep=ntstep,nfbins=nfbins,df=df,nh=nhwv,ng=ngwv)
#multiply the two together normalize
tfarray=pst/pst.max()*pwv/pwv.max()
return tfarray,tlst,flst
def modifiedb(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,beta=.2):
"""modifiedb(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,beta=.2)
will calculate the modified b distribution as defined by cosh(n)^-2 beta
for a function fx.
Inputs:
fx = array from which modifiedb will be calculated if computing cross
spectra input as [fx1,fx2]
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
nh = length of time-domain smoothing window (needs to be odd)
beta = smoothing coefficient
Outputs:
tfarray = modifiedB estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm>fn:
fm,fn=fx.shape
except ValueError:
fn=len(fx)
fm=1
if fm>1:
print 'computing cross spectra'
#compute the analytic signal of function f and dctrend
fa=wvdas(fx[0])
fb=wvdas(fx[1])
else:
        #compute the analytic signal of function f and dctrend
        #(note: the hilbert-transform estimate below overwrites the wvdas result)
        fa=wvdas(fx)
        fa=sps.hilbert(dctrend(fx))
fb=fa.copy()
#sampling period
df=float(df)
dt=1./df
tau=(nh-1)/2 #midpoint index of window h
#create a time array such that the first point is centered on time window
tlst=np.arange(start=0,stop=fn-1,step=tstep,dtype='int')
#create an empty array to put the tf in
tfarray=np.zeros((nfbins,len(tlst)),dtype='complex')
#create a frequency array with just positive frequencies
flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]
#calculate pseudo WV
for point,nn in enumerate(tlst):
#calculate the smallest timeshift possible
taun=min(nn,tau,fn-nn-1)
#make a timeshift array
taulst=np.arange(start=-taun,stop=taun+1,step=1,dtype='int')
#create modified b window
mbwin=np.cosh(taulst)**(-2*beta)
mbwin=mbwin/sum(mbwin)
MBwin=np.fft.fft(padzeros(mbwin,npad=nfbins))
#calculate windowed correlation function of analytic function
Rnn=np.conjugate(fa[nn-taulst])*fb[nn+taulst]
#calculate fft of windowed correlation function
FTRnn=MBwin*np.fft.fft(padzeros(Rnn,npad=nfbins))
#put into tfarray
tfarray[:,point]=FTRnn[::-1]
    #the WVD assumes the time series is sampled at twice Nyquist, so in
    #principle the time-frequency array could be cut in half; the full
    #array is returned here and any halving is left to the caller
return tfarray,tlst,flst
def robuststftMedian(fx,nh=2**8,tstep=2**5,df=1.0,nfbins=2**10):
"""
robuststftMedian(fx,nh=2**8,tstep=2**5,ng=1,df=1.0) will output an array
of the time-frequency robust spectrogram calculated using the vector median
simplification.
Inputs:
fx = the function to have a spectrogram computed for
nh = window length for each time step
tstep = time step between short windows
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = WVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
#get length of input time series
nfx=len(fx)
#compute time shift list
mlst=np.arange(start=-nh/2+1,stop=nh/2+1,step=1,dtype='int')
#compute time locations to take STFT
tlst=np.arange(start=0,stop=nfx-nh+1,step=tstep)
#make a frequency list for plotting exporting only positive frequencies
flst=np.fft.fftfreq(nfbins,1/df)
flstc=flst[nfbins/2:]
#Note: these are actually the negative frequencies but works better for
#calculations
flstp=flst[0:nfbins/2]
#make time window and normalize
sigmanh=nh/(6*np.sqrt(2*np.log(2)))
h=sps.gaussian(nh,sigmanh)
h=h/sum(h)
#create an empty array to put the tf in and initialize a complex value
tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')
#take the hilbert transform of the signal to make complex and remove
#negative frequencies
fa=sps.hilbert(dctrend(fx))
fa=fa/fa.std()
#make a frequency list for plotting exporting only positive frequencies
flst=np.fft.fftfreq(nfbins,1/df)[nfbins/2:]#get only positive frequencies
for tpoint,nn in enumerate(tlst):
#calculate windowed correlation function of analytic function
fxwin=h*fa[nn:nn+nh]
for fpoint,mm in enumerate(flstc):
fxmed=fxwin*np.exp(1j*2*np.pi*mlst*mm/df)
fxmedreal=np.median(fxmed.real)
fxmedimag=np.median(fxmed.imag)
if fxmedreal+1j*fxmedimag==0.0:
tfarray[fpoint,tpoint]=1E-10
else:
tfarray[fpoint,tpoint]=fxmedreal+1j*fxmedimag
#normalize tfarray
tfarray=(4.*nh*df)*tfarray
return tfarray,tlst,flstp
def robuststftL(fx,alpha=.325, nh=2**8,tstep=2**5,df=1.0,nfbins=2**10):
"""
robuststftL(fx,nh=2**8,tstep=2**5,ng=1,df=1.0) will output an array of the
time-frequency robust spectrogram by estimating the vector median and
summing terms estimated by alpha coefficients.
Inputs:
fx = the function to have a spectrogram computed for
alpha = robust parameter [0,.5] -> 0 gives spectrogram, .5 gives median stft
nh = window length for each time step
tstep = time step between short windows
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = robust L-estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
#get length of input time series
nfx=len(fx)
#compute time shift list
mlst=np.arange(start=-nh/2+1,stop=nh/2+1,step=1,dtype='int')
#compute time locations to take STFT
tlst=np.arange(start=0,stop=nfx-nh+1,step=tstep)
#make a frequency list for plotting exporting only positive frequencies
flst=np.fft.fftfreq(nfbins,1/df)
flstc=flst[nfbins/2:]
#Note: these are actually the negative frequencies but works better for
#calculations
flstp=flst[0:nfbins/2]
#make time window and normalize
sigmanh=nh/(6*np.sqrt(2*np.log(2)))
h=sps.gaussian(nh,sigmanh)
h=h/sum(h)
#create an empty array to put the tf in and initialize a complex value
tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')
#take the hilbert transform of the signal to make complex and remove
#negative frequencies
fa=sps.hilbert(dctrend(fx))
fa=fa/fa.std()
#make a frequency list for plotting exporting only positive frequencies
flst=np.fft.fftfreq(nfbins,1/df)[nfbins/2:]#get only positive frequencies
#create list of coefficients
a=np.zeros(nh)
    a[int((nh-2)*alpha):int(alpha*(2-nh)+nh-1)]=1./(nh*(1-2*alpha)+4*alpha)
for tpoint,nn in enumerate(tlst):
#calculate windowed correlation function of analytic function
fxwin=h*fa[nn:nn+nh]
for fpoint,mm in enumerate(flstc):
fxelement=fxwin*np.exp(1j*2*np.pi*mlst*mm/df)
fxreal=np.sort(fxelement.real)[::-1]
fximag=np.sort(fxelement.imag)[::-1]
tfpoint=sum(a*(fxreal+1j*fximag))
if tfpoint==0.0:
tfarray[fpoint,tpoint]=1E-10
else:
tfarray[fpoint,tpoint]=tfpoint
#normalize tfarray
tfarray=(4.*nh*df)*tfarray
return tfarray,tlst,flstp
def smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10,sigmaL=None):
"""
    smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10) will
    calculate the S-method by estimating the STFT first and computing the WV
    of window length L in the frequency domain. Larger L gives more of the
    WV estimate; L=0 gives back the STFT.
    Inputs:
        fx = the function to have an S-method computed for, if computing
             cross spectra input as [fx1,fx2]
L = window length in frequency domain
nh = window length for each time step
tstep = time step between short windows
ng = smoothing window along frequency plane should be odd
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = S-method estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
df=float(df)
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm>fn:
fm,fn=fx.shape
except ValueError:
fn=len(fx)
fm=1
if fm>1:
print 'computing cross spectra'
#compute the analytic signal of function f and dctrend
#fa=sps.hilbert(dctrend(fx[0]))
#fb=sps.hilbert(dctrend(fx[1]))
fa=fx[0]
fb=fx[1]
fa=fa.reshape(fn)
fb=fb.reshape(fn)
pxa,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)
pxb,tlst,flst=stft(fb,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)
pxx=pxa*pxb.conj()
else:
#compute the analytic signal of function f and dctrend
#fa=sps.hilbert(dctrend(fx))
fa=fx
fa=fa.reshape(fn)
fb=fa
pxx,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)
# pxb=pxa
    #make a new array to put the new tfd in
tfarray=abs(pxx)**2
#get shape of spectrogram
nf,nt=tfarray.shape
#create a list of frequency shifts
Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')
#create a frequency gaussian window
    if sigmaL is None:
sigmaL=L/(1*np.sqrt(2*np.log(2)))
p=sps.gaussian(L,sigmaL)
#make a matrix of windows
pm=np.zeros((L,nt))
for kk in range(nt):
pm[:,kk]=p
#loop over frequency and calculate the s-method
for ff in range(L/2,nf-L/2):
tfarray[ff,:]=tfarray[ff,:]+2*np.real(np.sum(pm*pxx[ff+Llst,:]*
pxx[ff-Llst,:].conj(),axis=0))
tfarray[L/2:-L/2]=tfarray[L/2:-L/2]/L
return tfarray,tlst,flst,pxx
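#Illustrative sketch (not part of the original module): with a moderate L the
#S-method sharpens the spectrogram of a chirp toward the WVD.
def _example_smethod():
    df=1000.0
    t=np.arange(0,4,1/df)
    sig=np.cos(2*np.pi*(10*t+15*t**2))
    tf,tlst,flst,pxx=smethod(sig,L=11,nh=2**7,tstep=2**6,df=df,nfbins=2**9)
    print(tf.shape)                              #-> (256, 61)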
def robustSmethod(fx,L=5,nh=2**7,tstep=2**5,nfbins=2**10,df=1.0,
robusttype='median',sigmal=None):
"""
robustSmethod(fx,L=5,nh=2**7,tstep=2**5,nfbins=2**10,df=1.0) computes the
robust Smethod via the robust spectrogram.
Inputs:
fx = array of data, if computing cross-spectra input as [fa,fb]
L = frequency smoothing window if robusttype='median'
nh = window length for STFT
tstep = time step for each STFT to be computed
nfbins = number of frequency bins to be calculate
df = sampling frequency
robusttype = type of robust STFT calculation can be 'median' or 'L'
sigmal = full-width half max of gaussian window applied in frequency
Outputs:
tfarray = robust S-method estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm>fn:
fm,fn=fx.shape
except ValueError:
fn=len(fx)
fm=1
if fm>1:
print 'computing cross spectra'
#compute the analytic signal of function f and dctrend
fa=fx[0].reshape(fn)
fb=fx[1].reshape(fn)
if robusttype=='median':
pxa,tlst,flst=robuststftMedian(fa,nh=nh,tstep=tstep,df=df,
nfbins=nfbins)
pxb,tlst,flst=robuststftMedian(fb,nh=nh,tstep=tstep,df=df,
nfbins=nfbins)
elif robusttype=='L':
pxa,tlst,flst=robuststftL(fa,nh=nh,tstep=tstep,df=df,nfbins=nfbins)
pxb,tlst,flst=robuststftL(fb,nh=nh,tstep=tstep,df=df,nfbins=nfbins)
else:
raise ValueError('robusttype undefined')
pxx=pxa*pxb.conj()
else:
fa=fx.reshape(fn)
if robusttype=='median':
pxx,tlst,flst=robuststftMedian(fa,nh=nh,tstep=tstep,df=df,
nfbins=nfbins)
elif robusttype=='L':
pxx,tlst,flst=robuststftL(fa,nh=nh,tstep=tstep,df=df,nfbins=nfbins)
else:
raise ValueError('robusttype undefined')
#compute frequency shift list
Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')
#compute the frequency window of length L
if sigmal is None:
sigmal=L/3*(np.sqrt(2*np.log(2)))
lwin=gausswin(L,sigmal)
lwin=lwin/sum(lwin)
pm=np.zeros((L,len(tlst)))
for kk in range(len(tlst)):
pm[:,kk]=lwin
smarray=pxx.copy()
#compute S-method
for ff in range(L/2,nfbins/2-L/2):
smarray[ff,:]=smarray[ff,:]+2*np.real(np.sum(pm*pxx[ff+Llst,:]*
pxx[ff-Llst,:].conj(),axis=0))
# for tt in range(len(tlst)):
# for kk in range((L-1)/2,len(flst)-(L-1)/2):
# smarray[kk,tt]=abs(pxx[kk,tt])+np.sqrt(abs(2*sum(lwin*
# pxx[kk+Llst,tt]*pxx[kk-Llst,tt].conj())))
smarray=(2./(L*nh))*smarray
return smarray,tlst,flst,pxx
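def _demo_robustSmethod():
    """Illustrative usage sketch added by the editor (not part of the
    original module): robust S-method of a sinusoid in heavy-tailed noise,
    using the median-based robust STFT. The test signal is hypothetical."""
    n=2**11
    t=np.arange(n)
    fx=np.sin(2*np.pi*.1*t)+.5*np.random.standard_t(2,n)
    smarray,tlst,flst,pxx=robustSmethod(fx,L=5,robusttype='median')
    return smarray,tlst,flst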
def reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,
thresh=.01,L=5):
"""
reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,
thresh=.01,L=5)
will calculate the reassigned S-method as described by Djurovic [1999] by
using the spectrogram to estimate the reassignment
Inputs:
fx = 1-d array to be processed
nh = window length for each time instance
tstep = step between time instances
nfbins = number of frequency bins, note output will be nfbins/2 due to
symmetry of the FFT
df = sampling rate (Hz)
alpha = inverse of full-width half max of gaussian window, smaller
numbers mean broader windows
thresh = threshold for reassignment, lower numbers mean more points
reassigned, higher numbers mean fewer points reassigned
L = length of window for S-method calculation, higher numbers tend
toward the WVD
Outputs:
rtfarray = reassigned S-method shape of (nfbins/2,len(fx)/tstep)
tlst = list of time instances where rtfarray was calculated
flst = positive frequencies
sm = S-method array
"""
if type(fx) is list:
fx=np.array(fx)
try:
fn,fm=fx.shape
if fm>fn:
fm,fn=fx.shape
except ValueError:
fn=len(fx)
fm=1
if fm>1:
print 'computing cross spectra'
#compute the analytic signal of function f and dctrend
#fa=sps.hilbert(dctrend(fx[0]))
#fb=sps.hilbert(dctrend(fx[1]))
fa=fx[0]
fb=fx[1]
fa=fa.reshape(fn)
fb=fb.reshape(fn)
else:
fa=fx
fa=fa.reshape(fn)
fb=fa.copy()
nx=len(fx)
#compute gaussian window
h=gausswin(nh,alpha=alpha)
#h=np.hanning(nh)
lh=(nh-1)/2
#compute ramp window
th=h*np.arange(start=-lh,stop=lh+1,step=1)
#compute derivative of window
dh=dwindow(h)
#make a time list of indexes
tlst=np.arange(start=0,stop=nx,step=tstep)
nt=len(tlst)
#make frequency list for plotting
flst=np.fft.fftfreq(nfbins,1./df)[:nfbins/2]
#initialize some time-frequency arrays
tfh=np.zeros((nfbins,nt),dtype='complex128')
tfth=np.zeros((nfbins,nt),dtype='complex128')
tfdh=np.zeros((nfbins,nt),dtype='complex128')
#compute components for reassignment
for ii,tt in enumerate(tlst):
#create a time shift list
tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),
stop=min([np.round(nx/2.),lh,nx-tt-1])+1)
#compute the frequency spots to be calculated
ff=np.remainder(nfbins+tau,nfbins)
#make lists of data points for each window calculation
xlst=tt+tau
hlst=lh+tau
normh=np.sqrt(np.sum(abs(h[hlst])**2))
tfh[ff,ii]=fx[xlst]*h[hlst].conj()/normh
tfth[ff,ii]=fx[xlst]*th[hlst].conj()/normh
tfdh[ff,ii]=fx[xlst]*dh[hlst].conj()/normh
#compute Fourier Transform
spech=np.fft.fft(tfh,axis=0)
specth=np.fft.fft(tfth,axis=0)
specdh=np.fft.fft(tfdh,axis=0)
#get only positive frequencies
spech=spech[nfbins/2:,:]
specth=specth[nfbins/2:,:]
specdh=specdh[nfbins/2:,:]
#check to make sure no spurious zeros floating around
szf=np.where(abs(spech)<1.E-6)
spech[szf]=0.0+0.0j
zerofind=np.nonzero(abs(spech))
twspec=np.zeros((nfbins/2,nt),dtype='float')
dwspec=np.zeros((nfbins/2,nt),dtype='float')
twspec[zerofind]=np.round(np.real(specth[zerofind]/spech[zerofind]))
dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specdh[zerofind]/
spech[zerofind])/(np.pi))
#get shape of spectrogram
nf,nt=spech.shape
#-----calculate s-method-----
Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')
#make an empty array of zeros
sm=np.zeros_like(spech)
#fill the edges where a full window of length L does not fit (top and bottom)
sm[0:L/2,:]=abs(spech[0:L/2,:])**2
sm[-L/2:,:]=abs(spech[-L/2:,:])**2
#calculate s-method
for ff in range(L/2,nf-L/2-1):
sm[ff,:]=2*np.real(np.sum(spech[ff+Llst,:]*spech[ff-Llst,:].conj(),
axis=0))/L
#------compute reassignment-----
rtfarray=np.zeros((nfbins/2,nt))
threshold=thresh*np.max(abs(sm))
for nn in range(nt):
for kk in range(nf):
if abs(spech[kk,nn])>threshold:
#get center of gravity index in time direction from spectrogram
nhat=int(nn+twspec[kk,nn])
nhat=int(min([max([nhat,1]),nt-1]))
#get center of gravity index in frequency direction from spec
khat=int(kk-dwspec[kk,nn])
khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,
nfbins/2))
rtfarray[khat,nhat]=rtfarray[khat,nhat]+abs(sm[kk,nn])
else:
rtfarray[kk,nn]=rtfarray[kk,nn]+sm[kk,nn]
#restore the edges where a full window of length L does not fit
rtfarray[:L/2,:]=abs(sm[:L/2,:])
rtfarray[-L/2:,:]=abs(sm[-L/2:,:])
tz=np.where(rtfarray==0)
rtfarray[tz]=1.0
tz=np.where(sm==0.0)
sm[tz]=1.0
#scale
rtfarray=abs(rtfarray)
return rtfarray,tlst,flst,sm
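def _demo_reassignedSmethod():
    """Illustrative usage sketch added by the editor (not part of the
    original module): reassigned S-method of a synthetic tone, keeping the
    defaults defined above. The test signal is hypothetical."""
    fx=np.cos(2*np.pi*.2*np.arange(2**10))
    rtfarray,tlst,flst,sm=reassignedSmethod(fx)
    return rtfarray,tlst,flst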
def plottf(tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',
dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,
cbori=None,cbshrink=None,cbaspect=None,cbpad=None,powscale='log',
normalize='n',yscale='log',period='n'):
"""plottf(tfarray,tlst,flst,fignum=1) will plot a calculated tfarray with
limits corresponding to tlst and flst.
Inputs:
starttime = starttime measured in timeincrement
timeinc = 'hrs','min' or 'sec'
vmm = [vmin,vmax] a list for min and max
title = title string
cmap = colormap scheme default is jet, type help on matplotlib.cm
aspect = aspect of plot, default is auto, can be 'equal' or a scalar
interpolation = type of color interpolation, type help on
matplotlib.pyplot.imshow
cbori = colorbar orientation 'horizontal' or 'vertical'
cbshrink = percentage of 1 for shrinking colorbar
cbaspect = aspect ratio of long to short dimensions
cbpad = pad between colorbar and axis
powscale = linear or log for power
normalize = y or n, yes for normalization, n for no
yscale = linear or log plot yscale
period = 'y' or 'n' to plot in period instead of frequency
Outputs:
plot
"""
#time increment
if timeinc=='hrs':
tinc=3600/dt
elif timeinc=='min':
tinc=60/dt
elif timeinc=='sec':
tinc=1/dt
else:
raise ValueError(timeinc+' is not defined')
#colormap
if cmap is None:
    cmap='jet'
#aspect ratio
if aspect is None:
    aspect='auto'
#interpolation
if interpolation is None:
    interpolation='gaussian'
#colorbar orientation
if cbori is None:
    cbori='vertical'
#colorbar shrinkage
if cbshrink is None:
    cbshrink=.8
#colorbar aspect
if cbaspect is None:
    cbaspect=20
#colorbar pad
if cbpad is None:
    cbpad=.05
#scale
if powscale=='log':
zerofind=np.where(abs(tfarray)==0)
tfarray[zerofind]=1.0
if normalize=='y':
plottfarray=10*np.log10(abs(tfarray/np.max(abs(tfarray))))
else:
plottfarray=10*np.log10(abs(tfarray))
elif powscale=='linear':
if normalize=='y':
plottfarray=abs(tfarray/np.max(abs(tfarray)))
else:
plottfarray=abs(tfarray)
#period or frequency
if period=='y':
flst[1:]=1./flst[1:]
flst[0]=2*flst[1]
elif period=='n':
pass
#set properties for the plot
plt.rcParams['font.size']=9
plt.rcParams['figure.subplot.left']=.12
plt.rcParams['figure.subplot.right']=.99
plt.rcParams['figure.subplot.bottom']=.12
plt.rcParams['figure.subplot.top']=.96
plt.rcParams['figure.subplot.wspace']=.25
plt.rcParams['figure.subplot.hspace']=.20
#set the font dictionary
fdict={'size':10,'weight':'bold'}
#make a meshgrid if yscale is logarithmic
if yscale=='log':
logt,logf=np.meshgrid(tlst,flst)
#make figure
fig1=plt.figure(fignum,[10,10],dpi=300)
ax=fig1.add_subplot(1,1,1)
if vmm!=None:
vmin=vmm[0]
vmax=vmm[1]
#add in log yscale
if yscale=='log':
#need to flip the matrix so that origin is bottom right
cbp=ax.pcolormesh(logt,logf,np.flipud(plottfarray),
cmap=cmap,vmin=vmin,vmax=vmax)
ax.semilogy()
ax.set_ylim(flst[1],flst[-1])
ax.set_xlim(tlst[0],tlst[-1])
cb=plt.colorbar(cbp,orientation=cbori,shrink=cbshrink,pad=cbpad,
aspect=cbaspect)
else:
plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,
tlst[-1]/tinc+starttime,flst[1],flst[-1]),aspect=aspect,
vmin=vmin,vmax=vmax,cmap=cmap,
interpolation=interpolation)
cb=plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,
aspect=cbaspect)
else:
if yscale=='log':
cbp=ax.pcolormesh(logt,logf,np.flipud(plottfarray),
cmap=cmap)
ax.semilogy()
ax.set_ylim(flst[1],flst[-1])
ax.set_xlim(tlst[0],tlst[-1])
cb=plt.colorbar(cbp,orientation=cbori,shrink=cbshrink,pad=cbpad,
aspect=cbaspect)
else:
plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,
tlst[-1]/tinc+starttime,flst[1],flst[-1]),aspect=aspect,
cmap=cmap,interpolation=interpolation)
cb=plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,
aspect=cbaspect)
ax.set_xlabel('time('+timeinc+')',fontdict=fdict)
if period=='y':
ax.set_ylabel('period (s)',fontdict=fdict)
else:
ax.set_ylabel('frequency (Hz)',fontdict=fdict)
if title!=None:
ax.set_title(title,fontdict=fdict)
plt.show()
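def _demo_plottf():
    """Illustrative usage sketch added by the editor (not part of the
    original module): image an S-method estimate on a linear frequency
    axis. The test signal is hypothetical; np and plt are this module's
    numpy and matplotlib bindings."""
    n=2**12
    t=np.arange(n)
    fx=np.sin(2*np.pi*(.05+.2*t/float(n))*t)
    tfarray,tlst,flst,pxx=smethod(fx)
    plottf(abs(tfarray),tlst,flst,timeinc='sec',dt=1.0,yscale='linear',
           title='S-method of a synthetic chirp')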
def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',
dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,
cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',
scale='log'):
"""plottf(tfarray,tlst,flst,fignum=1) will plot a calculated tfarray with
limits corresponding to tlst and flst. Can have:
Inputs:
starttime = starttime measured in timeincrement
timeincrement = 'hrs','min' or 'sec'
vmm = [vmin,vmax] a list for min and max
title = title string
cmap = colormap scheme default is jet, type help on matplotlib.cm
aspect = aspect of plot, default is auto, can be 'equal' or a scalar
interpolation = type of color interpolation, type help on
matplotlib.pyplot.imshow
cbori = colorbar orientation 'horizontal' or 'vertical'
cbshrink = percentage of 1 for shrinking colorbar
cbaspect = aspect ratio of long to short dimensions
cbpad = pad between colorbar and axis
normalize = 'y' or 'n', 'y' for normalization, 'n' for none
Outputs:
plot
"""
#time increment
if timeinc=='hrs':
tinc=3600/dt
elif timeinc=='min':
tinc=60/dt
elif timeinc=='sec':
tinc=1/dt
else:
raise ValueError(timeinc+' is not defined')
#colormap
if cmap is None:
    cmap='jet'
#aspect ratio
if aspect is None:
    aspect='auto'
#interpolation
if interpolation is None:
    interpolation='gaussian'
#colorbar orientation
if cbori is None:
    cbori='vertical'
#colorbar shrinkage
if cbshrink is None:
    cbshrink=.99
#colorbar aspect
if cbaspect is None:
    cbaspect=20
#colorbar pad
if cbpad is None:
    cbpad=.1
#scale
if scale=='log':
zerofind=np.where(abs(tfarray)==0)
tfarray[zerofind]=1.0
if normalize=='y':
plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))
else:
plottfarray=20*np.log10(abs(tfarray))
elif scale=='linear':
if normalize=='y':
plottfarray=abs(tfarray/np.max(abs(tfarray)))**2
else:
plottfarray=abs(tfarray)**2
t=np.arange(len(fx))*dt+starttime*dt
FX=np.fft.fft(padzeros(fx))
FXfreq=np.fft.fftfreq(len(FX),dt)
#set some plot parameters
plt.rcParams['font.size']=10
plt.rcParams['figure.subplot.left']=.13
plt.rcParams['figure.subplot.right']=.98
plt.rcParams['figure.subplot.bottom']=.07
plt.rcParams['figure.subplot.top']=.96
plt.rcParams['figure.subplot.wspace']=.25
plt.rcParams['figure.subplot.hspace']=.20
#plt.rcParams['font.family']='helvetica'
fig=plt.figure(fignum)
plt.clf()
#plot FFT of fx
fax=fig.add_axes([.05,.25,.1,.7])
plt.semilogx(abs(FX[0:len(FX)/2]/max(abs(FX))),FXfreq[0:len(FX)/2],'-k')
plt.axis('tight')
plt.ylim(0,FXfreq[len(FX)/2-1])
# fax.xaxis.set_major_locator(MultipleLocator(.5))
#plot TFD
pax=fig.add_axes([.25,.25,.75,.7])
if vmm!=None:
vmin=vmm[0]
vmax=vmm[1]
plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,
flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,
interpolation=interpolation)
else:
plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,
flst[0],flst[-1]),aspect=aspect,cmap=cmap,
interpolation=interpolation)
plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')
plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')
if title!=None:
plt.title(title,fontsize=14,fontweight='bold')
plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)
#plot timeseries
tax=fig.add_axes([.25,.05,.60,.1])
plt.plot(t,fx,'-k')
plt.axis('tight')
plt.show()
def stfbss(X,nsources=5,ng=2**5-1,nh=2**9-1,tstep=2**6-1,df=1.0,nfbins=2**10,
tftol=1.E-8,normalize=True):
"""
stfbss(X,nsources=5,ng=2**5-1,nh=2**9-1,tstep=2**6-1,df=1.0,nfbins=2**10,
tftol=1.E-8,normalize=True)
estimates sources using a blind source algorithm based on spatial
time-frequency distributions. At the moment this algorithm uses the SPWVD
to estimate TF distributions.
Inputs:
X = m x n array of time series, where m is number of time series and n
is length of each time series
nsources = number of estimated sources
ng = frequency window length
nh = time window length
tstep = time step increment
df = sampling frequency (Hz)
nfbins = number of frequencies
tftol = tolerance for a time-frequency point to be estimated as a cross
term or as an auto term, the higher the number the more auto
terms.
normalize = True or False, True to normalize, False if already
normalized
Outputs:
Se = estimated individual signals up to a permutation and scale
Ae = estimated mixing matrix as X=A*S
"""
#get shape of timeseries matrix,
#m=number of channels
#tlen=length of timeseries
m,maxn=X.shape
n=nsources
#get number of time bins
ntbins=int(float(maxn)/tstep)
tfkwargs={'ng':ng,'nh':nh,'df':df,'nfbins':nfbins,'tstep':tstep}
#remove dc component from time series and normalize
if normalize==True:
for ii in range(m):
X[ii,:]=X[ii,:]-np.mean(X[ii,:])
X[ii,:]=X[ii,:]/X[ii,:].std()
#===============================================================================
# Whiten data and Compute Whitening matrix
#===============================================================================
#whiten data to get a unitary matrix with unit variance and zero mean
#compute covariance matrix
Rxx=(np.dot(X,X.T))/float(maxn)
#calculate eigen decomposition
[l,u]=np.linalg.eig(Rxx)
#sort eigenvalues from smallest to largest assuming largest are sources and
#smallest are noise
lspot=l.argsort()
eigval=l[lspot]
eigvec=u[:,lspot]
#calculate the noise variance as mean of non-principal components
sigman=np.mean(eigval[0:m-n])
#compute scaling factor for whitening matrix
wscale=1/np.sqrt(eigval[m-n:m]-sigman)
#compute whitening matrix
W=np.zeros((m,n))
for kk in range(n):
W[:,kk]=wscale[kk]*eigvec[:,m-n+kk].T
W=W.T
#compute whitened signal vector. Note the dimensionality is reduced from [mxn]
#to [nxn] making the computation simpler.
Z=np.dot(W,X)
#===============================================================================
# Compute Spatial Time Frequency Distribution
#===============================================================================
stfd=np.zeros((n,n,nfbins,ntbins+1),dtype='complex128')
Za=np.array(Z.copy())
#compute auto terms
for ii in range(n):
pswvd,tswvd,fswvd=spwvd(Za[ii].reshape(maxn),**tfkwargs)
stfd[ii,ii,:,:]=pswvd
#compute cross terms
for jj in range(n):
for kk in range(jj,n):
pswvd,tswvd,fswvd=spwvd([Za[jj].reshape(maxn),Za[kk].reshape(maxn)],
**tfkwargs)
stfd[jj,kk,:,:]=pswvd
stfd[kk,jj,:,:]=pswvd.conj()
#===============================================================================
# Compute criteria for cross terms
#===============================================================================
stfdTr=np.zeros((nfbins,ntbins))
C=np.zeros((nfbins,ntbins))
for ff in range(nfbins):
for tt in range(ntbins):
#compensate for noise
stfd[:,:,ff,tt]=stfd[:,:,ff,tt]-sigman*np.matrix(W)*np.matrix(W.T)
#compute the trace
stfdTr[ff,tt]=abs(np.trace(stfd[:,:,ff,tt]))
#compute mean over entire t-f plane
trmean=stfdTr.mean()
#find t-f points that meet the criteria
fspot,tspot=np.nonzero(stfdTr>trmean)
for ll in range(len(fspot)):
treig=abs(np.linalg.eig(stfd[:,:,fspot[ll],tspot[ll]])[0])
if sum(treig)!=0 and sum(treig)>tftol:
C[fspot[ll],tspot[ll]]=max(treig)/sum(treig)
else:
C[fspot[ll],tspot[ll]]=0
#compute gradients and jacobi matrices
negjacobi=np.zeros((nfbins,ntbins))
smallgrad=np.zeros((nfbins,ntbins))
maxpoints=np.zeros((nfbins,ntbins))
gradt,gradf=np.gradient(C)
Jtt,Jtf=np.gradient(gradt)
Jft,Jff=np.gradient(gradf)
#get points when L2 of gradient is smaller than tolerance level
smallgrad=np.where(np.sqrt(gradt**2+gradf**2)<tftol,1,0)
#get points where the Jacobi is negative definite
detjacobi=Jtt*Jff-Jtf*Jft
negjacobi=np.where(detjacobi>0,1,0)*np.where(Jtt<0,1,0)\
*np.where((Jtt+Jff)<0,1,0)
maxpoints=smallgrad*negjacobi
gfspot,gtspot=np.nonzero(maxpoints)
ntfpoints=len(gfspot)
if ntfpoints==0:
raise ValueError('Found no tf points, relax tolerance')
else:
print 'Found '+str(ntfpoints)+' t-f points'
for rr in range(ntfpoints):
if rr==0:
Rjd=stfd[:,:,gfspot[rr],gtspot[rr]]
else:
Rjd=np.concatenate((Rjd,stfd[:,:,gfspot[rr],gtspot[rr]]),axis=1)
Rjd=np.array(Rjd)
#===============================================================================
# Calculate Joint Diagonalization
#===============================================================================
#get size of array of matrices to be diagonalized
mtf,nm=Rjd.shape #mtf is number of t-f points, nm is number of matrices
#set up some initial parameters
V=np.eye(mtf)
#update boolean
encore=True
#Total number of rotations
updates=0
sweep=0
#print 'Computing Joint Diagonalization'
# Joint diagonalization proper
# ============================
while encore:
#reset some parameters
encore=False
sweep+=1
upds=0
Vkeep=V
for p in range(mtf):
for q in range(p+1,mtf):
#set up indices
qi=np.arange(start=q,stop=nm,step=mtf)
pi=np.arange(start=p,stop=nm,step=mtf)
# computation of Givens angle
g=np.array([Rjd[p,pi]-Rjd[q,qi],Rjd[p,qi],Rjd[q,pi]])
gg=np.real(np.dot(g,g.T))
ton=gg[0,0]-gg[1,1]
toff=gg[0,1]+gg[1,0]
theta=0.5*np.arctan2(toff,ton+np.sqrt(ton**2+toff**2))
# Givens update
if abs(theta) > tftol:
encore=True
upds+=1
c=np.cos(theta)
s=np.sin(theta)
G=np.matrix([[c,-s],[s,c]])
pair =np.array([p,q])
V[:,pair]=V[:,pair]*G
Rjd[pair,:]=G.T*Rjd[pair,:]
Rjd[:,np.concatenate([pi,qi])]=np.append(
c*Rjd[:,pi]+s*Rjd[:,qi],
-s*Rjd[:,pi]+c*Rjd[:,qi],axis=1)
updates+=upds
print 'Updated '+str(updates)+' times.'
#compute estimated signal matrix
Se=np.dot(V.T,Z)
#compute estimated mixing matrix
Ae=np.dot(np.linalg.pinv(W),V)
return Se,Ae
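def _demo_stfbss():
    """Illustrative usage sketch added by the editor (not part of the
    original module): separate two synthetic sources from three noisy
    mixtures. The mixing matrix and sources are hypothetical, and spwvd is
    assumed to be defined earlier in this module."""
    n=2**12
    t=np.arange(n)
    s=np.array([np.sin(2*np.pi*.05*t),np.sign(np.sin(2*np.pi*.013*t))])
    A=np.array([[1.,.5],[.3,1.],[.7,.2]])
    X=np.dot(A,s)+.01*np.random.randn(3,n)
    Se,Ae=stfbss(X,nsources=2)
    return Se,Ae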
|
geophysics/mtpy
|
mtpy/legacy/tftools.py
|
Python
|
gpl-3.0
| 60,744
|
[
"Gaussian"
] |
73f1940b7be0d6403fac3db5a69d77429fbf74a4fe2ea9eab79aea54b72df204
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This module has methods for parsing names and versions of packages from URLs.
The idea is to allow package creators to supply nothing more than the
download location of the package, and figure out version and name information
from there.
**Example:** when spack is given the following URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz
It can figure out that the package name is ``hdf``, and that it is at version
``4.2.12``. This is useful for making the creation of packages simple: a user
just supplies a URL and skeleton code is generated automatically.
Spack can also figure out that it can most likely download 4.2.6 at this URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.6/src/hdf-4.2.6.tar.gz
This is useful if a user asks for a package at a particular version number;
spack doesn't need anyone to tell it where to get the tarball even though
it's never been told about that version before.
"""
import os
import re
from six import StringIO
from six.moves.urllib.parse import urlsplit, urlunsplit
import llnl.util.tty as tty
from llnl.util.tty.color import colorize
import spack.error
import spack.util.compression as comp
from spack.version import Version
#
# Note: We call the input to most of these functions a "path" but the functions
# work on paths and URLs. There's not a good word for both of these, but
# "path" seemed like the most generic term.
#
def find_list_url(url):
"""Finds a good list URL for the supplied URL.
By default, returns the dirname of the archive path.
Provides special treatment for the following websites, which have a
unique list URL different from the dirname of the download URL:
========= =======================================================
GitHub https://github.com/<repo>/<name>/releases
GitLab https://gitlab.\*/<repo>/<name>/tags
BitBucket https://bitbucket.org/<repo>/<name>/downloads/?tab=tags
CRAN https://\*.r-project.org/src/contrib/Archive/<name>
========= =======================================================
Parameters:
url (str): The download URL for the package
Returns:
str: The list URL for the package
"""
url_types = [
# GitHub
# e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
(r'(.*github\.com/[^/]+/[^/]+)',
lambda m: m.group(1) + '/releases'),
# GitLab
# e.g. https://gitlab.dkrz.de/k202009/libaec/uploads/631e85bcf877c2dcaca9b2e6d6526339/libaec-1.0.0.tar.gz
(r'(.*gitlab[^/]+/[^/]+/[^/]+)',
lambda m: m.group(1) + '/tags'),
# BitBucket
# e.g. https://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2
(r'(.*bitbucket.org/[^/]+/[^/]+)',
lambda m: m.group(1) + '/downloads/?tab=tags'),
# CRAN
# e.g. https://cran.r-project.org/src/contrib/Rcpp_0.12.9.tar.gz
# e.g. https://cloud.r-project.org/src/contrib/rgl_0.98.1.tar.gz
(r'(.*\.r-project\.org/src/contrib)/([^_]+)',
lambda m: m.group(1) + '/Archive/' + m.group(2)),
]
for pattern, fun in url_types:
match = re.search(pattern, url)
if match:
return fun(match)
else:
return os.path.dirname(url)
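def _example_find_list_url():
    """Illustrative check added by the editor (not part of the original
    module): the GitHub special case from the table above."""
    url = 'https://github.com/llnl/callpath/archive/v1.0.1.tar.gz'
    assert find_list_url(url) == 'https://github.com/llnl/callpath/releases'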
def strip_query_and_fragment(path):
try:
components = urlsplit(path)
stripped = components[:3] + (None, None)
query, frag = components[3:5]
suffix = ''
if query:
suffix += '?' + query
if frag:
suffix += '#' + frag
return (urlunsplit(stripped), suffix)
except ValueError:
tty.debug("Got error parsing path %s" % path)
return (path, '') # Ignore URL parse errors here
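def _example_strip_query_and_fragment():
    """Illustrative check added by the editor (not part of the original
    module): the query string is split off and returned separately."""
    url = ('https://github.com/losalamos/CLAMR/blob/packages/'
           'PowerParser_v2.0.7.tgz?raw=true')
    assert strip_query_and_fragment(url) == (
        'https://github.com/losalamos/CLAMR/blob/packages/'
        'PowerParser_v2.0.7.tgz', '?raw=true')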
def strip_version_suffixes(path):
"""Some tarballs contain extraneous information after the version:
* ``bowtie2-2.2.5-source``
* ``libevent-2.0.21-stable``
* ``cuda_8.0.44_linux.run``
These strings are not part of the version number and should be ignored.
This function strips those suffixes off and returns the remaining string.
The goal is that the version is always the last thing in ``path``:
* ``bowtie2-2.2.5``
* ``libevent-2.0.21``
* ``cuda_8.0.44``
Args:
path (str): The filename or URL for the package
Returns:
str: The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_version_offset
# NOTE: The problem is that we would have to add these regexes to the end
# NOTE: of every single version regex. Easier to just strip them off
# NOTE: permanently
suffix_regexes = [
# Download type
'[Ii]nstall',
'all',
'src(_0)?',
'[Ss]ources?',
'file',
'full',
'single',
'public',
'with[a-zA-Z_-]+',
'bin',
'binary',
'run',
'[Uu]niversal',
'jar',
'complete',
'dynamic',
'oss',
'gem',
'tar',
'sh',
# Download version
'release',
'stable',
'[Ff]inal',
'rel',
'orig',
'dist',
'\+',
# License
'gpl',
# Arch
# Needs to come before and after OS, appears in both orders
'ia32',
'intel',
'amd64',
'x64',
'x86_64',
'x86',
'i[36]86',
'ppc64(le)?',
'armv?(7l|6l|64)',
# OS
'[Ll]inux(_64)?',
'[Uu]ni?x',
'[Ss]un[Oo][Ss]',
'[Mm]ac[Oo][Ss][Xx]?',
'[Oo][Ss][Xx]',
'[Dd]arwin(64)?',
'[Aa]pple',
'[Ww]indows',
'[Ww]in(64|32)?',
'[Cc]ygwin(64|32)?',
'[Mm]ingw',
# Arch
# Needs to come before and after OS, appears in both orders
'ia32',
'intel',
'amd64',
'x64',
'x86_64',
'x86',
'i[36]86',
'ppc64(le)?',
'armv?(7l|6l|64)?',
# PyPI
'[._-]py[23].*\.whl',
'[._-]cp[23].*\.whl',
'[._-]win.*\.exe',
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path = re.sub(r'[._-]?' + regex + '$', '', path)
return path
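def _example_strip_version_suffixes():
    """Illustrative checks added by the editor (not part of the original
    module), mirroring the docstring examples above."""
    assert strip_version_suffixes('bowtie2-2.2.5-source') == 'bowtie2-2.2.5'
    assert strip_version_suffixes('libevent-2.0.21-stable') == 'libevent-2.0.21'
    assert strip_version_suffixes('cuda_8.0.44_linux') == 'cuda_8.0.44'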
def strip_name_suffixes(path, version):
"""Most tarballs contain a package name followed by a version number.
However, some also contain extraneous information in-between the name
and version:
* ``rgb-1.0.6``
* ``converge_install_2.3.16``
* ``jpegsrc.v9b``
These strings are not part of the package name and should be ignored.
This function strips the version number and any extraneous suffixes
off and returns the remaining string. The goal is that the name is
always the last thing in ``path``:
* ``rgb``
* ``converge``
* ``jpeg``
Args:
path (str): The filename or URL for the package
version (str): The version detected for this URL
Returns:
str: The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_name_offset
# NOTE: The problem is that we would have to add these regexes to every
# NOTE: single name regex. Easier to just strip them off permanently
suffix_regexes = [
# Strip off the version and anything after it
# name-ver
# name_ver
# name.ver
r'[._-]v?' + str(version) + '.*',
# namever
str(version) + '.*',
# Download type
'install',
'src',
'(open)?[Ss]ources?',
'[._-]archive',
'[._-]std',
# Download version
'release',
'snapshot',
'distrib',
# VCS
'0\+bzr',
# License
'gpl',
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path = re.sub('[._-]?' + regex + '$', '', path)
return path
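def _example_strip_name_suffixes():
    """Illustrative checks added by the editor (not part of the original
    module), mirroring the docstring examples above; the version strings
    passed in are assumed."""
    assert strip_name_suffixes('rgb-1.0.6', '1.0.6') == 'rgb'
    assert strip_name_suffixes('jpegsrc.v9b', '9b') == 'jpeg'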
def split_url_extension(path):
"""Some URLs have a query string, e.g.:
1. https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true
2. http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz
3. https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0
In (1), the query string needs to be stripped to get at the
extension, but in (2) & (3), the filename is IN a single final query
argument.
This strips the URL into three pieces: ``prefix``, ``ext``, and ``suffix``.
The suffix contains anything that was stripped off the URL to
get at the file extension. In (1), it will be ``'?raw=true'``, but
in (2), it will be empty. In (3) the suffix is a parameter that follows
after the file extension, e.g.:
1. ``('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7', '.tgz', '?raw=true')``
2. ``('http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin', '.tar.gz', None)``
3. ``('https://gitlab.kitware.com/vtk/vtk/repository/archive', '.tar.bz2', '?ref=v7.0.0')``
"""
prefix, ext, suffix = path, '', ''
# Strip off sourceforge download suffix.
# e.g. https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download
match = re.search(r'(.*(?:sourceforge\.net|sf\.net)/.*)(/download)$', path)
if match:
prefix, suffix = match.groups()
ext = comp.extension(prefix)
if ext is not None:
prefix = comp.strip_extension(prefix)
else:
prefix, suf = strip_query_and_fragment(prefix)
ext = comp.extension(prefix)
prefix = comp.strip_extension(prefix)
suffix = suf + suffix
if ext is None:
ext = ''
return prefix, ext, suffix
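def _example_split_url_extension():
    """Illustrative check added by the editor (not part of the original
    module): case (1) from the docstring above."""
    prefix, ext, suffix = split_url_extension(
        'https://github.com/losalamos/CLAMR/blob/packages/'
        'PowerParser_v2.0.7.tgz?raw=true')
    assert prefix == ('https://github.com/losalamos/CLAMR/blob/packages/'
                      'PowerParser_v2.0.7')
    assert ext == '.tgz'
    assert suffix == '?raw=true'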
def determine_url_file_extension(path):
"""This returns the type of archive a URL refers to. This is
sometimes confusing because of URLs like:
(1) https://github.com/petdance/ack/tarball/1.93_02
Where the URL doesn't actually contain the filename. We need
to know what type it is so that we can appropriately name files
in mirrors.
"""
match = re.search(r'github.com/.+/(zip|tar)ball/', path)
if match:
if match.group(1) == 'zip':
return 'zip'
elif match.group(1) == 'tar':
return 'tar.gz'
prefix, ext, suffix = split_url_extension(path)
return ext
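def _example_determine_url_file_extension():
    """Illustrative check added by the editor (not part of the original
    module): GitHub tarball URLs carry no filename, so the archive type is
    inferred from the /tarball/ path component."""
    url = 'https://github.com/petdance/ack/tarball/1.93_02'
    assert determine_url_file_extension(url) == 'tar.gz'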
def parse_version_offset(path):
"""Try to extract a version string from a filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
tuple of (Version, int, int, int, str): A tuple containing:
version of the package,
first index of version,
length of version string,
the index of the matching regex,
the matching regex
Raises:
UndetectableVersionError: If the URL does not match any regexes
"""
original_path = path
# path: The prefix of the URL, everything before the ext and suffix
# ext: The file extension
# suffix: Any kind of query string that begins with a '?'
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
original_stem = os.path.basename(path)
# Try to strip off anything after the version number
stem = strip_version_suffixes(original_stem)
# Assumptions:
#
# 1. version always comes after the name
# 2. separators include '-', '_', and '.'
# 3. names can contain A-Z, a-z, 0-9, '+', separators
# 4. versions can contain A-Z, a-z, 0-9, separators
# 5. versions always start with a digit
# 6. versions are often prefixed by a 'v' character
# 7. separators are most reliable to determine name/version boundaries
# List of the following format:
#
# [
# (regex, string),
# ...
# ]
#
# The first regex that matches string will be used to determine
# the version of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
# With that said, regular expressions are slow, so if possible, put
# ones that only catch one or two URLs at the bottom.
version_regexes = [
# 1st Pass: Simplest case
# Assume name contains no digits and version contains no letters
# e.g. libpng-1.6.27
(r'^[a-zA-Z+._-]+[._-]v?(\d[\d._-]*)$', stem),
# 2nd Pass: Version only
# Assume version contains no letters
# ver
# e.g. 3.2.7, 7.0.2-7, v3.3.0, v1_6_3
(r'^v?(\d[\d._-]*)$', stem),
# 3rd Pass: No separator characters are used
# Assume name contains no digits
# namever
# e.g. turbolinux702, nauty26r7
(r'^[a-zA-Z+]*(\d[\da-zA-Z]*)$', stem),
# 4th Pass: A single separator character is used
# Assume name contains no digits
# name-name-ver-ver
# e.g. panda-2016-03-07, gts-snapshot-121130, cdd-061a
(r'^[a-zA-Z+-]*(\d[\da-zA-Z-]*)$', stem),
# name_name_ver_ver
# e.g. tinyxml_2_6_2, boost_1_55_0, tbb2017_20161128, v1_6_3
(r'^[a-zA-Z+_]*(\d[\da-zA-Z_]*)$', stem),
# name.name.ver.ver
# e.g. prank.source.150803, jpegsrc.v9b, atlas3.11.34, geant4.10.01.p03
(r'^[a-zA-Z+.]*(\d[\da-zA-Z.]*)$', stem),
# 5th Pass: Two separator characters are used
# Name may contain digits, version may contain letters
# name-name-ver.ver
# e.g. m4-1.4.17, gmp-6.0.0a, launchmon-v1.0.2
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),
# name-name-ver_ver
# e.g. icu4c-57_1
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z_]*)$', stem),
# name_name_ver.ver
# e.g. superlu_dist_4.1, pexsi_v0.9.0
(r'^[a-zA-Z\d+_]+_v?(\d[\da-zA-Z.]*)$', stem),
# name_name.ver.ver
# e.g. fer_source.v696
(r'^[a-zA-Z\d+_]+\.v?(\d[\da-zA-Z.]*)$', stem),
# name-name-ver.ver-ver.ver
# e.g. sowing-1.1.23-p1, bib2xhtml-v3.0-15-gf506, 4.6.3-alpha04
(r'^(?:[a-zA-Z\d+-]+-)?v?(\d[\da-zA-Z.-]*)$', stem),
# namever.ver-ver.ver
# e.g. go1.4-bootstrap-20161024
(r'^[a-zA-Z+]+v?(\d[\da-zA-Z.-]*)$', stem),
# 6th Pass: All three separator characters are used
# Name may contain digits, version may contain letters
# name_name-ver.ver
# e.g. the_silver_searcher-0.32.0, sphinx_rtd_theme-0.1.10a0
(r'^[a-zA-Z\d+_]+-v?(\d[\da-zA-Z.]*)$', stem),
# name.name_ver.ver-ver.ver
# e.g. TH.data_1.0-8, XML_3.98-1.4
(r'^[a-zA-Z\d+.]+_v?(\d[\da-zA-Z.-]*)$', stem),
# name-name-ver.ver_ver.ver
# e.g. pypar-2.1.5_108
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z._]*)$', stem),
# name.name_name-ver.ver
# e.g. tap.py-1.6, backports.ssl_match_hostname-3.5.0.1
(r'^[a-zA-Z\d+._]+-v?(\d[\da-zA-Z.]*)$', stem),
# name-namever.ver_ver.ver
# e.g. STAR-CCM+11.06.010_02
(r'^[a-zA-Z+-]+(\d[\da-zA-Z._]*)$', stem),
# 7th Pass: Specific VCS
# bazaar
# e.g. libvterm-0+bzr681
(r'bzr(\d[\da-zA-Z._-]*)$', stem),
# 8th Pass: Version in path
# github.com/repo/name/releases/download/vver/name
# e.g. https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow
(r'github\.com/[^/]+/[^/]+/releases/download/[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)/', path), # noqa
# 9th Pass: Query strings
# e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
(r'\?ref=[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)$', suffix),
# e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
(r'\?version=v?(\d[\da-zA-Z._-]*)$', suffix),
# e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
(r'\?filename=[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),
# e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
(r'\?package=[a-zA-Z\d+-]+&get=[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem), # noqa
]
for i, version_regex in enumerate(version_regexes):
regex, match_string = version_regex
match = re.search(regex, match_string)
if match and match.group(1) is not None:
version = match.group(1)
start = match.start(1)
# If we matched from the stem or suffix, we need to add offset
offset = 0
if match_string is stem:
offset = len(path) - len(original_stem)
elif match_string is suffix:
offset = len(path)
if ext:
offset += len(ext) + 1 # .tar.gz is converted to tar.gz
start += offset
return version, start, len(version), i, regex
raise UndetectableVersionError(original_path)
def parse_version(path):
"""Try to extract a version string from a filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
spack.version.Version: The version of the package
Raises:
UndetectableVersionError: If the URL does not match any regexes
"""
version, start, length, i, regex = parse_version_offset(path)
return Version(version)
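def _example_parse_version():
    """Illustrative check added by the editor (not part of the original
    module), using the HDF example from the module docstring."""
    url = ('https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/'
           'src/hdf-4.2.12.tar.gz')
    assert parse_version(url) == Version('4.2.12')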
def parse_name_offset(path, v=None):
"""Try to determine the name of a package from its filename or URL.
Args:
path (str): The filename or URL for the package
v (str): The version of the package
Returns:
tuple of (str, int, int, int, str): A tuple containing:
name of the package,
first index of name,
length of name,
the index of the matching regex,
the matching regex
Raises:
UndetectableNameError: If the URL does not match any regexes
"""
original_path = path
# We really need to know the version of the package
# This helps us prevent collisions between the name and version
if v is None:
try:
v = parse_version(path)
except UndetectableVersionError:
# Not all URLs contain a version. We still want to be able
# to determine a name if possible.
v = 'unknown'
# path: The prefix of the URL, everything before the ext and suffix
# ext: The file extension
# suffix: Any kind of query string that begins with a '?'
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
original_stem = os.path.basename(path)
# Try to strip off anything after the package name
stem = strip_name_suffixes(original_stem, v)
# List of the following format:
#
# [
# (regex, string),
# ...
# ]
#
# The first regex that matches string will be used to determine
# the name of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
# With that said, regular expressions are slow, so if possible, put
# ones that only catch one or two URLs at the bottom.
name_regexes = [
# 1st Pass: Common repositories
# GitHub: github.com/repo/name/
# e.g. https://github.com/nco/nco/archive/4.6.2.tar.gz
(r'github\.com/[^/]+/([^/]+)', path),
# GitLab: gitlab.*/repo/name/
# e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
(r'gitlab[^/]+/[^/]+/([^/]+)', path),
# Bitbucket: bitbucket.org/repo/name/
# e.g. https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2
(r'bitbucket\.org/[^/]+/([^/]+)', path),
# PyPI: pypi.(python.org|io)/packages/source/first-letter/name/
# e.g. https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz
# e.g. https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz
(r'pypi\.(?:python\.org|io)/packages/source/[A-Za-z\d]/([^/]+)', path),
# 2nd Pass: Query strings
# ?filename=name-ver.ver
# e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
(r'\?filename=([A-Za-z\d+-]+)$', stem),
# ?package=name
# e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
(r'\?package=([A-Za-z\d+-]+)', stem),
# download.php
# e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
(r'([^/]+)/download.php$', path),
# 3rd Pass: Name followed by version in archive
(r'^([A-Za-z\d+\._-]+)$', stem),
]
for i, name_regex in enumerate(name_regexes):
regex, match_string = name_regex
match = re.search(regex, match_string)
if match:
name = match.group(1)
start = match.start(1)
# If we matched from the stem or suffix, we need to add offset
offset = 0
if match_string is stem:
offset = len(path) - len(original_stem)
elif match_string is suffix:
offset = len(path)
if ext:
offset += len(ext) + 1 # .tar.gz is converted to tar.gz
start += offset
return name, start, len(name), i, regex
raise UndetectableNameError(original_path)
def parse_name(path, ver=None):
"""Try to determine the name of a package from its filename or URL.
Args:
path (str): The filename or URL for the package
ver (str): The version of the package
Returns:
str: The name of the package
Raises:
UndetectableNameError: If the URL does not match any regexes
"""
name, start, length, i, regex = parse_name_offset(path, ver)
return name
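def _example_parse_name():
    """Illustrative check added by the editor (not part of the original
    module), using the HDF example from the module docstring."""
    url = ('https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/'
           'src/hdf-4.2.12.tar.gz')
    assert parse_name(url) == 'hdf'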
def parse_name_and_version(path):
"""Try to determine the name of a package and extract its version
from its filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
tuple of (str, Version): A tuple containing:
The name of the package
The version of the package
Raises:
UndetectableVersionError: If the URL does not match any regexes
UndetectableNameError: If the URL does not match any regexes
"""
ver = parse_version(path)
name = parse_name(path, ver)
return (name, ver)
def insensitize(string):
"""Change upper and lowercase letters to be case insensitive in
the provided string. e.g., 'a' becomes '[aA]', 'B' becomes
'[bB]', etc. Use for building regexes."""
def to_ins(match):
char = match.group(1)
return '[%s%s]' % (char.lower(), char.upper())
return re.sub(r'([a-zA-Z])', to_ins, string)
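def _example_insensitize():
    """Illustrative check added by the editor (not part of the original
    module): each letter becomes a two-character character class."""
    assert insensitize('aB') == '[aA][bB]'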
def cumsum(elts, init=0, fn=lambda x: x):
"""Return cumulative sum of result of fn on each element in elts."""
sums = []
s = init
for i, e in enumerate(elts):
sums.append(s)
s += fn(e)
return sums
def find_all(substring, string):
"""Returns a list containing the indices of
every occurrence of substring in string."""
occurrences = []
index = 0
while index < len(string):
index = string.find(substring, index)
if index == -1:
break
occurrences.append(index)
index += len(substring)
return occurrences
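def _example_find_all():
    """Illustrative check added by the editor (not part of the original
    module): occurrences are found left to right without overlap."""
    assert find_all('ab', 'abcabab') == [0, 3, 5]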
def substitution_offsets(path):
"""This returns offsets for substituting versions and names in the
provided path. It is a helper for :func:`substitute_version`.
"""
# Get name and version offsets
try:
ver, vs, vl, vi, vregex = parse_version_offset(path)
name, ns, nl, ni, nregex = parse_name_offset(path, ver)
except UndetectableNameError:
return (None, -1, -1, (), ver, vs, vl, (vs,))
except UndetectableVersionError:
try:
name, ns, nl, ni, nregex = parse_name_offset(path)
return (name, ns, nl, (ns,), None, -1, -1, ())
except UndetectableNameError:
return (None, -1, -1, (), None, -1, -1, ())
# Find the index of every occurrence of name and ver in path
name_offsets = find_all(name, path)
ver_offsets = find_all(ver, path)
return (name, ns, nl, name_offsets,
ver, vs, vl, ver_offsets)
def wildcard_version(path):
"""Find the version in the supplied path, and return a regular expression
that will match this path with any version in its place.
"""
# Get version so we can replace it with a wildcard
version = parse_version(path)
# Split path by versions
vparts = path.split(str(version))
# Replace each version with a generic capture group to find versions
# and escape everything else so it's not interpreted as a regex
result = '(\d.*)'.join(re.escape(vp) for vp in vparts)
return result
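def _example_wildcard_version():
    """Illustrative check added by the editor (not part of the original
    module): the returned pattern matches the same URL at other versions."""
    pattern = wildcard_version(
        'http://www.mr511.de/software/libelf-0.8.13.tar.gz')
    assert re.match(pattern,
                    'http://www.mr511.de/software/libelf-0.8.12.tar.gz')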
def substitute_version(path, new_version):
"""Given a URL or archive name, find the version in the path and
substitute the new version for it. Replace all occurrences of
the version *if* they don't overlap with the package name.
Simple example:
.. code-block:: python
substitute_version('http://www.mr511.de/software/libelf-0.8.13.tar.gz', '2.9.3')
>>> 'http://www.mr511.de/software/libelf-2.9.3.tar.gz'
Complex example:
.. code-block:: python
substitute_version('https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz', '2.3')
>>> 'https://www.hdfgroup.org/ftp/HDF/releases/HDF2.3/src/hdf-2.3.tar.gz'
"""
(name, ns, nl, noffs,
ver, vs, vl, voffs) = substitution_offsets(path)
new_path = ''
last = 0
for vo in voffs:
new_path += path[last:vo]
new_path += str(new_version)
last = vo + vl
new_path += path[last:]
return new_path
def color_url(path, **kwargs):
"""Color the parts of the url according to Spack's parsing.
Colors are:
| Cyan: The version found by :func:`parse_version_offset`.
| Red: The name found by :func:`parse_name_offset`.
| Green: Instances of version string from :func:`substitute_version`.
| Magenta: Instances of the name (protected from substitution).
Args:
path (str): The filename or URL for the package
errors (bool): Append parse errors at end of string.
subs (bool): Color substitutions as well as parsed name/version.
"""
errors = kwargs.get('errors', False)
subs = kwargs.get('subs', False)
(name, ns, nl, noffs,
ver, vs, vl, voffs) = substitution_offsets(path)
nends = [no + nl - 1 for no in noffs]
vends = [vo + vl - 1 for vo in voffs]
nerr = verr = 0
out = StringIO()
for i in range(len(path)):
if i == vs:
out.write('@c')
verr += 1
elif i == ns:
out.write('@r')
nerr += 1
elif subs:
if i in voffs:
out.write('@g')
elif i in noffs:
out.write('@m')
out.write(path[i])
if i == vs + vl - 1:
out.write('@.')
verr += 1
elif i == ns + nl - 1:
out.write('@.')
nerr += 1
elif subs:
if i in vends or i in nends:
out.write('@.')
if errors:
if nerr == 0:
out.write(" @r{[no name]}")
if verr == 0:
out.write(" @r{[no version]}")
if nerr == 1:
out.write(" @r{[incomplete name]}")
if verr == 1:
out.write(" @r{[incomplete version]}")
return colorize(out.getvalue())
class UrlParseError(spack.error.SpackError):
"""Raised when the URL module can't parse something correctly."""
def __init__(self, msg, path):
super(UrlParseError, self).__init__(msg)
self.path = path
class UndetectableVersionError(UrlParseError):
"""Raised when we can't parse a version from a string."""
def __init__(self, path):
super(UndetectableVersionError, self).__init__(
"Couldn't detect version in: " + path, path)
class UndetectableNameError(UrlParseError):
"""Raised when we can't parse a package name from a string."""
def __init__(self, path):
super(UndetectableNameError, self).__init__(
"Couldn't parse package name in: " + path, path)
|
lgarren/spack
|
lib/spack/spack/url.py
|
Python
|
lgpl-2.1
| 29,937
|
[
"HOOMD-blue",
"VTK"
] |
dcbda43554aeb451e4ccbf1a9658df7a65e32b0aae2e30414d880c4bf662e017
|
from __future__ import print_function
import fileinput
import os
import shutil
import subprocess
import click
def _get_version():
with open('flexget/_version.py') as f:
g = globals()
l = {}
exec(f.read(), g, l) # pylint: disable=W0122
if not l['__version__']:
raise click.ClickException('Could not find __version__ from flexget/_version.py')
return l['__version__']
@click.group()
def cli():
pass
@cli.command()
def version():
"""Prints the version number of the source"""
click.echo(_get_version())
@cli.command()
@click.argument('bump_type', type=click.Choice(['dev', 'release']))
def bump_version(bump_type):
"""Bumps version to the next release, or development version."""
cur_ver = _get_version()
click.echo('current version: %s' % cur_ver)
ver_split = cur_ver.split('.')
if 'dev' in ver_split[-1]:
if bump_type == 'dev':
# If this is already a development version, increment the dev count by 1
ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
else:
# Just strip off dev tag for next release version
ver_split = ver_split[:-1]
else:
# Increment the revision number by one
if len(ver_split) == 2:
# We don't have a revision number, assume 0
ver_split.append('1')
else:
if 'b' in ver_split[2]:
# beta version
minor, beta = ver_split[-1].split('b')
ver_split[-1] = '%sb%s' % (minor, int(beta) + 1)
else:
ver_split[-1] = str(int(ver_split[-1]) + 1)
if bump_type == 'dev':
ver_split.append('dev')
new_version = '.'.join(ver_split)
for line in fileinput.FileInput('flexget/_version.py', inplace=1):
if line.startswith('__version__ ='):
line = "__version__ = '%s'\n" % new_version
print(line, end='')
click.echo('new version: %s' % new_version)
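# Illustrative behaviour of bump_version (editor's note; the version strings
# below are hypothetical examples traced through the logic above):
#   '1.2.3'      + dev     -> '1.2.4.dev'
#   '1.2.4.dev'  + dev     -> '1.2.4.dev1'
#   '1.2.4.dev1' + release -> '1.2.4'
#   '1.2.3b1'    + release -> '1.2.3b2'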
@cli.command()
def build_webui():
"""Build webui for release packaging"""
cwd = os.path.join('flexget', 'ui')
# Cleanup previous builds
click.echo('cleaning previous builds')
for folder in ['bower_components', 'node_modules']:
folder = os.path.join(cwd, folder)
if os.path.exists(folder):
click.echo('Deleting recursively {}'.format(folder))
shutil.rmtree(folder)
# Install npm packages
click.echo('running `npm install`')
subprocess.check_call('npm install', cwd=cwd, shell=True)
# Install bower components
click.echo('running `bower install`')
subprocess.check_call('bower install', cwd=cwd, shell=True)
# Build the ui
click.echo('running `gulp buildapp`')
subprocess.check_call('gulp buildapp', cwd=cwd, shell=True)
if __name__ == '__main__':
cli()
|
oxc/Flexget
|
dev_tools.py
|
Python
|
mit
| 2,861
|
[
"GULP"
] |
3551b7e774da48efc30818b8f01726e83c863fc9b202cdc5374d141f47145680
|
#!/usr/bin/env python2
import sys
import os
from parmed.tools import changeLJSingleType
from parmed.tools import parmout
from parmed.tools import change
from parmed.amber import AmberParm
from parmed.structure import Structure
def parmed_topology(top_file, param_type, param_list,TIP3P_param_list, index):
"""
Use ParmEd to edit water parameters
"""
parm = AmberParm(top_file)
local_param_list = TIP3P_param_list
ow_charge_column = hw_charge_column = ow_rad_column = ow_eps_column = 999
# Write a log file for parmEd
f = open('waterbot_parmed.log','w')
logfile = open('parmed.log','w')
for i, nonbonded_term in enumerate(param_type):
if 'charge' in nonbonded_term.lower() and 'ow' in nonbonded_term.lower():
ow_charge_column = i
f.write('The partial charge of water oxygen will be perturbed; ')
f.write('the value of the new parameter is %.4f.\n'%(param_list[i][index]))
local_param_list[2] = param_list[i][index]
elif 'charge' in nonbonded_term.lower() and 'hw' in nonbonded_term.lower():
hw_charge_column = i
f.write('The partial charge of water hydrogen will be perturbed; ')
f.write('the value of the new parameter is %.4f.\n'%(param_list[i][index]))
local_param_list[3] = param_list[i][index]
elif 'radius' in nonbonded_term.lower():
ow_rad_column = i
f.write('The radius parameter of water oxygen will be perturbed; ')
f.write('the value of the new parameter is %.4f.\n'%(param_list[i][index]))
local_param_list[0] = param_list[i][index]
elif 'epsilon' in nonbonded_term.lower():
ow_eps_column = i
f.write('The epsilon parameter of water oxygen will be perturbed; ')
f.write('the value of the new parameter is %.4f.\n'%(param_list[i][index]))
local_param_list[1] = param_list[i][index]
# if only one of the charges were provided:
if ow_charge_column == 999 and hw_charge_column!=999:
f.write('\nThe charge of water oxygen was not provided.')
local_param_list[2] = -2.0 * local_param_list[3]
f.write('A value of %.4f was computed and used to make sure this water model is neutral.'%(local_param_list[2]))
elif hw_charge_column == 999 and ow_charge_column!=999:
f.write('\nThe charge of water hydrogen was not provided.')
local_param_list[3] = -local_param_list[2]/2.0
f.write('A value of %.4f was computed and used to make sure this water model is neutral.'%(local_param_list[3]))
# Check whether the water model has a neutral charge
if (local_param_list[2] + 2*local_param_list[3]) != 0:
f.write('\nAborted. The new water model is not neutral!!!\n')
sys.exit(1)
if ow_rad_column!=999 or ow_eps_column!=999:
# It looks like there is no way to only change radius or epsilon
action = changeLJSingleType(parm, "@%OW", local_param_list[0], local_param_list[1])
action.execute()
logfile.write(('%s\n' % action))
if ow_charge_column!=999 or hw_charge_column!=999:
action = change(parm, 'CHARGE', "@%OW", local_param_list[2])
action.execute()
logfile.write(('%s\n' % action))
Structure.save(parm,'solvated_perturbed.prmtop')
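# Illustrative call (editor's sketch, not part of the original script; the
# topology file name and the TIP3P-like parameter values below are
# hypothetical, and 'solvated.prmtop' must exist on disk):
# parmed_topology('solvated.prmtop',
#                 ['OW charge', 'HW charge', 'radius', 'epsilon'],
#                 [[-0.834], [0.417], [1.7683], [0.1520]],
#                 [1.7683, 0.1520, -0.834, 0.417], 0)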
|
Janeyin600/WaterBot
|
waterbot_parmed.py
|
Python
|
gpl-3.0
| 3,304
|
[
"Amber"
] |
6ea39e7739e11ece6695c775ef48b59229bd8bc86b61820bdbcdfbaf4abd06fe
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.keras.layers import advanced_activations
# b/123041942
# In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
# model exporting and loading as Keras can't find any activation function with
# the name of `softmax_v2`.
# This dict maps the activation function name from its v2 version to its
# canonical name.
_TF_ACTIVATIONS_V2 = {
'softmax_v2': 'softmax',
}
@keras_export('keras.activations.softmax')
@dispatch.add_dispatch_support
def softmax(x, axis=-1):
"""Softmax converts a real vector to a vector of categorical probabilities.
The elements of the output vector are in range (0, 1) and sum to 1.
Each vector is handled independently. The `axis` argument sets which axis
of the input the function is applied along.
Softmax is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution.
The softmax of each vector x is computed as
`exp(x) / tf.reduce_sum(exp(x))`.
The input values are the log-odds of the resulting probability.
Args:
x : Input tensor.
axis: Integer, axis along which the softmax normalization is applied.
Returns:
Tensor, output of softmax transformation (all values are non-negative
and sum to 1).
"""
output = nn.softmax(x, axis=axis)
# Cache the logits to use for crossentropy loss.
output._keras_logits = x # pylint: disable=protected-access
return output
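# Illustrative usage added by the editor (not part of the original source),
# mirroring the doctest style used in the docstrings below:
# >>> inputs = tf.random.normal(shape=(32, 10))
# >>> outputs = tf.keras.activations.softmax(inputs)
# >>> tf.reduce_sum(outputs[0, :]).numpy()  # each row sums to ~1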
@keras_export('keras.activations.elu')
@dispatch.add_dispatch_support
def elu(x, alpha=1.0):
"""Exponential Linear Unit.
The exponential linear unit (ELU) with `alpha > 0` is:
`x` if `x > 0` and
`alpha * (exp(x) - 1)` if `x < 0`
The ELU hyperparameter `alpha` controls the value to which an
ELU saturates for negative net inputs. ELUs diminish the
vanishing gradient effect.
ELUs have negative values which pushes the mean of the activations
closer to zero.
Mean activations that are closer to zero enable faster learning as they
bring the gradient closer to the natural gradient.
ELUs saturate to a negative value when the argument gets smaller.
Saturation means a small derivative which decreases the variation
and the information that is propagated to the next layer.
Example Usage:
>>> import tensorflow as tf
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',
... input_shape=(28, 28, 1)))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
<tensorflow.python.keras.engine.sequential.Sequential object ...>
Args:
x: Input tensor.
alpha: A scalar, slope of negative section. `alpha` controls the value to
which an ELU saturates for negative net inputs.
Returns:
The exponential linear unit (ELU) activation function: `x` if `x > 0` and
`alpha * (exp(x) - 1)` if `x < 0`.
Reference:
[Fast and Accurate Deep Network Learning by Exponential Linear Units
(ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)
"""
return K.elu(x, alpha)
@keras_export('keras.activations.selu')
@dispatch.add_dispatch_support
def selu(x):
"""Scaled Exponential Linear Unit (SELU).
The Scaled Exponential Linear Unit (SELU) activation function is defined as:
- `if x > 0: return scale * x`
- `if x < 0: return scale * alpha * (exp(x) - 1)`
where `alpha` and `scale` are pre-defined constants
(`alpha=1.67326324` and `scale=1.05070098`).
Basically, the SELU activation function multiplies `scale` (> 1) with the
output of the `tf.keras.activations.elu` function to ensure a slope larger
than one for positive inputs.
The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see `tf.keras.initializers.LecunNormal` initializer)
and the number of input units is "large enough"
(see reference paper for more information).
Example Usage:
>>> num_classes = 10 # 10-class problem
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
Args:
x: A tensor or variable to compute the activation function for.
Returns:
The scaled exponential unit activation: `scale * elu(x, alpha)`.
Notes:
- To be used together with the
`tf.keras.initializers.LecunNormal` initializer.
- To be used together with the dropout variant
`tf.keras.layers.AlphaDropout` (not regular dropout).
References:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
return nn.selu(x)
@keras_export('keras.activations.softplus')
@dispatch.add_dispatch_support
def softplus(x):
"""Softplus activation function, `softplus(x) = log(exp(x) + 1)`.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.softplus(a)
>>> b.numpy()
array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,
2.0000000e+01], dtype=float32)
Args:
x: Input tensor.
Returns:
The softplus activation: `log(exp(x) + 1)`.
"""
return nn.softplus(x)
@keras_export('keras.activations.softsign')
@dispatch.add_dispatch_support
def softsign(x):
"""Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.
Example Usage:
>>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)
>>> b = tf.keras.activations.softsign(a)
>>> b.numpy()
array([-0.5, 0. , 0.5], dtype=float32)
Args:
x: Input tensor.
Returns:
The softsign activation: `x / (abs(x) + 1)`.
"""
return nn.softsign(x)
@keras_export('keras.activations.swish')
@dispatch.add_dispatch_support
def swish(x):
"""Swish activation function, `swish(x) = x * sigmoid(x)`.
  It is a smooth, non-monotonic function that consistently matches
  or outperforms ReLU on deep networks. It is unbounded above and
  bounded below.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.swish(a)
>>> b.numpy()
array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,
2.0000000e+01], dtype=float32)
Args:
x: Input tensor.
Returns:
The swish activation applied to `x` (see reference paper for details).
Reference:
- [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
"""
return nn.swish(x)
@keras_export('keras.activations.relu')
@dispatch.add_dispatch_support
def relu(x, alpha=0., max_value=None, threshold=0):
"""Applies the rectified linear unit activation function.
With default values, this returns the standard ReLU activation:
`max(x, 0)`, the element-wise maximum of 0 and the input tensor.
Modifying default parameters allows you to use non-zero thresholds,
change the max value of the activation,
and to use a non-zero multiple of the input for values below the threshold.
For example:
>>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)
>>> tf.keras.activations.relu(foo).numpy()
array([ 0., 0., 0., 5., 10.], dtype=float32)
>>> tf.keras.activations.relu(foo, alpha=0.5).numpy()
array([-5. , -2.5, 0. , 5. , 10. ], dtype=float32)
>>> tf.keras.activations.relu(foo, max_value=5).numpy()
array([0., 0., 0., 5., 5.], dtype=float32)
>>> tf.keras.activations.relu(foo, threshold=5).numpy()
array([-0., -0., 0., 0., 10.], dtype=float32)
Args:
x: Input `tensor` or `variable`.
alpha: A `float` that governs the slope for values lower than the
threshold.
max_value: A `float` that sets the saturation threshold (the largest value
the function will return).
threshold: A `float` giving the threshold value of the activation function
below which values will be damped or set to zero.
Returns:
A `Tensor` representing the input tensor,
transformed by the relu activation function.
Tensor will be of the same shape and dtype of input `x`.
"""
return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
@keras_export('keras.activations.gelu', v1=[])
@dispatch.add_dispatch_support
def gelu(x, approximate=False):
"""Applies the Gaussian error linear unit (GELU) activation function.
Gaussian error linear unit (GELU) computes
`x * P(X <= x)`, where `P(X) ~ N(0, 1)`.
The (GELU) nonlinearity weights inputs by their value, rather than gates
inputs by their sign as in ReLU.
For example:
>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.keras.activations.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],
dtype=float32)
>>> y = tf.keras.activations.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],
dtype=float32)
Args:
x: Input tensor.
approximate: A `bool`, whether to enable approximation.
Returns:
The gaussian error linear activation:
`0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`
if `approximate` is `True` or
`x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
where `P(X) ~ N(0, 1)`,
if `approximate` is `False`.
Reference:
- [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
"""
return nn.gelu(x, approximate)
@keras_export('keras.activations.tanh')
@dispatch.add_dispatch_support
def tanh(x):
"""Hyperbolic tangent activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.tanh(a)
>>> b.numpy()
array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor of same shape and dtype of input `x`, with tanh activation:
`tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
"""
return nn.tanh(x)
@keras_export('keras.activations.sigmoid')
@dispatch.add_dispatch_support
def sigmoid(x):
"""Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`.
Applies the sigmoid activation function. For small values (<-5),
`sigmoid` returns a value close to zero, and for large values (>5)
the result of the function gets close to 1.
Sigmoid is equivalent to a 2-element Softmax, where the second element is
assumed to be zero. The sigmoid function always returns a value between
0 and 1.
For example:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.sigmoid(a)
>>> b.numpy()
array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,
1.0000000e+00], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.
"""
output = nn.sigmoid(x)
# Cache the logits to use for crossentropy loss.
output._keras_logits = x # pylint: disable=protected-access
return output
@keras_export('keras.activations.exponential')
@dispatch.add_dispatch_support
def exponential(x):
"""Exponential activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.exponential(a)
>>> b.numpy()
array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor with exponential activation: `exp(x)`.
"""
return math_ops.exp(x)
@keras_export('keras.activations.hard_sigmoid')
@dispatch.add_dispatch_support
def hard_sigmoid(x):
"""Hard sigmoid activation function.
A faster approximation of the sigmoid activation.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.hard_sigmoid(a)
>>> b.numpy()
array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32)
Args:
x: Input tensor.
Returns:
The hard sigmoid activation, defined as:
- `if x < -2.5: return 0`
- `if x > 2.5: return 1`
- `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
"""
return K.hard_sigmoid(x)
@keras_export('keras.activations.linear')
@dispatch.add_dispatch_support
def linear(x):
"""Linear activation function (pass-through).
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.linear(a)
>>> b.numpy()
array([-3., -1., 0., 1., 3.], dtype=float32)
Args:
x: Input tensor.
Returns:
The input, unmodified.
"""
return x
@keras_export('keras.activations.serialize')
@dispatch.add_dispatch_support
def serialize(activation):
"""Returns the string identifier of an activation function.
Args:
    activation: Function object.
  Returns:
    String denoting the name attribute of the input function.
For example:
>>> tf.keras.activations.serialize(tf.keras.activations.tanh)
'tanh'
>>> tf.keras.activations.serialize(tf.keras.activations.sigmoid)
'sigmoid'
>>> tf.keras.activations.serialize('abcd')
Traceback (most recent call last):
...
ValueError: ('Cannot serialize', 'abcd')
Raises:
ValueError: The input function is not a valid one.
"""
if (hasattr(activation, '__name__') and
activation.__name__ in _TF_ACTIVATIONS_V2):
return _TF_ACTIVATIONS_V2[activation.__name__]
return serialize_keras_object(activation)
@keras_export('keras.activations.deserialize')
@dispatch.add_dispatch_support
def deserialize(name, custom_objects=None):
"""Returns activation function given a string identifier.
Args:
name: The name of the activation function.
custom_objects: Optional `{function_name: function_obj}`
dictionary listing user-provided activation functions.
Returns:
Corresponding activation function.
For example:
>>> tf.keras.activations.deserialize('linear')
<function linear at 0x1239596a8>
>>> tf.keras.activations.deserialize('sigmoid')
<function sigmoid at 0x123959510>
>>> tf.keras.activations.deserialize('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Raises:
ValueError: `Unknown activation function` if the input string does not
      denote any defined TensorFlow activation function.
"""
globs = globals()
# only replace missing activations
advanced_activations_globs = advanced_activations.get_globals()
for key, val in advanced_activations_globs.items():
if key not in globs:
globs[key] = val
return deserialize_keras_object(
name,
module_objects=globs,
custom_objects=custom_objects,
printable_module_name='activation function')
@keras_export('keras.activations.get')
@dispatch.add_dispatch_support
def get(identifier):
"""Returns function.
Args:
identifier: Function or string
Returns:
Function corresponding to the input string or input function.
For example:
>>> tf.keras.activations.get('softmax')
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(tf.keras.activations.softmax)
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(None)
<function linear at 0x1239596a8>
>>> tf.keras.activations.get(abs)
<built-in function abs>
>>> tf.keras.activations.get('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Raises:
ValueError: Input is an unknown function or string, i.e., the input does
not denote any defined function.
"""
if identifier is None:
return linear
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
elif isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise TypeError(
'Could not interpret activation function identifier: {}'.format(
identifier))
|
annarev/tensorflow
|
tensorflow/python/keras/activations.py
|
Python
|
apache-2.0
| 18,023
|
[
"Gaussian"
] |
945aad1829be37d434de863b0b87444ed6ca3d706efd3b00b4df3ae438c7eefb
|
import json
from django.core.context_processors import csrf
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.template import Template, Context
from django.template.loader import get_template
from pathgenerator.models import Place, Path, UserForm
# Create your views here.
def map_page(request):
t = get_template('map.html')
html = t.render(Context())
return HttpResponse(html)
def mapsearch_page(request):
# Need to add csrf token to every page with a form or ajax
c = {"tag_list":["art_gallery", "bicycle_store", "cafe", "book_store",
"aquarium", "park", "pet_store", "campground", "zoo",
"cemetery", "funeral_home", "liquor_store", "hospital",
"beauty_salon", "clothing_store", "florist", "hair_care",
"jewelry_store", "shoe_store", "shopping_mall", "spa", "department_store",
"accounting", "atm", "bank", "courthouse", "finance",
"insurance_agency", "lawyer", "parking", "post_office", "storage",
"bar", "casino", "night_club", "amusement_park"],
"login_form": UserForm()}
c.update(csrf(request))
return render_to_response('map_search.html', c)
def place(request):
if request.method == 'POST':
        # the POSTed 'results' field is a JSON string encoding a list of places
        place_string = request.POST.get('results')
        process_place_json(place_string)
return HttpResponse('success')
else:
return HttpResponse('Not implemented')
def process_place_json(place_string):
'''
    Processes a JSON string of place results into Place objects.
    place_string - a JSON string encoding a list of place dicts
'''
# this is now a list of dicts
place_json = json.loads(place_string)
for place in place_json:
# Create a new Place only if not currently exists
# This is a workaround because django doesn't like the string 'id'
_place_id = str(place['id'])
if Place.objects.filter(google_id=_place_id).exists():
            print('Object already in database')
else:
# Not sure if there will be decimal problems
lat = place['geometry']['location']['d']
lng = place['geometry']['location']['e']
Place.objects.create(
latitude=lat,
longitude=lng,
address='',
name=place['name'],
source='google',
google_reference=place['reference'],
google_id=_place_id,
raw_data=json.dumps(place, separators=(',',':')),
)
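# Hypothetical example of one decoded entry handled by process_place_json
# (field names are assumptions mirroring the minified Google Maps objects
# this view expects):
# {"id": "abc123", "name": "Cafe Neon", "reference": "CnRk...",
#  "geometry": {"location": {"d": 45.5017, "e": -73.5673}}}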
# Path JSON format:
#   name       - the path's name
#   start, end - google LatLng objects: {lat: ..., lng: ...}
#   waypoints  - a list of {id, lat, lng} objects whose ids should
#                correspond to existing Place objects
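# Hypothetical example of a POST body for path() (names and coordinates are
# invented for illustration):
#   name=Morning walk
#   start={"d": 45.5017, "e": -73.5673}
#   end={"d": 45.5088, "e": -73.5540}
#   waypoints=[{"id": "abc123", "lat": 45.5042, "lng": -73.5610}]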
# use decorator?
def path(request):
# User must be logged in to get and post paths
if not request.user.is_authenticated():
return HttpResponse("User not logged in")
if request.method == "GET":
# get user's path
path_name = request.GET['path-name']
        try:
            path = Path.objects.get(user=request.user, name=path_name)
        except Path.DoesNotExist:
            # No path with this user and name; report it in the response body
            return HttpResponse(content="Path with specified user and name doesn't exist")
# return a Json object that can be reconstituted into a path
# Have to load waypoints otherwise it doesn't get deserialized correctly
path_dict = {
"name": path.name,
"start": {
"lat": str(path.start_lat),
"lng": str(path.start_lng)
},
"end": {
"lat": str(path.end_lat),
"lng": str(path.end_lng),
},
"waypoints": json.loads(path.json)
}
        json_string = json.dumps(path_dict)
        return HttpResponse(json_string, content_type="application/json")
elif request.method == "POST":
# waypoints is a json
path_name = request.POST['name']
# Prevent paths with duplicate names
# If you want to edit a path, should use a PUT request
if Path.objects.filter(name=path_name, user=request.user).exists():
return HttpResponse('Path already exists')
waypoints = request.POST['waypoints']
start = json.loads(request.POST['start'])
end = json.loads(request.POST['end'])
# get user
path = Path.objects.create(
name=path_name,
json=waypoints,
user=request.user,
start_lat=start['d'],
start_lng=start['e'],
end_lat=end['d'],
end_lng=end['e']
)
# get a Place for each waypoint
waypoints = json.loads(waypoints)
for waypoint in waypoints:
            # We assume each waypoint has a matching Place; handle missing and
            # duplicate google_ids explicitly
            try:
                place = Place.objects.get(google_id=waypoint['id'])
            except Place.DoesNotExist:
                return HttpResponse('no place with id ' + waypoint['id'])
            except Place.MultipleObjectsReturned:
                return HttpResponse('more than one place with same id')
path.places.add(place)
path.save() # is this necessary?
return HttpResponse('success')
else:
return HttpResponse('not implemented yet')
def saved_path_page(request):
# TODO implement display of user paths
# Should not be able to view this page without being logged in
if request.user.is_authenticated():
# Get all paths that belong to user
paths = Path.objects.filter(user=request.user)
c = {"paths": paths}
c.update(csrf(request))
return render_to_response('saved_path_page.html', c)
else:
# if user not logged in, can't see saved paths
return HttpResponseRedirect('/')
'''
TODO: actually put this in database
Implement getting paths from ID.
Query which paths a user has.
'''
|
Earthstar/double-d
|
pathgenerator/views.py
|
Python
|
mit
| 5,967
|
[
"CASINO"
] |
3b824a521a76c61ca9d75d2b3b270161e6912bd76bc2a44bc2bf0b5ed2a9b671
|
from GenericRequest import GenericRequest
from kol.manager import PatternManager
class CharpaneRequest(GenericRequest):
"Requests the user's character pane."
def __init__(self, session):
super(CharpaneRequest, self).__init__(session)
self.url = session.serverURL + 'charpane.php'
def parseResponse(self):
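        # Each field is scraped from the character-pane HTML with a
        # pre-compiled regex from PatternManager; optional fields are only
        # added to responseData when their pattern matches.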
accountPwdPattern = PatternManager.getOrCompilePattern('accountPwd')
match = accountPwdPattern.search(self.responseText)
self.responseData["pwd"] = match.group(1)
accountNamePattern = PatternManager.getOrCompilePattern('accountName')
match = accountNamePattern.search(self.responseText)
self.responseData["userName"] = match.group(1)
accountIdPattern = PatternManager.getOrCompilePattern('accountId')
match = accountIdPattern.search(self.responseText)
self.responseData["userId"] = int(match.group(1))
characterLevelPattern = PatternManager.getOrCompilePattern('characterLevel')
match = characterLevelPattern.search(self.responseText)
if match:
self.responseData["level"] = int(match.group(1))
title = str(match.group(2))
self.responseData["levelTitle"] = title
if title == "Astral Spirit":
self.responseData["class"] = "Astral Spirit"
elif title in ["Lemming Trampler", "Tern Slapper", "Puffin Intimidator", "Ermine Thumper", "Penguin Frightener", "Malamute Basher", "Narwhal Pummeler", "Otter Crusher", "Caribou Smacker", "Moose Harasser", "Reindeer Threatener", "Ox Wrestler", "Walrus Bludgeoner", "Whale Boxer", "Seal Clubber"]:
self.responseData["class"] = "Seal Clubber"
elif title in ["Toad Coach", "Skink Trainer", "Frog Director", "Gecko Supervisor", "Newt Herder", "Frog Boss", "Iguana Driver", "Salamander Subduer", "Bullfrog Overseer", "Rattlesnake Chief", "Crocodile Lord", "Cobra Commander", "Alligator Subjugator", "Asp Master", "Turtle Tamer"]:
self.responseData["class"] = "Turtle Tamer"
elif title in ["Dough Acolyte", "Yeast Scholar", "Noodle Neophyte", "Starch Savant", "Carbohydrate Cognoscenti", "Spaghetti Sage", "Macaroni Magician", "Vermicelli Enchanter", "Linguini Thaumaturge", "Ravioli Sorcerer", "Manicotti Magus", "Spaghetti Spellbinder", "Cannelloni Conjurer", "Angel-Hair Archmage", "Pastamancer"]:
self.responseData["class"] = "Pastamancer"
elif title in ["Allspice Acolyte", "Cilantro Seer", "Parsley Enchanter", "Sage Sage", "Rosemary Diviner", "Thyme Wizard", "Tarragon Thaumaturge", "Oreganoccultist", "Basillusionist", "Coriander Conjurer", "Bay Leaf Brujo", "Sesame Soothsayer", "Marinara Mage", "Alfredo Archmage", "Sauceror"]:
self.responseData["class"] = "Sauceror"
elif title in ["Funk Footpad", "Rhythm Rogue", "Chill Crook", "Jiggy Grifter", "Beat Snatcher", "Sample Swindler", "Move Buster", "Jam Horker", "Groove Filcher", "Vibe Robber", "Boogie Brigand", "Flow Purloiner", "Jive Pillager", "Rhymer And Stealer", "Disco Bandit"]:
self.responseData["class"] = "Disco Bandit"
elif title in ["Polka Criminal", "Mariachi Larcenist", "Zydeco Rogue", "Chord Horker", "Chromatic Crook", "Squeezebox Scoundrel", "Concertina Con Artist", "Button Box Burglar", "Hurdy-Gurdy Hooligan", "Sub-Sub-Apprentice Accordion Thief", "Sub-Apprentice Accordion Thief", "Pseudo-Apprentice Accordion Thief", "Hemi-Apprentice Accordion Thief", "Apprentice Accordion Thief", "Accordion Thief"]:
self.responseData["class"] = "Accordion Thief"
characterHPPattern = PatternManager.getOrCompilePattern('characterHP')
match = characterHPPattern.search(self.responseText)
if match:
self.responseData["currentHP"] = int(match.group(1))
self.responseData["maxHP"] = int(match.group(2))
characterMPPattern = PatternManager.getOrCompilePattern('characterMP')
match = characterMPPattern.search(self.responseText)
if match:
self.responseData["currentMP"] = int(match.group(1))
self.responseData["maxMP"] = int(match.group(2))
characterMeatPattern = PatternManager.getOrCompilePattern('characterMeat')
match = characterMeatPattern.search(self.responseText)
if match:
self.responseData["meat"] = int(match.group(1).replace(',', ''))
characterAdventuresPattern = PatternManager.getOrCompilePattern('characterAdventures')
match = characterAdventuresPattern.search(self.responseText)
if match:
self.responseData["adventures"] = int(match.group(1))
characterDrunkPattern = PatternManager.getOrCompilePattern('characterDrunk')
match = characterDrunkPattern.search(self.responseText)
if match:
self.responseData["drunkenness"] = int(match.group(1))
currentFamiliarPattern = PatternManager.getOrCompilePattern('currentFamiliar')
match = currentFamiliarPattern.search(self.responseText)
if match:
self.responseData["familiar"] = {'name':str(match.group(1)), 'type':str(match.group(3)), 'weight':int(match.group(2))}
effects = []
characterEffectPattern = PatternManager.getOrCompilePattern('characterEffect')
for match in characterEffectPattern.finditer(self.responseText):
effect = {}
effect["name"] = str(match.group(1))
effect["turns"] = int(match.group(2))
effects.append(effect)
if len(effects) > 0:
self.responseData["effects"] = effects
characterMusclePattern = PatternManager.getOrCompilePattern('characterMuscle')
match = characterMusclePattern.search(self.responseText)
if match:
if match.group(1) and len(str(match.group(1))) > 0:
self.responseData["buffedMuscle"] = int(match.group(1))
self.responseData["baseMuscle"] = int(match.group(2))
characterMoxiePattern = PatternManager.getOrCompilePattern('characterMoxie')
match = characterMoxiePattern.search(self.responseText)
if match:
if match.group(1) and len(str(match.group(1))) > 0:
self.responseData["buffedMoxie"] = int(match.group(1))
self.responseData["baseMoxie"] = int(match.group(2))
characterMysticalityPattern = PatternManager.getOrCompilePattern('characterMysticality')
match = characterMysticalityPattern.search(self.responseText)
if match:
if match.group(1) and len(str(match.group(1))) > 0:
self.responseData["buffedMysticality"] = int(match.group(1))
self.responseData["baseMysticality"] = int(match.group(2))
characterRoninPattern = PatternManager.getOrCompilePattern('characterRonin')
match = characterRoninPattern.search(self.responseText)
if match:
self.responseData["roninLeft"] = int(match.group(1))
characterMindControlPattern = PatternManager.getOrCompilePattern('characterMindControl')
match = characterMindControlPattern.search(self.responseText)
if match:
self.responseData["mindControl"] = int(match.group(1))
|
ijzer/cwbot-ndy
|
kol/request/CharpaneRequest.py
|
Python
|
bsd-3-clause
| 7,273
|
[
"MOOSE"
] |
3d43d473bc6c1e8528acafa9768756cf8ec4941270b32beb3d46101fb88115b0
|
from tkinter import *
from tkinter.filedialog import *
from tkinter.messagebox import *
##from tkinter.filedialog import askopenfilenames
##from tkinter.filedialog import asksaveasfilename
import GEToperant
GETprofile = None
MPC_filenames = None
#Define functions for the menus
def openprofile():
global GETprofile
GETprofile = askopenfilename(title = 'Select data profile', filetypes = [('Excel GEToperant Profile', '*.xlsx'), ('Excel 97-2003 GEToperant Profile', '*.xls'), ('MPC2XL Row Profile', '*.MRP')])
def opendata():
global MPC_filenames
MPC_filenames = askopenfilenames(title = 'Select files to import')
def saveoutput():
outputfile = asksaveasfilename(title = 'Save output file as', defaultextension='.xlsx', filetypes=(('Excel', '*.xlsx'),('All Files', '*.*')))
    if GETprofile is None or MPC_filenames is None or len(GETprofile) < 1 or len(MPC_filenames) < 1 or len(outputfile) < 1:
        showerror('Error!', 'Please select a data profile, at least one Med-PC data file and define a save file.')
else:
GEToperant.GEToperant(GETprofile, MPC_filenames, outputfile,
exportfilename = Header_Filename.get(),
exportstartdate = Header_StartDate.get(),
exportenddate = Header_EndDate.get(),
exportsubject = Header_Subject.get(),
exportexperiment = Header_Experiment.get(),
exportgroup = Header_Group.get(),
exportbox = Header_Box.get(),
exportstarttime = Header_StartTime.get(),
exportendtime = Header_EndTime.get(),
exportmsn = Header_MSN.get(),
mode = 'Main')
def saveoutputsheet():
outputfile = asksaveasfilename(title = 'Save output file as', defaultextension='.xlsx', filetypes=(('Excel', '*.xlsx'),('All Files', '*.*')))
    if GETprofile is None or MPC_filenames is None or len(GETprofile) < 1 or len(MPC_filenames) < 1 or len(outputfile) < 1:
        showerror('Error!', 'Please select a data profile, at least one Med-PC data file and define a save file.')
else:
GEToperant.GEToperant(GETprofile, MPC_filenames, outputfile,
exportfilename = Header_Filename.get(),
exportstartdate = Header_StartDate.get(),
exportenddate = Header_EndDate.get(),
exportsubject = Header_Subject.get(),
exportexperiment = Header_Experiment.get(),
exportgroup = Header_Group.get(),
exportbox = Header_Box.get(),
exportstarttime = Header_StartTime.get(),
exportendtime = Header_EndTime.get(),
exportmsn = Header_MSN.get(),
mode = 'Sheets')
def saveoutputbooks():
outputfile = askdirectory(title = 'Select directory to save exported files to')
    if GETprofile is None or MPC_filenames is None or len(GETprofile) < 1 or len(MPC_filenames) < 1 or len(outputfile) < 1:
        showerror('Error!', 'Please select a data profile, at least one Med-PC data file and define a save directory.')
else:
GEToperant.GEToperant(GETprofile, MPC_filenames, outputfile,
exportfilename = Header_Filename.get(),
exportstartdate = Header_StartDate.get(),
exportenddate = Header_EndDate.get(),
exportsubject = Header_Subject.get(),
exportexperiment = Header_Experiment.get(),
exportgroup = Header_Group.get(),
exportbox = Header_Box.get(),
exportstarttime = Header_StartTime.get(),
exportendtime = Header_EndTime.get(),
exportmsn = Header_MSN.get(),
mode = 'Books')
#Define functions for the buttons
def RunExport():
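    # Note: Export_Mode is assumed to be an IntVar (1, 2 or 3) selecting the
    # export mode; it is not defined in this script, so this dispatcher is
    # unused unless such a variable is created alongside the header checkboxes.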
exmode = Export_Mode.get()
if exmode == 1:
GETexpress()
elif exmode == 2:
GETsheets()
elif exmode == 3:
GETbooks()
def convertprofile():
GETprofile = askopenfilename(title = 'Select data profile', filetypes = [('MPC2XL Row Profile', '*.MRP')])
if len(GETprofile) < 1:
showerror('Error!', 'Please select a MPC2XL Row Profile.')
return None
profileexport = asksaveasfilename(title = 'Save converted profile as', defaultextension='.xlsx', filetypes=[('Excel', '*.xlsx')])
if len(profileexport) < 1:
showerror('Error!', 'Please choose a save file.')
return None
else:
GEToperant.convertMRP(GETprofile = GETprofile, profileexport = profileexport)
def GETexpress():
GETprofile = askopenfilename(title = 'Select data profile', filetypes = [('Excel GEToperant Profile', '*.xlsx'), ('Excel 97-2003 GEToperant Profile', '*.xls'), ('MPC2XL Row Profile', '*.MRP')])
if len(GETprofile) < 1:
showerror('Error!', 'Please select a data profile')
return None
MPC_filenames = askopenfilenames(title = 'Select files to import')
if len(MPC_filenames) < 1:
showerror('Error!', 'Please select at least one Med-PC data file.')
return None
outputfile = asksaveasfilename(title = 'Save output file as', defaultextension='.xlsx', filetypes=(('Excel', '*.xlsx'),('All Files', '*.*')))
if len(outputfile) < 1:
showerror('Error!', 'Please select an output file.')
return None
elif '.xls' not in outputfile[-4:] and '.xlsx' not in outputfile[-5:]:
outputfile = outputfile + '.xlsx'
GEToperant.GEToperant(GETprofile, MPC_filenames, outputfile,
exportfilename = Header_Filename.get(),
exportstartdate = Header_StartDate.get(),
exportenddate = Header_EndDate.get(),
exportsubject = Header_Subject.get(),
exportexperiment = Header_Experiment.get(),
exportgroup = Header_Group.get(),
exportbox = Header_Box.get(),
exportstarttime = Header_StartTime.get(),
exportendtime = Header_EndTime.get(),
exportmsn = Header_MSN.get(),
mode = 'Main')
def GETsheets():
GETprofile = askopenfilename(title = 'Select data profile', filetypes = [('Excel GEToperant Profile', '*.xlsx'), ('Excel 97-2003 GEToperant Profile', '*.xls'), ('MPC2XL Row Profile', '*.MRP')])
if len(GETprofile) < 1:
showerror('Error!', 'Please select a data profile')
return None
MPC_filenames = askopenfilenames(title = 'Select files to import')
if len(MPC_filenames) < 1:
showerror('Error!', 'Please select at least one Med-PC data file.')
return None
outputfile = asksaveasfilename(title = 'Save output file as', defaultextension='.xlsx', filetypes=(('Excel', '*.xlsx'),('All Files', '*.*')))
if len(outputfile) < 1:
showerror('Error!', 'Please select an output file.')
return None
elif '.xls' not in outputfile[-4:] and '.xlsx' not in outputfile[-5:]:
outputfile = outputfile + '.xlsx'
GEToperant.GEToperant(GETprofile, MPC_filenames, outputfile,
exportfilename = Header_Filename.get(),
exportstartdate = Header_StartDate.get(),
exportenddate = Header_EndDate.get(),
exportsubject = Header_Subject.get(),
exportexperiment = Header_Experiment.get(),
exportgroup = Header_Group.get(),
exportbox = Header_Box.get(),
exportstarttime = Header_StartTime.get(),
exportendtime = Header_EndTime.get(),
exportmsn = Header_MSN.get(),
mode = 'Sheets')
def GETbooks():
GETprofile = askopenfilename(title = 'Select data profile', filetypes = [('Excel GEToperant Profile', '*.xlsx'), ('Excel 97-2003 GEToperant Profile', '*.xls'), ('MPC2XL Row Profile', '*.MRP')])
if len(GETprofile) < 1:
showerror('Error!', 'Please select a data profile')
return None
MPC_filenames = askopenfilenames(title = 'Select files to import')
if len(MPC_filenames) < 1:
showerror('Error!', 'Please select at least one Med-PC data file.')
return None
outputfile = askdirectory(title = 'Select directory to save exported files to')
if len(outputfile) < 1:
showerror('Error!', 'Please select an output directory.')
return None
else:
GEToperant.GEToperant(GETprofile, MPC_filenames, outputfile,
exportfilename = Header_Filename.get(),
exportstartdate = Header_StartDate.get(),
exportenddate = Header_EndDate.get(),
exportsubject = Header_Subject.get(),
exportexperiment = Header_Experiment.get(),
exportgroup = Header_Group.get(),
exportbox = Header_Box.get(),
exportstarttime = Header_StartTime.get(),
exportendtime = Header_EndTime.get(),
exportmsn = Header_MSN.get(),
mode = 'Books')
def helpme():
helpwindow = Toplevel()
helpwindow.title('How to use GEToperant')
helptext = Text(helpwindow, height = 30, width = 80)
helptext.pack(side= 'top')
scroll = Scrollbar(helpwindow, command = helptext.yview)
helptext.configure(yscrollcommand = scroll.set)
helptext.tag_configure('regular', font=('Verdana', 11))
howtoGET = """
How to use GEToperant
Using GEToperant involves four steps.
1. Create a data profile
2. Use the checkboxes to select which headers you wish to export
3. Click on the button that corresponds to the output you want
4. Follow the prompts to select your data profile and files
Your data profile tells GEToperant what data you want extracted
and what to label each element as. You can extract:
- a single element
- a section of an array
- a whole array
You can also use MPC2XL Row Profiles (MRPs) to extract your data
    or convert an MRP to a GEToperant profile.
Your data profile needs to have up to 7 pieces of information:
1. A Label
2. A Label Start Value
3. A Label Increment
4. An Array or Variable
5. The Start Element
6. The Increment Element
7. The Stop Element
In order to extract a single element you will need to define:
- The Label
- The Array or Variable
- The Start Element (i.e. the element you want extracted)
- The Increment Element (which must equal 0)
For example, the element A(0) contains the total lever responses.
You would define the label as 'Lever Presses', the Array as 'A',
the Start Element as 0 and the Increment Element as 0. This tells
GEToperant to get the element A(0) from all sessions in the data
files you load and to label it 'Lever Presses'.
In order to extract a section of an array you need:
- The label
- The Array or Variable
- The Start Element
- The Increment Element
- The Stop Element
You can also use:
- The Label Start Value
- The Label Increment
Your Stop Element must be greater than your Start Element and
your Increment Element must be greater than 0. This will tell
GEToperant to start at a particular part of the array and keep
going up by the increments you define until it reaches the Stop
Element. So if you wanted every second value of the B array from
beginning to element 30, you would set the Start Element to 0,
    the Increment Element to 2 and the Stop Element to 30.
The Label Increment and Label Start Value are optional and allow
you to define a value to put at the end of your label. This is
useful for a series like timebins. For example, you could have
a label of 'Responses Min' with a Label Start Value of 1 and a
Label Increment of 1. You would then get 'Responses Min 1',
'Responses Min 2', 'Responses Min 3' and so on.
In order to extract an array until it ends you will need the same
details as required to extract a section of an array except you
should leave the Stop Element blank or write something in it, such
    as 'End'; any text string in the Stop Element will be read as the
    end of the array.
Session comments are not extracted automatically. In order to
extract comments provide:
- The Label
- An Array or Variable with the word 'comment' in it (this is not
case sensitive)
- A Start Element and Increment Element of 0
Once you have your data profile, you can select your headers.
All headers are selected by default.
You can export your data as:
1. A single worksheet
2. Separate sheets
3. Separate books
Click on the button corresponding to the type of output you want and
GEToperant will display windows to select the appropriate files.
For a single worksheet, GEToperant will save all data to one sheet
on one Excel file.
For separate sheets, GEToperant will save each data file in a separate
worksheet, but in one Excel file.
For separate books, GEToperant will save each data file in a separate
Excel file, named after the file that it corresponds to.
"""
helptext.insert(END, howtoGET, 'regular')
helptext.pack(side=LEFT)
scroll.pack(side=RIGHT, fill = Y)
def aboutGET():
aboutme = Toplevel()
aboutme.title('About GEToperant')
abouttext = Text(aboutme, height = 26, width = 75)
abouttext.pack(side= 'top')
abouttext.tag_configure('regular', font=('Verdana', 11))
about = """
GEToperant is a general extraction tool for Med-PC®.
It was designed to be compatible with Med-PC® IV but given how
little Med-PC® changes, it should be compatible with Med-PC® V.
It was originally written by Shaun Khoo using Python 3.4.4 with the xlrd
and xlsxwriter packages. It has since been updated to run with Python
3.9.1 and now also uses the openpyxl package. Executable files were
produced using pyinstaller (Windows) and py2app (Mac).
It is free open source software available under an MIT license.
You pay nothing and you can do with it as you please.
If you have enjoyed using GEToperant, please tell your friends or
reference it in one of your publications:
Khoo, S. Y. (2021). GEToperant: A General Extraction Tool for Med-PC
Data. Figshare. doi: 10.6084/m9.figshare.13697851
For the latest version and source code visit:
https://github.com/SKhoo/GEToperant/
For up to date contact information visit:
https://orcid.org/0000-0002-0972-3788
"""
abouttext.insert(END, about, 'regular')
abouttext.pack(side=LEFT)
def licenseMIT():
licenseme = Toplevel()
licenseme.title('GEToperant MIT License')
MIT = Text(licenseme, height = 31, width = 60)
MIT.pack(side= 'top')
MIT.tag_configure('regular', font=('Arial', 11))
MITtext = """
MIT License
Copyright (c) 2018-2021 Shaun Khoo
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY
OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
MIT.insert(END, MITtext, 'regular')
MIT.pack(side=LEFT)
def quit():
root.destroy()
root = Tk()
##Set window size
root.geometry('876x500')
root.title('GEToperant v1.1 >(\' . \')<')
Montre = PhotoImage(file='icon.pnm')
root.wm_iconphoto(True, Montre)
#Display header logo
Kip = PhotoImage(file='logo.pnm')
displaylogo = Label(root, image = Kip).grid(row = 0)
##Checkbox options
Label(root, text = 'Select headers to export:', font=('Verdana', 11)).grid(row = 1)
Cboxes1 = Frame(height = 80, width = 876)
Cboxes1.grid(row = 2)
Header_Filename = IntVar(value = 1)
Checkbutton(Cboxes1, text= 'Filename', variable = Header_Filename, font=('Verdana', 9)).grid(row = 0, column = 0, sticky = W, padx = 15)
Header_StartDate = IntVar(value = 1)
Checkbutton(Cboxes1, text= 'Start Date', variable = Header_StartDate, font=('Verdana', 9)).grid(row = 0, column = 1, sticky = W, padx = 15)
Header_EndDate = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'End Date', variable = Header_EndDate, font=('Verdana', 9)).grid(row = 0, column = 2, sticky = W, padx = 15)
Header_Subject = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'Subject', variable = Header_Subject, font=('Verdana', 9)).grid(row = 0, column = 3, sticky = W, padx = 15)
Header_Experiment = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'Experiment', variable = Header_Experiment, font=('Verdana', 9)).grid(row = 0, column = 4, sticky = W, padx = 15)
Header_Group = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'Group', variable = Header_Group, font=('Verdana', 9)).grid(row = 1, column = 0, sticky = W, padx = 15)
Header_Box = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'Box', variable = Header_Box, font=('Verdana', 9)).grid(row = 1, column = 1, sticky = W, padx = 15)
Header_StartTime = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'Start Time', variable = Header_StartTime, font=('Verdana', 9)).grid(row = 1, column = 2, sticky = W, padx = 15)
Header_EndTime = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'End Time', variable = Header_EndTime, font=('Verdana', 9)).grid(row = 1, column = 3, sticky = W, padx = 15)
Header_MSN = IntVar(value = 1)
Checkbutton(Cboxes1, text = 'MSN (Program Name)', variable = Header_MSN, font=('Verdana', 9)).grid(row = 1, column = 4, sticky = W, padx = 15)
## Menus
menu = Menu(root)
root.config(menu = menu)
filemenu = Menu(menu)
menu.add_cascade(label = 'File', menu = filemenu)
filemenu.add_command(label = 'Select Profile', command = openprofile)
filemenu.add_command(label = 'Open Data File(s)', command = opendata)
filemenu.add_command(label = 'Save Output: One Sheet, One Workbook', command = saveoutput)
filemenu.add_command(label = 'Save Output: Data Files on Separate Sheets', command = saveoutputsheet)
filemenu.add_command(label = 'Save Output: Data Files in Separate Workbooks', command = saveoutputbooks)
filemenu.add_separator()
filemenu.add_command(label = 'Convert MPC2XL Row Profile', command = convertprofile)
filemenu.add_separator()
filemenu.add_command(label = 'Close', command = root.quit)
helpmenu = Menu(menu)
menu.add_cascade(label = 'Help', menu = helpmenu)
helpmenu.add_command(label = 'How to use GEToperant', command = helpme)
helpmenu.add_command(label = 'About', command = aboutGET)
helpmenu.add_command(label = 'License', command = licenseMIT)
class CreateToolTip(object):
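    """Attach a hover tooltip to a tkinter widget.

    The tooltip text is shown in a borderless Toplevel window near the
    widget after `waittime` milliseconds and is hidden again on leave or
    button press.
    """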
def __init__(self, widget, text='widget info'):
self.waittime = 500
self.wraplength = 400
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<ButtonPress>", self.leave)
self.id = None
self.tw = None
def enter(self, event=None):
self.schedule()
def leave(self, event=None):
self.unschedule()
self.hidetip()
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
id = self.id
self.id = None
if id:
self.widget.after_cancel(id)
def showtip(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 50
y += self.widget.winfo_rooty() - 20
# creates a toplevel window
self.tw = Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = Label(self.tw, text=self.text, justify='left',
background="#ffffff", relief='solid', borderwidth=1,
wraplength = self.wraplength)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tw
self.tw= None
if tw:
tw.destroy()
class App:
def __init__(self, root):
frame = Frame(height = 50, width = 876)
frame.grid(row = 3, pady = 15)
Label(frame, text = 'Export data file(s) to:', font=('Verdana', 10)).grid(row = 0, column = 1)
self.express = Button(frame, text = '1. Single Worksheet', command = GETexpress, font=('Verdana', 9))
self.express.grid(row = 1, column = 0, sticky = W, padx = 30)
self.express.tip = CreateToolTip(self.express, "Export all data to one Excel worksheet in one Excel file")
self.sheets = Button(frame, text = '2. Separate Sheets', command = GETsheets, font=('Verdana', 9))
self.sheets.grid(row = 1, column = 1, sticky = W, padx = 30)
self.sheets.tip = CreateToolTip(self.sheets, "Export each data file to a separate worksheet within the same Excel file")
self.books = Button(frame, text = '3. Separate Books', command = GETbooks, font=('Verdana', 9))
self.books.grid(row = 1, column = 2, sticky = W, padx = 30)
self.books.tip = CreateToolTip(self.books, "Export each data file to its own Excel file")
self.convert = Button(frame, text = 'Convert MRP', command = convertprofile, font=('Verdana', 9))
self.convert.grid(row = 2, column = 0, sticky = W, padx = 30)
self.convert.tip = CreateToolTip(self.convert, "Convert an MPC2XL Row Profile into a GEToperant profile")
self.exit = Button(frame, text = 'Quit', command = quit, font=('Verdana', 9))
self.exit.grid(row = 2, column = 2, sticky = E, padx = 30, pady = 10)
Label(root, text="doi: 10.6084/m9.figshare.13697851").grid(row = 4, ipadx = 12, sticky = E)
app = App(root)
root.mainloop()
|
SKhoo/GEToperant
|
GEToperantGUI.py
|
Python
|
mit
| 24,091
|
[
"VisIt"
] |
3e623385072b064673c964480aca55630ab75501b79073de3b978f67eede70c6
|
'''
Define the public 'draw' function to be used to draw
morphology using plotly
'''
from __future__ import absolute_import # prevents name clash with local plotly module
from itertools import chain
import numpy as np
try:
import plotly.graph_objs as go
from plotly.offline import plot, iplot, init_notebook_mode
except ImportError:
raise ImportError(
'neurom[plotly] is not installed. Please install it by doing: pip install neurom[plotly]')
from neurom import COLS, iter_segments, iter_neurites
from neurom.view.view import TREE_COLOR
def draw(obj, plane='3d', inline=False, **kwargs):
    '''Draw the morphology in the given plane.
plane (str): a string representing the 2D plane (example: 'xy')
or '3d', '3D' for a 3D view
inline (bool): must be set to True for interactive ipython notebook plotting
'''
if plane.lower() == '3d':
return _plot_neuron3d(obj, inline, **kwargs)
return _plot_neuron(obj, plane, inline, **kwargs)
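# Hypothetical usage sketch (assumes a local morphology file;
# `neurom.load_neuron` is assumed as the loader):
# import neurom
# neuron = neurom.load_neuron('cell.swc')
# fig2d = draw(neuron, plane='xy')  # writes neuron-2D.html, returns the fig dict
# fig3d = draw(neuron)              # default plane='3d' writes neuron-3D.html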
def _plot_neuron(neuron, plane, inline, **kwargs):
return _plotly(neuron, plane=plane, title='neuron-2D', inline=inline, **kwargs)
def _plot_neuron3d(neuron, inline, **kwargs):
'''
    Generates a figure of the neuron,
    which contains a soma and a list of trees.
'''
return _plotly(neuron, plane='3d', title='neuron-3D', inline=inline, **kwargs)
def _make_trace(neuron, plane):
'''Create the trace to be plotted'''
for neurite in iter_neurites(neuron):
segments = list(iter_segments(neurite))
segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in segments]
coords = dict(x=list(chain.from_iterable((p1[0], p2[0], None) for p1, p2 in segs)),
y=list(chain.from_iterable((p1[1], p2[1], None) for p1, p2 in segs)),
z=list(chain.from_iterable((p1[2], p2[2], None) for p1, p2 in segs)))
color = TREE_COLOR.get(neurite.root_node.type, 'black')
if plane.lower() == '3d':
plot_fun = go.Scatter3d
else:
plot_fun = go.Scatter
coords = dict(x=coords[plane[0]], y=coords[plane[1]])
yield plot_fun(
line=dict(color=color, width=2),
mode='lines',
**coords
)
def _plotly(neuron, plane, title, inline, **kwargs):
data = list(_make_trace(neuron, plane))
axis = dict(
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
)
if plane != '3d':
soma_2d = [
# filled circle
{
'type': 'circle',
'xref': 'x',
'yref': 'y',
'fillcolor': 'rgba(50, 171, 96, 0.7)',
'x0': neuron.soma.center[0] - neuron.soma.radius,
'y0': neuron.soma.center[1] - neuron.soma.radius,
'x1': neuron.soma.center[0] + neuron.soma.radius,
'y1': neuron.soma.center[1] + neuron.soma.radius,
'line': {
'color': 'rgba(50, 171, 96, 1)',
},
},
]
else:
soma_2d = []
theta = np.linspace(0, 2 * np.pi, 100)
phi = np.linspace(0, np.pi, 100)
        r = neuron.soma.radius
        # Scale the unit sphere by the soma radius before translating it to
        # the soma center; scaling after translating would also scale the
        # center coordinates.
        x = r * np.outer(np.cos(theta), np.sin(phi)) + neuron.soma.center[0]
        y = r * np.outer(np.sin(theta), np.sin(phi)) + neuron.soma.center[1]
        z = r * np.outer(np.ones(100), np.cos(phi)) + neuron.soma.center[2]
        data.append(
            go.Surface(
                x=x,
                y=y,
                z=z,
                cauto=False,
                surfacecolor=['black'] * len(z),
                showscale=False,
            )
        )
layout = dict(
autosize=True,
title=title,
scene=dict( # This is used for 3D plots
xaxis=axis, yaxis=axis, zaxis=axis,
camera=dict(up=dict(x=0, y=0, z=1), eye=dict(x=-1.7428, y=1.0707, z=0.7100,)),
aspectmode='data'
),
yaxis=dict(scaleanchor="x"), # This is used for 2D plots
shapes=soma_2d,
)
fig = dict(data=data, layout=layout)
plot_fun = iplot if inline else plot
if inline:
init_notebook_mode(connected=True) # pragma: no cover
plot_fun(fig, filename=title + '.html', **kwargs)
return fig
|
lidakanari/NeuroM
|
neurom/view/plotly.py
|
Python
|
bsd-3-clause
| 4,380
|
[
"NEURON"
] |
70f81d95ddce2128d896a3d76b78c98ceb61d6f743ed26b204d43a70f590227e
|