# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
def createModel():
datamodel=COPASI.CRootContainer.addDatamodel()
model=datamodel.getModel()
model.setVolumeUnit(COPASI.CModel.fl)
model.setTimeUnit(COPASI.CModel.s)
model.setQuantityUnit(COPASI.CModel.fMol)
comp=model.createCompartment("CompartmentA")
A=model.createMetabolite("A",comp.getObjectName())
A.setInitialConcentration(2.0e-4)
B=model.createMetabolite("B",comp.getObjectName())
B.setInitialConcentration(0.0)
react=model.createReaction("Decay_1")
react.addSubstrate(A.getKey())
react.addProduct(B.getKey())
react.setReversible(False)
react.setFunction("Mass action (irreversible)")
react.setParameterValue("k1",0.5)
mapping=COPASI.StringStdVector()
mapping.append(react.getChemEq().getSubstrate(0).getMetabolite().getKey())
react.setParameterMappingVector(react.getFunction().getVariables().getParameter(1).getObjectName(),mapping)
model.compileIfNecessary()
changedObjects=COPASI.ObjectStdVector()
changedObjects.push_back(comp.getObject(COPASI.CCommonName("Reference=InitialVolume")))
changedObjects.push_back(A.getObject(COPASI.CCommonName("Reference=InitialConcentration")))
changedObjects.push_back(B.getObject(COPASI.CCommonName("Reference=InitialConcentration")))
changedObjects.push_back(react.getParameters().getParameter(0).getObject(COPASI.CCommonName("Reference=Value")))
model.updateInitialValues(changedObjects)
return datamodel
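# A hedged usage sketch (not part of the original test file): the returned
# datamodel could be persisted through the standard COPASI bindings, e.g.
#   datamodel = createModel()
#   datamodel.saveModel("simple_decay.cps", True)  # assumed signature: (filename, overwrite)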
def extendModel(datamodel):
model=datamodel.getModel()
metab=model.createMetabolite("C",model.getCompartment(0).getObjectName())
metab.setInitialConcentration(0.0)
react=model.createReaction("Decay_2")
react.addSubstrate(model.getMetabolite(1).getKey())
react.addProduct(metab.getKey())
react.setReversible(False)
react.setFunction("Mass action (irreversible)")
react.getParameters().getParameter(0).setValue(0.1)
mapping=COPASI.StringStdVector()
mapping.append(react.getChemEq().getSubstrate(0).getMetabolite().getKey())
react.setParameterMappingVector(react.getFunction().getVariables().getParameter(1).getObjectName(),mapping)
model.compileIfNecessary()
changedObjects=COPASI.ObjectStdVector()
changedObjects.push_back(metab.getObject(COPASI.CCommonName("Reference=InitialConcentration")))
changedObjects.push_back(react.getParameters().getParameter(0).getObject(COPASI.CCommonName("Reference=Value")))
model.updateInitialValues(changedObjects)
class Test_CreateSimpleModel(unittest.TestCase):
def setUp(self):
self.datamodel=createModel()
self.model=self.datamodel.getModel()
def test_createModel(self):
self.assert_(self.model!=None)
self.assert_(self.model.__class__==COPASI.CModel)
self.assert_(self.model.getCompartments().size()==1)
self.assert_(self.model.getCompartment(0).getObjectName()=="CompartmentA")
self.assert_(self.model.getMetabolites().size()==2)
self.assert_(self.model.getMetabolite(0).getObjectName()=="A")
self.assert_(self.model.getMetabolite(0).getInitialConcentration()==2.0e-4)
self.assert_(self.model.getMetabolite(1).getObjectName()=="B")
self.assert_(self.model.getMetabolite(1).getInitialValue()==0.0)
self.assert_(self.model.getReactions().size()==1)
self.assert_(self.model.getReaction(0).getObjectName()=="Decay_1")
self.assert_(self.model.getReaction(0).isReversible()==False)
self.assert_(self.model.getReaction(0).getChemEq().getSubstrates().size()==1)
self.assert_(self.model.getReaction(0).getChemEq().getSubstrate(0).getMetabolite().getObjectName()=="A")
self.assert_(self.model.getReaction(0).getChemEq().getSubstrate(0).getMultiplicity()==1.0)
self.assert_(self.model.getReaction(0).getChemEq().getProducts().size()==1)
self.assert_(self.model.getReaction(0).getChemEq().getProduct(0).getMetabolite().getObjectName()=="B")
self.assert_(self.model.getReaction(0).getChemEq().getProduct(0).getMultiplicity()==1.0)
self.assert_(self.model.getReaction(0).getChemEq().getModifiers().size()==0)
self.assert_(self.model.getReaction(0).getFunction()!=None)
self.assert_(self.model.getReaction(0).getFunction().getObjectName()=="Mass action (irreversible)")
self.assert_(self.model.getReaction(0).getParameters().size()==1)
self.assert_(self.model.getReaction(0).getParameters().getParameter(0).getObjectName()=="k1")
self.assert_(self.model.getReaction(0).getParameters().getParameter(0).getValue()==0.5)
def test_extendModel(self):
extendModel(self.datamodel)
self.assert_(self.model.getMetabolites().size()==3)
self.assert_(self.model.getMetabolite(2).getObjectName()=="C")
self.assert_(self.model.getMetabolite(2).getInitialValue()==0.0)
self.assert_(self.model.getReactions().size()==2)
self.assert_(self.model.getReaction(1).getObjectName()=="Decay_2")
self.assert_(self.model.getReaction(1).isReversible()==False)
self.assert_(self.model.getReaction(1).getChemEq().getSubstrates().size()==1)
self.assert_(self.model.getReaction(1).getChemEq().getSubstrate(0).getMetabolite().getObjectName()=="B")
self.assert_(self.model.getReaction(1).getChemEq().getSubstrate(0).getMultiplicity()==1.0)
self.assert_(self.model.getReaction(1).getChemEq().getProducts().size()==1)
self.assert_(self.model.getReaction(1).getChemEq().getProduct(0).getMetabolite().getObjectName()=="C")
self.assert_(self.model.getReaction(1).getChemEq().getProduct(0).getMultiplicity()==1.0)
self.assert_(self.model.getReaction(1).getChemEq().getModifiers().size()==0)
self.assert_(self.model.getReaction(1).getFunction()!=None)
self.assert_(self.model.getReaction(1).getFunction().getObjectName()=="Mass action (irreversible)")
self.assert_(self.model.getReaction(1).getParameters().size()==1)
self.assert_(self.model.getReaction(1).getParameters().getParameter(0).getObjectName()=="k1")
self.assert_(self.model.getReaction(1).getParameters().getParameter(0).getValue()==0.1)
def suite():
tests=[
'test_createModel'
,'test_extendModel'
]
return unittest.TestSuite(map(Test_CreateSimpleModel,tests))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
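# Hedged usage note: the module can be executed directly, e.g.
#   $ python Test_CreateSimpleModel.py
# which runs test_createModel and test_extendModel through the TextTestRunner above.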
# Source: jonasfoe/COPASI, path: copasi/bindings/python/unittests/Test_CreateSimpleModel.py
# Language: Python | License: artistic-2.0 | Size: 7,093 bytes | Keywords: ["COPASI"]
# SHA-256: 579ab4c7c4d6959ad4a3cd6728bba1a8d5bff88cf2067033194d90bddbdbce18
# pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q, F
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE, EnrollStatusChange
from util.query import use_read_replica_if_available
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from shoppingcart.pdf import PDFInvoice
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
('defunct-cart', 'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
'cart': 'defunct-cart',
'paying': 'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
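# Illustrative (hypothetical) value: OrderItemSubclassPK(cls=PaidCourseRegistration, pk=42)
# unambiguously identifies row 42 of that OrderItem subclass's table.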
class OrderTypes(object):
"""
This class specifies purchase OrderTypes.
"""
PERSONAL = 'personal'
BUSINESS = 'business'
ORDER_TYPES = (
(PERSONAL, 'personal'),
(BUSINESS, 'business'),
)
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
class Meta(object):
app_label = "shoppingcart"
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
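# Hedged usage sketch (not in the original module): callers fetch the active cart via
#   cart = Order.get_cart_for_user(request.user)
# rather than querying Order directly, preserving the one-'cart'-order-per-user invariant.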
@classmethod
def does_user_have_cart(cls, user):
"""
Returns a boolean indicating whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
Returns True if the user (anonymous user ok) has
a cart with items in it (which means it should be displayed).
If item_types is passed in, then we check to see if the cart has
at least one OrderItem of those types.
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
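# Hedged usage sketch: check for any item at all, or only for specific OrderItem subclasses:
#   Order.user_cart_has_items(request.user)
#   Order.user_cart_has_items(request.user, [PaidCourseRegistration])  # subclass defined below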
@classmethod
def remove_cart_item_from_order(cls, item, user):
"""
Removes the item from the cart if the item.order.status == 'cart'.
Also removes any code redemption associated with the order_item
"""
if item.order.status == 'cart':
log.info("order item %s removed for user %s", str(item.id), user)
item.delete()
# remove any redemption entry associated with the item
CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists()
else:
items = self.orderitem_set.all().select_subclasses()
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
Reset the price state of the items in the user's cart
"""
for item in self.orderitem_set.all():
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
@transaction.atomic
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
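# Hedged summary of the Order status flow implemented by the methods in this class:
#   'cart' --start_purchase()--> 'paying' --purchase()--> 'purchased' --refund()--> 'refunded'
#   'cart' / 'paying' --retire()--> 'defunct-cart' / 'defunct-paying' (erroneously stuck orders)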
def update_order_type(self):
"""
Updates the order type. This method will inspect the quantity associated with each OrderItem.
In the application, it is implied that when qty > 1, the user is purchasing
'RegistrationCodes', which are randomly generated strings that users can distribute to
others so that they can enroll in paywalled courses.
The UI/UX may change in the future to make the switch between PaidCourseRegistration
and CourseRegCodeItems a more explicit UI gesture from the purchaser.
"""
cart_items = self.orderitem_set.all()
is_order_type_business = False
for cart_item in cart_items:
if cart_item.qty > 1:
is_order_type_business = True
items_to_delete = []
old_to_new_id_map = []
if is_order_type_business:
for cart_item in cart_items:
if hasattr(cart_item, 'paidcourseregistration'):
course_reg_code_item = CourseRegCodeItem.add_to_order(
self, cart_item.paidcourseregistration.course_id, cart_item.qty,
)
# update the discounted prices if coupon redemption applied
course_reg_code_item.list_price = cart_item.list_price
course_reg_code_item.unit_cost = cart_item.unit_cost
course_reg_code_item.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
else:
for cart_item in cart_items:
if hasattr(cart_item, 'courseregcodeitem'):
paid_course_registration = PaidCourseRegistration.add_to_order(
self, cart_item.courseregcodeitem.course_id,
)
# update the discounted prices if coupon redemption applied
paid_course_registration.list_price = cart_item.list_price
paid_course_registration.unit_cost = cart_item.unit_cost
paid_course_registration.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
for item in items_to_delete:
item.delete()
self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
self.save()
return old_to_new_id_map
def generate_pdf_receipt(self, order_items):
"""
Generates the pdf receipt for the given order_items
and returns the pdf_buffer.
"""
items_data = []
for item in order_items:
item_total = item.qty * item.unit_cost
items_data.append({
'item_description': item.pdf_receipt_display_name,
'quantity': item.qty,
'list_price': item.get_list_price(),
'discount': item.get_list_price() - item.unit_cost,
'item_total': item_total
})
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=self.purchase_time,
is_invoice=False,
total_cost=self.total_cost,
payment_received=self.total_cost,
balance=0
).generate_pdf(pdf_buffer)
return pdf_buffer
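# Hedged usage sketch: a view could stream the buffer back as a download, e.g.
#   pdf_buffer = order.generate_pdf_receipt(order.orderitem_set.all().select_subclasses())
#   response = HttpResponse(pdf_buffer.getvalue(), content_type='application/pdf')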
def generate_registration_codes_csv(self, orderitems, site_name):
"""
Generates the CSV file of registration codes for the given order items.
"""
course_names = []
csv_file = StringIO.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
for item in orderitems:
course_id = item.course_id
course = get_course_by_id(item.course_id, depth=0)
registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
course_names.append(course.display_name)
for registration_code in registration_codes:
redemption_url = reverse('register_code_redemption', args=[registration_code.code])
url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
return csv_file, course_names
def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, course_names):
"""
Send the confirmation e-mails for this order.
"""
recipient_list = [(self.user.username, self.user.email, 'user')] # pylint: disable=no-member
if self.company_contact_email:
recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
joined_course_names = ""
if self.recipient_email:
recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
joined_course_names = " " + ", ".join(course_names)
if not is_order_type_business:
subject = _("Order Payment Confirmation")
else:
subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
course_name_list=joined_course_names
)
dashboard_url = '{base_url}{dashboard}'.format(
base_url=site_name,
dashboard=reverse('dashboard')
)
try:
from_address = configuration_helpers.get_value(
'email_from_address',
settings.PAYMENT_SUPPORT_EMAIL
)
# Send a unique email for each recipient. Don't put all email addresses in a single email.
for recipient in recipient_list:
message = render_to_string(
'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
{
'order': self,
'recipient_name': recipient[0],
'recipient_type': recipient[2],
'site_name': site_name,
'order_items': orderitems,
'course_names': ", ".join(course_names),
'dashboard_url': dashboard_url,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'order_placed_by': '{username} ({email})'.format(
username=self.user.username, email=self.user.email
),
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
'payment_support_email': configuration_helpers.get_value(
'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
),
'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
}
)
email = EmailMessage(
subject=subject,
body=message,
from_email=from_address,
to=[recipient[1]]
)
# Only the business order is HTML formatted. A single seat order confirmation is plain text.
if is_order_type_business:
email.content_subtype = "html"
if csv_file:
email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
if pdf_file is not None:
email.attach(u'ReceiptOrder{}.pdf'.format(str(self.id)), pdf_file.getvalue(), 'application/pdf')
else:
file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
email.send()
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id)
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
log.error(
u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member
)
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
if self.order_type == OrderTypes.BUSINESS:
self.update_order_type()
for item in orderitems:
item.purchase_item()
csv_file = None
course_names = []
if self.order_type == OrderTypes.BUSINESS:
#
# Generate the CSV file that contains all of the RegistrationCodes that have already been
# generated when the purchase has transacted
#
csv_file, course_names = self.generate_registration_codes_csv(orderitems, site_name)
try:
pdf_file = self.generate_pdf_receipt(orderitems)
except Exception: # pylint: disable=broad-except
log.exception('Exception while creating pdf file.')
pdf_file = None
try:
self.send_confirmation_emails(
orderitems, self.order_type == OrderTypes.BUSINESS,
csv_file, pdf_file, site_name, course_names
)
except Exception: # pylint: disable=broad-except
# Catch all exceptions here, since the Django view implicitly
# wraps this in a transaction. If the order completes successfully,
# we don't want to roll back just because we couldn't send
# the confirmation email.
log.exception('Error occurred while sending payment confirmation email')
self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user.id, event_name, {
'orderId': self.id,
'total': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id)
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
recipient_name - Name of the contact the invoice should be sent to
recipient_email - Email the invoice should be sent to
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in ORDER_STATUS_MAP.values():
return
if self.status not in ORDER_STATUS_MAP.keys():
raise InvalidStatusToRetire(
"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all():
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all():
item.retire()
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises ItemNotFoundInCartException when no item
with the given course_id is present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
class Meta(object):
app_label = "shoppingcart"
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.atomic
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a set of `ReceiptInstruction` HTML strings.
The default implementation returns an empty set.
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def is_discounted(self):
"""
Returns True if a discount coupon has been applied to the OrderItem, and False otherwise.
Earlier, OrderItems were stored with an empty list_price if a discount had not been applied.
Now we consider the item non-discounted if list_price is None or list_price == unit_cost;
conversely, an item is discounted if list_price is non-None and differs from unit_cost.
This should work with both new and old records.
"""
return self.list_price and self.list_price != self.unit_cost
def get_list_price(self):
"""
Returns the list_price if it is defined; otherwise returns the unit_cost (no discount applied).
"""
return self.list_price if self.list_price else self.unit_cost
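# Worked examples (derived from the two methods above; Decimal values are illustrative):
#   list_price=None,          unit_cost=Decimal('10.00') -> is_discounted falsy, list price 10.00
#   list_price=Decimal('10'), unit_cost=Decimal('10.00') -> not discounted (values compare equal)
#   list_price=Decimal('10'), unit_cost=Decimal('8.00')  -> discounted, list price 10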
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""
Individual instructions for this order item.
Currently, only used for emails.
"""
return ''
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
This can be overridden by the subclasses of OrderItem
"""
course_key = getattr(self, 'course_id', None)
if course_key:
course = get_course_by_id(course_key, depth=0)
return course.display_name
else:
raise Exception(
"Not Implemented. OrderItems that are not Course specific should have"
" a overridden pdf_receipt_display_name property"
)
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
The default implementation returns defaults for most attributes. When no name or
category is specified by the implementation, the string 'N/A' is placed for the
name and category. This should be handled appropriately by all implementations.
Returns
A dictionary containing analytics data for this OrderItem.
"""
return {
'id': self.id,
'sku': type(self).__name__,
'name': 'N/A',
'price': str(self.unit_cost),
'quantity': self.qty,
'category': 'N/A',
}
def retire(self):
"""
Called by the `retire` method defined in the `Order` class. Retires
an order item if its (and its order's) status was erroneously not
updated to "purchased" after the order was processed.
"""
self.status = ORDER_STATUS_MAP[self.status]
self.save()
class Invoice(TimeStampedModel):
"""
This table captures all the information needed to support "invoicing",
which is when a user wants to purchase Registration Codes,
but will not do so via a credit card transaction.
"""
class Meta(object):
app_label = "shoppingcart"
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True, blank=True)
address_line_3 = models.CharField(max_length=255, null=True, blank=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
# This field has been deprecated.
# The total amount can now be calculated as the sum
# of each invoice item associated with the invoice.
# For backwards compatibility, this field is maintained
# and written to during invoice creation.
total_amount = models.FloatField()
# This field has been deprecated in order to support
# invoices for items that are not course-related.
# Although this field is still maintained for backwards
# compatibility, you should use CourseRegistrationCodeInvoiceItem
# to look up the course ID for purchased redeem codes.
course_id = CourseKeyField(max_length=255, db_index=True)
internal_reference = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=ugettext_lazy("Internal reference code for this invoice.")
)
customer_reference_number = models.CharField(
max_length=63,
null=True,
blank=True,
help_text=ugettext_lazy("Customer's reference code for this invoice.")
)
is_valid = models.BooleanField(default=True)
@classmethod
def get_invoice_total_amount_for_course(cls, course_key):
"""
Returns the total invoice amount generated for the given course.
"""
result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
total = result.get('total', 0)
return total if total else 0
def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
"""
Generates the pdf invoice for the given course
and returns the pdf_buffer.
"""
discount_per_item = float(course_price) - sale_price / quantity
list_price = course_price - discount_per_item
items_data = [{
'item_description': course.display_name,
'quantity': quantity,
'list_price': list_price,
'discount': discount_per_item,
'item_total': quantity * list_price
}]
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=datetime.now(pytz.utc),
is_invoice=True,
total_cost=float(self.total_amount),
payment_received=0,
balance=float(self.total_amount)
).generate_pdf(pdf_buffer)
return pdf_buffer
def snapshot(self):
"""Create a snapshot of the invoice.
A snapshot is a JSON-serializable representation
of the invoice's state, including its line items
and associated transactions (payments/refunds).
This is useful for saving the history of changes
to the invoice.
Returns:
dict
"""
return {
'internal_reference': self.internal_reference,
'customer_reference': self.customer_reference_number,
'is_valid': self.is_valid,
'contact_info': {
'company_name': self.company_name,
'company_contact_name': self.company_contact_name,
'company_contact_email': self.company_contact_email,
'recipient_name': self.recipient_name,
'recipient_email': self.recipient_email,
'address_line_1': self.address_line_1,
'address_line_2': self.address_line_2,
'address_line_3': self.address_line_3,
'city': self.city,
'state': self.state,
'zip': self.zip,
'country': self.country,
},
'items': [
item.snapshot()
for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
],
'transactions': [
trans.snapshot()
for trans in InvoiceTransaction.objects.filter(invoice=self)
],
}
def __unicode__(self):
label = (
unicode(self.internal_reference)
if self.internal_reference
else u"No label"
)
created = (
self.created.strftime("%Y-%m-%d")
if self.created
else u"No date"
)
return u"{label} ({date_created})".format(
label=label, date_created=created
)
INVOICE_TRANSACTION_STATUSES = (
# A payment/refund is in process, but money has not yet been transferred
('started', 'started'),
# A payment/refund has completed successfully
# This should be set ONLY once money has been successfully exchanged.
('completed', 'completed'),
# A payment/refund was promised, but was cancelled before
# money had been transferred. An example would be
# cancelling a refund check before the recipient has
# a chance to deposit it.
('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
"""
class Meta(object):
app_label = "shoppingcart"
invoice = models.ForeignKey(Invoice)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default='started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
Returns the InvoiceTransaction object for the given invoice_id
if found, else returns None.
"""
try:
return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
Returns the total amount of the paid invoices for the given course.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum(
'amount',
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
total = result.get('total', 0)
return total if total else 0
def snapshot(self):
"""Create a snapshot of the invoice transaction.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'amount': unicode(self.amount),
'currency': self.currency,
'comments': self.comments,
'status': self.status,
'created_by': self.created_by.username,
'last_modified_by': self.last_modified_by.username
}
class InvoiceItem(TimeStampedModel):
"""
This is the basic interface for invoice items.
Each invoice item represents a "line" in the invoice.
For example, in an invoice for course registration codes,
there might be an invoice item representing 10 registration
codes for the DemoX course.
"""
class Meta(object):
app_label = "shoppingcart"
objects = InheritanceManager()
invoice = models.ForeignKey(Invoice, db_index=True)
qty = models.IntegerField(
default=1,
help_text=ugettext_lazy("The number of items sold.")
)
unit_price = models.DecimalField(
default=0.0,
decimal_places=2,
max_digits=30,
help_text=ugettext_lazy("The price per item sold, including discounts.")
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
def snapshot(self):
"""Create a snapshot of the invoice item.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'qty': self.qty,
'unit_price': unicode(self.unit_price),
'currency': self.currency
}
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
"""
This is an invoice item that represents a payment for
a course registration.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
def snapshot(self):
"""Create a snapshot of the invoice item.
This is the same as a snapshot for other invoice items,
with the addition of a `course_id` field.
Returns:
dict
"""
snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
snapshot['course_id'] = unicode(self.course_id)
return snapshot
class InvoiceHistory(models.Model):
"""History of changes to invoices.
This table stores snapshots of invoice state,
including the associated line items and transactions
(payments/refunds).
Entries in the table are created, but never deleted
or modified.
We use Django signals to save history entries on change
events. These signals are fired within a database
transaction, so the history record is created only
if the invoice change is successfully persisted.
"""
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
invoice = models.ForeignKey(Invoice)
# JSON-serialized representation of the current state
# of the invoice, including its line items and
# transactions (payments/refunds).
snapshot = models.TextField(blank=True)
@classmethod
def save_invoice_snapshot(cls, invoice):
"""Save a snapshot of the invoice's current state.
Arguments:
invoice (Invoice): The invoice to save.
"""
cls.objects.create(
invoice=invoice,
snapshot=json.dumps(invoice.snapshot())
)
@staticmethod
def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Signal receiver that saves a snapshot of an invoice.
Arguments:
sender: Not used, but required by Django signals.
instance (Invoice, InvoiceItem, or InvoiceTransaction)
"""
if isinstance(instance, Invoice):
InvoiceHistory.save_invoice_snapshot(instance)
elif hasattr(instance, 'invoice'):
InvoiceHistory.save_invoice_snapshot(instance.invoice)
class Meta(object):
get_latest_by = "timestamp"
app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
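# Hedged sketch of the effect: saving any connected model snapshots the owning invoice, e.g.
#   item = InvoiceItem(invoice=invoice, qty=10, unit_price=Decimal('20.00'))
#   item.save()  # post_save fires snapshot_receiver, creating a new InvoiceHistory row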
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes
With a registration code, a user can register for a course for free
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user')
created_at = models.DateTimeField(auto_now_add=True)
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
mode_slug = models.CharField(max_length=100, null=True)
is_valid = models.BooleanField(default=True)
# For backwards compatibility, we maintain the FK to "invoice"
# In the future, we will remove this in favor of the FK
# to "invoice_item" (which can be used to look up the invoice).
invoice = models.ForeignKey(Invoice, null=True)
invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)
@classmethod
def order_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via bulk purchase scenario.
"""
return cls.objects.filter(order__isnull=False, course_id=course_id)
@classmethod
def invoice_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via invoice.
"""
return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True, null=True)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
redeemed_by = models.ForeignKey(User, db_index=True)
redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def registration_code_used_for_enrollment(cls, course_enrollment):
"""
Returns the RegistrationCodeRedemption object if a registration code
was used during the course enrollment, else returns None.
"""
# theoretically there could be more than one (e.g. someone self-unenrolls
# then re-enrolls with a different regcode)
reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
if reg_codes:
# return the first one. In all normal use cases of registration codes
# the user will only have one
return reg_codes[0]
return None
@classmethod
def is_registration_code_redeemed(cls, course_reg_code):
"""
Checks whether the given registration code
exists in RegistrationCodeRedemption
"""
return cls.objects.filter(registration_code__code=course_reg_code).exists()
@classmethod
def get_registration_code_redemption(cls, code, course_id):
"""
Returns the registration code redemption object if found else returns None.
"""
try:
code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
except cls.DoesNotExist:
code_redemption = None
return code_redemption
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
return code_redemption
class SoftDeleteCouponManager(models.Manager):
""" Use this manager to get objects that have a is_active=True """
def get_active_coupons_queryset(self):
"""
Return only the Coupons with is_active=True
"""
return super(SoftDeleteCouponManager, self).get_queryset().filter(is_active=True)
def get_queryset(self):
"""
Return all coupon objects, active or not
"""
return super(SoftDeleteCouponManager, self).get_queryset()
class Coupon(models.Model):
"""
This table contains coupon codes
A user can get a discount on a course by providing a coupon code
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User)
created_at = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
expiration_date = models.DateTimeField(null=True, blank=True)
course_overview = models.ForeignKey(CourseOverview, on_delete=models.CASCADE, default=None)
def __unicode__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
objects = SoftDeleteCouponManager()
@property
def display_expiry_date(self):
"""
Return the coupon expiration date in a readable format
"""
return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
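# Worked example (illustrative date): expiration_date = datetime(2017, 1, 2, tzinfo=pytz.utc)
# displays as "January 01, 2017" (the property shows the day before the stored timestamp).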
class CouponRedemption(models.Model):
"""
This table contains coupon redemption info
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True)
user = models.ForeignKey(User, db_index=True)
coupon = models.ForeignKey(Coupon, db_index=True)
@classmethod
def remove_code_redemption_from_item(cls, item, user):
"""
If an item is removed from the shopping cart, we remove
the corresponding coupon code redemption info
"""
order_item_course_id = item.course_id
try:
# Try to remove the coupon code redemption information, if it exists.
coupon_redemption = cls.objects.get(
user=user,
coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
order=item.order_id
)
coupon_redemption.delete()
log.info(
u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
coupon_redemption.coupon.code,
user,
str(item.id),
)
except CouponRedemption.DoesNotExist:
log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
@classmethod
def remove_coupon_redemption_from_cart(cls, user, cart):
"""
This method deletes the coupon redemption for the given user's cart
"""
coupon_redemption = cls.objects.filter(user=user, order=cart)
if coupon_redemption:
coupon_redemption.delete()
log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
Return the discounted price after applying the coupon's percentage discount
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
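# Worked example: get_discount_price(25, Decimal('40.00')) computes
#   discount = Decimal('10.00')  # 25/100.00 * 40.00, formatted to two places
# and returns Decimal('30.00').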
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
Add coupon info to the CouponRedemption model
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
log.exception(
u"Coupon redemption already exist for user '%s' against order id '%s'",
order.user.username,
order.id,
)
raise MultipleCouponsNotAllowedException
for item in cart_items:
if item.course_id:
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info(
u"Discount generated for user %s against order id '%s'",
order.user.username,
order.id,
)
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
@classmethod
def get_top_discount_codes_used(cls, course_id):
"""
Returns the top discount codes used.
QuerySet = [
{
'coupon__percentage_discount': 22,
'coupon__code': '12',
'coupon__used_count': '2',
},
{
...
}
]
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
'coupon__code', 'coupon__percentage_discount'
).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
@classmethod
def get_total_coupon_code_purchases(cls, course_id):
"""
Returns the total seat purchases made using coupon codes
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def get_self_purchased_seat_count(cls, course_key, status='purchased'):
"""
Returns the count of paid_course items filtered by course_id and status.
"""
return cls.objects.filter(course_id=course_key, status=status).count()
@classmethod
def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
"""
Returns the PaidCourseRegistration object if the user has paid for
the course enrollment, else returns None
"""
try:
return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
status='purchased').latest('id')
except PaidCourseRegistration.DoesNotExist:
return None
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
Returns the total amount of money generated by purchases of this course
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
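# Hedged paraphrase: the aggregate above computes SUM(qty * unit_cost) over the matching
# rows, e.g. two purchased seats at Decimal('50.00') each contribute Decimal('100.00').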
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(
u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
order.user.email,
course_id,
order.id,
)
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case use the DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.list_price = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
user=order.user, mode=item.mode, course_id=course_id,
cost=cost, currency=currency)
return item
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
# enroll in course and link to the enrollment_id
self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
self.save()
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
cost=self.line_cost, currency=self.currency)
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = _(
u"Please visit your {link_start}dashboard{link_end} "
u"to see your new course."
).format(
link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
link_end=u'</a>',
)
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the Order Item is associated with a course, additional fields will be populated with
course information. If there is a mode associated, the mode data is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(PaidCourseRegistration, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
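# Illustrative only (values assumed, not captured output): for a hypothetical
# course key "edX/Demo/2014" purchased in "verified" mode, analytics_data()
# above extends the base OrderItem data roughly as follows:
#
#     {
#         ...                                  # fields from OrderItem.analytics_data()
#         'name': u'edX/Demo/2014',
#         'category': u'edX',
#         'sku': sku + u'.verified',           # base sku from the parent class
#     }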
class CourseRegCodeItem(OrderItem):
"""
This is an inventory item for paying for
generating course registration codes
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@classmethod
def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
"""
Returns the total number of seats purchased in bulk.
"""
total = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
if result['total'] is not None:
total = result['total']
return total
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
Returns the total amount of money generated by purchases of this course
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable
item.status = order.status
item.mode = course_mode.slug
item.unit_cost = cost
item.list_price = cost
item.qty = qty
item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
When the purchase is complete, this OrderItem type generates
registration codes that users can redeem
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
total_registration_codes = int(self.qty)
# we need to import here because of a circular dependency
# we should ultimately refactor code to have save_registration_code in this models.py
# file, but there's also a shared dependency on a random string generator which
# is in another PR (for another feature)
from lms.djangoapps.instructor.views.api import save_registration_code
for i in range(total_registration_codes): # pylint: disable=unused-variable
save_registration_code(self.user, self.course_id, self.mode, order=self.order)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
except CourseRegCodeItemAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the OrderItem is associated with a course, additional fields will be populated with
course information. If a mode is available, it will be included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CourseRegCodeItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItemAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates report for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates report for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if (not course_enrollment.refundable()) or skip_refund:
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.warning(
u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
course_enrollment.user,
course_enrollment.course_id,
)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.refund()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
log.error(msg)
raise InvalidCartItem(
_(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
)
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
item.list_price = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _("{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
# signal course added to cart
course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
cost=self.unit_cost, currency=self.currency)
def additional_instruction_text(self):
verification_reminder = ""
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course "
"start date. ")
is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()
if is_enrollment_mode_verified:
domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
verification_url = "http://{domain}{path}".format(domain=domain, path=path)
verification_reminder = _(
"If you haven't verified your identity yet, please start the verification process ({verification_url})."
).format(verification_url=verification_url)
if is_professional_mode_verified:
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
"course start date. ")
refund_reminder = _(
"{refund_reminder_msg}"
"To receive your refund, contact {billing_email}. "
"Please include your order number in your email. "
"Please do NOT include your credit card information."
).format(
refund_reminder_msg=refund_reminder_msg,
billing_email=settings.PAYMENT_SUPPORT_EMAIL
)
# Need this to be unicode in case the reminder strings
# have been translated and contain non-ASCII unicode
return u"{verification_reminder} {refund_reminder}".format(
verification_reminder=verification_reminder,
refund_reminder=refund_reminder
)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
@classmethod
def verified_certificates_contributing_more_than_minimum(cls, course_id):
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the CertificateItem is associated with a course, additional fields will be populated with
course information. If there is a mode associated with the certificate, it is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CertificateItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class DonationConfiguration(ConfigurationModel):
"""Configure whether donations are enabled on the site."""
class Meta(ConfigurationModel.Meta):
app_label = "shoppingcart"
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
class Meta(object):
app_label = "shoppingcart"
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.atomic
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
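# A minimal usage sketch (names assumed, mirroring the docstring above):
#
#     cart = Order.get_cart_for_user(user)
#     donation = Donation.add_to_order(cart, Decimal('25.00'))
#     course_donation = Donation.add_to_order(cart, Decimal('25.00'),
#                                             course_id=course_key)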
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs):
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
else:
data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
return data
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
"""
return self._line_item_description(course_id=self.course_id)
|
naresh21/synergetics-edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 91,630
|
[
"VisIt"
] |
0627c6b9e2084f1168ba1d07f1568701bcd1a32bb85d636dbbdd0d9b6eb866d5
|
# Copyright 2002 by Andrew Dalke. All rights reserved.
# Revisions 2007-2008 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Note that BioSQL (including the database schema and scripts) is
# available and licensed separately. Please consult www.biosql.org
"""Code for storing and retrieving biological sequences from a BioSQL
relational database. See:
http://biopython.org/wiki/BioSQL
http://www.biosql.org/
"""
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/BioSQL/__init__.py
|
Python
|
gpl-2.0
| 556
|
[
"Biopython"
] |
34455b919babee3abd3d2e6637946af384783789a17a402c874bc79efed22b24
|
#!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
import SimpleITK as sitk
import sys
import os
def command_iteration(filter):
print(f"{filter.GetElapsedIterations():3} = {filter.GetMetric():10.5f}")
if len(sys.argv) < 4:
print("Usage:", sys.argv[0], "<fixedImageFilter> <movingImageFile>",
"[initialTransformFile] <outputTransformFile>")
sys.exit(1)
fixed = sitk.ReadImage(sys.argv[1])
moving = sitk.ReadImage(sys.argv[2])
matcher = sitk.HistogramMatchingImageFilter()
if (fixed.GetPixelID() in (sitk.sitkUInt8, sitk.sitkInt8)):
matcher.SetNumberOfHistogramLevels(128)
else:
matcher.SetNumberOfHistogramLevels(1024)
matcher.SetNumberOfMatchPoints(7)
matcher.ThresholdAtMeanIntensityOn()
moving = matcher.Execute(moving, fixed)
# The fast symmetric forces Demons Registration Filter
# Note there is a whole family of Demons Registration algorithms included in
# SimpleITK
demons = sitk.FastSymmetricForcesDemonsRegistrationFilter()
demons.SetNumberOfIterations(200)
# Standard deviation for Gaussian smoothing of displacement field
demons.SetStandardDeviations(1.0)
demons.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(demons))
if len(sys.argv) > 4:
    initialTransform = sitk.ReadTransform(sys.argv[3])
    # Drop the consumed initial-transform argument so that sys.argv[3]
    # always refers to the output transform file below.
    sys.argv[-1] = sys.argv.pop()
toDisplacementFilter = sitk.TransformToDisplacementFieldFilter()
toDisplacementFilter.SetReferenceImage(fixed)
displacementField = toDisplacementFilter.Execute(initialTransform)
displacementField = demons.Execute(fixed, moving, displacementField)
else:
displacementField = demons.Execute(fixed, moving)
print("-------")
print(f"Number Of Iterations: {demons.GetElapsedIterations()}")
print(f" RMS: {demons.GetRMSChange()}")
outTx = sitk.DisplacementFieldTransform(displacementField)
sitk.WriteTransform(outTx, sys.argv[3])
if ("SITK_NOSHOW" not in os.environ):
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(100)
resampler.SetTransform(outTx)
out = resampler.Execute(moving)
simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)
simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8)
cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
sitk.Show(cimg, "DeformableRegistration1 Composition")
|
richardbeare/SimpleITK
|
Examples/DemonsRegistration2/DemonsRegistration2.py
|
Python
|
apache-2.0
| 3,096
|
[
"Gaussian"
] |
9b0f69ec4cafe53623c6bf1ee50783bc8096ae466dcbcd0a13f7f039e1c6c56d
|
import os
import re
import json
import random
import tempfile
from django import forms
from django.urls import reverse
from django.http import HttpResponse, JsonResponse
from django.views.generic.base import View
from django.views.decorators.http import require_http_methods
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.db import transaction
from edge.forms import FragmentForm
from edge.models import Fragment, Genome, Operation
from edge.io import IO
from edge import import_gff
from edge.tasks import build_genome_blastdb, build_genome_fragment_indices
IS_RO_SERVER = os.getenv("RO_SERVER", False) == "True"
def genome_export(request, genome_id):
get_genome_or_404(genome_id)
io = IO(Genome.objects.get(pk=genome_id))
response = HttpResponse(content_type="text/plain")
response["Content-Disposition"] = 'attachment; filename="g%s.gff"' % genome_id
io.to_gff_file(response)
return response
@require_http_methods(["POST"])
def genome_import(request):
res = {
"imported_genomes": [],
}
for name in request.FILES:
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as gff:
for chunk in request.FILES.get(name).chunks():
    gff.write(chunk)
g = import_gff(name, gff.name)
os.unlink(gff.name)
blastdb_task = build_genome_blastdb.delay(g.id)
res["imported_genomes"].append(
{"id": g.id, "name": g.name, "blastdb_task_id": blastdb_task.id,}
)
return JsonResponse(res)
def schedule_building_blast_db(genome_id, countdown=None):
# Schedule building the genome BLAST DB in the future, so that (a) our
# transaction has a chance to commit, and (b) we avoid an on-demand DB
# build racing with this delayed build.
countdown = 30 if countdown is None else countdown
build_genome_blastdb.apply_async((genome_id,), countdown=countdown)
def get_genome_or_404(pk):
return get_object_or_404(Genome, pk=pk)
def get_fragment_or_404(pk):
return get_object_or_404(Fragment, pk=pk)
class ViewBase(View):
def get(self, request, *args, **kwargs):
res = self.on_get(request, *args, **kwargs)
return HttpResponse(json.dumps(res), content_type="application/json")
def put(self, request, *args, **kwargs):
res, status = self.on_put(request, *args, **kwargs)
return HttpResponse(
json.dumps(res), status=status, content_type="application/json"
)
def post(self, request, *args, **kwargs):
res, status = self.on_post(request, *args, **kwargs)
return HttpResponse(
json.dumps(res), status=status, content_type="application/json"
)
class RequestParser(object):
def __init__(self):
self.__args = []
def add_argument(
self, name, field_type, required=False, default=None, location="get"
):
if type(field_type) not in (list, tuple):
if field_type == str:
field_type = [bytes, str]
else:
field_type = [field_type]
self.__args.append((name, field_type, required, default, location))
def parse_args(self, request):
json_payload = None
args = {}
for name, field_type, required, default, location in self.__args:
if location == "get":
d = request.GET
elif location == "post":
d = request.POST
else:
if json_payload is None:
json_payload = json.loads(request.body)
d = json_payload
if name not in d and required:
raise Exception('Missing required field "%s"' % (name,))
if name not in d:
args[name] = default
else:
v = d[name]
if type(v) not in field_type:
if int in field_type:
v = int(v)
elif float in field_type:
v = float(v)
else:
raise Exception(
'Field should be of type "%s", got "%s"'
% (field_type, type(v))
)
args[name] = v
return args
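# A hedged usage sketch of RequestParser: declare the expected fields once,
# then coerce an incoming Django request into a plain dict. The parser defined
# below ("fragment_parser") is the real instance used by the fragment views;
# this commented example only illustrates the coercion rules for "get" args:
#
#     parser = RequestParser()
#     parser.add_argument("s", field_type=int, location="get", default=0)
#     args = parser.parse_args(request)  # "?s=10" yields {"s": 10}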
fragment_parser = RequestParser()
fragment_parser.add_argument("name", field_type=str, required=True, location="json")
fragment_parser.add_argument("sequence", field_type=str, required=True, location="json")
fragment_parser.add_argument(
"circular", field_type=bool, default=False, location="json"
)
class FragmentView(ViewBase):
@staticmethod
def to_dict(fragment, compute_length=True):
length = fragment.est_length
if compute_length is True and fragment.has_location_index:
length = fragment.indexed_fragment().length
return dict(
id=fragment.id,
uri=reverse("fragment", kwargs=dict(fragment_id=fragment.id)),
name=fragment.name,
circular=fragment.circular,
parent_id=fragment.parent.id if fragment.parent else None,
length=length,
)
def on_get(self, request, fragment_id):
fragment = get_fragment_or_404(fragment_id)
return FragmentView.to_dict(fragment)
class FragmentSequenceView(ViewBase):
def on_get(self, request, fragment_id):
q_parser = RequestParser()
q_parser.add_argument("f", field_type=int, location="get")
q_parser.add_argument("l", field_type=int, location="get")
args = q_parser.parse_args(request)
f = args["f"]
ll = args["l"]
fragment = get_fragment_or_404(fragment_id)
s = fragment.indexed_fragment().get_sequence(bp_lo=f, bp_hi=ll)
if f is None:
f = 1
if ll is None:
ll = f + len(s) - 1
return {"sequence": s, "base_first": f, "base_last": ll}
class FragmentAnnotationsView(ViewBase):
@staticmethod
def to_dict(annotation):
return dict(
base_first=annotation.base_first,
base_last=annotation.base_last,
strand=annotation.feature.strand,
feature_base_first=annotation.feature_base_first,
feature_base_last=annotation.feature_base_last,
feature=dict(id=annotation.feature.id,
name=annotation.feature.name,
type=annotation.feature.type,
length=annotation.feature.length,
qualifiers=annotation.feature.qualifiers),
# below fields are for backwards compatibility only, repeated in
# the .feature dictionary
name=annotation.feature.name,
type=annotation.feature.type,
qualifiers=annotation.feature.qualifiers,
feature_full_length=annotation.feature.length
)
def on_get(self, request, fragment_id):
q_parser = RequestParser()
q_parser.add_argument("f", field_type=int, location="get")
q_parser.add_argument("l", field_type=int, location="get")
q_parser.add_argument("m", field_type=int, location="get")
args = q_parser.parse_args(request)
f = args["f"]
ll = args["l"]
m = args["m"]
fragment = get_fragment_or_404(fragment_id)
annotations = fragment.indexed_fragment().annotations(bp_lo=f, bp_hi=ll)
if m is not None and len(annotations) > m:
to_return = []
while len(to_return) < m:
i = random.randint(0, len(annotations) - 1)
to_return.append(annotations[i])
new_a = annotations[0:i] + annotations[i + 1 :]
annotations = new_a
annotations = to_return
return [
FragmentAnnotationsView.to_dict(annotation) for annotation in annotations
]
@transaction.atomic()
def on_post(self, request, fragment_id):
annotation_parser = RequestParser()
annotation_parser.add_argument(
"base_first", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"base_last", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"name", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"type", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"strand", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"qualifiers", field_type=dict, required=False, default=None, location="json"
)
args = annotation_parser.parse_args(request)
fragment = get_fragment_or_404(fragment_id)
fragment = fragment.indexed_fragment()
fragment.annotate(
first_base1=args["base_first"],
last_base1=args["base_last"],
name=args["name"],
type=args["type"],
strand=args["strand"],
qualifiers=args["qualifiers"],
)
return {}, 201
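# Example JSON body for the annotation POST above (values assumed): the five
# non-qualifier fields are required, strand is an int (typically 1 or -1), and
# coordinates are 1-based and inclusive:
#
#     {
#         "base_first": 100,
#         "base_last": 160,
#         "name": "promoter-1",
#         "type": "promoter",
#         "strand": 1,
#         "qualifiers": {"note": "example"}
#     }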
class FragmentAnnotateChunksView(ViewBase):
@transaction.atomic()
def on_post(self, request, fragment_id):
annotation_parser = RequestParser()
annotation_parser.add_argument(
"bases", field_type=list, required=True, location="json"
)
annotation_parser.add_argument(
"name", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"type", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"strand", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"qualifiers", field_type=dict, required=False, default=None, location="json"
)
args = annotation_parser.parse_args(request)
fragment = get_fragment_or_404(fragment_id)
fragment = fragment.indexed_fragment()
fragment.annotate_chunks(
bases=args["bases"],
name=args["name"],
type=args["type"],
strand=args["strand"],
qualifiers=args["qualifiers"],
)
return {}, 201
class FragmentListView(ViewBase):
def on_get(self, request):
q_parser = RequestParser()
q_parser.add_argument("q", field_type=str, location="get")
q_parser.add_argument("s", field_type=int, location="get", default=0)
q_parser.add_argument("p", field_type=int, location="get", default=100)
args = q_parser.parse_args(request)
s = args["s"]
p = args["p"]
q = args["q"]
p = 200 if p > 200 else p
if q is not None and q.strip() != "":
fragments = Fragment.user_defined_fragments(Q(name__icontains=q), s, s + p)
else:
fragments = Fragment.user_defined_fragments(None, s, s + p)
return [FragmentView.to_dict(fragment) for fragment in fragments]
@transaction.atomic()
def on_post(self, request):
args = fragment_parser.parse_args(request)
fragment = Fragment.create_with_sequence(
name=args["name"], sequence=args["sequence"], circular=args["circular"]
)
return FragmentView.to_dict(fragment), 201
class GenomeView(ViewBase):
@staticmethod
def op_to_dict(genome, op):
choices = Operation._meta.get_field("type").choices
type_str = [t[1] for t in choices if t[0] == op.type]
if len(type_str) > 0:
type_str = type_str[0]
else:
type_str = ""
annotations = {}
if genome.has_location_index:
genome = genome.indexed_genome()
for feature in op.feature_set.all():
    feature_annotations = genome.find_annotation_by_feature(feature)
    for fragment_id, fragment_anns in feature_annotations.items():
        if fragment_id not in annotations:
            annotations[fragment_id] = []
        annotations[fragment_id].extend(fragment_anns)
annotation_list = []
for fragment_id in annotations:
f = genome.fragments.filter(id=fragment_id)[0]
v = annotations[fragment_id]
v = [FragmentAnnotationsView.to_dict(x) for x in v]
for a in v:
a["fragment_id"] = fragment_id
a["fragment_name"] = f.name
annotation_list.append(a)
d = dict(
type=type_str, params=json.loads(op.params), annotations=annotation_list
)
return d
@staticmethod
def to_dict(
genome, compute_length=True, include_fragments=True, include_operations=True
):
operations = []
if include_operations:
for op in genome.operation_set.order_by("id"):
d = GenomeView.op_to_dict(genome, op)
operations.append(d)
fragments = None
if include_fragments:
fragments = []
for f in genome.fragments.all():
d = FragmentView.to_dict(f, compute_length=compute_length)
fragments.append(d)
d = dict(
id=genome.id,
uri=reverse("genome", kwargs=dict(genome_id=genome.id)),
name=genome.name,
notes=genome.notes,
parent_id=genome.parent_id,
parent_name=genome.parent.name if genome.parent is not None else "",
fragments=fragments,
)
if len(operations):
d["operations"] = operations
return d
def on_get(self, request, genome_id):
genome = get_genome_or_404(genome_id)
return GenomeView.to_dict(genome)
class GenomeAnnotationsView(ViewBase):
def on_get(self, request, genome_id):
genome = get_genome_or_404(genome_id)
q_parser = RequestParser()
q_parser.add_argument("q", field_type=str, required=True)
q_parser.add_argument("field", field_type=str, required=False, default=None)
args = q_parser.parse_args(request)
field = args["field"]
if not genome.has_location_index:
build_genome_fragment_indices.delay(genome.id)
return dict(
error="Missing genome indices, building. Please check back later."
)
res = []
if field is None:
fragment_annotations = genome.indexed_genome().find_annotation_by_name(
args["q"]
)
else:
fragment_annotations = genome.indexed_genome().find_annotation_by_qualifier(
args["q"], fields=[field]
)
for fragment_id in fragment_annotations:
fragment = get_fragment_or_404(fragment_id)
annotations = fragment_annotations[fragment_id]
d = FragmentView.to_dict(fragment)
a = [FragmentAnnotationsView.to_dict(x) for x in annotations]
res.append((d, a))
return res
class GenomeFragmentListView(ViewBase):
@transaction.atomic()
def on_post(self, request, genome_id): # adding new fragment
args = fragment_parser.parse_args(request)
genome = get_genome_or_404(genome_id)
fragment = None
fragment = genome.add_fragment(
name=args["name"], sequence=args["sequence"], circular=args["circular"]
)
return FragmentView.to_dict(fragment), 201
class GenomeDeriveView(ViewBase):
@transaction.atomic()
def on_post(self, request, genome_id):
genome = get_genome_or_404(genome_id)
data = json.loads(request.body)
cleaned_data = []
for entry in data:
form = FragmentForm(data=entry)
if not form.is_valid():
raise forms.ValidationError(form.errors)
cleaned_data.append(form.cleaned_data)
child = genome.update()
for entry in cleaned_data:
child.add_fragment(
entry["name"], entry["sequence"], circular=entry["circular"]
)
return GenomeView.to_dict(child), 201
class GenomeListView(ViewBase):
def on_get(self, request):
if "f" in request.GET:
fragment_ids = []
for f in request.GET.getlist("f"):
try:
fragment_ids.append(int(f))
except ValueError:
return []
if len(fragment_ids) == 0:
return []
sql_joins = 5
q = Genome.objects.filter(genome_fragment__fragment_id=fragment_ids[0])
for i in range(1, sql_joins):
if i < len(fragment_ids):
q = q.filter(genome_fragment__fragment_id=fragment_ids[i])
candidates = list(q)
genomes = []
for g in candidates:
x = Genome.objects.raw(
"""
SELECT edge_genome.id,
GROUP_CONCAT(edge_genome_fragment.fragment_id) as fragment_id_list
FROM edge_genome
JOIN edge_genome_fragment ON edge_genome_fragment.genome_id = edge_genome.id
WHERE edge_genome.id = %s
""",
[g.id],
)
x = list(x)[0]
id_list = [int(n) for n in x.fragment_id_list.split(",")]
if set(id_list) == set(fragment_ids):
genomes.append(g)
else:
q_parser = RequestParser()
q_parser.add_argument("q", field_type=str, location="get")
q_parser.add_argument("s", field_type=int, location="get", default=0)
q_parser.add_argument("p", field_type=int, location="get", default=100)
args = q_parser.parse_args(request)
s = args["s"]
p = args["p"]
q = args["q"]
p = 200 if p > 200 else p
if q is not None and q.strip() != "":
where = Q(name__icontains=q)
try:
int(q) # See if q can be converted to an int
except BaseException:
pass
else:
where = where | Q(id=q)
genomes = (
Genome.objects.filter(active=True)
.filter(where)
.order_by("-id")[s : s + p]
)
else:
genomes = Genome.objects.filter(active=True).order_by("-id")[s : s + p]
return [
GenomeView.to_dict(
genome,
compute_length=False,
include_fragments=False,
include_operations=False,
)
for genome in genomes
]
@transaction.atomic()
def on_post(self, request):
genome_parser = RequestParser()
genome_parser.add_argument(
"name", field_type=str, required=True, location="json"
)
genome_parser.add_argument("notes", field_type=str, location="json")
args = genome_parser.parse_args(request)
genome = Genome.create(name=args["name"], notes=args["notes"])
return GenomeView.to_dict(genome), 201
class GenomeBlastView(ViewBase):
def on_post(self, request, genome_id):
from edge.blast import blast_genome
from edge.blastdb import check_and_build_genome_db
genome = get_genome_or_404(genome_id)
check_and_build_genome_db(genome)
parser = RequestParser()
parser.add_argument("query", field_type=str, required=True, location="json")
parser.add_argument("program", field_type=str, required=True, location="json")
args = parser.parse_args(request)
results = blast_genome(genome, args["program"], args["query"])
results = [r.to_dict() for r in results]
return results, 200
class GenomePcrView(ViewBase):
def on_post(self, request, genome_id):
from edge.pcr import pcr_from_genome
from edge.blastdb import check_and_build_genome_db
genome = get_genome_or_404(genome_id)
check_and_build_genome_db(genome)
parser = RequestParser()
parser.add_argument("primers", field_type=list, required=True, location="json")
args = parser.parse_args(request)
primers = args["primers"]
if len(primers) != 2:
raise Exception("Expecting two primers, got %s" % (primers,))
r = pcr_from_genome(genome, primers[0], primers[1])
r = (r[0], [b.to_dict() for b in r[1]], [b.to_dict() for b in r[2]], r[3])
return r, 200
class GenomeOperationViewBase(ViewBase):
def on_post(self, request, genome_id):
from edge.blastdb import check_and_build_genome_db
genome = get_genome_or_404(genome_id)
if IS_RO_SERVER and (not genome.has_location_index or not genome.blastdb):
return [], 409 # 409 is "Conflict" - caller is expected to retry request on RW server
check_and_build_genome_db(genome)
# always require a 'create' argument
parser = RequestParser()
parser.add_argument("create", field_type=bool, required=True, location="json")
args = parser.parse_args(request)
create = args["create"]
errors = []
parsed = self.parse_arguments(request, errors)
if parsed is None:
return dict(errors=" ".join(errors)), 400
args, op_class = parsed
if create is False:
r = op_class.check(genome, **args)
if r is None:
return [], 200
return [x.to_dict() for x in r], 200
else:
child = None
status_code = 400
with transaction.atomic():
op = op_class.get_operation(**args)
# find an existing child genome with the same operation
for existing_child in genome.children.all():
if (
existing_child.operation_set.count() == 1
and existing_child.operation_set.all()[0].type == op.type
and existing_child.operation_set.all()[0].params == op.params
):
child = existing_child
if child is None:
child = op_class.perform(genome, **args)
if child:
print(
f"Generated child genome {child.id} from parent genome {genome_id}"
)
status_code = 201
else: # found existing child, update genome name and set to active
if "genome_name" in args:
child.name = args["genome_name"]
child.active = True
child.save()
status_code = 200
if child is None:
return None, 400
else:
# scheduling tasks outside of transaction block
schedule_building_blast_db(child.id)
return GenomeView.to_dict(child), status_code
class GenomeCrisprDSBView(GenomeOperationViewBase):
def parse_arguments(self, request, errors):
from edge.crispr import CrisprOp
parser = RequestParser()
parser.add_argument(
"guide", field_type=str, required=True, default=None, location="json"
)
parser.add_argument(
"pam", field_type=str, required=True, default=None, location="json"
)
parser.add_argument(
"genome_name", field_type=str, required=False, default=None, location="json"
)
parser.add_argument(
"notes", field_type=str, required=False, default=None, location="json"
)
args = parser.parse_args(request)
guide = args["guide"]
pam = args["pam"]
genome_name = args["genome_name"]
notes = args["notes"]
return (
dict(guide=guide, pam=pam, genome_name=genome_name, notes=notes),
CrisprOp,
)
class GenomeRecombinationView(GenomeOperationViewBase):
DEFAULT_HA_LENGTH = 30
@staticmethod
def validate_annotations(cassette, annotations):
"""
If the user supplied annotations for the donor sequence, we require the
donor DNA to contain no overhangs or backbone modifications, so the
annotations can be applied without ambiguity. Returns True if that is
the case and all required annotation fields are present.
"""
if (
re.match(r"^[A-Za-z]+$", cassette)
and len(
[
a
for a in annotations
if "base_first" not in a
or "base_last" not in a
or "name" not in a
or "type" not in a
or "strand" not in a
]
)
== 0
):
return True
# if there are no supplied annotations, we don't care what format the
# donor sequence is
return len(annotations) == 0
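# Illustrative check (assumed values): a plain-letter cassette with fully
# specified annotations passes, while a cassette with overhang syntax plus
# an incomplete annotation fails:
#
#     ok = GenomeRecombinationView.validate_annotations(
#         "ATGCATGC",
#         [{"base_first": 1, "base_last": 8, "name": "x",
#           "type": "misc", "strand": 1}])          # True
#     bad = GenomeRecombinationView.validate_annotations(
#         "ATGC^ATGC", [{"name": "x"}])             # False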
def parse_arguments(self, request, errors):
from edge.recombine import RecombineOp
parser = RequestParser()
parser.add_argument("cassette", field_type=str, required=True, location="json")
parser.add_argument(
"homology_arm_length", field_type=int, required=False, location="json"
)
parser.add_argument(
"genome_name", field_type=str, required=False, default=None, location="json"
)
parser.add_argument(
"cassette_name",
field_type=str,
required=False,
default=None,
location="json",
)
parser.add_argument(
"notes", field_type=str, required=False, default=None, location="json"
)
parser.add_argument(
"design_primers",
field_type=bool,
required=False,
default=False,
location="json",
)
parser.add_argument(
"primer3_opts",
field_type=dict,
required=False,
default=None,
location="json",
)
parser.add_argument(
"annotations",
field_type=list,
required=False,
default=None,
location="json",
)
args = parser.parse_args(request)
cassette = args["cassette"].strip()
homology_arm_length = args["homology_arm_length"]
genome_name = args["genome_name"]
cassette_name = args["cassette_name"]
notes = args["notes"]
design_primers = args["design_primers"]
primer3_opts = args["primer3_opts"]
annotations = args["annotations"]
if primer3_opts is None:
primer3_opts = {}
if annotations is None:
annotations = []
elif (
GenomeRecombinationView.validate_annotations(cassette, annotations) is False
):
errors.append(
    "Annotations failed validation: "
    "please make sure donor sequence does not have overhangs "
    "and annotation array elements have all the required fields."
)
return None
if homology_arm_length is None:
homology_arm_length = GenomeRecombinationView.DEFAULT_HA_LENGTH
return (
dict(
cassette=cassette,
homology_arm_length=homology_arm_length,
genome_name=genome_name,
cassette_name=cassette_name,
notes=notes,
design_primers=design_primers,
primer3_opts=primer3_opts,
annotations=annotations,
),
RecombineOp,
)
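# A hedged end-to-end sketch of the recombination endpoint above (URL and
# values assumed): POSTing with create=False only checks for homology-arm
# matches; create=True performs the operation and returns the child genome.
#
#     POST /genome/<genome_id>/recombination
#     {
#         "create": false,
#         "cassette": "ATG...TAA",
#         "homology_arm_length": 30
#     }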
|
ginkgobioworks/edge
|
src/edge/views.py
|
Python
|
mit
| 28,250
|
[
"BLAST"
] |
d413ebb026ebbbce899bd77a5efda93413e3e7d4abed6089564d300513dfcaa1
|
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Check whether all features used in the code are defined
#
import sys, os, re, fileinput
sys.path.append(os.path.join(sys.path[0], '..', '..', 'config'))
import featuredefs
if len(sys.argv) < 3:
print "Usage: %s DEFFILE [FILE...]" % sys.argv[0]
exit(2)
print "Checking for completeness of features in test configurations..."
fdefs = featuredefs.defs(sys.argv[1])
featurefound = set()
featurere = re.compile('^#define (\w+)')
for line in fileinput.input(sys.argv[2:]):
res = featurere.match(line)
if res is not None:
feature = res.group(1)
featurefound.add(feature)
unused = fdefs.features.difference(featurefound)
unused = unused.difference(fdefs.notestfeatures)
if len(unused) > 0:
for feature in unused:
print "check_myconfig_complete: %s is not used" % feature
else:
print "check_myconfig_complete: All features are used!"
|
roehm/espresso
|
testsuite/configs/check_myconfig_complete.py
|
Python
|
gpl-3.0
| 1,587
|
[
"ESPResSo"
] |
179d24653a7d5df1e8f9859ecb5301992a6c26f194c34bcfc1264e37b1307c08
|
import logging
import numpy as np
import os.path
from ast import literal_eval
from warnings import warn
import nibabel as nib
from dipy.core.gradients import gradient_table
from dipy.data import default_sphere
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.peaks import save_peaks, peaks_to_niftis
from dipy.io.image import load_nifti, save_nifti, load_nifti_data
from dipy.io.utils import nifti1_symmat
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
auto_response)
from dipy.reconst.dti import (TensorModel, color_fa, fractional_anisotropy,
geodesic_anisotropy, mean_diffusivity,
axial_diffusivity, radial_diffusivity,
lower_triangular, mode as get_mode)
from dipy.direction.peaks import peaks_from_model
from dipy.reconst.shm import CsaOdfModel
from dipy.workflows.workflow import Workflow
from dipy.reconst.dki import DiffusionKurtosisModel, split_dki_param
from dipy.reconst.ivim import IvimModel
from dipy.reconst import mapmri
class ReconstMAPMRIFlow(Workflow):
@classmethod
def get_short_name(cls):
return 'mapmri'
def run(self, data_files, bvals_files, bvecs_files, small_delta, big_delta,
b0_threshold=50.0, laplacian=True, positivity=True,
bval_threshold=2000, save_metrics=[],
laplacian_weighting=0.05, radial_order=6, out_dir='',
out_rtop='rtop.nii.gz', out_lapnorm='lapnorm.nii.gz',
out_msd='msd.nii.gz', out_qiv='qiv.nii.gz',
out_rtap='rtap.nii.gz',
out_rtpp='rtpp.nii.gz', out_ng='ng.nii.gz',
out_perng='perng.nii.gz',
out_parng='parng.nii.gz'):
"""Workflow for fitting the MAPMRI model (with optional Laplacian
regularization). Generates rtop, lapnorm, msd, qiv, rtap, rtpp,
non-Gaussianity (ng), parallel ng, and perpendicular ng maps in NIfTI
format from the input files provided by `data_files`, and saves them
to the output directory specified by `out_dir`.
In order for the MAPMRI workflow to work in the way
intended either the Laplacian or positivity or both must
be set to True.
Parameters
----------
data_files : string
Path to the input volume.
bvals_files : string
Path to the bval files.
bvecs_files : string
Path to the bvec files.
small_delta : float
Small delta value used in generation of gradient table of provided
bval and bvec.
big_delta : float
Big delta value used in generation of gradient table of provided
bval and bvec.
b0_threshold : float, optional
Threshold used to find b=0 directions (default 50.0)
laplacian : bool, optional
Regularize using the Laplacian of the MAP-MRI basis (default True)
positivity : bool, optional
Constrain the propagator to be positive. (default True)
bval_threshold : float, optional
Sets the b-value threshold to be used in the scale factor
estimation. In order for the estimated non-Gaussianity to have
meaning, this value should be set to a lower value (b<2000 s/mm^2)
such that the scale factors are estimated on signal points that
reasonably represent the spins at Gaussian diffusion.
(default: 2000)
save_metrics : variable string, optional
List of metrics to save.
Possible values: rtop, laplacian_signal, msd, qiv, rtap, rtpp,
ng, perng, parng
(default: [] (all))
laplacian_weighting : float, optional
Weighting value used in fitting the MAPMRI model in the Laplacian
and both model types. (default: 0.05)
radial_order : unsigned int, optional
Even value used to set the order of the basis
(default: 6)
out_dir : string, optional
Output directory (default: input file directory)
out_rtop : string, optional
Name of the rtop to be saved
out_lapnorm : string, optional
Name of the norm of Laplacian signal to be saved
out_msd : string, optional
Name of the msd to be saved
out_qiv : string, optional
Name of the qiv to be saved
out_rtap : string, optional
Name of the rtap to be saved
out_rtpp : string, optional
Name of the rtpp to be saved
out_ng : string, optional
Name of the Non-Gaussianity to be saved
out_perng : string, optional
Name of the Non-Gaussianity perpendicular to be saved
out_parng : string, optional
Name of the Non-Gaussianity parallel to be saved
"""
io_it = self.get_io_iterator()
for (dwi, bval, bvec, out_rtop, out_lapnorm, out_msd, out_qiv,
out_rtap, out_rtpp, out_ng, out_perng, out_parng) in io_it:
logging.info('Computing MAPMRI metrics for {0}'.format(dwi))
data, affine = load_nifti(dwi)
bvals, bvecs = read_bvals_bvecs(bval, bvec)
if b0_threshold < bvals.min():
warn("b0_threshold (value: {0}) is too low, increase your "
"b0_threshold. It should be higher than the first b0 value "
"({1}).".format(b0_threshold, bvals.min()))
gtab = gradient_table(bvals=bvals, bvecs=bvecs,
small_delta=small_delta,
big_delta=big_delta,
b0_threshold=b0_threshold)
if not save_metrics:
save_metrics = ['rtop', 'laplacian_signal', 'msd',
'qiv', 'rtap', 'rtpp',
'ng', 'perng', 'parng']
if laplacian and positivity:
map_model_aniso = mapmri.MapmriModel(
gtab,
radial_order=radial_order,
laplacian_regularization=True,
laplacian_weighting=laplacian_weighting,
positivity_constraint=True,
bval_threshold=bval_threshold)
mapfit_aniso = map_model_aniso.fit(data)
elif positivity:
map_model_aniso = mapmri.MapmriModel(
gtab,
radial_order=radial_order,
laplacian_regularization=False,
positivity_constraint=True,
bval_threshold=bval_threshold)
mapfit_aniso = map_model_aniso.fit(data)
elif laplacian:
map_model_aniso = mapmri.MapmriModel(
gtab,
radial_order=radial_order,
laplacian_regularization=True,
laplacian_weighting=laplacian_weighting,
bval_threshold=bval_threshold)
mapfit_aniso = map_model_aniso.fit(data)
else:
map_model_aniso = mapmri.MapmriModel(
gtab,
radial_order=radial_order,
laplacian_regularization=False,
positivity_constraint=False,
bval_threshold=bval_threshold)
mapfit_aniso = map_model_aniso.fit(data)
if 'rtop' in save_metrics:
r = mapfit_aniso.rtop()
save_nifti(out_rtop, r.astype(np.float32), affine)
if 'laplacian_signal' in save_metrics:
ll = mapfit_aniso.norm_of_laplacian_signal()
save_nifti(out_lapnorm, ll.astype(np.float32), affine)
if 'msd' in save_metrics:
m = mapfit_aniso.msd()
save_nifti(out_msd, m.astype(np.float32), affine)
if 'qiv' in save_metrics:
q = mapfit_aniso.qiv()
save_nifti(out_qiv, q.astype(np.float32), affine)
if 'rtap' in save_metrics:
r = mapfit_aniso.rtap()
save_nifti(out_rtap, r.astype(np.float32), affine)
if 'rtpp' in save_metrics:
r = mapfit_aniso.rtpp()
save_nifti(out_rtpp, r.astype(np.float32), affine)
if 'ng' in save_metrics:
n = mapfit_aniso.ng()
save_nifti(out_ng, n.astype(np.float32), affine)
if 'perng' in save_metrics:
n = mapfit_aniso.ng_perpendicular()
save_nifti(out_perng, n.astype(np.float32), affine)
if 'parng' in save_metrics:
n = mapfit_aniso.ng_parallel()
save_nifti(out_parng, n.astype(np.float32), affine)
logging.info('MAPMRI saved in {0}'.
format(os.path.dirname(out_dir)))
class ReconstDtiFlow(Workflow):
@classmethod
def get_short_name(cls):
return 'dti'
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
b0_threshold=50, bvecs_tol=0.01, save_metrics=[],
out_dir='', out_tensor='tensors.nii.gz', out_fa='fa.nii.gz',
out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz',
out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz',
out_evec='evecs.nii.gz', out_eval='evals.nii.gz', nifti_tensor=True):
""" Workflow for tensor reconstruction and for computing DTI metrics.
using Weighted Least-Squares.
Performs a tensor reconstruction on the files by 'globing'
``input_files`` and saves the DTI metrics in a directory specified by
``out_dir``.
Parameters
----------
input_files : string
Path to the input volumes. This path may contain wildcards to
process multiple inputs at once.
bvalues_files : string
Path to the bvalues files. This path may contain wildcards to use
multiple bvalues files at once.
bvectors_files : string
Path to the bvectors files. This path may contain wildcards to use
multiple bvectors files at once.
mask_files : string
Path to the input masks. This path may contain wildcards to use
multiple masks at once.
b0_threshold : float, optional
Threshold used to find b=0 directions (default 50)
bvecs_tol : float, optional
Threshold used to check that norm(bvec) = 1 +/- bvecs_tol, i.e. that
the b-vectors are unit vectors (default 0.01)
save_metrics : variable string, optional
List of metrics to save.
Possible values: fa, ga, rgb, md, ad, rd, mode, tensor, evec, eval
(default [] (all))
out_dir : string, optional
Output directory (default input file directory)
out_tensor : string, optional
Name of the tensors volume to be saved (default 'tensors.nii.gz').
Per default, this will be saved following the nifti standard:
with the tensor elements as Dxx, Dxy, Dyy, Dxz, Dyz, Dzz on the
last (5th) dimension of the volume (shape: (i, j, k, 1, 6)). If
`nifti_tensor` is False, this will be saved in an alternate format
that is used by other software (e.g., FSL): a
4-dimensional volume (shape (i, j, k, 6)) with Dxx, Dxy, Dxz, Dyy,
Dyz, Dzz on the last dimension.
out_fa : string, optional
Name of the fractional anisotropy volume to be saved
(default 'fa.nii.gz')
out_ga : string, optional
Name of the geodesic anisotropy volume to be saved
(default 'ga.nii.gz')
out_rgb : string, optional
Name of the color fa volume to be saved (default 'rgb.nii.gz')
out_md : string, optional
Name of the mean diffusivity volume to be saved
(default 'md.nii.gz')
out_ad : string, optional
Name of the axial diffusivity volume to be saved
(default 'ad.nii.gz')
out_rd : string, optional
Name of the radial diffusivity volume to be saved
(default 'rd.nii.gz')
out_mode : string, optional
Name of the mode volume to be saved (default 'mode.nii.gz')
out_evec : string, optional
Name of the eigenvectors volume to be saved
(default 'evecs.nii.gz')
out_eval : string, optional
Name of the eigenvalues to be saved (default 'evals.nii.gz')
nifti_tensor : bool, optional
Whether the tensor is saved in the standard Nifti format or in an
alternate format
that is used by other software (e.g., FSL): a
4-dimensional volume (shape (i, j, k, 6)) with
Dxx, Dxy, Dxz, Dyy, Dyz, Dzz on the last dimension. Default: True
References
----------
.. [1] Basser, P.J., Mattiello, J., LeBihan, D., 1994. Estimation of
the effective self-diffusion tensor from the NMR spin echo. J Magn
Reson B 103, 247-254.
.. [2] Basser, P., Pierpaoli, C., 1996. Microstructural and
physiological features of tissues elucidated by quantitative
diffusion-tensor MRI. Journal of Magnetic Resonance 111, 209-219.
.. [3] Chang, L-C., Jones, D.K., Pierpaoli, C. 2005. RESTORE: Robust
estimation of tensors by outlier rejection. MRM 53: 1088-1095
.. [4] Chung, S.W., Lu, Y., Henry, R.G., 2006. Comparison of bootstrap
approaches for estimation of uncertainties of DTI parameters.
NeuroImage 33, 531-541.
"""
io_it = self.get_io_iterator()
for dwi, bval, bvec, mask, otensor, ofa, oga, orgb, omd, oad, orad, \
omode, oevecs, oevals in io_it:
logging.info('Computing DTI metrics for {0}'.format(dwi))
data, affine = load_nifti(dwi)
if mask is not None:
mask = load_nifti_data(mask).astype(bool)
tenfit, _ = self.get_fitted_tensor(data, mask, bval, bvec,
b0_threshold, bvecs_tol)
if not save_metrics:
save_metrics = ['fa', 'md', 'rd', 'ad', 'ga', 'rgb', 'mode',
'evec', 'eval', 'tensor']
FA = fractional_anisotropy(tenfit.evals)
FA[np.isnan(FA)] = 0
FA = np.clip(FA, 0, 1)
if 'tensor' in save_metrics:
tensor_vals = lower_triangular(tenfit.quadratic_form)
if nifti_tensor:
ten_img = nifti1_symmat(tensor_vals, affine=affine)
else:
alt_order = [0, 1, 3, 2, 4, 5]
ten_img = nib.Nifti1Image(
tensor_vals[..., alt_order].astype(np.float32),
affine)
nib.save(ten_img, otensor)
if 'fa' in save_metrics:
save_nifti(ofa, FA.astype(np.float32), affine)
if 'ga' in save_metrics:
GA = geodesic_anisotropy(tenfit.evals)
save_nifti(oga, GA.astype(np.float32), affine)
if 'rgb' in save_metrics:
RGB = color_fa(FA, tenfit.evecs)
save_nifti(orgb, np.array(255 * RGB, 'uint8'), affine)
if 'md' in save_metrics:
MD = mean_diffusivity(tenfit.evals)
save_nifti(omd, MD.astype(np.float32), affine)
if 'ad' in save_metrics:
AD = axial_diffusivity(tenfit.evals)
save_nifti(oad, AD.astype(np.float32), affine)
if 'rd' in save_metrics:
RD = radial_diffusivity(tenfit.evals)
save_nifti(orad, RD.astype(np.float32), affine)
if 'mode' in save_metrics:
MODE = get_mode(tenfit.quadratic_form)
save_nifti(omode, MODE.astype(np.float32), affine)
if 'evec' in save_metrics:
save_nifti(oevecs, tenfit.evecs.astype(np.float32), affine)
if 'eval' in save_metrics:
save_nifti(oevals, tenfit.evals.astype(np.float32), affine)
dname_ = os.path.dirname(oevals)
if dname_ == '':
logging.info('DTI metrics saved in current directory')
else:
logging.info(
'DTI metrics saved in {0}'.format(dname_))
def get_tensor_model(self, gtab):
return TensorModel(gtab, fit_method="WLS")
def get_fitted_tensor(self, data, mask, bval, bvec,
b0_threshold=50, bvecs_tol=0.01):
logging.info('Tensor estimation...')
bvals, bvecs = read_bvals_bvecs(bval, bvec)
gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold,
atol=bvecs_tol)
tenmodel = self.get_tensor_model(gtab)
tenfit = tenmodel.fit(data, mask)
return tenfit, gtab
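# Illustrative usage sketch (not part of the original file), following the
# ReconstDtiFlow.run signature above; file names are placeholders:
#
#     flow = ReconstDtiFlow()
#     flow.run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec', 'mask.nii.gz',
#              save_metrics=['fa', 'md'], out_dir='out')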
class ReconstCSDFlow(Workflow):
@classmethod
def get_short_name(cls):
return 'csd'
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
b0_threshold=50.0, bvecs_tol=0.01, roi_center=None, roi_radius=10,
fa_thr=0.7, frf=None, extract_pam_values=False, sh_order=8,
odf_to_sh_order=8, parallel=False, nbr_processes=None,
out_dir='',
out_pam='peaks.pam5', out_shm='shm.nii.gz',
out_peaks_dir='peaks_dirs.nii.gz',
out_peaks_values='peaks_values.nii.gz',
out_peaks_indices='peaks_indices.nii.gz', out_gfa='gfa.nii.gz'):
""" Constrained spherical deconvolution
Parameters
----------
input_files : string
Path to the input volumes. This path may contain wildcards to
process multiple inputs at once.
bvalues_files : string
Path to the bvalues files. This path may contain wildcards to use
multiple bvalues files at once.
bvectors_files : string
Path to the bvectors files. This path may contain wildcards to use
multiple bvectors files at once.
mask_files : string
Path to the input masks. This path may contain wildcards to use
multiple masks at once. (default: No mask used)
b0_threshold : float, optional
Threshold used to find b=0 directions (default 50.0)
bvecs_tol : float, optional
Threshold used to check that the b-vectors are unit vectors (default 0.01)
roi_center : variable int, optional
Center of ROI in data. If center is None, it is assumed that it is
the center of the volume with shape `data.shape[:3]` (default None)
roi_radius : int, optional
radius of cubic ROI in voxels (default 10)
fa_thr : float, optional
FA threshold for calculating the response function (default 0.7)
frf : variable float, optional
Fiber response function. It can be input, for example, as 15 4 4
(from the command line) or as [15, 4, 4] (from a Python script); the
values are converted to float and multiplied by 10**-4. If None,
the fiber response function will be computed automatically
(default: None).
extract_pam_values : bool, optional
Whether or not to save pam volumes as single nifti files.
sh_order : int, optional
Spherical harmonics order (default 8) used in the CSD fit.
odf_to_sh_order : int, optional
Spherical harmonics order used for peak_from_model to compress
the ODF to spherical harmonics coefficients (default 8)
parallel : bool, optional
Whether to use parallelization in peak-finding during the
calibration procedure. Default: False
nbr_processes : int, optional
If `parallel` is True, the number of subprocesses to use
(default multiprocessing.cpu_count()).
out_dir : string, optional
Output directory (default input file directory)
out_pam : string, optional
Name of the peaks volume to be saved (default 'peaks.pam5')
out_shm : string, optional
Name of the spherical harmonics volume to be saved
(default 'shm.nii.gz')
out_peaks_dir : string, optional
Name of the peaks directions volume to be saved
(default 'peaks_dirs.nii.gz')
out_peaks_values : string, optional
Name of the peaks values volume to be saved
(default 'peaks_values.nii.gz')
out_peaks_indices : string, optional
Name of the peaks indices volume to be saved
(default 'peaks_indices.nii.gz')
out_gfa : string, optional
Name of the generalized FA volume to be saved (default 'gfa.nii.gz')
References
----------
.. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of
the fibre orientation distribution in diffusion MRI: Non-negativity
constrained super-resolved spherical deconvolution.
"""
io_it = self.get_io_iterator()
for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir, opeaks_values,
opeaks_indices, ogfa) in io_it:
logging.info('Loading {0}'.format(dwi))
data, affine = load_nifti(dwi)
bvals, bvecs = read_bvals_bvecs(bval, bvec)
if b0_threshold < bvals.min():
warn("b0_threshold (value: {0}) is too low, increase your "
"b0_threshold. It should be higher than the first b0 value "
"({1}).".format(b0_threshold, bvals.min()))
gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold,
atol=bvecs_tol)
mask_vol = load_nifti_data(maskfile).astype(bool)
n_params = ((sh_order + 1) * (sh_order + 2)) // 2
if data.shape[-1] < n_params:
raise ValueError(
'You need at least {0} unique DWI volumes to '
'compute fiber odfs. You currently have: {1}'
' DWI volumes.'.format(n_params, data.shape[-1]))
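# For reference: n_params is the number of symmetric SH coefficients,
# (sh_order + 1) * (sh_order + 2) // 2; e.g. sh_order=8 gives
# 9 * 10 // 2 = 45, so at least 45 unique DWI volumes are required.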
if frf is None:
logging.info('Computing response function')
if roi_center is not None:
logging.info('Response ROI center:\n{0}'
.format(roi_center))
logging.info('Response ROI radius:\n{0}'
.format(roi_radius))
response, ratio, nvox = auto_response(
gtab, data,
roi_center=roi_center,
roi_radius=roi_radius,
fa_thr=fa_thr,
return_number_of_voxels=True)
response = list(response)
else:
logging.info('Using response function')
if isinstance(frf, str):
l01 = np.array(literal_eval(frf), dtype=np.float64)
else:
l01 = np.array(frf, dtype=np.float64)
l01 *= 10 ** -4
response = np.array([l01[0], l01[1], l01[1]])
ratio = l01[1] / l01[0]
response = (response, ratio)
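# Worked example of the conversion above: frf given as "15 4 4" yields
# l01 = [15e-4, 4e-4, 4e-4] after the 10**-4 scaling, so
# response = ([15e-4, 4e-4, 4e-4], ratio) with ratio = 4/15 ~= 0.267.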
logging.info("Eigenvalues for the frf of the input"
" data are :{0}".format(response[0]))
logging.info('Ratio for smallest to largest eigen value is {0}'
.format(ratio))
peaks_sphere = default_sphere
logging.info('CSD computation started.')
csd_model = ConstrainedSphericalDeconvModel(gtab, response,
sh_order=sh_order)
peaks_csd = peaks_from_model(model=csd_model,
data=data,
sphere=peaks_sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=mask_vol,
return_sh=True,
sh_order=sh_order,
normalize_peaks=True,
parallel=parallel,
nbr_processes=nbr_processes)
peaks_csd.affine = affine
save_peaks(opam, peaks_csd)
logging.info('CSD computation completed.')
if extract_pam_values:
peaks_to_niftis(peaks_csd, oshm, opeaks_dir, opeaks_values,
opeaks_indices, ogfa, reshape_dirs=True)
dname_ = os.path.dirname(opam)
if dname_ == '':
logging.info('Pam5 file saved in current directory')
else:
logging.info(
'Pam5 file saved in {0}'.format(dname_))
return io_it
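# Illustrative usage sketch (not part of the original): supplying a manual
# fiber response function to the CSD workflow, per the frf convention
# documented above; file names are placeholders:
#
#     ReconstCSDFlow().run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec',
#                          'mask.nii.gz', frf=[15, 4, 4],
#                          extract_pam_values=True, out_dir='out')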
class ReconstCSAFlow(Workflow):
@classmethod
def get_short_name(cls):
return 'csa'
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
sh_order=6, odf_to_sh_order=8, b0_threshold=50.0, bvecs_tol=0.01,
extract_pam_values=False, parallel=False, nbr_processes=None,
out_dir='',
out_pam='peaks.pam5', out_shm='shm.nii.gz',
out_peaks_dir='peaks_dirs.nii.gz',
out_peaks_values='peaks_values.nii.gz',
out_peaks_indices='peaks_indices.nii.gz',
out_gfa='gfa.nii.gz'):
""" Constant Solid Angle.
Parameters
----------
input_files : string
Path to the input volumes. This path may contain wildcards to
process multiple inputs at once.
bvalues_files : string
Path to the bvalues files. This path may contain wildcards to use
multiple bvalues files at once.
bvectors_files : string
Path to the bvectors files. This path may contain wildcards to use
multiple bvectors files at once.
mask_files : string
Path to the input masks. This path may contain wildcards to use
multiple masks at once. (default: No mask used)
sh_order : int, optional
Spherical harmonics order (default 6) used in the CSA fit.
odf_to_sh_order : int, optional
Spherical harmonics order used for peak_from_model to compress
the ODF to spherical harmonics coefficients (default 8)
b0_threshold : float, optional
Threshold used to find b=0 directions (default 50.0)
bvecs_tol : float, optional
Threshold used so that norm(bvec)=1 (default 0.01)
extract_pam_values : bool, optional
Whether or not to save pam volumes as single nifti files.
parallel : bool, optional
Whether to use parallelization in peak-finding during the
calibration procedure. Default: False
nbr_processes : int, optional
If `parallel` is True, the number of subprocesses to use
(default multiprocessing.cpu_count()).
out_dir : string, optional
Output directory (default input file directory)
out_pam : string, optional
Name of the peaks volume to be saved (default 'peaks.pam5')
out_shm : string, optional
Name of the spherical harmonics volume to be saved
(default 'shm.nii.gz')
out_peaks_dir : string, optional
Name of the peaks directions volume to be saved
(default 'peaks_dirs.nii.gz')
out_peaks_values : string, optional
Name of the peaks values volume to be saved
(default 'peaks_values.nii.gz')
out_peaks_indices : string, optional
Name of the peaks indices volume to be saved
(default 'peaks_indices.nii.gz')
out_gfa : string, optional
Name of the generalized FA volume to be saved (default 'gfa.nii.gz')
References
----------
.. [1] Aganj, I., et al. 2009. ODF Reconstruction in Q-Ball Imaging
with Solid Angle Consideration.
"""
io_it = self.get_io_iterator()
for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir,
opeaks_values, opeaks_indices, ogfa) in io_it:
logging.info('Loading {0}'.format(dwi))
data, affine = load_nifti(dwi)
bvals, bvecs = read_bvals_bvecs(bval, bvec)
if b0_threshold < bvals.min():
warn("b0_threshold (value: {0}) is too low, increase your "
"b0_threshold. It should be higher than the first b0 value "
"({1}).".format(b0_threshold, bvals.min()))
gtab = gradient_table(bvals, bvecs,
b0_threshold=b0_threshold, atol=bvecs_tol)
mask_vol = load_nifti_data(maskfile).astype(bool)
peaks_sphere = default_sphere
logging.info('Starting CSA computations {0}'.format(dwi))
csa_model = CsaOdfModel(gtab, sh_order)
peaks_csa = peaks_from_model(model=csa_model,
data=data,
sphere=peaks_sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=mask_vol,
return_sh=True,
sh_order=odf_to_sh_order,
normalize_peaks=True,
parallel=parallel,
nbr_processes=nbr_processes)
peaks_csa.affine = affine
save_peaks(opam, peaks_csa)
logging.info('Finished CSA {0}'.format(dwi))
if extract_pam_values:
peaks_to_niftis(peaks_csa, oshm, opeaks_dir,
opeaks_values,
opeaks_indices, ogfa, reshape_dirs=True)
dname_ = os.path.dirname(opam)
if dname_ == '':
logging.info('Pam5 file saved in current directory')
else:
logging.info(
'Pam5 file saved in {0}'.format(dname_))
return io_it
class ReconstDkiFlow(Workflow):
@classmethod
def get_short_name(cls):
return 'dki'
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
b0_threshold=50.0, save_metrics=[],
out_dir='', out_dt_tensor='dti_tensors.nii.gz', out_fa='fa.nii.gz',
out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz',
out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz',
out_evec='evecs.nii.gz', out_eval='evals.nii.gz',
out_dk_tensor="dki_tensors.nii.gz",
out_mk="mk.nii.gz", out_ak="ak.nii.gz", out_rk="rk.nii.gz"):
""" Workflow for Diffusion Kurtosis reconstruction and for computing
DKI metrics. Performs a DKI reconstruction on the files by 'globbing'
``input_files`` and saves the DKI metrics in a directory specified by
``out_dir``.
Parameters
----------
input_files : string
Path to the input volumes. This path may contain wildcards to
process multiple inputs at once.
bvalues_files : string
Path to the bvalues files. This path may contain wildcards to use
multiple bvalues files at once.
bvectors_files : string
Path to the bvectors files. This path may contain wildcards to use
multiple bvectors files at once.
mask_files : string
Path to the input masks. This path may contain wildcards to use
multiple masks at once. (default: No mask used)
b0_threshold : float, optional
Threshold used to find b=0 directions (default 50.0)
save_metrics : variable string, optional
List of metrics to save.
Possible values: fa, ga, rgb, md, ad, rd, mode, evec, eval,
dt_tensor, dk_tensor, mk, ak, rk
(default [] (all))
out_dir : string, optional
Output directory (default input file directory)
out_dt_tensor : string, optional
Name of the tensors volume to be saved
(default: 'dti_tensors.nii.gz')
out_dk_tensor : string, optional
Name of the tensors volume to be saved
(default 'dki_tensors.nii.gz')
out_fa : string, optional
Name of the fractional anisotropy volume to be saved
(default 'fa.nii.gz')
out_ga : string, optional
Name of the geodesic anisotropy volume to be saved
(default 'ga.nii.gz')
out_rgb : string, optional
Name of the color fa volume to be saved (default 'rgb.nii.gz')
out_md : string, optional
Name of the mean diffusivity volume to be saved
(default 'md.nii.gz')
out_ad : string, optional
Name of the axial diffusivity volume to be saved
(default 'ad.nii.gz')
out_rd : string, optional
Name of the radial diffusivity volume to be saved
(default 'rd.nii.gz')
out_mode : string, optional
Name of the mode volume to be saved (default 'mode.nii.gz')
out_evec : string, optional
Name of the eigenvectors volume to be saved
(default 'evecs.nii.gz')
out_eval : string, optional
Name of the eigenvalues to be saved (default 'evals.nii.gz')
out_mk : string, optional
Name of the mean kurtosis to be saved (default: 'mk.nii.gz')
out_ak : string, optional
Name of the axial kurtosis to be saved (default: 'ak.nii.gz')
out_rk : string, optional
Name of the radial kurtosis to be saved (default: 'rk.nii.gz')
References
----------
.. [1] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011.
Estimation of tensors and tensor-derived measures in diffusional
kurtosis imaging. Magn Reson Med. 65(3), 823-836
.. [2] Jensen, Jens H., Joseph A. Helpern, Anita Ramani, Hanzhang Lu,
and Kyle Kaczynski. 2005. Diffusional Kurtosis Imaging: The
Quantification of Non-Gaussian Water Diffusion by Means of Magnetic
Resonance Imaging. MRM 53 (6):1432-40.
"""
io_it = self.get_io_iterator()
for (dwi, bval, bvec, mask, otensor, ofa, oga, orgb, omd, oad, orad,
omode, oevecs, oevals, odk_tensor, omk, oak, ork) in io_it:
logging.info('Computing DKI metrics for {0}'.format(dwi))
data, affine = load_nifti(dwi)
if mask is not None:
mask = load_nifti_data(mask).astype(bool)
dkfit, _ = self.get_fitted_tensor(data, mask, bval, bvec,
b0_threshold)
if not save_metrics:
save_metrics = ['mk', 'rk', 'ak', 'fa', 'md', 'rd', 'ad', 'ga',
'rgb', 'mode', 'evec', 'eval', 'dt_tensor',
'dk_tensor']
evals, evecs, kt = split_dki_param(dkfit.model_params)
FA = fractional_anisotropy(evals)
FA[np.isnan(FA)] = 0
FA = np.clip(FA, 0, 1)
if 'dt_tensor' in save_metrics:
tensor_vals = lower_triangular(dkfit.quadratic_form)
correct_order = [0, 1, 3, 2, 4, 5]
tensor_vals_reordered = tensor_vals[..., correct_order]
save_nifti(otensor, tensor_vals_reordered.astype(np.float32),
affine)
if 'dk_tensor' in save_metrics:
save_nifti(odk_tensor, dkfit.kt.astype(np.float32), affine)
if 'fa' in save_metrics:
save_nifti(ofa, FA.astype(np.float32), affine)
if 'ga' in save_metrics:
GA = geodesic_anisotropy(dkfit.evals)
save_nifti(oga, GA.astype(np.float32), affine)
if 'rgb' in save_metrics:
RGB = color_fa(FA, dkfit.evecs)
save_nifti(orgb, np.array(255 * RGB, 'uint8'), affine)
if 'md' in save_metrics:
MD = mean_diffusivity(dkfit.evals)
save_nifti(omd, MD.astype(np.float32), affine)
if 'ad' in save_metrics:
AD = axial_diffusivity(dkfit.evals)
save_nifti(oad, AD.astype(np.float32), affine)
if 'rd' in save_metrics:
RD = radial_diffusivity(dkfit.evals)
save_nifti(orad, RD.astype(np.float32), affine)
if 'mode' in save_metrics:
MODE = get_mode(dkfit.quadratic_form)
save_nifti(omode, MODE.astype(np.float32), affine)
if 'evec' in save_metrics:
save_nifti(oevecs, dkfit.evecs.astype(np.float32), affine)
if 'eval' in save_metrics:
save_nifti(oevals, dkfit.evals.astype(np.float32), affine)
if 'mk' in save_metrics:
save_nifti(omk, dkfit.mk().astype(np.float32), affine)
if 'ak' in save_metrics:
save_nifti(oak, dkfit.ak().astype(np.float32), affine)
if 'rk' in save_metrics:
save_nifti(ork, dkfit.rk().astype(np.float32), affine)
logging.info('DKI metrics saved in {0}'.
format(os.path.dirname(oevals)))
def get_dki_model(self, gtab):
return DiffusionKurtosisModel(gtab)
def get_fitted_tensor(self, data, mask, bval, bvec, b0_threshold=50):
logging.info('Diffusion kurtosis estimation...')
bvals, bvecs = read_bvals_bvecs(bval, bvec)
if b0_threshold < bvals.min():
warn("b0_threshold (value: {0}) is too low, increase your "
"b0_threshold. It should be higher than the first b0 value "
"({1}).".format(b0_threshold, bvals.min()))
gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold)
dkmodel = self.get_dki_model(gtab)
dkfit = dkmodel.fit(data, mask)
return dkfit, gtab
class ReconstIvimFlow(Workflow):
@classmethod
def get_short_name(cls):
return 'ivim'
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
split_b_D=400, split_b_S0=200, b0_threshold=0, save_metrics=[],
out_dir='', out_S0_predicted='S0_predicted.nii.gz',
out_perfusion_fraction='perfusion_fraction.nii.gz',
out_D_star='D_star.nii.gz', out_D='D.nii.gz'):
""" Workflow for Intra-voxel Incoherent Motion reconstruction and for
computing IVIM metrics. Performs an IVIM reconstruction on the files
by 'globbing' ``input_files`` and saves the IVIM metrics in a directory
specified by ``out_dir``.
Parameters
----------
input_files : string
Path to the input volumes. This path may contain wildcards to
process multiple inputs at once.
bvalues_files : string
Path to the bvalues files. This path may contain wildcards to use
multiple bvalues files at once.
bvectors_files : string
Path to the bvectors files. This path may contain wildcards to use
multiple bvectors files at once.
mask_files : string
Path to the input masks. This path may contain wildcards to use
multiple masks at once. (default: No mask used)
split_b_D : int, optional
Value to split the bvals to estimate D for the two-stage process of
fitting
(default 400)
split_b_S0 : int, optional
Value to split the bvals to estimate S0 for the two-stage process
of fitting.
(default 200)
b0_threshold : int, optional
Threshold value for the b0 bval.
(default 0)
save_metrics : variable string, optional
List of metrics to save.
Possible values: S0_predicted, perfusion_fraction, D_star, D
(default [] (all))
out_dir : string, optional
Output directory (default input file directory)
out_S0_predicted : string, optional
Name of the S0 signal estimated to be saved
(default: 'S0_predicted.nii.gz')
out_perfusion_fraction : string, optional
Name of the estimated volume fractions to be saved
(default 'perfusion_fraction.nii.gz')
out_D_star : string, optional
Name of the estimated pseudo-diffusion parameter to be saved
(default 'D_star.nii.gz')
out_D : string, optional
Name of the estimated diffusion parameter to be saved
(default 'D.nii.gz')
References
----------
.. [Stejskal65] Stejskal, E. O.; Tanner, J. E. (1 January 1965).
"Spin Diffusion Measurements: Spin Echoes in the
Presence of a Time-Dependent Field Gradient". The
Journal of Chemical Physics 42 (1): 288.
Bibcode: 1965JChPh..42..288S. doi:10.1063/1.1695690.
.. [LeBihan88] Le Bihan, Denis, et al. "Separation of diffusion
and perfusion in intravoxel incoherent motion MR
imaging." Radiology 168.2 (1988): 497-505.
"""
io_it = self.get_io_iterator()
for (dwi, bval, bvec, mask, oS0_predicted, operfusion_fraction,
oD_star, oD) in io_it:
logging.info('Computing IVIM metrics for {0}'.format(dwi))
data, affine = load_nifti(dwi)
if mask is not None:
mask = load_nifti_data(mask).astype(bool)
ivimfit, _ = self.get_fitted_ivim(data, mask, bval, bvec,
b0_threshold)
if not save_metrics:
save_metrics = ['S0_predicted', 'perfusion_fraction', 'D_star',
'D']
if 'S0_predicted' in save_metrics:
save_nifti(oS0_predicted,
ivimfit.S0_predicted.astype(np.float32), affine)
if 'perfusion_fraction' in save_metrics:
save_nifti(operfusion_fraction,
ivimfit.perfusion_fraction.astype(np.float32),
affine)
if 'D_star' in save_metrics:
save_nifti(oD_star, ivimfit.D_star.astype(np.float32), affine)
if 'D' in save_metrics:
save_nifti(oD, ivimfit.D.astype(np.float32), affine)
logging.info('IVIM metrics saved in {0}'.
format(os.path.dirname(oD)))
def get_fitted_ivim(self, data, mask, bval, bvec, b0_threshold=50):
logging.info('Intra-Voxel Incoherent Motion Estimation...')
bvals, bvecs = read_bvals_bvecs(bval, bvec)
if b0_threshold < bvals.min():
warn("b0_threshold (value: {0}) is too low, increase your "
"b0_threshold. It should be higher than the first b0 value "
"({1}).".format(b0_threshold, bvals.min()))
gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold)
ivimmodel = IvimModel(gtab)
ivimfit = ivimmodel.fit(data, mask)
return ivimfit, gtab
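# Illustrative usage sketch (not part of the original), mirroring the
# ReconstIvimFlow.run signature above; file names are placeholders:
#
#     ReconstIvimFlow().run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec',
#                           'mask.nii.gz', out_dir='out')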
|
FrancoisRheaultUS/dipy
|
dipy/workflows/reconst.py
|
Python
|
bsd-3-clause
| 44,675
|
[
"Gaussian"
] |
4b1da6556daec8657afecbac40f1bda50fcb901d88625aa786ec58d1ef918175
|
#!/usr/bin/env python
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2011, 2012 Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 or (at your
# option) any later version as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
import datetime
import fcntl
import json
import md5
import os
import sys
import tempfile
import time
import xml.utils.iso8601
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
# Imported or side effect of adding ZenPack's lib directory to sys.path.
import locallibs
# Requires that locallibs first be imported.
import txcloudstack
# Map of listEvents level response parameter to Zenoss severity.
SEVERITY_MAP = {
'INFO': 2,
'WARN': 3,
'ERROR': 4,
}
# Map of listAlerts type response parameter to textual description.
# https://github.com/CloudStack/CloudStack/blob/2.2.4/ui/scripts/cloud.core.js#L2033
ALERT_TYPE_MAP = {
0: 'Capacity Threshold - Memory',
1: 'Capacity Threshold - CPU',
2: 'Capacity Threshold - Storage Used',
3: 'Capacity Threshold - Storage Allocated',
4: 'Capacity Threshold - Public IP',
5: 'Capacity Threshold - Private IP',
6: 'Monitoring - Host',
7: 'Monitoring - VM',
8: 'Monitoring - Domain Router',
9: 'Monitoring - Console Proxy',
10: 'Monitoring - Routing Host',
11: 'Monitoring - Storage',
12: 'Monitoring - Usage Server',
13: 'Monitoring - Management Server',
14: 'Migration - Domain Router',
15: 'Migration - Console Proxy',
16: 'Migration - User VM',
17: 'VLAN',
18: 'Monitoring - Secondary Storage VM',
}
def alert_type(alert_type_id):
"""Return the string representation of give numeric alert type ID."""
return ALERT_TYPE_MAP.get(alert_type_id, 'Unknown (%s)' % alert_type_id)
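# For example, alert_type(6) returns 'Monitoring - Host', while an unmapped
# ID falls back to the 'Unknown (...)' form, e.g. alert_type(99) -> 'Unknown (99)'.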
class CloudStackPoller(object):
def __init__(self, url, api_key, secret_key, collect_events=False):
self._url = url
self._api_key = api_key
self._secret_key = secret_key
self._collect_events = collect_events
self._events = []
self._values = {}
def _temp_filename(self, key):
target_hash = md5.md5('%s+%s+%s' % (
self._url, self._api_key, self._secret_key)).hexdigest()
return os.path.join(
tempfile.gettempdir(),
'.zenoss_cloudstack_%s_%s' % (key, target_hash))
def _save(self, data, key):
tmpfile = self._temp_filename(key=key)
tmp = open(tmpfile, 'w')
# Many copies of this script could be attempting to write this file at
# the same time. Use file locking for safety.
fcntl.flock(tmp.fileno(), fcntl.LOCK_EX)
tmp.truncate()
json.dump(data, tmp)
tmp.close()
def _saved(self, key):
tmpfile = self._temp_filename(key=key)
if not os.path.isfile(tmpfile):
return []
data = None
tmp = open(tmpfile, 'r')
try:
data = json.load(tmp)
except ValueError:
pass
tmp.close()
return data
def _saved_values(self):
tmpfile = self._temp_filename(key='values')
if not os.path.isfile(tmpfile):
return None
# Make sure temporary data isn't too stale.
if os.stat(tmpfile).st_mtime < (time.time() - 50):
os.unlink(tmpfile)
return None
tmp = open(tmpfile, 'r')
try:
values = json.load(tmp)
except ValueError:
return None
tmp.close()
return values
def _print_output(self):
print json.dumps({'events': self._events, 'values': self._values})
def _process_listAlerts(self, response):
events = []
last_alerts = self._saved(key='alerts')
last_alert_ids = set()
if last_alerts:
for alert in last_alerts:
last_alert_ids.add(alert['id'])
new_alerts = response.get('alert', [])
new_alert_ids = set()
self._save(new_alerts, key='alerts')
for alert in new_alerts:
new_alert_ids.add(alert['id'])
# Don't send events for the same alert every cycle.
if alert['id'] in last_alert_ids:
continue
rcvtime = xml.utils.iso8601.parse(alert['sent'])
# Use the alert_type() helper defined above instead of repeating the lookup.
alert_type_str = alert_type(alert['type'])
events.append(dict(
severity=3,
summary=alert['description'],
eventClassKey='cloudstack_alert',
cloudstack_type=alert_type_str,
rcvtime=rcvtime,
))
# Send clear events for alerts that no longer exist.
for alert in last_alerts:
if alert['id'] not in new_alert_ids:
alert_type_str = alert_type(alert['type'])
events.append(dict(
severity=0,
summary=alert['description'],
message='%s: %s' % (alert_type_str, alert['description']),
eventClassKey='cloudstack_alert',
cloudstack_type=alert_type_str,
))
return events
def _process_listEvents(self, response):
events = []
last_events = self._saved(key='events')
last_events_dict = {}
for event in last_events:
last_events_dict[event['id']] = event
new_events = response.get('event', [])
new_event_ids = set()
self._save(new_events, key='events')
for event in new_events:
new_event_ids.add(event['id'])
# Don't send events for the same event every cycle.
last_event = last_events_dict.get(event['id'], None)
if last_event is not None and last_event['state'] == event['state']:
continue
rcvtime = xml.utils.iso8601.parse(event['created'])
new_event = dict(
severity=SEVERITY_MAP.get(event['level'], 3),
summary=event['description'],
eventClassKey='cloudstack_event',
rcvtime=rcvtime,
cloudstack_account=event['account'],
cloudstack_domain=event['domain'],
cloudstack_state=event['state'],
cloudstack_type=event['type'],
)
if 'username' in event:
new_event['cloudstack_username'] = event['username']
events.append(new_event)
return events
def _process_listHosts(self, response):
values = {}
cloud_id = 'cloud'
for h in response.get('host', []):
if h['type'] != 'Routing':
continue
zone_id = 'zone%s' % h['zoneid']
pod_id = 'pod%s' % h['podid']
cluster_id = 'cluster%s' % h['clusterid']
host_id = 'host%s' % h['id']
# Massage the host capacity data into the metrics we want.
cpu_cores = h['cpunumber']
cpu_total = cpu_cores * h['cpuspeed'] * 1e6
cpu_total_op = float(h['cpuwithoverprovisioning']) * 1e6
cpu_allocated_percent = float(h['cpuallocated'].rstrip('%'))
cpu_allocated = cpu_total_op * (cpu_allocated_percent * 0.01)
cpu_used_percent = float(h.get('cpuused', '0%').rstrip('%'))
cpu_used = cpu_total * (cpu_used_percent * 0.01)
memory_total = float(h['memorytotal'])
memory_allocated = float(h['memoryallocated'])
memory_allocated_percent = (memory_allocated / memory_total) * 100.0
memory_used = float(h.get('memoryused', 0.0))
memory_used_percent = (memory_used / memory_total) * 100.0
# Convert networkkbs* to bits/sec.
network_read = float(h.get('networkkbsread', 0)) * 1024 * 8
network_write = float(h.get('networkkbswrite', 0)) * 1024 * 8
values[host_id] = dict(
cpuTotal=cpu_total,
cpuTotalOP=cpu_total_op,
cpuAllocated=cpu_allocated,
cpuAllocatedPercent=cpu_allocated_percent,
cpuUsed=cpu_used,
cpuUsedPercent=cpu_used_percent,
cpuCores=cpu_cores,
memoryTotal=memory_total,
memoryAllocated=memory_allocated,
memoryAllocatedPercent=memory_allocated_percent,
memoryUsed=memory_used,
memoryUsedPercent=memory_used_percent,
networkRead=network_read,
networkWrite=network_write,
)
for a in (cloud_id, zone_id, pod_id, cluster_id):
if a not in values:
values[a] = {
'memoryTotal': 0,
'memoryAllocated': 0,
'memoryAllocatedPercent': 0,
'memoryUsed': 0,
'memoryUsedPercent': 0,
'cpuTotalOP': 0,
'cpuTotal': 0,
'cpuAllocated': 0,
'cpuAllocatedPercent': 0,
'cpuUsed': 0,
'cpuUsedPercent': 0,
'cpuCores': 0,
'networkRead': 0,
'networkWrite': 0,
}
values[a]['cpuTotal'] += cpu_total
values[a]['cpuTotalOP'] += cpu_total_op
values[a]['cpuAllocated'] += cpu_allocated
values[a]['cpuAllocatedPercent'] += cpu_allocated_percent
values[a]['cpuUsed'] += cpu_used
values[a]['cpuUsedPercent'] += cpu_used_percent
values[a]['cpuCores'] += cpu_cores
values[a]['memoryTotal'] += memory_total
values[a]['memoryAllocated'] += memory_allocated
values[a]['memoryAllocatedPercent'] += memory_allocated_percent
values[a]['memoryUsed'] += memory_used
values[a]['memoryUsedPercent'] += memory_used_percent
values[a]['networkRead'] += network_read
values[a]['networkWrite'] += network_write
for k, v in values.items():
if k.startswith('cloud') or \
k.startswith('zone') or \
k.startswith('pod') or \
k.startswith('cluster'):
values[k]['cpuAllocatedPercent'] = \
(v['cpuAllocated'] / v['cpuTotalOP']) * 100.0
values[k]['cpuUsedPercent'] = \
(v['cpuUsed'] / v['cpuTotal']) * 100.0
values[k]['memoryAllocatedPercent'] = \
(v['memoryAllocated'] / v['memoryTotal']) * 100.0
values[k]['memoryUsedPercent'] = \
(v['memoryUsed'] / v['memoryTotal']) * 100.0
return values
def _process_listVirtualMachines(self, response):
values = {}
for vm in response.get('virtualmachine', []):
vm_id = 'vm%s' % vm['id']
values[vm_id] = {}
if 'cpuused' in vm:
values[vm_id]['cpuUsedPercent'] = float(
vm.get('cpuused', '0%').rstrip('%'))
if 'cpunumber' in vm:
values[vm_id]['cpuCores'] = vm['cpunumber']
if 'cpuspeed' in vm:
values[vm_id]['cpuTotal'] = (
vm['cpunumber'] * vm['cpuspeed'] * 1e6)
if 'cpuUsedPercent' in values[vm_id] and \
'cpuTotal' in values[vm_id]:
values[vm_id]['cpuUsed'] = (
values[vm_id]['cpuTotal'] * (
values[vm_id]['cpuUsedPercent'] * 0.01))
if 'networkkbsread' in vm:
values[vm_id]['networkRead'] = float(
vm.get('networkkbsread', 0)) * 1024 * 8
if 'networkkbswrite' in vm:
values[vm_id]['networkWrite'] = float(
vm.get('networkkbswrite', 0)) * 1024 * 8
return values
def _process_listSystemVms(self, response):
values = {}
for systemvm in response.get('systemvm', []):
if systemvm['systemvmtype'] != 'consoleproxy':
continue
systemvm_id = 'systemvm%s' % systemvm['id']
if 'activeviewersessions' in systemvm:
values[systemvm_id] = {
'activeViewerSessions': systemvm['activeviewersessions'],
}
return values
def _process_listCapacity(self, response):
values = {'cloud': {}}
metric_name_map = {
('public_ips', 'capacitytotal'): 'publicIPsTotal',
('public_ips', 'capacityused'): 'publicIPsUsed',
('public_ips', 'percentused'): 'publicIPsUsedPercent',
('private_ips', 'capacitytotal'): 'privateIPsTotal',
('private_ips', 'capacityused'): 'privateIPsUsed',
('private_ips', 'percentused'): 'privateIPsUsedPercent',
('memory', 'capacitytotal'): 'memoryTotalOP',
('memory', 'capacityused'): 'memoryAllocated',
('memory', 'percentused'): 'memoryAllocatedPercent',
('cpu', 'capacitytotal'): 'cpuTotalOP',
('cpu', 'capacityused'): 'cpuAllocated',
('cpu', 'percentused'): 'cpuAllocatedPercent',
('primary_storage_allocated', 'capacitytotal'): 'primaryStorageTotalOP',
('primary_storage_allocated', 'capacityused'): 'primaryStorageAllocated',
('primary_storage_allocated', 'percentused'): 'primaryStorageAllocatedPercent',
('primary_storage_used', 'capacitytotal'): 'primaryStorageTotal',
('primary_storage_used', 'capacityused'): 'primaryStorageUsed',
('primary_storage_used', 'percentused'): 'primaryStorageUsedPercent',
('secondary_storage', 'capacitytotal'): 'secondaryStorageTotal',
('secondary_storage', 'capacityused'): 'secondaryStorageUsed',
('secondary_storage', 'percentused'): 'secondaryStorageUsedPercent',
}
for c in response.get('capacity', []):
c_type = txcloudstack.capacity_type_string(c['type'])
for c_key in ('capacitytotal', 'capacityused', 'percentused'):
metric_name = metric_name_map.get((c_type, c_key), None)
if not metric_name:
continue
# Convert CPU from MHz to Hz.
if c_type == 'cpu' and not c_key.startswith('percent'):
c[c_key] = float(c[c_key]) * 1e6
# Zone
if c.get('podid', -1) == -1:
values['cloud'].setdefault(metric_name, 0)
zone_id = 'zone%s' % c['zoneid']
values.setdefault(zone_id, {})
values['cloud'][metric_name] += float(c[c_key])
values[zone_id][metric_name] = float(c[c_key])
# Pod
else:
pod_id = 'pod%s' % c['podid']
values.setdefault(pod_id, {})
values[pod_id][metric_name] = float(c[c_key])
# Calculate average percentages for cloud.
for k, v in values['cloud'].items():
if k.endswith('AllocatedPercent'):
allocated_k = k.replace('Percent', '')
total_op_k = k.replace('AllocatedPercent', 'TotalOP')
try:
values['cloud'][k] = (
values['cloud'][allocated_k] /
values['cloud'][total_op_k]) * 100.0
except ZeroDivisionError:
values['cloud'][k] = 0.0
elif k.endswith('UsedPercent'):
used_k = k.replace('Percent', '')
total_k = k.replace('UsedPercent', 'Total')
try:
values['cloud'][k] = (
values['cloud'][used_k] /
values['cloud'][total_k]) * 100.0
except ZeroDivisionError:
values['cloud'][k] = 0.0
return values
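# Example of the metric_name_map lookup above: a capacity entry with
# c_type == 'cpu' and c_key == 'capacityused' is stored under the
# 'cpuAllocated' metric, after the MHz -> Hz conversion (* 1e6).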
def _callback(self, results):
if reactor.running:
reactor.stop()
data = {}
for success, result in results:
# For non-admin users some list calls might fail due to permissions,
# but listCapacity and/or listVirtualMachines should succeed.
if success:
data.update(result)
# send error event only when all list calls fail
if not bool(data): # no data collected
error = result.getErrorMessage()
self._events.append(dict(
severity=4,
summary='CloudStack error: %s' % error,
eventKey='cloudstack_failure',
eventClassKey='cloudstack_error',
))
self._print_output()
return
if 'listalertsresponse' in data:
self._events.extend(
self._process_listAlerts(data['listalertsresponse']))
if 'listeventsresponse' in data:
self._events.extend(
self._process_listEvents(data['listeventsresponse']))
if 'listhostsresponse' in data:
self._values.update(
self._process_listHosts(data['listhostsresponse']))
if 'listcapacityresponse' in data:
capacity = self._process_listCapacity(
data['listcapacityresponse'])
for component, values in capacity.items():
for k, v in values.items():
if component in self._values:
self._values[component].update(values)
else:
self._values[component] = values
if 'listsystemvmsresponse' in data:
self._values.update(
self._process_listSystemVms(data['listsystemvmsresponse']))
if 'listvirtualmachinesresponse' in data:
self._values.update(
self._process_listVirtualMachines(
data['listvirtualmachinesresponse']))
if len(self._values.keys()) > 0:
self._save(self._values, key='values')
self._events.append(dict(
severity=0,
summary='CloudStack polled successfully',
eventKey='cloudstack_failure',
eventClassKey='cloudstack_success',
))
self._print_output()
def run(self):
client = txcloudstack.Client(
self._url,
self._api_key,
self._secret_key)
deferreds = []
if self._collect_events:
# Prevent multiple simultaneous calls to the same API.
lock = open(self._temp_filename('events.lock'), 'w')
fcntl.flock(lock.fileno(), fcntl.LOCK_EX)
# Go back two days to compensate for downtime and timezone
# variance between poller and cloud.
startdate = datetime.date.today() - datetime.timedelta(days=2)
deferreds.extend((
client.listAlerts(),
client.listEvents(startdate=startdate.strftime('%Y-%m-%d')),
))
else:
# Prevent multiple simultaneous calls to the same API.
lock = open(self._temp_filename('values.lock'), 'w')
fcntl.flock(lock.fileno(), fcntl.LOCK_EX)
saved_values = self._saved_values()
if saved_values is not None:
self._values = saved_values
self._print_output()
return
deferreds.extend((
client.listCapacity(),
client.listHosts(type="Routing"),
client.listSystemVms(),
client.listVirtualMachines(
isrecursive=True, state="Running", listAll='true'),
))
DeferredList(deferreds, consumeErrors=True).addCallback(self._callback)
reactor.run()
if __name__ == '__main__':
usage = "Usage: %s <url> <apikey> <secretkey>"
url = api_key = secret_key = None
try:
url, api_key, secret_key = sys.argv[1:4]
except ValueError:
print >> sys.stderr, usage % sys.argv[0]
sys.exit(1)
events = False
if len(sys.argv) > 4 and sys.argv[4] == 'events':
events = True
poller = CloudStackPoller(url, api_key, secret_key, collect_events=events)
poller.run()
|
zenoss/ZenPacks.zenoss.CloudStack
|
ZenPacks/zenoss/CloudStack/poll_cloudstack.py
|
Python
|
gpl-2.0
| 21,076
|
[
"VisIt"
] |
0b7da490713f1d787a4c0b015150e235fc430fc8b763a5366edcb75ae617e9dc
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/linear-prediction.py
# Using neural networks to predict number sequences
################################################################################
# A neural network can be used to predict future values of a sequence of
# numbers. Wold's Decomposition Theorem establishes that any sequence can be
# split into a regular and predictable part and an innovation process (which is
# discrete white noise, and thus unpredictable). The goal of this tutorial is
# to show how to use the neural network implementation of Peach to do this.
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import random
import peach as p
# First, we create the network, with only one layer with only one neuron in it.
# The neuron has many inputs and only one output. The activation function is the
# identity. This kind of neuron is usually known as ADALINE (Adaptive Linear
# Neuron, later Adaptive Linear Element). We use as learning algorithm the LMS
# algorithm.
N = 32
nn = p.FeedForward((N, 1), phi=p.Identity, lrule=p.LMS(0.05))
# The lists below will track the values of the sequence being predicted and of
# the error for plotting.
xlog = [ ]
ylog = [ ]
elog = [ ]
error = 1.
i = 0
x = zeros((N, 1), dtype=float) # Input is a column-vector.
while i < 2000 and error > 1.e-10:
# The sequence we will predict is the one generated by a cosine. The next
# value of the function is the desired output of the neuron. The neuron will
# use past values to predict the unknown value. To spice things up, we add
# some Gaussian noise (actually, it might help the convergence).
d = cos(2.*pi/128. * i) + random.gauss(0., 0.01)
# Here, we activate the network to calculate the prediction.
y = nn(x)[0, 0] # Notice that we need to access the output
error = abs(d - y) # as a vector, since that's how the NN work.
nn.learn(x, d)
# We store the results to plot later.
xlog.append(d)
ylog.append(y)
elog.append(error)
# Here, we apply a delay in the sequence by shifting every value one
# position back. We are using N (=32) samples to make the prediction, but
# the code here makes no distinction and could be used with any number of
# coefficients in the prediction. The last value of the sequence is put in
# the [0] position of the vector.
x[1:] = x[:-1]
x[0] = d
i = i + 1
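# Worked example of the delay line above with N=4: if x held
# [x3, x2, x1, x0] (newest first), then after the shift and the insertion
# of the new sample d it holds [d, x3, x2, x1]; the oldest sample is dropped.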
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``linear-prediction.png``.
try:
import pylab
pylab.subplot(211)
pylab.hold(True)
pylab.grid(True)
pylab.plot(array(xlog), 'b--')
pylab.plot(array(ylog), 'g')
pylab.plot(array(elog), 'r:')
pylab.legend([ "$x$", "$y$", "$error$" ])
pylab.subplot(212)
pylab.grid(True)
pylab.stem(arange(0, N), reshape(nn[0].weights, (N,)), "k-", "ko", "k-")
pylab.xlim([0, N-1])
pylab.savefig("linear-prediction.png")
except ImportError:
print "After %d iterations:" % (len(elog),)
print nn[0].weights
|
anki1909/peach
|
tutorial/neural-networks/linear-prediction.py
|
Python
|
lgpl-2.1
| 3,386
|
[
"Gaussian",
"NEURON"
] |
3830e6442f013865de2d8d091db70b82462e0af684c3470c9c895f9b4dfde59f
|
"""
Summarize hits in a blast file based on the sequences present
"""
import os
import sys
import argparse
from roblib import stream_fasta, stream_blast_results
def seq_lengths(fafile, verbose=False):
"""
Read the sequence length from a fasta file
:param fafile: the fasta file to read
:param verbose: more output
:return: a dict of sequence id and length
"""
length = {}
for i,s in stream_fasta(fafile):
length[i] = len(s)
return length
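# Minimal usage sketch (hypothetical file name):
#
#     lengths = seq_lengths('queries.fasta')
#     # -> {'contig1': 1523, 'contig2': 801, ...}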
def summarize_blast(fafile, blfile, verbose=False):
"""
Summarize blast hits
:param fafile: the query fasta file
:param blfile: the blast output file
:param verbose: more output
:return:
"""
seqlens = seq_lengths(fafile, verbose)
for b in stream_blast_results(blfile, verbose=verbose):
"blech blech blech"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('-b', help='blast output file', required=True)
parser.add_argument('-f', help='fasta query file', required=True)
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
summarize_blast(args.f, args.b, args.v)
|
linsalrob/EdwardsLab
|
blast/summarize_blast.py
|
Python
|
mit
| 1,172
|
[
"BLAST"
] |
7031cd0e1a25fb01d4b8dc04551e2fc54f5395c84702c2f33a610a5a14ebecb7
|
# $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Atom-based calculation of LogP and MR using Crippen's approach
Reference:
S. A. Wildman and G. M. Crippen *JCICS* _39_ 868-873 (1999)
"""
from __future__ import print_function
import os
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import numpy
_smartsPatterns = {}
_patternOrder = []
# this is the file containing the atom contributions
defaultPatternFileName = os.path.join(RDConfig.RDDataDir, 'Crippen.txt')
def _ReadPatts(fileName):
""" *Internal Use Only*
parses the pattern list from the data file
"""
patts = {}
order = []
with open(fileName, 'r') as f:
lines = f.readlines()
for line in lines:
if line[0] != '#':
splitLine = line.split('\t')
if len(splitLine) >= 4 and splitLine[0] != '':
sma = splitLine[1]
if sma != 'SMARTS':
sma = sma.replace('"', '')
p = Chem.MolFromSmarts(sma)
if p:
if len(splitLine[0]) > 1 and splitLine[0][1] not in 'S0123456789':
cha = splitLine[0][:2]
else:
cha = splitLine[0][0]
logP = float(splitLine[2])
if splitLine[3] != '':
mr = float(splitLine[3])
else:
mr = 0.0
if cha not in order:
order.append(cha)
l = patts.get(cha, [])
l.append((sma, p, logP, mr))
patts[cha] = l
else:
print('Problems parsing smarts: %s' % (sma))
return order, patts
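# For reference, the parser above expects tab-separated Crippen.txt lines of
# the form <type>\t<SMARTS>\t<logP>\t<MR>; a hypothetical line
# "C1\t[CH4]\t0.1441\t2.503" would be grouped under key 'C' (since the
# second character is a digit) as ('[CH4]', <mol>, 0.1441, 2.503).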
_GetAtomContribs = rdMolDescriptors._CalcCrippenContribs
def _pyGetAtomContribs(mol, patts=None, order=None, verbose=0, force=0):
""" *Internal Use Only*
calculates atomic contributions to the LogP and MR values
if the argument *force* is not set, we'll use the molecules stored
_crippenContribs value when possible instead of re-calculating.
**Note:** Changes here affect the version numbers of MolLogP and MolMR
as well as the VSA descriptors in Chem.MolSurf
"""
if not force and hasattr(mol, '_crippenContribs'):
return mol._crippenContribs
if patts is None:
patts = _smartsPatterns
order = _patternOrder
nAtoms = mol.GetNumAtoms()
atomContribs = [(0., 0.)] * nAtoms
doneAtoms = [0] * nAtoms
nAtomsFound = 0
done = False
for cha in order:
pattVect = patts[cha]
for sma, patt, logp, mr in pattVect:
#print('try:',entry[0])
for match in mol.GetSubstructMatches(patt, False, False):
firstIdx = match[0]
if not doneAtoms[firstIdx]:
doneAtoms[firstIdx] = 1
atomContribs[firstIdx] = (logp, mr)
if verbose:
print('\tAtom %d: %s %4.4f %4.4f' % (match[0], sma, logp, mr))
nAtomsFound += 1
if nAtomsFound >= nAtoms:
done = True
break
if done:
break
mol._crippenContribs = atomContribs
return atomContribs
def _Init():
global _smartsPatterns, _patternOrder
if _smartsPatterns == {}:
_patternOrder, _smartsPatterns = _ReadPatts(defaultPatternFileName)
def _pyMolLogP(inMol, patts=None, order=None, verbose=0, addHs=1):
""" DEPRECATED
"""
if addHs < 0:
mol = Chem.AddHs(inMol, 1)
elif addHs > 0:
mol = Chem.AddHs(inMol, 0)
else:
mol = inMol
if patts is None:
global _smartsPatterns, _patternOrder
if _smartsPatterns == {}:
_patternOrder, _smartsPatterns = _ReadPatts(defaultPatternFileName)
patts = _smartsPatterns
order = _patternOrder
atomContribs = _pyGetAtomContribs(mol, patts, order, verbose=verbose)
return numpy.sum(atomContribs, 0)[0]
_pyMolLogP.version = "1.1.0"
def _pyMolMR(inMol, patts=None, order=None, verbose=0, addHs=1):
""" DEPRECATED
"""
if addHs < 0:
mol = Chem.AddHs(inMol, 1)
elif addHs > 0:
mol = Chem.AddHs(inMol, 0)
else:
mol = inMol
if patts is None:
global _smartsPatterns, _patternOrder
if _smartsPatterns == {}:
_patternOrder, _smartsPatterns = _ReadPatts(defaultPatternFileName)
patts = _smartsPatterns
order = _patternOrder
atomContribs = _pyGetAtomContribs(mol, patts, order, verbose=verbose)
return numpy.sum(atomContribs, 0)[1]
_pyMolMR.version = "1.1.0"
MolLogP = lambda *x, **y: rdMolDescriptors.CalcCrippenDescriptors(*x, **y)[0]
MolLogP.version = rdMolDescriptors._CalcCrippenDescriptors_version
MolLogP.__doc__ = """ Wildman-Crippen LogP value
Uses an atom-based scheme based on the values in the paper:
S. A. Wildman and G. M. Crippen JCICS 39 868-873 (1999)
**Arguments**
- inMol: a molecule
- addHs: (optional) toggles adding of Hs to the molecule for the calculation.
If true, hydrogens will be added to the molecule and used in the calculation.
"""
MolMR = lambda *x, **y: rdMolDescriptors.CalcCrippenDescriptors(*x, **y)[1]
MolMR.version = rdMolDescriptors._CalcCrippenDescriptors_version
MolMR.__doc__ = """ Wildman-Crippen MR value
Uses an atom-based scheme based on the values in the paper:
S. A. Wildman and G. M. Crippen JCICS 39 868-873 (1999)
**Arguments**
- inMol: a molecule
- addHs: (optional) toggles adding of Hs to the molecule for the calculation.
If true, hydrogens will be added to the molecule and used in the calculation.
"""
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
ms = []
verbose = 0
if '-v' in sys.argv:
verbose = 1
sys.argv.remove('-v')
for smi in sys.argv[1:]:
ms.append((smi, Chem.MolFromSmiles(smi)))
for smi, m in ms:
print('Mol: %s' % (smi))
logp = MolLogP(m, verbose=verbose)
print('----')
mr = MolMR(m, verbose=verbose)
print('Res:', logp, mr)
newM = Chem.AddHs(m)
logp = MolLogP(newM, addHs=0)
mr = MolMR(newM, addHs=0)
print('\t', logp, mr)
print('-*-*-*-*-*-*-*-*')
|
rvianello/rdkit
|
rdkit/Chem/Crippen.py
|
Python
|
bsd-3-clause
| 6,149
|
[
"RDKit"
] |
81476be8f99d3f2a231fd9d182b74e48ee16eb82ca0d96151d02d981c4c24f26
|
"""
LAMMPS EEX I/O
"""
import pandas as pd
import math
import numpy as np
import eex
from . import lammps_metadata as lmd
import logging
logger = logging.getLogger(__name__)
def write_lammps_file(dl, data_filename, input_filename, unit_style="real", blocksize=110):
# handle units
unit_set = lmd.units_style[unit_style]
term_table = lmd.build_term_table(unit_style)
nb_term_table = lmd.build_nb_table(unit_style)
data_file = open(data_filename, 'w')
input_file = open(input_filename, 'w')
data_file.write("LAMMPS data file generated by MolSSI EEX\n\n")
input_file.write("# LAMMPS input file generated by MolSSI EEX\n")
input_file.write("units\t%s\n" %(unit_style))
input_file.write("atom_style\tfull\n")
sizes = {}
sizes["atoms"] = dl.get_atom_count()
sizes["bonds"] = dl.get_term_count(2, "total")
sizes["angles"] = dl.get_term_count(3, "total")
sizes["dihedrals"] = dl.get_term_count(4, "total") # Not qutie right once we do impropers
sizes["impropers"] = 0
sizes["atom types"] = len(dl.get_unique_atom_types())
# All the UID's minus the "total" columns
sizes["bond types"] = len(dl.get_term_count(2)) - 1
sizes["angle types"] = len(dl.get_term_count(3)) - 1
sizes["dihedral types"] = len(dl.get_term_count(4)) - 1
sizes["improper types"] = 0
# Write header information
for k in lmd.size_keys:
data_file.write(" %d %s\n" % (sizes[k], k))
# data_file.write(' '.join([str(data["sizes"][k]), k, '\n']))
# Write box information
box_size = dl.get_box_size(utype={"a": unit_set["[length]"], "b": unit_set["[length]"], "c": unit_set["[length]"],
"alpha": "degree", "beta": "degree", "gamma": "degree"})
box_center = dl.get_box_center(utype={"x": unit_set["[length]"], "y": unit_set["[length]"], "z": unit_set["[length]"]})
lo_hi = {}
if box_center and box_size:
lo_hi["x"] = [box_center["x"] - box_size["a"]/2., box_center["x"] + box_size["a"]/2.]
lo_hi["y"] = [box_center["y"] - box_size["b"]/2., box_center["y"] + box_size["b"] / 2.]
lo_hi["z"] = [box_center["z"] - box_size["c"] / 2., box_center["z"] + box_size["c"] / 2.]
for k, v in lo_hi.items():
data_file.write("% 8.6f% 8.6f %slo %shi\n" % (v[0],v[1], k, k))
data_file.write('\n')
param_fmt = "%10.8f"
# Loop over Pair Coeffs
nb_forms = dl.list_stored_nb_types()
# Handle nonbonds - This needs to be generalized badly - make it work for now.
if dl.get_mixing_rule() != '':
data_file.write(("Pair Coeffs\n\n").title())
stored_nb_parameters = dl.list_nb_parameters(
nb_name="LJ", nb_model="epsilon/sigma", utype={"epsilon": unit_set["[energy]"], "sigma": unit_set["[length]"]}, itype="single")
else:
data_file.write("\nPairIJ Coeffs\n\n")
stored_nb_parameters = dl.list_nb_parameters(
nb_name="LJ", nb_model="epsilon/sigma", utype={"epsilon": unit_set["[energy]"], "sigma": unit_set["[length]"]}, itype="pair")
for key, value in stored_nb_parameters.items():
if key[1] is None:
data_file.write(("%2d %10.8f %10.8f\n" % (key[0], value['epsilon'], value['sigma'])))
else:
data_file.write(("%2d %2d %10.8f %10.8f\n" % (key[0], key[1], value['epsilon'], value['sigma'])))
data_file.write("\n")
# Loop over all of the parameter data
for param_order, param_type in zip([2, 3, 4], ["bond", "angle", "dihedral"]):
param_uids = dl.list_term_uids(param_order)
if len(param_uids) == 0: continue
data_file.write(("%s Coeffs\n\n" % param_type).title())
for uid in param_uids:
param_coeffs = dl.get_term_parameter(param_order, uid)
term_data = term_table[param_order][param_coeffs[0]]
param_coeffs = dl.get_term_parameter(param_order, uid, utype=term_data["utype"])
# Order the data like lammps wants it
parameters = [param_coeffs[1][k] for k in term_data["parameters"]]
data_file.write("%2d " % uid)
data_file.write(" ".join(param_fmt % f for f in parameters))
data_file.write("\n")
input_file.write("%s_style\t%s\n" %(param_type, param_coeffs[0]))
data_file.write("\n")
# Write out mass data - don't use get_atom_parameter since we cannot assume that uid = atom_type
data_file.write(" Masses\n\n")
data = dl.get_atoms(["atom_type", "mass"],
utype={'atom_type': None, 'mass':lmd.get_context(unit_style, "[mass]")},
by_value=True).drop_duplicates()
data.to_csv(data_file, sep=' ', index=False, header=False)
# This is not working in Python 3?
#np.savetxt(data_file, np.array(data), fmt='%2d %10.8f')
data_file.write('\n')
# Write out atom data
data_file.write(" Atoms\n\n")
atoms = dl.get_atoms(["molecule_index", "atom_type", "charge", "xyz"], by_value=True)
atoms.index = pd.RangeIndex(start=1, stop=atoms.shape[0] + 1)
# Build a simple formatter
def float_fmt(n):
return "%10.8f" % n
atoms.to_string(data_file, header=None, float_format=float_fmt)
data_file.write('\n\n')
# Write out all of the term data
for param_order, param_type in zip([2, 3, 4], ["bonds", "angles", "dihedrals"]):
if sizes[param_type] == 0: continue
data_file.write((" %s\n\n" % param_type).title())
# Grab term and reorder
cols = ["term_index"] + ["atom%s" % d for d in range(1, param_order + 1)]
term = dl.get_terms(param_type)[cols]
term.index = pd.RangeIndex(start=1, stop=term.shape[0] + 1)
# print(term)
term.to_csv(data_file, header=None, sep=" ")
data_file.write('\n')
input_file.write("read_data\t%s\n" %(data_filename))
data_file.close()
input_file.close()
return True
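# --- Added illustrative sketch (not part of the original file) ---
# How a caller might drive write_lammps_file. The datalayer object `dl` is
# an assumption: any object exposing the accessors used above
# (get_atom_count, get_term_count, get_atoms, get_terms, ...) will do.
def _example_write(dl):
    # writes system.data plus a matching LAMMPS input file in real units
    return write_lammps_file(dl, "system.data", "system.in", unit_style="real")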
| dgasmith/EEX_scratch | eex/translators/lammps/lammps_write.py | Python | bsd-3-clause | 5,929 | ["LAMMPS"] | e1fa0032ded843bf2c3e9eeb17b0cbc63c8518cc9d95716cb44ba8b014cf4b9d |
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Initialize the component for the CNMF
contain a list of functions to initialize the neurons and the corresponding traces with
different set of methods like ICA PCA, greedy roi
"""
#\package Caiman/source_extraction/cnmf/
#\version 1.0
#\copyright GNU General Public License v2.0
#\date Created on Tue Jun 30 21:01:17 2015
#\author: Eftychios A. Pnevmatikakis
from builtins import range
import cv2
import logging
from math import sqrt
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from multiprocessing import current_process
import numpy as np
from past.utils import old_div
import scipy
import scipy.ndimage as nd
from scipy.ndimage.measurements import center_of_mass
from scipy.ndimage.filters import correlate
import scipy.sparse as spr
from skimage.morphology import disk
from sklearn.decomposition import NMF, FastICA
from sklearn.utils.extmath import randomized_svd, squared_norm, randomized_range_finder
import sys
from typing import List
import caiman
from .deconvolution import constrained_foopsi
#from .utilities import fast_graph_Laplacian_patches
from .pre_processing import get_noise_fft, get_noise_welch
from .spatial import circular_constraint, connectivity_constraint
from ...utils.utils import parmap
from ...utils.stats import pd_solve, compressive_nmf
try:
cv2.setNumThreads(0)
except:
pass
def resize(Y, size, interpolation=cv2.INTER_LINEAR):
"""faster and 3D compatible version of skimage.transform.resize"""
if Y.ndim == 2:
return cv2.resize(Y, tuple(size[::-1]), interpolation=interpolation)
elif Y.ndim == 3:
if np.isfortran(Y):
return (cv2.resize(np.array(
[cv2.resize(y, size[:2], interpolation=interpolation) for y in Y.T]).T
.reshape((-1, Y.shape[-1]), order='F'),
(size[-1], np.prod(size[:2])), interpolation=interpolation).reshape(size, order='F'))
else:
return np.array([cv2.resize(y, size[:0:-1], interpolation=interpolation) for y in
cv2.resize(Y.reshape((len(Y), -1), order='F'),
(np.prod(Y.shape[1:]), size[0]), interpolation=interpolation)
.reshape((size[0],) + Y.shape[1:], order='F')])
else: # TODO deal with ndim=4
raise NotImplementedError
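# Added illustrative sketch (assumes numpy/cv2 as imported above): resize
# takes sizes in numpy (rows, cols) order and reverses them for OpenCV.
def _example_resize():
    img = np.random.rand(16, 16).astype(np.float32)
    return resize(img, (8, 8)).shape  # -> (8, 8)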
#%%
def decimate_last_axis(y, sub):
q = y.shape[-1] // sub
r = y.shape[-1] % sub
Y_ds = np.zeros(y.shape[:-1] + (q + (r > 0),), dtype=y.dtype)
Y_ds[..., :q] = y[..., :q * sub].reshape(y.shape[:-1] + (-1, sub)).mean(-1)
if r > 0:
Y_ds[..., -1] = y[..., -r:].mean(-1)
return Y_ds
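# Added illustrative sketch: non-overlapping blocks of `sub` samples along
# the last axis are averaged; a shorter tail is kept as the remainder mean.
def _example_decimate():
    y = np.arange(10, dtype=float)
    return decimate_last_axis(y, 4)  # -> array([1.5, 5.5, 8.5])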
def downscale(Y, ds, opencv=False):
"""downscaling without zero padding
faster version of skimage.transform._warps.block_reduce(Y, ds, np.nanmean, np.nan)"""
from caiman.base.movies import movie
d = Y.ndim
if opencv and (d in [2, 3]):
if d == 2:
Y = Y[..., None]
ds = tuple(ds) + (1,)
else:
Y_ds = movie(Y.transpose(2, 0, 1)).resize(fx=1. / ds[0], fy=1. / ds[1], fz=1. / ds[2],
interpolation=cv2.INTER_AREA).transpose(1, 2, 0)
logging.info('Downscaling using OpenCV')
else:
if d > 3:
# raise NotImplementedError
# slower and more memory intensive version using skimage
from skimage.transform._warps import block_reduce
return block_reduce(Y, ds, np.nanmean, np.nan)
elif d == 1:
return decimate_last_axis(Y, ds)
elif d == 2:
Y = Y[..., None]
ds = tuple(ds) + (1,)
if d == 3 and Y.shape[-1] > 1 and ds[0] == ds[1]:
ds_mat = caiman.source_extraction.cnmf.utilities.decimation_matrix(Y.shape[:2], ds[0])
Y_ds = ds_mat.dot(Y.reshape((-1, Y.shape[-1]), order='F')).reshape(
(1 + (Y.shape[0] - 1) // ds[0], 1 + (Y.shape[1] - 1) // ds[0], -1), order='F')
if ds[2] > 1:
Y_ds = decimate_last_axis(Y_ds, ds[2])
else:
q = np.array(Y.shape) // np.array(ds)
r = np.array(Y.shape) % np.array(ds)
s = q * np.array(ds)
Y_ds = np.zeros(q + (r > 0), dtype=Y.dtype)
Y_ds[:q[0], :q[1], :q[2]] = (Y[:s[0], :s[1], :s[2]]
.reshape(q[0], ds[0], q[1], ds[1], q[2], ds[2])
.mean(1).mean(2).mean(3))
if r[0]:
Y_ds[-1, :q[1], :q[2]] = (Y[-r[0]:, :s[1], :s[2]]
.reshape(r[0], q[1], ds[1], q[2], ds[2])
.mean(0).mean(1).mean(2))
if r[1]:
Y_ds[-1, -1, :q[2]] = (Y[-r[0]:, -r[1]:, :s[2]]
.reshape(r[0], r[1], q[2], ds[2])
.mean(0).mean(0).mean(1))
if r[2]:
Y_ds[-1, -1, -1] = Y[-r[0]:, -r[1]:, -r[2]:].mean()
if r[2]:
Y_ds[-1, :q[1], -1] = (Y[-r[0]:, :s[1]:, -r[2]:]
.reshape(r[0], q[1], ds[1], r[2])
.mean(0).mean(1).mean(1))
if r[1]:
Y_ds[:q[0], -1, :q[2]] = (Y[:s[0], -r[1]:, :s[2]]
.reshape(q[0], ds[0], r[1], q[2], ds[2])
.mean(1).mean(1).mean(2))
if r[2]:
Y_ds[:q[0], -1, -1] = (Y[:s[0]:, -r[1]:, -r[2]:]
.reshape(q[0], ds[0], r[1], r[2])
.mean(1).mean(1).mean(1))
if r[2]:
Y_ds[:q[0], :q[1], -1] = (Y[:s[0], :s[1], -r[2]:]
.reshape(q[0], ds[0], q[1], ds[1], r[2])
.mean(1).mean(2).mean(2))
return Y_ds if d == 3 else Y_ds[:, :, 0]
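# Added illustrative sketch: 2x2 block averaging of a small 2D array via the
# pure-numpy branch (opencv=False).
def _example_downscale():
    Y = np.arange(24, dtype=np.float32).reshape(4, 6)
    return downscale(Y, (2, 2))  # block means, shape (2, 3)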
#%%
try:
profile
except:
def profile(a): return a
def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter=5, maxIter=5, nb=1,
kernel=None, use_hals=True, normalize_init=True, img=None, method_init='greedy_roi',
max_iter_snmf=500, alpha_snmf=10e2, sigma_smooth_snmf=(.5, .5, .5),
perc_baseline_snmf=20, options_local_NMF=None, rolling_sum=False,
rolling_length=100, sn=None, options_total=None,
min_corr=0.8, min_pnr=10, seed_method='auto', ring_size_factor=1.5,
center_psf=False, ssub_B=2, init_iter=2, remove_baseline = True,
SC_kernel='heat', SC_sigma=1, SC_thr=0, SC_normalize=True, SC_use_NN=False,
SC_nnn=20, lambda_gnmf=1):
"""
Initialize components. This function initializes the spatial footprints, temporal components,
and background which are then further refined by the CNMF iterations. There are four
different initialization methods depending on the data you're processing:
'greedy_roi': GreedyROI method used in standard 2p processing (default)
'corr_pnr': GreedyCorr method used for processing 1p data
'sparse_nmf': Sparse NMF method suitable for dendritic/axonal imaging
'graph_nmf': Graph NMF method also suitable for dendritic/axonal imaging
By default the GreedyROI method does not use the RollingGreedyROI variant. This can
be changed through the boolean flag 'rolling_sum'.
All the methods can be used for volumetric data except 'corr_pnr' which is only
available for 2D data.
It is also by default followed by hierarchical alternating least squares (HALS) NMF.
Optional use of spatio-temporal downsampling to boost speed.
Args:
Y: np.ndarray
d1 x d2 [x d3] x T movie, raw data.
K: [optional] int
number of neurons to extract (default value: 30). Maximal number for method 'corr_pnr'.
gSig: [optional] list,tuple
standard deviation of neuron size along x and y [and z] (default value: (5, 5)).
gSiz: [optional] list,tuple
size of kernel (default 2*tau + 1).
nIter: [optional] int
number of iterations for shape tuning (default 5).
maxIter: [optional] int
number of iterations for HALS algorithm (default 5).
ssub: [optional] int
spatial downsampling factor recommended for large datasets (default 1, no downsampling).
tsub: [optional] int
temporal downsampling factor recommended for long datasets (default 1, no downsampling).
kernel: [optional] np.ndarray
User specified kernel for greedyROI
(default None, greedy ROI searches for Gaussian shaped neurons)
use_hals: [optional] bool
Whether to refine components with the hals method
normalize_init: [optional] bool
Whether to normalize_init data before running the initialization
img: optional [np 2d array]
Image with which to normalize. If not present use the mean + offset
method_init: {'greedy_roi', 'corr_pnr', 'sparse_nmf', 'graph_nmf', 'pca_ica'}
Initialization method (default: 'greedy_roi')
max_iter_snmf: int
Maximum number of sparse NMF iterations
alpha_snmf: scalar
Sparsity penalty
rolling_sum: boolean
Detect new components based on a rolling sum of pixel activity (default: False)
rolling_length: int
Length of rolling window (default: 100)
center_psf: Boolean
True indicates centering the filtering kernel for background
removal. This is useful for data with large background
fluctuations.
min_corr: float
minimum local correlation coefficients for selecting a seed pixel.
min_pnr: float
minimum peak-to-noise ratio for selecting a seed pixel.
seed_method: str {'auto', 'manual', 'semi'}
methods for choosing seed pixels
'semi' detects K components automatically and allows adding more manually;
if running as a notebook, 'semi' and 'manual' require a backend that does not
inline figures, e.g. %matplotlib tk
ring_size_factor: float
it's the ratio between the ring radius and neuron diameters.
nb: integer
number of background components for approximating the background using NMF model
sn: ndarray
per pixel noise
options_total: dict
the option dictionary
ssub_B: int, optional
downsampling factor for 1-photon imaging background computation
init_iter: int, optional
number of iterations for 1-photon imaging initialization
Returns:
Ain: np.ndarray
(d1 * d2 [ * d3]) x K , spatial filter of each neuron.
Cin: np.ndarray
K x T , calcium activity of each neuron.
b_in: np.ndarray
(d1 * d2 [ * d3]) x nb, initialization of spatial background.
f_in: np.ndarray
nb x T matrix, initialization of temporal background
center: np.ndarray
K x 2 [or 3] , inferred center of each neuron.
Raises:
Exception "Unsupported method"
Exception 'You need to define arguments for local NMF'
"""
method = method_init
if method == 'local_nmf':
tsub_lnmf = tsub
ssub_lnmf = ssub
tsub = 1
ssub = 1
if gSiz is None:
gSiz = 2 * (np.asarray(gSig) + .5).astype(int) + 1
d, T = np.shape(Y)[:-1], np.shape(Y)[-1]
# rescale according to downsampling factor
gSig = np.asarray(gSig, dtype=float) / ssub
gSiz = np.round(np.asarray(gSiz) / ssub).astype(int)
if normalize_init is True:
logging.info('Variance Normalization')
if img is None:
img = np.mean(Y, axis=-1)
img += np.median(img)
img += np.finfo(np.float32).eps
Y = old_div(Y, np.reshape(img, d + (-1,), order='F'))
alpha_snmf /= np.mean(img)
else:
Y = np.array(Y)
# spatial downsampling
if ssub != 1 or tsub != 1:
if method == 'corr_pnr':
logging.info("Spatial/Temporal downsampling 1-photon")
# this improves performance against ground truth and mitigates border problems
Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=False)
else:
logging.info("Spatial/Temporal downsampling 2-photon")
# this improves performance against ground truth and mitigates border problems
Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=True)
# mean_val = np.mean(Y)
# Y_ds = downscale_local_mean(Y, tuple([ssub] * len(d) + [tsub]), cval=mean_val)
else:
Y_ds = Y
ds = Y_ds.shape[:-1]
if nb > min(np.prod(ds), Y_ds.shape[-1]):
nb = -1
logging.info('Roi Initialization...')
if method == 'greedy_roi':
Ain, Cin, _, b_in, f_in = greedyROI(
Y_ds, nr=K, gSig=gSig, gSiz=gSiz, nIter=nIter, kernel=kernel, nb=nb,
rolling_sum=rolling_sum, rolling_length=rolling_length, seed_method=seed_method)
if use_hals:
logging.info('Refining Components using HALS NMF iterations')
Ain, Cin, b_in, f_in = hals(
Y_ds, Ain, Cin, b_in, f_in, maxIter=maxIter)
elif method == 'corr_pnr':
Ain, Cin, _, b_in, f_in, extra_1p = greedyROI_corr(
Y, Y_ds, max_number=K, gSiz=gSiz[0], gSig=gSig[0], min_corr=min_corr, min_pnr=min_pnr,
ring_size_factor=ring_size_factor, center_psf=center_psf, options=options_total,
sn=sn, nb=nb, ssub=ssub, ssub_B=ssub_B, init_iter=init_iter, seed_method=seed_method)
elif method == 'sparse_nmf':
Ain, Cin, _, b_in, f_in = sparseNMF(
Y_ds, nr=K, nb=nb, max_iter_snmf=max_iter_snmf, alpha=alpha_snmf,
sigma_smooth=sigma_smooth_snmf, remove_baseline=remove_baseline, perc_baseline=perc_baseline_snmf)
elif method == 'compressed_nmf':
Ain, Cin, _, b_in, f_in = compressedNMF(
Y_ds, nr=K, nb=nb, max_iter_snmf=max_iter_snmf,
sigma_smooth=sigma_smooth_snmf, remove_baseline=remove_baseline, perc_baseline=perc_baseline_snmf)
elif method == 'graph_nmf':
Ain, Cin, _, b_in, f_in = graphNMF(
Y_ds, nr=K, nb=nb, max_iter_snmf=max_iter_snmf, lambda_gnmf=lambda_gnmf,
sigma_smooth=sigma_smooth_snmf, remove_baseline=remove_baseline,
perc_baseline=perc_baseline_snmf, SC_kernel=SC_kernel,
SC_sigma=SC_sigma, SC_use_NN=SC_use_NN, SC_nnn=SC_nnn,
SC_normalize=SC_normalize, SC_thr=SC_thr)
elif method == 'pca_ica':
Ain, Cin, _, b_in, f_in = ICA_PCA(
Y_ds, nr=K, sigma_smooth=sigma_smooth_snmf, truncate=2, fun='logcosh', tol=1e-10,
max_iter=max_iter_snmf, remove_baseline=True, perc_baseline=perc_baseline_snmf, nb=nb)
elif method == 'local_nmf':
# todo check this unresolved reference
from SourceExtraction.CNMF4Dendrites import CNMF4Dendrites
from SourceExtraction.AuxilaryFunctions import GetCentersData
# Get initialization for components center
# print(Y_ds.transpose([2, 0, 1]).shape)
if options_local_NMF is None:
raise Exception('You need to define arguments for local NMF')
else:
NumCent = options_local_NMF.pop('NumCent', None)
# Max number of centers to import from Group Lasso initialization - if 0,
# we don't run group lasso
cent = GetCentersData(Y_ds.transpose([2, 0, 1]), NumCent)
sig = Y_ds.shape[:-1]
# estimate size of neuron - bounding box is 3 times this size. If larger
# than the data, we have no bounding box.
cnmf_obj = CNMF4Dendrites(
sig=sig, verbose=True, adaptBias=True, **options_local_NMF)
# Define CNMF parameters
_, _, _ = cnmf_obj.fit(
np.array(Y_ds.transpose([2, 0, 1]), dtype=float), cent)
Ain = cnmf_obj.A
Cin = cnmf_obj.C
b_in = cnmf_obj.b
f_in = cnmf_obj.f
else:
print(method)
raise Exception("Unsupported initialization method")
K = np.shape(Ain)[-1]
if Ain.size > 0 and not center_psf and ssub != 1:
Ain = np.reshape(Ain, ds + (K,), order='F')
if len(ds) == 2:
Ain = resize(Ain, d + (K,))
else: # resize only deals with 2D images, hence apply resize twice
Ain = np.reshape([resize(a, d[1:] + (K,))
for a in Ain], (ds[0], d[1] * d[2], K), order='F')
Ain = resize(Ain, (d[0], d[1] * d[2], K))
Ain = np.reshape(Ain, (np.prod(d), K), order='F')
sparse_b = spr.issparse(b_in)
if (nb > 0 or nb == -1) and (ssub != 1 or tsub != 1):
b_in = np.reshape(b_in.toarray() if sparse_b else b_in, ds + (-1,), order='F')
if len(ds) == 2:
b_in = resize(b_in, d + (b_in.shape[-1],))
else:
b_in = np.reshape([resize(b, d[1:] + (b_in.shape[-1],))
for b in b_in], (ds[0], d[1] * d[2], -1), order='F')
b_in = resize(b_in, (d[0], d[1] * d[2], b_in.shape[-1]))
b_in = np.reshape(b_in, (np.prod(d), -1), order='F')
if sparse_b:
b_in = spr.csc_matrix(b_in)
f_in = resize(np.atleast_2d(f_in), [b_in.shape[-1], T])
if Ain.size > 0:
Cin = resize(Cin, [K, T])
center = np.asarray(
[center_of_mass(a.reshape(d, order='F')) for a in Ain.T])
else:
Cin = np.empty((K, T), dtype=np.float32)
center = []
if normalize_init is True:
if Ain.size > 0:
Ain = Ain * np.reshape(img, (np.prod(d), -1), order='F')
if sparse_b:
b_in = spr.diags(img.ravel(order='F')).dot(b_in)
else:
b_in = b_in * np.reshape(img, (np.prod(d), -1), order='F')
if method == 'corr_pnr' and ring_size_factor is not None:
return scipy.sparse.csc_matrix(Ain), Cin, b_in, f_in, center, extra_1p
else:
return scipy.sparse.csc_matrix(Ain), Cin, b_in, f_in, center
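# Added illustrative sketch (not part of the original file): default
# greedy_roi initialization on a small synthetic movie. Shapes follow the
# docstring: Y is d1 x d2 x T, Ain is (d1*d2) x K, Cin is K x T.
def _example_initialize():
    Y = np.random.rand(32, 32, 100).astype(np.float32)
    Ain, Cin, b_in, f_in, center = initialize_components(
        Y, K=5, gSig=[3, 3], method_init='greedy_roi')
    return Ain.shape, Cin.shape  # ((1024, 5), (5, 100))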
#%%
def ICA_PCA(Y_ds, nr, sigma_smooth=(.5, .5, .5), truncate=2, fun='logcosh',
max_iter=1000, tol=1e-10, remove_baseline=True, perc_baseline=20, nb=1):
""" Initialization using ICA and PCA. DOES NOT WORK WELL WORK IN PROGRESS"
Args:
Y_ds
nr
sigma_smooth
truncate
fun
max_iter
tol
remove_baseline
perc_baseline
nb
"""
print("not a function to use in the moment ICA PCA \n")
m = scipy.ndimage.gaussian_filter(np.transpose(
Y_ds, [2, 0, 1]), sigma=sigma_smooth, mode='nearest', truncate=truncate)
if remove_baseline:
bl = np.percentile(m, perc_baseline, axis=0)
m1 = np.maximum(0, m - bl)
else:
bl = np.zeros(m.shape[1:])
m1 = m
pca_comp = nr
T, d1, d2 = np.shape(m1)
d = d1 * d2
yr = np.reshape(m1, [T, d], order='F')
[U, S, V] = scipy.sparse.linalg.svds(yr, pca_comp)
S = np.diag(S)
whiteningMatrix = np.dot(scipy.linalg.inv(S), U.T)
whitesig = np.dot(whiteningMatrix, yr)
f_ica = FastICA(whiten=False, fun=fun, max_iter=max_iter, tol=tol)
S_ = f_ica.fit_transform(whitesig.T)
A_in = f_ica.mixing_
A_in = np.dot(A_in, whitesig)
masks = np.reshape(A_in.T, (d1, d2, pca_comp),
order='F').transpose([2, 0, 1])
masks = np.array(caiman.base.rois.extractROIsFromPCAICA(masks)[0])
if masks.size > 0:
C_in = caiman.base.movies.movie(
m1).extract_traces_from_masks(np.array(masks)).T
A_in = np.reshape(masks, [-1, d1 * d2], order='F').T
else:
A_in = np.zeros([d1 * d2, pca_comp])
C_in = np.zeros([pca_comp, T])
m1 = yr.T - A_in.dot(C_in) + np.maximum(0, bl.flatten())[:, np.newaxis]
model = NMF(n_components=nb, init='random', random_state=0)
b_in = model.fit_transform(np.maximum(m1, 0)).astype(np.float32)
f_in = model.components_.astype(np.float32)
center = caiman.base.rois.com(A_in, d1, d2)
return A_in, C_in, center, b_in, f_in
def sparseNMF(Y_ds, nr, max_iter_snmf=500, alpha=10e2, sigma_smooth=(.5, .5, .5),
remove_baseline=True, perc_baseline=20, nb=1, truncate=2):
"""
Initialization using sparse NMF
Args:
Y_ds: nd.array or movie (T, x, y [,z])
data
nr: int
number of components
max_iter_snmf: int
number of iterations
alpha:
sparsity regularizer
sigma_smooth:
smoothing along z, x, and y (.5, .5, .5)
perc_baseline:
percentile to remove from movie before NMF
nb: int
Number of background components
Returns:
A: np.array
2d array of size (# of pixels) x nr with the spatial components.
Each column is ordered columnwise (matlab format, order='F')
C: np.array
2d array of size nr X T with the temporal components
center: np.array
2d array of size nr x 2 [ or 3] with the components centroids
"""
m = scipy.ndimage.gaussian_filter(np.transpose(
Y_ds, np.roll(np.arange(Y_ds.ndim), 1)), sigma=sigma_smooth,
mode='nearest', truncate=truncate)
if remove_baseline:
logging.info('REMOVING BASELINE')
bl = np.percentile(m, perc_baseline, axis=0)
m1 = np.maximum(0, m - bl)
else:
logging.info('NOT REMOVING BASELINE')
bl = np.zeros(m.shape[1:])
m1 = m
T, dims = m1.shape[0], m1.shape[1:]
d = np.prod(dims)
yr = np.reshape(m1, [T, d], order='F')
mdl = NMF(n_components=nr, verbose=False, init='nndsvd', tol=1e-10,
max_iter=max_iter_snmf, shuffle=False, alpha=alpha, l1_ratio=1)
C = mdl.fit_transform(yr).T
A = mdl.components_.T
A_in = A
C_in = C
m1 = yr.T - A_in.dot(C_in) + np.maximum(0, bl.flatten())[:, np.newaxis]
model = NMF(n_components=nb, init='random',
random_state=0, max_iter=max_iter_snmf)
b_in = model.fit_transform(np.maximum(m1, 0)).astype(np.float32)
f_in = model.components_.astype(np.float32)
center = caiman.base.rois.com(A_in, *dims)
return A_in, C_in, center, b_in, f_in
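# Added illustrative sketch (assumes an sklearn version where NMF still
# accepts the `alpha` keyword used above): sparse NMF on random data, with
# time along the last axis of Y_ds.
def _example_sparse_nmf():
    Y_ds = np.random.rand(16, 16, 50).astype(np.float32)
    A, C, center, b, f = sparseNMF(Y_ds, nr=3)
    return A.shape, C.shape  # ((256, 3), (3, 50))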
def compressedNMF(Y_ds, nr, r_ov=10, max_iter_snmf=500,
sigma_smooth=(.5, .5, .5), remove_baseline=False,
perc_baseline=20, nb=1, truncate=2, tol=1e-3):
m = scipy.ndimage.gaussian_filter(np.transpose(
Y_ds, np.roll(np.arange(Y_ds.ndim), 1)), sigma=sigma_smooth,
mode='nearest', truncate=truncate)
if remove_baseline:
logging.info('REMOVING BASELINE')
bl = np.percentile(m, perc_baseline, axis=0)
m = np.maximum(0, m - bl)
else:
logging.info('NOT REMOVING BASELINE')
bl = np.zeros(m.shape[1:])
T, dims = m.shape[0], m.shape[1:]
d = np.prod(dims)
yr = np.reshape(m, [T, d], order='F')
# L = randomized_range_finder(yr, nr + r_ov, 3)
# R = randomized_range_finder(yr.T, nr + r_ov, 3)
# Yt = L.T.dot(yr).dot(R)
# c_in, a_in = compressive_nmf(Yt, L, R.T, nr)
# C_in = L.dot(c_in)
# A_in = a_in.dot(R.T)
# A_in = A_in.T
# C_in = C_in.T
A, C, USV = nnsvd_init(yr, nr, r_ov=r_ov)
W_r = np.random.randn(d, nr + r_ov)
W_l = np.random.randn(T, nr + r_ov)
US = USV[0]*USV[1]
YYt = US.dot(USV[2].dot(USV[2].T)).dot(US.T)
# YYt = yr.dot(yr.T)
B = YYt.dot(YYt.dot(US.dot(USV[2].dot(W_r))))
PC, _ = np.linalg.qr(B)
B = USV[2].T.dot(US.T.dot(YYt.dot(YYt.dot(W_l))))
PA, _ = np.linalg.qr(B)
# mdl = NMF(n_components=nr, verbose=False, init='nndsvd', tol=1e-10,
# max_iter=1)
# C = mdl.fit_transform(yr).T
# A = mdl.components_.T
yrPA = yr.dot(PA)
yrPC = PC.T.dot(yr)
for it in range(max_iter_snmf):
C__ = C.copy()
A__ = A.copy()
C_ = C.dot(PC)
A_ = PA.T.dot(A)
C = C*(yrPA.dot(A_)/(C.T.dot(A_.T.dot(A_))+np.finfo(C.dtype).eps)).T
A = A*(yrPC.T.dot(C_.T))/(A.dot(C_.dot(C_.T)) + np.finfo(C.dtype).eps)
nA = np.sqrt((A**2).sum(0))
A /= nA
C *= nA[:, np.newaxis]
if (np.linalg.norm(C - C__)/np.linalg.norm(C__) < tol) & (np.linalg.norm(A - A__)/np.linalg.norm(A__) < tol):
logging.info('Compressed NMF converged after {} iterations'.format(it+1))
break
A_in = A
C_in = C
m1 = yr.T - A_in.dot(C_in) + np.maximum(0, bl.flatten(order='F'))[:, np.newaxis]
model = NMF(n_components=nb, init='random',
random_state=0, max_iter=max_iter_snmf)
b_in = model.fit_transform(np.maximum(m1, 0)).astype(np.float32)
f_in = model.components_.astype(np.float32)
center = caiman.base.rois.com(A_in, *dims)
return A_in, C_in, center, b_in, f_in
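# Added illustrative sketch: compressedNMF is called like sparseNMF; the
# random projections PA/PC above confine the multiplicative updates to a
# (nr + r_ov)-dimensional subspace for speed (nnsvd_init is assumed to be
# defined elsewhere in this module).
def _example_compressed_nmf():
    Y_ds = np.random.rand(16, 16, 50).astype(np.float32)
    A, C, center, b, f = compressedNMF(Y_ds, nr=3)
    return A.shape, C.shape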
def graphNMF(Y_ds, nr, max_iter_snmf=500, lambda_gnmf=1,
sigma_smooth=(.5, .5, .5), remove_baseline=True,
perc_baseline=20, nb=1, truncate=2, tol=1e-3, SC_kernel='heat',
SC_normalize=True, SC_thr=0, SC_sigma=1, SC_use_NN=False,
SC_nnn=20):
m = scipy.ndimage.gaussian_filter(np.transpose(
Y_ds, np.roll(np.arange(Y_ds.ndim), 1)), sigma=sigma_smooth,
mode='nearest', truncate=truncate)
if remove_baseline:
logging.info('REMOVING BASELINE')
bl = np.percentile(m, perc_baseline, axis=0)
m1 = np.maximum(0, m - bl)
else:
logging.info('NOT REMOVING BASELINE')
bl = np.zeros(m.shape[1:])
m1 = m
T, dims = m1.shape[0], m1.shape[1:]
d = np.prod(dims)
yr = np.reshape(m1, [T, d], order='F')
mdl = NMF(n_components=nr, verbose=False, init='nndsvd', tol=1e-10,
max_iter=5)
C = mdl.fit_transform(yr).T
A = mdl.components_.T
W = caiman.source_extraction.cnmf.utilities.fast_graph_Laplacian_patches(
[np.reshape(m, [T, d], order='F').T, [], 'heat', SC_sigma, SC_thr,
SC_nnn, SC_normalize, SC_use_NN])
D = scipy.sparse.spdiags(W.sum(0), 0, W.shape[0], W.shape[0])
for it in range(max_iter_snmf):
C_ = C.copy()
A_ = A.copy()
C = C*(yr.dot(A)/(C.T.dot(A.T.dot(A))+np.finfo(C.dtype).eps)).T
A = A*(yr.T.dot(C.T) + lambda_gnmf*(W.dot(A)))/(A.dot(C.dot(C.T)) + lambda_gnmf*D.dot(A) + np.finfo(C.dtype).eps)
nA = np.sqrt((A**2).sum(0))
A /= nA
C *= nA[:, np.newaxis]
if (np.linalg.norm(C - C_)/np.linalg.norm(C_) < tol) & (np.linalg.norm(A - A_)/np.linalg.norm(A_) < tol):
logging.info('Graph NMF converged after {} iterations'.format(it+1))
break
A_in = A
C_in = C
m1 = yr.T - A_in.dot(C_in) + np.maximum(0, bl.flatten(order='F'))[:, np.newaxis]
model = NMF(n_components=nb, init='random',
random_state=0, max_iter=max_iter_snmf)
b_in = model.fit_transform(np.maximum(m1, 0)).astype(np.float32)
f_in = model.components_.astype(np.float32)
center = caiman.base.rois.com(A_in, *dims)
return A_in, C_in, center, b_in, f_in
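# Added hedged note: the loop above implements graph-regularized NMF. With
# graph Laplacian L = D - W it approximately minimizes
#     || yr.T - A C ||_F^2 + lambda_gnmf * trace(A.T L A)
# via multiplicative updates, so strongly connected pixels (large W entries)
# are encouraged to share spatial footprints. Usage mirrors sparseNMF:
def _example_graph_nmf():
    Y_ds = np.random.rand(16, 16, 50).astype(np.float32)
    A, C, center, b, f = graphNMF(Y_ds, nr=3)
    return A.shape, C.shape  # ((256, 3), (3, 50))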
def greedyROI(Y, nr=30, gSig=[5, 5], gSiz=[11, 11], nIter=5, kernel=None, nb=1,
rolling_sum=False, rolling_length=100, seed_method='auto'):
"""
Greedy initialization of spatial and temporal components using spatial Gaussian filtering
Args:
Y: np.array
3d or 4d array of fluorescence data with time appearing in the last axis.
nr: int
number of components to be found
gSig: scalar or list of integers
standard deviation of Gaussian kernel along each axis
gSiz: scalar or list of integers
size of spatial component
nIter: int
number of iterations when refining estimates
kernel: np.ndarray
User specified kernel to be used, if present, instead of Gaussian (default None)
nb: int
Number of background components
rolling_sum: boolean
Detect new components based on a rolling sum of pixel activity (default: False)
rolling_length: int
Length of rolling window (default: 100)
seed_method: str {'auto', 'manual', 'semi'}
methods for choosing seed pixels
'semi' detects nr components automatically and allows adding more manually;
if running as a notebook, 'semi' and 'manual' require a backend that does not
inline figures, e.g. %matplotlib tk
Returns:
A: np.array
2d array of size (# of pixels) x nr with the spatial components. Each column is
ordered columnwise (matlab format, order='F')
C: np.array
2d array of size nr X T with the temporal components
center: np.array
2d array of size nr x 2 [ or 3] with the components centroids
Author:
Eftychios A. Pnevmatikakis and Andrea Giovannucci based on a matlab implementation by Yuanjun Gao
Simons Foundation, 2015
See Also:
http://www.cell.com/neuron/pdf/S0896-6273(15)01084-3.pdf
"""
logging.info("Greedy initialization of spatial and temporal components using spatial Gaussian filtering")
d = np.shape(Y)
Y[np.isnan(Y)] = 0
med = np.median(Y, axis=-1)
Y = Y - med[..., np.newaxis]
gHalf = np.array(gSiz) // 2
gSiz = 2 * gHalf + 1
# we initialize all values to zero
if seed_method.lower() == 'manual':
nr = 0
A = np.zeros((np.prod(d[0:-1]), nr), dtype=np.float32)
C = np.zeros((nr, d[-1]), dtype=np.float32)
center = np.zeros((nr, Y.ndim - 1), dtype='uint16')
rho = imblur(Y, sig=gSig, siz=gSiz, nDimBlur=Y.ndim - 1, kernel=kernel)
if rolling_sum:
logging.info('Using rolling sum for initialization (RollingGreedyROI)')
rolling_filter = np.ones(
(rolling_length), dtype=np.float32) / rolling_length
rho_s = scipy.signal.lfilter(rolling_filter, 1., rho**2)
v = np.amax(rho_s, axis=-1)
else:
logging.info('Using total sum for initialization (GreedyROI)')
v = np.sum(rho**2, axis=-1)
if seed_method.lower() != 'manual':
for k in range(nr):
# we take the highest value of the blurred total image and we define it as
# the center of the neuron
ind = np.argmax(v)
ij = np.unravel_index(ind, d[0:-1])
for c, i in enumerate(ij):
center[k, c] = i
# we define a squared size around it
ijSig = [[np.maximum(ij[c] - gHalf[c], 0), np.minimum(ij[c] + gHalf[c] + 1, d[c])]
for c in range(len(ij))]
# we copy the data patch (as float32) and extract the trace at pixel ij through time
dataTemp = np.array(
Y[tuple([slice(*a) for a in ijSig])].copy(), dtype=np.float32)
traceTemp = np.array(np.squeeze(rho[ij]), dtype=np.float32)
coef, score = finetune(dataTemp, traceTemp, nIter=nIter)
C[k, :] = np.squeeze(score)
dataSig = coef[..., np.newaxis] * \
score.reshape([1] * (Y.ndim - 1) + [-1])
xySig = np.meshgrid(*[np.arange(s[0], s[1])
for s in ijSig], indexing='xy')
arr = np.array([np.reshape(s, (1, np.size(s)), order='F').squeeze()
for s in xySig], dtype=int)
indices = np.ravel_multi_index(arr, d[0:-1], order='F')
A[indices, k] = np.reshape(
coef, (1, np.size(coef)), order='C').squeeze()
Y[tuple([slice(*a) for a in ijSig])] -= dataSig.copy()
if k < nr - 1 or seed_method.lower() != 'auto':
Mod = [[np.maximum(ij[c] - 2 * gHalf[c], 0),
np.minimum(ij[c] + 2 * gHalf[c] + 1, d[c])] for c in range(len(ij))]
ModLen = [m[1] - m[0] for m in Mod]
Lag = [ijSig[c] - Mod[c][0] for c in range(len(ij))]
dataTemp = np.zeros(ModLen)
dataTemp[tuple([slice(*a) for a in Lag])] = coef
dataTemp = imblur(dataTemp[..., np.newaxis],
sig=gSig, siz=gSiz, kernel=kernel)
temp = dataTemp * score.reshape([1] * (Y.ndim - 1) + [-1])
rho[tuple([slice(*a) for a in Mod])] -= temp.copy()
if rolling_sum:
rho_filt = scipy.signal.lfilter(
rolling_filter, 1., rho[tuple([slice(*a) for a in Mod])]**2)
v[tuple([slice(*a) for a in Mod])] = np.amax(rho_filt, axis=-1)
else:
v[tuple([slice(*a) for a in Mod])] = \
np.sum(rho[tuple([slice(*a) for a in Mod])]**2, axis=-1)
center = center.tolist()
else:
center = []
if seed_method.lower() in ('manual', 'semi'):
# manually pick seed pixels
while True:
fig = plt.figure(figsize=(13, 12))
ax = plt.axes([.04, .04, .95, .18])
sc_all = []
sc_select = []
plt.axes([0, .25, 1, .7])
sc_all.append(plt.scatter([], [], color='g'))
sc_select.append(plt.scatter([], [], color='r'))
plt.imshow(v, interpolation=None, vmin=np.percentile(v[~np.isnan(v)], 1),
vmax=np.percentile(v[~np.isnan(v)], 99), cmap='gray')
if len(center):
plt.scatter(*np.transpose(center)[::-1], c='b')
plt.axis('off')
plt.suptitle(
'Click to add component. Click again on it to remove it. Press any key to update figure. Add more components, or press any key again when done.')
centers = []
def key_press(event):
plt.close(fig)
def onclick(event):
new_center = int(round(event.xdata)), int(round(event.ydata))
if new_center in centers:
centers.remove(new_center)
else:
centers.append(new_center)
print(centers)
ax.clear()
if len(centers):
ax.plot(Y[centers[-1][1], centers[-1][0]], c='r')
for sc in sc_all:
sc.set_offsets(centers)
for sc in sc_select:
sc.set_offsets(centers[-1:])
else:
for sc in sc_all:
sc.set_offsets(np.zeros((0,2)))
for sc in sc_select:
sc.set_offsets(np.zeros((0,2)))
plt.draw()
cid = fig.canvas.mpl_connect('key_press_event', key_press)
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show(block=True)
if centers == []:
break
centers = np.array(centers)[:,::-1].tolist()
center += centers
# we initialize all values to zero
A_ = np.zeros((np.prod(d[0:-1]), len(centers)), dtype=np.float32)
C_ = np.zeros((len(centers), d[-1]), dtype=np.float32)
for k, ij in enumerate(centers):
# we define a squared size around it
ijSig = [[np.maximum(ij[c] - gHalf[c], 0), np.minimum(ij[c] + gHalf[c] + 1, d[c])]
for c in range(len(ij))]
# we copy the data patch (as float32) and extract the trace at pixel ij through time
dataTemp = np.array(
Y[tuple([slice(*a) for a in ijSig])].copy(), dtype=np.float32)
traceTemp = np.array(np.squeeze(rho[tuple(ij)]), dtype=np.float32)
coef, score = finetune(dataTemp, traceTemp, nIter=nIter)
C_[k, :] = np.squeeze(score)
dataSig = coef[..., np.newaxis] * \
score.reshape([1] * (Y.ndim - 1) + [-1])
xySig = np.meshgrid(*[np.arange(s[0], s[1])
for s in ijSig], indexing='xy')
arr = np.array([np.reshape(s, (1, np.size(s)), order='F').squeeze()
for s in xySig], dtype=int)
indices = np.ravel_multi_index(arr, d[0:-1], order='F')
A_[indices, k] = np.reshape(
coef, (1, np.size(coef)), order='C').squeeze()
Y[tuple([slice(*a) for a in ijSig])] -= dataSig.copy()
Mod = [[np.maximum(ij[c] - 2 * gHalf[c], 0),
np.minimum(ij[c] + 2 * gHalf[c] + 1, d[c])] for c in range(len(ij))]
ModLen = [m[1] - m[0] for m in Mod]
Lag = [ijSig[c] - Mod[c][0] for c in range(len(ij))]
dataTemp = np.zeros(ModLen)
dataTemp[tuple([slice(*a) for a in Lag])] = coef
dataTemp = imblur(dataTemp[..., np.newaxis],
sig=gSig, siz=gSiz, kernel=kernel)
temp = dataTemp * score.reshape([1] * (Y.ndim - 1) + [-1])
rho[tuple([slice(*a) for a in Mod])] -= temp.copy()
if rolling_sum:
rho_filt = scipy.signal.lfilter(
rolling_filter, 1., rho[tuple([slice(*a) for a in Mod])]**2)
v[tuple([slice(*a) for a in Mod])] = np.amax(rho_filt, axis=-1)
else:
v[tuple([slice(*a) for a in Mod])] = \
np.sum(rho[tuple([slice(*a) for a in Mod])]**2, axis=-1)
A = np.concatenate([A, A_], 1)
C = np.concatenate([C, C_])
res = np.reshape(Y, (np.prod(d[0:-1]), d[-1]),
order='F') + med.flatten(order='F')[:, None]
# model = NMF(n_components=nb, init='random', random_state=0)
model = NMF(n_components=nb, init='nndsvdar')
b_in = model.fit_transform(np.maximum(res, 0)).astype(np.float32)
f_in = model.components_.astype(np.float32)
return A, C, np.array(center, dtype='uint16'), b_in, f_in
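# Added illustrative sketch: automatic seeding on a random movie; returned
# spatial components are columnwise-flattened (order='F').
def _example_greedy_roi():
    Y = np.random.rand(32, 32, 80).astype(np.float32)
    A, C, centers, b, f = greedyROI(Y, nr=4, gSig=[3, 3], gSiz=[7, 7])
    return A.shape, C.shape  # ((1024, 4), (4, 80))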
#%%
def finetune(Y, cin, nIter=5):
"""compute a initialized version of A and C
Args:
Y: D1*d2*T*K patches
c: array T*K
the inital calcium traces
nIter: int
True indicates that time is listed in the last axis of Y (matlab format)
and moves it in the front
Returns:
a: array (d1,D2) the computed A as l2(Y*C)/Y*C
c: array(T) C as the sum of As on x*y axis
"""
debug_ = False
if debug_:
import os
f = open('_LOG_1_' + str(os.getpid()), 'w+')
f.write('Y:' + str(np.mean(Y)) + '\n')
f.write('cin:' + str(np.mean(cin)) + '\n')
f.close()
# alternately update the (nonnegative) spatial patch and temporal trace
for _ in range(nIter):
a = np.maximum(np.dot(Y, cin), 0)
a = old_div(a, np.sqrt(np.sum(a**2)) +
np.finfo(np.float32).eps)  # normalize a to unit l2 norm
# update the trace as the projection of Y onto the current footprint
cin = np.sum(Y * a[..., np.newaxis], tuple(np.arange(Y.ndim - 1)))
return a, cin
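# Added illustrative sketch: on an exactly rank-1 patch, finetune recovers a
# unit-l2-norm footprint and the matching trace.
def _example_finetune():
    a_true = np.random.rand(5, 5).astype(np.float32)
    c_true = np.random.rand(40).astype(np.float32)
    Y = a_true[..., None] * c_true[None, None, :]
    a, c = finetune(Y, c_true.copy(), nIter=5)
    return a.shape, c.shape  # ((5, 5), (40,))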
def imblur(Y, sig=5, siz=11, nDimBlur=None, kernel=None, opencv=True):
"""
Spatial filtering with a Gaussian or user defined kernel
The parameters are specified in GreedyROI
Args:
Y: np.ndarray
d1 x d2 [x d3] x T movie, raw data.
sig: [optional] list,tuple
half size of neurons
siz: [optional] list,tuple
size of kernel (default 2*tau + 1).
nDimBlur: [optional]
if you want to specify the number of dimension
kernel: [optional]
if you want to specify a kernel
opencv: [optional]
if you want to process to the blur using open cv method
Returns:
the blurred image
"""
# TODO: document (jerem)
if kernel is None:
if nDimBlur is None:
nDimBlur = Y.ndim - 1
else:
nDimBlur = np.min((Y.ndim, nDimBlur))
if np.isscalar(sig):
sig = sig * np.ones(nDimBlur)
if np.isscalar(siz):
siz = siz * np.ones(nDimBlur)
X = Y.copy()
if opencv and nDimBlur == 2:
if X.ndim > 2:
# if we are on a video we repeat for each frame
for frame in range(X.shape[-1]):
if sys.version_info >= (3, 0):
X[:, :, frame] = cv2.GaussianBlur(X[:, :, frame], tuple(
siz), sig[0], None, sig[1], cv2.BORDER_CONSTANT)
else:
X[:, :, frame] = cv2.GaussianBlur(X[:, :, frame], tuple(siz), sig[
0], sig[1], cv2.BORDER_CONSTANT, 0)
else:
if sys.version_info >= (3, 0):
X = cv2.GaussianBlur(
X, tuple(siz), sig[0], None, sig[1], cv2.BORDER_CONSTANT)
else:
X = cv2.GaussianBlur(
X, tuple(siz), sig[0], sig[1], cv2.BORDER_CONSTANT, 0)
else:
for i in range(nDimBlur):
h = np.exp(
old_div(-np.arange(-np.floor(old_div(siz[i], 2)),
np.floor(old_div(siz[i], 2)) + 1)**2, (2 * sig[i]**2)))
h /= np.sqrt(h.dot(h))
shape = [1] * len(Y.shape)
shape[i] = -1
X = correlate(X, h.reshape(shape), mode='constant')
else:
X = correlate(Y, kernel[..., np.newaxis], mode='constant')
# for t in range(np.shape(Y)[-1]):
# X[:,:,t] = correlate(Y[:,:,t],kernel,mode='constant', cval=0.0)
return X
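# Added illustrative sketch: Gaussian blur of each frame of a short movie
# (for the OpenCV path, pass the kernel size as odd integers).
def _example_imblur():
    Y = np.random.rand(20, 20, 10).astype(np.float32)
    return imblur(Y, sig=2, siz=[7, 7]).shape  # (20, 20, 10)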
def hals(Y, A, C, b, f, bSiz=3, maxIter=5):
""" Hierarchical alternating least square method for solving NMF problem
Y = A*C + b*f
Args:
Y: d1 X d2 [X d3] X T, raw data.
It will be reshaped to (d1*d2[*d3]) X T in this
function
A: (d1*d2[*d3]) X K, initial value of spatial components
C: K X T, initial value of temporal components
b: (d1*d2[*d3]) X nb, initial value of background spatial component
f: nb X T, initial value of background temporal component
bSiz: int or tuple of int
blur size. A box kernel (bSiz X bSiz [X bSiz]) (if int) or bSiz (if tuple) will
be convolved with each neuron's initial spatial component, then all nonzero
pixels will be picked as pixels to be updated, and the rest will be
forced to be 0.
maxIter: maximum iteration of iterating HALS.
Returns:
the updated A, C, b, f
Authors:
Johannes Friedrich, Andrea Giovannucci
See Also:
http://proceedings.mlr.press/v39/kimura14.pdf
"""
# smooth the components
dims, T = np.shape(Y)[:-1], np.shape(Y)[-1]
K = A.shape[1] # number of neurons
nb = b.shape[1] # number of background components
if bSiz is not None:
if isinstance(bSiz, (int, float)):
bSiz = [bSiz] * len(dims)
ind_A = nd.filters.uniform_filter(np.reshape(A,
dims + (K,), order='F'), size=bSiz + [0])
ind_A = np.reshape(ind_A > 1e-10, (np.prod(dims), K), order='F')
else:
ind_A = A>1e-10
ind_A = spr.csc_matrix(ind_A) # indicator of nonzero pixels
def HALS4activity(Yr, A, C, iters=2):
U = A.T.dot(Yr)
V = A.T.dot(A) + np.finfo(A.dtype).eps
for _ in range(iters):
for m in range(len(U)): # neurons and background
C[m] = np.clip(C[m] + (U[m] - V[m].dot(C)) /
V[m, m], 0, np.inf)
return C
def HALS4shape(Yr, A, C, iters=2):
U = C.dot(Yr.T)
V = C.dot(C.T) + np.finfo(C.dtype).eps
for _ in range(iters):
for m in range(K): # neurons
ind_pixels = np.squeeze(ind_A[:, m].toarray())
A[ind_pixels, m] = np.clip(A[ind_pixels, m] +
((U[m, ind_pixels] - V[m].dot(A[ind_pixels].T)) /
V[m, m]), 0, np.inf)
for m in range(nb): # background
A[:, K + m] = np.clip(A[:, K + m] + ((U[K + m] - V[K + m].dot(A.T)) /
V[K + m, K + m]), 0, np.inf)
return A
Ab = np.c_[A, b]
Cf = np.r_[C, f.reshape(nb, -1)]
for _ in range(maxIter):
Cf = HALS4activity(np.reshape(
Y, (np.prod(dims), T), order='F'), Ab, Cf)
Ab = HALS4shape(np.reshape(Y, (np.prod(dims), T), order='F'), Ab, Cf)
return Ab[:, :-nb], Cf[:-nb], Ab[:, -nb:], Cf[-nb:].reshape(nb, -1)
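# Added illustrative sketch: a few HALS refinement passes on random initial
# estimates; input and output shapes match.
def _example_hals():
    dims, T, K = (16, 16), 60, 3
    Y = np.random.rand(dims[0], dims[1], T).astype(np.float32)
    A0 = np.random.rand(np.prod(dims), K).astype(np.float32)
    C0 = np.random.rand(K, T).astype(np.float32)
    b0 = np.random.rand(np.prod(dims), 1).astype(np.float32)
    f0 = np.random.rand(1, T).astype(np.float32)
    A, C, b, f = hals(Y, A0, C0, b0, f0, maxIter=3)
    return A.shape, C.shape  # ((256, 3), (3, 60))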
@profile
def greedyROI_corr(Y, Y_ds, max_number=None, gSiz=None, gSig=None, center_psf=True,
min_corr=None, min_pnr=None, seed_method='auto',
min_pixel=3, bd=0, thresh_init=2, ring_size_factor=None, nb=1, options=None,
sn=None, save_video=False, video_name='initialization.mp4', ssub=1,
ssub_B=2, init_iter=2):
"""
initialize neurons based on pixels' local correlations and peak-to-noise ratios.
Args:
*** see init_neurons_corr_pnr for descriptions of following input arguments ***
data:
max_number:
gSiz:
gSig:
center_psf:
min_corr:
min_pnr:
seed_method:
min_pixel:
bd:
thresh_init:
swap_dim:
save_video:
video_name:
*** see init_neurons_corr_pnr for descriptions of above input arguments ***
ring_size_factor: float
it's the ratio between the ring radius and neuron diameters.
ring_model: Boolean
True indicates using ring model to estimate the background
components.
nb: integer
number of background components for approximating the background using NMF model
for nb=0 the exact background of the ringmodel (b0 and W) is returned
for nb=-1 the full rank background B is returned
for nb<-1 no background is returned
ssub_B: int, optional
downsampling factor for 1-photon imaging background computation
init_iter: int, optional
number of iterations for 1-photon imaging initialization
"""
if min_corr is None or min_pnr is None:
raise Exception(
'Either min_corr or min_pnr is None. Both of them must be real numbers.')
logging.info('One photon initialization (GreedyCorr)')
o = options['temporal_params'].copy()
o['s_min'] = None
if o['p'] > 1:
o['p'] = 1
A, C, _, _, center = init_neurons_corr_pnr(
Y_ds, max_number=max_number, gSiz=gSiz, gSig=gSig,
center_psf=center_psf, min_corr=min_corr,
min_pnr=min_pnr * np.sqrt(np.size(Y) / np.size(Y_ds)),
seed_method=seed_method, deconvolve_options=o,
min_pixel=min_pixel, bd=bd, thresh_init=thresh_init,
swap_dim=True, save_video=save_video, video_name=video_name)
dims = Y.shape[:2]
T = Y.shape[-1]
d1, d2, total_frames = Y_ds.shape
tsub = int(round(float(T) / total_frames))
B = Y_ds.reshape((-1, total_frames), order='F') - A.dot(C)
if ring_size_factor is not None:
# background according to ringmodel
logging.info('Computing ring model background')
W, b0 = compute_W(Y_ds.reshape((-1, total_frames), order='F'),
A, C, (d1, d2), ring_size_factor * gSiz, ssub=ssub_B)
def compute_B(b0, W, B): # actually computes -B to efficiently compute Y-B in place
if ssub_B == 1:
B = -b0[:, None] - W.dot(B - b0[:, None]) # "-B"
else:
B = -b0[:, None] - (np.repeat(np.repeat(W.dot(
downscale(B.reshape((d1, d2, B.shape[-1]), order='F'),
(ssub_B, ssub_B, 1)).reshape((-1, B.shape[-1]), order='F') -
downscale(b0.reshape((d1, d2), order='F'),
(ssub_B, ssub_B)).reshape((-1, 1), order='F'))
.reshape(((d1 - 1) // ssub_B + 1, (d2 - 1) // ssub_B + 1, -1), order='F'),
ssub_B, 0), ssub_B, 1)[:d1, :d2].reshape((-1, B.shape[-1]), order='F')) # "-B"
return B
B = compute_B(b0, W, B) # "-B"
B += Y_ds.reshape((-1, total_frames), order='F') # "Y-B"
logging.info('Updating spatial components')
A, _, C, _ = caiman.source_extraction.cnmf.spatial.update_spatial_components(
B, C=C, f=np.zeros((0, total_frames), np.float32), A_in=A,
sn=np.sqrt(downscale((sn**2).reshape(dims, order='F'),
tuple([ssub] * len(dims))).ravel() / tsub) / ssub,
b_in=np.zeros((d1 * d2, 0), np.float32),
dview=None, dims=(d1, d2), **options['spatial_params'])
logging.info('Updating temporal components')
C, A = caiman.source_extraction.cnmf.temporal.update_temporal_components(
B, spr.csc_matrix(A, dtype=np.float32),
np.zeros((d1 * d2, 0), np.float32),
C, np.zeros((0, total_frames), np.float32),
dview=None, bl=None, c1=None, sn=None, g=None, **o)[:2]
# find more neurons in residual
# print('Compute Residuals')
for i in range(init_iter - 1):
if max_number is not None:
max_number -= A.shape[-1]
if max_number != 0:
if i == init_iter-2 and seed_method.lower()[:4] == 'semi':
seed_method, min_corr, min_pnr = 'manual', 0, 0
logging.info('Searching for more neurons in the residual')
A_R, C_R, _, _, center_R = init_neurons_corr_pnr(
(B - A.dot(C)).reshape(Y_ds.shape, order='F'),
max_number=max_number, gSiz=gSiz, gSig=gSig,
center_psf=center_psf, min_corr=min_corr, min_pnr=min_pnr,
seed_method=seed_method, deconvolve_options=o,
min_pixel=min_pixel, bd=bd, thresh_init=thresh_init,
swap_dim=True, save_video=save_video, video_name=video_name)
A = spr.coo_matrix(np.concatenate((A.toarray(), A_R), 1))
C = np.concatenate((C, C_R), 0)
# 1st iteration on decimated data
logging.info('Merging components')
A, C = caiman.source_extraction.cnmf.merging.merge_components(
B, A, [], C, None, [], C, [], o, options['spatial_params'],
dview=None, thr=options['merging']['merge_thr'], mx=np.Inf, fast_merge=True)[:2]
A = A.astype(np.float32)
C = C.astype(np.float32)
logging.info('Updating spatial components')
A, _, C, _ = caiman.source_extraction.cnmf.spatial.update_spatial_components(
B, C=C, f=np.zeros((0, total_frames), np.float32), A_in=A,
sn=np.sqrt(downscale((sn**2).reshape(dims, order='F'),
tuple([ssub] * len(dims))).ravel() / tsub) / ssub,
b_in=np.zeros((d1 * d2, 0), np.float32),
dview=None, dims=(d1, d2), **options['spatial_params'])
A = A.astype(np.float32)
logging.info('Updating temporal components')
C, A = caiman.source_extraction.cnmf.temporal.update_temporal_components(
B, spr.csc_matrix(A),
np.zeros((d1 * d2, 0), np.float32),
C, np.zeros((0, total_frames), np.float32),
dview=None, bl=None, c1=None, sn=None, g=None, **o)[:2]
logging.info('Recomputing background')
# background according to ringmodel
W, b0 = compute_W(Y_ds.reshape((-1, total_frames), order='F'),
A, C, (d1, d2), ring_size_factor * gSiz, ssub=ssub_B)
# 2nd iteration on non-decimated data
K = C.shape[0]
if T > total_frames:
C = np.repeat(C, tsub, 1)[:, :T]
Ys = (Y if ssub == 1 else downscale(
Y, (ssub, ssub, 1))).reshape((-1, T), order='F')
# N.B: upsampling B in space is fine, but upsampling in time doesn't work well,
# because the error in the upsampled background can be of similar size as the neural signal
B = Ys - A.dot(C)
else:
B = Y_ds.reshape((-1, T), order='F') - A.dot(C)
B = compute_B(b0, W, B) # "-B"
if nb > 0 or nb == -1:
B0 = -B
if ssub > 1:
B = np.reshape(B, (d1, d2, -1), order='F')
B = (np.repeat(np.repeat(B, ssub, 0), ssub, 1)[:dims[0], :dims[1]]
.reshape((-1, T), order='F'))
A = A.toarray().reshape((d1, d2, K), order='F')
A = spr.csc_matrix(np.repeat(np.repeat(A, ssub, 0), ssub, 1)[:dims[0], :dims[1]]
.reshape((np.prod(dims), K), order='F'))
B += Y.reshape((-1, T), order='F') # "Y-B"
logging.info('Merging components')
A, C = caiman.source_extraction.cnmf.merging.merge_components(
B, A, [], C, None, [], C, [], o, options['spatial_params'],
dview=None, thr=options['merging']['merge_thr'], mx=np.Inf, fast_merge=True)[:2]
A = A.astype(np.float32)
C = C.astype(np.float32)
logging.info('Updating spatial components')
options['spatial_params']['se'] = np.ones((1,) * len((d1, d2)), dtype=np.uint8)
A, _, C, _ = caiman.source_extraction.cnmf.spatial.update_spatial_components(
B, C=C, f=np.zeros((0, T), np.float32), A_in=A, sn=sn,
b_in=np.zeros((np.prod(dims), 0), np.float32),
dview=None, dims=dims, **options['spatial_params'])
logging.info('Updating temporal components')
C, A, b__, f__, S, bl, c1, neurons_sn, g1, YrA, lam__ = \
caiman.source_extraction.cnmf.temporal.update_temporal_components(
B, spr.csc_matrix(A, dtype=np.float32),
np.zeros((np.prod(dims), 0), np.float32), C, np.zeros((0, T), np.float32),
dview=None, bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
A = A.toarray()
if nb > 0 or nb == -1:
B = B0
use_NMF = True
if nb == -1:
logging.info('Returning full background')
b_in = spr.eye(len(B), dtype='float32')
f_in = B
elif nb > 0:
logging.info('Estimate low rank background (rank = {0})'.format(nb))
print(nb)
if use_NMF:
model = NMF(n_components=nb, init='nndsvdar')
b_in = model.fit_transform(np.maximum(B, 0))
# f_in = model.components_.squeeze()
f_in = np.linalg.lstsq(b_in, B)[0]
else:
b_in, s_in, f_in = spr.linalg.svds(B, k=nb)
f_in *= s_in[:, np.newaxis]
else:
b_in = np.empty((A.shape[0], 0))
f_in = np.empty((0, T))
if nb == 0:
logging.info('Returning background as b0 and W')
return (A, C, center.T, b_in.astype(np.float32), f_in.astype(np.float32),
(S.astype(np.float32), bl, c1, neurons_sn, g1, YrA, lam__,
W, b0))
else:
logging.info("Not returning background")
return (A, C, center.T, b_in.astype(np.float32), f_in.astype(np.float32),
None if ring_size_factor is None else
(S.astype(np.float32), bl, c1, neurons_sn, g1, YrA, lam__))
@profile
def init_neurons_corr_pnr(data, max_number=None, gSiz=15, gSig=None,
center_psf=True, min_corr=0.8, min_pnr=10,
seed_method='auto', deconvolve_options=None,
min_pixel=3, bd=1, thresh_init=2, swap_dim=True,
save_video=False, video_name='initialization.mp4',
background_filter='disk'):
"""
using a greedy method to initialize neurons by selecting pixels with large
local correlation and large peak-to-noise ratio
Args:
data: np.ndarray (3D)
the data used for initializing neurons. its dimension can be
d1*d2*T or T*d1*d2. If it's the latter, swap_dim should be
False; otherwise, True.
max_number: integer
maximum number of neurons to be detected. If None, then the
algorithm will stop when all pixels are below the thresholds.
gSiz: float
average diameter of a neuron
gSig: float number or a vector with two elements.
gaussian width of the gaussian kernel used for spatial filtering.
center_psf: Boolean
True indicates centering the filtering kernel for background
removal. This is useful for data with large background
fluctuations.
min_corr: float
minimum local correlation coefficients for selecting a seed pixel.
min_pnr: float
minimum peak-to-noise ratio for selecting a seed pixel.
seed_method: str {'auto', 'manual'}
methods for choosing seed pixels
if running as a notebook, 'manual' requires a backend that does not
inline figures, e.g. %matplotlib tk
deconvolve_options: dict
all options for deconvolving temporal traces.
min_pixel: integer
minimum number of nonzero pixels for one neuron.
bd: integer
pixels that are bd pixels away from the boundary will be ignored for initializing neurons.
thresh_init: float
pixel values smaller than thresh_init*noise will be set as 0
when computing the local correlation image.
swap_dim: Boolean
True indicates that time is listed in the last axis of Y (matlab
format)
save_video: Boolean
save the initialization procedure if it's True
video_name: str
name of the video to be saved.
Returns:
A: np.ndarray (d1*d2, K)
spatial components of all neurons
C: np.ndarray (K*T)
nonnegative and denoised temporal components of all neurons
C_raw: np.ndarray (K*T)
raw calcium traces of all neurons
S: np.ndarray (K*T)
deconvolved calcium traces of all neurons
center: np.ndarray
center locations of all neurons
"""
if swap_dim:
d1, d2, total_frames = data.shape
data_raw = np.transpose(data, [2, 0, 1])
else:
total_frames, d1, d2 = data.shape
data_raw = data
data_filtered = data_raw.copy()
if gSig:
# spatially filter data
if not isinstance(gSig, list):
gSig = [gSig, gSig]
ksize = tuple([int(2 * i) * 2 + 1 for i in gSig])
# create a spatial filter for removing background
if center_psf:
if background_filter == 'box':
for idx, img in enumerate(data_filtered):
data_filtered[idx, ] = cv2.GaussianBlur(
img, ksize=ksize, sigmaX=gSig[0], sigmaY=gSig[1], borderType=1) \
- cv2.boxFilter(img, ddepth=-1, ksize=ksize, borderType=1)
else:
psf = cv2.getGaussianKernel(ksize[0], gSig[0], cv2.CV_32F).dot(
cv2.getGaussianKernel(ksize[1], gSig[1], cv2.CV_32F).T)
ind_nonzero = psf >= psf[0].max()
psf -= psf[ind_nonzero].mean()
psf[~ind_nonzero] = 0
for idx, img in enumerate(data_filtered):
data_filtered[idx, ] = cv2.filter2D(img, -1, psf, borderType=1)
else:
for idx, img in enumerate(data_filtered):
data_filtered[idx, ] = cv2.GaussianBlur(img, ksize=ksize, sigmaX=gSig[0],
sigmaY=gSig[1], borderType=1)
# compute peak-to-noise ratio
data_filtered -= data_filtered.mean(axis=0)
data_max = np.max(data_filtered, axis=0)
noise_pixel = get_noise_fft(data_filtered.T, noise_method='mean')[0].T
pnr = np.divide(data_max, noise_pixel)
# remove small values and only keep pixels with large fluorescence signals
tmp_data = np.copy(data_filtered)
tmp_data[tmp_data < thresh_init * noise_pixel] = 0
# compute correlation image
cn = caiman.summary_images.local_correlations_fft(tmp_data, swap_dim=False)
del(tmp_data)
# cn[np.isnan(cn)] = 0 # remove abnormal pixels
# make required copy here, after memory intensive computation of cn
data_raw = data_raw.copy()
# screen seed pixels as neuron centers
v_search = cn * pnr
v_search[(cn < min_corr) | (pnr < min_pnr)] = 0
ind_search = (v_search <= 0) # indicate whether the pixel has
# been searched before. pixels with low correlations or low PNRs are
# ignored directly. ind_search[i]=0 means the i-th pixel is still under
# consideration as a seed pixel
# pixels near the boundaries are ignored because of artifacts
ind_bd = np.zeros(shape=(d1, d2)).astype(bool)  # indicate boundary pixels
if bd > 0:
ind_bd[:bd, :] = True
ind_bd[-bd:, :] = True
ind_bd[:, :bd] = True
ind_bd[:, -bd:] = True
ind_search[ind_bd] = 1
# creating variables for storing the results
if not max_number:
# maximum number of neurons
max_number = np.int32((ind_search.size - ind_search.sum()) / 5)
Ain = np.zeros(shape=(max_number, d1, d2),
dtype=np.float32) # neuron shapes
Cin = np.zeros(shape=(max_number, total_frames),
dtype=np.float32) # de-noised traces
Sin = np.zeros(shape=(max_number, total_frames),
dtype=np.float32) # spiking # activity
Cin_raw = np.zeros(shape=(max_number, total_frames),
dtype=np.float32) # raw traces
center = np.zeros(shape=(2, max_number)) # neuron centers
num_neurons = 0 # number of initialized neurons
continue_searching = max_number > 0
min_v_search = min_corr * min_pnr
[ii, jj] = np.meshgrid(range(d2), range(d1))
pixel_v = ((ii * 10 + jj) * 1e-5).astype(np.float32)
if save_video:
FFMpegWriter = animation.writers['ffmpeg']
metadata = dict(title='Initialization procedure', artist='CaImAn',
comment='CaImAn is cool!')
writer = FFMpegWriter(fps=2, metadata=metadata)
# visualize the initialization procedure.
fig = plt.figure(figsize=(12, 8), facecolor=(0.9, 0.9, 0.9))
# with writer.saving(fig, "initialization.mp4", 150):
writer.setup(fig, video_name, 150)
ax_cn = plt.subplot2grid((2, 3), (0, 0))
ax_cn.imshow(cn)
ax_cn.set_title('Correlation')
ax_cn.set_axis_off()
ax_pnr_cn = plt.subplot2grid((2, 3), (0, 1))
ax_pnr_cn.imshow(cn * pnr)
ax_pnr_cn.set_title('Correlation*PNR')
ax_pnr_cn.set_axis_off()
ax_cn_box = plt.subplot2grid((2, 3), (0, 2))
ax_cn_box.imshow(cn)
ax_cn_box.set_xlim([54, 63])
ax_cn_box.set_ylim([54, 63])
ax_cn_box.set_title('Correlation')
ax_cn_box.set_axis_off()
ax_traces = plt.subplot2grid((2, 3), (1, 0), colspan=3)
ax_traces.set_title('Activity at the seed pixel')
writer.grab_frame()
all_centers = []
while continue_searching:
if seed_method.lower() == 'manual':
# manually pick seed pixels
fig = plt.figure(figsize=(14,6))
ax = plt.axes([.03, .05, .96, .22])
sc_all = []
sc_select = []
for i in range(3):
plt.axes([.01+.34*i, .3, .3, .61])
sc_all.append(plt.scatter([],[], color='g'))
sc_select.append(plt.scatter([],[], color='r'))
title = ('corr*pnr', 'correlation (corr)', 'peak-noise-ratio (pnr)')[i]
img = (v_search, cn, pnr)[i]
plt.imshow(img, interpolation=None, vmin=np.percentile(img[~np.isnan(img)], 1),
vmax=np.percentile(img[~np.isnan(img)], 99), cmap='gray')
if len(all_centers):
plt.scatter(*np.transpose(all_centers), c='b')
plt.axis('off')
plt.title(title)
plt.suptitle('Click to add component. Click again on it to remove it. Press any key to update figure. Add more components, or press any key again when done.')
centers = []
def key_press(event):
plt.close(fig)
def onclick(event):
new_center = int(round(event.xdata)), int(round(event.ydata))
if new_center in centers:
centers.remove(new_center)
else:
centers.append(new_center)
print(centers)
ax.clear()
if len(centers):
ax.plot(data_filtered[:, centers[-1][1], centers[-1][0]], c='r')
for sc in sc_all:
sc.set_offsets(centers)
for sc in sc_select:
sc.set_offsets(centers[-1:])
else:
for sc in sc_all:
sc.set_offsets(np.zeros((0,2)))
for sc in sc_select:
sc.set_offsets(np.zeros((0,2)))
plt.draw()
cid = fig.canvas.mpl_connect('key_press_event', key_press)
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show(block=True)
if centers == []:
break
all_centers += centers
csub_max, rsub_max = np.transpose(centers)
tmp_kernel = np.ones(shape=tuple([int(round(gSiz / 4.))] * 2))
v_max = cv2.dilate(v_search, tmp_kernel)
local_max = v_max[rsub_max, csub_max]
ind_local_max = local_max.argsort()[::-1]
else:
# local maximum, for identifying seed pixels in following steps
v_search[(cn < min_corr) | (pnr < min_pnr)] = 0
# add an extra value to avoid repeated seed pixels within one ROI.
v_search = cv2.medianBlur(v_search, 3) + pixel_v
v_search[ind_search] = 0
tmp_kernel = np.ones(shape=tuple([int(round(gSiz / 4.))] * 2))
v_max = cv2.dilate(v_search, tmp_kernel)
# automatically select seed pixels as the local maximums
v_max[(v_search != v_max) | (v_search < min_v_search)] = 0
v_max[ind_search] = 0
[rsub_max, csub_max] = v_max.nonzero() # subscript of seed pixels
local_max = v_max[rsub_max, csub_max]
n_seeds = len(local_max) # number of candidates
if n_seeds == 0:
# no more candidates for seed pixels
break
else:
# order seed pixels according to their corr * pnr values
ind_local_max = local_max.argsort()[::-1]
img_vmax = np.median(local_max)
# try to initialize neurons given all seed pixels
for ith_seed, idx in enumerate(ind_local_max):
r = rsub_max[idx]
c = csub_max[idx]
ind_search[r, c] = True # this pixel won't be searched
if v_search[r, c] < min_v_search:
# skip this pixel if it's not sufficient for being a seed pixel
continue
# roughly check whether this is a good seed pixel
# y0 = data_filtered[:, r, c]
# if np.max(y0) < thresh_init * noise_pixel[r, c]:
# v_search[r, c] = 0
# continue
y0 = np.diff(data_filtered[:, r, c])
if y0.max() < 3 * y0.std():
v_search[r, c] = 0
continue
# if Ain[:, r, c].sum() > 0 and np.max([scipy.stats.pearsonr(y0, cc)[0]
# for cc in Cin_raw[Ain[:, r, c] > 0]]) > .7:
# v_search[r, c] = 0
# continue
# crop a small box for estimation of ai and ci
r_min = max(0, r - gSiz)
r_max = min(d1, r + gSiz + 1)
c_min = max(0, c - gSiz)
c_max = min(d2, c + gSiz + 1)
nr = r_max - r_min
nc = c_max - c_min
patch_dims = (nr, nc) # patch dimension
data_raw_box = \
data_raw[:, r_min:r_max, c_min:c_max].reshape(-1, nr * nc)
data_filtered_box = \
data_filtered[:, r_min:r_max, c_min:c_max].reshape(-1, nr * nc)
# index of the seed pixel in the cropped box
ind_ctr = np.ravel_multi_index((r - r_min, c - c_min),
dims=(nr, nc))
# neighbouring pixels to update after initializing one neuron
r2_min = max(0, r - 2 * gSiz)
r2_max = min(d1, r + 2 * gSiz + 1)
c2_min = max(0, c - 2 * gSiz)
c2_max = min(d2, c + 2 * gSiz + 1)
if save_video:
ax_pnr_cn.cla()
ax_pnr_cn.imshow(v_search, vmin=0, vmax=img_vmax)
ax_pnr_cn.set_title('Neuron %d' % (num_neurons + 1))
ax_pnr_cn.set_axis_off()
ax_pnr_cn.plot(csub_max[ind_local_max[ith_seed:]], rsub_max[
ind_local_max[ith_seed:]], '.r', ms=5)
ax_pnr_cn.plot(c, r, 'or', markerfacecolor='red')
ax_cn_box.imshow(cn[r_min:r_max, c_min:c_max], vmin=0, vmax=1)
ax_cn_box.set_title('Correlation')
ax_traces.cla()
ax_traces.plot(y0)
ax_traces.set_title('The fluo. trace at the seed pixel')
writer.grab_frame()
[ai, ci_raw, ind_success] = extract_ac(data_filtered_box,
data_raw_box, ind_ctr, patch_dims)
if (np.sum(ai > 0) < min_pixel) or (not ind_success):
# bad initialization. discard and continue
continue
else:
# cheers! good initialization.
center[:, num_neurons] = [c, r]
Ain[num_neurons, r_min:r_max, c_min:c_max] = ai
Cin_raw[num_neurons] = ci_raw.squeeze()
if deconvolve_options['p']:
# deconvolution
ci, baseline, c1, _, _, si, _ = \
constrained_foopsi(ci_raw, **deconvolve_options)
if ci.sum() == 0:
continue
Cin[num_neurons] = ci
Sin[num_neurons] = si
else:
# no deconvolution
ci = ci_raw.copy()
ci[ci < 0] = 0
if ci.sum() == 0:
continue
Cin[num_neurons] = ci.squeeze()
if save_video:
# mark the seed pixel on the correlation image
ax_cn.plot(c, r, '.r')
ax_cn_box.cla()
ax_cn_box.imshow(ai)
ax_cn_box.set_title('Spatial component')
ax_traces.cla()
ax_traces.plot(ci_raw)
ax_traces.plot(ci, 'r')
ax_traces.set_title('Temporal component')
writer.grab_frame()
# avoid searching nearby pixels
ind_search[r_min:r_max, c_min:c_max] += (ai > ai.max() / 2)
                # remove the spatiotemporal activity of the initialized neuron
                # and update the correlation image & PNR image
# update the raw data
data_raw[:, r_min:r_max, c_min:c_max] -= \
ai[np.newaxis, ...] * ci[..., np.newaxis, np.newaxis]
if gSig:
# spatially filter the neuron shape
tmp_img = Ain[num_neurons, r2_min:r2_max, c2_min:c2_max]
if center_psf:
if background_filter == 'box':
ai_filtered = cv2.GaussianBlur(tmp_img, ksize=ksize, sigmaX=gSig[0],
sigmaY=gSig[1], borderType=1) \
- cv2.boxFilter(tmp_img, ddepth=-1, ksize=ksize, borderType=1)
else:
ai_filtered = cv2.filter2D(tmp_img, -1, psf, borderType=1)
else:
ai_filtered = cv2.GaussianBlur(tmp_img, ksize=ksize, sigmaX=gSig[0],
sigmaY=gSig[1], borderType=1)
# update the filtered data
data_filtered[:, r2_min:r2_max, c2_min:c2_max] -= \
ai_filtered[np.newaxis, ...] * ci[..., np.newaxis, np.newaxis]
data_filtered_box = data_filtered[:, r2_min:r2_max, c2_min:c2_max].copy()
else:
data_filtered_box = data_raw[:, r2_min:r2_max, c2_min:c2_max].copy()
# update PNR image
# data_filtered_box -= data_filtered_box.mean(axis=0)
max_box = np.max(data_filtered_box, axis=0)
noise_box = noise_pixel[r2_min:r2_max, c2_min:c2_max]
pnr_box = np.divide(max_box, noise_box)
pnr[r2_min:r2_max, c2_min:c2_max] = pnr_box
pnr_box[pnr_box < min_pnr] = 0
# update correlation image
data_filtered_box[data_filtered_box <
thresh_init * noise_box] = 0
cn_box = caiman.summary_images.local_correlations_fft(
data_filtered_box, swap_dim=False)
cn_box[np.isnan(cn_box) | (cn_box < 0)] = 0
cn[r_min:r_max, c_min:c_max] = cn_box[
(r_min - r2_min):(r_max - r2_min), (c_min - c2_min):(c_max - c2_min)]
cn_box[cn_box < min_corr] = 0
cn_box = cn[r2_min:r2_max, c2_min:c2_max]
# update v_search
v_search[r2_min:r2_max, c2_min:c2_max] = cn_box * pnr_box
v_search[ind_search] = 0
# avoid searching nearby pixels
# v_search[r_min:r_max, c_min:c_max] *= (ai < np.max(ai) / 2.)
# increase the number of detected neurons
                num_neurons += 1
if num_neurons == max_number:
continue_searching = False
break
else:
if num_neurons % 100 == 1:
logging.info('{0} neurons have been initialized'.format(num_neurons - 1))
logging.info('In total, {0} neurons were initialized.'.format(num_neurons))
# A = np.reshape(Ain[:num_neurons], (-1, d1 * d2)).transpose()
A = np.reshape(Ain[:num_neurons], (-1, d1 * d2), order='F').transpose()
C = Cin[:num_neurons]
C_raw = Cin_raw[:num_neurons]
S = Sin[:num_neurons]
center = center[:, :num_neurons]
if save_video:
plt.close()
writer.finish()
return A, C, C_raw, S, center
@profile
def extract_ac(data_filtered, data_raw, ind_ctr, patch_dims):
# parameters
min_corr_neuron = 0.9 # 7
max_corr_bg = 0.3
data_filtered = data_filtered.copy()
# compute the temporal correlation between each pixel and the seed pixel
data_filtered -= data_filtered.mean(axis=0) # data centering
    tmp_std = np.sqrt(np.sum(data_filtered ** 2, axis=0))  # data normalization
tmp_std[tmp_std == 0] = 1
data_filtered /= tmp_std
y0 = data_filtered[:, ind_ctr] # fluorescence trace at the center
tmp_corr = np.dot(y0.reshape(1, -1), data_filtered) # corr. coeff. with y0
# pixels in the central area of neuron
ind_neuron = (tmp_corr > min_corr_neuron).squeeze()
# pixels outside of neuron's ROI
ind_bg = (tmp_corr < max_corr_bg).squeeze()
# extract temporal activity
ci = np.mean(data_filtered[:, ind_neuron], axis=1)
    # initialize temporal activity of the neuron
if ci.dot(ci) == 0: # avoid empty results
return None, None, False
# roughly estimate the background fluctuation
y_bg = np.median(data_raw[:, ind_bg], axis=1).reshape(-1, 1)\
if np.any(ind_bg) else np.ones((len(ci), 1), np.float32)
# extract spatial components
X = np.concatenate([ci.reshape(-1, 1), y_bg, np.ones(y_bg.shape, np.float32)], 1)
XX = np.dot(X.T, X)
Xy = np.dot(X.T, data_raw)
try:
#ai = np.linalg.inv(XX).dot(Xy)[0]
# ai = np.linalg.solve(XX, Xy)[0]
ai = pd_solve(XX, Xy)[0]
except:
ai = scipy.linalg.lstsq(XX, Xy)[0][0]
ai = ai.reshape(patch_dims)
ai[ai < 0] = 0
# post-process neuron shape
ai = circular_constraint(ai)
ai = connectivity_constraint(ai)
# remove baseline
# ci -= np.median(ci)
sn = get_noise_welch(ci)
y_diff = np.concatenate([[-1], np.diff(ci)])
b = np.median(ci[(y_diff >= 0) * (y_diff < sn)])
ci -= b
# return results
return ai, ci, True
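# Illustrative sketch (not part of the original source): extract_ac obtains the
# spatial footprint by regressing the raw movie on [trace, background, const]
# via the normal equations, as done above with pd_solve. A minimal toy-data
# version using only numpy; all names below are hypothetical:
def _sketch_footprint_regression():
    import numpy as np
    T, npix = 100, 9
    rng = np.random.default_rng(0)
    ci = rng.random(T)                                  # assumed temporal trace
    y_bg = 1.0 + 0.1 * np.sin(np.linspace(0, 3, T))     # slow background term
    X = np.stack([ci, y_bg, np.ones(T)], axis=1)        # regressor matrix
    data = np.outer(ci, rng.random(npix)) + 0.01 * rng.standard_normal((T, npix))
    ai = np.linalg.solve(X.T @ X, X.T @ data)[0]        # first row = footprint
    return np.maximum(ai, 0)                            # clip negatives, as above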
@profile
def compute_W(Y, A, C, dims, radius, data_fits_in_memory=True, ssub=1, tsub=1, parallel=False):
"""compute background according to ring model
solves the problem
min_{W,b0} ||X-W*X|| with X = Y - A*C - b0*1'
subject to
W(i,j) = 0 for each pixel j that is not in ring around pixel i
Problem parallelizes over pixels i
Fluctuating background activity is W*X, constant baselines b0.
Args:
Y: np.ndarray (2D or 3D)
movie, raw data in 2D or 3D (pixels x time).
A: np.ndarray or sparse matrix
spatial footprint of each neuron.
C: np.ndarray
calcium activity of each neuron.
dims: tuple
x, y[, z] movie dimensions
radius: int
radius of ring
data_fits_in_memory: [optional] bool
If true, use faster but more memory consuming computation
ssub: int
spatial downscale factor
tsub: int
temporal downscale factor
parallel: bool
If true, use multiprocessing to process pixels in parallel
Returns:
W: scipy.sparse.csr_matrix (pixels x pixels)
estimate of weight matrix for fluctuating background
b0: np.ndarray (pixels,)
estimate of constant background baselines
"""
if current_process().name != 'MainProcess':
# no parallelization over pixels if already processing patches in parallel
parallel = False
T = Y.shape[1]
d1 = (dims[0] - 1) // ssub + 1
d2 = (dims[1] - 1) // ssub + 1
radius = int(round(radius / float(ssub)))
ring = disk(radius + 1)
ring[1:-1, 1:-1] -= disk(radius)
ringidx = [i - radius - 1 for i in np.nonzero(ring)]
def get_indices_of_pixels_on_ring(pixel):
x = pixel % d1 + ringidx[0]
y = pixel // d1 + ringidx[1]
inside = (x >= 0) * (x < d1) * (y >= 0) * (y < d2)
return x[inside] + y[inside] * d1
b0 = np.array(Y.mean(1)) - A.dot(C.mean(1))
if ssub > 1:
ds_mat = caiman.source_extraction.cnmf.utilities.decimation_matrix(dims, ssub)
ds = lambda x: ds_mat.dot(x)
else:
ds = lambda x: x
if data_fits_in_memory:
if ssub == 1 and tsub == 1:
X = Y - A.dot(C) - b0[:, None]
else:
X = decimate_last_axis(ds(Y), tsub) - \
(ds(A).dot(decimate_last_axis(C, tsub)) if A.size > 0 else 0) - \
ds(b0).reshape((-1, 1), order='F')
def process_pixel(p):
index = get_indices_of_pixels_on_ring(p)
B = X[index]
tmp = np.array(B.dot(B.T))
tmp[np.diag_indices(len(tmp))] += np.trace(tmp) * 1e-5
tmp2 = X[p]
data = pd_solve(tmp, B.dot(tmp2))
return index, data
else:
def process_pixel(p):
index = get_indices_of_pixels_on_ring(p)
if ssub == 1 and tsub == 1:
B = Y[index] - A[index].dot(C) - b0[index, None]
else:
B = decimate_last_axis(ds(Y), tsub)[index] - \
(ds(A)[index].dot(decimate_last_axis(C, tsub)) if A.size > 0 else 0) - \
ds(b0).reshape((-1, 1), order='F')[index]
tmp = np.array(B.dot(B.T))
tmp[np.diag_indices(len(tmp))] += np.trace(tmp) * 1e-5
if ssub == 1 and tsub == 1:
tmp2 = Y[p] - A[p].dot(C).ravel() - b0[p]
else:
tmp2 = decimate_last_axis(ds(Y), tsub)[p] - \
(ds(A)[p].dot(decimate_last_axis(C, tsub)) if A.size > 0 else 0) - \
ds(b0).reshape((-1, 1), order='F')[p]
data = pd_solve(tmp, B.dot(tmp2))
return index, data
Q = list((parmap if parallel else map)(process_pixel, range(d1 * d2)))
indices, data = np.transpose(Q)
indptr = np.concatenate([[0], np.cumsum(list(map(len, indices)))])
indices = np.concatenate(indices)
data = np.concatenate(data)
return spr.csr_matrix((data, indices, indptr), dtype='float32'), b0.astype(np.float32)
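# Illustrative sketch (not part of the original source): the ring mask used by
# compute_W is a dilated disk with its interior removed; the offsets are taken
# relative to the centre pixel, exactly as in the `ringidx` computation above.
def _sketch_ring_offsets(radius=3):
    import numpy as np
    from skimage.morphology import disk  # compute_W relies on `disk` as well
    ring = disk(radius + 1)
    ring[1:-1, 1:-1] -= disk(radius)     # hollow out the interior
    return [i - radius - 1 for i in np.nonzero(ring)]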
#%%
def nnsvd_init(X, n_components, r_ov=10, eps=1e-6, random_state=42):
# NNDSVD initialization from scikit learn package (modified)
U, S, V = randomized_svd(X, n_components + r_ov, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
C = W.T
A = H.T
    return A[:, 1:n_components], C[:n_components], (U, S, V)
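# Illustrative usage sketch (not part of the original source), on a small
# non-negative matrix X of shape (pixels, frames); note that the asymmetric
# slicing of A and C is kept exactly as returned above:
# >>> import numpy as np
# >>> X = np.abs(np.random.randn(60, 40))
# >>> A, C, (U, S, V) = nnsvd_init(X, n_components=5)
# >>> A.shape, C.shape
# ((40, 4), (5, 60))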
#%%
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
|
agiovann/Constrained_NMF
|
caiman/source_extraction/cnmf/initialization.py
|
Python
|
gpl-2.0
| 83,794
|
[
"Gaussian",
"NEURON"
] |
865005a7aee890a50c124be55a9fcf26ddeaafc8c6e80efc213e462dd8b68f91
|
#!/usr/bin/env python3
import logging
import logging.config
import sys
logging.config.fileConfig('../logging.conf')
log = logging.getLogger('memory')
class Memory(object):
"""GameBoy Memory
==============
Memory Map
==========
Start End Description
----- --- -----------
0000 3FFF 16KB ROM bank 00
4000 7FFF 16KB ROM Bank 01~NN
8000 9FFF 8KB Video RAM (VRAM)
A000 BFFF 8KB External RAM
C000 CFFF 4KB Work RAM (WRAM) bank 0
D000 DFFF 4KB Work RAM bank 1~N
E000 FDFF Mirror of C000~DDFF (ECHO)
FE00 FE9F Sprite attribute table (OAM)
FEA0 FEFF Not Usable
FF00 FF7F I/O Registers
FF80 FFFE High RAM (HRAM)
FFFF FFFF Interrupts Enable Register
"""
def __init__(self):
"""Initialise memory."""
self.memory = bytearray(2**16)
def load(self, data, start=0x0000, end=0xFFFF):
"""Given ROM data, loads into memory at given position.
Arguments:
data - ROM data, e.g. GameBoy BIOS or cartridge
start - The first position at which data should be accessible.
end - The last position from which data should be accessible
"""
        # 'end' is documented as the last accessible position, hence end + 1
        for i in range(max(0, start), min(len(data), end + 1)):
            self.memory[i] = data[i]
def write_byte(self, address, byte):
"""Writes 8 bits to address."""
        dbg_msg = '0x{0:02X} at addr 0x{1:04X} being overwritten by 0x{2:02X}'
if address <= 0xFF:
log.critical('Attempting to overwrite bios ROM')
log.debug(dbg_msg.format(self.memory[address], address, byte))
raise IndexError
elif 0x0100 <= address <= 0x3FFF:
log.critical('Attempting to overwrite fixed memory bank')
log.debug(dbg_msg.format(self.memory[address], address, byte))
raise IndexError
self.memory[address] = byte
def read_byte(self, address):
"""Reads 8 bits at address."""
return self.memory[address]
def read_word(self, address):
"""Reads 16 bits starting at address."""
lowbyte = self.memory[address]
highbyte = self.memory[address + 1]
return (highbyte << 8) | lowbyte
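# Illustrative usage sketch (not part of the original source): read_word
# assembles a little-endian 16-bit value from two consecutive bytes.
def _sketch_read_word():
    mem = Memory()
    mem.write_byte(0xC000, 0x34)   # low byte (WRAM, outside the ROM guard)
    mem.write_byte(0xC001, 0x12)   # high byte
    assert mem.read_word(0xC000) == 0x1234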
|
jonwells90/PyBoy
|
PyBoy/memory.py
|
Python
|
mit
| 2,287
|
[
"FEFF"
] |
0fd96477a0296425af68940ce823b8bd4c01f5de10a17eed2a9780e33ce1d6a8
|
"""
Various splitters
"""
import numpy as np
import logging
logger = logging.getLogger(__name__)
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from tqdm import tqdm
def generate_scaffold(smiles, include_chirality=False):
"""
Compute the Bemis-Murcko scaffold for a SMILES string.
Notes
-----
Copied from https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py
"""
mol = Chem.MolFromSmiles(smiles)
if mol is None:
logger.warning("Failedcalculating scaffold for " + smiles)
return 'fail'
scaffold = MurckoScaffold.MurckoScaffoldSmiles(
mol=mol, includeChirality=include_chirality)
return scaffold
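# Illustrative example (not part of the original source): the Murcko scaffold
# strips acyclic side chains, keeping only ring systems and linkers, e.g.
# >>> generate_scaffold('CCc1ccccc1')   # ethylbenzene
# 'c1ccccc1'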
def scaffold_split(smiles, frac_train=.8, seed=777):
"""
    Splits compounds into train/test sets by scaffold.
    Warning: a single very popular scaffold can produce an unbalanced split.
Params
------
smiles: list
List of smiles
frac_train: float, default: 0.8
Float in [0, 1] range indicating size of the training set
seed: int
Used to shuffle smiles before splitting
Notes
-----
Copied from https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py
"""
smiles = list(smiles)
scaffolds = {}
for ind, smi in tqdm(enumerate(smiles), total=len(smiles)):
scaffold = generate_scaffold(smi)
if scaffold not in scaffolds:
scaffolds[scaffold] = [ind]
else:
scaffolds[scaffold].append(ind)
scaffolds_keys = list(scaffolds)
rng = np.random.RandomState(seed)
rng.shuffle(scaffolds_keys)
train_cutoff = frac_train * len(smiles)
train_inds, test_inds = [], []
for scaffold_key in scaffolds_keys:
if len(train_inds) > train_cutoff:
test_inds += scaffolds[scaffold_key]
else:
train_inds += scaffolds[scaffold_key]
return train_inds, test_inds
if __name__ == "__main__":
# Test scaffold splitting
# TODO: Problem for single chain molecules?
# NOTE: Last molecule is viagra
smiles = ['CC(C)(N)Cc1ccccc1', 'CC(C)(Cl)Cc1ccccc1', 'c1ccccc1',
'CCc1nn(C)c2c(=O)[nH]c(nc12)c3cc(ccc3OCC)S(=O)(=O)N4CCN(C)CC4']
splits = scaffold_split(smiles, frac_train=0.5)
    assert splits[0] == [0, 1, 2] or splits[1] == [0, 1, 2], "Scaffolds were not separated correctly"
print(splits)
|
DentonJC/virtual_screening
|
moloi/splits/scaffold_split.py
|
Python
|
gpl-3.0
| 2,394
|
[
"RDKit"
] |
71d7a79065d6ef82c2bcab474c3f1d84c2ae858b4f99f72226fab9a1dc577ad4
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 17 12:41:17 2017
@author: kostenko
Simulate makes fake polychromatic x-ray CT data
"""
#import tomopy.misc
#import tomobox as tw
#import xraydb
# module-level imports so that the static methods below can resolve them
import numpy
import xraylib


class spectra():
    '''
    Simulates spectral phenomena that involve x-ray-matter interaction
    '''
@staticmethod
def total_attenuation(energy, compound):
'''
        Total X-ray absorption for a given compound in cm2/g. Energy is given in keV.
'''
return xraylib.CS_Total_CP(compound, energy)
@staticmethod
def compton(energy, compound):
'''
        Compton scattering cross-section for a given compound in cm2/g. Energy is given in keV.
'''
return xraylib.CS_Compt_CP(compound, energy)
@staticmethod
def rayleigh(energy, compound):
'''
        Rayleigh scattering cross-section for a given compound in cm2/g. Energy is given in keV.
'''
return xraylib.CS_Rayl_CP(compound, energy)
@staticmethod
def photoelectric(energy, compound):
'''
        Photoelectric absorption for a given compound in cm2/g. Energy is given in keV.
'''
return xraylib.CS_Photo_CP(compound, energy)
@staticmethod
def bremsstrahlung(energy, energy_max):
'''
        Simple bremsstrahlung model (Kramers' formula); energy_max is the
        maximum photon energy in keV.
'''
spectrum = energy * (energy_max - energy)
spectrum[spectrum < 0] = 0
# Normalize:
return spectrum / spectrum.max()
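    # Illustrative example (not part of the original source): Kramers'
    # spectrum energy * (energy_max - energy) peaks at energy_max / 2, e.g.
    # >>> import numpy
    # >>> e = numpy.linspace(1, 100, 100)
    # >>> e[spectra.bremsstrahlung(e, energy_max=100).argmax()]
    # 50.0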
@staticmethod
def gaussian_spectrum(energy, energy_mean, energy_sigma):
'''
Generates gaussian-like spectrum with given mean and STD.
'''
return numpy.exp(-(energy - energy_mean)**2 / (2*energy_sigma**2))
@staticmethod
def scintillator_efficiency(energy, compound = 'BaFBr', rho = 5, thickness = 100):
'''
        Generate the QDE (quantum detection efficiency) of a detector (scintillator). Units: keV, g/cm3, micron.
'''
# Thickness to cm:
thickness /= 1e4
# Attenuation by the photoelectric effect:
spectrum = 1 - numpy.exp(- thickness * rho * spectra.photoelectric(energy, compound))
# Normalize:
return spectrum / spectrum.max()
@staticmethod
def transmission(energy, compound, rho, thickness):
'''
        Compute the fraction of x-rays stopped by the filter,
        1 - exp(-mu * rho * thickness). Units: keV, g/cm3, micron.
'''
# Thickness to cm:
thickness /= 1e4
# Attenuation by the photoelectric effect:
return 1 - numpy.exp(- thickness * rho * spectra.total_attenuation(energy, compound))
@staticmethod
def attenuation(energy, compound, rho, thickness):
'''
        Compute the fraction of x-rays transmitted through the filter, exp(-mu * rho * thickness).
'''
# Thickness microns to cm:
thickness /= 1e4
return numpy.exp(- thickness * rho * spectra.total_attenuation(energy, compound))
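# Illustrative note (not part of the original source): as implemented above,
# attenuation() returns the transmitted fraction exp(-mu*rho*t) while
# transmission() returns its complement, so the two always sum to 1:
# >>> t = spectra.attenuation(60.0, 'Al', rho=2.7, thickness=1000)
# >>> a = spectra.transmission(60.0, 'Al', rho=2.7, thickness=1000)
# >>> abs(t + a - 1.0) < 1e-12
# True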
class nist():
@staticmethod
def list_names():
return xraylib.GetCompoundDataNISTList()
@staticmethod
def find_name(compound_name):
return xraylib.GetCompoundDataNISTByName(compound_name)
@staticmethod
    def parse_compound(compound):
        return xraylib.CompoundParser(compound)
class phantom():
'''
Use tomopy phantom module for now
'''
@staticmethod
def shepp3d(sz = 512):
import tomopy.misc
import tomobox
vol = tomobox.volume(tomopy.misc.phantom.shepp3d(sz))
vol.meta.history.add_record('SheppLogan phantom is generated', sz)
return vol
class tomography():
'''
Forward projection into the projection data space
'''
@staticmethod
def project(volume, tomo):
'''
Forward projects a volume into a tomogram
'''
tomo.reconstruct._initialize_astra()
tomo.data._data = tomo.reconstruct._forwardproject(volume.data._data)
tomo.meta.history.add_record('simulate.tomography.project was used to generate the data')
return tomo
'''
class faker():
'''
'''
phantom = []
spectra = []
tomography = []
def __init__(self):
self.phantom = tomopy.misc.phantom
self.spectra = spectra()
self.tomography = tomography()
'''
|
cicwi/tomo_box
|
simulate.py
|
Python
|
gpl-3.0
| 4,567
|
[
"Gaussian"
] |
fad39c62dd0b7b1f8f8e38e94a61a289c21d53a92d47964dc1be0aa28884262b
|
"""Core implementation of the testing process: init, session, runtest loop."""
import argparse
import fnmatch
import functools
import importlib
import os
import sys
from typing import Callable
from typing import Dict
from typing import FrozenSet
from typing import Iterator
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Union
import attr
import py
import _pytest._code
from _pytest import nodes
from _pytest.compat import final
from _pytest.compat import overload
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import directory_arg
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config import PytestPluginManager
from _pytest.config import UsageError
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureManager
from _pytest.outcomes import exit
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
from _pytest.pathlib import Path
from _pytest.pathlib import visit
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
from _pytest.runner import collect_one_node
from _pytest.runner import SetupState
if TYPE_CHECKING:
from typing import Type
from typing_extensions import Literal
def pytest_addoption(parser: Parser) -> None:
parser.addini(
"norecursedirs",
"directory patterns to avoid for recursion",
type="args",
default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
)
parser.addini(
"testpaths",
"directories to search for tests when no files or directories are given in the "
"command line.",
type="args",
default=[],
)
group = parser.getgroup("general", "running and selection options")
group._addoption(
"-x",
"--exitfirst",
action="store_const",
dest="maxfail",
const=1,
help="exit instantly on first error or failed test.",
)
group = parser.getgroup("pytest-warnings")
group.addoption(
"-W",
"--pythonwarnings",
action="append",
help="set which warnings to report, see -W option of python itself.",
)
parser.addini(
"filterwarnings",
type="linelist",
help="Each line specifies a pattern for "
"warnings.filterwarnings. "
"Processed after -W/--pythonwarnings.",
)
group._addoption(
"--maxfail",
metavar="num",
action="store",
type=int,
dest="maxfail",
default=0,
help="exit after first num failures or errors.",
)
group._addoption(
"--strict-config",
action="store_true",
help="any warnings encountered while parsing the `pytest` section of the configuration file raise errors.",
)
group._addoption(
"--strict-markers",
"--strict",
action="store_true",
help="markers not registered in the `markers` section of the configuration file raise errors.",
)
group._addoption(
"-c",
metavar="file",
type=str,
dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit "
"configuration files.",
)
group._addoption(
"--continue-on-collection-errors",
action="store_true",
default=False,
dest="continue_on_collection_errors",
help="Force test execution even if collection errors occur.",
)
group._addoption(
"--rootdir",
action="store",
dest="rootdir",
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
"'$HOME/root_dir'.",
)
group = parser.getgroup("collect", "collection")
group.addoption(
"--collectonly",
"--collect-only",
"--co",
action="store_true",
help="only collect tests, don't execute them.",
)
group.addoption(
"--pyargs",
action="store_true",
help="try to interpret all arguments as python packages.",
)
group.addoption(
"--ignore",
action="append",
metavar="path",
help="ignore path during collection (multi-allowed).",
)
group.addoption(
"--ignore-glob",
action="append",
metavar="path",
help="ignore path pattern during collection (multi-allowed).",
)
group.addoption(
"--deselect",
action="append",
metavar="nodeid_prefix",
help="deselect item (via node id prefix) during collection (multi-allowed).",
)
group.addoption(
"--confcutdir",
dest="confcutdir",
default=None,
metavar="dir",
type=functools.partial(directory_arg, optname="--confcutdir"),
help="only load conftest.py's relative to specified dir.",
)
group.addoption(
"--noconftest",
action="store_true",
dest="noconftest",
default=False,
help="Don't load any conftest.py files.",
)
group.addoption(
"--keepduplicates",
"--keep-duplicates",
action="store_true",
dest="keepduplicates",
default=False,
help="Keep duplicate tests.",
)
group.addoption(
"--collect-in-virtualenv",
action="store_true",
dest="collect_in_virtualenv",
default=False,
help="Don't ignore tests in a local virtualenv directory",
)
group.addoption(
"--import-mode",
default="prepend",
choices=["prepend", "append", "importlib"],
dest="importmode",
help="prepend/append to sys.path when importing test modules and conftest files, "
"default is to prepend.",
)
group = parser.getgroup("debugconfig", "test session debugging and configuration")
group.addoption(
"--basetemp",
dest="basetemp",
default=None,
type=validate_basetemp,
metavar="dir",
help=(
"base temporary directory for this test run."
"(warning: this directory is removed if it exists)"
),
)
def validate_basetemp(path: str) -> str:
# GH 7119
msg = "basetemp must not be empty, the current working directory or any parent directory of it"
# empty path
if not path:
raise argparse.ArgumentTypeError(msg)
def is_ancestor(base: Path, query: Path) -> bool:
"""Return whether query is an ancestor of base."""
if base == query:
return True
for parent in base.parents:
if parent == query:
return True
return False
# check if path is an ancestor of cwd
if is_ancestor(Path.cwd(), Path(path).absolute()):
raise argparse.ArgumentTypeError(msg)
# check symlinks for ancestors
if is_ancestor(Path.cwd().resolve(), Path(path).resolve()):
raise argparse.ArgumentTypeError(msg)
return path
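# Illustrative note (not part of the original source): both the literal path
# and its resolved form are checked, so e.g. when invoking pytest from
# /home/user/proj, "--basetemp=/home/user" raises ArgumentTypeError.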
def wrap_session(
config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]]
) -> Union[int, ExitCode]:
"""Skeleton command line program."""
session = Session.from_config(config)
session.exitstatus = ExitCode.OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except UsageError:
session.exitstatus = ExitCode.USAGE_ERROR
raise
except Failed:
session.exitstatus = ExitCode.TESTS_FAILED
except (KeyboardInterrupt, exit.Exception):
excinfo = _pytest._code.ExceptionInfo.from_current()
exitstatus = ExitCode.INTERRUPTED # type: Union[int, ExitCode]
if isinstance(excinfo.value, exit.Exception):
if excinfo.value.returncode is not None:
exitstatus = excinfo.value.returncode
if initstate < 2:
sys.stderr.write(
"{}: {}\n".format(excinfo.typename, excinfo.value.msg)
)
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = exitstatus
except BaseException:
session.exitstatus = ExitCode.INTERNAL_ERROR
excinfo = _pytest._code.ExceptionInfo.from_current()
try:
config.notify_exception(excinfo, config.option)
except exit.Exception as exc:
if exc.returncode is not None:
session.exitstatus = exc.returncode
sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc))
else:
if isinstance(excinfo.value, SystemExit):
sys.stderr.write("mainloop: caught unexpected SystemExit!\n")
finally:
# Explicitly break reference cycle.
excinfo = None # type: ignore
session.startdir.chdir()
if initstate >= 2:
try:
config.hook.pytest_sessionfinish(
session=session, exitstatus=session.exitstatus
)
except exit.Exception as exc:
if exc.returncode is not None:
session.exitstatus = exc.returncode
sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc))
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config: Config) -> Union[int, ExitCode]:
return wrap_session(config, _main)
def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]:
"""Default command line protocol for initialization, session,
running tests and reporting."""
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return ExitCode.TESTS_FAILED
elif session.testscollected == 0:
return ExitCode.NO_TESTS_COLLECTED
return None
def pytest_collection(session: "Session") -> None:
session.perform_collect()
def pytest_runtestloop(session: "Session") -> bool:
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted(
"%d error%s during collection"
% (session.testsfailed, "s" if session.testsfailed != 1 else "")
)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def _in_venv(path: py.path.local) -> bool:
"""Attempt to detect if ``path`` is the root of a Virtual Environment by
checking for the existence of the appropriate activate script."""
bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
if not bindir.isdir():
return False
activates = (
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
)
return any([fname.basename in activates for fname in bindir.listdir()])
def pytest_ignore_collect(path: py.path.local, config: Config) -> Optional[bool]:
ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
if py.path.local(path) in ignore_paths:
return True
ignore_globs = config._getconftest_pathlist(
"collect_ignore_glob", path=path.dirpath()
)
ignore_globs = ignore_globs or []
excludeglobopt = config.getoption("ignore_glob")
if excludeglobopt:
ignore_globs.extend([py.path.local(x) for x in excludeglobopt])
if any(fnmatch.fnmatch(str(path), str(glob)) for glob in ignore_globs):
return True
allow_in_venv = config.getoption("collect_in_virtualenv")
if not allow_in_venv and _in_venv(path):
return True
return None
def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None:
deselect_prefixes = tuple(config.getoption("deselect") or [])
if not deselect_prefixes:
return
remaining = []
deselected = []
for colitem in items:
if colitem.nodeid.startswith(deselect_prefixes):
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
class FSHookProxy:
def __init__(self, pm: PytestPluginManager, remove_mods) -> None:
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name: str):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
class Interrupted(KeyboardInterrupt):
"""Signals that the test run was interrupted."""
__module__ = "builtins" # For py3.
class Failed(Exception):
"""Signals a stop as failed test run."""
@attr.s
class _bestrelpath_cache(Dict[Path, str]):
path = attr.ib(type=Path)
def __missing__(self, path: Path) -> str:
r = bestrelpath(self.path, path)
self[path] = r
return r
@final
class Session(nodes.FSCollector):
Interrupted = Interrupted
Failed = Failed
# Set on the session by runner.pytest_sessionstart.
_setupstate = None # type: SetupState
# Set on the session by fixtures.pytest_sessionstart.
_fixturemanager = None # type: FixtureManager
exitstatus = None # type: Union[int, ExitCode]
def __init__(self, config: Config) -> None:
super().__init__(
config.rootdir, parent=None, config=config, session=self, nodeid=""
)
self.testsfailed = 0
self.testscollected = 0
self.shouldstop = False # type: Union[bool, str]
self.shouldfail = False # type: Union[bool, str]
self.trace = config.trace.root.get("collection")
self.startdir = config.invocation_dir
self._initialpaths = frozenset() # type: FrozenSet[py.path.local]
self._bestrelpathcache = _bestrelpath_cache(
config.rootpath
) # type: Dict[Path, str]
self.config.pluginmanager.register(self, name="session")
@classmethod
def from_config(cls, config: Config) -> "Session":
session = cls._create(config) # type: Session
return session
def __repr__(self) -> str:
return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % (
self.__class__.__name__,
self.name,
getattr(self, "exitstatus", "<UNSET>"),
self.testsfailed,
self.testscollected,
)
def _node_location_to_relpath(self, node_path: Path) -> str:
# bestrelpath is a quite slow function.
return self._bestrelpathcache[node_path]
@hookimpl(tryfirst=True)
def pytest_collectstart(self) -> None:
if self.shouldfail:
raise self.Failed(self.shouldfail)
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(
self, report: Union[TestReport, CollectReport]
) -> None:
if report.failed and not hasattr(report, "wasxfail"):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldfail = "stopping after %d failures" % (self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path: py.path.local) -> bool:
return path in self._initialpaths
def gethookproxy(self, fspath: py.path.local):
# Check if we have the common case of running
# hooks with all conftest.py files.
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(
fspath, self.config.getoption("importmode")
)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# One or more conftests are not in use at this fspath.
proxy = FSHookProxy(pm, remove_mods)
else:
# All plugins are active for this fspath.
proxy = self.config.hook
return proxy
def _recurse(self, direntry: "os.DirEntry[str]") -> bool:
if direntry.name == "__pycache__":
return False
path = py.path.local(direntry.path)
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return False
norecursepatterns = self.config.getini("norecursedirs")
if any(path.check(fnmatch=pat) for pat in norecursepatterns):
return False
return True
def _collectfile(
self, path: py.path.local, handle_dupes: bool = True
) -> Sequence[nodes.Collector]:
assert (
path.isfile()
), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
path, path.isdir(), path.exists(), path.islink()
)
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
if handle_dupes:
keepduplicates = self.config.getoption("keepduplicates")
if not keepduplicates:
duplicate_paths = self.config.pluginmanager._duplicatepaths
if path in duplicate_paths:
return ()
else:
duplicate_paths.add(path)
return ihook.pytest_collect_file(path=path, parent=self) # type: ignore[no-any-return]
@overload
def perform_collect(
self, args: Optional[Sequence[str]] = ..., genitems: "Literal[True]" = ...
) -> Sequence[nodes.Item]:
...
@overload # noqa: F811
def perform_collect( # noqa: F811
self, args: Optional[Sequence[str]] = ..., genitems: bool = ...
) -> Sequence[Union[nodes.Item, nodes.Collector]]:
...
def perform_collect( # noqa: F811
self, args: Optional[Sequence[str]] = None, genitems: bool = True
) -> Sequence[Union[nodes.Item, nodes.Collector]]:
"""Perform the collection phase for this session.
This is called by the default
:func:`pytest_collection <_pytest.hookspec.pytest_collection>` hook
implementation; see the documentation of this hook for more details.
For testing purposes, it may also be called directly on a fresh
``Session``.
This function normally recursively expands any collectors collected
from the session to their items, and only items are returned. For
testing purposes, this may be suppressed by passing ``genitems=False``,
in which case the return value contains these collectors unexpanded,
and ``session.items`` is empty.
"""
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = [] # type: List[Tuple[str, Sequence[nodes.Collector]]]
self._initial_parts = [] # type: List[Tuple[py.path.local, List[str]]]
self.items = [] # type: List[nodes.Item]
hook = self.config.hook
items = self.items # type: Sequence[Union[nodes.Item, nodes.Collector]]
try:
initialpaths = [] # type: List[py.path.local]
for arg in args:
fspath, parts = resolve_collection_argument(
self.config.invocation_params.dir,
arg,
as_pypath=self.config.option.pyargs,
)
self._initial_parts.append((fspath, parts))
initialpaths.append(fspath)
self._initialpaths = frozenset(initialpaths)
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, cols in self._notfound:
line = "(no name {!r} in any of {!r})".format(arg, cols)
errors.append("not found: {}\n{}".format(arg, line))
raise UsageError(*errors)
if not genitems:
items = rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
self.config.pluginmanager.check_pending()
hook.pytest_collection_modifyitems(
session=self, config=self.config, items=items
)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]:
from _pytest.python import Package
# Keep track of any collected nodes in here, so we don't duplicate fixtures.
node_cache1 = {} # type: Dict[py.path.local, Sequence[nodes.Collector]]
node_cache2 = (
{}
) # type: Dict[Tuple[Type[nodes.Collector], py.path.local], nodes.Collector]
# Keep track of any collected collectors in matchnodes paths, so they
# are not collected more than once.
matchnodes_cache = (
{}
) # type: Dict[Tuple[Type[nodes.Collector], str], CollectReport]
# Dirnames of pkgs with dunder-init files.
pkg_roots = {} # type: Dict[str, Package]
for argpath, names in self._initial_parts:
self.trace("processing argument", (argpath, names))
self.trace.root.indent += 1
# Start with a Session root, and delve to argpath item (dir or file)
# and stack all Packages found on the way.
# No point in finding packages when collecting doctests.
if not self.config.getoption("doctestmodules", False):
pm = self.config.pluginmanager
for parent in reversed(argpath.parts()):
if pm._confcutdir and pm._confcutdir.relto(parent):
break
if parent.isdir():
pkginit = parent.join("__init__.py")
if pkginit.isfile() and pkginit not in node_cache1:
col = self._collectfile(pkginit, handle_dupes=False)
if col:
if isinstance(col[0], Package):
pkg_roots[str(parent)] = col[0]
node_cache1[col[0].fspath] = [col[0]]
# If it's a directory argument, recurse and look for any Subpackages.
# Let the Package collector deal with subnodes, don't collect here.
if argpath.check(dir=1):
assert not names, "invalid arg {!r}".format((argpath, names))
seen_dirs = set() # type: Set[py.path.local]
for direntry in visit(str(argpath), self._recurse):
if not direntry.is_file():
continue
path = py.path.local(direntry.path)
dirpath = path.dirpath()
if dirpath not in seen_dirs:
# Collect packages first.
seen_dirs.add(dirpath)
pkginit = dirpath.join("__init__.py")
if pkginit.exists():
for x in self._collectfile(pkginit):
yield x
if isinstance(x, Package):
pkg_roots[str(dirpath)] = x
if str(dirpath) in pkg_roots:
# Do not collect packages here.
continue
for x in self._collectfile(path):
key = (type(x), x.fspath)
if key in node_cache2:
yield node_cache2[key]
else:
node_cache2[key] = x
yield x
else:
assert argpath.check(file=1)
if argpath in node_cache1:
col = node_cache1[argpath]
else:
collect_root = pkg_roots.get(argpath.dirname, self)
col = collect_root._collectfile(argpath, handle_dupes=False)
if col:
node_cache1[argpath] = col
matching = []
work = [
(col, names)
] # type: List[Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]]]
while work:
self.trace("matchnodes", col, names)
self.trace.root.indent += 1
matchnodes, matchnames = work.pop()
for node in matchnodes:
if not matchnames:
matching.append(node)
continue
if not isinstance(node, nodes.Collector):
continue
key = (type(node), node.nodeid)
if key in matchnodes_cache:
rep = matchnodes_cache[key]
else:
rep = collect_one_node(node)
matchnodes_cache[key] = rep
if rep.passed:
submatchnodes = []
for r in rep.result:
# TODO: Remove parametrized workaround once collection structure contains
# parametrization.
if (
r.name == matchnames[0]
or r.name.split("[")[0] == matchnames[0]
):
submatchnodes.append(r)
if submatchnodes:
work.append((submatchnodes, matchnames[1:]))
# XXX Accept IDs that don't have "()" for class instances.
elif len(rep.result) == 1 and rep.result[0].name == "()":
work.append((rep.result, matchnames))
else:
# Report collection failures here to avoid failing to run some test
# specified in the command line because the module could not be
# imported (#134).
node.ihook.pytest_collectreport(report=rep)
self.trace("matchnodes finished -> ", len(matching), "nodes")
self.trace.root.indent -= 1
if not matching:
report_arg = "::".join((str(argpath), *names))
self._notfound.append((report_arg, col))
continue
# If __init__.py was the only file requested, then the matched node will be
# the corresponding Package, and the first yielded item will be the __init__
# Module itself, so just use that. If this special case isn't taken, then all
# the files in the package will be yielded.
if argpath.basename == "__init__.py":
assert isinstance(matching[0], nodes.Collector)
try:
yield next(iter(matching[0].collect()))
except StopIteration:
# The package collects nothing with only an __init__.py
# file in it, which gets ignored by the default
# "python_files" option.
pass
continue
yield from matching
self.trace.root.indent -= 1
def genitems(
self, node: Union[nodes.Item, nodes.Collector]
) -> Iterator[nodes.Item]:
self.trace("genitems", node)
if isinstance(node, nodes.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, nodes.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
yield from self.genitems(subnode)
node.ihook.pytest_collectreport(report=rep)
def search_pypath(module_name: str) -> str:
"""Search sys.path for the given a dotted module name, and return its file system path."""
try:
spec = importlib.util.find_spec(module_name)
# AttributeError: looks like package module, but actually filename
# ImportError: module does not exist
# ValueError: not a module name
except (AttributeError, ImportError, ValueError):
return module_name
if spec is None or spec.origin is None or spec.origin == "namespace":
return module_name
elif spec.submodule_search_locations:
return os.path.dirname(spec.origin)
else:
return spec.origin
def resolve_collection_argument(
invocation_path: Path, arg: str, *, as_pypath: bool = False
) -> Tuple[py.path.local, List[str]]:
"""Parse path arguments optionally containing selection parts and return (fspath, names).
Command-line arguments can point to files and/or directories, and optionally contain
parts for specific tests selection, for example:
"pkg/tests/test_foo.py::TestClass::test_foo"
This function ensures the path exists, and returns a tuple:
(py.path.path("/full/path/to/pkg/tests/test_foo.py"), ["TestClass", "test_foo"])
When as_pypath is True, expects that the command-line argument actually contains
module paths instead of file-system paths:
"pkg.tests.test_foo::TestClass::test_foo"
In which case we search sys.path for a matching module, and then return the *path* to the
found module.
If the path doesn't exist, raise UsageError.
If the path is a directory and selection parts are present, raise UsageError.
"""
strpath, *parts = str(arg).split("::")
if as_pypath:
strpath = search_pypath(strpath)
fspath = invocation_path / strpath
fspath = absolutepath(fspath)
if not fspath.exists():
msg = (
"module or package not found: {arg} (missing __init__.py?)"
if as_pypath
else "file or directory not found: {arg}"
)
raise UsageError(msg.format(arg=arg))
if parts and fspath.is_dir():
msg = (
"package argument cannot contain :: selection parts: {arg}"
if as_pypath
else "directory argument cannot contain :: selection parts: {arg}"
)
raise UsageError(msg.format(arg=arg))
return py.path.local(str(fspath)), parts
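# Illustrative example (not part of the original source): "::" selection parts
# are split off and returned alongside the resolved filesystem path, e.g.
# "tests/test_foo.py::TestClass::test_foo" (relative to the invocation dir)
# resolves to (local(".../tests/test_foo.py"), ["TestClass", "test_foo"]),
# provided the file exists.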
|
KiChjang/servo
|
tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/main.py
|
Python
|
mpl-2.0
| 31,889
|
[
"VisIt"
] |
72a2cc30f717b3756ea56607242872b96a6966a2d8bf4d36085b4bf85cc633c0
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `policies.py`."""
import functools
from absl.testing import absltest
import jax
import jax.numpy as jnp
import mctx
from mctx._src import policies
import numpy as np
def _make_bandit_recurrent_fn(rewards):
"""Returns a recurrent_fn with discount=0."""
def recurrent_fn(params, rng_key, action, embedding):
del params, rng_key, embedding
reward = rewards[jnp.arange(action.shape[0]), action]
return mctx.RecurrentFnOutput(
reward=reward,
discount=jnp.zeros_like(reward),
prior_logits=jnp.zeros_like(rewards),
value=jnp.zeros_like(reward),
), ()
return recurrent_fn
def _get_deepest_leaf(tree, node_index):
"""Returns `(leaf, depth)` with maximum depth and visit count.
Args:
tree: _unbatched_ MCTS tree state.
node_index: the node of the inspected subtree.
Returns:
`(leaf, depth)` of a deepest leaf. If multiple leaves have the same depth,
the leaf with the highest visit count is returned.
"""
np.testing.assert_equal(len(tree.children_index.shape), 2)
leaf = node_index
max_found_depth = 0
for action in range(tree.children_index.shape[-1]):
next_node_index = tree.children_index[node_index, action]
if next_node_index != tree.UNVISITED:
found_leaf, found_depth = _get_deepest_leaf(tree, next_node_index)
if ((1 + found_depth, tree.node_visits[found_leaf]) >
(max_found_depth, tree.node_visits[leaf])):
leaf = found_leaf
max_found_depth = 1 + found_depth
return leaf, max_found_depth
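# Illustrative note (not part of the original source): ties in depth are broken
# by visit count because the (depth, visits) tuples above compare
# lexicographically, e.g. (3, 10) > (3, 7) > (2, 99).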
class PoliciesTest(absltest.TestCase):
def test_apply_temperature_one(self):
"""Tests temperature=1."""
logits = jnp.arange(6, dtype=jnp.float32)
new_logits = policies._apply_temperature(logits, temperature=1.0)
np.testing.assert_allclose(logits - logits.max(), new_logits)
def test_apply_temperature_two(self):
"""Tests temperature=2."""
logits = jnp.arange(6, dtype=jnp.float32)
temperature = 2.0
new_logits = policies._apply_temperature(logits, temperature)
np.testing.assert_allclose((logits - logits.max()) / temperature,
new_logits)
def test_apply_temperature_zero(self):
"""Tests temperature=0."""
logits = jnp.arange(4, dtype=jnp.float32)
new_logits = policies._apply_temperature(logits, temperature=0.0)
np.testing.assert_allclose(
jnp.array([-2.552118e+38, -1.701412e+38, -8.507059e+37, 0.0]),
new_logits,
rtol=1e-3)
def test_apply_temperature_zero_on_large_logits(self):
"""Tests temperature=0 on large logits."""
logits = jnp.array([100.0, 3.4028235e+38, -jnp.inf, -3.4028235e+38])
new_logits = policies._apply_temperature(logits, temperature=0.0)
np.testing.assert_allclose(
jnp.array([-jnp.inf, 0.0, -jnp.inf, -jnp.inf]), new_logits)
def test_mask_invalid_actions(self):
"""Tests action masking."""
logits = jnp.array([1e6, -jnp.inf, 1e6 + 1, -100.0])
invalid_actions = jnp.array([0.0, 1.0, 0.0, 1.0])
masked_logits = policies._mask_invalid_actions(
logits, invalid_actions)
valid_probs = jax.nn.softmax(jnp.array([0.0, 1.0]))
np.testing.assert_allclose(
jnp.array([valid_probs[0], 0.0, valid_probs[1], 0.0]),
jax.nn.softmax(masked_logits))
def test_mask_all_invalid_actions(self):
"""Tests a state with no valid action."""
logits = jnp.array([-jnp.inf, -jnp.inf, -jnp.inf, -jnp.inf])
invalid_actions = jnp.array([1.0, 1.0, 1.0, 1.0])
masked_logits = policies._mask_invalid_actions(
logits, invalid_actions)
np.testing.assert_allclose(
jnp.array([0.25, 0.25, 0.25, 0.25]),
jax.nn.softmax(masked_logits))
def test_muzero_policy(self):
root = mctx.RootFnOutput(
prior_logits=jnp.array([
[-1.0, 0.0, 2.0, 3.0],
]),
value=jnp.array([0.0]),
embedding=(),
)
rewards = jnp.zeros_like(root.prior_logits)
invalid_actions = jnp.array([
[0.0, 0.0, 0.0, 1.0],
])
policy_output = mctx.muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
recurrent_fn=_make_bandit_recurrent_fn(rewards),
num_simulations=1,
invalid_actions=invalid_actions,
dirichlet_fraction=0.0)
expected_action = jnp.array([2], dtype=jnp.int32)
np.testing.assert_array_equal(expected_action, policy_output.action)
expected_action_weights = jnp.array([
[0.0, 0.0, 1.0, 0.0],
])
np.testing.assert_allclose(expected_action_weights,
policy_output.action_weights)
def test_gumbel_muzero_policy(self):
root_value = jnp.array([-5.0])
root = mctx.RootFnOutput(
prior_logits=jnp.array([
[0.0, -1.0, 2.0, 3.0],
]),
value=root_value,
embedding=(),
)
rewards = jnp.array([
[20.0, 3.0, -1.0, 10.0],
])
invalid_actions = jnp.array([
[1.0, 0.0, 0.0, 1.0],
])
value_scale = 0.05
maxvisit_init = 60
num_simulations = 17
max_depth = 3
qtransform = functools.partial(
mctx.qtransform_completed_by_mix_value,
value_scale=value_scale,
maxvisit_init=maxvisit_init,
rescale_values=True)
policy_output = mctx.gumbel_muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
recurrent_fn=_make_bandit_recurrent_fn(rewards),
num_simulations=num_simulations,
invalid_actions=invalid_actions,
max_depth=max_depth,
qtransform=qtransform,
gumbel_scale=1.0)
# Testing the action.
expected_action = jnp.array([1], dtype=jnp.int32)
np.testing.assert_array_equal(expected_action, policy_output.action)
# Testing the action_weights.
probs = jax.nn.softmax(jnp.where(
invalid_actions, -jnp.inf, root.prior_logits))
mix_value = 1.0 / (num_simulations + 1) * (root_value + num_simulations * (
probs[:, 1] * rewards[:, 1] + probs[:, 2] * rewards[:, 2]))
completed_qvalues = jnp.array([
[mix_value[0], rewards[0, 1], rewards[0, 2], mix_value[0]],
])
max_value = jnp.max(completed_qvalues, axis=-1, keepdims=True)
min_value = jnp.min(completed_qvalues, axis=-1, keepdims=True)
total_value_scale = (maxvisit_init + np.ceil(num_simulations / 2)
) * value_scale
rescaled_qvalues = total_value_scale * (completed_qvalues - min_value) / (
max_value - min_value)
expected_action_weights = jax.nn.softmax(
jnp.where(invalid_actions,
-jnp.inf,
root.prior_logits + rescaled_qvalues))
np.testing.assert_allclose(expected_action_weights,
policy_output.action_weights,
atol=1e-6)
# Testing the visit_counts.
summary = policy_output.search_tree.summary()
expected_visit_counts = jnp.array(
[[0.0, np.ceil(num_simulations / 2), num_simulations // 2, 0.0]])
np.testing.assert_array_equal(expected_visit_counts, summary.visit_counts)
# Testing max_depth.
leaf, max_found_depth = _get_deepest_leaf(
jax.tree_util.tree_map(lambda x: x[0], policy_output.search_tree),
policy_output.search_tree.ROOT_INDEX)
self.assertEqual(max_depth, max_found_depth)
self.assertEqual(6, policy_output.search_tree.node_visits[0, leaf])
def test_gumbel_muzero_policy_without_invalid_actions(self):
root_value = jnp.array([-5.0])
root = mctx.RootFnOutput(
prior_logits=jnp.array([
[0.0, -1.0, 2.0, 3.0],
]),
value=root_value,
embedding=(),
)
rewards = jnp.array([
[20.0, 3.0, -1.0, 10.0],
])
value_scale = 0.05
maxvisit_init = 60
num_simulations = 17
max_depth = 3
qtransform = functools.partial(
mctx.qtransform_completed_by_mix_value,
value_scale=value_scale,
maxvisit_init=maxvisit_init,
rescale_values=True)
policy_output = mctx.gumbel_muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
recurrent_fn=_make_bandit_recurrent_fn(rewards),
num_simulations=num_simulations,
invalid_actions=None,
max_depth=max_depth,
qtransform=qtransform,
gumbel_scale=1.0)
# Testing the action.
expected_action = jnp.array([3], dtype=jnp.int32)
np.testing.assert_array_equal(expected_action, policy_output.action)
# Testing the action_weights.
summary = policy_output.search_tree.summary()
completed_qvalues = rewards
max_value = jnp.max(completed_qvalues, axis=-1, keepdims=True)
min_value = jnp.min(completed_qvalues, axis=-1, keepdims=True)
total_value_scale = (maxvisit_init + summary.visit_counts.max()
) * value_scale
rescaled_qvalues = total_value_scale * (completed_qvalues - min_value) / (
max_value - min_value)
expected_action_weights = jax.nn.softmax(
root.prior_logits + rescaled_qvalues)
np.testing.assert_allclose(expected_action_weights,
policy_output.action_weights,
atol=1e-6)
# Testing the visit_counts.
expected_visit_counts = jnp.array(
[[6, 2, 2, 7]])
np.testing.assert_array_equal(expected_visit_counts, summary.visit_counts)
if __name__ == "__main__":
absltest.main()
|
deepmind/mctx
|
mctx/_src/tests/policies_test.py
|
Python
|
apache-2.0
| 10,164
|
[
"VisIt"
] |
00e374c21eefac51ba24f7337e55fce645313d0491a007ec99b6a6f6e13d3705
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.core import overrides
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def _poly_dispatcher(seq_of_zeros):
return seq_of_zeros
@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
       Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
def _roots_dispatcher(p):
return p
@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
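# Quick sanity check of the companion-matrix approach (illustrative sketch;
# eigenvalue ordering may vary across platforms, hence the sort):
#   >>> np.sort(np.roots([1, -3, 2]))   # x**2 - 3*x + 2 = (x - 1)*(x - 2)
#   array([ 1.,  2.])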
def _polyint_dispatcher(p, m=None, k=None):
return (p,)
@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
        Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
    Note that 3 = 6 / 2!, and that the constants are given in the order of
    integration. The constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
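# Small sketch of the recursion above (hedged): integrating the constant 6
# twice yields 3*x**2, with both integration constants defaulting to zero:
#   >>> np.polyint([6.0], m=2)
#   array([ 3.,  0.,  0.])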
def _polyder_dispatcher(p, m=None):
return (p,)
@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
return (x, y, w)
@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error in the order `deg`, `deg-1`, ... `0`.
The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
method is recommended for new code as it is more stable numerically. See
the documentation of the method for more information.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool, optional
        Return the estimate and the covariance matrix of the estimate.
        If `full` is True, then `cov` is not returned.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
https://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
if len(x) <= order + 2:
raise ValueError("the number of data points must exceed order + 2 "
"for Bayesian estimate the covariance matrix")
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
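# Illustrative use of the diagnostic return path (a hedged sketch): with
# ``full=True`` the SVD diagnostics are returned instead of a covariance matrix.
#   >>> x = np.array([0., 1., 2., 3.])
#   >>> y = np.array([1., 3., 5., 7.])
#   >>> c, resids, rank, sv, rc = np.polyfit(x, y, 1, full=True)
#   >>> c                      # the data lie exactly on y = 2*x + 1
#   array([ 2.,  1.])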
def _polyval_dispatcher(p, x):
return (p, x)
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
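    # Horner's scheme: fold coefficients in from the highest power down,
    # so each step costs one multiply and one add.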
for i in range(len(p)):
y = y * x + p[i]
return y
def _binary_op_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x - 4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def _polydiv_dispatcher(u, v):
return (u, v)
@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.astype(w.dtype)
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
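# The division identity u == q*v + r can be checked directly (hedged sketch):
#   >>> q, r = np.polydiv([3.0, 5.0, 2.0], [2.0, 1.0])
#   >>> np.polyadd(np.polymul(q, [2.0, 1.0]), r)
#   array([ 3.,  5.,  2.])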
_poly_mat = re.compile(r"[*][*]([0-9]*)")
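# _raise_power pretty-prints the '**<n>' exponents produced by poly1d.__str__:
# the exponent digits matched by _poly_mat are lifted onto a line above the
# polynomial body, and both lines wrap at `wrap` columns.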
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
    These numbers in the previous line represent (0, 0) to machine precision.
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" A copy of the polynomial coefficients """
return self._coeffs.copy()
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
    # our internal _coeffs property needs to be backed by __dict__['coeffs'] for
# scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
|
gfyoung/numpy
|
numpy/lib/polynomial.py
|
Python
|
bsd-3-clause
| 39,818
|
[
"Gaussian"
] |
2061e8c513a396358c432d05a21e0c7b1fba2022ecd24f99a2462f7aa78d5d6b
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""NeuroM neurom morphology analysis package.
Examples:
Load a neuron
>>> import neurom as nm
>>> nrn = nm.load_neuron('some/data/path/morph_file.swc')
Obtain some morphometrics using the get function
>>> ap_seg_len = nm.get('segment_lengths', nrn, neurite_type=nm.APICAL_DENDRITE)
>>> ax_sec_len = nm.get('section_lengths', nrn, neurite_type=nm.AXON)
Load neurons from a directory. This loads all SWC, HDF5 or NeuroLucida .asc\
files it finds and returns a list of neurons
>>> import numpy as np # For mean value calculation
>>> nrns = nm.load_neurons('some/data/directory')
>>> for nrn in nrns:
    ... print('mean section length', np.mean(nm.get('section_lengths', nrn)))
Apply a function to a selection of neurites in a neuron or population.
This example gets the number of points in each axon in a neuron population
>>> import neurom as nm
>>> filter = lambda n : n.type == nm.AXON
>>> mapping = lambda n : len(n.points)
>>> n_points = [n for n in nm.iter_neurites(nrns, mapping, filter)]
"""
import logging as _logging
from .core import (NeuriteType, graft_neuron, iter_neurites, iter_sections,
iter_segments, Neuron)
from .core.dataformat import COLS
from .core.types import NEURITES as NEURITE_TYPES
from .features import get
from .io.utils import NeuronLoader, load_neuron, load_neurons
APICAL_DENDRITE = NeuriteType.apical_dendrite
BASAL_DENDRITE = NeuriteType.basal_dendrite
AXON = NeuriteType.axon
SOMA = NeuriteType.soma
ANY_NEURITE = NeuriteType.all
# prevent 'No handlers could be found for logger ...' errors
# https://pythonhosted.org/logutils/libraries.html
_logging.getLogger(__name__).addHandler(_logging.NullHandler())
|
wizmer/NeuroM
|
neurom/__init__.py
|
Python
|
bsd-3-clause
| 3,437
|
[
"NEURON"
] |
365899993a355a5be42424f4a95fb8937415c3d516fa9dabdec4eb798b813824
|
"""Test for memory leaks in the CNA code."""
import asap3
from ase.lattice.compounds import L1_2
import numpy as np
steps = 30
msteps = 5
def makeatoms():
atoms = L1_2(size=(10,10,10), symbol=('Au', 'Cu'), latticeconstant=4.0)
r = atoms.get_positions()
r += np.random.normal(0.0, 0.0001, r.shape)
atoms.set_positions(r)
return atoms
m0 = asap3.heap_mallinfo()
if m0 < 0:
print "Memory monitoring not supported, test skipped."
else:
print "Memory at startup", m0, 'kB'
for type in ('CNA', 'COORD'):
for haspot in (False, True):
for replaceatoms in (False, True):
print ("Running test: Type=%s HasPot=%s ReplaceAtoms=%s"
% (type, haspot, replaceatoms))
leak = 0
if not replaceatoms:
atoms = makeatoms()
if haspot:
atoms.set_calculator(asap3.EMT())
atoms.get_potential_energy()
for i in range(steps):
if replaceatoms:
atoms = makeatoms()
if haspot:
atoms.set_calculator(asap3.EMT())
atoms.get_potential_energy()
if type == 'CNA':
asap3.CNA(atoms)
else:
asap3.CoordinationNumbers(atoms)
m = int(asap3.heap_mallinfo())
if i == msteps:
m0 = m
if i % msteps == 0 and i > msteps:
print " Memory usage:", m, "kB"
if m > m0:
leak = max(leak, m - m0)
if leak > 1:
print " *** MEMORY LEAK DETECTED ***"
|
auag92/n2dm
|
Asap-3.8.4/Test/CNAleak.py
|
Python
|
mit
| 1,950
|
[
"ASE"
] |
75060680bfe5793d20a116888cf709b4e72735749e8e37f1f350e266f202087a
|
"""Neural machine translation example.
This is a pretty straightforward implementation of Badhanau et al. (2014):
https://arxiv.org/pdf/1409.0473v7.pdf
The system is word based and assumes tokenized inputs, the only bells and
whistles are variational dropout and layer normalization. Otherwise this is
meant as a starting point for NMT experiments.
"""
import numpy as np
import theano
from theano import tensor as T
from bnas.model import Model, Linear, LSTM, Sequence
from bnas.optimize import Adam, iterate_batches
from bnas.init import Gaussian
from bnas.utils import softmax_3d
from bnas.loss import batch_sequence_crossentropy
from bnas.text import TextEncoder
from bnas.fun import function
class NMT(Model):
def __init__(self, name, config):
super().__init__(name)
self.config = config
self.param('src_embeddings',
(len(config['src_encoder']), config['src_embedding_dims']),
init_f=Gaussian(fan_in=config['src_embedding_dims']))
self.param('trg_embeddings',
(len(config['trg_encoder']), config['trg_embedding_dims']),
init_f=Gaussian(fan_in=config['trg_embedding_dims']))
self.add(Linear('hidden',
config['decoder_state_dims'],
config['trg_embedding_dims']))
self.add(Linear('emission',
config['trg_embedding_dims'],
len(config['trg_encoder']),
w=self._trg_embeddings.T))
for prefix, backwards in (('fwd', False), ('back', True)):
self.add(Sequence(
prefix+'_encoder', LSTM, backwards,
config['src_embedding_dims'] + (
config['encoder_state_dims'] if backwards else 0),
config['encoder_state_dims'],
layernorm=config['encoder_layernorm'],
dropout=config['encoder_dropout'],
trainable_initial=True,
offset=0))
self.add(Sequence(
'decoder', LSTM, False,
config['trg_embedding_dims'],
config['decoder_state_dims'],
layernorm=config['decoder_layernorm'],
dropout=config['decoder_dropout'],
attention_dims=config['attention_dims'],
attended_dims=2*config['encoder_state_dims'],
trainable_initial=False,
offset=-1))
h_t = T.matrix('h_t')
self.predict_fun = function(
[h_t],
T.nnet.softmax(self.emission(T.tanh(self.hidden(h_t)))))
inputs = T.lmatrix('inputs')
inputs_mask = T.bmatrix('inputs_mask')
self.encode_fun = function(
[inputs, inputs_mask],
self.encode(inputs, inputs_mask))
def xent(self, inputs, inputs_mask, outputs, outputs_mask):
pred_outputs, pred_attention = self(
inputs, inputs_mask, outputs, outputs_mask)
outputs_xent = batch_sequence_crossentropy(
pred_outputs, outputs[1:], outputs_mask[1:])
return outputs_xent
def loss(self, *args):
outputs_xent = self.xent(*args)
return super().loss() + outputs_xent
def search(self, inputs, inputs_mask, max_length):
h_0, c_0, attended = self.encode_fun(inputs, inputs_mask)
return self.decoder.search(
self.predict_fun,
self._trg_embeddings.get_value(borrow=True),
self.config['trg_encoder']['<S>'],
self.config['trg_encoder']['</S>'],
max_length,
states_0=[h_0, c_0],
attended=attended,
attention_mask=inputs_mask)
def encode(self, inputs, inputs_mask):
embedded_inputs = self._src_embeddings[inputs]
# Forward encoding pass
fwd_h_seq, fwd_c_seq = self.fwd_encoder(embedded_inputs, inputs_mask)
# Backward encoding pass, using hidden states from forward encoder
back_h_seq, back_c_seq = self.back_encoder(
T.concatenate([embedded_inputs, fwd_h_seq], axis=-1),
inputs_mask)
# Initial states for decoder
h_0 = back_h_seq[-1]
c_0 = back_c_seq[-1]
# Attention on concatenated forward/backward sequences
attended = T.concatenate([fwd_h_seq, back_h_seq], axis=-1)
return h_0, c_0, attended
def __call__(self, inputs, inputs_mask, outputs, outputs_mask):
embedded_outputs = self._trg_embeddings[outputs]
h_0, c_0, attended = self.encode(inputs, inputs_mask)
h_seq, c_seq, attention_seq = self.decoder(
embedded_outputs, outputs_mask, states_0=(h_0, c_0),
attended=attended, attention_mask=inputs_mask)
pred_seq = softmax_3d(self.emission(T.tanh(self.hidden(h_seq))))
return pred_seq, attention_seq
def main():
import argparse
import pickle
import sys
import os.path
from time import time
parser = argparse.ArgumentParser(
description='Neural machine translation')
parser.add_argument('--model', type=str, required=True,
help='name of the model file')
parser.add_argument('--corpus', type=str,
help='name of parallel corpus file')
args = parser.parse_args()
if os.path.exists(args.model):
with open(args.model, 'rb') as f:
config = pickle.load(f)
model = NMT('nmt', config)
model.load(f)
else:
n_epochs = 1
batch_size = 64
test_size = batch_size
max_length = 30
with open(args.corpus, 'r', encoding='utf-8') as f:
def read_pairs():
for line in f:
fields = [s.strip() for s in line.split('|||')]
if len(fields) == 2:
pair = tuple(map(str.split, fields))
lens = tuple(map(len, pair))
if min(lens) >= 2 and max(lens) <= max_length:
yield pair
src_sents, trg_sents = list(zip(*read_pairs()))
src_encoder = TextEncoder(sequences=src_sents, max_vocab=10000)
trg_encoder = TextEncoder(sequences=trg_sents, max_vocab=10000)
sent_pairs = list(zip(src_sents, trg_sents))
print('Read %d sentences, vocabulary size %d/%d' % (
len(sent_pairs), len(src_encoder), len(trg_encoder)),
flush=True)
config = {
'src_encoder': src_encoder,
'trg_encoder': trg_encoder,
'src_embedding_dims': 512,
'trg_embedding_dims': 512,
'encoder_dropout': 0.2,
'decoder_dropout': 0.2,
'encoder_state_dims': 1024,
'decoder_state_dims': 1024,
'attention_dims': 1024,
'encoder_layernorm': 'ba1',
'decoder_layernorm': 'ba1',
}
model = NMT('nmt', config)
sym_inputs = T.lmatrix('inputs')
sym_inputs_mask = T.bmatrix('inputs_mask')
sym_outputs = T.lmatrix('outputs')
sym_outputs_mask = T.bmatrix('outputs_mask')
optimizer = Adam(
model.parameters(),
model.loss(sym_inputs, sym_inputs_mask,
sym_outputs, sym_outputs_mask),
[sym_inputs, sym_inputs_mask],
[sym_outputs, sym_outputs_mask],
grad_max_norm=5.0)
xent = function(
[sym_inputs, sym_inputs_mask, sym_outputs, sym_outputs_mask],
model.xent(sym_inputs, sym_inputs_mask,
sym_outputs, sym_outputs_mask))
test_set = sent_pairs[:test_size]
train_set = sent_pairs[test_size:]
test_src, test_trg = list(zip(*test_set))
test_inputs, test_inputs_mask = src_encoder.pad_sequences(test_src)
test_outputs, test_outputs_mask = trg_encoder.pad_sequences(test_trg)
start_time = time()
end_time = start_time + 24*3600
batch_nr = 0
while time() < end_time:
def pair_len(pair): return max(map(len, pair))
for batch_pairs in iterate_batches(train_set, 64, pair_len):
src_batch, trg_batch = list(zip(*batch_pairs))
inputs, inputs_mask = src_encoder.pad_sequences(src_batch)
outputs, outputs_mask = trg_encoder.pad_sequences(trg_batch)
t0 = time()
train_loss = optimizer.step(
inputs, inputs_mask, outputs, outputs_mask)
print('Train loss: %.3f (%.2f s)' % (train_loss, time()-t0),
flush=True)
batch_nr += 1
if batch_nr % 10 == 0:
test_xent = xent(test_inputs, test_inputs_mask,
test_outputs, test_outputs_mask)
print('Test xent: %.3f' % test_xent, flush=True)
if batch_nr % 100 == 0:
pred, pred_mask, scores = model.search(
test_inputs, test_inputs_mask, max_length)
for src_sent, sent, sent_mask, score in zip(
test_inputs.T,
pred[-1].T, pred_mask[-1].T, scores[-1].T):
print(' '.join(
src_encoder.vocab[x] for x in src_sent.flatten()
if x > 1))
print('%.2f'%score, ' '.join(
trg_encoder.vocab[x] for x, there
in zip(sent.flatten(), sent_mask.flatten())
if bool(there)))
print('-'*72, flush=True)
if time() >= end_time: break
with open(args.model, 'wb') as f:
pickle.dump(config, f)
model.save(f)
if __name__ == '__main__': main()
|
robertostling/bnas
|
examples/nmt.py
|
Python
|
gpl-3.0
| 10,029
|
[
"Gaussian"
] |
061bdccd094bb9fa1edb1b115712c8e369ab8562cdcf00e31260db19656d75d3
|
#!/usr/bin/env python
import pysam
import argparse
import matplotlib.pyplot as plt
import numpy as np
ap = argparse.ArgumentParser(description="Print the nucleotide frequency of a pileup.")
ap.add_argument("--bam", help="Input bam file.", required=True)
ap.add_argument("--freq", help="Print the frequency to this file", required = True)
ap.add_argument("--plot", help="Plot a SVG of the frequency")
ap.add_argument("--max", help="Limit alignments processed", default=None, type=int)
args = ap.parse_args()
bamFile = pysam.Samfile(args.bam, 'rb')
m = [4]*256
m[ord('A')] = 0
m[ord('C')] = 1
m[ord('G')] = 2
m[ord('T')] = 3
idx = 0
sequences = {}
for seq in bamFile.header["SQ"]:
print "adding ref of len " + str(seq["LN"])
sequences[idx] = np.zeros((4,seq["LN"]))
idx+=1
index = 0
for aln in bamFile.fetch():
mat = sequences[aln.tid]
tPos = aln.pos
qPos = aln.qstart
qseq = aln.query
for op in aln.cigar:
if (op[0] != 0 and op[0] != 1 and op[0] != 2):
continue
if (op[0] == 0):
for i in range(op[1]):
mat[m[ord(qseq[qPos])]][tPos] += 1
qPos +=1
tPos +=1
elif (op[0] == 1):
qPos += op[1]
elif (op[0] == 2):
tPos += op[1]
index += 1
if (index % 1000 == 0):
print "processed: " + str(index)
if (args.max is not None and args.max == index):
break
outFile = open(args.freq, 'wb')
for seq in sequences:
np.save(outFile, sequences[seq])
outFile.close()
|
yunlongliukm/chm1_scripts
|
PrintFrequency.py
|
Python
|
mit
| 1,551
|
[
"pysam"
] |
bad33329e56ce513297cb658f9e15baff45694a691849f35bea0a154fcb6e1c2
|
from builtins import str
from builtins import range
from builtins import object
import logging
import datetime
import time
import os
import shutil
import tempfile
import re
import traceback
import json
import hashlib
from biomaj_core.utils import Utils
from biomaj_download.downloadclient import DownloadClient
from biomaj_download.message import message_pb2
from biomaj_download.download.http import HTTPParse
from biomaj_download.download.localcopy import LocalDownload
from biomaj.mongo_connector import MongoConnector
from biomaj.options import Options
from biomaj.process.processfactory import RemoveProcessFactory, PreProcessFactory, PostProcessFactory
from biomaj_zipkin.zipkin import Zipkin
class Workflow(object):
"""
Bank update workflow
"""
FLOW_INIT = 'init'
FLOW_CHECK = 'check'
FLOW_DEPENDS = 'depends'
FLOW_PREPROCESS = 'preprocess'
FLOW_RELEASE = 'release'
FLOW_DOWNLOAD = 'download'
FLOW_POSTPROCESS = 'postprocess'
FLOW_REMOVEPROCESS = 'removeprocess'
FLOW_PUBLISH = 'publish'
FLOW_OVER = 'over'
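    # Ordered task list driving start(): each entry names a wf_<name> method,
    # and 'steps' lists wf_<step> sub-tasks run after the main task succeeds.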
FLOW = [
{'name': 'init', 'steps': []},
{'name': 'check', 'steps': []},
{'name': 'over', 'steps': []}
]
def __init__(self, bank, session=None):
"""
Instantiate a new workflow
:param bank: bank on which to apply the workflow
:type bank: :class:`biomaj.bank.Bank`
"""
self.bank = bank
if session is None:
self.session = bank.session
else:
self.session = session
self.bank.session = session
self.options = bank.options
self.name = bank.name
# Skip all remaining tasks, no need to update
self.skip_all = False
self.session._session['update'] = False
self.session._session['remove'] = False
self.session.config.set('localrelease', '')
self.session.config.set('remoterelease', '')
# For micro services
self.redis_client = None
self.redis_prefix = None
# Zipkin
self.span = None
def get_flow(self, task):
for flow in Workflow.FLOW:
if flow['name'] == task:
return flow
def start(self):
"""
Start the workflow
"""
logging.info('Workflow:Start')
if 'stats' not in self.session._session:
self.session._session['stats'] = {
'workflow': {},
'nb_downloaded_files': 0
}
for flow in self.session.flow:
dt = datetime.datetime.now()
start_timestamp = time.mktime(dt.timetuple())
if self.skip_all:
logging.info('Workflow:Skip:' + flow['name'])
self.session._session['status'][flow['name']] = None
self.session._session['status'][Workflow.FLOW_OVER] = True
continue
if self.options.get_option(Options.STOP_BEFORE) == flow['name']:
self.wf_over()
break
# Check for cancel request
if self.redis_client and self.redis_client.get(self.redis_prefix + ':' + self.bank.name + ':action:cancel'):
                logging.warning('Cancel requested, stopping update')
self.redis_client.delete(self.redis_prefix + ':' + self.bank.name + ':action:cancel')
self.wf_over()
return False
# Always run INIT
if flow['name'] != Workflow.FLOW_INIT and self.session.get_status(flow['name']):
logging.info('Workflow:Skip:' + flow['name'])
if flow['name'] == Workflow.FLOW_INIT or not self.session.get_status(flow['name']):
logging.info('Workflow:Start:' + flow['name'])
span = None
if self.options.get_option('traceId'):
trace_id = self.options.get_option('traceId')
span_id = self.options.get_option('spanId')
span = Zipkin('biomaj-workflow', flow['name'], trace_id=trace_id, parent_id=span_id)
self.span = span
self.bank.config.set('zipkin_trace_id', span.get_trace_id())
self.bank.config.set('zipkin_span_id', span.get_span_id())
try:
self.session._session['status'][flow['name']] = getattr(self, 'wf_' + flow['name'])()
except Exception as e:
self.session._session['status'][flow['name']] = False
logging.exception('Workflow:' + flow['name'] + ':Exception:' + str(e))
logging.debug(traceback.format_exc())
finally:
self.wf_progress(flow['name'], self.session._session['status'][flow['name']])
if span:
span.add_binary_annotation('status', str(self.session._session['status'][flow['name']]))
span.trace()
if flow['name'] != Workflow.FLOW_OVER and not self.session.get_status(flow['name']):
logging.error('Error during task ' + flow['name'])
if flow['name'] != Workflow.FLOW_INIT:
self.wf_over()
return False
# Main task is over, execute sub tasks of main
if not self.skip_all:
for step in flow['steps']:
span = None
try:
# Check for cancel request
if self.redis_client and self.redis_client.get(self.redis_prefix + ':' + self.bank.name + ':action:cancel'):
                            logging.warning('Cancel requested, stopping update')
self.redis_client.delete(self.redis_prefix + ':' + self.bank.name + ':action:cancel')
self.wf_over()
return False
if self.options.get_option('traceId'):
trace_id = self.options.get_option('traceId')
span_id = self.options.get_option('spanId')
span = Zipkin('biomaj-workflow', flow['name'] + ":wf_" + step, trace_id=trace_id, parent_id=span_id)
self.span = span
self.bank.config.set('zipkin_trace_id', span.get_trace_id())
self.bank.config.set('zipkin_span_id', span.get_span_id())
res = getattr(self, 'wf_' + step)()
if span:
span.add_binary_annotation('status', str(res))
span.trace()
if not res:
logging.error('Error during ' + flow['name'] + ' subtask: wf_' + step)
logging.error('Revert main task status ' + flow['name'] + ' to error status')
self.session._session['status'][flow['name']] = False
self.wf_over()
return False
except Exception as e:
logging.error('Workflow:' + flow['name'] + ' subtask: wf_' + step + ':Exception:' + str(e))
self.session._session['status'][flow['name']] = False
logging.debug(traceback.format_exc())
self.wf_over()
return False
dt = datetime.datetime.now()
end_timestamp = time.mktime(dt.timetuple())
self.session._session['stats']['workflow'][flow['name']] = end_timestamp - start_timestamp
if self.options.get_option(Options.STOP_AFTER) == flow['name']:
self.wf_over()
break
self.wf_progress_end()
return True
def wf_progress_init(self):
"""
Set up new progress status
"""
status = {}
status['log_file'] = {'status': self.session.config.log_file, 'progress': 0}
status['session'] = self.session._session['id']
for flow in self.session.flow:
if flow['name'] == 'download':
status[flow['name']] = {'status': None, 'progress': 0, 'total': 0}
elif flow['name'].endswith('process'):
status[flow['name']] = {'status': None, 'progress': {}}
elif flow['name'] == 'release':
status[flow['name']] = {'status': None, 'progress': ''}
else:
status[flow['name']] = {'status': None, 'progress': 0}
MongoConnector.banks.update({'name': self.name}, {'$set': {'status': status}})
def wf_progress_end(self):
"""
Reset progress status when workflow is over
"""
return True
def wf_progress(self, task, status):
"""
Update bank status
"""
subtask = 'status.' + task + '.status'
MongoConnector.banks.update({'name': self.name}, {'$set': {subtask: status}})
def wf_init(self):
"""
Initialize workflow
"""
logging.info('Workflow:wf_init')
data_dir = self.session.config.get('data.dir')
lock_dir = self.session.config.get('lock.dir', default=data_dir)
if not os.path.exists(lock_dir):
os.mkdir(lock_dir)
lock_file = os.path.join(lock_dir, self.name + '.lock')
maintenance_lock_file = os.path.join(lock_dir, 'biomaj.lock')
if os.path.exists(maintenance_lock_file):
logging.error('Biomaj is in maintenance')
return False
if os.path.exists(lock_file):
logging.error('Bank ' + self.name + ' is locked, a process may be in progress, else remove the lock file ' + lock_file)
return False
f = open(lock_file, 'w')
f.write('1')
f.close()
self.wf_progress_init()
return True
def wf_over(self):
"""
Workflow is over
"""
logging.info('Workflow:wf_over')
data_dir = self.session.config.get('data.dir')
lock_dir = self.session.config.get('lock.dir', default=data_dir)
lock_file = os.path.join(lock_dir, self.name + '.lock')
os.remove(lock_file)
return True
class RemoveWorkflow(Workflow):
"""
Workflow to remove a bank instance
"""
FLOW = [
{'name': 'init', 'steps': []},
{'name': 'removeprocess', 'steps': []},
{'name': 'remove_release', 'steps': []},
{'name': 'over', 'steps': []}
]
def __init__(self, bank, session):
"""
Instantiate a new workflow
:param bank: bank on which to apply the workflow
:type bank: Bank
:param session: session to remove
:type session: :class:`biomaj.session.Session`
"""
Workflow.__init__(self, bank, session)
logging.debug('New workflow')
self.session._session['remove'] = True
def wf_remove_release(self):
logging.info('Workflow:wf_remove_release')
if not self.session.get('update_session_id'):
logging.error('Bug: update_session_id not set in session')
return False
if os.path.exists(self.session.get_full_release_directory()):
shutil.rmtree(self.session.get_full_release_directory())
return self.bank.remove_session(self.session.get('update_session_id'))
def wf_removeprocess(self):
        logging.info('Workflow:wf_removeprocess')
metas = self.session._session['process']['removeprocess']
pfactory = RemoveProcessFactory(self.bank, metas, redis_client=self.redis_client, redis_prefix=self.redis_prefix)
res = pfactory.run()
self.session._session['process']['removeprocess'] = pfactory.meta_status
return res
class UpdateWorkflow(Workflow):
"""
Workflow for a bank update
"""
FLOW = [
{'name': 'init', 'steps': []},
{'name': 'check', 'steps': []},
{'name': 'depends', 'steps': []},
{'name': 'preprocess', 'steps': []},
{'name': 'release', 'steps': []},
{'name': 'download', 'steps': ['checksum', 'uncompress', 'copy', 'copydepends']},
{'name': 'postprocess', 'steps': ['metadata', 'stats']},
{'name': 'publish', 'steps': ['old_biomaj_api', 'clean_offline', 'delete_old', 'clean_old_sessions']},
{'name': 'over', 'steps': []}
]
def __init__(self, bank):
"""
Instantiate a new workflow
:param bank: bank on which to apply the workflow
:type bank: Bank
"""
Workflow.__init__(self, bank)
logging.debug('New workflow')
self.session._session['update'] = True
def wf_init(self):
err = super(UpdateWorkflow, self).wf_init()
if not err:
return False
offline_dir = self.session.get_offline_directory()
if not os.path.exists(offline_dir):
logging.debug('Create offline directory: %s' % (str(offline_dir)))
os.makedirs(offline_dir)
if self.options.get_option(Options.FROMSCRATCH):
return self.wf_clean_offline()
return True
def _md5(self, fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def _sha256(self, fname):
hash_sha256 = hashlib.sha256()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
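    # Checksum sidecar files are expected as '<file>.md5' / '<file>.sha256'
    # with the hex digest as the first whitespace-separated token (the format
    # produced by md5sum/sha256sum), which wf_checksum below compares against
    # a fresh digest of the downloaded file.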
def wf_checksum(self):
logging.info('Workflow:wf_checksum')
'''
if self.bank.config.get('file.md5.check', 'false') != 'true':
logging.info('Workflow:wf_checksum:skipping')
return True
'''
offline_dir = self.session.get_offline_directory()
error = False
for downloaded_file in self.downloaded_files:
downloaded_file_name = downloaded_file['name']
if 'save_as' in downloaded_file:
downloaded_file_name = downloaded_file['save_as']
md5_file = os.path.join(offline_dir, downloaded_file_name + '.md5')
if os.path.exists(md5_file):
with open(md5_file, 'r') as md5_content:
data = md5_content.read().split()
md5_cksum = data[0]
downloaded_file_md5 = self._md5(os.path.join(offline_dir, downloaded_file_name))
logging.debug('Wf_checksum:md5:%s:%s:%s' % (downloaded_file_name, downloaded_file_md5, md5_cksum))
if downloaded_file_md5 != md5_cksum:
logging.error('Invalid md5 checksum for file %s' % (downloaded_file_name))
error = True
sha256_file = os.path.join(offline_dir, downloaded_file_name + '.sha256')
if os.path.exists(sha256_file):
with open(sha256_file, 'r') as sha256_content:
data = sha256_content.read().split()
sha256_cksum = data[0]
downloaded_file_sha256 = self._sha256(os.path.join(offline_dir, downloaded_file_name))
logging.debug('Wf_checksum:sha256:%s:%s:%s' % (downloaded_file_name, downloaded_file_sha256, sha256_cksum))
if downloaded_file_sha256 != sha256_cksum:
logging.error('Invalid sha256 checksum for file %s' % (downloaded_file_name))
error = True
if error:
return False
return True
def wf_check(self):
"""
Basic checks
"""
logging.info('Workflow:wf_check')
return True
def wf_depends(self):
"""
Checks bank dependencies with other banks. If bank has dependencies, execute update on other banks first
"""
logging.info('Workflow:wf_depends')
# Always rescan depends, there might be a new release
self.session.set('depends', {})
res = self.bank.update_dependencies()
logging.info('Workflow:wf_depends:' + str(res))
if res and len(self.bank.depends) > 0:
depend_updated = False
for bdep in self.bank.depends:
logging.info('Workflow:wf_depends:' + bdep.name + ':' + str(bdep.session.get('update')))
if bdep.session.get('update'):
depend_updated = True
break
if not depend_updated:
logging.info('Workflow:wf_depends:no bank updated')
return res
def wf_copydepends(self):
"""
Copy files from dependent banks if needed
"""
logging.info('Workflow:wf_copydepends')
deps = self.bank.get_dependencies()
for dep in deps:
if self.bank.config.get(dep + '.files.move'):
                logging.info('Workflow:wf_copydepends:Files:Move:' + self.bank.config.get(dep + '.files.move'))
bdir = None
for bdep in self.bank.depends:
if bdep.name == dep:
bdir = bdep.session.get_full_release_directory()
break
if bdir is None:
logging.error('Could not find a session update for bank ' + dep)
return False
# b = self.bank.get_bank(dep, no_log=True)
locald = LocalDownload(bdir)
(file_list, dir_list) = locald.list()
locald.match(self.bank.config.get(dep + '.files.move').split(), file_list, dir_list)
bankdepdir = self.bank.session.get_full_release_directory() + "/" + dep
if not os.path.exists(bankdepdir):
os.mkdir(bankdepdir)
downloadedfiles = locald.download(bankdepdir)
locald.close()
if not downloadedfiles:
logging.info('Workflow:wf_copydepends:no files to copy')
return False
return True
def wf_preprocess(self):
"""
Execute pre-processes
"""
logging.info('Workflow:wf_preprocess')
metas = self.session._session['process']['preprocess']
pfactory = PreProcessFactory(self.bank, metas, redis_client=self.redis_client, redis_prefix=self.redis_prefix)
res = pfactory.run()
self.session._session['process']['preprocess'] = pfactory.meta_status
return res
def _close_download_service(self, dserv):
'''
Cleanup of downloader
'''
logging.info("Workflow:DownloadService:CleanSession")
if dserv:
dserv.clean()
dserv.close()
def __update_info(self, info):
'''
Update some info in db for current bank
'''
if info is not None:
MongoConnector.banks.update({'name': self.bank.name},
info)
def wf_release(self):
"""
Find current release on remote
"""
logging.info('Workflow:wf_release')
cf = self.session.config
if cf.get('ref.release') and self.bank.depends:
# Bank is a computed bank and we ask to set release to the same
# than an other dependant bank
depbank = self.bank.get_bank(cf.get('ref.release'), no_log=True)
got_match = False
got_update = False
for dep in self.bank.depends:
if dep.session.get('update'):
got_update = True
if dep.name == depbank.name:
self.session.set('release', dep.session.get('release'))
self.session.set('remoterelease', dep.session.get('remoterelease'))
got_match = True
if not got_match:
logging.error('Workflow:wf_release: no release found for bank ' + depbank.name)
return False
release = self.session.get('release')
self.__update_info({'$set': {'status.release.progress': str(release)}})
'''
MongoConnector.banks.update({'name': self.bank.name},
{'$set': {'status.release.progress': str(release)}})
'''
logging.info('Workflow:wf_release:FromDepends:' + depbank.name + ':' + self.session.get('release'))
if got_update:
index = 0
# Release directory exists, set index to 1
if os.path.exists(self.session.get_full_release_directory()):
index = 1
for x in range(1, 100):
if os.path.exists(self.session.get_full_release_directory() + '__' + str(x)):
index = x + 1
if index > 0:
self.session.set('release', release + '__' + str(index))
release = release + '__' + str(index)
self.session.previous_release = self.session.get('previous_release')
logging.info('Workflow:wf_release:previous_session:' + str(self.session.previous_release))
if self.session.get('release'):
# Release already set from a previous run or another bank
logging.info('Workflow:wf_release:session:' + str(self.session.get('release')))
if self.session.previous_release == self.session.get('release') and not self.session.config.get_bool('release.control', default=False):
logging.info('Workflow:wf_release:same_as_previous_session')
return self.no_need_to_update()
else:
return True
if self.session.config.get('release.file') == '' or self.session.config.get('release.file') is None:
logging.debug('Workflow:wf_release:norelease')
self.session.set('release', None)
return True
else:
# """""""""""""""""""""""
dserv = None
if self.bank.config.get('micro.biomaj.service.download', default=None) == '1':
dserv = DownloadClient(
self.bank.config.get('micro.biomaj.rabbit_mq'),
int(self.bank.config.get('micro.biomaj.rabbit_mq_port', default='5672')),
self.bank.config.get('micro.biomaj.rabbit_mq_virtualhost', default='/'),
self.bank.config.get('micro.biomaj.rabbit_mq_user', default=None),
self.bank.config.get('micro.biomaj.rabbit_mq_password', default=None)
)
else:
dserv = DownloadClient()
proxy = self.bank.config.get('micro.biomaj.proxy')
session = dserv.create_session(self.name, proxy)
logging.info("Workflow:wf_release:DownloadSession:" + str(session))
http_parse = HTTPParse(
cf.get('http.parse.dir.line'),
cf.get('http.parse.file.line'),
int(cf.get('http.group.dir.name')),
int(cf.get('http.group.dir.date')),
int(cf.get('http.group.file.name')),
int(cf.get('http.group.file.date')),
cf.get('http.group.file.date_format', default=None),
int(cf.get('http.group.file.size'))
)
proxy = cf.get('proxy')
if cf.get('release.proxy') is not None:
proxy = cf.get('release.proxy')
proxy_auth = cf.get('proxy_auth')
if cf.get('release.proxy_auth') is not None:
proxy_auth = cf.get('release.proxy_auth')
protocol = cf.get('protocol')
if cf.get('release.protocol') is not None:
protocol = cf.get('release.protocol')
server = cf.get('server')
if cf.get('release.server') is not None:
server = cf.get('release.server')
remote_dir = cf.get('remote.dir')
if cf.get('release.remote.dir') is not None:
remote_dir = cf.get('release.remote.dir')
params = None
keys = cf.get('url.params')
if keys is not None:
params = {}
keys = keys.split(',')
for key in keys:
param = cf.get(key.strip() + '.value')
params[key.strip()] = param.strip()
credentials = cf.get('server.credentials')
if cf.get('release.credentials') is not None:
credentials = cf.get('release.credentials')
save_as = None
method = 'GET'
if protocol == 'directhttp' or protocol == 'directhttps' or protocol == 'directftp':
save_as = cf.get('release.file')
remotes = [remote_dir]
remote_dir = '/'
method = cf.get('url.method')
if cf.get('release.url.method') is not None:
method = cf.get('release.url.method')
release_downloader = dserv.get_handler(
protocol,
server,
remote_dir,
credentials=credentials,
http_parse=http_parse,
http_method=method,
param=params,
proxy=proxy,
proxy_auth=proxy_auth,
save_as=save_as,
timeout_download=cf.get('timeout.download'),
offline_dir=self.session.get_offline_directory()
)
if release_downloader is None:
logging.error('Protocol ' + protocol + ' not supported')
self._close_download_service(dserv)
return False
if protocol == 'directhttp' or protocol == 'directhttps' or protocol == 'directftp':
release_downloader.set_files_to_download(remotes)
try:
(file_list, dir_list) = release_downloader.list()
except Exception as e:
self._close_download_service(dserv)
logging.exception('Workflow:wf_release:Exception:' + str(e))
return False
release_downloader.match([cf.get('release.file')], file_list, dir_list)
if len(release_downloader.files_to_download) == 0:
logging.error('release.file defined but does not match any file')
self._close_download_service(dserv)
return False
if len(release_downloader.files_to_download) > 1:
logging.error('release.file defined but matches multiple files')
self._close_download_service(dserv)
return False
if not cf.get('release.regexp'):
# Try to extract the release from the file name via the release.file regexp
rel = re.search(cf.get('release.file'), release_downloader.files_to_download[0]['name'])
if rel is None:
logging.error('release.file defined but does not capture a release from the file name')
self._close_download_service(dserv)
return False
release = rel.group(1)
else:
# Download and extract
tmp_dir = tempfile.mkdtemp('biomaj')
rel_files = release_downloader.download(tmp_dir)
rel_file = open(tmp_dir + '/' + rel_files[0]['name'])
rel_content = rel_file.read()
rel_file.close()
shutil.rmtree(tmp_dir)
rel = re.search(cf.get('release.regexp'), rel_content)
if rel is None:
logging.error('release.regexp defined but does not match any file content')
self._close_download_service(dserv)
return False
# If regexp contains matching group, else take whole match
if len(rel.groups()) > 0:
release = rel.group(1)
else:
release = rel.group(0)
release_downloader.close()
self._close_download_service(dserv)
if release_downloader.error:
logging.error('An error occurred during download')
return False
self.session.set('release', release)
self.session.set('remoterelease', release)
self.__update_info({'$set': {'status.release.progress': str(release)}})
'''
MongoConnector.banks.update(
{'name': self.bank.name},
{'$set': {'status.release.progress': str(release)}}
)
'''
# When restarting from scratch, a directory for this release may already
# exist; check for it and increment the local release if needed
if self.options.get_option(Options.FROMSCRATCH):
index = 0
# Release directory exists, set index to 1
if os.path.exists(self.session.get_full_release_directory()):
index = 1
for x in range(1, 100):
if os.path.exists(self.session.get_full_release_directory() + '__' + str(x)):
index = x + 1
if index > 0:
self.session.set('release', release + '__' + str(index))
release = release + '__' + str(index)
self.download_go_ahead = False
if self.options.get_option(Options.FROM_TASK) == 'download':
# A re-download within the same release was explicitly requested; proceed even though the release is unchanged
self.download_go_ahead = True
if not self.download_go_ahead and self.session.previous_release == self.session.get('remoterelease'):
if not self.session.config.get_bool('release.control', default=False):
logging.info('Workflow:wf_release:same_as_previous_session')
return self.no_need_to_update()
logging.info('Session:RemoteRelease:' + self.session.get('remoterelease'))
logging.info('Session:Release:' + self.session.get('release'))
return True
def no_need_to_update(self):
"""
Set status to over and update to False because there is no need to update the bank
"""
self.skip_all = True
self.session._session['status'][Workflow.FLOW_OVER] = True
self.wf_progress(Workflow.FLOW_OVER, True)
self.session._session['update'] = False
self.session.set('download_files', [])
self.session.set('files', [])
last_session = self.get_last_prod_session_for_release(self.session.get('remoterelease'))
self.session.set('release', last_session['release'])
self.wf_over()
return True
def get_last_prod_session_for_release(self, release):
"""
Find the last production session matching a release
"""
last_session = None
for prod in self.bank.bank['production']:
if prod['remoterelease'] == release:
# Search session related to this production release
for s in self.bank.bank['sessions']:
if s['id'] == prod['session']:
last_session = s
break
return last_session
def _load_local_files_from_session(self, session_id):
"""
Load local files for a session from the cache directory
"""
cache_dir = self.bank.config.get('cache.dir')
f_local_files = None
file_path = os.path.join(cache_dir, 'local_files_' + str(session_id))
if not os.path.exists(file_path):
return f_local_files
with open(file_path) as data_file:
f_local_files = json.load(data_file)
return f_local_files
def _load_download_files_from_session(self, session_id):
"""
Load download files for sessions from cache directory
"""
cache_dir = self.bank.config.get('cache.dir')
f_downloaded_files = None
file_path = os.path.join(cache_dir, 'files_' + str(session_id))
if not os.path.exists(file_path):
return f_downloaded_files
with open(file_path) as data_file:
f_downloaded_files = json.load(data_file)
return f_downloaded_files
def is_previous_release_content_identical(self):
"""
Checks if the previous release and the remote release are identical in release id and content.
Expects the release.control parameter to be set to true or 1; otherwise the check is skipped.
"""
if not self.session.config.get_bool('release.control', default=False):
return True
# Release ids differ, so the releases are different
if self.session.get('remoterelease') != self.session.previous_release:
logging.info('Workflow:wf_download:DifferentRelease')
return False
# Same release number, check further
previous_release_session = self.get_last_prod_session_for_release(self.session.previous_release)
if previous_release_session is None:
return False
previous_downloaded_files = self._load_download_files_from_session(previous_release_session.get('id'))
previous_release_session['download_files'] = previous_downloaded_files
if previous_downloaded_files is None:
# No info on the previous download, assume the matching release id is sufficient
logging.warn('Workflow:wf_download:SameRelease:download_files not available, cannot compare to previous release')
return True
nb_elts = len(previous_downloaded_files)
if self.session.get('download_files') is not None and nb_elts != len(self.session.get('download_files')):
# Number of files to download vs previously downloaded files differ
logging.info('Workflow:wf_download:SameRelease:Number of files differ')
return False
# Same number of files, check hash of files
list1 = sorted(previous_downloaded_files, key=lambda k: k['hash'])
list2 = sorted(self.session.get('download_files'), key=lambda k: k['hash'])
for index in range(0, nb_elts):
if list1[index]['hash'] != list2[index]['hash']:
return False
return True
def check_and_incr_release(self):
"""
Checks if local release already exists on disk. If it exists, create a new
local release, appending __X to the release.
:returns: str local release
"""
index = 0
release = self.session.get('release')
# Release directory exists, set index to 1
if os.path.exists(self.session.get_full_release_directory()):
index = 1
for x in range(1, 100):
if os.path.exists(self.session.get_full_release_directory() + '__' + str(x)):
index = x + 1
# If we found a directory for this release: XX or XX__Y
if index > 0:
self.session.set('release', release + '__' + str(index))
release = release + '__' + str(index)
logging.info('Workflow:wf_download:release:incr_release:' + release)
return release
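# Worked example of the increment above: if release '2021-05' already has
# directories '2021-05' and '2021-05__1' on disk, index ends up at 2 and the
# local release becomes '2021-05__2' (the remote release is unchanged).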
def _create_dir_structure(self, downloader, offline_dir):
"""
Create expected directory structure in offline directory before download
"""
logging.debug('Workflow:wf_download:create_dir_structure:start')
for rfile in downloader.files_to_download:
save_as = None
if 'save_as' not in rfile or rfile['save_as'] is None:
save_as = rfile['name']
else:
save_as = rfile['save_as']
file_dir = offline_dir + '/' + os.path.dirname(save_as)
try:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
except Exception as e:
logging.error(e)
logging.debug('Workflow:wf_download:create_dir_structure:done')
def _get_list_from_file(self, remote_list):
"""
Load files to download from a file
"""
if not os.path.exists(remote_list):
logging.info("remote.list " + remote_list + " does not exists, we suppose there is no new release available")
return None
data = []
with open(remote_list) as data_file:
data = json.load(data_file)
for rfile in data:
if 'year' not in rfile or 'month' not in rfile or 'day' not in rfile:
today = datetime.date.today()
rfile['month'] = today.month
rfile['day'] = today.day
rfile['year'] = today.year
if 'permissions' not in rfile:
rfile['permissions'] = ''
if 'group' not in rfile:
rfile['group'] = ''
if 'size' not in rfile:
rfile['size'] = 0
if 'hash' not in rfile:
rfile['hash'] = None
if 'root' not in rfile and self.session.config.get('remote.dir'):
rfile['root'] = self.session.config.get('remote.dir')
return data
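# A hedged sketch of the JSON expected in a remote.list file (keys mirror the
# defaults filled in above; file names and values are illustrative):
#
#     [
#       {"name": "/release-2021/data.fasta.gz", "size": 1048576,
#        "year": 2021, "month": 5, "day": 12, "hash": null,
#        "permissions": "", "group": "", "root": "/pub/mirror"}
#     ]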
def wf_download(self):
"""
Download remote files or use an available local copy from last production directory if possible.
"""
logging.info('Workflow:wf_download')
# flow = self.get_flow(Workflow.FLOW_DOWNLOAD)
downloader = None
cf = self.session.config
self.session.previous_release = self.session.get('previous_release')
if self.session.get('release') is not None:
self.session.config.set('localrelease', self.session.get('release'))
self.session.config.set('remoterelease', self.session.get('remoterelease'))
if self.session.config.get_bool('releaseonly', default=False):
return True
if cf.get('protocol') == 'none':
if self.session.get('release') is None:
logging.error('Workflow:wf_download:no download file but no release found')
return False
else:
logging.info('Workflow:wf_download:no download file expected')
self.downloaded_files = []
if not os.path.exists(self.session.get_full_release_directory()):
os.makedirs(self.session.get_full_release_directory())
return True
downloaders = []
pool_size = self.session.config.get('files.num.threads', default=None)
dserv = None
if self.bank.config.get('micro.biomaj.service.download', default=None) == '1':
dserv = DownloadClient(
self.bank.config.get('micro.biomaj.rabbit_mq'),
int(self.bank.config.get('micro.biomaj.rabbit_mq_port', default='5672')),
self.bank.config.get('micro.biomaj.rabbit_mq_virtualhost', default='/'),
self.bank.config.get('micro.biomaj.rabbit_mq_user', default=None),
self.bank.config.get('micro.biomaj.rabbit_mq_password', default=None),
)
else:
dserv = DownloadClient()
if pool_size:
dserv.set_queue_size(int(pool_size))
proxy = self.bank.config.get('micro.biomaj.proxy')
session = dserv.create_session(self.name, proxy)
logging.info("Workflow:wf_download:DownloadSession:" + str(session))
use_remote_list = False
http_parse = HTTPParse(
cf.get('http.parse.dir.line'),
cf.get('http.parse.file.line'),
int(cf.get('http.group.dir.name')),
int(cf.get('http.group.dir.date')),
int(cf.get('http.group.file.name')),
int(cf.get('http.group.file.date')),
cf.get('http.group.file.date_format', default=None),
int(cf.get('http.group.file.size'))
)
proxy = cf.get('proxy')
proxy_auth = cf.get('proxy_auth')
if cf.get('protocol') == 'multi':
"""
Search for:
protocol = multi
remote.file.0.protocol = directftp
remote.file.0.server = ftp.ncbi.org
remote.file.0.path = /musmusculus/chr1/chr1.fa
=> ftp://ftp.ncbi.org/musmusculus/chr1/chr1.fa
remote.file.1.protocol = directhttp
remote.file.1.server = ftp2.fr.debian.org
remote.file.1.path = debian/README.html
remote.file.1.method = GET
remote.file.1.params.keys = key1,key2
remote.file.1.params.key1 = value1
remote.file.1.params.key2 = value2
=> http://ftp2.fr.debian.org/debian/README.html?key1=value1&key2=value2
remote.file.1.protocol = directhttp
remote.file.1.server = ftp2.fr.debian.org
remote.file.1.path = debian/README.html
remote.file.1.method = POST
remote.file.1.params.keys = key1,key2
remote.file.1.params.key1 = value1
remote.file.1.params.key2 = value2
=> POST to http://ftp2.fr.debian.org/debian/README.html with parameters key1=value1, key2=value2
...
"""
# Creates multiple downloaders
i = 0
rfile = cf.get('remote.file.' + str(i) + '.path')
server = None
while rfile is not None:
protocol = cf.get('protocol')
if cf.get('remote.file.' + str(i) + '.protocol') is not None:
protocol = cf.get('remote.file.' + str(i) + '.protocol')
server = cf.get('server')
if cf.get('remote.file.' + str(i) + '.server') is not None:
server = cf.get('remote.file.' + str(i) + '.server')
params = None
keys = cf.get('remote.file.' + str(i) + '.params.keys')
if keys is not None:
params = {}
keys = keys.split(',')
for key in keys:
param = cf.get('remote.file.' + str(i) + '.params.' + key.strip())
params[key.strip()] = param.strip()
method = cf.get('remote.file.' + str(i) + '.method')
if method is None:
if cf.get('url.method') is not None:
method = cf.get('url.method')
else:
method = 'GET'
credentials = cf.get('remote.file.' + str(i) + '.credentials')
if not credentials:
credentials = cf.get('server.credentials')
remotes = [cf.get('remote.file.' + str(i) + '.path')]
save_as = cf.get('remote.file.' + str(i) + '.path')
if cf.get('remote.file.' + str(i) + '.name'):
save_as = cf.get('remote.file.' + str(i) + '.name')
subdownloader = dserv.get_handler(
protocol,
server,
'',
credentials=credentials,
http_parse=http_parse,
http_method=method,
param=params,
proxy=proxy,
proxy_auth=proxy_auth,
save_as=save_as,
timeout_download=cf.get('timeout.download'),
offline_dir=self.session.get_offline_directory()
)
subdownloader.set_files_to_download(remotes)
downloaders.append(subdownloader)
i += 1
rfile = cf.get('remote.file.' + str(i) + '.path')
else:
"""
Simple case, one downloader with regexp
"""
protocol = cf.get('protocol')
server = cf.get('server')
params = None
keys = cf.get('url.params')
if keys is not None:
params = {}
keys = keys.split(',')
for key in keys:
param = cf.get(key.strip() + '.value')
params[key.strip()] = param.strip()
method = cf.get('url.method')
if method is None:
method = 'GET'
credentials = cf.get('server.credentials')
remote_dir = cf.get('remote.dir')
if protocol == 'directhttp' or protocol == 'directhttps' or protocol == 'directftp':
remotes = [cf.get('remote.dir')[:-1]]
remote_dir = '/'
save_as = cf.get('target.name')
downloader = dserv.get_handler(
protocol,
server,
remote_dir,
credentials=credentials,
http_parse=http_parse,
http_method=method,
param=params,
proxy=proxy,
proxy_auth=proxy_auth,
save_as=save_as,
timeout_download=cf.get('timeout.download'),
offline_dir=self.session.get_offline_directory()
)
if protocol == 'directhttp' or protocol == 'directhttps' or protocol == 'directftp':
downloader.set_files_to_download(remotes)
remote_list = cf.get('remote.list', default=None)
if remote_list is not None:
logging.info("Use list from " + remote_list)
downloader.files_to_download = self._get_list_from_file(remote_list)
use_remote_list = True
downloaders.append(downloader)
self._close_download_service(dserv)
for downloader in downloaders:
if downloader is None:
logging.error('Protocol ' + str(cf.get('protocol')) + ' not supported')
return False
files_to_download = []
for downloader in downloaders:
if use_remote_list:
if not downloader.files_to_download:
self.session.set('remoterelease', self.session.previous_release)
return self.no_need_to_update()
else:
(file_list, dir_list) = downloader.list()
downloader.match(cf.get('remote.files', default='.*').split(), file_list, dir_list)
# If save_as is not defined, check whether the regexp captures groups that give the save path
for f in downloader.files_to_download:
if 'save_as' not in f or not f['save_as']:
f['save_as'] = f['name']
for p in cf.get('remote.files', default='.*').split():
if p.startswith('^'):
p = p.replace('^', '^/')
else:
p = '/' + p
res = re.match(p, f['name'])
if res is not None and res.groups() is not None and len(res.groups()) >= 1:
f['save_as'] = '/'.join(res.groups())
break
files_to_download += downloader.files_to_download
self.session.set('download_files', downloader.files_to_download)
self.session._session['stats']['nb_downloaded_files'] = len(files_to_download)
if self.session.get('release') and self.session.config.get_bool('release.control', default=False):
if self.session.previous_release == self.session.get('remoterelease'):
if self.is_previous_release_content_identical():
logging.info('Workflow:wf_release:same_as_previous_session')
return self.no_need_to_update()
else:
release = self.check_and_incr_release()
if self.session.get('release') is None:
# Not defined, or could not be determined earlier
# Set release to most recent file to download
release_dict = Utils.get_more_recent_file(downloader.files_to_download)
if release_dict is None:
today = datetime.datetime.now()
release_dict = {'year': today.year, 'month': today.month, 'day': today.day}
release = str(release_dict['year']) + '-' + str(release_dict['month']) + '-' + str(release_dict['day'])
if cf.get('release.format'):
release_date = datetime.datetime.now()
release_date = release_date.replace(year=int(release_dict['year']), month=int(release_dict['month']), day=int(release_dict['day']))
# Fix configparser problem between py2 and py3
release = release_date.strftime(cf.get('release.format').replace('%%', '%'))
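# Hedged illustration: release.format=%%Y-%%m-%%d in the .properties file
# (configparser doubles the percent signs) becomes '%Y-%m-%d' here and
# renders the release as e.g. '2021-05-12'.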
self.session.set('release', release)
self.session.set('remoterelease', release)
logging.info('Workflow:wf_download:release:remoterelease:' + self.session.get('remoterelease'))
logging.info('Workflow:wf_download:release:release:' + release)
self.__update_info({'$set': {'status.release.progress': str(release)}})
'''
MongoConnector.banks.update(
{'name': self.bank.name},
{'$set': {'status.release.progress': str(release)}}
)
'''
self.download_go_ahead = False
if self.options.get_option(Options.FROM_TASK) == 'download':
# A re-download within the same release was explicitly requested; proceed even though the release is unchanged
self.download_go_ahead = True
if not self.download_go_ahead and self.session.previous_release == self.session.get('remoterelease') and self.is_previous_release_content_identical():
logging.info('Workflow:wf_release:same_as_previous_session')
return self.no_need_to_update()
# We restart from scratch, check if directory with this release already exists
if self.options.get_option(Options.FROMSCRATCH) or self.options.get_option('release') is None:
release = self.check_and_incr_release()
self.session.config.set('localrelease', self.session.get('release'))
self.session.config.set('remoterelease', self.session.get('remoterelease'))
if self.session.config.get_bool('releaseonly', default=False):
return True
self.banks = MongoConnector.banks
self.bank.bank = self.banks.find_one({'name': self.name})
nb_prod_dir = len(self.bank.bank['production'])
offline_dir = self.session.get_offline_directory()
copied_files = []
# Check if already in offlinedir
files_in_offline = 0
nb_expected_files = 0
for downloader in downloaders:
keep_files = []
nb_expected_files += len(downloader.files_to_download)
if os.path.exists(offline_dir):
for file_to_download in downloader.files_to_download:
# If file is in offline dir and has same date and size, do not download again
if os.path.exists(offline_dir + '/' + file_to_download['name']):
try:
file_stat = os.stat(offline_dir + '/' + file_to_download['name'])
f_stat = datetime.datetime.fromtimestamp(os.path.getmtime(offline_dir + '/' + file_to_download['name']))
year = str(f_stat.year)
month = str(f_stat.month)
day = str(f_stat.day)
if str(file_stat.st_size) != str(file_to_download['size']) or \
str(year) != str(file_to_download['year']) or \
str(month) != str(file_to_download['month']) or \
str(day) != str(file_to_download['day']):
logging.debug('Workflow:wf_download:different_from_offline:' + file_to_download['name'])
keep_files.append(file_to_download)
else:
logging.debug('Workflow:wf_download:offline:' + file_to_download['name'])
files_in_offline += 1
copied_files.append(file_to_download)
except Exception as e:
# Could not get stats on file
logging.debug('Workflow:wf_download:offline:failed to stat file: ' + str(e))
os.remove(offline_dir + '/' + file_to_download['name'])
keep_files.append(file_to_download)
else:
keep_files.append(file_to_download)
downloader.files_to_download = keep_files
logging.info("Workflow:wf_download:nb_expected_files:" + str(nb_expected_files))
logging.info("Workflow:wf_download:nb_files_in_offline_dir:" + str(files_in_offline))
# If everything was already in offline dir
everything_present = True
for downloader in downloaders:
if len(downloader.files_to_download) > 0:
everything_present = False
break
if everything_present:
self.downloaded_files = []
logging.info("Workflow:wf_download:all_files_in_offline:skip download")
return True
for downloader in downloaders:
self._create_dir_structure(downloader, offline_dir)
self.download_go_ahead = False
if self.options.get_option(Options.FROM_TASK) == 'download':
# A re-download within the same release was explicitly requested; proceed even though the release is unchanged
self.download_go_ahead = True
if not self.options.get_option(Options.FROMSCRATCH) and not self.download_go_ahead and nb_prod_dir > 0:
# Get last production
last_production = self.bank.bank['production'][nb_prod_dir - 1]
# Get session corresponding to production directory
last_production_session = self.banks.find_one({'name': self.name, 'sessions.id': last_production['session']}, {'sessions.$': 1})
last_production_session_release_directory = self.session.get_full_release_directory(release=last_production['release'])
last_production_dir = os.path.join(last_production_session_release_directory, 'flat')
# Checks if some files can be copied instead of downloaded
last_production_files = None
if len(last_production_session['sessions']) > 0:
last_production_files = self._load_local_files_from_session(last_production_session['sessions'][0]['id'])
if not cf.get_bool('copy.skip', default=False):
for downloader in downloaders:
downloader.download_or_copy(last_production_files, last_production_dir)
everything_copied = True
for downloader in downloaders:
if len(downloader.files_to_download) > 0:
everything_copied = False
break
if everything_copied:
logging.info('Workflow:wf_download:all files copied from %s' % (str(last_production_dir)))
# return self.no_need_to_update()
logging.debug('Workflow:wf_download:Copy files from ' + last_production_dir)
for downloader in downloaders:
copied_files += downloader.files_to_copy
Utils.copy_files(downloader.files_to_copy, offline_dir)
downloader.close()
pool_size = self.session.config.get('files.num.threads', default=None)
dserv = None
if self.bank.config.get('micro.biomaj.service.download', default=None) == '1':
dserv = DownloadClient(
self.bank.config.get('micro.biomaj.rabbit_mq'),
int(self.bank.config.get('micro.biomaj.rabbit_mq_port', default='5672')),
self.bank.config.get('micro.biomaj.rabbit_mq_virtualhost', default='/'),
self.bank.config.get('micro.biomaj.rabbit_mq_user', default=None),
self.bank.config.get('micro.biomaj.rabbit_mq_password', default=None),
redis_client=self.redis_client,
redis_prefix=self.redis_prefix
)
if pool_size:
logging.debug('Set rate limiting: %s' % (str(pool_size)))
dserv.set_rate_limiting(int(pool_size))
else:
dserv = DownloadClient()
if pool_size:
dserv.set_queue_size(int(pool_size))
proxy = self.bank.config.get('micro.biomaj.proxy')
session = dserv.create_session(self.name, proxy)
logging.info("Workflow:wf_download:DownloadSession:" + str(session))
for downloader in downloaders:
for file_to_download in downloader.files_to_download:
operation = message_pb2.Operation()
operation.type = 1
message = message_pb2.DownloadFile()
message.bank = self.name
message.session = session
message.local_dir = offline_dir
remote_file = message_pb2.DownloadFile.RemoteFile()
protocol = downloader.protocol
remote_file.protocol = message_pb2.DownloadFile.Protocol.Value(protocol.upper())
if downloader.credentials:
remote_file.credentials = downloader.credentials
remote_file.server = downloader.server
if cf.get('remote.dir'):
remote_file.remote_dir = cf.get('remote.dir')
else:
remote_file.remote_dir = ''
if http_parse:
msg_http_parse = message_pb2.DownloadFile.HttpParse()
msg_http_parse.dir_line = http_parse.dir_line
msg_http_parse.file_line = http_parse.file_line
msg_http_parse.dir_name = http_parse.dir_name
msg_http_parse.dir_date = http_parse.dir_date
msg_http_parse.file_name = http_parse.file_name
msg_http_parse.file_date = http_parse.file_date
msg_http_parse.file_size = http_parse.file_size
if http_parse.file_date_format:
msg_http_parse.file_date_format = http_parse.file_date_format
remote_file.http_parse.MergeFrom(msg_http_parse)
biomaj_file = remote_file.files.add()
biomaj_file.name = file_to_download['name']
if 'root' in file_to_download and file_to_download['root']:
biomaj_file.root = file_to_download['root']
if downloader.param:
for key in list(downloader.param.keys()):
param = remote_file.param.add()
param.name = key
param.value = downloader.param[key]
if 'save_as' in file_to_download and file_to_download['save_as']:
biomaj_file.save_as = file_to_download['save_as']
if 'url' in file_to_download and file_to_download['url']:
biomaj_file.url = file_to_download['url']
if 'permissions' in file_to_download and file_to_download['permissions']:
biomaj_file.metadata.permissions = file_to_download['permissions']
if 'size' in file_to_download and file_to_download['size']:
biomaj_file.metadata.size = file_to_download['size']
if 'year' in file_to_download and file_to_download['year']:
biomaj_file.metadata.year = file_to_download['year']
if 'month' in file_to_download and file_to_download['month']:
biomaj_file.metadata.month = file_to_download['month']
if 'day' in file_to_download and file_to_download['day']:
biomaj_file.metadata.day = file_to_download['day']
if 'hash' in file_to_download and file_to_download['hash']:
biomaj_file.metadata.hash = file_to_download['hash']
if 'md5' in file_to_download and file_to_download['md5']:
biomaj_file.metadata.md5 = file_to_download['md5']
message.http_method = message_pb2.DownloadFile.HTTP_METHOD.Value(downloader.method.upper())
timeout_download = cf.get('timeout.download', default=None)
if timeout_download:
try:
message.timeout_download = int(timeout_download)
except Exception as e:
logging.error('Wrong timeout type for timeout.download: ' + str(e))
if self.span:
trace = message_pb2.Operation.Trace()
trace.trace_id = self.span.get_trace_id()
trace.span_id = self.span.get_span_id()
operation.trace.MergeFrom(trace)
message.remote_file.MergeFrom(remote_file)
operation.download.MergeFrom(message)
dserv.download_remote_file(operation)
logging.info("Workflow:wf_download:Download:Waiting")
download_error = False
try:
download_error = dserv.wait_for_download()
except Exception as e:
self._close_download_service(dserv)
logging.exception('Workflow:wf_download:Exception:' + str(e))
return False
except KeyboardInterrupt:
logging.warn("Ctrl-c received! Stop downloads...")
logging.warn("Running downloads will continue and process will stop.")
self._close_download_service(dserv)
return False
self._close_download_service(dserv)
self.downloaded_files = copied_files
for downloader in downloaders:
self.downloaded_files += downloader.files_to_download
if download_error:
logging.error('An error occurred during download')
return False
return True
def wf_uncompress(self):
"""
Uncompress downloaded archive files unless no.extract is set to true
"""
logging.info('Workflow:wf_uncompress')
if len(self.downloaded_files) == 0:
logging.info("Workflow:wf_uncompress:NoFileDownload:NoExtract")
return True
no_extract = self.session.config.get('no.extract')
if no_extract is None or no_extract == 'false':
for file in self.downloaded_files:
if 'save_as' not in file:
file['save_as'] = file['name']
nb_try = 1
not_ok = True
while nb_try < 3 and not_ok:
status = Utils.uncompress(self.session.get_offline_directory() + '/' + file['save_as'])
if status:
not_ok = False
else:
logging.warn('Workflow:wf_uncompress:Failure:' + file['name'] + ':' + str(nb_try))
nb_try += 1
if not_ok:
logging.error('Workflow:wf_uncompress:Failure:' + file['name'])
return False
else:
logging.info("Workflow:wf_uncompress:NoExtract")
return True
def wf_copy(self):
"""
Copy files from offline directory to release directory
"""
logging.info('Workflow:wf_copy')
if len(self.downloaded_files) == 0:
logging.info("Workflow:wf_copy:NoFileDownload:NoCopy")
return True
from_dir = os.path.join(self.session.config.get('data.dir'),
self.session.config.get('offline.dir.name'))
regexp = self.session.config.get('local.files', default='**/*').split()
to_dir = os.path.join(
self.session.config.get('data.dir'),
self.session.config.get('dir.version'),
self.session.get_release_directory(),
'flat'
)
local_files = Utils.copy_files_with_regexp(from_dir, to_dir, regexp, True)
self.session._session['files'] = local_files
if len(self.session._session['files']) == 0:
logging.error('Workflow:wf_copy:No file match in offline dir')
return False
return True
def wf_metadata(self):
"""
Update metadata with info gathered from processes
"""
logging.info('Workflow:wf_metadata')
self.bank.session.set('formats', {})
per_process_meta_data = self.session.get('per_process_metadata')
for proc in list(per_process_meta_data.keys()):
for meta_data in list(per_process_meta_data[proc].keys()):
session_formats = self.bank.session.get('formats')
if meta_data not in session_formats:
session_formats[meta_data] = per_process_meta_data[proc][meta_data]
else:
session_formats[meta_data] += per_process_meta_data[proc][meta_data]
return True
def wf_stats(self):
"""
Get some stats from current release data dir
"""
logging.info('Workflow:wf_stats')
do_stats = self.bank.config.get('data.stats')
if do_stats is None or do_stats == '0':
self.session.set('fullsize', 0)
return True
prod_dir = self.session.get_full_release_directory()
dir_size = Utils.get_folder_size(prod_dir)
self.session.set('fullsize', dir_size)
return True
def wf_postprocess(self):
"""
Execute post processes
"""
# Create a temporary symlink future_release to keep compatibility in case a
# process tries to access the directory by this name
future_link = os.path.join(
self.bank.config.get('data.dir'),
self.bank.config.get('dir.version'),
'future_release'
)
# prod_dir = self.session.get_full_release_directory()
to_dir = os.path.join(
self.bank.config.get('data.dir'),
self.bank.config.get('dir.version')
)
if os.path.lexists(future_link):
os.remove(future_link)
os.chdir(to_dir)
os.symlink(self.session.get_release_directory(), 'future_release')
logging.info('Workflow:wf_postprocess')
blocks = self.session._session['process']['postprocess']
pfactory = PostProcessFactory(self.bank, blocks, redis_client=self.redis_client, redis_prefix=self.redis_prefix)
res = pfactory.run()
self.session._session['process']['postprocess'] = pfactory.blocks
# In any way, delete symlink
if os.path.lexists(future_link):
os.remove(future_link)
return res
def wf_publish(self):
"""
Add *current* symlink to this release
"""
if self.bank.config.get_bool('auto_publish', default=False):
logging.info('Workflow:wf_publish')
self.bank.publish()
return True
if not self.options.get_option(Options.PUBLISH):
logging.info('Workflow:wf_publish:no')
return True
logging.info('Workflow:wf_publish')
self.bank.publish()
return True
def wf_old_biomaj_api(self):
"""
Generates a listingv1.<format> file containing the list of files for each directory declared in formats
"""
release_dir = self.session.get_full_release_directory()
for release_format in self.bank.session.get('formats'):
format_file = os.path.join(release_dir, 'listingv1.' + release_format.replace('/', '_'))
section = self.list_section(release_dir, release_format, release_format)
logging.debug("Worfklow:OldAPI:WriteListing: " + format_file)
fd = os.open(format_file, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
os.write(fd, json.dumps(section).encode('utf-8'))
os.close(fd)
return True
def list_section(self, base_dir, release_format, base_format):
"""
Get section files and sub-section from base_dir for directory release_format
:param base_dir: root directory
:type base_dir: str
:param release_format: sub directory to scan
:type release_format: str
:param base_format: first directory indicating the format
:type base_format: str
:return: dict section details
"""
section = {"name": release_format, "sections": [], "files": []}
format_dir = os.path.join(base_dir, release_format)
if not os.path.exists(format_dir):
logging.info("Worfklow:OldAPI:Format directory " + release_format + " does not exists, skipping")
return section
format_dir_list = os.listdir(format_dir)
for format_dir_file in format_dir_list:
if os.path.isfile(os.path.join(format_dir, format_dir_file)):
if base_format.lower() == 'blast':
if format_dir_file.endswith('.nal'):
fileName, fileExtension = os.path.splitext(format_dir_file)
section['files'].append(os.path.join(format_dir, fileName))
else:
section['files'].append(os.path.join(format_dir, format_dir_file))
else:
# This is a sub directory
new_section = self.list_section(format_dir, format_dir_file, base_format)
section['sections'].append(new_section)
return section
def wf_clean_offline(self):
"""
Clean offline directory
"""
logging.info('Workflow:wf_clean_offline')
if os.path.exists(self.session.get_offline_directory()):
shutil.rmtree(self.session.get_offline_directory())
return True
def wf_clean_old_sessions(self):
"""
Delete old sessions not related to a production directory or last run
"""
logging.info('Workflow:wf_clean_old_sessions')
self.bank.clean_old_sessions()
return True
def wf_delete_old(self):
"""
Delete old production dirs
"""
logging.info('Workflow:wf_delete_old')
if self.options.get_option(Options.FROM_TASK) is not None:
# This is a run on an already present release, skip delete
logging.info('Workflow:wf_delete_old:Skip')
return True
if not self.session.config.get('keep.old.version'):
keep = 1
else:
keep = int(self.session.config.get('keep.old.version'))
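# Hedged example: with keep.old.version=2, up to two past production
# releases are preserved in addition to the one being built (frozen and
# currently published releases are never removed, as checked below).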
# Current production dir is not yet in list
nb_prod = len(self.bank.bank['production'])
# save session during delete workflow
keep_session = self.bank.session
if nb_prod > keep:
for prod in self.bank.bank['production']:
if prod['release'] == keep_session.get('release'):
continue
if 'freeze' in prod and prod['freeze']:
continue
if self.bank.bank['current'] == prod['session']:
continue
if nb_prod - keep > 0:
nb_prod -= 1
session = self.bank.get_new_session(RemoveWorkflow.FLOW)
# Delete init and over because we are already in a run
i_init = -1
i_over = -1
for i in range(0, len(session.flow)):
if session.flow[i]['name'] == 'init':
i_init = i
if i_init >= 0:
del session.flow[i_init]
for i in range(0, len(session.flow)):
if session.flow[i]['name'] == 'over':
i_over = i
if i_over >= 0:
del session.flow[i_over]
session.set('action', 'remove')
session.set('release', prod['release'])
session.set('remoterelease', prod['remoterelease'])
session.set('update_session_id', prod['session'])
logging.info('Workflow:wf_delete_old:Delete:' + prod['release'])
res = self.bank.start_remove(session)
if not res:
logging.error('Workflow:wf_delete_old:ErrorDelete:' + prod['release'])
else:
break
# Set session back
self.bank.session = keep_session
return True
class ReleaseCheckWorkflow(UpdateWorkflow):
"""
Workflow to check the remote release of a bank without updating it
"""
FLOW = [
{'name': 'init', 'steps': []},
{'name': 'check', 'steps': []},
{'name': 'preprocess', 'steps': []},
{'name': 'release', 'steps': []},
{'name': 'download', 'steps': []},
{'name': 'over', 'steps': []}
]
def __init__(self, bank):
"""
Instantiate a new workflow
:param bank: bank on which to apply the workflow
:type bank: Bank
"""
UpdateWorkflow.__init__(self, bank)
logging.debug('New release check workflow')
self.session.config.set('releaseonly', 'true')
def wf_init(self):
"""
Initialize workflow, do not lock bank as it is not modified
If bank is already locked, stop workflow
"""
logging.info('Workflow:wf_init')
data_dir = self.session.config.get('data.dir')
lock_dir = self.session.config.get('lock.dir', default=data_dir)
lock_file = os.path.join(lock_dir, self.name + '.lock')
if os.path.exists(lock_file):
logging.error('Bank ' + self.name + ' is locked, a process may be in progress; if no process is running, remove the lock file ' + lock_file)
return False
return True
def wf_over(self):
"""
Workflow is over
"""
logging.info('Workflow:wf_over')
return True
def __update_info(self, info):
return
def wf_progress(self, task, status):
return
|
horkko/biomaj
|
biomaj/workflow.py
|
Python
|
agpl-3.0
| 74,620
|
[
"BLAST"
] |
1b98349be440a53b3febc210cb8389ce26833daffc938ae67aed05f5d3acc5db
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class AtomDft(MakefilePackage):
"""ATOM is a program for DFT calculations in atoms and pseudopotential
generation."""
homepage = "https://departments.icmab.es/leem/siesta/Pseudopotentials/"
url = "https://departments.icmab.es/leem/siesta/Pseudopotentials/Code/atom-4.2.6.tgz"
version('4.2.6', sha256='489f0d883af35525647a8b8f691e7845c92fe6b5a25b13e1ed368edfd0391ed2')
depends_on('libgridxc')
depends_on('xmlf90')
def edit(self, spec, prefix):
copy('arch.make.sample', 'arch.make')
@property
def build_targets(self):
return ['XMLF90_ROOT=%s' % self.spec['xmlf90'].prefix,
'GRIDXC_ROOT=%s' % self.spec['libgridxc'].prefix,
'FC=fc']
def install(self, spec, prefix):
mkdir(prefix.bin)
install('atm', prefix.bin)
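# Hedged usage sketch (standard Spack CLI; the spec is illustrative):
#
#     spack install atom-dft@4.2.6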
|
LLNL/spack
|
var/spack/repos/builtin/packages/atom-dft/package.py
|
Python
|
lgpl-2.1
| 1,050
|
[
"SIESTA"
] |
d243299ef29308f4c4cf317bc8acff65e9e843068b6a0c7aa8f396868bb15b9d
|
import logging
import king_phisher.plugins as plugin_opts
import king_phisher.server.database.manager as db_manager
import king_phisher.server.database.models as db_models
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import smoke_zephyr.utilities as utilities
try:
import sleekxmpp
except ImportError:
has_sleekxmpp = False
_sleekxmpp_ClientXMPP = object
else:
has_sleekxmpp = True
_sleekxmpp_ClientXMPP = sleekxmpp.ClientXMPP
EXAMPLE_CONFIG = """\
jid: king-phisher@<domain>
password: <password>
room: notifications@public.<domain>
server: <ip>:<port>
verify_cert: false
"""
class NotificationBot(_sleekxmpp_ClientXMPP):
def __init__(self, jid, password, room, verify_cert):
super(NotificationBot, self).__init__(jid, password)
self.add_event_handler('disconnect', self.on_xmpp_disconnect)
self.add_event_handler('session_start', self.on_xmpp_session_start)
self.add_event_handler('ssl_invalid_cert', self.on_xmpp_ssl_invalid_cert)
self.register_plugin('xep_0030') # service discovery
self.register_plugin('xep_0045') # multi-user chat
self.register_plugin('xep_0071') # xhtml im
self.register_plugin('xep_0199') # xmpp ping
self.room = room
self.verify_cert = verify_cert
self.logger = logging.getLogger('KingPhisher.Plugins.XMPPNotificationBot')
def send_notification(self, message):
ET = sleekxmpp.xmlstream.ET
xhtml = ET.Element('span')
xhtml.set('style', 'font-family: Monospace')
message_lines = message.split('\n')
for line in message_lines[:-1]:
p = ET.SubElement(xhtml, 'p')
p.text = line
ET.SubElement(xhtml, 'br')
p = ET.SubElement(xhtml, 'p')
p.text = message_lines[-1]
self.send_message(mto=self.room, mbody=message, mtype='groupchat', mhtml=xhtml)
def on_kp_db_new_campaign(self, sender, targets, session):
for campaign in targets:
self.send_notification("new campaign '{0}' created by {1}".format(campaign.name, campaign.user_id))
def on_kp_db_new_credentials(self, sender, targets, session):
for credential in targets:
message = db_manager.get_row_by_id(session, db_models.Message, credential.message_id)
self.send_notification("new credentials received from {0} for campaign '{1}'".format(message.target_email, message.campaign.name))
def on_kp_db_new_visit(self, sender, targets, session):
for visit in targets:
message = db_manager.get_row_by_id(session, db_models.Message, visit.message_id)
self.send_notification("new visit received from {0} for campaign '{1}'".format(message.target_email, message.campaign.name))
def on_xmpp_disconnect(self, _):
signals.db_session_inserted.disconnect(self.on_kp_db_new_campaign, sender='campaigns')
signals.db_session_inserted.disconnect(self.on_kp_db_new_credentials, sender='credentials')
signals.db_session_inserted.disconnect(self.on_kp_db_new_visit, sender='visits')
def on_xmpp_session_start(self, _):
self.send_presence()
self.get_roster()
self.plugin['xep_0045'].joinMUC(self.room, self.boundjid.user, wait=True)
signals.db_session_inserted.connect(self.on_kp_db_new_campaign, sender='campaigns')
signals.db_session_inserted.connect(self.on_kp_db_new_credentials, sender='credentials')
signals.db_session_inserted.connect(self.on_kp_db_new_visit, sender='visits')
self.send_notification('king phisher server notifications are now online')
def on_xmpp_ssl_invalid_cert(self, pem_cert):
if self.verify_cert:
self.logger.warning('received an invalid ssl certificate, disconnecting from the server')
self.disconnect(send_close=False)
else:
self.logger.warning('received an invalid ssl certificate, ignoring it per the configuration')
return
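# A minimal, hedged sketch of driving NotificationBot outside the plugin,
# mirroring Plugin.on_server_initialized below (JID, password, room and
# server address are placeholder assumptions):
#
#     bot = NotificationBot('king-phisher@example.com', 'secret',
#                           'notifications@public.example.com', verify_cert=False)
#     bot.connect(('192.0.2.1', 5222))
#     bot.process(block=False)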
class Plugin(plugins.ServerPlugin):
authors = ['Spencer McIntyre']
classifiers = ['Plugin :: Server :: Notifications']
title = 'XMPP Notifications'
description = """
A plugin which pushes notifications regarding the King Phisher server to a
specified XMPP server.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugin_opts.OptionString('jid', 'the username to login with'),
plugin_opts.OptionString('password', 'the password to login with'),
plugin_opts.OptionString('room', 'the room to send notifications to'),
plugin_opts.OptionString('server', 'the server to connect to'),
# verify_cert only functions when sleekxmpp supports it
plugin_opts.OptionBoolean('verify_cert', 'verify the ssl certificate', default=True)
]
req_min_version = '1.4.0'
req_packages = {
'sleekxmpp': has_sleekxmpp
}
version = '1.0.1'
def initialize(self):
logger = logging.getLogger('sleekxmpp')
logger.setLevel(logging.INFO)
self.bot = None
signals.server_initialized.connect(self.on_server_initialized)
return True
def on_server_initialized(self, server):
self.bot = NotificationBot(
self.config['jid'],
self.config['password'],
self.config['room'],
self.config['verify_cert']
)
self.bot.connect(utilities.parse_server(self.config['server'], 5222))
self.bot.process(block=False)
def finalize(self):
if self.bot is None:
return
self.bot.disconnect()
|
securestate/king-phisher-plugins
|
server/xmpp_notifications.py
|
Python
|
bsd-3-clause
| 5,126
|
[
"VisIt"
] |
bed2d2924a03b6eabeb388f23fe2aca534bdb7afa2143968f44d1224e83b51b6
|
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-ica:
Repairing artifacts with ICA
============================
This tutorial covers the basics of independent components analysis (ICA) and
shows how ICA can be used for artifact repair; an extended example illustrates
repair of ocular and heartbeat artifacts.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. Because ICA can be computationally
intense, we'll also crop the data to 60 seconds; and to save ourselves from
repeatedly typing ``mne.preprocessing`` we'll directly import a few functions
and classes from that submodule:
"""
import os
import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
corrmap)
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60.)
###############################################################################
# .. note::
# Before applying ICA (or any artifact repair strategy), be sure to observe
# the artifacts in your data to make sure you choose the right repair tool.
# Sometimes the right tool is no tool at all — if the artifacts are small
# enough you may not even need to repair them to get good analysis results.
# See :ref:`tut-artifact-overview` for guidance on detecting and
# visualizing various types of artifact.
#
# What is ICA?
# ^^^^^^^^^^^^
#
# Independent components analysis (ICA) is a technique for estimating
# independent source signals from a set of recordings in which the source
# signals were mixed together in unknown ratios. A common example of this is
# the problem of `blind source separation`_: with 3 musical instruments playing
# in the same room, and 3 microphones recording the performance (each picking
# up all 3 instruments, but at varying levels), can you somehow "unmix" the
# signals recorded by the 3 microphones so that you end up with a separate
# "recording" isolating the sound of each instrument?
#
# It is not hard to see how this analogy applies to EEG/MEG analysis: there are
# many "microphones" (sensor channels) simultaneously recording many
# "instruments" (blinks, heartbeats, activity in different areas of the brain,
# muscular activity from jaw clenching or swallowing, etc). As long as these
# various source signals are `statistically independent`_ and non-gaussian, it
# is usually possible to separate the sources using ICA, and then re-construct
# the sensor signals after excluding the sources that are unwanted.
#
#
# ICA in MNE-Python
# ~~~~~~~~~~~~~~~~~
#
# .. sidebar:: ICA and dimensionality reduction
#
# If you want to perform ICA with *no* dimensionality reduction (other than
# the number of Independent Components (ICs) given in ``n_components``, and
# any subsequent exclusion of ICs you specify in ``ICA.exclude``), simply
# pass ``n_components``.
#
# However, if you *do* want to reduce dimensionality, consider this
# example: if you have 300 sensor channels and you set ``n_components=50``
# during instantiation and pass ``n_pca_components=None`` to
# `~mne.preprocessing.ICA.apply`, then the first 50
# PCs are sent to the ICA algorithm (yielding 50 ICs), and during
# reconstruction `~mne.preprocessing.ICA.apply` will use the 50 ICs
# plus PCs number 51-300 (the full PCA residual). If instead you specify
# ``n_pca_components=120`` in `~mne.preprocessing.ICA.apply`, it will
# reconstruct using the 50 ICs plus the first 70 PCs in the PCA residual
# (numbers 51-120), thus discarding the smallest 180 components.
#
# **If you have previously been using EEGLAB**'s ``runica()`` and are
# looking for the equivalent of its ``'pca', n`` option to reduce
# dimensionality, set ``n_components=n`` during initialization and pass
# ``n_pca_components=n`` to `~mne.preprocessing.ICA.apply`.
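#
# As a hedged, illustrative sketch of the reconstruction options above (the
# component counts are assumptions for a ~300-channel recording, not a
# recommendation; ``filt_raw`` is the filtered Raw object created later in
# this tutorial)::
#
#     ica_50 = ICA(n_components=50)        # 50 PCs feed the ICA -> 50 ICs
#     ica_50.fit(filt_raw)
#     raw_full = ica_50.apply(raw.copy())  # default: ICs + full PCA residual
#     raw_reduced = ica_50.apply(raw.copy(), n_pca_components=120)  # ICs + PCs 51-120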
#
# MNE-Python implements three different ICA algorithms: ``fastica`` (the
# default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly
# widespread use; Picard is a newer (2017) algorithm that is expected to
# converge faster than FastICA and Infomax, and is more robust than other
# algorithms in cases where the sources are not completely independent, which
# typically happens with real EEG/MEG data. See
# :footcite:`AblinEtAl2018` for more information.
#
# The ICA interface in MNE-Python is similar to the interface in
# `scikit-learn`_: some general parameters are specified when creating an
# `~mne.preprocessing.ICA` object, then the `~mne.preprocessing.ICA` object is
# fit to the data using its `~mne.preprocessing.ICA.fit` method. The results of
# the fitting are added to the `~mne.preprocessing.ICA` object as attributes
# that end in an underscore (``_``), such as ``ica.mixing_matrix_`` and
# ``ica.unmixing_matrix_``. After fitting, the ICA component(s) that you want
# to remove must be chosen, and the ICA fit must then be applied to the
# `~mne.io.Raw` or `~mne.Epochs` object using the `~mne.preprocessing.ICA`
# object's `~mne.preprocessing.ICA.apply` method.
#
# As is typically done with ICA, the data are first scaled to unit variance and
# whitened using principal components analysis (PCA) before performing the ICA
# decomposition. This is a two-stage process:
#
# 1. To deal with different channel types having different units
# (e.g., Volts for EEG and Tesla for MEG), data must be pre-whitened.
# If ``noise_cov=None`` (default), all data of a given channel type is
# scaled by the standard deviation across all channels. If ``noise_cov`` is
# a `~mne.Covariance`, the channels are pre-whitened using the covariance.
# 2. The pre-whitened data are then decomposed using PCA.
#
# From the resulting principal components (PCs), the first ``n_components`` are
# then passed to the ICA algorithm if ``n_components`` is an integer number.
# It can also be a float between 0 and 1, specifying the **fraction** of
# explained variance that the PCs should capture; the appropriate number of
# PCs (i.e., just as many PCs as are required to explain the given fraction
# of total variance) is then passed to the ICA.
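#
# For instance (a hedged sketch; the 0.95 threshold is an arbitrary
# assumption)::
#
#     ica_var = ICA(n_components=0.95)  # as many PCs as needed to explain
#                                       # 95% of the variance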
#
# After visualizing the Independent Components (ICs) and excluding any that
# capture artifacts you want to repair, the sensor signal can be reconstructed
# using the `~mne.preprocessing.ICA` object's
# `~mne.preprocessing.ICA.apply` method. By default, signal
# reconstruction uses all of the ICs (less any ICs listed in ``ICA.exclude``)
# plus all of the PCs that were not included in the ICA decomposition (i.e.,
# the "PCA residual"). If you want to reduce the number of components used at
# the reconstruction stage, it is controlled by the ``n_pca_components``
# parameter (which will in turn reduce the rank of your data; by default
# ``n_pca_components=None`` resulting in no additional dimensionality
# reduction). The fitting and reconstruction procedures and the
# parameters that control dimensionality at various stages are summarized in
# the diagram below:
#
# .. graphviz:: ../../_static/diagrams/ica.dot
# :alt: Diagram of ICA procedure in MNE-Python
# :align: left
#
# See the Notes section of the `~mne.preprocessing.ICA` documentation
# for further details. Next we'll walk through an extended example that
# illustrates each of these steps in greater detail.
#
# Example: EOG and ECG artifact repair
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Visualizing the artifacts
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's begin by visualizing the artifacts that we want to repair. In this
# dataset they are big enough to see easily in the raw data:
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
###############################################################################
# We can get a summary of how the ocular artifact manifests across each channel
# type using `~mne.preprocessing.create_eog_epochs` like we did in the
# :ref:`tut-artifact-overview` tutorial:
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
###############################################################################
# Now we'll do the same for the heartbeat artifacts, using
# `~mne.preprocessing.create_ecg_epochs`:
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
###############################################################################
# Filtering to remove slow drifts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before we run the ICA, an important step is filtering the data to remove
# low-frequency drifts, which can negatively affect the quality of the ICA fit.
# The slow drifts are problematic because they reduce the independence of the
# assumed-to-be-independent sources (e.g., during a slow upward drift, the
# neural, heartbeat, blink, and other muscular sources will all tend to have
# higher values), making it harder for the algorithm to find an accurate
# solution. A high-pass filter with 1 Hz cutoff frequency is recommended.
# However, because filtering is a linear operation, the ICA solution found from
# the filtered signal can be applied to the unfiltered signal (see
# :footcite:`WinklerEtAl2015` for
# more information), so we'll keep a copy of the unfiltered
# `~mne.io.Raw` object around so we can apply the ICA solution to it
# later.
filt_raw = raw.copy()
filt_raw.load_data().filter(l_freq=1., h_freq=None)
###############################################################################
# Fitting and plotting the ICA solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. sidebar:: Ignoring the time domain
#
# The ICA algorithms implemented in MNE-Python find patterns across
# channels, but ignore the time domain. This means you can compute ICA on
# discontinuous `~mne.Epochs` or `~mne.Evoked` objects (not
# just continuous `~mne.io.Raw` objects), or only use every Nth
# sample by passing the ``decim`` parameter to ``ICA.fit()``.
#
# .. note:: `~mne.Epochs` used for fitting ICA should not be
# baseline-corrected. Because cleaning the data via ICA may
# introduce DC offsets, we suggest to baseline correct your data
# **after** cleaning (and not before), should you require
# baseline correction.
#
# Now we're ready to set up and fit the ICA. Since we know (from observing our
# raw data) that the EOG and ECG artifacts are fairly strong, we would expect
# those artifacts to be captured in the first few dimensions of the PCA
# decomposition that happens before the ICA. Therefore, we probably don't need
# a huge number of components to do a good job of isolating our artifacts
# (though it is usually preferable to include more components for a more
# accurate solution). As a first guess, we'll run ICA with ``n_components=15``
# (use only the first 15 PCA components to compute the ICA decomposition) — a
# very small number given that our data has over 300 channels, but with the
# advantage that it will run quickly and we will be able to tell easily whether
# worked or not (because we already know what the EOG / ECG artifacts should
# look like).
#
# ICA fitting is not deterministic (e.g., the components may get a sign
# flip on different runs, or may not always be returned in the same order), so
# we'll also specify a `random seed`_ so that we get identical results each
# time this tutorial is built by our web servers.
ica = ICA(n_components=15, max_iter='auto', random_state=97)
ica.fit(filt_raw)
###############################################################################
# Some optional parameters that we could have passed to the
# `~mne.preprocessing.ICA.fit` method include ``decim`` (to use only
# every Nth sample in computing the ICs, which can yield a considerable
# speed-up) and ``reject`` (for providing a rejection dictionary for maximum
# acceptable peak-to-peak amplitudes for each channel type, just like we used
# when creating epoched data in the :ref:`tut-overview` tutorial).
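#
# A minimal sketch of such a call (not executed here; the rejection
# thresholds are illustrative assumptions, matching the values used for
# epoching later in this file, and should be tuned per dataset)::
#
#     ica.fit(filt_raw, decim=3, reject=dict(mag=4e-12, grad=4000e-13))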
#
# Now we can examine the ICs to see what they captured.
# `~mne.preprocessing.ICA.plot_sources` will show the time series of the
# ICs. Note that in our call to `~mne.preprocessing.ICA.plot_sources` we
# can use the original, unfiltered `~mne.io.Raw` object:
raw.load_data()
ica.plot_sources(raw, show_scrollbars=False)
###############################################################################
# Here we can pretty clearly see that the first component (``ICA000``) captures
# the EOG signal quite well, and the second component (``ICA001``) looks a lot
# like `a heartbeat <qrs_>`_ (for more info on visually identifying Independent
# Components, `this EEGLAB tutorial`_ is a good resource). We can also
# visualize the scalp field distribution of each component using
# `~mne.preprocessing.ICA.plot_components`. These are interpolated based
# on the values in the ICA mixing matrix:
# sphinx_gallery_thumbnail_number = 9
ica.plot_components()
###############################################################################
# .. note::
#
# `~mne.preprocessing.ICA.plot_components` (which plots the scalp
# field topographies for each component) has an optional ``inst`` parameter
# that takes an instance of `~mne.io.Raw` or `~mne.Epochs`.
# Passing ``inst`` makes the scalp topographies interactive: clicking one
# will bring up a diagnostic `~mne.preprocessing.ICA.plot_properties`
# window (see below) for that component.
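#
# A minimal sketch of the interactive variant described in the note (not
# executed here)::
#
#     ica.plot_components(inst=raw)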
#
# In the plots above it's fairly obvious which ICs are capturing our EOG and
# ECG artifacts, but there are additional ways to visualize them, just to
# be sure. First, we can plot an overlay of the original signal against the
# reconstructed signal with the artifactual ICs excluded, using
# `~mne.preprocessing.ICA.plot_overlay`:
# blinks
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats
ica.plot_overlay(raw, exclude=[1], picks='mag')
###############################################################################
# We can also plot some diagnostics of each IC using
# `~mne.preprocessing.ICA.plot_properties`:
ica.plot_properties(raw, picks=[0, 1])
###############################################################################
# In the remaining sections, we'll look at different ways of choosing which ICs
# to exclude prior to reconstructing the sensor signals.
#
#
# Selecting ICA components manually
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Once we're certain which components we want to exclude, we can specify that
# manually by setting the ``ica.exclude`` attribute. Similar to marking bad
# channels, merely setting ``ica.exclude`` doesn't do anything immediately (it
# just adds the excluded ICs to a list that will get used later when it's
# needed). Once the exclusions have been set, ICA methods like
# `~mne.preprocessing.ICA.plot_overlay` will exclude those component(s)
# even if no ``exclude`` parameter is passed, and the list of excluded
# components will be preserved when using `mne.preprocessing.ICA.save`
# and `mne.preprocessing.read_ica`.
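#
# A minimal sketch of that round-trip (not executed here; the filename is a
# hypothetical choice, and MNE expects it to end in ``-ica.fif``)::
#
#     ica.save('sample-ica.fif')
#     ica = mne.preprocessing.read_ica('sample-ica.fif')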
ica.exclude = [0, 1] # indices chosen based on various plots above
###############################################################################
# Now that the exclusions have been set, we can reconstruct the sensor signals
# with artifacts removed using the `~mne.preprocessing.ICA.apply` method
# (remember, we're applying the ICA solution from the *filtered* data to the
# original *unfiltered* signal). Plotting the original raw data alongside the
# reconstructed data shows that the heartbeat and blink artifacts are repaired.
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
del reconst_raw
###############################################################################
# Using an EOG channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may have seemed easy to review the plots and manually select which ICs to
# exclude, but when processing dozens or hundreds of subjects this can become
# a tedious, rate-limiting step in the analysis pipeline. One alternative is to
# use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and
# automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here
# we'll use `~mne.preprocessing.ICA.find_bads_eog` to automatically find
# the ICs that best match the EOG signal, then use
# `~mne.preprocessing.ICA.plot_scores` along with our other plotting
# functions to see which ICs it picked. We'll start by resetting
# ``ica.exclude`` back to an empty list:
ica.exclude = []
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
ica.exclude = eog_indices
# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)
# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)
# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
###############################################################################
# Note that above we used `~mne.preprocessing.ICA.plot_sources` on both
# the original `~mne.io.Raw` instance and also on an
# `~mne.Evoked` instance of the extracted EOG artifacts. This can be
# another way to confirm that `~mne.preprocessing.ICA.find_bads_eog` has
# identified the correct components.
#
#
# Using a simulated channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you don't have an EOG channel,
# `~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that
# lets you designate a different channel to use as a proxy for EOG. You can
# use a single channel, or create a bipolar reference from frontal EEG
# sensors and use that as a virtual EOG channel. This carries a risk,
# however: you must hope that the frontal EEG
# channels only reflect EOG and not brain dynamics in the prefrontal cortex (or
# you must not care about those prefrontal signals).
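#
# A minimal sketch of the single-channel variant (not executed here;
# ``'Fp1'`` is an assumed frontal channel name, substitute one present in
# your own recording)::
#
#     eog_indices, eog_scores = ica.find_bads_eog(raw, ch_name='Fp1')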
#
# For ECG, it is easier: `~mne.preprocessing.ICA.find_bads_ecg` can use
# cross-channel averaging of magnetometer or gradiometer channels to construct
# a virtual ECG channel, so if you have MEG channels it is usually not
# necessary to pass a specific channel name.
# `~mne.preprocessing.ICA.find_bads_ecg` also has two options for its
# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics
# :footcite:`DammersEtAl2008`) and
# ``'correlation'`` (Pearson correlation between data and ECG channel).
ica.exclude = []
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
###############################################################################
# The last of these plots is especially useful: it shows us that the heartbeat
# artifact is coming through on *two* ICs, and we've only caught one of them.
# In fact, if we look closely at the output of
# `~mne.preprocessing.ICA.plot_sources` (online, you can right-click →
# "view image" to zoom in), it looks like ``ICA014`` has a weak periodic
# component that is in-phase with ``ICA001``. It might be worthwhile to re-run
# the ICA with more components to see if that second heartbeat artifact
# resolves out a little better:
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, max_iter='auto', random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
###############################################################################
# Much better! Now we've captured both ICs that are reflecting the heartbeat
# artifact (and as a result, we got two diagnostic plots: one for each IC that
# reflects the heartbeat). This demonstrates the value of checking the results
# of automated approaches like `~mne.preprocessing.ICA.find_bads_ecg`
# before accepting them.
# clean up memory before moving on
del raw, ica, new_ica
###############################################################################
# Selecting ICA components using template matching
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When dealing with multiple subjects, it is also possible to manually select
# an IC for exclusion on one subject, and then use that component as a
# *template* for selecting which ICs to exclude from other subjects' data,
# using `mne.preprocessing.corrmap` :footcite:`CamposViolaEtAl2009`.
# The idea behind
# `~mne.preprocessing.corrmap` is that the artifact patterns are similar
# enough across subjects that corresponding ICs can be identified by
# correlating the ICs from each ICA solution with a common template, and
# picking the ICs with the highest correlation strength.
# `~mne.preprocessing.corrmap` takes a list of ICA solutions, and a
# ``template`` parameter that specifies which ICA object and which component
# within it to use as a template.
#
# Since our sample dataset only contains data from one subject, we'll use a
# different dataset with multiple subjects: the EEGBCI dataset
# :footcite:`SchalkEtAl2004,GoldbergerEtAl2000`. The
# dataset has 109 subjects; we'll download just one run (a left/right hand
# movement task) from each of the first 4 subjects:
mapping = {
'Fc5.': 'FC5', 'Fc3.': 'FC3', 'Fc1.': 'FC1', 'Fcz.': 'FCz', 'Fc2.': 'FC2',
'Fc4.': 'FC4', 'Fc6.': 'FC6', 'C5..': 'C5', 'C3..': 'C3', 'C1..': 'C1',
'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'Cp5.': 'CP5',
'Cp3.': 'CP3', 'Cp1.': 'CP1', 'Cpz.': 'CPz', 'Cp2.': 'CP2', 'Cp4.': 'CP4',
'Cp6.': 'CP6', 'Fp1.': 'Fp1', 'Fpz.': 'Fpz', 'Fp2.': 'Fp2', 'Af7.': 'AF7',
'Af3.': 'AF3', 'Afz.': 'AFz', 'Af4.': 'AF4', 'Af8.': 'AF8', 'F7..': 'F7',
'F5..': 'F5', 'F3..': 'F3', 'F1..': 'F1', 'Fz..': 'Fz', 'F2..': 'F2',
'F4..': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ft7.': 'FT7', 'Ft8.': 'FT8',
'T7..': 'T7', 'T8..': 'T8', 'T9..': 'T9', 'T10.': 'T10', 'Tp7.': 'TP7',
'Tp8.': 'TP8', 'P7..': 'P7', 'P5..': 'P5', 'P3..': 'P3', 'P1..': 'P1',
'Pz..': 'Pz', 'P2..': 'P2', 'P4..': 'P4', 'P6..': 'P6', 'P8..': 'P8',
'Po7.': 'PO7', 'Po3.': 'PO3', 'Poz.': 'POz', 'Po4.': 'PO4', 'Po8.': 'PO8',
'O1..': 'O1', 'Oz..': 'Oz', 'O2..': 'O2', 'Iz..': 'Iz'
}
raws = list()
icas = list()
for subj in range(4):
# EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
raw = mne.io.read_raw_edf(fname)
# remove trailing `.` from channel names so we can set montage
raw.rename_channels(mapping)
raw.set_montage('standard_1005')
# high-pass filter
raw_filt = raw.copy().load_data().filter(l_freq=1., h_freq=None)
# fit ICA
ica = ICA(n_components=30, max_iter='auto', random_state=97)
ica.fit(raw_filt)
raws.append(raw)
icas.append(ica)
###############################################################################
# Now let's run `~mne.preprocessing.corrmap`:
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
corrmap(icas, template=(0, eog_inds[0]))
###############################################################################
# The first figure shows the template map, while the second figure shows all
# the maps that were considered a "match" for the template (including the
# template itself). There were only three matches from the four subjects;
# notice the output message ``No maps selected for subject(s) 1, consider a
# more liberal threshold``. By default the threshold is set automatically by
# trying several values; here it may have chosen a threshold that is too high.
# Let's take a look at the ICA sources for each subject:
for index, (ica, raw) in enumerate(zip(icas, raws)):
fig = ica.plot_sources(raw, show_scrollbars=False)
fig.subplots_adjust(top=0.9) # make space for title
fig.suptitle('Subject {}'.format(index))
###############################################################################
# Notice that subject 1 *does* seem to have an IC that looks like it reflects
# blink artifacts (component ``ICA000``). Notice also that subject 3 appears to
# have *two* components that are reflecting ocular artifacts (``ICA000`` and
# ``ICA002``), but only one was caught by `~mne.preprocessing.corrmap`.
# Let's try setting the threshold manually:
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
###############################################################################
# Now we get the message ``At least 1 IC detected for each subject`` (which is
# good). At this point we'll re-run `~mne.preprocessing.corrmap` with
# parameters ``label='blink', plot=False`` to *label* the ICs from each subject
# that capture the blink artifacts (without plotting them again).
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
plot=False)
print([ica.labels_ for ica in icas])
###############################################################################
# Notice that the first subject has 3 different labels for the IC at index 0:
# "eog/0/Fpz", "eog", and "blink". The first two were added by
# `~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by
# the last call to `~mne.preprocessing.corrmap`. Notice also that each
# subject has at least one IC index labelled "blink", and subject 3 has two
# components (0 and 2) labelled "blink" (consistent with the plot of IC sources
# above). The ``labels_`` attribute of `~mne.preprocessing.ICA` objects
# can also be manually edited to annotate the ICs with custom labels. They also
# come in handy when plotting:
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3], show_scrollbars=False)
###############################################################################
# As a final note, it is possible to extract ICs numerically using the
# `~mne.preprocessing.ICA.get_components` method of
# `~mne.preprocessing.ICA` objects. This will return a :class:`NumPy
# array <numpy.ndarray>` that can be passed to
# `~mne.preprocessing.corrmap` instead of the :class:`tuple` of
# ``(subject_index, component_index)`` we passed before, and will yield the
# same result:
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
###############################################################################
# An advantage of using this numerical representation of an IC to capture a
# particular artifact pattern is that it can be saved and used as a template
# for future template-matching tasks using `~mne.preprocessing.corrmap`
# without having to load or recompute the ICA solution that yielded the
# template originally. Put another way, when the template is a NumPy array, the
# `~mne.preprocessing.ICA` object containing the template does not need
# to be in the list of ICAs provided to `~mne.preprocessing.corrmap`.
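#
# A minimal sketch of persisting such a template (not executed here; it
# assumes ``import numpy as np`` and a hypothetical filename)::
#
#     np.save('eog_template.npy', template_eog_component)
#     template = np.load('eog_template.npy')
#     corrmap(icas, template=template, threshold=0.9)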
#
# .. LINKS
#
# .. _`blind source separation`:
# https://en.wikipedia.org/wiki/Signal_separation
# .. _`statistically independent`:
# https://en.wikipedia.org/wiki/Independence_(probability_theory)
# .. _`scikit-learn`: https://scikit-learn.org
# .. _`random seed`: https://en.wikipedia.org/wiki/Random_seed
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels
###############################################################################
# Compute ICA components on Epochs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ICA is now fit to epoched MEG data instead of the raw data.
# We assume that the non-stationary EOG artifacts have already been removed.
# The sources matching the ECG are automatically found and displayed.
#
# .. note::
# This example is computationally intensive, so it might take a few minutes
# to complete.
#
# Read and preprocess the data. Preprocessing consists of:
#
# - MEG channel selection
# - 1-30 Hz band-pass filter
# - epoching -0.2 to 0.5 seconds with respect to events
# - rejection based on peak-to-peak amplitude
#
# Note that we don't baseline correct the epochs here – we'll do this once
# cleaning with ICA is complete. Baseline correction before ICA is not
# recommended by the MNE-Python developers, as it doesn't guarantee optimal
# results.
filt_raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True).load_data()
filt_raw.filter(1, 30, fir_design='firwin')
# peak-to-peak amplitude rejection parameters
reject = dict(grad=4000e-13, mag=4e-12)
# create more (and longer) epochs, giving the ICA more exposure to the artifacts
events = mne.find_events(filt_raw, stim_channel='STI 014')
# don't baseline correct epochs
epochs = mne.Epochs(filt_raw, events, event_id=None, tmin=-0.2, tmax=0.5,
reject=reject, baseline=None)
###############################################################################
# Fit ICA model using the FastICA algorithm, detect and plot components
# explaining ECG artifacts.
ica = ICA(n_components=15, method='fastica', max_iter="auto").fit(epochs)
ecg_epochs = create_ecg_epochs(filt_raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto')
ica.plot_components(ecg_inds)
###############################################################################
# Plot the properties of the ECG components:
ica.plot_properties(epochs, picks=ecg_inds)
###############################################################################
# Plot the estimated sources of detected ECG related components:
ica.plot_sources(filt_raw, picks=ecg_inds)
###############################################################################
# References
# ^^^^^^^^^^
# .. footbibliography::
|
kambysese/mne-python
|
tutorials/preprocessing/plot_40_artifact_correction_ica.py
|
Python
|
bsd-3-clause
| 31,514
|
[
"Gaussian"
] |
75b7d90a386f2a89971796dee2662494cea8b8c44efba77697b99cfdd00d38f8
|
from views import *
from lookups import *
import requests
import re
from utils import *
import itertools
from config import config
if config.IMPORT_PYSAM_PRIMER3:
import pysam
import csv
#hpo lookup
import orm
def individuals_update(external_ids):
patients_db=get_db(app.config['DB_NAME_PATIENTS'])
users_db=get_db(app.config['DB_NAME_USERS'])
def f(eid):
p=patients_db.patients.find_one({'external_id':eid},{'_id':False})
        print(p['external_id'])
p['features']=[f for f in p.get('features',[]) if f['observed']=='yes']
if 'solved' in p:
if 'gene' in p['solved']:
p['solved']=[p['solved']['gene']]
else:
p['solved']=[]
else: p['solved']=[]
if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]
else: p['genes']=[]
p['genes']=list(frozenset(p['genes']+p['solved']))
p2=get_db().patients.find_one({'external_id':p['external_id']},{'rare_homozygous_variants_count':1,'rare_compound_hets_count':1, 'rare_variants_count':1,'total_variant_count':1})
if not p2: return p
p['rare_homozygous_variants_count']=p2.get('rare_homozygous_variants_count','')
p['rare_compound_hets_count']=p2.get('rare_compound_hets_count','')
p['rare_variants_count']=p2.get('rare_variants_count','')
p['total_variant_count']=p2.get('total_variant_count','')
#p['all_variants_count']=get_db().patients.find_one({'external_id':p['external_id']},{'_id':0,'all_variants_count':1})['all_variants_count']
#db.cache.find_one({"key" : "%s_blindness,macula,macular,retina,retinal,retinitis,stargardt_" % })
if '_id' in p: del p['_id']
return p
new_individuals=[f(eid) for eid in external_ids]
old_individuals=users_db.users.find_one({'user':session['user']}).get('individuals',[])
old_individuals=[ind for ind in old_individuals if ind['external_id'] not in external_ids]
individuals=new_individuals+old_individuals
users_db.users.update_one({'user':session['user']},{'$set':{'individuals':individuals}})
return individuals
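# Build a per-user summary of individuals from the Neo4j graph: observed HPO
# terms, candidate genes, and hom/het variant counts per person. Note that
# the Cypher statement below is composed via string interpolation, so `user`
# is assumed to be trusted input; a parameterised query would be safer.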
def get_individuals(user):
s="""
MATCH (u:User {user:'%s'})--(p:Person)-[:PersonToObservedTerm]->(t:Term),
(p)-[:CandidateGene]-(g:Gene)
RETURN p.personId as individual,
p.gender as gender,
collect(DISTINCT t) as phenotypes,
p.score as phenotypeScore,
size((p)<-[:HomVariantToPerson]-()) as hom_count,
size((p)<-[:HetVariantToPerson]-()) as het_count,
collect(DISTINCT g.gene_name) as genes;
""" % user
with neo4j_driver.session() as db_session:
result=db_session.run(s)
data = []
for r in result:
data.append({
'individual': r['individual'],
'gender': r['gender'],
'phenotypes': [dict(x) for x in r['phenotypes']],
'phenotypeScore': r['phenotypeScore'],
'hom_count': r['hom_count'],
'het_count': r['het_count'],
'genes': [y for y in r['genes']]
})
return data
@app.route('/my_patients_json')
@requires_auth
def my_patients_json():
users_db=get_db(app.config['DB_NAME_USERS'])
user=users_db.users.find_one({'user':session['user']})
individuals=get_individuals(user['user'])
return(jsonify(result=individuals))
# shows each patient,
# all_individuals
@app.route('/my_patients')
@requires_auth
def my_patients():
return render_template('my_patients.html')
# shows each individual,
# all_individuals
@app.route('/individuals_csv')
@requires_auth
def individuals_csv():
page=int(request.args.get('page',0))
number=int(request.args.get('number',200))
hpo_db=get_db(app.config['DB_NAME_HPO'])
def f(p):
        print(p['external_id'])
p['features']=[f for f in p.get('features',[]) if f['observed']=='yes']
if 'solved' in p:
if 'gene' in p['solved']:
p['solved']=[p['solved']['gene']]
else:
p['solved']=[]
else: p['solved']=[]
if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]
else: p['genes']=[]
p['genes']=list(frozenset(p['genes']+p['solved']))
p2=get_db().patients.find_one({'external_id':p['external_id']},{'rare_homozygous_variants_count':1,'rare_compound_hets_count':1, 'rare_variants_count':1,'total_variant_count':1})
if not p2: return p
p['rare_homozygous_variants_count']=p2.get('rare_homozygous_variants_count','')
p['rare_compound_hets_count']=p2.get('rare_compound_hets_count','')
p['rare_variants_count']=p2.get('rare_variants_count','')
p['total_variant_count']=p2.get('total_variant_count','')
#p['all_variants_count']=get_db().patients.find_one({'external_id':p['external_id']},{'_id':0,'all_variants_count':1})['all_variants_count']
#db.cache.find_one({"key" : "%s_blindness,macula,macular,retina,retinal,retinitis,stargardt_" % })
return p
conn=PhenotipsClient()
all_patients=conn.get_patient(session=session).get('patientSummaries',[])
all_eids=[p['eid'] for p in all_patients if p['eid']]
total=len(all_eids)
print('TOTAL NUMBER OF PATIENTS',total)
patients=conn.get_patient(session=session,start=page*number,number=number).get('patientSummaries',[])
eids=[p['eid'] for p in patients if p['eid']]
print(eids)
patients=get_db(app.config['DB_NAME_PATIENTS']).patients.find({'external_id':{'$in':eids}})
#patients=get_db(app.config['DB_NAME_PATIENTS']).patients.find({'external_id':re.compile('^IRDC')},{'pubmedBatch':0})
individuals=[f(p) for p in patients if 'external_id' in p]
# family_history":{"consanguinity":true}
#if session['user']=='demo': for ind in individuals: ind['external_id']=encrypt(ind['external_id'])
#return render_template('individuals_page.html',individuals=individuals,page=page,number=number,total=total)
return '\n'.join([','.join([ind['external_id'],ind['total_variant_count'],ind['rare_variants_count']]) for ind in individuals])
|
Withington/phenopolis
|
views/my_patients.py
|
Python
|
mit
| 6,095
|
[
"pysam"
] |
a1f3ed17f6c52decdd7e41ec46a0fc7ae8027637849a144bd2cdddd09fb494f4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example script loading and executing a simulation with the
Hay et al. 2011 L5b-pyramidal cell model, which is implemented by default
using templates.
This script assumes that the model files are downloaded and unzipped inside
this folder from ModelDB:
http://senselab.med.yale.edu/modeldb/ShowModel.asp?model=139653
The mod-files inside /L5bPCmodelsEH/mod/ must be compiled using nrnivmodl.
Note that LFPy can only deal with one cell at a time; creating several
cell objects will slow everything down, but each cell *should* get the correct
cell responses.
Execution:
python example_loadL5bPCmodelsEH.py
Copyright (C) 2017 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import LFPy
import neuron
import matplotlib.pyplot as plt
import os
import sys
if sys.version < '3':
from urllib2 import urlopen
else:
from urllib.request import urlopen
import zipfile
import ssl
from warnings import warn
# Fetch Hay et al. 2011 model files
if not os.path.isfile('L5bPCmodelsEH/morphologies/cell1.asc'):
# get the model files:
url = '{}{}'.format('http://senselab.med.yale.edu/ModelDB/eavBinDown.asp',
'?o=139653&a=23&mime=application/zip')
u = urlopen(url, context=ssl._create_unverified_context())
    # open in binary mode, since urlopen().read() returns bytes
    localFile = open('L5bPCmodelsEH.zip', 'wb')
localFile.write(u.read())
localFile.close()
# unzip:
myzip = zipfile.ZipFile('L5bPCmodelsEH.zip', 'r')
myzip.extractall('.')
myzip.close()
# compile mod files every time, because of incompatibility with Mainen96 files:
if "win32" in sys.platform:
pth = "L5bPCmodelsEH/mod/"
    warn("no auto-compile of NMODL (.mod) files on Windows. "
+ "Run mknrndll from NEURON bash in the folder "
+ "L5bPCmodelsEH/mod and rerun example script")
if pth not in neuron.nrn_dll_loaded:
neuron.h.nrn_load_dll(pth + "nrnmech.dll")
neuron.nrn_dll_loaded.append(pth)
else:
os.system('''
cd L5bPCmodelsEH/mod/
nrnivmodl
''')
neuron.load_mechanisms('L5bPCmodelsEH/mod/')
# remove cells from previous script executions
neuron.h('forall delete_section()')
# cell parameters with additional arguments for the TemplateCell-class.
# Note that 'morphology' is required, even though it is loaded through
# 'templateargs'!
# Reason is LFPy looks for a default rotation .rot-file.
cellParams = {
'morphology': 'L5bPCmodelsEH/morphologies/cell1.asc',
'templatefile': ['L5bPCmodelsEH/models/L5PCbiophys3.hoc',
'L5bPCmodelsEH/models/L5PCtemplate.hoc'],
'templatename': 'L5PCtemplate',
'templateargs': 'L5bPCmodelsEH/morphologies/cell1.asc',
'nsegs_method': None,
'v_init': -80,
'tstart': 0,
'tstop': 3000,
'dt': 2**-3,
'verbose': True,
'extracellular': False,
}
# Use the TemplateCell-class to create the cell
cell = LFPy.TemplateCell(**cellParams)
# some stimuli
PointProcParams = {
'idx': 0,
'record_current': False,
'pptype': 'IClamp',
'amp': 0.793,
'dur': 2000,
'delay': 700,
}
pointProcess = LFPy.StimIntElectrode(cell, **PointProcParams)
# run simulation
cell.simulate(rec_variables=[])
# plot response
plt.plot(cell.tvec, cell.somav)
plt.show()
|
LFPy/LFPy
|
examples/example_loadL5bPCmodelsEH.py
|
Python
|
gpl-3.0
| 3,737
|
[
"NEURON"
] |
0183169ce60210a3ca6eea311b0b9d7d0e494ea808eed5b1822893c4838a19ad
|
import os
import numpy as np
from functools import partial
from menpo.base import name_of_callable
from menpo.shape import bounding_box
from menpofit.visualize import print_progress
from menpo.visualize import print_dynamic
from menpo.transform import Scale
from menpo.image import Image
from .correlationfilter import CorrelationFilter
from .normalisation import (normalise_norm_array, image_normalisation,
create_cosine_mask)
from .feature import fast_dsift_hsi
from .result import DetectionResult, print_str, ClassificationResult
def data_dir_path():
r"""
The path to the data folder.
:type: `pathlib.Path`
"""
from pathlib import Path # to avoid cluttering the menpo.base namespace
return Path(os.path.abspath(__file__)).parent / 'data'
def load_pretrained_model():
r"""
Method that loads a pretrained classification model.
:type: `Classification`
"""
import menpo.io as mio
return mio.import_pickle(data_dir_path() / 'pretrained_model.pkl')
def get_bounding_box(center, shape):
r"""
Method that returns a bounding box PointDirectedGraph, given the box center
and shape.
Parameters
----------
center : (`float`, `float`)
The box center.
shape : (`int`, `int`)
The box shape.
Returns
-------
bbox : `menpo.shape.PointDirectedGraph`
The bounding box
"""
half_size = np.asarray(shape) / 2
return bounding_box((center[0] - half_size[0], center[1] - half_size[1]),
(center[0] + half_size[0], center[1] + half_size[1]))
def response_thresholding(response, threshold, filter_shape, scale,
correction_transform):
r"""
Method for selecting candidate detections by thresholding the response map.
The bounding boxes of these detections are transformed back to the original
image resolution.
Parameters
----------
response : `ndarray`
The response map.
threshold : `float`
The score threshold to use selecting candidate locations.
filter_shape : (`int`, `int`)
The shape of the filter.
scale : `float`
The current scale factor.
correction_transform : `menpo.transform.AffineTransform`
The transform object to go back to the original image resolution.
Returns
-------
bboxes : `list` of `menpo.shape.PointDirectedGraph`
The list of selected bounding boxes in the original image resolution.
scores : `list`
The corresponding scores.
"""
    # Find all response values above the threshold
all_x, all_y = np.nonzero(response >= threshold)
# Find corresponding scores
scores = response[response >= threshold]
# Create bounding boxes for the above candidate detections. Note that the
# bounding boxes need to be transformed to the original image resolution.
bboxes = []
for x, y in zip(all_x, all_y):
# Get bounding box at current scale
bbox = get_bounding_box((x, y), filter_shape)
# Transform bounding box to original scale (scale = 1)
bbox = Scale(1 / scale, n_dims=2).apply(bbox)
# Apply the correction affine transform to go to original image
# resolution
if correction_transform is not None:
bbox = correction_transform.apply(bbox)
bboxes.append(bbox)
return bboxes, list(scores)
def non_max_suppression(bboxes, scores, overlap_thresh):
r"""
Faster Non-Maximum Suppression by Malisiewicz et al.
Parameters
----------
bboxes : `list` of `menpo.shape.PointDirectedGraph`
The candidate bounding boxes.
scores : `list` of `float`
The corresponding scores per bounding box.
overlap_thresh : `float`
The overlapping threshold.
Returns
-------
bboxes : `list` of `menpo.shape.PointDirectedGraph`
The list of final bounding boxes in the original image resolution.
scores : `list`
The corresponding scores.
"""
# Malisiewicz et al. method.
# if there are no boxes, return an empty list
if len(bboxes) == 0:
return [], []
# grab the coordinates of the bounding boxes
x1 = np.empty((len(bboxes), 1))
y1 = np.empty((len(bboxes), 1))
x2 = np.empty((len(bboxes), 1))
y2 = np.empty((len(bboxes), 1))
for i, b in enumerate(bboxes):
x1[i] = np.min(b.points[:, 0])
y1[i] = np.min(b.points[:, 1])
x2[i] = np.max(b.points[:, 0])
y2[i] = np.max(b.points[:, 1])
sc = np.asarray(scores) # score confidence
# initialize the list of picked indexes
pick = []
    # compute the area of the bounding boxes and sort the bounding
    # boxes by their confidence score (ascending, so the best box is last)
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(sc)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
        # delete all indexes from the index list that have overlap above
        # the threshold (plus the current index itself)
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlap_thresh)[0])))
    # return only the bounding boxes and scores that were picked
return [bboxes[i] for i in pick], [scores[i] for i in pick]
def attach_bboxes_to_image(image, bboxes):
r"""
Method that attaches the given bounding boxes to the landmark manager of the
provided image.
"""
for i, bbox in enumerate(bboxes):
image.landmarks['bbox_{:0{}d}'.format(i, len(str(len(bboxes))))] = bbox
class Detector(object):
r"""
Class for training a multi-channel correlation filter object detector.
Parameters
----------
images : `list` of `menpo.image.Image`
The training images from which to learn the detector.
algorithm : ``{'mosse', 'mccf'}``, optional
If 'mosse', then the Minimum Output Sum of Squared Errors (MOSSE)
filter [1] will be used. If 'mccf', then the Multi-Channel Correlation
(MCCF) filter [2] will be used.
filter_shape : (`int`, `int`), optional
The shape of the filter.
features : `callable`, optional
The holistic dense features to be extracted from the images.
normalisation : `callable`, optional
The callable to be used for normalising the images.
cosine_mask : `bool`, optional
If ``True``, then a cosine mask (Hanning window) will be applied on the
images.
response_covariance : `int`, optional
The covariance of the Gaussian desired response that will be used during
training of the correlation filter.
l : `float`, optional
Regularization parameter of the correlation filter.
boundary : ``{'constant', 'symmetric'}``, optional
Determines the type of padding that will be applied on the images.
prefix : `str`, optional
The prefix of the progress bar.
verbose : `bool`, optional
If ``True``, then a progress bar is printed.
References
----------
.. [1] D. S. Bolme, J. R. Beveridge, B. A. Draper, and Y. M. Lui. "Visual
Object Tracking using Adaptive Correlation Filters", IEEE Proceedings
of International Conference on Computer Vision and Pattern Recognition
(CVPR), 2010.
.. [2] H. K. Galoogahi, T. Sim, and Simon Lucey. "Multi-Channel
Correlation Filters". IEEE Proceedings of International Conference on
Computer Vision (ICCV), 2013.
"""
def __init__(self, images, algorithm='mosse', filter_shape=(25, 25),
features=fast_dsift_hsi, normalisation=normalise_norm_array,
cosine_mask=False, response_covariance=2, l=0.01,
boundary='symmetric', prefix='', verbose=True):
# Assign properties
self.algorithm = algorithm
self.features = features
self.filter_shape = filter_shape
self.normalisation = normalisation
self.cosine_mask = cosine_mask
self.boundary = boundary
        # Create the cosine mask if asked. A separate name is used for the
        # mask array so that it does not shadow the boolean ``cosine_mask``
        # flag.
        cosine_mask_arr = None
        if cosine_mask:
            cosine_mask_arr = create_cosine_mask(filter_shape)
        # Prepare data
        wrap = partial(print_progress, prefix=prefix + 'Pre-processing data',
                       verbose=verbose, end_with_newline=False)
        normalized_data = []
        for im in wrap(images):
            im = features(im)
            im = image_normalisation(im, normalisation=normalisation,
                                     cosine_mask=cosine_mask_arr)
normalized_data.append(im.pixels)
# Create data array
normalized_data = np.asarray(normalized_data)
# Train correlation filter
self.model = CorrelationFilter(
normalized_data, algorithm=algorithm, filter_shape=filter_shape,
response_covariance=response_covariance, l=l, boundary=boundary,
prefix=prefix, verbose=verbose)
@property
def n_channels(self):
r"""
Returns the model's number of channels.
:type: `int`
"""
return self.model.n_channels
def detect(self, image, scales='all', diagonal=400, score_thresh=0.025,
overlap_thresh=0.1, return_responses=False, prefix='Detecting ',
verbose=True):
r"""
Perform detection in a test image.
Parameters
----------
image : `menpo.image.Image`
The test image.
scales : `list` of `float` or ``'all'`` or None, optional
The scales on which to apply the detection. The scales must be
defined with respect to the original image resolution (after the
diagonal normalisation). If ``None``, then no pyramid is used. If
``'all'``, then ``scales = np.arange(0.05, 1.05, 0.05)``.
diagonal : `float` or ``None``, optional
The diagonal to which the input image will be rescaled before the
detection.
score_thresh : `float`, optional
The threshold to use for the response map (scores).
overlap_thresh: `float`, optional,
The overlapping threshold of non-maximum suppression.
return_responses : `bool`, optional
If ``True``, then the response maps per scale will be stored and
returned.
prefix : `str`, optional
The prefix of the progress bar.
verbose : `bool`, optional
If ``True``, a progress bar is printed.
Returns
-------
result : `DetectionResult`
A detection result object.
"""
# Normalise the input image size with respect to diagonal.
# Keep the transform object, because we need to transform it back after
# the detection is done.
if diagonal is not None:
tmp_image, correction_transform = image.rescale_to_diagonal(
diagonal, return_transform=True)
else:
tmp_image = image
correction_transform = None
# Parse scales argument
if scales == 'all':
scales = tuple(np.arange(0.05, 1.05, 0.05))
elif scales is None:
scales = [1.]
# Compute features of the original image
feat_image = self.features(tmp_image)
# Initialize lists
selected_bboxes = []
selected_scores = []
responses = None
if return_responses:
responses = []
# Get response and candidate bounding boxes at each scale
wrap = partial(print_progress, prefix=prefix, verbose=verbose,
end_with_newline=False, show_count=False)
for scale in wrap(list(scales)[::-1]):
# Scale image
if scale != 1:
# Scale feature image only if scale is different than 1
scaled_image = feat_image.rescale(scale)
else:
# Otherwise the image remains the same
scaled_image = feat_image
# Normalise the scaled image. Do not use cosine mask.
scaled_image = image_normalisation(
scaled_image, normalisation=self.normalisation, cosine_mask=None)
# Convolve image with filter
response = self.model.convolve(scaled_image, as_sum=True)
if return_responses:
responses.append(Image(response))
# Threshold the response and transform resulting bounding boxes to
# original image resolution
bboxes, scores = response_thresholding(
response, score_thresh, self.filter_shape, scale,
correction_transform)
# Updated selected bboxes and scores lists
selected_bboxes += bboxes
selected_scores += scores
# Perform non-maximum suppression
bboxes, scores = non_max_suppression(selected_bboxes, selected_scores,
overlap_thresh)
if verbose:
print_dynamic(print_str(bboxes, len(scales)))
# Return detection result object
return DetectionResult(image, bboxes, scores, scales, responses)
def view_spatial_filter(self, figure_id=None, new_figure=False,
channels='all', interpolation='bilinear',
cmap_name='afmhot', alpha=1., render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
r"""
View the multi-channel filter on the spatial domain.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36,
hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
bessel, mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
viewer : `ImageViewer`
The image viewing object.
"""
return self.model.view_spatial_filter(
figure_id=figure_id, new_figure=new_figure, channels=channels,
interpolation=interpolation, cmap_name=cmap_name, alpha=alpha,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
def view_frequency_filter(self, figure_id=None, new_figure=False,
channels='all', interpolation='bilinear',
cmap_name='afmhot', alpha=1., render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
r"""
View the multi-channel filter on the frequency domain.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36,
hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
bessel, mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
viewer : `ImageViewer`
The image viewing object.
"""
return self.model.view_frequency_filter(
figure_id=figure_id, new_figure=new_figure, channels=channels,
interpolation=interpolation, cmap_name=cmap_name, alpha=alpha,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
def __str__(self):
output_str = r"""Correlation Filter Detector
- Features: {}
- Channels: {}
""".format(name_of_callable(self.features), self.n_channels)
return output_str + self.model.__str__()
class Classification(object):
r"""
Class for training a filter-bank of multi-channel correlation filters for
object classification.
Parameters
----------
images : `list` of `list` of `menpo.image.Image`
The training images per class.
labels : `list` of `str`
The label per class.
algorithm : ``{'mosse', 'mccf'}``, optional
If 'mosse', then the Minimum Output Sum of Squared Errors (MOSSE)
filter [1] will be used. If 'mccf', then the Multi-Channel Correlation
(MCCF) filter [2] will be used.
filter_shape : (`int`, `int`), optional
The shape of the filter.
features : `callable`, optional
The holistic dense features to be extracted from the images.
normalisation : `callable`, optional
The callable to be used for normalising the images.
cosine_mask : `bool`, optional
If ``True``, then a cosine mask (Hanning window) will be applied on the
images.
response_covariance : `int`, optional
The covariance of the Gaussian desired response that will be used during
training of the correlation filter.
l : `float`, optional
Regularization parameter of the correlation filter.
boundary : ``{'constant', 'symmetric'}``, optional
Determines the type of padding that will be applied on the images.
verbose : `bool`, optional
If ``True``, then a progress bar is printed.
References
----------
.. [1] D. S. Bolme, J. R. Beveridge, B. A. Draper, and Y. M. Lui. "Visual
Object Tracking using Adaptive Correlation Filters", IEEE Proceedings
of International Conference on Computer Vision and Pattern Recognition
(CVPR), 2010.
.. [2] H. K. Galoogahi, T. Sim, and Simon Lucey. "Multi-Channel
Correlation Filters". IEEE Proceedings of International Conference on
Computer Vision (ICCV), 2013.
"""
def __init__(self, images, labels, algorithm='mosse',
filter_shape=(29, 29), features=fast_dsift_hsi,
normalisation=normalise_norm_array, cosine_mask=False,
response_covariance=2, l=0.01, boundary='symmetric',
verbose=True):
# Check images
if len(images) != len(labels):
raise ValueError('The provided images and labels have different '
'number of classes.')
# Assign properties
self.algorithm = algorithm
self.features = features
self.filter_shape = filter_shape
self.normalisation = normalisation
self.cosine_mask = cosine_mask
self.boundary = boundary
self.labels = labels
self.n_classes = len(labels)
# Train filters
self.models = []
for cl in range(self.n_classes):
class_str = 'Class {}: '.format(cl)
detector = Detector(
images[cl], algorithm=algorithm, filter_shape=filter_shape,
features=features, normalisation=normalisation,
cosine_mask=cosine_mask, response_covariance=response_covariance,
l=l, boundary=boundary, prefix=class_str, verbose=verbose)
self.models.append(detector)
def fit(self, image, scales='all', diagonal=400, score_thresh=0.025,
overlap_thresh=0.1, return_all_detections=True, verbose=True):
r"""
Fit a test image.
Parameters
----------
image : `menpo.image.Image`
The test image.
scales : `list` of `float` or ``'all'`` or None, optional
The scales on which to apply the detection. The scales must be
defined with respect to the original image resolution (after the
diagonal normalisation). If ``None``, then no pyramid is used. If
``'all'``, then ``scales = np.arange(0.05, 1.05, 0.05)``.
diagonal : `float` or ``None``, optional
The diagonal to which the input image will be rescaled before the
detection.
score_thresh : `float`, optional
The threshold to use for the response map (scores).
overlap_thresh: `float`, optional,
The overlapping threshold of non-maximum suppression.
return_all_detections : `bool`, optional
If ``True``, then all the detections from all filters will be
returned.
verbose : `bool`, optional
If ``True``, a progress bar is printed.
Returns
-------
result : `DetectionResult`
A detection result object.
"""
# Initialize lists
all_bboxes = []
all_scores = []
all_classnames = []
# initialize final result
classname = None
bbox = None
max_score = -np.inf
results = []
# For each class filter
for cl in range(self.n_classes):
# Perform detection
result = self.models[cl].detect(
image, scales=scales, diagonal=diagonal,
return_responses=False, score_thresh=score_thresh,
overlap_thresh=overlap_thresh,
prefix="Filter '{}'".format(self.labels[cl]), verbose=verbose)
# If at least one bounding box was returned, then check if there is
# a score larger than the current maximum.
if len(result.scores) > 0:
if np.max(result.scores) > max_score:
max_score = np.max(result.scores)
classname = self.labels[cl]
idx = np.argmax(result.scores)
bbox = result.bboxes[idx]
if return_all_detections:
all_bboxes += result.bboxes
all_scores += result.scores
all_classnames += [self.labels[cl]] * len(result.bboxes)
results.append(result)
if verbose:
if classname is not None:
print_dynamic("Detected class: '{}'".format(classname))
else:
print_dynamic('No detections.')
# Return all detected results, if required
all_detections = None
if return_all_detections:
all_detections = (all_bboxes, all_scores, all_classnames)
# Return a classification result object
return ClassificationResult(image, bbox, classname, scales, self.labels,
all_detections=all_detections)
def view_spatial_filters_widget(self, browser_style='buttons',
figure_size=(10, 8), style='coloured'):
r"""
Visualize the spatial filters using an interactive widget.
Parameters
----------
browser_style : {``'buttons'``, ``'slider'``}, optional
It defines whether the selector of the images will have the form of
plus/minus buttons or a slider.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
filters = [Image(m.model.correlation_filter) for m in self.models]
try:
from menpowidgets import visualize_images
visualize_images(filters, figure_size=figure_size,
style=style, browser_style=browser_style)
except ImportError:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError()
def view_frequency_filters_widget(self, browser_style='buttons',
figure_size=(10, 8), style='coloured'):
r"""
Visualize the frequency filters using an interactive widget.
Parameters
----------
browser_style : {``'buttons'``, ``'slider'``}, optional
It defines whether the selector of the images will have the form of
plus/minus buttons or a slider.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
filters = []
for m in self.models:
freq_f = np.abs(np.fft.fftshift(np.fft.fft2(m.model.correlation_filter)))
filters.append(Image(freq_f))
try:
from menpowidgets import visualize_images
visualize_images(filters, figure_size=figure_size,
style=style, browser_style=browser_style)
except ImportError:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError()
def __str__(self):
output_str = r"""Filter-bank of Correlation Filters Classification
- Classes: {}
- {}
- Features: {}
""".format(self.n_classes, self.labels, name_of_callable(self.features))
return output_str + self.models[0].__str__()
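# Example usage (a sketch; the image paths and class labels below are
# assumptions, not part of this module):
#
#     import menpo.io as mio
#     images = [list(mio.import_images('signs/stop/')),
#               list(mio.import_images('signs/yield/'))]
#     classifier = Classification(images, labels=['stop', 'yield'])
#     result = classifier.fit(mio.import_image('test_image.jpg'))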
|
nontas/trafficsignrecognition
|
trafficsignrecognition/base.py
|
Python
|
bsd-3-clause
| 32,400
|
[
"Gaussian"
] |
91cfc4adf5cdc7f672c62ed7b8a7cc06c488ec40c53ff0de65ac297f06c3f564
|
# -*- coding: utf-8 -*-
"""
mchem.fps
~~~~~~~~~
Functions for generating fingerprints using RDKit.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from collections import defaultdict
import logging
import pymongo
from rdkit import Chem
from rdkit.Chem import AllChem
log = logging.getLogger(__name__)
def generate(mol_collection, fp_collection, fingerprinter):
"""Generate a fingerprint for all molecules in a collection.
Example::
        generate(mols, fps, MorganFingerprinter(radius=2))
        generate(mols, fps, MorganFingerprinter(radius=2, length=1024))
:param mol_collection: MongoDB database collection containing molecules.
:param fp_collection: MongoDB database collection to store fingerprints.
:param fingerprinter: fingerprinter instance to generate fingerprint for each molecule.
"""
log.info('Generating %s fingerprints for %s into %s' % (fingerprinter.name, mol_collection.name, fp_collection.name))
success, skip = 0, 0
for molecule in mol_collection.find(timeout=False):
log.debug('Generating %s for %s' % (fingerprinter.name, molecule['_id']))
bits = fingerprinter.generate(Chem.Mol(molecule['rdmol']))
fp = {
'_id': molecule['_id'],
'bits': bits,
'count': len(bits)
}
try:
fp_collection.insert(fp)
log.debug('Inserted fingerprint for %s' % fp['_id'])
success += 1
except pymongo.errors.DuplicateKeyError:
log.debug('Skipped %s: Fingerprint already exists' % fp['_id'])
skip += 1
log.info('%s successes, %s skipped' % (success, skip))
log.info('Ensuring index on bits and counts for %s' % fp_collection.name)
fp_collection.ensure_index('bits')
fp_collection.ensure_index('count')
def count(fp_collection, count_collection):
"""Build collection containing total counts of all occurrences of each fingerprint bit."""
counts = defaultdict(int)
count_collection.drop()
log.info('Counting fingerprint bits in %s' % count_collection.name)
for fp in fp_collection.find(timeout=False):
log.debug('Processing %s' % fp['_id'])
for bit in fp['bits']:
counts[bit] += 1
for k, v in counts.items():
log.debug('Saving count %s: %s' % (k, v))
count_collection.insert({'_id': k, 'count': v})
class Fingerprinter(object):
"""Fingerprinter interface."""
def generate(self, mol):
"""Generate this fingerprint for a molecule."""
raise NotImplementedError('Fingerprinter subclasses must implement a generate method')
@property
def name(self):
"""Unique name for this fingerprint."""
raise NotImplementedError('Fingerprinter subclasses must implement a name property')
class MorganFingerprinter(Fingerprinter):
"""Class for generating morgan fingerprints."""
def __init__(self, radius=2, length=None):
"""Initialize with a radius and an optional length.
:param int radius: The Morgan fingerprint radius (default: 2).
:param length: The number of bits to optionally fold the fingerprint down to.
"""
self.radius = radius
self.length = length
def generate(self, mol):
"""Generate Morgan fingerprint for a molecule.
:param mol: The RDKit Mol to generate the fingerprint for.
"""
if self.length:
fp = AllChem.GetHashedMorganFingerprint(mol, radius=self.radius, nBits=self.length)
else:
fp = AllChem.GetMorganFingerprint(mol, radius=self.radius)
return sorted(fp.GetNonzeroElements().keys())
@property
def name(self):
"""A unique identifier for this fingerprint with the current settings."""
n = 'm%s' % self.radius
if self.length:
n = '%sl%s' % (n, self.length)
return n
def get_fingerprinter(name, radius, length=None):
fingerprinter = {
'morgan': MorganFingerprinter(radius=radius, length=length)
# Add other fingerprinters here in future
}[name]
return fingerprinter
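# Minimal end-to-end sketch (hypothetical database/collection names; assumes
# a local MongoDB instance whose 'rdmol' fields hold RDKit binary molecules):
#
#     import pymongo
#     db = pymongo.MongoClient().chem
#     fper = get_fingerprinter('morgan', radius=2, length=1024)
#     generate(db.molecules, db.morgan_fps, fper)
#     count(db.morgan_fps, db.morgan_counts)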
|
mcs07/mongodb-chemistry
|
mchem/fps.py
|
Python
|
mit
| 4,263
|
[
"RDKit"
] |
6c3e5055ec983fce452ac3b6c4c6457a46ab8dd2b7b31e6d82a00a97c2c3d10a
|
# coding: utf-8
from __future__ import unicode_literals, division
import glob
import logging
import shlex
import socket
import re
import time
from pkg_resources import parse_version
"""
This module implements basic kinds of jobs for QChem runs.
"""
import os
import shutil
import copy
import subprocess
from pymatgen.io.qchem import QcInput, QcOutput
from custodian.custodian import Job, gzip_dir
__author__ = "Xiaohui Qu"
__version__ = "0.1"
__maintainer__ = "Xiaohui Qu"
__email__ = "xhqu1981@gmail.com"
__status__ = "Alpha"
__date__ = "12/03/13"
class QchemJob(Job):
"""
A basic QChem Job.
"""
def __init__(self, qchem_cmd, input_file="mol.qcinp",
output_file="mol.qcout", chk_file=None, qclog_file=None,
gzipped=False, backup=True, alt_cmd=None,
large_static_mem=False):
"""
This constructor is necessarily complex due to the need for
flexibility. For standard kinds of runs, it's often better to use one
of the static constructors.
Args:
qchem_cmd ([str]): Command to run QChem as a list args (without
input/output file name). For example: ["qchem", "-np", "24"]
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
            chk_file (str): Name of the QChem checkpoint file. None means no
                checkpoint file. Defaults to None.
qclog_file (str): Name of the file to redirect the standard output
to. None means not to record the standard output. Defaults to
None.
gzipped (bool): Whether to gzip the final output. Defaults to False.
backup (bool): Whether to backup the initial input files. If True,
the input files will be copied with a ".orig" appended.
Defaults to True.
            alt_cmd (dict of list): Alternate commands.
                For example: {"openmp": ["qchem", "-seq", "-nt", "24"],
                              "half_cpus": ["qchem", "-np", "12"]}
            large_static_mem (bool): Whether to use ultra large static
                memory. Defaults to False.
"""
self.qchem_cmd = self._modify_qchem_according_to_version(copy.deepcopy(qchem_cmd))
self.input_file = input_file
self.output_file = output_file
self.chk_file = chk_file
self.qclog_file = qclog_file
self.gzipped = gzipped
self.backup = backup
self.current_command = self.qchem_cmd
self.current_command_name = "general"
self.large_static_mem = large_static_mem
        if alt_cmd:
            self.alt_cmd = {k: self._modify_qchem_according_to_version(c)
                            for k, c in copy.deepcopy(alt_cmd).items()}
        else:
            # guard the default alt_cmd=None, which has no .items() to iterate
            self.alt_cmd = None
        self._set_qchem_memory()
@classmethod
def _modify_qchem_according_to_version(cls, qchem_cmd):
cmd2 = copy.deepcopy(qchem_cmd)
try:
from rubicon.utils.qchem_info import get_qchem_version
cur_version = get_qchem_version()
        except Exception:
cur_version = parse_version("4.3.0")
if cmd2 is not None:
if cur_version >= parse_version("4.3.0"):
if cmd2[0] == "qchem":
if "-seq" in cmd2:
cmd2.remove("-seq")
if "NERSC_HOST" in os.environ and \
os.environ["NERSC_HOST"] in ["cori", "edison"]:
if "-dbg" not in cmd2:
cmd2.insert(1, "-dbg")
if "-seq" in cmd2:
cmd2.remove("-seq")
elif "NERSC_HOST" in os.environ and \
os.environ["NERSC_HOST"] == "matgen":
if "-dbg" not in cmd2:
cmd2.insert(1, "-dbg")
if "-seq" in cmd2:
cmd2.remove("-seq")
else:
if "-dbg" in cmd2:
cmd2.remove("-dbg")
if "-pbs" in cmd2:
cmd2.remove("-pbs")
return cmd2
def _set_qchem_memory(self, qcinp=None):
if not qcinp:
qcinp = QcInput.from_file(self.input_file)
if "PBS_JOBID" in os.environ:
if "hopque" in os.environ["PBS_JOBID"]:
# on Hopper
for j in qcinp.jobs:
if self.current_command_name == "general":
if self.large_static_mem:
j.set_memory(total=1100, static=300)
else:
j.set_memory(total=1100, static=100)
elif self.current_command_name == "half_cpus":
if self.large_static_mem:
j.set_memory(total=2200, static=500)
else:
j.set_memory(total=2200, static=100)
elif self.current_command_name == "openmp":
if self.large_static_mem:
j.set_memory(total=28000, static=10000)
else:
j.set_memory(total=28000, static=3000)
elif "NERSC_HOST" in os.environ and os.environ["NERSC_HOST"] == "cori":
if "QCSCRATCH" in os.environ and "eg_qchem" in os.environ["QCSCRATCH"]:
# in memory scratch
for j in qcinp.jobs:
if self.current_command_name == "general":
if self.large_static_mem:
j.set_memory(total=1400, static=200)
else:
j.set_memory(total=1500, static=100)
elif self.current_command_name == "half_cpus":
if self.large_static_mem:
j.set_memory(total=3000, static=500)
else:
j.set_memory(total=3200, static=300)
elif self.current_command_name == "openmp":
if self.large_static_mem:
j.set_memory(total=50000, static=12000)
else:
j.set_memory(total=60000, static=2000)
else:
# disk scratch
for j in qcinp.jobs:
if self.current_command_name == "general":
if self.large_static_mem:
j.set_memory(total=2700, static=500)
else:
j.set_memory(total=3000, static=200)
elif self.current_command_name == "half_cpus":
if self.large_static_mem:
j.set_memory(total=6000, static=1000)
else:
j.set_memory(total=6500, static=500)
elif self.current_command_name == "openmp":
if self.large_static_mem:
j.set_memory(total=100000, static=25000)
else:
j.set_memory(total=120000, static=8000)
elif "NERSC_HOST" in os.environ and os.environ["NERSC_HOST"] == "edison":
if "QCSCRATCH" in os.environ and "/tmp/eg_qchem" in os.environ["QCSCRATCH"]:
# in memory scratch
for j in qcinp.jobs:
if self.current_command_name == "general":
if self.large_static_mem:
j.set_memory(total=1200, static=300)
else:
j.set_memory(total=1200, static=100)
elif self.current_command_name == "half_cpus":
if self.large_static_mem:
j.set_memory(total=2400, static=400)
else:
j.set_memory(total=2400, static=200)
elif self.current_command_name == "openmp":
if self.large_static_mem:
j.set_memory(total=25000, static=1000)
else:
j.set_memory(total=25000, static=500)
else:
# disk scratch
for j in qcinp.jobs:
if self.current_command_name == "general":
if self.large_static_mem:
j.set_memory(total=2500, static=500)
else:
j.set_memory(total=2500, static=100)
elif self.current_command_name == "half_cpus":
if self.large_static_mem:
j.set_memory(total=5000, static=1000)
else:
j.set_memory(total=5000, static=200)
elif self.current_command_name == "openmp":
if self.large_static_mem:
j.set_memory(total=60000, static=20000)
else:
j.set_memory(total=60000, static=5000)
elif "NERSC_HOST" in os.environ and os.environ["NERSC_HOST"] == "matgen":
if "QCSCRATCH" in os.environ and "eg_qchem" in os.environ["QCSCRATCH"]:
# in memory scratch
for j in qcinp.jobs:
if self.current_command_name == "general":
if self.large_static_mem:
j.set_memory(total=1500, static=200)
else:
j.set_memory(total=1600, static=100)
elif self.current_command_name == "half_cpus":
if self.large_static_mem:
j.set_memory(total=3000, static=600)
else:
j.set_memory(total=3200, static=400)
elif self.current_command_name == "openmp":
if self.large_static_mem:
j.set_memory(total=15000, static=5500)
else:
j.set_memory(total=29000, static=2000)
else:
# disk scratch
for j in qcinp.jobs:
if self.current_command_name == "general":
if self.large_static_mem:
j.set_memory(total=2800, static=500)
else:
j.set_memory(total=3100, static=200)
elif self.current_command_name == "half_cpus":
if self.large_static_mem:
j.set_memory(total=6000, static=1100)
else:
j.set_memory(total=6500, static=600)
elif self.current_command_name == "openmp":
if self.large_static_mem:
j.set_memory(total=50000, static=10000)
else:
j.set_memory(total=59000, static=3000)
elif 'vesta' in socket.gethostname():
for j in qcinp.jobs:
j.set_memory(total=14500, static=800)
qcinp.write_file(self.input_file)
@staticmethod
def is_openmp_compatible(qcinp):
for j in qcinp.jobs:
if j.params["rem"]["jobtype"] == "freq":
return False
try:
from rubicon.utils.qchem_info import get_qchem_version
cur_version = get_qchem_version()
            except Exception:
cur_version = parse_version("4.3.0")
if cur_version < parse_version("4.3.0"):
if j.params["rem"]["exchange"] in ["pbe", "b"] \
and "correlation" in j.params['rem'] \
and j.params["rem"]["correlation"] in ["pbe", "lyp"]:
return False
return True
def command_available(self, cmd_name):
available_commands = ["general"]
if self.alt_cmd:
available_commands.extend(self.alt_cmd.keys())
return cmd_name in available_commands
def select_command(self, cmd_name, qcinp=None):
"""
        Set the command used to run QChem by name; "general" selects the
        default command.
        Args:
            cmd_name: the command name to change to.
            qcinp: the QcInput object to operate on.
        Returns:
            True on success. Raises an Exception if the command name is
            not available.
"""
if not self.command_available(cmd_name):
raise Exception("Command mode \"{cmd_name}\" is not available".format(cmd_name=cmd_name))
if cmd_name == "general":
self.current_command = self.qchem_cmd
else:
self.current_command = self.alt_cmd[cmd_name]
self.current_command_name = cmd_name
self._set_qchem_memory(qcinp)
return True
def setup(self):
if self.backup:
i = 0
while os.path.exists("{}.{}.orig".format(self.input_file, i)):
i += 1
shutil.copy(self.input_file,
"{}.{}.orig".format(self.input_file, i))
if self.chk_file and os.path.exists(self.chk_file):
shutil.copy(self.chk_file,
"{}.{}.orig".format(self.chk_file, i))
if os.path.exists(self.output_file):
shutil.copy(self.output_file,
"{}.{}.orig".format(self.output_file, i))
if self.qclog_file and os.path.exists(self.qclog_file):
shutil.copy(self.qclog_file,
"{}.{}.orig".format(self.qclog_file, i))
def _run_qchem(self, log_file_object=None):
if 'vesta' in socket.gethostname():
# on ALCF
returncode = self._run_qchem_on_alcf(log_file_object=log_file_object)
else:
qc_cmd = copy.deepcopy(self.current_command)
qc_cmd += [self.input_file, self.output_file]
qc_cmd = [str(t) for t in qc_cmd]
if self.chk_file:
qc_cmd.append(self.chk_file)
if log_file_object:
returncode = subprocess.call(qc_cmd, stdout=log_file_object)
else:
returncode = subprocess.call(qc_cmd)
return returncode
def _run_qchem_on_alcf(self, log_file_object=None):
parent_qcinp = QcInput.from_file(self.input_file)
njobs = len(parent_qcinp.jobs)
return_codes = []
alcf_cmds = []
qc_jobids = []
for i, j in enumerate(parent_qcinp.jobs):
qsub_cmd = copy.deepcopy(self.current_command)
sub_input_filename = "alcf_{}_{}".format(i+1, self.input_file)
sub_output_filename = "alcf_{}_{}".format(i+1, self.output_file)
sub_log_filename = "alcf_{}_{}".format(i+1, self.qclog_file)
qsub_cmd[-2] = sub_input_filename
sub_qcinp = QcInput([copy.deepcopy(j)])
if "scf_guess" in sub_qcinp.jobs[0].params["rem"] and \
sub_qcinp.jobs[0].params["rem"]["scf_guess"] == "read":
sub_qcinp.jobs[0].params["rem"].pop("scf_guess")
if i > 0:
if isinstance(j.mol, str) and j.mol == "read":
prev_qcout_filename = "alcf_{}_{}".format(i+1-1, self.output_file)
prev_qcout = QcOutput(prev_qcout_filename)
prev_final_mol = prev_qcout.data[0]["molecules"][-1]
j.mol = prev_final_mol
sub_qcinp.write_file(sub_input_filename)
logging.info("The command to run QChem is {}".format(' '.join(qsub_cmd)))
alcf_cmds.append(qsub_cmd)
p = subprocess.Popen(qsub_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
qc_jobid = int(out.strip())
qc_jobids.append(qc_jobid)
cqwait_cmd = shlex.split("cqwait {}".format(qc_jobid))
subprocess.call(cqwait_cmd)
output_file_name = "{}.output".format(qc_jobid)
cobaltlog_file_name = "{}.cobaltlog".format(qc_jobid)
with open(cobaltlog_file_name) as f:
cobaltlog_last_line = f.readlines()[-1]
            exit_code_pattern = re.compile(r"an exit code of (?P<code>\d+);")
m = exit_code_pattern.search(cobaltlog_last_line)
if m:
rc = float(m.group("code"))
else:
rc = -99999
return_codes.append(rc)
            for name_change_trial in range(10):
                if not os.path.exists(output_file_name):
                    message = "{} is not found in {}, wait #{} " \
                              "for 5 mins\n".format(output_file_name,
                                                    os.getcwd(),
                                                    name_change_trial)
                    logging.info(message)
                    if log_file_object:
                        log_file_object.writelines([message])
                    time.sleep(60 * 5)
                else:
                    message = "Found qchem output file {} in {}, changing " \
                              "file name\n".format(output_file_name,
                                                   os.getcwd())
                    logging.info(message)
                    if log_file_object:
                        log_file_object.writelines([message])
                    break
                # Guard flush/fsync: the original called these even when no
                # log file object was supplied, which would raise on None
                if log_file_object:
                    log_file_object.flush()
                    os.fsync(log_file_object.fileno())
shutil.move(output_file_name, sub_output_filename)
shutil.move(cobaltlog_file_name, sub_log_filename)
overall_return_code = min(return_codes)
with open(self.output_file, "w") as out_file_object:
for i, job_cmd, rc, qc_jobid in zip(range(njobs), alcf_cmds, return_codes, qc_jobids):
sub_output_filename = "alcf_{}_{}".format(i+1, self.output_file)
sub_log_filename = "alcf_{}_{}".format(i+1, self.qclog_file)
with open(sub_output_filename) as sub_out_file_object:
header_lines = ["Running Job {} of {} {}\n".format(i + 1, njobs, self.input_file),
" ".join(job_cmd) + "\n"]
if i > 0:
header_lines = ['', ''] + header_lines
sub_out = sub_out_file_object.readlines()
out_file_object.writelines(header_lines)
out_file_object.writelines(sub_out)
if rc < 0 and rc != -99999:
out_file_object.writelines(["Application {} exit codes: {}\n".format(qc_jobid, rc), '\n', '\n'])
if log_file_object:
with open(sub_log_filename) as sub_log_file_object:
sub_log = sub_log_file_object.readlines()
log_file_object.writelines(sub_log)
return overall_return_code
def run(self):
if "NERSC_HOST" in os.environ and (os.environ["NERSC_HOST"] in ["cori", "edison"]):
nodelist = os.environ["QCNODE"]
num_nodes = len(nodelist.split(","))
tmp_creation_cmd = shlex.split("srun -N {} --ntasks-per-node 1 --nodelist {} mkdir /dev/shm/eg_qchem".format(num_nodes, nodelist))
tmp_clean_cmd = shlex.split("srun -N {} --ntasks-per-node 1 --nodelist {} rm -rf /dev/shm/eg_qchem".format(num_nodes, nodelist))
elif "NERSC_HOST" in os.environ and os.environ["NERSC_HOST"] == "matgen":
nodelist = os.environ["QCNODE"]
num_nodes = len(nodelist.split(","))
tmp_creation_cmd = shlex.split("mpirun -np {} --npernode 1 --host {} mkdir /dev/shm/eg_qchem".format(num_nodes, nodelist))
tmp_clean_cmd = shlex.split("mpirun -np {} --npernode 1 --host {} rm -rf /dev/shm/eg_qchem".format(num_nodes, nodelist))
else:
tmp_clean_cmd = None
tmp_creation_cmd = None
logging.info("Scratch dir creation command is {}".format(tmp_creation_cmd))
logging.info("Scratch dir deleting command is {}".format(tmp_clean_cmd))
if self.qclog_file:
with open(self.qclog_file, "a") as filelog:
if tmp_clean_cmd:
filelog.write("delete scratch before running qchem using command {}\n".format(tmp_clean_cmd))
subprocess.call(tmp_clean_cmd, stdout=filelog)
if tmp_creation_cmd:
filelog.write("Create scratch dir before running qchem using command {}\n".format(tmp_creation_cmd))
subprocess.call(tmp_creation_cmd, stdout=filelog)
returncode = self._run_qchem(log_file_object=filelog)
if tmp_clean_cmd:
filelog.write("clean scratch after running qchem using command {}\n".format(tmp_clean_cmd))
subprocess.call(tmp_clean_cmd, stdout=filelog)
else:
if tmp_clean_cmd:
subprocess.call(tmp_clean_cmd)
if tmp_creation_cmd:
subprocess.call(tmp_creation_cmd)
returncode = self._run_qchem()
if tmp_clean_cmd:
subprocess.call(tmp_clean_cmd)
return returncode
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"qchem_cmd": self.qchem_cmd,
"input_file": self.input_file,
"output_file": self.output_file,
"chk_file": self.chk_file,
"qclog_file": self.qclog_file,
"gzipped": self.gzipped,
"backup": self.backup,
"large_static_mem": self.large_static_mem,
"alt_cmd": self.alt_cmd}
return d
@classmethod
def from_dict(cls, d):
return QchemJob(qchem_cmd=d["qchem_cmd"],
input_file=d["input_file"],
output_file=d["output_file"],
chk_file=d["chk_file"],
qclog_file=d["qclog_file"],
gzipped=d["gzipped"],
backup=d["backup"],
alt_cmd=d["alt_cmd"],
large_static_mem=d["large_static_mem"])
def postprocess(self):
if self.gzipped:
if "NERSC_HOST" in os.environ and os.environ["NERSC_HOST"] == "edison":
cur_dir = os.getcwd()
file_list = [os.path.join(cur_dir, name) for name in glob.glob("*")]
nodelist = os.environ["QCNODE"]
gzip_cmd = shlex.split("srun -N 1 --ntasks-per-node 1 --nodelist "
"{} gzip".format(nodelist)) + file_list
subprocess.call(gzip_cmd)
else:
gzip_dir(".")
|
davidwaroquiers/custodian
|
custodian/qchem/jobs.py
|
Python
|
mit
| 23,412
|
[
"pymatgen"
] |
34bce716bc14af4c5f2a528a22a682a3c9a688a53938db5676d0548cf988cdd8
|
try:
    paraview.simple
except (NameError, AttributeError):
    from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
InterpolateDataSetAttributes()
|
jeromevelut/Peavip
|
Testing/InterpolateDataSetAttributes.py
|
Python
|
gpl-3.0
| 141
|
[
"ParaView"
] |
d6ffcb956b6034d732319938c1841d84b39ed92c13063649a6086219ada4d904
|
#!/usr/bin/env python
#file decontaminate.py: helper functions for removing contaminants
__author__ = "Jon Sanders"
__copyright__ = "Copyright 2014, Jon Sanders"
__credits__ = ["Jon Sanders"]
__license__ = "GPL"
__version__ = "1.9.1"
__maintainer__ = "Jon Sanders"
__email__ = "jonsan@gmail.com"
from biom import load_table, parse_table
from bfillings.uclust import get_clusters_from_fasta_filepath
from bfillings.usearch import usearch_qf
from scipy.stats import spearmanr
import os.path
import numpy as np
def pick_ref_contaminants(queries, ref_db_fp, input_fasta_fp, contaminant_similarity, output_dir):
    # Cluster query seqs against the contaminant reference DB (uclust)
clusters, failures, seeds = get_clusters_from_fasta_filepath(
input_fasta_fp,
input_fasta_fp,
percent_ID=contaminant_similarity,
max_accepts=1,
max_rejects=8,
stepwords=8,
word_length=8,
optimal=False,
exact=False,
suppress_sort=False,
output_dir=output_dir,
enable_rev_strand_matching=False,
subject_fasta_filepath=ref_db_fp,
suppress_new_clusters=True,
return_cluster_maps=True,
stable_sort=False,
save_uc_files=False,
HALT_EXEC=False)
# Pick seqs that fail the similarity to contaminants rule
ref_contaminants = set(queries) - set(failures)
return(ref_contaminants)
def pick_corr_contaminants(sample_biom,
corr_data_dict,
max_r):
# Filter biom to only samples for which correlate data available
sample_biom_filt = sample_biom.filter(
lambda val, id_, metadata: id_ in corr_data_dict,
invert=False,
inplace=False)
otus = sample_biom_filt.ids(axis='observation')
samples = sample_biom_filt.ids(axis='sample')
# Make array of correlate data in same order as biom file
correlate = [corr_data_dict[x] for x in samples]
obs_corr_dict = {}
# Make a 2D array of normalized biom table values
norm_array = sample_biom_filt.norm(inplace=False).matrix_data.toarray()
t = 0
for otu in otus:
obs_corr_dict[otu] = spearmanr(norm_array[t], correlate)
t += 1
    # get keys (otu names) for OTUs whose correlation is below the max_r
    # threshold
obs_corr_contaminants = [x for x in obs_corr_dict if obs_corr_dict[x][0] < max_r]
return(set(obs_corr_contaminants), obs_corr_dict)
def reinstate_abund_seqs(putative_contaminants,
contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential):
abund_reinstated_seqs = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential,
negate=False)
# Only consider seqs as reinstated if previously identified as contaminants
abund_reinstated_seqs = set(putative_contaminants) & set(abund_reinstated_seqs)
return(abund_reinstated_seqs)
def reinstate_incidence_seqs(putative_contaminants,
unique_seq_biom,
blank_sample_ids,
reinstatement_sample_number):
sample_biom = unique_seq_biom.filter(lambda val, id_, metadata:
id_ in blank_sample_ids, invert=True, inplace=False)
incidence_reinstated_seqs = sample_biom.pa().filter(
lambda val, id_, metadata: val.sum() >= reinstatement_sample_number,
axis='observation', inplace=False).ids(
axis='observation')
# Only consider seqs as reinstated if previously identified as contaminants
incidence_reinstated_seqs = set(putative_contaminants) & set(incidence_reinstated_seqs)
return(incidence_reinstated_seqs)
def mothur_counts_to_biom(mothur_f):
mothur_biom = parse_table(mothur_f)
mothur_biom.type = u'OTU table'
    # drop the mothur 'total' column (exact match; 'in' was a substring test)
    filter_biom = mothur_biom.filter(
        lambda val, id_, metadata: id_ == 'total', invert=True)
return(filter_biom)
def biom_to_mothur_counts(biom_obj):
sample_ids = biom_obj.ids(axis='sample')
otu_ids = biom_obj.ids(axis='observation')
otu_totals = biom_obj.sum(axis='observation')
outstring = 'Representative_Sequence\ttotal\t' + '\t'.join(sample_ids) + '\n'
for otu in otu_ids:
otu_data = biom_obj.data(id = otu, axis = 'observation')
outstring += '{0}\t{1}\t{2}\n'.format(otu,
int(otu_data.sum()),
'\t'.join(str(x) for x in otu_data.astype('int')))
return(outstring)
def prescreen_libraries(unique_seq_biom,
contamination_stats_header,
contamination_stats_dict,
removal_stat_sample,
removal_stat_blank,
removal_differential,
prescreen_threshold):
abund_contaminants = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
removal_stat_sample,
removal_stat_blank,
removal_differential,
negate=True)
# make relabund table
norm_biom = unique_seq_biom.norm(inplace = False)
# filter out sequences marked as contaminants
norm_biom.filter(lambda val, id_, metadata: id_ in abund_contaminants,
axis='observation', invert=True, inplace=True)
# filter out samples above threshold
norm_biom.filter(lambda val, id_, metadata: sum(val) > prescreen_threshold,
axis='sample', invert=False, inplace=True)
# Now only have samples passing the prescreening
above_threshold_samples = norm_biom.ids(axis='sample')
return above_threshold_samples
def get_contamination_stats(biom_file, qS=50, qB=50, interpolation='midpoint', blank_sample_ids=None, exp_sample_ids=[], proportional=False):
if not proportional:
biom_file = biom_file.norm(inplace=False)
header = ['maxS','avgS','q%sS' % qS]
# Calculate blank stats if blank sample names are provided
if blank_sample_ids:
blanks = True
blank_sample_ids = set(blank_sample_ids) & set(biom_file.ids(axis='sample'))
blank_data = biom_file.filter(blank_sample_ids, axis='sample',
invert=False, inplace=False).matrix_data
maxB = [x[0] for x in blank_data.max(axis=1).todense().tolist()]
avgB = [x[0] for x in blank_data.mean(axis=1).tolist()]
quantB = np.percentile(blank_data.todense(),qB,axis=1, interpolation=interpolation).tolist()
header.append('maxB')
header.append('avgB')
header.append('q%sB' % qB)
else:
# Otherwise, set the 'blanks' to an empty list
blank_sample_ids = []
blanks = False
# If specific list of experimental sample IDs aren't provided,
# assume everything not marked blank is an experimental sample
if len(exp_sample_ids) == 0:
exp_sample_ids = set(biom_file.ids(axis='sample')) - set(blank_sample_ids)
sample_data = biom_file.filter(exp_sample_ids, axis='sample',
invert=False, inplace=False).matrix_data
maxS = [x[0] for x in sample_data.max(axis=1).todense().tolist()]
avgS = [x[0] for x in sample_data.mean(axis=1).tolist()]
quantS = np.percentile(sample_data.todense(),qS,axis=1, interpolation=interpolation).tolist()
stats_dict = {}
i = 0
if blanks:
for otu in biom_file.ids(axis='observation'):
stats_dict[otu] = [maxS[i], avgS[i], quantS[i], maxB[i], avgB[i], quantB[i]]
i += 1
else:
for otu in biom_file.ids(axis='observation'):
stats_dict[otu] = [maxS[i], avgS[i], quantS[i]]
i += 1
return(header, stats_dict)
def pick_min_relabund_threshold(stats_dict, stats_header, min_relabund, sample_stat='maxS'):
i_s = stats_header.index(sample_stat)
passed_otus = set()
for otu in stats_dict:
if(float(stats_dict[otu][i_s]) < float(min_relabund)):
passed_otus.add(otu)
return(passed_otus)
def compare_blank_abundances(stats_dict, stats_header,
sample_stat, blank_stat, scalar=1, negate=False):
"""Note that this method will default to returning sequences for which
the criteria sample_stat > blank_stat * scalar are TRUE, i.e. non-contam
sequences. To return contaminants (sequences that FAIL the inequality),
set negate to True."""
i_s = stats_header.index(sample_stat)
i_b = stats_header.index(blank_stat)
passed_otus = set()
for otu in stats_dict:
if((float(stats_dict[otu][i_s]) >= (float(scalar) * float(stats_dict[otu][i_b]))) != negate):
passed_otus.add(otu)
# print passed_otus
return(passed_otus)
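# Hedged, self-contained example of the inequality above (made-up numbers):
def _example_compare_blank_abundances():
    # With scalar=2, an OTU passes as non-contaminant when its max sample
    # abundance is at least twice its max blank abundance; negate=True
    # returns the OTUs that fail the inequality instead.
    header = ['maxS', 'avgS', 'q50S', 'maxB', 'avgB', 'q50B']
    stats = {'otu1': [0.10, 0.05, 0.04, 0.04, 0.02, 0.01],
             'otu2': [0.01, 0.005, 0.004, 0.03, 0.02, 0.01]}
    assert compare_blank_abundances(stats, header, 'maxS', 'maxB', 2) == {'otu1'}
    assert compare_blank_abundances(stats, header, 'maxS', 'maxB', 2,
                                    negate=True) == {'otu2'}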
def calc_per_category_decontam_stats(biom_obj, filter_otus):
reads = biom_obj.filter(lambda val, id_, metadata: id_ in filter_otus,
axis='observation', invert=False, inplace=False).sum(axis = 'sample')
otus = biom_obj.pa(inplace = False).filter(lambda val, id_, metadata: id_ in filter_otus,
axis='observation', invert=False, inplace=False).sum(axis = 'sample')
return(reads.tolist(),otus.tolist())
def calc_per_library_decontam_stats(start_biom, output_dict):
# calculate starting number of sequences and unique sequences per library
steps = ['below_relabund_threshold','putative_contaminants','ever_good_seqs','reinstated_seqs','all_good_seqs']
results_dict = {}
results_dict['starting'] = calc_per_category_decontam_stats(start_biom, start_biom.ids(axis='observation'))
results_header = ['starting']
for step in steps:
if step in output_dict:
results_dict[step] = calc_per_category_decontam_stats(start_biom, output_dict[step])
results_header.append(step)
return(results_dict, results_header)
def filter_contaminated_libraries(unique_seq_biom, contaminant_otus, contam_threshold):
# make relabund table
norm_biom = unique_seq_biom.norm(inplace = False)
# filter out sequences marked as contaminants
norm_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
axis='observation', invert=True, inplace=True)
# filter out samples above threshold
norm_biom.filter(lambda val, id_, metadata: sum(val) > contam_threshold,
axis='sample', invert=False, inplace=True)
# filter contam sequences from original biom
filtered_biom = unique_seq_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
axis='observation', invert=True, inplace=False)
# filter samples that lost too much relative to starting from original biom
filtered_biom = filtered_biom.filter(lambda val, id_, metadata: id_ in norm_biom.ids(axis='sample'),
axis='sample', invert=False, inplace=True)
return(filtered_biom)
def print_filtered_otu_map(input_otu_map_fp, output_otu_map_fp, filter_set):
output_otu_map_f = open(output_otu_map_fp, 'w')
for line in open(input_otu_map_fp, 'U'):
seq_identifier = line.strip().split('\t')[0]
# write OTU line if present in the filter set
if seq_identifier in filter_set:
output_otu_map_f.write(line)
output_otu_map_f.close()
return
def print_filtered_mothur_counts(mothur_counts_fp, output_counts_fp, filter_set):
output_counts_f = open(output_counts_fp, 'w')
t = 0
for line in open(mothur_counts_fp, 'U'):
seq_identifier = line.strip().split('\t')[0]
        # only write this line if the seq identifier is in the filter set,
        # or if it's the header (first) line
if seq_identifier in filter_set or t == 0:
output_counts_f.write(line)
t += 1
output_counts_f.close()
return
def print_per_library_stats(per_library_stats, per_library_stats_header, lib_ids, dropped_libs=[]):
outline = 'Library\t'
outline += '_reads\t'.join(per_library_stats_header) + '_reads\t'
outline += '_otus\t'.join(per_library_stats_header) + '_otus'
if len(dropped_libs) > 0:
outline += '\tlibrary_discarded'
discard = True
else:
discard = False
outline += '\n'
t = 0
for lib in lib_ids:
outline += lib
for category in per_library_stats_header:
outline += '\t' + str(int(per_library_stats[category][0][t]))
for category in per_library_stats_header:
outline += '\t' + str(int(per_library_stats[category][1][t]))
if discard:
if lib in dropped_libs:
outline += '\tTrue'
else:
outline += '\tFalse'
outline += '\n'
t += 1
return(outline)
def print_otu_disposition(input_seqs, output_dict, hierarchy=[]):
outline = ''
if hierarchy == []:
hierarchy = ['below_relabund_threshold', 'putative_contaminants','reinstated_seqs','ever_good_seqs']
# Subset hierarchy to levels also in output dict:
hierarchy = [x for x in hierarchy if x in output_dict]
# Check that the levels of the hierarchy are non-overlapping:
for x in range(len(hierarchy) - 1):
for y in range(x + 1,len(hierarchy)):
if not output_dict[hierarchy[x]].isdisjoint(output_dict[hierarchy[y]]):
print('warning: non-disjoint sets in the disposition hierarchy')
for seq in input_seqs:
for level in hierarchy:
if seq in output_dict[level]:
outline += '{0}\t{1}\n'.format(seq,level)
break
return(outline)
def print_filtered_seq_headers(seq_headers, output_headers_fp, filter_set):
output_headers_f = open(output_headers_fp, 'w')
for x in seq_headers:
if x in filter_set:
output_headers_f.write('{0}\n'.format(x))
output_headers_f.close()
return
def print_filtered_output(output_method, unfiltered_input, output_dir, output_dict, output_categories=None):
output_fn = 'print_filtered_' + output_method
if not output_categories:
output_categories = output_dict.keys()
if output_method == 'seq_headers':
output_fn = print_filtered_seq_headers
elif output_method == 'mothur_counts':
output_fn = print_filtered_mothur_counts
elif output_method == 'otu_map':
output_fn = print_filtered_otu_map
for category in output_categories:
output_fn(unfiltered_input,
os.path.join(output_dir,
'{0}_{1}.txt'.format(category, output_method)),
output_dict[category])
return
def print_results_file(seq_ids,
output_dict,
output_fp,
stats_header=None,
stats_dict=None,
corr_data_dict=None):
output_f = open(output_fp, 'w')
header = "SeqID"
sorted_categories = sorted(output_dict.keys())
for category in sorted_categories:
header += '\t{0}'.format(category)
if stats_header:
for x in stats_header:
header += '\t{0}'.format(x)
if corr_data_dict:
header += '\t{0}\t{1}'.format('spearman_r','spearman_p')
output_f.write(header + '\n')
for otu in seq_ids:
outline = str(otu)
for category in sorted_categories:
outline += '\t{0}'.format(1 if otu in output_dict[category] else 0)
if stats_header:
t = 0
for x in stats_header:
outline += '\t{0:.10f}'.format(stats_dict[otu][t])
t += 1
if corr_data_dict:
outline += '\t{0:.3f}\t{1:.3f}'.format(
corr_data_dict[otu][0],
corr_data_dict[otu][1])
output_f.write(outline + '\n')
    output_f.close()
    return
|
tanaes/decontaminate
|
qiime_scripts/qiime/decontaminate.py
|
Python
|
mit
| 16,496
|
[
"BLAST"
] |
0de04c6905071b5eac25d49fad8f0a400e500cd9358bfe23579757c57b1808a7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This class implements smart io classes that performs intelligent io based on
file extensions.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 29, 2012"
import os
import json
import re
from fnmatch import fnmatch
from monty.dev import deprecated
from pymatgen.core.structure import Structure, Molecule
from pymatgen.io.vasp import Vasprun, Poscar, Chgcar
from pymatgen.io.cif import CifParser, CifWriter
from pymatgen.io.cssr import Cssr
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput, GaussianOutput
from monty.io import zopen
from monty.json import MontyDecoder, MontyEncoder
from monty.string import str2unicode
from pymatgen.io.babel import BabelMolAdaptor
@deprecated(Structure.from_file, message="Will be removed in pymatgen 4.0.")
def read_structure(filename, primitive=True, sort=False):
"""
Reads a structure based on file extension. For example, anything ending in
a "cif" is assumed to be a Crystallographic Information Format file.
Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
vasprun.xml, CSSR and pymatgen's JSON serialized structures.
Args:
filename (str): A filename to read from.
primitive (bool): Whether to convert to a primitive cell for cifs.
Defaults to True.
sort (bool): Whether to sort sites. Default to False.
Returns:
A Structure object.
"""
fname = os.path.basename(filename)
if fnmatch(fname.lower(), "*.cif*"):
parser = CifParser(filename)
s = parser.get_structures(primitive=primitive)[0]
elif fnmatch(fname, "POSCAR*") or fnmatch(fname, "CONTCAR*"):
s = Poscar.from_file(filename, False).structure
elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
s = Chgcar.from_file(filename).structure
elif fnmatch(fname, "vasprun*.xml*"):
s = Vasprun(filename).final_structure
elif fnmatch(fname.lower(), "*.cssr*"):
cssr = Cssr.from_file(filename)
s = cssr.structure
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
with zopen(filename) as f:
s = json.load(f, cls=MontyDecoder)
if type(s) != Structure:
raise IOError("File does not contain a valid serialized "
"structure")
else:
raise ValueError("Unrecognized file extension!")
if sort:
s = s.get_sorted_structure()
return s
@deprecated(replacement=Structure.to, message="Will be removed in pymatgen 4.0")
def write_structure(structure, filename):
"""
Write a structure to a file based on file extension. For example, anything
ending in a "cif" is assumed to be a Crystallographic Information Format
file. Supported formats include CIF, POSCAR, CSSR and pymatgen's JSON
serialized structures.
Args:
structure (Structure/IStructure): Structure to write
filename (str): A filename to write to.
"""
fname = os.path.basename(filename)
if fnmatch(fname, "*.cif*"):
writer = CifWriter(structure)
elif fnmatch(fname, "POSCAR*") or fnmatch(fname, "CONTCAR*"):
writer = Poscar(structure)
elif fnmatch(fname.lower(), "*.cssr*"):
writer = Cssr(structure)
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
with zopen(filename, "wt") as f:
f.write(str2unicode(json.dumps(structure, cls=MontyEncoder)))
return
else:
raise ValueError("Unrecognized file extension!")
writer.write_file(filename)
@deprecated(Molecule.from_file, message="Will be removed in pymatgen 4.0.")
def read_mol(filename):
"""
    Reads a molecule based on file extension. For example, anything ending in
    a "xyz" is assumed to be a XYZ file. Supported formats include xyz,
    Gaussian input (gjf|g03|g09|com|inp), Gaussian output (out|lis|log) and
    pymatgen's JSON serialized molecules. Using openbabel,
    many more extensions are supported but requires openbabel to be installed.
Args:
filename (str): A filename to read from.
Returns:
A Molecule object.
"""
fname = os.path.basename(filename)
if fnmatch(fname.lower(), "*.xyz*"):
return XYZ.from_file(filename).molecule
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
return GaussianInput.from_file(filename).molecule
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["out", "lis", "log"]]):
return GaussianOutput(filename).final_structure
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
with zopen(filename) as f:
s = json.load(f, cls=MontyDecoder)
if type(s) != Molecule:
raise IOError("File does not contain a valid serialized "
"molecule")
return s
else:
m = re.search("\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
filename.lower())
if m:
return BabelMolAdaptor.from_file(filename,
m.group(1)).pymatgen_mol
raise ValueError("Unrecognized file extension!")
@deprecated(replacement=Molecule.to, message="Will be removed in pymatgen 4.0.")
def write_mol(mol, filename):
"""
Write a molecule to a file based on file extension. For example, anything
ending in a "xyz" is assumed to be a XYZ file. Supported formats include
xyz, Gaussian input (gjf|g03|g09|com|inp), and pymatgen's JSON serialized
molecules.
Args:
mol (Molecule/IMolecule): Molecule to write
filename (str): A filename to write to.
"""
fname = os.path.basename(filename)
if fnmatch(fname.lower(), "*.xyz*"):
return XYZ(mol).write_file(filename)
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
return GaussianInput(mol).write_file(filename)
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
with zopen(filename, "wt") as f:
return f.write(str2unicode(json.dumps(mol, cls=MontyEncoder)))
else:
m = re.search("\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
filename.lower())
if m:
return BabelMolAdaptor(mol).write_file(filename, m.group(1))
raise ValueError("Unrecognized file extension!")
|
Bismarrck/pymatgen
|
pymatgen/io/smart.py
|
Python
|
mit
| 6,754
|
[
"Gaussian",
"VASP",
"pymatgen"
] |
8b9a4c33b8177da78432925024cbe8e45f753325fc08b056ea8826d12cc31b1f
|
import unittest
import json
from freezegun import freeze_time
from datetime import datetime, timedelta
from eachday.tests.base import BaseTestCase
from eachday.models import User, BlacklistToken
from eachday import db
class TestAuthRoutes(BaseTestCase):
def test_registration(self):
''' Test for user registration '''
response = self.client.post(
'/register',
data=json.dumps(dict(
email='foo@bar.com',
password='123456',
name='joe'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(data['message'], 'Successfully registered.')
self.assertIn('auth_token', data)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 201)
def test_registration_missing_fields(self):
''' Test for user registration with missing fields '''
response = self.client.post(
'/register',
data=json.dumps(dict(
email='foo@bar.com',
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertIn('error', data)
self.assertIn('password', data['error'])
self.assertIn('name', data['error'])
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 400)
def test_registered_with_already_registered_user(self):
''' Test registration with already registered email'''
user = User(
email='foo@bar.com',
password='test',
name='joe'
)
db.session.add(user)
db.session.commit()
response = self.client.post(
'/register',
data=json.dumps(dict(
email='foo@bar.com',
password='123456',
name='moe'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(
data['error'], 'User already exists.')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 400)
def test_registered_user_login(self):
        ''' Test login of a registered user '''
# user registration
resp_register = self.client.post(
'/register',
data=json.dumps({
'email': 'joe@gmail.com',
'password': '123456',
'name': 'joe'
}),
content_type='application/json',
)
data_register = json.loads(resp_register.data.decode())
self.assertEqual(data_register['status'], 'success')
self.assertEqual(
data_register['message'], 'Successfully registered.'
)
self.assertIn('auth_token', data_register)
self.assertEqual(resp_register.content_type, 'application/json')
self.assertEqual(resp_register.status_code, 201)
# registered user login
response = self.client.post(
'/login',
data=json.dumps({
'email': 'joe@gmail.com',
'password': '123456'
}),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(data['message'], 'Successfully logged in.')
        self.assertIn('auth_token', data)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 200)
def test_incorrect_password(self):
''' Test for login rejection of registered-user with bad password '''
# user registration
resp_register = self.client.post(
'/register',
data=json.dumps({
'email': 'joe@gmail.com',
'password': '123456',
'name': 'joe'
}),
content_type='application/json',
)
data_register = json.loads(resp_register.data.decode())
self.assertEqual(data_register['status'], 'success')
self.assertEqual(
data_register['message'], 'Successfully registered.'
)
self.assertIn('auth_token', data_register)
self.assertEqual(resp_register.content_type, 'application/json')
self.assertEqual(resp_register.status_code, 201)
# login with bad password
response = self.client.post(
'/login',
data=json.dumps({
'email': 'joe@gmail.com',
'password': 'invalid password'
}),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['error'], 'Invalid login.')
self.assertNotIn('auth_token', data)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 401)
def test_non_registered_user_login(self):
''' Test for login of non-registered user '''
response = self.client.post(
'/login',
data=json.dumps({
'email': 'joe@gmail.com',
'password': '123456'
}),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['error'], 'User does not exist.')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 404)
def test_logout_blacklist_token(self):
''' Test that logging out blacklists current token '''
user = User(
email='foo@bar.com',
password='test',
name='joe'
)
db.session.add(user)
db.session.commit()
auth_token = user.encode_auth_token(user.id).decode()
response = self.client.post(
'/logout',
headers={
'Authorization': 'Bearer ' + auth_token
}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(data['message'], 'Successfully logged out')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 200)
blacklist = BlacklistToken.query.filter_by(token=auth_token).first()
self.assertTrue(blacklist is not None)
def test_blacklist_token_rejection(self):
''' Test that blacklisted auth tokens are rejected '''
# Create user / auth_token
user = User(
email='foo@bar.com',
password='test',
name='joe'
)
db.session.add(user)
db.session.commit()
auth_token = user.encode_auth_token(user.id).decode()
# Blacklist auth_token
blacklist_token = BlacklistToken(token=auth_token)
db.session.add(blacklist_token)
db.session.commit()
# Check to make sure that the blacklisted token cannot be used
response = self.client.get(
'/user',
headers={
'Authorization': 'Bearer ' + auth_token
}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['error'],
'Token blacklisted. Please log in again.')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 401)
def test_invalid_token_rejection(self):
''' Test that using an invalid token gives correct error '''
auth_token = 'not_an_auth_token ;)'
response = self.client.get(
'/user',
headers={
'Authorization': 'Bearer ' + auth_token
}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['error'], 'Invalid token. Please log in again.')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 401)
def test_expired_token_rejection(self):
''' Test that using an expired token gives correct error '''
with freeze_time(datetime.utcnow()) as frozen_datetime:
user = User(
email='foo@bar.com',
password='test',
name='joe'
)
db.session.add(user)
db.session.commit()
auth_token = user.encode_auth_token(user.id).decode()
# Jump time to just after token has expired
td = timedelta(days=1, seconds=1)
frozen_datetime.move_to(datetime.utcnow() + td)
response = self.client.get(
'/user',
headers={
'Authorization': 'Bearer ' + auth_token
}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(
data['error'], 'Signature expired. Please log in again.'
)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, 401)
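# The suite can be run directly or via unittest discovery (assumes the
# eachday package and its test database are configured in BaseTestCase):
#
#     python -m unittest eachday.tests.test_auth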
if __name__ == '__main__':
unittest.main()
|
bcongdon/EachDay
|
eachday/tests/test_auth.py
|
Python
|
mit
| 9,773
|
[
"MOE"
] |
1be1db7586884d2048bc6407106223a2f9becec1f16800a77bb1169528a22d7c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import warnings
"""
Created on Mar 19, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 19, 2012"
import os
import unittest
from collections import defaultdict
from math import sqrt
from pathlib import Path
import pytest
from monty.json import MontyDecoder
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.entries.compatibility import (
MU_H2O,
AqueousCorrection,
Compatibility,
CompatibilityError,
MaterialsProject2020Compatibility,
MaterialsProjectAqueousCompatibility,
MaterialsProjectCompatibility,
MITAqueousCompatibility,
MITCompatibility,
)
from pymatgen.entries.computed_entries import (
ComputedEntry,
ComputedStructureEntry,
ConstantEnergyAdjustment,
)
from pymatgen.util.testing import PymatgenTest
# abstract Compatibility tests
class DummyCompatibility(Compatibility):
"""
Dummy class to test abstract Compatibility interface
"""
def get_adjustments(self, entry):
return [ConstantEnergyAdjustment(-10, name="Dummy adjustment")]
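# Sketch of the abstract workflow (DummyCompatibility applies a flat -10 eV
# adjustment; concrete schemes derive adjustments from entry parameters):
#
#     entry = ComputedEntry("Fe2O3", -2)
#     [adjusted] = DummyCompatibility().process_entries(entry)
#     assert adjusted.correction == -10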
def test_process_entries_return_type():
"""
process_entries should accept single entries or a list, and always return a list
"""
entry = ComputedEntry("Fe2O3", -2)
compat = DummyCompatibility()
assert isinstance(compat.process_entries(entry), list)
assert isinstance(compat.process_entries([entry]), list)
def test_no_duplicate_corrections():
"""
Compatibility should never apply the same correction twice
"""
entry = ComputedEntry("Fe2O3", -2)
compat = DummyCompatibility()
assert entry.correction == 0
compat.process_entries(entry)
assert entry.correction == -10
compat.process_entries(entry)
assert entry.correction == -10
compat.process_entries(entry, clean=True)
assert entry.correction == -10
def test_clean_arg():
"""
clean=False should preserve existing corrections, clean=True should delete
them before processing
"""
entry = ComputedEntry("Fe2O3", -2, correction=-4)
compat = DummyCompatibility()
assert entry.correction == -4
compat.process_entries(entry, clean=False)
assert entry.correction == -14
compat.process_entries(entry)
assert entry.correction == -10
def test_energy_adjustment_normalize():
"""
Both manual and automatically generated energy adjustments should be scaled
by the normalize method
"""
entry = ComputedEntry("Fe4O6", -2, correction=-4)
entry = entry.normalize()
for ea in entry.energy_adjustments:
if "Manual" in ea.name:
assert ea.value == -2
compat = DummyCompatibility()
entry = ComputedEntry("Fe4O6", -2, correction=-4)
entry = compat.process_entries(entry)[0]
entry = entry.normalize()
for ea in entry.energy_adjustments:
if "Dummy" in ea.name:
assert ea.value == -5
def test_overlapping_adjustments():
"""
Compatibility should raise a CompatibilityError if there is already a
correction with the same name, but a different value, and process_entries
should skip that entry.
"""
ea = ConstantEnergyAdjustment(-5, name="Dummy adjustment")
entry = ComputedEntry("Fe2O3", -2, energy_adjustments=[ea])
compat = DummyCompatibility()
assert entry.correction == -5
# in case of a collision between EnergyAdjustment, check for a UserWarning
with pytest.warns(UserWarning, match="already has an energy adjustment called Dummy"):
processed = compat.process_entries(entry, clean=False)
assert len(processed) == 0
class MaterialsProjectCompatibilityTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.entry1 = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry_sulfide = ComputedEntry(
"FeS",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry4 = ComputedEntry(
"H8",
-27.1,
correction=0.0,
parameters={
"run_type": "LDA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["H"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE H"],
"oxide_type": "None",
},
)
self.entry2 = ComputedEntry(
"Fe3O4",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry3 = ComputedEntry(
"FeO",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.compat = MaterialsProjectCompatibility(check_potcar_hash=False)
self.ggacompat = MaterialsProjectCompatibility("GGA", check_potcar_hash=False)
def tearDown(self):
warnings.simplefilter("default")
def test_process_entry(self):
# Correct parameters
self.assertIsNotNone(self.compat.process_entry(self.entry1))
self.assertIsNone(self.ggacompat.process_entry(self.entry1))
# Correct parameters
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": {},
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
self.assertIsNotNone(self.ggacompat.process_entry(entry))
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNotNone(self.compat.process_entry(entry))
def test_correction_values(self):
# test_corrections
self.assertAlmostEqual(self.compat.process_entry(self.entry1).correction, -2.733 * 2 - 0.70229 * 3)
entry = ComputedEntry(
"FeF3",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "F": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE F 08Apr2002",
"hash": "180141c33d032bfbfff30b3bea9d23dd",
},
],
},
)
self.assertIsNotNone(self.compat.process_entry(entry))
# Check actual correction
self.assertAlmostEqual(self.compat.process_entry(entry).correction, -2.733)
self.assertAlmostEqual(self.compat.process_entry(self.entry_sulfide).correction, -0.66346)
def test_U_values(self):
# Wrong U value
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.2, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
# GGA run of U
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
# GGA+U run of non-U
entry = ComputedEntry(
"Al2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Al": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Al 06Sep2000",
"hash": "805c888bbd2793e462311f6a20d873d9",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
# Materials project should not have a U for sulfides
entry = ComputedEntry(
"FeS2",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "S": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "f7f8e4a74a6cbb8d63e41f4373b54df2",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
def test_wrong_psp(self):
# Wrong psp
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
def test_element_processing(self):
entry = ComputedEntry(
"O",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": {},
"potcar_spec": [
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
}
],
"run_type": "GGA",
},
)
entry = self.compat.process_entry(entry)
# self.assertEqual(entry.entry_id, -8)
self.assertAlmostEqual(entry.energy, -1)
self.assertAlmostEqual(self.ggacompat.process_entry(entry).energy, -1)
def test_get_explanation_dict(self):
compat = MaterialsProjectCompatibility(check_potcar_hash=False)
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
d = compat.get_explanation_dict(entry)
self.assertEqual("MPRelaxSet Potcar Correction", d["corrections"][0]["name"])
def test_get_corrections_dict(self):
compat = MaterialsProjectCompatibility(check_potcar_hash=False)
ggacompat = MaterialsProjectCompatibility("GGA", check_potcar_hash=False)
# Correct parameters
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
c = compat.get_corrections_dict(entry)[0]
self.assertAlmostEqual(c["MP Anion Correction"], -2.10687)
self.assertAlmostEqual(c["MP Advanced Correction"], -5.466)
entry.parameters["is_hubbard"] = False
del entry.parameters["hubbards"]
c = ggacompat.get_corrections_dict(entry)[0]
self.assertNotIn("MP Advanced Correction", c)
def test_process_entries(self):
entries = self.compat.process_entries([self.entry1, self.entry2, self.entry3, self.entry4])
self.assertEqual(len(entries), 2)
def test_msonable(self):
compat_dict = self.compat.as_dict()
decoder = MontyDecoder()
temp_compat = decoder.process_decoded(compat_dict)
self.assertIsInstance(temp_compat, MaterialsProjectCompatibility)
class MaterialsProject2020CompatibilityTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.entry1 = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry_sulfide = ComputedEntry(
"FeS",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry2 = ComputedEntry(
"Fe3O4",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry3 = ComputedEntry(
"FeO",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.compat = MaterialsProject2020Compatibility(check_potcar_hash=False)
self.ggacompat = MaterialsProject2020Compatibility("GGA", check_potcar_hash=False)
def tearDown(self):
warnings.simplefilter("default")
def test_process_entry(self):
# Correct parameters
self.assertIsNotNone(self.compat.process_entry(self.entry1))
self.assertIsNone(self.ggacompat.process_entry(self.entry1))
# Correct parameters
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": {},
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
self.assertIsNotNone(self.ggacompat.process_entry(entry))
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNotNone(self.compat.process_entry(entry))
def test_correction_values(self):
# test_corrections
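        # Fe2O3: 2 Fe atoms at -2.256 eV each (GGA/GGA+U mixing correction)
        # plus 3 O atoms at -0.688 eV each (oxide anion correction); see
        # test_energy_adjustments below for the per-atom values.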
self.assertAlmostEqual(self.compat.process_entry(self.entry1).correction, -2.256 * 2 - 0.688 * 3)
entry = ComputedEntry(
"FeF3",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "F": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE F 08Apr2002",
"hash": "180141c33d032bfbfff30b3bea9d23dd",
},
],
},
)
self.assertIsNotNone(self.compat.process_entry(entry))
# Check actual correction
self.assertAlmostEqual(self.compat.process_entry(entry).correction, -0.462 * 3 + -2.256)
self.assertAlmostEqual(self.compat.process_entry(self.entry_sulfide).correction, -0.504)
    def test_oxidation_by_electronegativity(self):
# make sure anion corrections are only applied when the element has
# a negative oxidation state (e.g., correct CaSi but not SiO2 for Si)
# as determined by electronegativity (i.e., the data.oxidation_states key is absent)
entry1 = ComputedEntry.from_dict(
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -17.01015622,
"composition": defaultdict(float, {"Si": 2.0, "Ca": 2.0}),
"energy_adjustments": [],
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Ca_sv", "Si"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Ca_sv", "PBE Si"],
"oxide_type": "None",
},
"data": {"oxide_type": "None"},
"entry_id": "mp-1563",
"correction": 0.0,
}
)
entry2 = ComputedEntry.from_dict(
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -47.49120119,
"composition": defaultdict(float, {"Si": 2.0, "O": 4.0}),
"energy_adjustments": [],
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Si", "O"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Si", "PBE O"],
"oxide_type": "oxide",
},
"data": {"oxide_type": "oxide"},
"entry_id": "mp-546794",
"correction": 0.0,
}
)
# CaSi; only correction should be Si
self.assertAlmostEqual(self.compat.process_entry(entry1).correction, 0.072 * 2)
# SiO2; only corrections should be oxide
self.assertAlmostEqual(self.compat.process_entry(entry2).correction, -0.688 * 4)
    def test_oxidation(self):
# make sure anion corrections are only applied when the element has
# a negative oxidation state (e.g., correct CaSi but not SiO2 for Si)
# as determined by the data.oxidation_states key
entry1 = ComputedEntry.from_dict(
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -17.01015622,
"composition": defaultdict(float, {"Si": 2.0, "Ca": 2.0}),
"energy_adjustments": [],
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Ca_sv", "Si"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Ca_sv", "PBE Si"],
"oxide_type": "None",
},
"data": {
"oxide_type": "None",
"oxidation_states": {"Ca": 2.0, "Si": -2.0},
},
"entry_id": "mp-1563",
"correction": 0.0,
}
)
entry2 = ComputedEntry.from_dict(
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -47.49120119,
"composition": defaultdict(float, {"Si": 2.0, "O": 4.0}),
"energy_adjustments": [],
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Si", "O"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Si", "PBE O"],
"oxide_type": "oxide",
},
"data": {
"oxide_type": "oxide",
"oxidation_states": {"Si": 4.0, "O": -2.0},
},
"entry_id": "mp-546794",
"correction": 0.0,
}
)
# CaSi; only correction should be Si
self.assertAlmostEqual(self.compat.process_entry(entry1).correction, 0.072 * 2)
# SiO2; only corrections should be oxide
self.assertAlmostEqual(self.compat.process_entry(entry2).correction, -0.688 * 4)
def test_U_values(self):
# Wrong U value
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.2, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
# GGA run of U
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
# GGA+U run of non-U
entry = ComputedEntry(
"Al2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Al": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Al 06Sep2000",
"hash": "805c888bbd2793e462311f6a20d873d9",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
# Materials project should not have a U for sulfides
entry = ComputedEntry(
"FeS2",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "S": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "f7f8e4a74a6cbb8d63e41f4373b54df2",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
def test_wrong_psp(self):
# Wrong psp
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
def test_element_processing(self):
entry = ComputedEntry(
"O",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": {},
"potcar_spec": [
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
}
],
"run_type": "GGA",
},
)
entry = self.compat.process_entry(entry)
self.assertAlmostEqual(entry.energy, -1)
self.assertAlmostEqual(self.ggacompat.process_entry(entry).energy, -1)
def test_get_explanation_dict(self):
compat = MaterialsProjectCompatibility(check_potcar_hash=False)
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
d = compat.get_explanation_dict(entry)
self.assertEqual("MPRelaxSet Potcar Correction", d["corrections"][0]["name"])
def test_energy_adjustments(self):
compat = MaterialsProject2020Compatibility(check_potcar_hash=False)
ggacompat = MaterialsProject2020Compatibility("GGA", check_potcar_hash=False)
# Fe 4 Co 2 O 8 (Fe2CoO4)
entry = {
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -91.94962744,
"composition": defaultdict(float, {"Fe": 4.0, "Co": 2.0, "O": 8.0}),
"energy_adjustments": [],
"parameters": {
"run_type": "GGA+U",
"is_hubbard": True,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Fe_pv", "Co", "O"],
"pot_type": "paw",
},
"hubbards": {"Fe": 5.3, "Co": 3.32, "O": 0.0},
"potcar_symbols": ["PBE Fe_pv", "PBE Co", "PBE O"],
"oxide_type": "oxide",
},
"data": {"oxide_type": "oxide"},
"entry_id": "mp-753222",
"correction": 0,
}
entry = ComputedEntry.from_dict(entry)
c = compat.process_entry(entry)
assert "MP2020 anion correction (oxide)" in [ea.name for ea in c.energy_adjustments]
assert "MP2020 GGA/GGA+U mixing correction (Fe)" in [ea.name for ea in c.energy_adjustments]
assert "MP2020 GGA/GGA+U mixing correction (Co)" in [ea.name for ea in c.energy_adjustments]
for ea in c.energy_adjustments:
if ea.name == "MP2020 GGA/GGA+U mixing correction (Fe)":
self.assertAlmostEqual(ea.value, -2.256 * 4)
self.assertAlmostEqual(ea.uncertainty, 0.0101 * 4)
elif ea.name == "MP2020 GGA/GGA+U mixing correction (Co)":
self.assertAlmostEqual(ea.value, -1.638 * 2)
self.assertAlmostEqual(ea.uncertainty, 0.006 * 2)
elif ea.name == "MP2020 anion correction (oxide)":
self.assertAlmostEqual(ea.value, -0.688 * 8)
self.assertAlmostEqual(ea.uncertainty, 0.002 * 8)
entry.parameters["is_hubbard"] = False
del entry.parameters["hubbards"]
c = ggacompat.process_entry(entry)
self.assertNotIn(
"MP2020 GGA/GGA+U mixing correction",
[ea.name for ea in c.energy_adjustments],
)
def test_process_entries(self):
entries = self.compat.process_entries([self.entry1, self.entry2, self.entry3])
self.assertEqual(len(entries), 2)
def test_config_file(self):
config_file = Path(PymatgenTest.TEST_FILES_DIR / "MP2020Compatibility_alternate.yaml")
compat = MaterialsProject2020Compatibility(config_file=config_file)
entry = compat.process_entry(self.entry1)
for ea in entry.energy_adjustments:
if ea.name == "MP2020 GGA/GGA+U mixing correction (Fe)":
self.assertAlmostEqual(ea.value, -0.224 * 2)
def test_msonable(self):
compat_dict = self.compat.as_dict()
decoder = MontyDecoder()
temp_compat = decoder.process_decoded(compat_dict)
self.assertIsInstance(temp_compat, MaterialsProject2020Compatibility)
class MITCompatibilityTest(unittest.TestCase):
def tearDown(self):
warnings.simplefilter("default")
def setUp(self):
warnings.simplefilter("ignore")
self.compat = MITCompatibility(check_potcar_hash=True)
self.ggacompat = MITCompatibility("GGA", check_potcar_hash=True)
self.entry_O = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry_F = ComputedEntry(
"FeF3",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "F": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE F 08Apr2002",
"hash": "180141c33d032bfbfff30b3bea9d23dd",
},
],
},
)
self.entry_S = ComputedEntry(
"FeS2",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 1.9, "S": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "d368db6899d8839859bbee4811a42a88",
},
],
},
)
def test_process_entry(self):
# Correct parameters
self.assertIsNotNone(self.compat.process_entry(self.entry_O))
self.assertIsNotNone(self.compat.process_entry(self.entry_F))
def test_correction_value(self):
# Check actual correction
self.assertAlmostEqual(self.compat.process_entry(self.entry_O).correction, -1.723 * 2 - 0.66975 * 3)
self.assertAlmostEqual(self.compat.process_entry(self.entry_F).correction, -1.723)
self.assertAlmostEqual(self.compat.process_entry(self.entry_S).correction, -1.113)
def test_U_value(self):
# MIT should have a U value for Fe containing sulfides
self.assertIsNotNone(self.compat.process_entry(self.entry_S))
# MIT should not have a U value for Ni containing sulfides
entry = ComputedEntry(
"NiS2",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Ni": 1.9, "S": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Ni 06Sep2000",
"hash": "653f5772e68b2c7fd87ffd1086c0d710",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "d368db6899d8839859bbee4811a42a88",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
entry = ComputedEntry(
"NiS2",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Ni 06Sep2000",
"hash": "653f5772e68b2c7fd87ffd1086c0d710",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "d368db6899d8839859bbee4811a42a88",
},
],
},
)
self.assertIsNotNone(self.ggacompat.process_entry(entry))
def test_wrong_U_value(self):
# Wrong U value
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.2, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
# GGA run
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
self.assertIsNotNone(self.ggacompat.process_entry(entry))
def test_wrong_psp(self):
# Wrong psp
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.assertIsNone(self.compat.process_entry(entry))
def test_element_processing(self):
# Testing processing of elements.
entry = ComputedEntry(
"O",
-1,
correction=0.0,
parameters={
"is_hubbard": False,
"hubbards": {},
"potcar_spec": [
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
}
],
"run_type": "GGA",
},
)
entry = self.compat.process_entry(entry)
self.assertAlmostEqual(entry.energy, -1)
def test_same_potcar_symbol(self):
# Same symbol different hash thus a different potcar
# Correct Hash Correct Symbol
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
# Incorrect Hash Correct Symbol
entry2 = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{"titel": "PAW_PBE Fe 06Sep2000", "hash": "DifferentHash"},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
compat = MITCompatibility()
self.assertEqual(len(compat.process_entries([entry, entry2])), 2)
self.assertEqual(len(self.compat.process_entries([entry, entry2])), 1)
def test_revert_to_symbols(self):
# Test that you can revert to potcar_symbols if potcar_spec is not present
compat = MITCompatibility()
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_symbols": ["PAW_PBE Fe 06Sep2000", "PAW_PBE O 08Apr2002"],
},
)
self.assertIsNotNone(compat.process_entry(entry))
# raise if check_potcar_hash is set
self.assertRaises(ValueError, self.compat.process_entry, entry)
    def test_potcar_doesnt_match_structure(self):
compat = MITCompatibility()
entry = ComputedEntry(
"Li2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_symbols": ["PAW_PBE Fe_pv 06Sep2000", "PAW_PBE O 08Apr2002"],
},
)
self.assertIsNone(compat.process_entry(entry))
def test_potcar_spec_is_none(self):
compat = MITCompatibility(check_potcar_hash=True)
entry = ComputedEntry(
"Li2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [None, None],
},
)
self.assertIsNone(compat.process_entry(entry))
def test_get_explanation_dict(self):
compat = MITCompatibility(check_potcar_hash=False)
entry = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 4.0, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
d = compat.get_explanation_dict(entry)
self.assertEqual("MITRelaxSet Potcar Correction", d["corrections"][0]["name"])
def test_msonable(self):
compat_dict = self.compat.as_dict()
decoder = MontyDecoder()
temp_compat = decoder.process_decoded(compat_dict)
self.assertIsInstance(temp_compat, MITCompatibility)
class OxideTypeCorrectionTest(unittest.TestCase):
def setUp(self):
self.compat = MITCompatibility(check_potcar_hash=True)
def test_no_struct_compat(self):
lio2_entry_nostruct = ComputedEntry(
Composition("Li2O4"),
-3,
data={"oxide_type": "superoxide"},
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
lio2_entry_corrected = self.compat.process_entry(lio2_entry_nostruct)
self.assertAlmostEqual(lio2_entry_corrected.energy, -3 - 0.13893 * 4, 4)
def test_process_entry_superoxide(self):
el_li = Element("Li")
el_o = Element("O")
latt = Lattice([[3.985034, 0.0, 0.0], [0.0, 4.881506, 0.0], [0.0, 0.0, 2.959824]])
elts = [el_li, el_li, el_o, el_o, el_o, el_o]
        coords = [
            [0.500000, 0.500000, 0.500000],
            [0.0, 0.0, 0.0],
            [0.632568, 0.085090, 0.500000],
            [0.367432, 0.914910, 0.500000],
            [0.132568, 0.414910, 0.000000],
            [0.867432, 0.585090, 0.000000],
        ]
struct = Structure(latt, elts, coords)
lio2_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
lio2_entry_corrected = self.compat.process_entry(lio2_entry)
self.assertAlmostEqual(lio2_entry_corrected.energy, -3 - 0.13893 * 4, 4)
def test_process_entry_peroxide(self):
latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
coords = [
[0.666656, 0.666705, 0.750001],
[0.333342, 0.333378, 0.250001],
[0.000001, 0.000041, 0.500001],
[0.000001, 0.000021, 0.000001],
[0.333347, 0.333332, 0.649191],
[0.333322, 0.333353, 0.850803],
[0.666666, 0.666686, 0.350813],
[0.666665, 0.666684, 0.149189],
]
struct = Structure(latt, elts, coords)
li2o2_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4)
def test_process_entry_ozonide(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_o, el_o, el_o]
latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911, 133.847504, 102.228244, 95.477342)
coords = [
[0.513004, 0.513004, 1.000000],
[0.017616, 0.017616, 0.000000],
[0.649993, 0.874790, 0.775203],
[0.099587, 0.874790, 0.224797],
]
struct = Structure(latt, elts, coords)
lio3_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
lio3_entry_corrected = self.compat.process_entry(lio3_entry)
self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0)
def test_process_entry_oxide(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_o]
latt = Lattice.from_parameters(3.278, 3.278, 3.278, 60, 60, 60)
coords = [[0.25, 0.25, 0.25], [0.75, 0.75, 0.75], [0.0, 0.0, 0.0]]
struct = Structure(latt, elts, coords)
li2o_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
li2o_entry_corrected = self.compat.process_entry(li2o_entry)
self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 - 0.66975, 4)
class SulfideTypeCorrection2020Test(unittest.TestCase):
def setUp(self):
self.compat = MaterialsProject2020Compatibility(check_potcar_hash=False)
def test_struct_no_struct(self):
# Processing an Entry should produce the same correction whether or not
# that entry has a Structure attached to it.
# Na2S2, entry mp-2400, with and without structure
from collections import defaultdict
entry_struct_as_dict = {
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedStructureEntry",
"energy": -28.42580746,
"composition": defaultdict(float, {"Na": 4.0, "S": 4.0}),
"correction": 0,
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Na_pv", "S"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Na_pv", "PBE S"],
"oxide_type": "None",
},
"data": {"oxide_type": "None"},
"entry_id": "mp-2400",
"structure": {
"@module": "pymatgen.core.structure",
"@class": "Structure",
"charge": None,
"lattice": {
"matrix": [
[4.5143094, 0.0, 0.0],
[-2.2571547, 3.90950662, 0.0],
[0.0, 0.0, 10.28414905],
],
"a": 4.5143094,
"b": 4.514309399183436,
"c": 10.28414905,
"alpha": 90.0,
"beta": 90.0,
"gamma": 120.00000000598358,
"volume": 181.50209256783256,
},
"sites": [
{
"species": [{"element": "Na", "occu": 1}],
"abc": [0.0, 0.0, 0.0],
"xyz": [0.0, 0.0, 0.0],
"label": "Na",
"properties": {"magmom": 0.0},
},
{
"species": [{"element": "Na", "occu": 1}],
"abc": [0.0, 0.0, 0.5],
"xyz": [0.0, 0.0, 5.142074525],
"label": "Na",
"properties": {"magmom": 0.0},
},
{
"species": [{"element": "Na", "occu": 1}],
"abc": [0.33333333, 0.66666667, 0.25],
"xyz": [
-2.2571547075855847e-08,
2.6063377596983557,
2.5710372625,
],
"label": "Na",
"properties": {"magmom": 0.0},
},
{
"species": [{"element": "Na", "occu": 1}],
"abc": [0.66666667, 0.33333333, 0.75],
"xyz": [2.2571547225715474, 1.3031688603016447, 7.7131117875],
"label": "Na",
"properties": {"magmom": 0.0},
},
{
"species": [{"element": "S", "occu": 1}],
"abc": [0.33333333, 0.66666667, 0.644551],
"xyz": [
-2.2571547075855847e-08,
2.6063377596983557,
6.62865855432655,
],
"label": "S",
"properties": {"magmom": 0.0},
},
{
"species": [{"element": "S", "occu": 1}],
"abc": [0.66666667, 0.33333333, 0.144551],
"xyz": [
2.2571547225715474,
1.3031688603016447,
1.4865840293265502,
],
"label": "S",
"properties": {"magmom": 0.0},
},
{
"species": [{"element": "S", "occu": 1}],
"abc": [0.66666667, 0.33333333, 0.355449],
"xyz": [
2.2571547225715474,
1.3031688603016447,
3.65549049567345,
],
"label": "S",
"properties": {"magmom": 0.0},
},
{
"species": [{"element": "S", "occu": 1}],
"abc": [0.33333333, 0.66666667, 0.855449],
"xyz": [
-2.2571547075855847e-08,
2.6063377596983557,
8.79756502067345,
],
"label": "S",
"properties": {"magmom": 0.0},
},
],
},
}
entry_no_struct_as_dict = {
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -28.42580746,
"composition": defaultdict(float, {"Na": 4.0, "S": 4.0}),
"correction": 0,
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Na_pv", "S"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Na_pv", "PBE S"],
"oxide_type": "None",
},
"data": {"oxide_type": "None"},
"entry_id": "mp-2400",
}
na2s2_entry_struct = ComputedStructureEntry.from_dict(entry_struct_as_dict)
na2s2_entry_nostruct = ComputedEntry.from_dict(entry_no_struct_as_dict)
struct_corrected = self.compat.process_entry(na2s2_entry_struct)
nostruct_corrected = self.compat.process_entry(na2s2_entry_nostruct)
self.assertAlmostEqual(struct_corrected.correction, nostruct_corrected.correction, 4)
class OxideTypeCorrectionNoPeroxideCorrTest(unittest.TestCase):
def setUp(self):
self.compat = MITCompatibility(correct_peroxide=False)
def test_oxide_energy_corr(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_o]
latt = Lattice.from_parameters(3.278, 3.278, 3.278, 60, 60, 60)
coords = [[0.25, 0.25, 0.25], [0.75, 0.75, 0.75], [0.0, 0.0, 0.0]]
struct = Structure(latt, elts, coords)
li2o_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
li2o_entry_corrected = self.compat.process_entry(li2o_entry)
self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 - 0.66975, 4)
def test_peroxide_energy_corr(self):
latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
coords = [
[0.666656, 0.666705, 0.750001],
[0.333342, 0.333378, 0.250001],
[0.000001, 0.000041, 0.500001],
[0.000001, 0.000021, 0.000001],
[0.333347, 0.333332, 0.649191],
[0.333322, 0.333353, 0.850803],
[0.666666, 0.666686, 0.350813],
[0.666665, 0.666684, 0.149189],
]
struct = Structure(latt, elts, coords)
li2o2_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
self.assertRaises(AssertionError, self.assertAlmostEqual, *(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4))
self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.66975 * 4, 4)
def test_ozonide(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_o, el_o, el_o]
latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911, 133.847504, 102.228244, 95.477342)
coords = [
[0.513004, 0.513004, 1.000000],
[0.017616, 0.017616, 0.000000],
[0.649993, 0.874790, 0.775203],
[0.099587, 0.874790, 0.224797],
]
struct = Structure(latt, elts, coords)
lio3_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
lio3_entry_corrected = self.compat.process_entry(lio3_entry)
self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0 - 3 * 0.66975)
class TestMaterialsProjectAqueousCompatibility:
"""
Test MaterialsProjectAqueousCompatibility
-x- formation energy of H2O should always be -2.458 eV/H2O
-x- H2 energy should always be the same value
-x- H2O energy should always be the same value
-x- Should get warnings if you init without all energy args
-x- Should get CompatibilityError if you get_entry without all energy args
-x- energy args should auto-populate from entries passed to process_entries
-x- check compound entropies appropriately added
-x- check hydrate adjustment appropriately applied
Notes:
Argument values from MaterialsProjectCompatibility as of April 2020:
corrected DFT energy of H2O = -15.5875 eV/H2O (mp-697111) or -5.195 eV/atom
corrected DFT energy of O2 = -4.9276 eV/atom (mp-12957)
    total energy correction applied to H2O: -0.70229 eV/H2O or -0.234 eV/atom
"""
def test_h_h2o_energy_with_args(self):
compat = MaterialsProjectAqueousCompatibility(
o2_energy=-4.9276,
h2o_energy=-5.195,
h2o_adjustments=-0.234,
solid_compat=None,
)
h2o_entry_1 = ComputedEntry(Composition("H2O"), -16)
h2o_entry_2 = ComputedEntry(Composition("H4O2"), -10)
h2_entry_1 = ComputedEntry(Composition("H2"), -16)
h2_entry_2 = ComputedEntry(Composition("H8"), -100)
for entry in [h2o_entry_1, h2o_entry_2, h2_entry_1, h2_entry_2]:
compat.process_entries(entry)
assert h2o_entry_1.energy_per_atom == pytest.approx(h2o_entry_2.energy_per_atom)
assert h2_entry_1.energy_per_atom == pytest.approx(h2_entry_2.energy_per_atom)
o2_entry_1 = ComputedEntry(Composition("O2"), -4.9276 * 2)
o2_entry_1 = compat.process_entries(o2_entry_1)[0]
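        # Formation energy per H2O from per-atom energies: 3 * E/atom(H2O)
        # is the energy per formula unit, 2 * E/atom(H2) is E(H2), and
        # E/atom(O2) equals 0.5 * E(O2), so the next line computes
        # E(H2O) - E(H2) - 0.5 * E(O2).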
h2o_form_e = 3 * h2o_entry_2.energy_per_atom - 2 * h2_entry_2.energy_per_atom - o2_entry_1.energy_per_atom
assert h2o_form_e == pytest.approx(MU_H2O)
def test_h_h2o_energy_no_args(self):
with pytest.warns(UserWarning, match="You did not provide the required O2 and H2O energies."):
compat = MaterialsProjectAqueousCompatibility(solid_compat=None)
h2o_entry_1 = ComputedEntry(Composition("H2O"), (-5.195 + 0.234) * 3, correction=-0.234 * 3)
h2o_entry_2 = ComputedEntry(Composition("H4O2"), -10)
h2_entry_1 = ComputedEntry(Composition("H2"), -16)
h2_entry_2 = ComputedEntry(Composition("H8"), -100)
o2_entry_1 = ComputedEntry(Composition("O2"), -4.9276 * 2)
with pytest.raises(CompatibilityError, match="Either specify the energies as arguments to "):
compat.get_adjustments(h2_entry_1)
entries = compat.process_entries([h2o_entry_1, h2o_entry_2, h2_entry_1, h2_entry_2, o2_entry_1])
assert compat.o2_energy == -4.9276
assert compat.h2o_energy == -5.195
assert compat.h2o_adjustments == -0.234
h2o_entries = [e for e in entries if e.composition.reduced_formula == "H2O"]
h2_entries = [e for e in entries if e.composition.reduced_formula == "H2"]
assert h2o_entries[0].energy_per_atom == pytest.approx(h2o_entries[1].energy_per_atom)
assert h2_entries[0].energy_per_atom == pytest.approx(h2_entries[1].energy_per_atom)
h2o_form_e = 3 * h2o_entries[1].energy_per_atom - 2 * h2_entries[0].energy_per_atom - o2_entry_1.energy_per_atom
assert h2o_form_e == pytest.approx(MU_H2O)
def test_compound_entropy(self):
compat = MaterialsProjectAqueousCompatibility(
o2_energy=-10, h2o_energy=-20, h2o_adjustments=-0.5, solid_compat=None
)
o2_entry_1 = ComputedEntry(Composition("O2"), -4.9276 * 2)
initial_energy = o2_entry_1.energy_per_atom
o2_entry_1 = compat.process_entries(o2_entry_1)[0]
processed_energy = o2_entry_1.energy_per_atom
assert initial_energy - processed_energy == pytest.approx(compat.cpd_entropies["O2"])
def test_hydrate_adjustment(self):
compat = MaterialsProjectAqueousCompatibility(
o2_energy=-10, h2o_energy=-20, h2o_adjustments=-0.5, solid_compat=None
)
hydrate_entry = ComputedEntry(Composition("FeH4O2"), -10)
initial_energy = hydrate_entry.energy
hydrate_entry = compat.process_entries(hydrate_entry)[0]
processed_energy = hydrate_entry.energy
assert initial_energy - processed_energy == pytest.approx(2 * (compat.h2o_adjustments * 3 + MU_H2O))
class AqueousCorrectionTest(unittest.TestCase):
def setUp(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
self.corr = AqueousCorrection(fp)
def test_compound_energy(self):
O2_entry = self.corr.correct_entry(ComputedEntry(Composition("O2"), -4.9355 * 2))
H2_entry = self.corr.correct_entry(ComputedEntry(Composition("H2"), 3))
H2O_entry = self.corr.correct_entry(ComputedEntry(Composition("H2O"), 3))
H2O_formation_energy = H2O_entry.energy - (H2_entry.energy + O2_entry.energy / 2.0)
self.assertAlmostEqual(H2O_formation_energy, -2.46, 2)
entry = ComputedEntry(Composition("H2O"), -16)
entry = self.corr.correct_entry(entry)
self.assertAlmostEqual(entry.energy, -14.916, 4)
entry = ComputedEntry(Composition("H2O"), -24)
entry = self.corr.correct_entry(entry)
self.assertAlmostEqual(entry.energy, -14.916, 4)
entry = ComputedEntry(Composition("Cl"), -24)
entry = self.corr.correct_entry(entry)
self.assertAlmostEqual(entry.energy, -24.344373, 4)
class MITAqueousCompatibilityTest(unittest.TestCase):
def setUp(self):
self.compat = MITCompatibility(check_potcar_hash=True)
self.aqcompat = MITAqueousCompatibility(check_potcar_hash=True)
module_dir = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
self.aqcorr = AqueousCorrection(fp)
def test_aqueous_compat(self):
el_li = Element("Li")
el_o = Element("O")
el_h = Element("H")
latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
elts = [el_h, el_h, el_li, el_li, el_o, el_o]
coords = [
[0.000000, 0.500000, 0.413969],
[0.500000, 0.000000, 0.586031],
[0.000000, 0.000000, 0.000000],
[0.500000, 0.500000, 0.000000],
[0.000000, 0.500000, 0.192672],
[0.500000, 0.000000, 0.807328],
]
struct = Structure(latt, elts, coords)
lioh_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
{
"titel": "PAW_PBE H 15Jun2001",
"hash": "bb43c666e3d36577264afe07669e9582",
},
],
},
)
lioh_entry_compat = self.compat.process_entry(lioh_entry)
lioh_entry_compat_aqcorr = self.aqcorr.correct_entry(lioh_entry_compat)
lioh_entry_aqcompat = self.aqcompat.process_entry(lioh_entry)
self.assertAlmostEqual(lioh_entry_compat_aqcorr.energy, lioh_entry_aqcompat.energy, 4)
    def test_potcar_doesnt_match_structure(self):
compat = MITCompatibility()
el_li = Element("Li")
el_o = Element("O")
el_h = Element("H")
latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
elts = [el_h, el_h, el_li, el_li, el_o, el_o]
coords = [
[0.000000, 0.500000, 0.413969],
[0.500000, 0.000000, 0.586031],
[0.000000, 0.000000, 0.000000],
[0.500000, 0.500000, 0.000000],
[0.000000, 0.500000, 0.192672],
[0.500000, 0.000000, 0.807328],
]
struct = Structure(latt, elts, coords)
lioh_entry = ComputedStructureEntry(
struct,
-3,
parameters={
"is_hubbard": False,
"hubbards": None,
"run_type": "GGA",
"potcar_symbols": [
"PAW_PBE Fe 17Jan2003",
"PAW_PBE O 08Apr2002",
"PAW_PBE H 15Jun2001",
],
},
)
self.assertIsNone(compat.process_entry(lioh_entry))
def test_msonable(self):
compat_dict = self.aqcompat.as_dict()
decoder = MontyDecoder()
temp_compat = decoder.process_decoded(compat_dict)
self.assertIsInstance(temp_compat, MITAqueousCompatibility)
def test_dont_error_on_weird_elements(self):
entry = ComputedEntry(
"AmSi",
-1,
correction=0.0,
parameters={
"potcar_spec": [
{
"titel": "PAW_PBE Am 08May2007",
"hash": "ed5eebd8a143e35a0c19e9f8a2c42a93",
},
{
"titel": "PAW_PBE Si 05Jan2001",
"hash": "b2b0ea6feb62e7cde209616683b8f7f5",
},
]
},
)
self.assertIsNone(self.compat.process_entry(entry))
class CorrectionErrors2020CompatibilityTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.compat = MaterialsProject2020Compatibility()
self.entry1 = ComputedEntry(
"Fe2O3",
-1,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry_sulfide = ComputedEntry(
"FeS",
-1,
0.0,
parameters={
"is_hubbard": False,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE S 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry2 = ComputedEntry(
"Fe3O4",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "O": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
},
)
self.entry_fluoride = ComputedEntry(
"FeF3",
-2,
correction=0.0,
parameters={
"is_hubbard": True,
"hubbards": {"Fe": 5.3, "F": 0},
"run_type": "GGA+U",
"potcar_spec": [
{
"titel": "PAW_PBE Fe_pv 06Sep2000",
"hash": "994537de5c4122b7f1b77fb604476db4",
},
{
"titel": "PAW_PBE F 08Apr2002",
"hash": "180141c33d032bfbfff30b3bea9d23dd",
},
],
},
)
self.entry_hydride = ComputedEntry(
"LiH",
-2,
correction=0.0,
parameters={
"is_hubbard": False,
"run_type": "GGA",
"potcar_spec": [
{
"titel": "PAW_PBE Li_sv 10Sep2004",
"hash": "8245d7383d7556214082aa40a887cd96",
},
{
"titel": "PAW_PBE H 15Jun2001",
"hash": "bb43c666e3d36577264afe07669e9582",
},
],
},
)
def tearDown(self):
warnings.simplefilter("default")
def test_errors(self):
entry1_corrected = self.compat.process_entry(self.entry1)
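        # Independent per-atom correction uncertainties combine in
        # quadrature: for Fe2O3 that is sqrt((2 * 0.0101)**2 + (3 * 0.002)**2)
        # from 2 Fe and 3 O atoms.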
self.assertAlmostEqual(
entry1_corrected.correction_uncertainty,
sqrt((2 * 0.0101) ** 2 + (3 * 0.002) ** 2),
)
entry2_corrected = self.compat.process_entry(self.entry2)
self.assertAlmostEqual(
entry2_corrected.correction_uncertainty,
sqrt((3 * 0.0101) ** 2 + (4 * 0.002) ** 2),
)
entry_sulfide_corrected = self.compat.process_entry(self.entry_sulfide)
self.assertAlmostEqual(entry_sulfide_corrected.correction_uncertainty, 0.0094)
entry_fluoride_corrected = self.compat.process_entry(self.entry_fluoride)
self.assertAlmostEqual(
entry_fluoride_corrected.correction_uncertainty,
sqrt((3 * 0.0027) ** 2 + 0.0101 ** 2),
)
entry_hydride_corrected = self.compat.process_entry(self.entry_hydride)
self.assertAlmostEqual(entry_hydride_corrected.correction_uncertainty, 0.0013)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
richardtran415/pymatgen
|
pymatgen/entries/tests/test_compatibility.py
|
Python
|
mit
| 80,319
|
[
"pymatgen"
] |
9b02910a6873324c3d4a0deee56172bd9c01a43a31c9e04261b40fec47db47b7
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudconnectoruser
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CloudConnectorUser Avi RESTful Object
description:
    - This module is used to configure the CloudConnectorUser object.
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
azure_serviceprincipal:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
azure_userpass:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
name:
description:
- Name of the object.
required: true
private_key:
description:
- Private_key of cloudconnectoruser.
public_key:
description:
- Public_key of cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Cloud connector user that is used for integration into cloud platforms
avi_cloudconnectoruser:
controller: '{{ controller }}'
name: root
password: '{{ password }}'
private_key: |
-----BEGIN RSA PRIVATE KEY-----
        -----END RSA PRIVATE KEY-----
public_key: 'ssh-rsa ...'
tenant_ref: admin
username: '{{ username }}'
"""
RETURN = '''
obj:
description: CloudConnectorUser (api/cloudconnectoruser) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
azure_serviceprincipal=dict(type='dict',),
azure_userpass=dict(type='dict',),
name=dict(type='str', required=True),
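        # no_log=True keeps the private key out of Ansible logs and output.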
private_key=dict(type='str', no_log=True,),
public_key=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudconnectoruser',
set(['private_key']))
if __name__ == '__main__':
main()
|
le9i0nx/ansible
|
lib/ansible/modules/network/avi/avi_cloudconnectoruser.py
|
Python
|
gpl-3.0
| 4,708
|
[
"VisIt"
] |
e629b2a9203397e58dabc7aef1e12da4eca703ec7f2328b037d6fe85a03bf808
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .script_interface import ScriptInterfaceHelper, script_interface_register
@script_interface_register
class ComFixed(ScriptInterfaceHelper):
"""Fix the center of mass of specific types.
    Subtracts the mass-weighted fraction of the total
    force acting on all particles of the type from
the particles after each force calculation. This
keeps the center of mass of the type fixed iff
the total momentum of the type is zero.
Parameters
----------
types : array_like
List of types for which the center of mass should be fixed.
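    Examples
    --------
    A minimal construction sketch, assuming the keyword-argument
    interface described above (attaching the instance to a running
    simulation is not shown here, as that step is version-dependent):
    >>> com_fixed = ComFixed(types=[0, 1])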
"""
_so_name = "ComFixed"
_so_creation_policy = "GLOBAL"
|
mkuron/espresso
|
src/python/espressomd/comfixed.py
|
Python
|
gpl-3.0
| 1,349
|
[
"ESPResSo"
] |
151580e9836fa5ea971211572c68d6e8a7681dc50304ac9563e7856b4d3bdc6c
|
"""
Single page performance tests for Studio.
"""
from bok_choy.web_app_test import with_cache
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from ..tests.helpers import AcceptanceTest
class StudioPagePerformanceTest(AcceptanceTest):
"""
Base class to capture studio performance with HTTP Archives.
To import courses for the bok choy tests, pass the --imports_dir=<course directory> argument to the paver command
where <course directory> contains the (un-archived) courses to be imported.
"""
course_org = 'edX'
course_num = 'Open_DemoX'
course_run = 'edx_demo_course'
har_mode = 'explicit'
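    # 'explicit' HAR mode: pages are captured only when the tests call
    # har_capturer.add_page(...) and har_capturer.save_har(...) below.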
def setUp(self):
"""
Authenticate as staff so we can view and edit courses.
"""
super(StudioPagePerformanceTest, self).setUp()
AutoAuthPage(self.browser, staff=True).visit()
def record_visit_outline(self):
"""
Produce a HAR for loading the course outline page.
"""
course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run)
har_name = 'OutlinePage_{org}_{course}'.format(
org=self.course_org,
course=self.course_num
)
self.har_capturer.add_page(self.browser, har_name)
course_outline_page.visit()
self.har_capturer.save_har(self.browser, har_name)
def record_visit_unit(self, section_title, subsection_title, unit_title):
"""
Produce a HAR for loading a unit page.
"""
course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run).visit()
        course_outline_unit = (
            course_outline_page.section(section_title)
            .subsection(subsection_title)
            .expand_subsection()
            .unit(unit_title)
        )
har_name = 'UnitPage_{org}_{course}'.format(
org=self.course_org,
course=self.course_num
)
self.har_capturer.add_page(self.browser, har_name)
course_outline_unit.go_to()
self.har_capturer.save_har(self.browser, har_name)
class StudioJusticePerformanceTest(StudioPagePerformanceTest):
"""
Test performance on the HarvardX Justice course.
"""
course_org = 'HarvardX'
course_num = 'ER22x'
course_run = '2013_Spring'
@with_cache
def test_visit_outline(self):
"""Record visiting the Justice course outline page"""
self.record_visit_outline()
@with_cache
def test_visit_unit(self):
"""Record visiting a Justice unit page"""
self.record_visit_unit(
'Lecture 1 - Doing the Right Thing',
'Discussion Prompt: Ethics of Torture',
'Discussion Prompt: Ethics of Torture'
)
class StudioPub101PerformanceTest(StudioPagePerformanceTest):
"""
Test performance on Andy's PUB101 outline page.
"""
course_org = 'AndyA'
course_num = 'PUB101'
course_run = 'PUB101'
@with_cache
def test_visit_outline(self):
"""Record visiting the PUB101 course outline page"""
self.record_visit_outline()
@with_cache
def test_visit_unit(self):
"""Record visiting the PUB101 unit page"""
self.record_visit_unit('Released', 'Released', 'Released')
|
ahmedaljazzar/edx-platform
|
common/test/acceptance/performance/test_studio_performance.py
|
Python
|
agpl-3.0
| 3,348
|
[
"VisIt"
] |
8cbcfffe21f6a998f120428c9fc29064b96392235370b112a08bfef1f5a5433c
|
"""
==========================
DKI MultiTensor Simulation
==========================
In this example we show how to simulate the diffusion kurtosis imaging (DKI)
data of a single voxel. DKI captures information about the non-Gaussian
properties of water diffusion which is a consequence of the existence of tissue
barriers and compartments. In these simulations compartmental heterogeneity is
taken into account by modeling different compartments for the intra- and
extra-cellular media of two populations of fibers. These simulations are
performed according to [RNH2015]_.
We first import all relevant modules.
"""
import numpy as np
import matplotlib.pyplot as plt
from dipy.sims.voxel import (multi_tensor_dki, single_tensor)
from dipy.data import get_data
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.reconst.dti import (decompose_tensor, from_lower_triangular)
"""
For the simulation we will need a GradientTable with the b-values and
b-vectors. Here we use the GradientTable of the sample Dipy dataset
'small_64D'.
"""
fimg, fbvals, fbvecs = get_data('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
"""
DKI requires data from more than one non-zero b-value. Since the dataset
'small_64D' was acquired with a single non-zero b-value, we artificially
produce a second non-zero b-value.
"""
bvals = np.concatenate((bvals, bvals * 2), axis=0)
bvecs = np.concatenate((bvecs, bvecs), axis=0)
"""
The b-values and gradient directions are then converted to Dipy's
GradientTable format.
"""
gtab = gradient_table(bvals, bvecs)
"""
In ``mevals`` we save the eigenvalues of each tensor. To simulate crossing
fibers with two different media (representing the intra- and extra-cellular
media), a total of four components have to be taken into account (i.e. the
first two compartments correspond to the intra- and extra-cellular media of
the first fiber population, while the others correspond to the media of the
second fiber population).
"""
mevals = np.array([[0.00099, 0, 0],
[0.00226, 0.00087, 0.00087],
[0.00099, 0, 0],
[0.00226, 0.00087, 0.00087]])
"""
In ``angles`` we save in polar coordinates (:math:`\theta, \phi`) the principal
axis of each compartment tensor. To simulate crossing fibers at 70 degrees,
the compartments of the first fiber are aligned to the x-axis, while the
compartments of the second fiber are aligned to the x-z plane with an angular
deviation of 70 degrees from the first one.
"""
angles = [(90, 0), (90, 0), (20, 0), (20, 0)]
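# Note (illustrative): each tuple is (theta, phi) in degrees, so the two
# fiber populations at theta = 90 and theta = 20 cross at 70 degrees.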
"""
In ``fractions`` we save the percentage contribution of each compartment,
computed by multiplying the percentage contribution of each fiber population
by the water fraction of the corresponding medium.
"""
fie = 0.49 # intra axonal water fraction
fractions = [fie*50, (1 - fie)*50, fie*50, (1 - fie)*50]
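# Quick sanity check (not part of the original example): the four
# compartment fractions are percentages and should sum to 100.
assert abs(sum(fractions) - 100) < 1e-8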
"""
Having defined the parameters for all tissue compartments, the elements of the
diffusion tensor (dt), the elements of the kurtosis tensor (kt) and the DW
signals simulated from the DKI model can be obtained using the function
``multi_tensor_dki``.
"""
signal_dki, dt, kt = multi_tensor_dki(gtab, mevals, S0=200, angles=angles,
fractions=fractions, snr=None)
"""
We can also add Rician noise with a specific SNR.
"""
signal_noisy, dt, kt = multi_tensor_dki(gtab, mevals, S0=200,
angles=angles, fractions=fractions,
snr=10)
"""
For comparison purposes, we also compute the DW signal if only the diffusion
tensor components are taken into account. For this we use Dipy's function
``single_tensor``, which requires that ``dt`` be decomposed into its
eigenvalues and eigenvectors.
"""
dt_evals, dt_evecs = decompose_tensor(from_lower_triangular(dt))
signal_dti = single_tensor(gtab, S0=200, evals=dt_evals, evecs=dt_evecs,
snr=None)
"""
Finally, we can visualize the different versions of the simulated signals for
all assumed gradient directions and b-values.
"""
plt.plot(signal_dti, label='noiseless dti')
plt.plot(signal_dki, label='noiseless dki')
plt.plot(signal_noisy, label='with noise')
plt.legend()
plt.savefig('simulated_dki_signal.png')  # save before show(); show() can clear the figure
plt.show()
"""
.. figure:: simulated_dki_signal.png
:align: center
**Simulated signals obtained from the DTI and DKI models.**
Non-Gaussian diffusion in tissues produces smaller signal attenuation at
large b-values than free Gaussian water diffusion. This can be seen in the
figure above: the signals simulated from the DKI model show larger DW signal
intensities than the signals obtained from the diffusion tensor components
alone.
References:
[RNH2015] R. Neto Henriques et al., "Exploring the 3D geometry of the diffusion
kurtosis tensor - Impact on the development of robust tractography
procedures and novel biomarkers", NeuroImage (2015) 111, 85-99.
"""
|
sinkpoint/dipy
|
doc/examples/simulate_dki.py
|
Python
|
bsd-3-clause
| 5,070
|
[
"Gaussian"
] |
2bb1bd50b22a9287ef34bfbad13e7ae665238513c5d561cc42e8712275667147
|
"""
SCATTER_ADVANCED.PY
Support material for the blog post "Working with Spyder", on Programando
Ciência.
* Author: Alexandre 'Jaguar' Fioravante de Siqueira
* Contact: http://www.programandociencia.com/about/
* Support material:
http://www.github.com/alexandrejaguar/programandociencia
* In order to cite this material, please use the reference below
(this is a Chicago-like style):
de Siqueira, Alexandre Fioravante. "Complex scatter plots on Python
[Part I] - Obtaining data and creating a preliminary plot". Programando
Ciência. 2016, May 08. Available at
http://www.programandociencia.com/2016/05/08/complex-scatter-plots-on-python-part-i-obtaining-data-and-creating-a-preliminary-plot/.
Access date: <ACCESS DATE>.
Copyright (C) Alexandre Fioravante de Siqueira
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# importing necessary packages.
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np  # needed for np.sqrt in the size legend below
import pandas as pd
# reading data_ibge.xls
data_brazil = pd.read_excel('data_ibge.xls', sheetname=2)
# color palette 5-class Dark2, from ColorBrewer2: http://colorbrewer2.org/
colors = ['#1b9e77',
'#d95f02',
'#7570b3',
'#e7298a',
'#66a61e']
# attribute_color() points to the color correspondent to each region.
def attribute_color(region):
colors = {
'North': '#1b9e77',
'Northeast': '#d95f02',
'Southeast': '#7570b3',
'South': '#e7298a',
'Central-West': '#66a61e'
}
return colors.get(region, 'black')
# creating the color vector.
color_region = list()
qty_states = len(data_brazil['Region'])
for state in range(qty_states):
color_region.append(attribute_color(data_brazil['Region'][state]))
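# The same color vector can be built in one line with pandas (an
# equivalent alternative, kept commented out to preserve the original flow):
# color_region = data_brazil['Region'].map(attribute_color).tolist()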
# generating the plot.
plt.scatter(x=data_brazil['LifeExpec'],
y=data_brazil['GDPperCapita'],
s=data_brazil['PopX1000'],
c=color_region,
alpha=0.6)
plt.title('Brazilian development in 2013, according to each state',
fontsize=22)
plt.xlabel('Life expectancy (years)', fontsize=22)
plt.ylabel('GDP per capita (R$)', fontsize=22)
plt.grid(True)
# inserting state abbreviation into each circle.
for state in range(len(data_brazil['UF'])):
plt.text(x=data_brazil['LifeExpec'][state],
y=data_brazil['GDPperCapita'][state],
s=data_brazil['UF'][state],
fontsize=16)
# inserting legend; the "normal" legend does not work, so we adapt a
# 2D object with the colors we defined previously.
regions = ['North',
'Northeast',
'Southeast',
'South',
'Central-West']
# legend 1
legend1_line2d = list()
for step in range(len(colors)):
legend1_line2d.append(mlines.Line2D([0], [0],
linestyle='none',
marker='o',
alpha=0.6,
markersize=15,
markerfacecolor=colors[step]))
legend1 = plt.legend(legend1_line2d,
regions,
numpoints=1,
fontsize=22,
loc='best',
shadow=True)
# legend 2
legend2_line2d = list()
legend2_line2d.append(mlines.Line2D([0], [0],
linestyle='none',
marker='o',
alpha=0.6,
markersize=np.sqrt(100),
markerfacecolor='#D3D3D3'))
legend2_line2d.append(mlines.Line2D([0], [0],
linestyle='none',
marker='o',
alpha=0.6,
markersize=np.sqrt(1000),
markerfacecolor='#D3D3D3'))
legend2_line2d.append(mlines.Line2D([0], [0],
linestyle='none',
marker='o',
alpha=0.6,
markersize=np.sqrt(10000),
markerfacecolor='#D3D3D3'))
legend2 = plt.legend(legend2_line2d,
['1', '10', '100'],
title='Population (in 100,000)',
numpoints=1,
fontsize=20,
loc='upper left',
frameon=False, # no edges
labelspacing=3, # increase space between labels
handlelength=5, # increase space between objects
borderpad=4) # increase the margins of the legend
plt.gca().add_artist(legend1)
plt.setp(legend2.get_title(), fontsize=22) # increasing the legend font
plt.show()
|
alexandrejaguar/programandociencia
|
2016/0508-scatteradv/scatter_advanced.py
|
Python
|
gpl-2.0
| 5,408
|
[
"Jaguar"
] |
e543e891d47f0d21c98b3795175eeb56b02f39f613eac5dd1c88cdb8f9fb3b93
|
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 2 11:37:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HIV-95-vaccine'
author = ', '.join(('Jan Medlock', 'Abhishek Pandey',
'Alyssa S. Parpia', 'Amber Tang',
'Laura A. Skrip', 'Alison P. Galvani'))
years = '2015–2017'
copyright = '{}: {}'.format(years, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = u'1'
import subprocess
import shlex
cmd = shlex.split('git describe --tags --always')
version = subprocess.check_output(cmd).decode().strip()
# The full version, including alpha/beta/rc tags.
# release = u'1'
release = version
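# Defensive variant (a sketch, not in the original config): builds from a
# source tarball have no .git directory, so the unguarded check_output() call
# above would raise. In practice this try/except would replace that call.
try:
    version = subprocess.check_output(cmd).decode().strip()
except (OSError, subprocess.CalledProcessError):
    version = '0+unknown'
release = version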
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '{}_doc'.format(project)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{}.tex'.format(project), '{} Documentation'.format(project),
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, '{} Documentation'.format(project),
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, '{} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'ipython': ('https://ipython.org/', None),
'matplotlib': ('http://matplotlib.org/', None),
'seaborn': # ('https://seaborn.pydata.org/', None),
('https://github.com/seaborn/seaborn.github.io/raw/master/',
None),
'joblib': ('https://pythonhosted.org/joblib/', None),
'cartopy': ('http://scitools.org.uk/cartopy/docs/latest/', None),
'geopy': ('https://geopy.readthedocs.io/en/latest/', None),
}
autodoc_default_flags = ['members', 'undoc-members']
|
janmedlock/HIV-95-vaccine
|
docs/conf.py
|
Python
|
agpl-3.0
| 10,557
|
[
"Amber"
] |
406dad324cc57471ad3a6bf120cd1099092b8ad6ee7c8353ebb3a3ec865ef673
|
# -*- coding: utf-8 -*-
from Visitor.CNodeVisitor import *
from CNode import *
from helper.id import identity
from helper.timeconv import *
from helper.icalconv import *
from helper.recurrence import *
from hashlib import sha1
import datetime
class ToLowerCaseVisitor(CNodeVisitor):
def visit_any(self, o):
o.name = o.name.lower()
new_attr = {}
for k,v in o.attr.iteritems():
new_attr[k.lower()] = v
o.attr = new_attr
[self.visit(c) for c in o.children]
class ICS2ErebusVisitor(CNodeVisitor):
def __init__(self,cnode):
self.calendar = CNode(name='calendar')
self.timezones = CNode(name='timezones')
self.timezone_ids = {}
self.events = CNode(name='events')
self.calendar.add_child(self.timezones)
self.calendar.add_child(self.events)
ToLowerCaseVisitor().visit(cnode)
self.ics = cnode
def run(self):
timezones = self.accept(self.ics, 'vtimezone')
events = self.accept(self.ics, 'vevent')
for e in timezones:
self.timezones.add_child(e)
for e in events:
self.events.add_child(e)
return self.calendar
def __convert_timezone(self,ics):
if ics.name == 'standard':
tz_e = CNode(name='Standard')
elif ics.name == 'daylight':
tz_e = CNode(name='Daylight')
else:
raise ValueError("Unknown timezone type: %s", ics.name)
offset = utcoffset2vDDD(ics.attr['tzoffsetto'], negate=True)
offset_e = CNode(name='Offset',content=offset)
tz_e.add_child(offset_e)
rrule = ics.attr['rrule']
start = ics.attr['dtstart']
if rrule:
rec = rrule2recurrence(rrule, start)
tz_e.add_child(rec.children[0])
time = start.dt
timestr = "%.2d:%.2d:%.2d" %(time.hour, time.minute, time.second)
time_e = CNode(name='Time',content=timestr)
tz_e.add_child(time_e)
return tz_e
def visit_vtimezone(self,ics):
tz = CNode(name='TimeZone')
baseoffset_e = CNode(name='BaseOffset',content='PT0M')
tz.add_child(baseoffset_e)
if len(ics.children) == 1:
# Just add a base offset
std_e = ics.children[0]
# TODO: fix this
else:
tz_s = self.__convert_timezone(ics.search('standard'))
tz.add_child(tz_s)
tz_d = self.__convert_timezone(ics.search('daylight'))
tz.add_child(tz_d)
tzid = gen_tz_id(tz)
tz.attr['TimeZoneName'] = tzid
tzid_e = CNode(name='tzid',content=tzid)
tz.add_child(tzid_e)
self.timezone_ids[ics.attr['tzid']] = tzid
return tz
def visit_vevent(self,ics):
event = CNode(name='event')
def conv(icaln, ebus, f):
if not ics.attr.has_key(icaln): return
ics_e = ics.attr[icaln]
if not ics_e: return
new = f(ics_e)
if not new: return
event.attr[ebus] = new
conv('uid', 'ical_uid', identity)
conv('summary', 'summary', identity)
conv('dtstart', 'start', vDDD2dt)
conv('dtend', 'end', vDDD2dt)
conv('class', 'class', identity)
conv('location', 'location', identity)
conv('dtstamp', 'timestamp', vDDD2dt)
conv('description', 'description', identity)
if ics.attr.has_key('rrule'):
rec = rrule2recurrence(ics.attr['rrule'], event.attr['start'])
if rec:
event.add_child(rec)
rec_range = rrule2range(ics.attr['rrule'], event.attr['start'])
rec.add_child(rec_range)
if type(ics.attr['dtstart']) != datetime.date and \
ics.attr['dtstart'].params.has_key('tzid'):
i_tzid = ics.attr['dtstart'].params['tzid']
tz = self.timezone_ids[i_tzid]
tz_e = CNode(name='tzid',content=tz)
event.add_child(tz_e)
return event
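# Minimal usage sketch (hypothetical `ics_root`, an already-parsed iCalendar
# CNode tree; parsing happens outside this module):
#     erebus_calendar = ICS2ErebusVisitor(ics_root).run()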
|
orbekk/erebus
|
Visitor/ICS2ErebusVisitor.py
|
Python
|
gpl-2.0
| 4,075
|
[
"VisIt"
] |
b7866f1392b32989c35d1094bf6fd616cbef787330b378a55804f4eea1571314
|
"""
Functions for automatic selection optimisation.
"""
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import bayes_mvs
from scipy.stats.kde import gaussian_kde
from latools.helpers import Bunch, _warning
from latools.helpers.signal import rolling_window, bool_2_indices
from latools.helpers.stat_fns import nominal_values
from latools.helpers.plot import trace_plot
warnings.showwarning = _warning
def calc_windows(fn, s, min_points):
"""
Apply fn to all contiguous regions in s that have at least min_points.
"""
max_points = np.sum(~np.isnan(s))
n_points = max_points - min_points
out = np.full((n_points, s.size), np.nan)
# skip nans, for speed
ind = ~np.isnan(s)
s = s[ind]
for i, w in enumerate(range(min_points, s.size)):
r = rolling_window(s, w, pad=np.nan)
out[i, ind] = np.apply_along_axis(fn, 1, r)
return out
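# Illustration (a sketch): row i of calc_windows(np.nanmean, s, 5) holds the
# rolling nanmean of s computed with window size (5 + i), aligned to the
# original samples; NaN entries in s are skipped.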
def calc_window_mean_std(s, min_points, ind=None):
"""
    Calculate the rolling mean and standard deviation of s for all window
    sizes with at least min_points points.
"""
max_points = np.sum(~np.isnan(s))
n_points = max_points - min_points
mean = np.full((n_points, s.size), np.nan)
std = np.full((n_points, s.size), np.nan)
# skip nans, for speed
if ind is None:
ind = ~np.isnan(s)
else:
ind = ind & ~np.isnan(s)
s = s[ind]
for i, w in enumerate(range(min_points, s.size)):
r = rolling_window(s, w, pad=np.nan)
mean[i, ind] = r.sum(1) / w
std[i, ind] = (((r - mean[i, ind][:, np.newaxis])**2).sum(1) / (w - 1))**0.5
# mean[i, ind] = np.apply_along_axis(np.nanmean, 1, r)
# std[i, ind] = np.apply_along_axis(np.nanstd, 1, r)
return mean, std
def scale(s):
"""
Remove the mean, and divide by the standard deviation.
"""
return (s - np.nanmean(s)) / np.nanstd(s)
def bayes_scale(s):
"""
    Remove mean and divide by standard deviation, using bayes_mvs statistics.
"""
if sum(~np.isnan(s)) > 1:
bm, bv, bs = bayes_mvs(s[~np.isnan(s)])
return (s - bm.statistic) / bs.statistic
else:
return np.full(s.shape, np.nan)
def median_scaler(s):
"""
Remove median, divide by IQR.
"""
if sum(~np.isnan(s)) > 2:
ss = s[~np.isnan(s)]
median = np.median(ss)
IQR = np.diff(np.percentile(ss, [25, 75]))
return (s - median) / IQR
else:
return np.full(s.shape, np.nan)
# scaler = bayes_scale
scaler = median_scaler
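# Why median/IQR is the default scaler (a sketch): it is robust to outliers,
# which would otherwise dominate the mean/std scaling above, e.g.
#     >>> s = np.array([1., 2., 3., 100.])
#     >>> median_scaler(s)    # centre and spread barely shifted by the 100.
#     >>> scale(s)            # mean and std both dragged by the outlier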
def calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias=0):
# calculate statistics
stds = []
means = []
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for a in analytes:
m, s = calc_window_mean_std(nominal_values(d.focus[a]), min_points, ind)
means.append(m)
stds.append(s)
# compile stats
stds = np.array(stds)
means = np.array(means)
# calculate rsd
sstds = stds / abs(means)
# scale means for each analyte
smeans = np.apply_along_axis(scaler, 2, means)
# sstds = np.apply_along_axis(scaler, 2, stds)
# apply weights
if weights is not None:
sstds *= np.reshape(weights, (len(analytes), 1, 1))
smeans *= np.reshape(weights, (len(analytes), 1, 1))
# average of all means and standard deviations
msstds = sstds.mean(0)
msmeans = smeans.mean(0)
    # apply bias
if x_bias > 0:
nonan = ~np.isnan(smeans[0,0])
fill = np.full(smeans[0,0].shape, np.nan)
fill[nonan] = np.linspace(1 - x_bias, 1 + x_bias, sum(nonan))
bias = np.full(smeans[0].shape, fill)
msmeans *= bias
msstds *= bias
return msmeans, msstds
def signal_optimiser(d, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, ind=None, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
    by `threshold_mode`. For example, using the 'kde_max' method, a
    probability density function (PDF) is calculated for the std and
    amplitude values, and each threshold is set at the maximum of its
    PDF. These thresholds are then used to identify the size and position
    of the longest contiguous region where the std is below the std threshold,
    and the amplitude is below (mode='minimise') or above (mode='maximise')
    the amplitude threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
    threshold_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
"""
errmsg = ''
if isinstance(analytes, str):
analytes = [analytes]
if ind is None:
ind = np.full(len(d.Time), True)
# initial catch
if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points):
        errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias)
# second catch
    if all(np.isnan(msmeans).flat) or all(np.isnan(msstds).flat):
        errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
# define thresholds
valid = ['kde_first_max', 'kde_max', 'median', 'bayes_mvs', 'mean']
n_under = 0
i = np.argwhere(np.array(valid) == threshold_mode)[0, 0]
o_threshold_mode = threshold_mode
while (n_under <= 0) & (i < len(valid)):
if threshold_mode == 'median':
# median - OK, but best?
std_threshold = np.nanmedian(msstds)
mean_threshold = np.nanmedian(msmeans)
elif threshold_mode == 'mean':
# mean
std_threshold = np.nanmean(msstds)
mean_threshold = np.nanmean(msmeans)
elif threshold_mode == 'kde_max':
# maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
mean_threshold = xm[np.argmax(mdf)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
std_threshold = xr[np.argmax(rdf)]
elif threshold_mode == 'kde_first_max':
# first local maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] &
np.r_[mdf[:-1] > mdf[1:], False] &
(mdf > 0.25 * mdf.max()))
mean_threshold = xm[np.min(inds)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] &
np.r_[rdf[:-1] > rdf[1:], False] &
(rdf > 0.25 * rdf.max()))
std_threshold = xr[np.min(inds)]
elif threshold_mode == 'bayes_mvs':
# bayesian mvs.
bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)])
std_threshold = bm.statistic
bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)])
mean_threshold = bm.statistic
elif callable(threshold_mode):
std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten())
mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten())
else:
try:
mean_threshold, std_threshold = threshold_mode
except:
raise ValueError('\nthreshold_mode must be one of:\n ' + ', '.join(valid) + ',\na custom function, or a \n(mean_threshold, std_threshold) tuple.')
# apply threshold_mult
if isinstance(threshold_mult, (int, float)):
std_threshold *= threshold_mult
mean_threshold *= threshold_mult
elif len(threshold_mult) == 2:
mean_threshold *= threshold_mult[0]
std_threshold *= threshold_mult[1]
else:
raise ValueError('\nthreshold_mult must be a float, int or tuple of length 2.')
rind = (msstds < std_threshold)
if mode == 'minimise':
mind = (msmeans < mean_threshold)
else:
mind = (msmeans > mean_threshold)
ind = rind & mind
n_under = ind.sum()
if n_under == 0:
i += 1
if i <= len(valid) - 1:
threshold_mode = valid[i]
else:
                errmsg = 'Optimisation failed. None of the threshold_mode options worked. Try reducing min_points.'
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
if i > 0:
errmsg = "optimisation failed using threshold_mode='{:}', falling back to '{:}'".format(o_threshold_mode, threshold_mode)
# identify max number of points within thresholds
passing = np.argwhere(ind)
opt_n_points = passing[:, 0].max()
opt_centre = passing[passing[:, 0] == opt_n_points, 1].min()
opt_n_points += min_points
# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),
# np.arange(min_points, min_points + msmeans.shape[0]))
# opt_n_points = npoints[ind].max()
# plus/minus one point to allow some freedom to shift selection window.
# cind = ind & (npoints == opt_n_points)
# opt_centre = centres[cind].min()
if opt_n_points % 2 == 0:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2)
else:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2 + 1)
filt = np.zeros(d.Time.shape, dtype=bool)
filt[lims[0]:lims[1]] = True
return Bunch({'means': msmeans,
'stds': msstds,
'mean_threshold': mean_threshold,
'std_threshold': std_threshold,
'lims': lims,
'filt': filt,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': opt_centre,
'opt_n_points': opt_n_points,
'weights': weights,
'optimisation_success': True,
'errmsg': errmsg}), errmsg
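# Usage sketch (hypothetical analyte names; `d` must be a latools.D object):
#     opt, err = signal_optimiser(d, analytes=['Mg24', 'Ca43'],
#                                 min_points=10, mode='minimise')
#     if opt['optimisation_success']:
#         selection = opt['filt']  # boolean array marking the optimal region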
def optimisation_plot(d, overlay_alpha=0.5, **kwargs):
"""
    Plot the result of signal_optimiser.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
"""
if not hasattr(d, 'opt'):
raise ValueError('Please run `signal_optimiser` before trying to plot its results.')
out = []
for n, opt in d.opt.items():
if not opt['optimisation_success']:
out.append((None, None))
else:
# unpack variables
means = opt['means']
stds = opt['stds']
min_points = opt['min_points']
mean_threshold = opt['mean_threshold']
std_threshold = opt['std_threshold']
opt_centre = opt['opt_centre']
opt_n_points = opt['opt_n_points']
centres, npoints = np.meshgrid(np.arange(means.shape[1]), np.arange(min_points, min_points + means.shape[0]))
rind = (stds < std_threshold)
mind = (means < mean_threshold)
# color scale and histogram limits
mlim = np.percentile(means.flatten()[~np.isnan(means.flatten())], (0, 99))
rlim = np.percentile(stds.flatten()[~np.isnan(stds.flatten())], (0, 99))
cmr = plt.cm.Blues
cmr.set_bad((0,0,0,0.3))
cmm = plt.cm.Reds
cmm.set_bad((0,0,0,0.3))
# create figure
fig = plt.figure(figsize=[7,7])
ma = fig.add_subplot(3, 2, 1)
ra = fig.add_subplot(3, 2, 2)
# work out image limits
nonan = np.argwhere(~np.isnan(means))
xdif = np.ptp(nonan[:, 1])
ydif = np.ptp(nonan[:, 0])
extent = (nonan[:, 1].min() - np.ceil(0.1 * xdif), # x min
nonan[:, 1].max() + np.ceil(0.1 * xdif), # x max
nonan[:, 0].min() + min_points, # y min
nonan[:, 0].max() + np.ceil(0.1 * ydif) + min_points) # y max
mm = ma.imshow(means, origin='lower', cmap=cmm, vmin=mlim[0], vmax=mlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.set_ylabel('N points')
ma.set_xlabel('Center')
fig.colorbar(mm, ax=ma, label='Amplitude')
mr = ra.imshow(stds, origin='lower', cmap=cmr, vmin=rlim[0], vmax=rlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ra.set_xlabel('Center')
fig.colorbar(mr, ax=ra, label='std')
# view limits
ra.imshow(~rind, origin='lower', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.imshow(~mind, origin='lower', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
for ax in [ma, ra]:
ax.scatter(opt_centre, opt_n_points, color=(1,1,1,0.7), edgecolor='k',marker='o')
ax.set_xlim(extent[:2])
ax.set_ylim(extent[-2:])
# draw histograms
mah = fig.add_subplot(3, 2, 3)
rah = fig.add_subplot(3, 2, 4)
mah.set_xlim(mlim)
mbin = np.linspace(*mah.get_xlim(), 50)
mah.hist(means.flatten()[~np.isnan(means.flatten())], mbin)
mah.axvspan(mean_threshold, mah.get_xlim()[1], color=(0,0,0,overlay_alpha))
mah.axvline(mean_threshold, c='r')
mah.set_xlabel('Scaled Mean Analyte Conc')
mah.set_ylabel('N')
rah.set_xlim(rlim)
rbin = np.linspace(*rah.get_xlim(), 50)
rah.hist(stds.flatten()[~np.isnan(stds.flatten())], rbin)
rah.axvspan(std_threshold, rah.get_xlim()[1], color=(0,0,0,0.4))
rah.axvline(std_threshold, c='r')
rah.set_xlabel('std')
tax = fig.add_subplot(3,1,3)
trace_plot(d, opt.analytes, ax=tax, **kwargs)
tax.axvspan(*d.Time[[opt.lims[0], opt.lims[1]]], alpha=0.2)
tax.set_xlim(d.Time[d.ns == n].min() - 3, d.Time[d.ns == n].max() + 3)
fig.tight_layout()
out.append((fig, (ma, ra, mah, rah, tax)))
return out
|
oscarbranson/latools
|
latools/filtering/signal_optimiser.py
|
Python
|
mit
| 19,522
|
[
"Gaussian"
] |
782b64a273afebcc1951c949aa6a9bf915c8f7de892f18c12e6c7d8c6209325c
|
"""sim_utils.py:
Helper function related with simulation.
Last modified: Sat Jan 18, 2014 05:01PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
from . import _moose
from . import print_utils
from . import verification_utils
def recordTarget(tablePath, target, field = 'vm', **kwargs):
"""Setup a table to record at given path.
Make sure that all root paths in tablePath exists.
Returns a table.
"""
    # If target is not a moose object but a string representing the intended path
# then we need to fetch the object first.
if type( target) == str:
if not _moose.exists(target):
msg = "Given target `{}` does not exists. ".format( target )
raise RuntimeError( msg )
else:
target = _moose.Neutral( target )
assert target.path, "Target must have a valid moose path."
table = _moose.Table( tablePath )
assert table
    # Sanitise the field name into moose's getField form.
    if field == "output":
        pass
    elif 'get' not in field:
        field = 'get'+field[0].upper()+field[1:]
    else:
        field = field[:3]+field[3].upper()+field[4:]
try:
print_utils.dump("TABLE"
, "Connecting table {} to target {} field {}".format(
table.path
, target.path
, field
)
)
table.connect( 'requestOut', target, field )
except Exception as e:
debug.dump("ERROR"
, [ "Failed to connect table to target"
, e
]
)
raise e
assert table, "Moose is not able to create a recording table"
return table
def run(simTime, verify=False):
if verify:
verification_utils.verify()
_moose.start(simTime)
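# Usage sketch (hypothetical moose paths; assumes a compartment already exists
# at '/model/soma' and a container at '/data'):
#     vm_table = recordTarget('/data/vm', '/model/soma', field='vm')
#     run(0.1, verify=False)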
|
subhacom/moose-core
|
python/moose/sim_utils.py
|
Python
|
gpl-3.0
| 2,098
|
[
"MOOSE"
] |
5de8e1ea30ee8f93f5cf8657db274786a6f4680a8aeb93f9d56c673dff3b83f5
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.moltemplate.org
# http://www.chem.ucsb.edu/~sheagroup
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013, Regents of the University of California
# All rights reserved.
"""
lttree.py
lttree.py is an extension of the generic ttree.py program.
This version can understand and manipulate ttree-style templates which
are specialized for storing molecule-specific data for use in LAMMPS.
The main difference between lttree.py and ttree.py is:
Unlike ttree.py, lttree.py understands rigid-body movement commands like
"rot()" and "move()" which allows it to reorient and move each copy
of a molecule to a new location. (ttree.py just ignores these commands.
Consequently LAMMPS input file (fragments) created with ttree.py have
invalid (overlapping) atomic coordinates and must be modified or aguemted
later (by loading atomic coordinates from a PDB file or an XYZ file).
lttree.py understands the "Data Atoms" section of a LAMMPS
data file (in addition to the various "atom_styles" which effect it).
Additional LAMMPS-specific features may be added in the future.
"""
g_program_name = __file__.split('/')[-1] # ='lttree.py'
g_date_str = '2020-3-10'
g_version_str = '0.80.2'
import sys
from collections import defaultdict
import pkg_resources
try:
from .ttree import BasicUISettings, BasicUIParseArgs, EraseTemplateFiles, \
StackableCommand, PopCommand, PopRightCommand, PopLeftCommand, \
PushCommand, PushLeftCommand, PushRightCommand, ScopeCommand, \
WriteVarBindingsFile, StaticObj, InstanceObj, \
BasicUI, ScopeBegin, ScopeEnd, WriteFileCommand, Render
from .ttree_lex import InputError, TextBlock, DeleteLinesWithBadVars, \
TemplateLexer, TableFromTemplate, VarRef, TextBlock, ErrorLeader
from .lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid, \
ColNames2Coords, ColNames2Vects, \
data_atoms, data_prefix, data_masses, \
data_velocities, data_ellipsoids, data_triangles, data_lines, \
data_pair_coeffs, data_bond_coeffs, data_angle_coeffs, \
data_dihedral_coeffs, data_improper_coeffs, data_bondbond_coeffs, \
data_bondangle_coeffs, data_middlebondtorsion_coeffs, \
data_endbondtorsion_coeffs, data_angletorsion_coeffs, \
data_angleangletorsion_coeffs, data_bondbond13_coeffs, \
data_angleangle_coeffs, data_bonds_by_type, data_angles_by_type, \
data_dihedrals_by_type, data_impropers_by_type, \
data_bonds, data_bond_list, data_angles, data_dihedrals, data_impropers, \
data_boundary, data_pbc, data_prefix_no_space, in_init, in_settings, \
in_prefix
from .ttree_matrix_stack import AffineTransform, MultiAffineStack, \
LinTransform, Matrix2Quaternion, MultQuat
except (ImportError, SystemError, ValueError):
# not installed as a package
from ttree import *
from ttree_lex import *
from lttree_styles import *
from ttree_matrix_stack import *
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
class LttreeSettings(BasicUISettings):
def __init__(self,
user_bindings_x=None,
user_bindings=None,
order_method='by_command'):
BasicUISettings.__init__(self,
user_bindings_x,
user_bindings,
order_method)
# The following new member data indicate which columns store
# LAMMPS-specific information.
        # The next 6 members keep track of the different columns
# of the "Data Atoms" section of a LAMMPS data file:
self.column_names = [] # <--A list of column names (optional)
self.ii_coords = [] # <--A list of triplets of column indexes storing coordinate data
self.ii_vects = [] # <--A list of triplets of column indexes storing directional data
# (such as dipole or ellipsoid orientations)
self.i_atomid = None # <--An integer indicating which column has the atomid
self.i_atomtype = None # <--An integer indicating which column has the atomtype
self.i_molid = None # <--An integer indicating which column has the molid, if applicable
self.print_full_atom_type_name_in_masses = False # <--how to print atom type names in the "Masses" section of a DATA file?
def LttreeParseArgs(argv, settings, main=False, show_warnings=True):
# By default, include force_fields provided with the package
argv.extend(["-import-path",
pkg_resources.resource_filename(__name__, 'force_fields/')])
BasicUIParseArgs(argv, settings)
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by ttree.py:
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom-style') or
(argv[i].lower() == '-atom_style')):
if i + 1 >= len(argv):
raise InputError('Error(' + g_program_name + '): The ' + argv[i] + ' flag should be followed by a LAMMPS\n'
' atom_style name (or single quoted string containing a space-separated\n'
' list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n')
settings.column_names = AtomStyle2ColNames(argv[i + 1])
sys.stderr.write('\n \"' + data_atoms + '\" column format:\n')
sys.stderr.write(
' ' + (' '.join(settings.column_names)) + '\n\n')
settings.ii_coords = ColNames2Coords(settings.column_names)
settings.ii_vects = ColNames2Vects(settings.column_names)
settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(
settings.column_names)
del(argv[i:i + 2])
elif (argv[i].lower() == '-icoord'):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers\n'
' corresponding to column numbers for coordinates in\n'
' the \"' + data_atoms + '\" section of a LAMMPS data file.\n')
ilist = argv[i + 1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers.\n'
' This is usually a list of 3 integers, but it can contain more.\n'
                             ' The number of coordinate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
            # populate ii_coords (read later by TransformAtomText); use a
            # separate loop variable so the argv index `i` is not clobbered.
            settings.ii_coords = []
            for k in range(0, len(ilist) // 3):
                cols = [int(ilist[3 * k]) + 1,
                        int(ilist[3 * k + 1]) + 1,
                        int(ilist[3 * k + 2]) + 1]
                settings.ii_coords.append(cols)
del(argv[i:i + 2])
elif (argv[i].lower() == '-ivect'):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers\n'
' corresponding to column numbers for direction vectors in\n'
' the \"' + data_atoms + '\" section of a LAMMPS data file.\n')
ilist = argv[i + 1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers.\n'
' This is usually a list of 3 integers, but it can contain more.\n'
                             ' The number of coordinate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
            # populate ii_vects (read later by TransformAtomText); again use a
            # separate loop variable so the argv index `i` is not clobbered.
            settings.ii_vects = []
            for k in range(0, len(ilist) // 3):
                cols = [int(ilist[3 * k]) + 1,
                        int(ilist[3 * k + 1]) + 1,
                        int(ilist[3 * k + 2]) + 1]
                settings.ii_vects.append(cols)
del(argv[i:i + 2])
elif ((argv[i].lower() == '-iatomid') or
(argv[i].lower() == '-iid') or
(argv[i].lower() == '-iatom-id')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"' +
data_atoms + '\" section of a\n'
' LAMMPS data file contains the atom id number (typically 1).\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            settings.i_atomid = int(argv[i + 1]) - 1
del(argv[i:i + 2])
elif ((argv[i].lower() == '-iatomtype') or
(argv[i].lower() == '-itype') or
(argv[i].lower() == '-iatom-type')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"' +
data_atoms + '\" section of a\n'
' LAMMPS data file contains the atom type.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            settings.i_atomtype = int(argv[i + 1]) - 1
del(argv[i:i + 2])
elif ((argv[i].lower() == '-imolid') or
(argv[i].lower() == '-imol') or
(argv[i].lower() == '-imol-id') or
(argv[i].lower() == '-imoleculeid') or
(argv[i].lower() == '-imolecule-id')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"' +
data_atoms + '\" section of a\n'
' LAMMPS data file contains the molecule id number.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            settings.i_molid = int(argv[i + 1]) - 1
del(argv[i:i + 2])
elif (argv[i].lower() == '-full-comment-names'):
settings.print_full_atom_type_name_in_masses = True
del(argv[i:i + 1])
elif (argv[i].lower() == '-short-comment-names'):
settings.print_full_atom_type_name_in_masses = False
del(argv[i:i + 1])
elif (argv[i].find('-') == 0) and main:
# elif (__name__ == "__main__"):
raise InputError('Error(' + g_program_name + '):\n'
                             'Unrecognized command line argument \"' + argv[i] + '\"\n')
else:
i += 1
if main:
# Instantiate the lexer we will be using.
        # (The lexer's __init__() function requires an opened file.
# Assuming __name__ == "__main__", then the name of that file should
# be the last remaining (unprocessed) argument in the argument list.
# Otherwise, then name of that file will be determined later by the
# python script which imports this module, so we let them handle it.)
if len(argv) == 1:
raise InputError('Error: This program requires at least one argument\n'
' the name of a file containing ttree template commands\n')
elif len(argv) == 2:
try:
# Parse text from the file named argv[1]
settings.lex.infile = argv[1]
settings.lex.instream = open(argv[1], 'r')
except IOError:
sys.stderr.write('Error: unable to open file\n'
' \"' + argv[1] + '\"\n'
' for reading.\n')
sys.exit(1)
del(argv[1:2])
else:
# if there are more than 2 remaining arguments,
problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
raise InputError('Syntax Error(' + g_program_name + '):\n\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' ' + (' '.join(problem_args)) + '\n\n'
' (The actual problem may be earlier in the argument list.\n'
' If these arguments are source files, then keep in mind\n'
' that this program can not parse multiple source files.)\n'
' Check the syntax of the entire argument list.\n')
if len(settings.ii_coords) == 0 and show_warnings:
sys.stderr.write('########################################################\n'
'## WARNING: atom_style unspecified ##\n'
'## --> \"' + data_atoms + '\" column data has an unknown format ##\n'
'## Assuming atom_style = \"full\" ##\n'
# '########################################################\n'
# '## To specify the \"'+data_atoms+'\" column format you can: ##\n'
# '## 1) Use the -atomstyle \"STYLE\" argument ##\n'
# '## where \"STYLE\" is a string indicating a LAMMPS ##\n'
# '## atom_style, including hybrid styles.(Standard ##\n'
# '## atom styles defined in 2011 are supported.) ##\n'
# '## 2) Use the -atomstyle \"COL_LIST\" argument ##\n'
# '## where \"COL_LIST" is a quoted list of strings ##\n'
# '## indicating the name of each column. ##\n'
# '## Names \"x\",\"y\",\"z\" are interpreted as ##\n'
# '## atomic coordinates. \"mux\",\"muy\",\"muz\" ##\n'
# '## are interpreted as direction vectors. ##\n'
# '## 3) Use the -icoord \"cx cy cz...\" argument ##\n'
# '## where \"cx cy cz\" is a list of integers ##\n'
# '## indicating the column numbers for the x,y,z ##\n'
# '## coordinates of each atom. ##\n'
# '## 4) Use the -ivect \"cmux cmuy cmuz...\" argument ##\n'
# '## where \"cmux cmuy cmuz...\" is a list of ##\n'
# '## integers indicating the column numbers for ##\n'
# '## the vector that determines the direction of a ##\n'
# '## dipole or ellipsoid (ie. a rotateable vector).##\n'
# '## (More than one triplet can be specified. The ##\n'
# '## number of entries must be divisible by 3.) ##\n'
'########################################################\n')
# The default atom_style is "full"
settings.column_names = AtomStyle2ColNames('full')
settings.ii_coords = ColNames2Coords(settings.column_names)
settings.ii_vects = ColNames2Vects(settings.column_names)
settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(
settings.column_names)
return
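# Invocation sketch (an assumption, mirroring how ttree.py drives parsing):
#     settings = LttreeSettings()
#     LttreeParseArgs(sys.argv[:], settings, main=True)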
def TransformAtomText(text, matrix, settings):
""" Apply transformations to the coordinates and other vector degrees
of freedom stored in the \"Data Atoms\" section of a LAMMPS data file.
This is the \"text\" argument.
The \"matrix\" stores the aggregate sum of combined transformations
to be applied.
"""
#sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')
lines = text.split('\n')
for i in range(0, len(lines)):
line_orig = lines[i]
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
comment = ' ' + line_orig[ic:].rstrip('\n')
else:
line = line_orig.rstrip('\n')
comment = ''
columns = line.split()
if len(columns) > 0:
if len(columns) == len(settings.column_names) + 3:
raise InputError('Error: lttree.py does not yet support integer unit-cell counters \n'
' within the \"' + data_atoms + '\" section of a LAMMPS data file.\n'
' Instead please add the appropriate offsets (these offsets\n'
' should be multiples of the cell size) to the atom coordinates\n'
' in the data file, and eliminate the extra columns. Then try again.\n'
' (If you get this message often, email me and I\'ll fix this limitation.)')
if len(columns) < len(settings.column_names):
raise InputError('Error: The number of columns in your data file does not\n'
' match the LAMMPS atom_style you selected.\n'
' Use the -atomstyle <style> command line argument.\n')
x0 = [0.0, 0.0, 0.0]
x = [0.0, 0.0, 0.0]
# Atomic coordinates transform using "affine" transformations
# (translations plus rotations [or other linear transformations])
for cxcycz in settings.ii_coords:
for d in range(0, 3):
x0[d] = float(columns[cxcycz[d]])
AffineTransform(x, matrix, x0) # x = matrix * x0 + b
for d in range(0, 3): # ("b" is part of "matrix")
columns[cxcycz[d]] = str(x[d])
# Dipole moments and other direction-vectors
            # are not affected by translational movement
for cxcycz in settings.ii_vects:
for d in range(0, 3):
x0[d] = float(columns[cxcycz[d]])
LinTransform(x, matrix, x0) # x = matrix * x0
for d in range(0, 3):
columns[cxcycz[d]] = str(x[d])
lines[i] = ' '.join(columns) + comment
return '\n'.join(lines)
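# Usage sketch (hypothetical names; `matrix_stack` is the MultiAffineStack
# maintained by _ExecCommands below):
#     new_text = TransformAtomText(atoms_text, matrix_stack.M, settings)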
def TransformEllipsoidText(text, matrix, settings):
""" Apply the transformation matrix to the quaternions represented
by the last four numbers on each line.
The \"matrix\" stores the aggregate sum of combined transformations
to be applied and the rotational part of this matrix
must be converted to a quaternion.
"""
#sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')
lines = text.split('\n')
for i in range(0, len(lines)):
line_orig = lines[i]
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
comment = ' ' + line_orig[ic:].rstrip('\n')
else:
line = line_orig.rstrip('\n')
comment = ''
columns = line.split()
if len(columns) != 0:
            if len(columns) != 8:
                raise InputError('Error (lttree.py): Expected 8 columns'
                                 + ' instead of '
                                 + str(len(columns))
                                 + ' in each line of the \"ellipsoids\" section:\n'
                                 + line + '\n')
q_orig = [float(columns[-4]),
float(columns[-3]),
float(columns[-2]),
float(columns[-1])]
qRot = [0.0, 0.0, 0.0, 0.0]
Matrix2Quaternion(matrix, qRot)
q_new = [0.0, 0.0, 0.0, 0.0]
MultQuat(q_new, qRot, q_orig)
columns[-4] = str(q_new[0])
columns[-3] = str(q_new[1])
columns[-2] = str(q_new[2])
columns[-1] = str(q_new[3])
lines[i] = ' '.join(columns) + comment
return '\n'.join(lines)
def CalcCM(text_Atoms,
text_Masses=None,
settings=None):
types2masses = None
# Loop through the "Masses" section: what is the mass of each atom type?
if text_Masses != None:
types2masses = {}
lines = text_Masses.split('\n')
for i in range(0, len(lines)):
line = lines[i]
columns = line.split()
if len(columns) == 2:
atomtype = columns[0]
m = float(columns[1])
types2masses[atomtype] = m
    lines = text_Atoms.split('\n')
    # Pass 1 through the "Data Atoms" section: Determine each atom's mass
    atomids2masses = None
    if text_Masses != None:
        assert(settings != None)
        atomids2masses = {}
        for i in range(0, len(lines)):
            line = lines[i]
            columns = line.split()
            if len(columns) == 0:
                continue
            atomid = columns[settings.i_atomid]
            atomtype = columns[settings.i_atomtype]
            if atomtype not in types2masses:
                raise InputError('Error(lttree): You have neglected to define the mass of atom type: \"' + atomtype + '\"\n'
                                 'Did you specify the mass of every atom type using write(\"Masses\"){}?')
            atomids2masses[atomid] = types2masses[atomtype]
    # Pass 2 through the "Data Atoms" section: Find the center of mass.
    tot_m = 0.0
    tot_x = [0.0, 0.0, 0.0]
    for i in range(0, len(lines)):
        line = lines[i]
        columns = line.split()
if len(columns) > 0:
if len(columns) == len(settings.column_names) + 3:
raise InputError('Error: lttree.py does not yet support integer unit-cell counters (ix, iy, iz)\n'
' within the \"' + data_atoms + '\" section of a LAMMPS data file.\n'
' Instead please add the appropriate offsets (these offsets\n'
' should be multiples of the cell size) to the atom coordinates\n'
' in the data file, and eliminate the extra columns. Then try again.\n'
' (If you get this message often, email me and I\'ll fix this limitation.)')
if len(columns) != len(settings.column_names):
raise InputError('Error: The number of columns in your data file does not\n'
' match the LAMMPS atom_style you selected.\n'
' Use the -atomstyle <style> command line argument.\n')
            x = [0.0, 0.0, 0.0]
            if atomids2masses != None:
                m = atomids2masses[columns[settings.i_atomid]]
            else:
                m = 1.0
            tot_m += m
            for cxcycz in settings.ii_coords:
                for d in range(0, 3):
                    x[d] = float(columns[cxcycz[d]])
                    tot_x[d] += m * x[d]  # mass-weighted sum for the center of mass
            # Note: dipole moments and other direction vectors don't affect
# the center of mass. So I commented out the loop below.
# for cxcycz in settings.ii_vects:
# for d in range(0,3):
# v[d] = float(columns[cxcycz[d]])
lines[i] = ' '.join(columns)
xcm = [0.0, 0.0, 0.0]
for d in range(0, 3):
xcm[d] = tot_x[d] / tot_m
return xcm
def AddAtomTypeComments(tmpl_list, substitute_vars, print_full_atom_type_names):
"""
This ugly code attempts to parse the text in the "Masses" section
of a LAMMPS DATA file, and append comments to the end of every line
defining the atom type. Each comment will contain a string which stores
the name of the @atom-style variable (excluding the "@atom:" prefix).
This is unfortunately complicated and messy because we have to do
    this before we render the text. (IE before we substitute numeric
values into the variables. Once we've rendered the text,
the variable names are discarded.)
Therefore we have to work with a messy "tmpl_list" object
which contains the text in a pre-rendered form. The "tmpl_list" object
is a list of alternating TextBlocks and VarRef objects.
This function rebuilds this tmpl_list object, splitting it into separate
lines (which it currently is not) and then adding comments to the end
of each line (if there isn't one there already). Finally it renders
the resulting template and returns that text to the caller.
"""
table = TableFromTemplate(tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, True])
    for i in range(0, len(table)):
        if len(table[i]) == 0:
            continue  # nothing to annotate on an empty row
        j = 0
        if isinstance(table[i][0], TextBlock):
            j += 1
assert(hasattr(table[i], '__len__'))
syntax_err = False
if len(table[i]) == j+0:
pass # skip blank lines
elif ((len(table[i]) > j+0) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass # skip comment lines
        elif ((len(table[i]) > j+1) and
isinstance(table[i][j+0], VarRef) and
isinstance(table[i][j+1], TextBlock)):
var_ref = table[i][j+0]
if print_full_atom_type_names:
var_name = var_ref.prefix[0] + \
CanonicalDescrStr(var_ref.nptr.cat_name,
var_ref.nptr.cat_node,
var_ref.nptr.leaf_node,
var_ref.srcloc)
else:
var_name = var_ref.nptr.leaf_node.name
# remove the "@atom:" prefix before the variable name:
if var_name.find('@atom:') == 0:
var_name = var_name[6:]
elif var_name.find('@/atom:') == 0:
var_name = var_name[7:]
new_comment = ' # ' + var_name
if (len(table[i]) == j+2):
table[i].append(TextBlock(new_comment,
table[i][j+1].srcloc))
else:
assert(len(table[i]) > j+2)
assert(isinstance(table[i][j+2], TextBlock))
# If this line doesn't already contain a comment, then add one
if table[i][j+2].text.find('#') == -1:
table[i][j+2].text += new_comment
else:
# Insert a space between 2nd column and the comment
table[i][j+2].text = ' '+table[i][j+2].text
# Also add spaces between any words within the comments. This is
# necessary because TableFromTemplate() removed all whitespace
for k in range(j+3, len(table[i])):
table[i][k].text = ' '+table[i][k].text
# We must insert a space between the first and second columns
# because TableFromTemplate() removes this whitespace separator.
table[i].insert(j+1, TextBlock(' ', table[i][j+1].srcloc))
else:
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][j+0].srcloc.infile,
table[i][j+0].srcloc.lineno) + '\n'
' The format is incorrect.\n')
        # Add a newline (use the last element's srcloc; blank or comment rows
        # may not have an element at index j+1):
        table[i].append(TextBlock('\n', table[i][-1].srcloc))
# Now flatten the "table" (which is a list-of-lists)
# into a simple 1-dimensional list
# (of alternating VarRefs and TextBlocks, in this case)
templ_list = [entry for sublist in table for entry in sublist]
# Note: This is equivalent to
# templ_list = []
# for sublist in table:
# for entry in sublist:
# templ_list.append(entry)
# When building list comprehensions with multiple "for" tokens,
# the outer loop comes first (ie "for sublist in table")
# Now render this text and return it to the caller:
return Render(templ_list, substitute_vars)
def _ExecCommands(command_list,
index,
global_files_content,
settings,
matrix_stack,
current_scope_id=None,
substitute_vars=True):
"""
_ExecCommands():
The argument "commands" is a nested list of lists of
"Command" data structures (defined in ttree.py).
Carry out the write() and write_once() commands (which
write out the contents of the templates contain inside them).
Instead of writing the files, save their contents in a string.
The argument "global_files_content" should be of type defaultdict(list)
It is an associative array whose key is a string (a filename)
and whose value is a lists of strings (of rendered templates).
"""
files_content = defaultdict(list)
postprocessing_commands = []
while index < len(command_list):
command = command_list[index]
index += 1
# For debugging only
if ((not isinstance(command, StackableCommand)) and
(not isinstance(command, ScopeCommand)) and
(not isinstance(command, WriteFileCommand))):
sys.stderr.write(str(command) + '\n')
if isinstance(command, PopCommand):
assert(current_scope_id != None)
if command.context_node == None:
command.context_node = current_scope_id
if isinstance(command, PopRightCommand):
matrix_stack.PopRight(which_stack=command.context_node)
elif isinstance(command, PopLeftCommand):
matrix_stack.PopLeft(which_stack=command.context_node)
else:
assert(False)
elif isinstance(command, PushCommand):
assert(current_scope_id != None)
if command.context_node == None:
command.context_node = current_scope_id
# Some commands are post-processing commands, and must be
# carried out AFTER all the text has been rendered. For example
# the "movecm(0,0,0)" waits until all of the coordinates have
# been rendered, calculates the center-of-mass, and then applies
# a translation moving the center of mass to the origin (0,0,0).
# We need to figure out which of these commands need to be
# postponed, and which commands can be carried out now.
# ("now"=pushing transformation matrices onto the matrix stack).
# UNFORTUNATELY POSTPONING SOME COMMANDS MAKES THE CODE UGLY
transform_list = command.contents.split('.')
transform_blocks = []
i_post_process = -1
# Example: Suppose:
#command.contents = '.rot(30,0,0,1).movecm(0,0,0).rot(45,1,0,0).scalecm(2.0).move(-2,1,0)'
# then
#transform_list = ['rot(30,0,0,1)', 'movecm(0,0,0)', 'rot(45,1,0,0)', 'scalecm(2.0)', 'move(-2,1,0)']
# Note: the first command 'rot(30,0,0,1)' is carried out now.
# The remaining commands are carried out during post-processing,
# (when processing the "ScopeEnd" command).
#
# We break up the commands into "blocks" separated by center-
# of-mass transformations ('movecm', 'rotcm', or 'scalecm')
#
# transform_blocks = ['.rot(30,0,0,1)',
# '.movecm(0,0,0).rot(45,1,0,0)',
# '.scalecm(2.0).move(-2,1,0)']
i = 0
while i < len(transform_list):
transform_block = ''
while i < len(transform_list):
transform = transform_list[i]
i += 1
if transform != '':
transform_block += '.' + transform
transform = transform.split('(')[0]
if ((transform == 'movecm') or
(transform == 'rotcm') or
(transform == 'scalecm')):
break
transform_blocks.append(transform_block)
if len(postprocessing_commands) == 0:
# The first block (before movecm, rotcm, or scalecm)
# can be executed now by modifying the matrix stack.
if isinstance(command, PushRightCommand):
matrix_stack.PushCommandsRight(transform_blocks[0].strip('.'),
command.srcloc,
which_stack=command.context_node)
elif isinstance(command, PushLeftCommand):
matrix_stack.PushCommandsLeft(transform_blocks[0].strip('.'),
command.srcloc,
which_stack=command.context_node)
# Everything else must be saved for later.
postprocessing_blocks = transform_blocks[1:]
else:
# If we already encountered a "movecm" "rotcm" or "scalecm"
# then all of the command blocks must be handled during
# postprocessing.
postprocessing_blocks = transform_blocks
for transform_block in postprocessing_blocks:
assert(isinstance(transform_block, basestring))
if isinstance(command, PushRightCommand):
postprocessing_commands.append(PushRightCommand(transform_block,
command.srcloc,
command.context_node))
elif isinstance(command, PushLeftCommand):
postprocessing_commands.append(PushLeftCommand(transform_block,
command.srcloc,
command.context_node))
elif isinstance(command, WriteFileCommand):
# --- Throw away lines containing references to deleted variables: ---
# First: To edit the content of a template,
# you need to make a deep local copy of it
tmpl_list = []
for entry in command.tmpl_list:
if isinstance(entry, TextBlock):
tmpl_list.append(TextBlock(entry.text,
entry.srcloc)) # , entry.srcloc_end))
else:
tmpl_list.append(entry)
# Now throw away lines with deleted variables
DeleteLinesWithBadVars(tmpl_list)
# --- Now render the text ---
text = Render(tmpl_list,
substitute_vars)
# ---- Coordinates of the atoms, must be rotated
# and translated after rendering.
# In addition, other vectors (dipoles, ellipsoid orientations)
# must be processed.
# This requires us to re-parse the contents of this text
# (after it has been rendered), and apply these transformations
# before passing them on to the caller.
if command.filename == data_atoms:
text = TransformAtomText(text, matrix_stack.M, settings)
elif command.filename == data_ellipsoids:
text = TransformEllipsoidText(text, matrix_stack.M, settings)
if command.filename == data_masses:
text = AddAtomTypeComments(tmpl_list,
substitute_vars,
settings.print_full_atom_type_name_in_masses)
files_content[command.filename].append(text)
elif isinstance(command, ScopeBegin):
if isinstance(command.node, InstanceObj):
if ((command.node.children != None) and
(len(command.node.children) > 0)):
matrix_stack.PushStack(command.node)
# "command_list" is a long list of commands.
# ScopeBegin and ScopeEnd are (usually) used to demarcate/enclose
# the commands which are issued for a single class or
# class instance. _ExecCommands() carries out the commands for
# a single class/instance. If we reach a ScopeBegin(),
# then recursively process the commands belonging to the child.
index = _ExecCommands(command_list,
index,
files_content,
settings,
matrix_stack,
command.node,
substitute_vars)
elif isinstance(command, ScopeEnd):
if data_atoms in files_content:
for ppcommand in postprocessing_commands:
if data_masses in files_content:
xcm = CalcCM(files_content[data_atoms],
files_content[data_masses],
settings)
else:
xcm = CalcCM(files_content[data_atoms])
if isinstance(ppcommand, PushRightCommand):
matrix_stack.PushCommandsRight(ppcommand.contents,
ppcommand.srcloc,
xcm,
which_stack=command.context_node)
elif isinstance(ppcommand, PushLeftCommand):
matrix_stack.PushCommandsLeft(ppcommand.contents,
ppcommand.srcloc,
xcm,
which_stack=command.context_node)
files_content[data_atoms] = \
TransformAtomText(files_content[data_atoms],
matrix_stack.M, settings)
files_content[data_ellipsoids] = \
TransformEllipsoidText(files_content[data_ellipsoids],
matrix_stack.M, settings)
for ppcommand in postprocessing_commands:
matrix_stack.Pop(which_stack=command.context_node)
#(same as PopRight())
if isinstance(command.node, InstanceObj):
if ((command.node.children != None) and
(len(command.node.children) > 0)):
matrix_stack.PopStack()
# "ScopeEnd" means we're done with this class/instance.
break
else:
assert(False)
# no other command types allowed at this point
# After processing the commands in this list,
# merge the templates with the caller's template list
for filename, tmpl_list in files_content.items():
global_files_content[filename] += tmpl_list
return index
def ExecCommands(commands,
files_content,
settings,
substitute_vars=True):
matrix_stack = MultiAffineStack()
index = _ExecCommands(commands,
0,
files_content,
settings,
matrix_stack,
None,
substitute_vars)
assert(index == len(commands))
def WriteFiles(files_content, suffix='', write_to_stdout=True):
for filename, str_list in files_content.items():
if filename != None:
out_file = None
if filename == '':
if write_to_stdout:
out_file = sys.stdout
else:
out_file = open(filename + suffix, 'a')
if out_file != None:
out_file.write(''.join(str_list))
if filename != '':
out_file.close()
return
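# Illustrative note (added; not part of the original program): WriteFiles()
# consumes a dict mapping file names to lists of rendered strings. The
# empty-string key means "write to stdout"; None keys are skipped. E.g.:
#
#     fc = defaultdict(list)
#     fc['system.data'].append('rendered text\n')
#     WriteFiles(fc, suffix='.template', write_to_stdout=False)
#
# would append the text to "system.data.template" (files are opened in
# append mode, which is why EraseTemplateFiles() is called first in main()).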
def main():
"""
This is a "main module" wrapper for invoking lttree.py
as a stand alone program. This program:
1)reads a ttree file,
2)constructs a tree of class definitions (g_objectdefs)
3)constructs a tree of instantiated class objects (g_objects),
4)automatically assigns values to the variables,
5) carries out the "write" commands to write the templates to file(s).
"""
####### Main Code Below: #######
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + ' ')
sys.stderr.write('\n(python version ' + str(sys.version) + ')\n')
if sys.version < '2.6':
raise InputError(
'Error: Alas, you must upgrade to a newer version of python.')
try:
#settings = BasicUISettings()
#BasicUIParseArgs(sys.argv, settings)
settings = LttreeSettings()
LttreeParseArgs([arg for arg in sys.argv], #(copy of sys.argv)
settings, main=True, show_warnings=True)
# Data structures to store the class definitions and instances
g_objectdefs = StaticObj('', None) # The root of the static tree
# has name '' (equivalent to '/')
g_objects = InstanceObj('', None) # The root of the instance tree
# has name '' (equivalent to '/')
# A list of commands to carry out
g_static_commands = []
g_instance_commands = []
BasicUI(settings,
g_objectdefs,
g_objects,
g_static_commands,
g_instance_commands)
# Interpret the commands. (These are typically write() or
# write_once() commands, rendering templates into text.)
# This step also handles coordinate transformations and delete commands.
# Coordinate transformations can be applied to the rendered text
# as a post-processing step.
sys.stderr.write(' done\nbuilding templates...')
files_content = defaultdict(list)
ExecCommands(g_static_commands,
files_content,
settings,
False)
ExecCommands(g_instance_commands,
files_content,
settings,
False)
# Finally: write the rendered text to actual files.
# Erase the files that will be written to:
sys.stderr.write(' done\nwriting templates...')
EraseTemplateFiles(g_static_commands)
EraseTemplateFiles(g_instance_commands)
# Write the files as templates
# (with the original variable names present)
WriteFiles(files_content, suffix=".template", write_to_stdout=False)
# Write the files with the variables substituted by values
sys.stderr.write(' done\nbuilding and rendering templates...')
files_content = defaultdict(list)
ExecCommands(g_static_commands, files_content, settings, True)
ExecCommands(g_instance_commands, files_content, settings, True)
sys.stderr.write(' done\nwriting rendered templates...\n')
WriteFiles(files_content)
sys.stderr.write(' done\n')
# Now write the variable bindings/assignments table.
sys.stderr.write('writing \"ttree_assignments.txt\" file...')
# <-- erase previous version.
open('ttree_assignments.txt', 'w').close()
WriteVarBindingsFile(g_objectdefs)
WriteVarBindingsFile(g_objects)
sys.stderr.write(' done\n')
except (ValueError, InputError) as err:
if isinstance(err, ValueError):
sys.stderr.write('Error converting string to numeric format.\n'
' This sometimes means you have neglected to specify the atom style\n'
' (using the \"-atomstyle\" command). Alternatively it could indicate\n'
' that the moltemplate file contains non-numeric text in one of the\n'
' .move(), .rot(), .scale(), .matrix(), or .quat() commands. If neither of\n'
' these scenarios apply, please report this bug. (jewett.aij at gmail.com)\n')
sys.exit(-1)
else:
sys.stderr.write('\n\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
|
smsaladi/moltemplate
|
moltemplate/lttree.py
|
Python
|
bsd-3-clause
| 46,054
|
[
"LAMMPS"
] |
02a35d4d6022865243ca022dea85138f064a1e8b18614954a888500b69477591
|
__author__ = 'civa'
OBJECT_TYPES = dict(STAR=0, CATALOG_STAR=1, PLANET=2, OPEN_CLUSTER=3, GLOBULAR_CLUSTER=4,
GASEOUS_NEBULA=5, PLANETARY_NEBULA=6, SUPERNOVA_REMNANT=7, GALAXY=8,
COMET=9, ASTEROID=10, CONSTELLATION=11, MOON=12, ASTERISM=13,
GALAXY_CLUSTER=14, DARK_NEBULA=15, QUASAR=16, MULT_STAR=17, RADIO_SOURCE=18,
SATELLITE=19, SUPERNOVA=20, UNKNOWN=99)
OBJECT_TYPE_MAPS = [
(['*iC','*iN','*iA','*i*','V*?','Pe*','HB*','Y*O','Ae*','Em*','Be*','BS*','RG*','AB*','C*','S*','sg*','s*r','s*y','s*b','pA*','WD*','ZZ*','LM*','BD*','N*','OH*','CH*','pr*','TT*','WR*','PM*','HV*','V* ','Ir*','Or*','RI*','Er*','Fl*','FU*','RC*','RC?','Ro*','a2*','Psr','BY*','RS*','Pu*','RR*','Ce*','dS*','RV*','WV*','bC*','cC*','gD*','SX*','LP*','Mi*','sr*','SN*','su*','Pl?','Pl'], 'STAR'),
#([''], 'CATALOG_STAR'),
(['Pl?', 'Pl'], 'PLANET'),
(['OpC', 'C?*'], 'OPEN_CLUSTER'),
(['GlC', 'Gl?'], 'GLOBULAR_CLUSTER'),
('GNe,BNe,RNe,MoC,glb,cor,SFR,HVC,HII'.split(','), 'GASEOUS_NEBULA'),
(['PN','PN?'], 'PLANETARY_NEBULA'),
(['SR?', 'SNR'], 'SUPERNOVA_REMNANT'),
('IG,PaG,G,PoG,GiC,BiC,GiG,GiP,HzG,ALS,LyA,DLA,mAL,LLS,BAL,rG,H2G,LSB,LeI,LeG,LeQ,EmG,SBG,bCG'.split(','), 'GALAXY'),
('SCG,ClG,GrG,CGG'.split(','), 'GALAXY_CLUSTER'),
(['DNe'], 'DARK_NEBULA'),
('AGN,LIN,SyG,Sy1,Sy2,Bla,BLL,OVV,QSO,AG?,Q?,Bz?,BL?'.split(','), 'QUASAR'),
('As*,St*,MGr,**,EB*,Al*,bL*,WU*,EP*,SB*,El*,Sy*,CV*,DQ*,AM*,NL*,No*,DN*,XB*,LXB,HXB'.split(','), 'MULT_STAR'),
('Rad,mR,cm,mm,smm'.split(','), 'RADIO_SOURCE'),
(['SN?', 'SN*'], 'SUPERNOVA'),
# ([''], 'SATELLITE'), # no satellites in Simbad, they don't have fixed positions
#([''], 'COMET'),
#([''], 'ASTEROID'),
#([''], 'CONSTELLATION'),
#([''], 'MOON'),
#([''], 'ASTERISM'),
]
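# Hypothetical helper, added for illustration (not in the original module):
# resolve a catalog type code to its numeric OBJECT_TYPES id by scanning
# OBJECT_TYPE_MAPS in order, falling back to UNKNOWN. The function name and
# the fallback behaviour are assumptions.
def resolve_object_type(code):
    for codes, type_name in OBJECT_TYPE_MAPS:
        if code in codes:
            return OBJECT_TYPES[type_name]
    return OBJECT_TYPES['UNKNOWN']

# Example: resolve_object_type('GlC') == 4 (GLOBULAR_CLUSTER), while an
# unrecognized code such as 'zzz' yields 99 (UNKNOWN).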
|
Civa/Zenith
|
src/Backend/Distributed/shared/tables.py
|
Python
|
gpl-3.0
| 1,723
|
[
"Galaxy"
] |
24d1364b609b63120e61ee7e20d4a890d21dc9cb759a1f73e27dcaa418b3989b
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
import logging
logger = logging.getLogger('camelot.view.controls.delegates.plaintextdelegate')
from PyQt4.QtCore import Qt
from customdelegate import CustomDelegate
from customdelegate import DocumentationMetaclass
from camelot.core.utils import ugettext
from camelot.core.utils import variant_to_pyobject
from camelot.view.controls import editors
from camelot.view.proxy import ValueLoading
DEFAULT_COLUMN_WIDTH = 20
class PlainTextDelegate( CustomDelegate ):
"""Custom delegate for simple string values"""
__metaclass__ = DocumentationMetaclass
editor = editors.TextLineEditor
def __init__( self,
parent = None,
length = DEFAULT_COLUMN_WIDTH,
translate_content=False,
**kw ):
CustomDelegate.__init__( self, parent, length = length, **kw )
self._translate_content = translate_content
char_width = self._font_metrics.averageCharWidth()
self._width = char_width * min( DEFAULT_COLUMN_WIDTH, length or DEFAULT_COLUMN_WIDTH )
def paint(self, painter, option, index):
painter.save()
self.drawBackground(painter, option, index)
value = variant_to_pyobject( index.model().data( index, Qt.EditRole ) )
value_str = u''
if value not in (None, ValueLoading):
if self._translate_content:
value_str = ugettext( unicode(value) )
else:
value_str = unicode(value)
self.paint_text(painter, option, index, value_str)
painter.restore()
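# Hypothetical usage sketch (not in the original module): a delegate like
# this is normally attached to one column of a Qt item view. "view" below is
# an assumed QTableView-like widget from the surrounding application.
#
#     delegate = PlainTextDelegate(parent=view, length=30,
#                                  translate_content=True)
#     view.setItemDelegateForColumn(0, delegate)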
|
jeroendierckx/Camelot
|
camelot/view/controls/delegates/plaintextdelegate.py
|
Python
|
gpl-2.0
| 2,645
|
[
"VisIt"
] |
58595ba2296358f754948df5f4e6f9e84a855fce488f2dfe94127b7dcd465c91
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
import unittest
from skbio import TreeNode
from skbio.io import NewickFormatError
from skbio.io.format.newick import (
_newick_to_tree_node, _tree_node_to_newick, _newick_sniffer)
class TestNewick(unittest.TestCase):
def _assert_node_equal(self, n1, n2):
self.assertEqual(n1.name, n2.name)
self.assertEqual(n1.length, n2.length)
self.assertEqual(len(n1.children), len(n2.children))
def _assert_equal(self, n1, n2):
def name(x):
return (str(x.name),
float(x.length) if x.length is not None else 0,
len(x.children))
self._assert_node_equal(n1, n2)
for c1, c2 in zip(sorted(n1.children, key=name),
sorted(n2.children, key=name)):
self.assertTrue(c1.parent is n1)
self.assertTrue(c2.parent is n2)
self._assert_equal(c1, c2)
def _setup_tree(self, kwargs_list):
trees = []
for kwargs in kwargs_list:
trees.append(TreeNode(**kwargs))
trees[4].extend([trees[2], trees[3]])
trees[5].extend([trees[0], trees[1], trees[4]])
return trees[5]
def _setup_linked_list(self, kwargs_list):
last_node = None
for idx, kwargs in enumerate(kwargs_list):
new_node = TreeNode(**kwargs)
if last_node is not None:
new_node.append(last_node)
last_node = new_node
return last_node
def _setup_balanced_binary(self, kwargs_list):
trees = []
for kwargs in kwargs_list:
trees.append(TreeNode(**kwargs))
trees[0].extend([trees[2], trees[3]])
trees[1].extend([trees[4], trees[5]])
trees[6].extend([trees[0], trees[1]])
return trees[6]
def setUp(self):
# Using the factory functions above, we will construct different tree
# instances. Each tree is expected to serialize to the first newick
# string in the list. Each string in the list is expected to
# deserialize into an equivalent rotation of the constructed instance.
tree_blank = (self._setup_tree([
{}, {}, {}, {}, {}, {}
]), [
"(,,(,));\n",
"(,(,),);",
"((,),,);",
" ((,[ this is a comment ]) , , ) ; ",
"((,[ i_can_do_this[0] or escape unmatched '[ ]),[more words],);",
])
tree_leaves_named = (self._setup_tree([
{'name': 'a_'},
{'name': 'b'},
{'name': 'c'},
{'name': 'd'},
{},
{}
]), [
"('a_',b,(c,d));\n",
"(b,(c,d),'a_');",
"(b\n,'a_'\n ,(d \t,c) ) ;",
])
tree_all_named = (self._setup_tree([
{'name': 'a'},
{'name': 'b'},
{'name': 'c'},
{'name': '[whaaat!\']'},
{'name': 'e'},
{'name': 'f'}
]), [
"(a,b,(c,'[whaaat!'']')e)f;\n",
"(b,(c,'[whaaat!'']')e,a)f;",
"(b,[comment] \na,('[whaaat!'']',c)e)f;",
])
tree_all_but_root_distances = (self._setup_tree([
{'length': 0.1},
{'length': 0.2},
{'length': 0.3},
{'length': 0.4},
{'length': 0.5},
{}
]), [
"(:0.1,:0.2,(:0.3,:0.4):0.5);\n",
"(:0.2,(:0.3,:0.4):0.5,:0.1);",
"(:0.2,:0.1,(:0.4,:0.3):0.5);",
])
tree_all_distances = (self._setup_tree([
{'length': 0.1},
{'length': 0.2},
{'length': 0.3},
{'length': 0.4},
{'length': 0.5},
{'length': 0.0}
]), [
"(:0.1,:0.2,(:0.3,:0.4):0.5):0.0;\n",
"(:0.2,(:0.3,:0.4):0.5,:0.1):0.0;",
"(:0.2,\n:0.1,(:0.4,\n:0.3):0.5)\n:0.0;",
])
tree_all_leaves_named_with_distances = (self._setup_tree([
{'name': 'a', 'length': 0.1},
{'name': 'b_a\'', 'length': 0.2},
{'name': 'c', 'length': 0.3},
{'name': 'de d', 'length': 0.4},
{'length': 0.5},
{'length': 0.0}
]), [
"(a:0.1,'b_a''':0.2,(c:0.3,de_d:0.4):0.5):0.0;\n",
"('b_a''':0.2,(c:0.3,'de d':0.4):0.5,a:0.1):0.0;",
"('b_a''':0.2,a:0.1,('de d'[why not]:0.4,c:0.3):0.5):0.0;",
])
tree_all_leaves_named_with_distances_no_root = (self._setup_tree([
{'name': 'a', 'length': 0.1},
{'name': 'b_a\'', 'length': 0.2},
{'name': 'c', 'length': 0.3},
{'name': 'de d', 'length': 0.4},
{'length': 0.5},
{}
]), [
"(a:0.1,'b_a''':0.2,(c:0.3,de__d:0.4):0.5);\n",
"('b_a''':0.2\n[comment ahoy]\n,(c:0.3,'de d':0.4):0.5,a:0.1);",
"('b_a''':0.2,a:0.1,(de__d:0.4,c:0.3):0.5);"
])
tree_all = (self._setup_tree([
{'name': 'a', 'length': 0.1},
{'name': 'b_a\'', 'length': 0.2},
{'name': 'c', 'length': 0.3},
{'name': 'de\' d', 'length': 0.4},
{'name': 'e', 'length': 0.5},
{'name': 'f', 'length': 0.0}
]), [
"(a:0.1,'b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5)f:0.0;\n",
"('b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5,a:0.1)f:0.0;",
"((de''_d:0.4, c:0.3)e:0.5, 'b_a''':0.2, a:0.1)f:0.0;"
])
balanced_blank = (self._setup_balanced_binary([
{}, {}, {}, {}, {}, {}, {}
]), [
"((,),(,));\n",
])
balanced_named = (self._setup_balanced_binary([
{'name': 'a'},
{'name': 'b'},
{'name': 'c'},
{'name': 'd'},
{'name': 'e'},
{'name': 'f'},
{'name': 'g'}
]), [
"((c,d)a,(e,f)b)g;\n",
])
balanced_distances = (self._setup_balanced_binary([
{'length': 1.0},
{'length': 2.0},
{'length': 3.0},
{'length': 4.0},
{'length': 5.0},
{'length': 6.0},
{'length': 0.0}
]), [
"((:3.0,:4.0):1.0,(:5.0,:6.0):2.0):0.0;\n",
])
balanced_all = (self._setup_balanced_binary([
{'name': 'a', 'length': 1.0},
{'name': 'b', 'length': 2.0},
{'name': 'c', 'length': 3.0},
{'name': 'd', 'length': 4.0},
{'name': 'e', 'length': 5.0},
{'name': 'f:f\'f', 'length': 6.0},
{'name': 'g', 'length': 0.0}
]), [
"((c:3.0,d:4.0)a:1.0,(e:5.0,'f:f''f':6.0)b:2.0)g:0.0;\n",
])
linked_list_blank = (self._setup_linked_list([
{}, {}, {}, {}, {}
]), [
"(((())));\n",
"[(((())));](((())));",
"[[(((())));](((())));](((())));\t\t\n"
])
linked_list_named = (self._setup_linked_list([
{'name': 'aaa'},
{'name': 'b_a\''},
{'name': 'c'},
{'name': 'de d'},
{'name': 'e'},
]), [
"((((aaa)'b_a''')c)de_d)e;\n"
])
linked_list_distances = (self._setup_linked_list([
{'length': 0.4},
{'length': 0.3},
{'length': 0.2},
{'length': 0.1},
{'length': 0.0},
]), [
"((((:0.4):0.3):0.2):0.1):0.0;\n",
"((((:0.4)[not a label]:0.3):0.2):0.1):0.0;\t\t\n"
])
linked_list_all = (self._setup_linked_list([
{'name': 'a', 'length': 0.4},
{'name': 'b_a\'', 'length': 0.3},
{'name': 'c', 'length': 0.2},
{'name': 'de d', 'length': 0.1},
{'name': 'eee', 'length': 0.0},
]), [
"((((a:0.4)'b_a''':0.3)c:0.2)de_d:0.1)eee:0.0;\n"
])
single_empty = (TreeNode(), [";\n", "[comment about the root"
" and its properties];"])
single_named = (TreeNode(name='athing'), ["athing;\n"])
single_distance = (TreeNode(length=200.0), [":200.0;\n"])
single_all = (TreeNode(name='[a]', length=200.0), ["'[a]':200.0;\n"])
self.trees_newick_lists = [
tree_blank,
tree_leaves_named,
tree_all_named,
tree_all_but_root_distances,
tree_all_distances,
tree_all_leaves_named_with_distances,
tree_all_leaves_named_with_distances_no_root,
tree_all,
balanced_blank,
balanced_named,
balanced_distances,
balanced_all,
linked_list_blank,
linked_list_named,
linked_list_distances,
linked_list_all,
single_empty,
single_named,
single_distance,
single_all
]
# Invalid newick strings and list of error fragments that should be
# a part of the error message when read.
self.invalid_newicks = [
("", ['root']),
("This is not a newick file.", ['whitespace', 'label']),
("((();", ['Parenthesis', 'unbalanced']),
("(,,,)(,);\n", ['unnested', 'children']),
("(()());", ['unnested', 'children']),
("(():,,)", ['length']),
("[][[]('comment is the gotcha':0.2,,);", ['unbalanced', 'root']),
("#SampleID\tHeaderA\tHeaderB\n0\t'yellow'\t0.45;", ['whitespace',
'label']),
("))();", ['Parenthesis', 'unbalanced']),
("((,,),((,,));", ['Parenthesis', 'unbalanced']),
("\n".join([",".join(str(i) for i in range(100))
for _ in range(100)]), ['whitespace', 'label'])
]
def test_newick_to_tree_node_valid_files(self):
for tree, newicks in self.trees_newick_lists:
for newick in newicks:
fh = io.StringIO(newick)
read_tree = _newick_to_tree_node(fh)
self._assert_equal(tree, read_tree)
fh.close()
def test_newick_to_tree_node_invalid_files(self):
for invalid, error_fragments in self.invalid_newicks:
fh = io.StringIO(invalid)
with self.assertRaises(NewickFormatError) as cm:
_newick_to_tree_node(fh)
for frag in error_fragments:
self.assertIn(frag, str(cm.exception))
fh.close()
def test_tree_node_to_newick(self):
for tree, newicks in self.trees_newick_lists:
newick = newicks[0]
fh = io.StringIO()
_tree_node_to_newick(tree, fh)
self.assertEqual(newick, fh.getvalue())
fh.close()
def test_roundtrip(self):
for tree, newicks in self.trees_newick_lists:
newick = newicks[0]
fh = io.StringIO(newick)
tree = _newick_to_tree_node(fh)
fh2 = io.StringIO()
_tree_node_to_newick(tree, fh2)
fh2.seek(0)
tree2 = _newick_to_tree_node(fh2)
self.assertEqual(newick, fh2.getvalue())
self._assert_equal(tree, tree2)
fh.close()
fh2.close()
def test_newick_to_tree_node_convert_underscores(self):
fh = io.StringIO('(_:0.1, _a, _b)__;')
tree = _newick_to_tree_node(fh, convert_underscores=False)
fh2 = io.StringIO()
_tree_node_to_newick(tree, fh2)
self.assertEqual(fh2.getvalue(), "('_':0.1,'_a','_b')'__';\n")
fh2.close()
fh.close()
def test_newick_sniffer_valid_files(self):
for _, newicks in self.trees_newick_lists:
for newick in newicks:
fh = io.StringIO(newick)
self.assertEqual(_newick_sniffer(fh), (True, {}))
fh.close()
def test_newick_sniffer_invalid_files(self):
for invalid, _ in self.invalid_newicks:
fh = io.StringIO(invalid)
self.assertEqual(_newick_sniffer(fh), (False, {}))
fh.close()
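# Side note (illustrative, not exercised by these tests): the same parser
# is reachable through scikit-bio's public API, e.g.
#     from skbio import TreeNode
#     tree = TreeNode.read(io.StringIO("(a,b)c;"), format="newick")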
if __name__ == '__main__':
unittest.main()
|
gregcaporaso/scikit-bio
|
skbio/io/format/tests/test_newick.py
|
Python
|
bsd-3-clause
| 12,594
|
[
"scikit-bio"
] |
7c9bcafa96b229e5b563be25286390b0e6f67b3b84c817257590fc745dc44bc2
|
'''
Functions for working with DESI mocks and fiberassignment
TODO (maybe):
This contains hardcoded hacks, especially wrt priorities and
interpretation of object types
'''
from __future__ import print_function, division
import sys, os
import numpy as np
from astropy.table import Table, Column
from fiberassign import io
from desitarget import desi_mask
import desitarget
import desispec.brick
def rdzipn2targets(infile):
"""Read rdzipn infile and return target and truth tables
"""
ra, dec, z, itype, priority, numobs = io.read_rdzipn(infile)
n = len(ra)
#- Martin's itype is 1 to n, while Bob's fiberassign is 0 to n-1
itype -= 1
assert np.all(itype >= 0)
#- rdzipn has float32 ra, dec, but it should be float64
ra = ra.astype('float64') % 360 #- enforce 0 <= ra < 360
dec = dec.astype('float64')
#- Hardcoded in rdzipn format
# 0 : 'QSO', #- QSO-LyA
# 1 : 'QSO', #- QSO-Tracer
# 2 : 'LRG', #- LRG
# 3 : 'ELG', #- ELG
# 4 : 'STAR', #- QSO-Fake
# 5 : 'UNKNOWN', #- LRG-Fake
# 6 : 'STAR', #- StdStar
# 7 : 'SKY', #- Sky
qso_lya = (itype==0)
qso_tracer = (itype==1)
qso_fake = (itype==4)
qso = qso_lya | qso_tracer | qso_fake
lrg_real = (itype==2)
lrg_fake = (itype==5)
lrg = lrg_real | lrg_fake
elg = (itype==3)
std = (itype==6)
sky = (itype==7)
if not np.any(std):
print("WARNING: no standard stars found")
if not np.any(sky):
print("WARNING: no sky locations found")
if not np.any(~(std | sky)):
print("WARNING: no science targets found")
#- Create a DESI_TARGET mask
desi_target = np.zeros(n, dtype='i8')
desi_target[qso] |= desi_mask.QSO
desi_target[elg] |= desi_mask.ELG
desi_target[lrg] |= desi_mask.LRG
desi_target[sky] |= desi_mask.SKY
desi_target[std] |= desi_mask.STD_FSTAR
bgs_target = np.zeros(n, dtype='i8') #- TODO
mws_target = np.zeros(n, dtype='i8') #- TODO
#- True type
truetype = np.zeros(n, dtype='S10')
assert np.all(truetype == '')
truetype[qso_lya | qso_tracer] = 'QSO'
truetype[qso_fake] = 'STAR'
truetype[elg] = 'GALAXY'
truetype[lrg_real] = 'GALAXY'
truetype[lrg_fake] = 'UNKNOWN'
truetype[std] = 'STAR'
truetype[sky] = 'SKY'
assert np.all(truetype != '')
#- Misc other
targetid = np.random.randint(2**62, size=n)
### brickname = np.zeros(n, dtype='S8')
brickname = desispec.brick.brickname(ra, dec)
subpriority = np.random.uniform(0, 1, size=n)
targets = Table()
targets['TARGETID'] = targetid
targets['BRICKNAME'] = brickname
targets['RA'] = ra
targets['DEC'] = dec
targets['DESI_TARGET'] = desi_target
targets['BGS_TARGET'] = bgs_target
targets['MWS_TARGET'] = mws_target
targets['SUBPRIORITY'] = subpriority
truth = Table()
truth['TARGETID'] = targetid
truth['BRICKNAME'] = brickname
truth['RA'] = ra
truth['DEC'] = dec
truth['TRUEZ'] = z
truth['TRUETYPE'] = truetype
truth['CATEGORY'] = itype
return targets, truth
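# Minimal usage sketch (hypothetical file names; not part of the original
# module). The returned astropy Tables can be written straight to FITS:
#
#     targets, truth = rdzipn2targets('mock_rdzipn.dat')
#     targets.write('mock_targets.fits', overwrite=True)
#     truth.write('mock_truth.fits', overwrite=True)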
|
desihub/fiberassign
|
old/py/mock.py
|
Python
|
bsd-3-clause
| 3,161
|
[
"Galaxy"
] |
1cb4cca42a55d8140ae3eb9ab3186980f652b8985d4c59aa7958d37b2a6a5a2b
|
## -*- coding: utf-8 -*-
## Copyright (c) 2015-2020, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
#"""
#Parser for ADF Output
##########################
#Parser for the output of the DIRAC program (part of the ADF suite).
#"""
#import pandas as pd
#from exa import Parser, Matches, Typed
#from .scf import SCF
#from .geometry import Geometry
#from exatomic.core import Atom
#from exatomic.base import sym2z
#
#
#
#class ADF(Parser):
# """
# """
# _start = "* | A D F | *"
# _stop = -1
# atom = Typed(Atom)
#
# def _parse_stops_1(self, starts):
# """Find the end of the section."""
# key = "Hash table lookups:"
# matches = [self.find_next(key, cursor=s[0]) for s in starts]
# return Matches(key, *matches)
#
# def parse_atom(self):
# self.parse()
#
# def _parse(self):
# self._parse_atom()
#
# def _parse_atom(self):
# """Parse the atom table."""
# secs = self.sections[self.sections['parser'] == Geometry].index
# atom = []
# for i, j in enumerate(secs):
# atm = self.get_section(j).atom
# atm['frame'] = i
# atom.append(atm)
# atom = pd.concat(atom, ignore_index=True)
# atom['Z'] = atom['symbol'].map(sym2z)
# del atom['symbol']
# self.atom = Atom(atom)
#
#
#ADF.add_parsers(SCF, Geometry)
|
exa-analytics/atomic
|
exatomic/adf/adf/adf.py
|
Python
|
apache-2.0
| 1,460
|
[
"ADF",
"DIRAC"
] |
b15c7c1e15bbfbb8cc01dc3f4f8ac463bae3e18483fd98d7bb0db1d75841906a
|
#!/usr/bin/env python3
# [1] https://doi.org/10.1063/1.1515483 optimization review
# [2] https://doi.org/10.1063/1.471864 delocalized internal coordinates
# [3] https://doi.org/10.1016/0009-2614(95)00646-L lindh model hessian
# [4] 10.1002/(SICI)1096-987X(19990730)20:10<1067::AID-JCC9>3.0.CO;2-V
# Handling of corner cases
# [5] https://doi.org/10.1063/1.462844
from collections import namedtuple
from functools import reduce
import itertools as it
import logging
import numpy as np
from scipy.spatial.distance import pdist, squareform
from pysisyphus.constants import BOHR2ANG
from pysisyphus.elem_data import VDW_RADII, COVALENT_RADII as CR
from pysisyphus.intcoords.derivatives import d2q_b, d2q_a, d2q_d
from pysisyphus.intcoords.exceptions import NeedNewInternalsException
from pysisyphus.intcoords.findbonds import get_pair_covalent_radii
from pysisyphus.intcoords.fragments import merge_fragments
PrimitiveCoord = namedtuple(
"PrimitiveCoord",
"inds val grad",
)
class RedundantCoords:
RAD_175 = 3.05432619
BEND_MIN_DEG = 15
BEND_MAX_DEG = 180
def __init__(self, atoms, cart_coords, bond_factor=1.3,
prim_indices=None, define_prims=None, bonds_only=False,
check_bends=True, check_dihedrals=False):
self.atoms = atoms
self._cart_coords = cart_coords
self.bond_factor = bond_factor
self.define_prims = define_prims
self.bonds_only = bonds_only
self.check_bends = check_bends
self.check_dihedrals = check_dihedrals
self._B_prim = None
self.bond_indices = list()
self.bending_indices = list()
self.dihedral_indices = list()
self.hydrogen_bond_indices = list()
if prim_indices is None:
self.set_primitive_indices(self.define_prims)
else:
to_arr = lambda _: np.array(list(_), dtype=int)
bonds, bends, dihedrals = prim_indices
# We accept all bond indices. What could possibly go wrong?! :)
self.bond_indices = to_arr(bonds)
valid_bends = [inds for inds in bends
if self.is_valid_bend(inds)]
self.bending_indices = to_arr(valid_bends)
valid_dihedrals = [inds for inds in dihedrals if
self.is_valid_dihedral(inds)]
self.dihedral_indices = to_arr(valid_dihedrals)
if self.bonds_only:
self.bending_indices = list()
self.dihedral_indices = list()
self._prim_internals = self.calculate(self.cart_coords)
self._prim_coords = np.array([pc.val for pc in self._prim_internals])
def log(self, message):
logger = logging.getLogger("internal_coords")
logger.debug(message)
@property
def prim_indices(self):
return [self.bond_indices, self.bending_indices, self.dihedral_indices]
@property
def prim_indices_set(self):
return set([tuple(prim_ind) for prim_ind in it.chain(*self.prim_indices)])
@property
def prim_coords(self):
if self._prim_coords is None:
self._prim_coords = np.array(
[pc.val for pc in self.calculate(self.cart_coords)]
)
return self._prim_coords
@property
def cart_coords(self):
return self._cart_coords
@cart_coords.setter
def cart_coords(self, cart_coords):
self._cart_coords = cart_coords
self._B_prim = None
@property
def coords(self):
return self.prim_coords
@property
def coord_indices(self):
ic_ind_tuples = [tuple(ic.inds) for ic in self._prim_internals]
return {ic_inds: i for i, ic_inds in enumerate(ic_ind_tuples)}
@property
def dihed_start(self):
return len(self.bond_indices) + len(self.bending_indices)
def get_index_of_prim_coord(self, prim_ind):
"""Index of primitive internal for the given atom indices.
TODO: simplify this so when we get a prim_ind of len 2
(bond) we don't have to check the bending and dihedral indices."""
prim_ind_set = set(prim_ind)
indices = [i for i, pi in enumerate(it.chain(*self.prim_indices))
if set(pi) == prim_ind_set]
index = None
try:
index = indices[0]
except IndexError:
self.log(f"Primitive internal with indices {prim_ind} "
"is not defined!")
return index
@property
def c3d(self):
return self.cart_coords.reshape(-1, 3)
@property
def B_prim(self):
"""Wilson B-Matrix"""
if self._B_prim is None:
self._B_prim = np.array([c.grad for c in self.calculate(self.cart_coords)])
return self._B_prim
@property
def B(self):
"""Wilson B-Matrix"""
return self.B_prim
@property
def Bt_inv(self):
"""Transposed generalized inverse of the Wilson B-Matrix."""
B = self.B
return np.linalg.pinv(B.dot(B.T)).dot(B)
@property
def B_inv(self):
"""Generalized inverse of the Wilson B-Matrix."""
B = self.B
return B.T.dot(np.linalg.pinv(B.dot(B.T)))
@property
def P(self):
"""Projection matrix onto B. See [1] Eq. (4)."""
return self.B.dot(self.B_inv)
def transform_forces(self, cart_forces):
"""Combination of Eq. (9) and (11) in [1]."""
return self.Bt_inv.dot(cart_forces)
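# Note added for clarity: with B the Wilson B-matrix, the line above
# implements the generalized-inverse mapping of [1] Eq. (9)/(11),
#     g_int = (B B^T)^+ B g_cart
# which is exactly Bt_inv.dot(cart_forces).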
def get_K_matrix(self, int_gradient=None):
if int_gradient is not None:
assert len(int_gradient) == len(self._prim_internals)
size_ = self.cart_coords.size
if int_gradient is None:
return np.zeros((size_, size_))
dg_funcs = {
2: d2q_b,
3: d2q_a,
4: d2q_d,
}
def grad_deriv_wrapper(inds):
coords_flat = self.c3d[inds].flatten()
dgrad = dg_funcs[len(inds)](*coords_flat)
return dgrad
K_flat = np.zeros(size_ * size_)
for pc, int_grad_item in zip(self._prim_internals, int_gradient):
# Contract with gradient
try:
dg = int_grad_item * grad_deriv_wrapper(pc.inds)
except (ValueError, ZeroDivisionError) as err:
self.log( "Error in calculation of 2nd derivative of primitive "
f"internal {pc.inds}."
)
continue
# Depending on the type of internal coordinate dg is a flat array
# of size 36 (stretch), 81 (bend) or 144 (torsion).
#
# An internal coordinate contributes to an element K[j, k] of the
# K matrix if the cartesian coordinate indices j and k belong to an
# atom that contributes to the respective internal coordinate.
#
# As for now we build up the K matrix as flat array. To add the dg
# entries at the appropriate places in K_flat we have to calculate
# the corresponding flat indices of dg in K_flat.
cart_inds = list(it.chain(*[range(3*i,3*i+3) for i in pc.inds]))
flat_inds = [row*size_ + col for row, col in it.product(cart_inds, cart_inds)]
K_flat[flat_inds] += dg
K = K_flat.reshape(size_, size_)
return K
def transform_hessian(self, cart_hessian, int_gradient=None):
"""Transform Cartesian Hessian to internal coordinates."""
if int_gradient is None:
self.log("Supplied 'int_gradient' is None. K matrix will be zero, "
"so derivatives of the Wilson-B-matrix are neglected in "
"the hessian transformation."
)
K = self.get_K_matrix(int_gradient)
return self.Bt_inv.dot(cart_hessian-K).dot(self.B_inv)
def backtransform_hessian(self, redund_hessian, int_gradient=None):
"""Transform Hessian in internal coordinates to Cartesians."""
if int_gradient is None:
self.log("Supplied 'int_gradient' is None. K matrix will be zero, "
"so derivatives of the Wilson-B-matrix are neglected in "
"the hessian transformation."
)
K = self.get_K_matrix(int_gradient)
return self.B.T.dot(redund_hessian).dot(self.B) + K
def project_hessian(self, H, shift=1000):
"""Expects a hessian in internal coordinates. See Eq. (11) in [1]."""
P = self.P
return P.dot(H).dot(P) + shift*(np.eye(P.shape[0]) - P)
def project_vector(self, vector):
"""Project supplied vector onto range of B."""
return self.P.dot(vector)
def connect_fragments(self, cdm, fragments):
"""Determine the smallest interfragment bond for a list
of fragments and a condensed distance matrix."""
dist_mat = squareform(cdm)
interfragment_indices = list()
for frag1, frag2 in it.combinations(fragments, 2):
arr1 = np.array(list(frag1))[None,:]
arr2 = np.array(list(frag2))[:,None]
indices = [(i1, i2) for i1, i2 in it.product(frag1, frag2)]
distances = np.array([dist_mat[ind] for ind in indices])
min_index = indices[distances.argmin()]
interfragment_indices.append(min_index)
# Or as Philipp proposed: two loops over the fragments and only
# generate interfragment distances. So we get a full matrix with
# the original indices but only the required distances.
return interfragment_indices
def set_hydrogen_bond_indices(self, bond_indices):
coords3d = self.cart_coords.reshape(-1, 3)
tmp_sets = [frozenset(bi) for bi in bond_indices]
# Check for hydrogen bonds as described in [1] A.1 .
# Find hydrogens bonded to small electronegative atoms X = (N, O
# F, P, S, Cl).
hydrogen_inds = [i for i, a in enumerate(self.atoms)
if a.lower() == "h"]
x_inds = [i for i, a in enumerate(self.atoms)
if a.lower() in "n o f p s cl".split()]
hydrogen_bond_inds = list()
for h_ind, x_ind in it.product(hydrogen_inds, x_inds):
as_set = set((h_ind, x_ind))
if not as_set in tmp_sets:
continue
# Check if the distance of H to another electronegative atom Y is
# greater than the sum of their covalent radii but smaller than
# 0.9 times the sum of their van der Waals radii. If the
# angle X-H-Y is greater than 90° a hydrogen bond is assigned.
y_inds = set(x_inds) - set((x_ind, ))
for y_ind in y_inds:
y_atom = self.atoms[y_ind].lower()
cov_rad_sum = CR["h"] + CR[y_atom]
distance = self.calc_stretch(coords3d, (h_ind, y_ind))
vdw = 0.9 * (VDW_RADII["h"] + VDW_RADII[y_atom])
angle = self.calc_bend(coords3d, (x_ind, h_ind, y_ind))
if (cov_rad_sum < distance < vdw) and (angle > np.pi/2):
self.hydrogen_bond_indices.append((h_ind, y_ind))
self.log(f"Added hydrogen bond between atoms {h_ind} "
f"({self.atoms[h_ind]}) and {y_ind} ({self.atoms[y_ind]})")
self.hydrogen_bond_indices = np.array(self.hydrogen_bond_indices)
def set_bond_indices(self, define_bonds=None, factor=None):
"""
Default factor of 1.3 taken from [1] A.1.
Gaussian uses somewhat less, like 1.2, or different radii than we do.
"""
bond_factor = factor if factor else self.bond_factor
coords3d = self.cart_coords.reshape(-1, 3)
# Condensed distance matrix
cdm = pdist(coords3d)
# Generate indices corresponding to the atom pairs in the
# condensed distance matrix cdm.
atom_indices = list(it.combinations(range(len(coords3d)),2))
atom_indices = np.array(atom_indices, dtype=int)
cov_rad_sums = get_pair_covalent_radii(self.atoms)
cov_rad_sums *= bond_factor
bond_flags = cdm <= cov_rad_sums
bond_indices = atom_indices[bond_flags]
if define_bonds:
bond_indices = np.concatenate((bond_indices, define_bonds), axis=0)
self.bare_bond_indices = bond_indices
# Look for hydrogen bonds
self.set_hydrogen_bond_indices(bond_indices)
if self.hydrogen_bond_indices.size > 0:
bond_indices = np.concatenate((bond_indices,
self.hydrogen_bond_indices))
# Merge bond index sets into fragments
bond_ind_sets = [frozenset(bi) for bi in bond_indices]
fragments = merge_fragments(bond_ind_sets)
# Look for unbonded single atoms and create fragments for them.
bonded_set = set(tuple(bond_indices.flatten()))
unbonded_set = set(range(len(self.atoms))) - bonded_set
fragments.extend(
[frozenset((atom, )) for atom in unbonded_set]
)
self.fragments = fragments
# Check if there are any disconnected fragments. If there are some
# create interfragment bonds between all of them.
if len(fragments) != 1:
interfragment_inds = self.connect_fragments(cdm, fragments)
bond_indices = np.concatenate((bond_indices, interfragment_inds))
self.bond_indices = bond_indices
def are_parallel(self, vec1, vec2, angle_ind=None, thresh=1e-6):
dot = max(min(vec1.dot(vec2), 1), -1)
rad = np.arccos(dot)
# angle > 175°
if abs(rad) > self.RAD_175:
# self.log(f"Nearly linear angle {angle_ind}: {np.rad2deg(rad)}")
ind_str = f" ({angle_ind})" if (angle_ind is not None) else ""
self.log(f"Nearly linear angle{ind_str}: {np.rad2deg(rad)}")
return abs(rad) > (np.pi - thresh)
def sort_by_central(self, set1, set2):
"""Determines a common index in two sets and returns a length 3
tuple with the central index at the middle position and the two
terminal indices as first and last indices."""
central_set = set1 & set2
union = set1 | set2
assert len(central_set) == 1
terminal1, terminal2 = union - central_set
(central, ) = central_set
return (terminal1, central, terminal2), central
def is_valid_bend(self, bend_ind):
val = self.calc_bend(self.c3d, bend_ind)
deg = np.rad2deg(val)
# Always return true if bends should not be checked
return (
not self.check_bends) or (self.BEND_MIN_DEG <= deg <= self.BEND_MAX_DEG
)
def set_bending_indices(self, define_bends=None):
bond_sets = {frozenset(bi) for bi in self.bond_indices}
for bond_set1, bond_set2 in it.combinations(bond_sets, 2):
union = bond_set1 | bond_set2
if len(union) == 3:
as_tpl, _ = self.sort_by_central(bond_set1, bond_set2)
if not self.is_valid_bend(as_tpl):
self.log(f"Didn't create bend {list(as_tpl)}")
# f" with value of {deg:.3f}°")
continue
self.bending_indices.append(as_tpl)
self.bending_indices = np.array(self.bending_indices, dtype=int)
if define_bends:
bis = np.concatenate((self.bending_indices, define_bends), axis=0)
self.bending_indices = bis
def is_valid_dihedral(self, dihedral_ind, thresh=1e-6):
# Check for linear atoms
first_angle = self.calc_bend(self.c3d, dihedral_ind[:3])
second_angle = self.calc_bend(self.c3d, dihedral_ind[1:])
pi_thresh = np.pi - thresh
return ((abs(first_angle) < pi_thresh)
and (abs(second_angle) < pi_thresh)
)
def set_dihedral_indices(self, define_dihedrals=None):
dihedrals = list()
def set_dihedral_index(dihedral_ind):
dihed = tuple(dihedral_ind)
# Check if this dihedral is already present
if (dihed in dihedrals) or (dihed[::-1] in dihedrals):
return
# Assure that the angles are below 175° (3.054326 rad)
if not self.is_valid_dihedral(dihedral_ind, thresh=0.0873):
self.log(f"Skipping generation of dihedral {dihedral_ind} "
"as some of the the atoms are (nearly) linear."
)
return
self.dihedral_indices.append(dihedral_ind)
dihedrals.append(dihed)
improper_dihedrals = list()
coords3d = self.cart_coords.reshape(-1, 3)
for bond, bend in it.product(self.bond_indices, self.bending_indices):
central = bend[1]
bend_set = set(bend)
bond_set = set(bond)
# Check if the two sets share one common atom. If not continue.
intersect = bend_set & bond_set
if len(intersect) != 1:
continue
# When the common atom is a terminal atom of the bend, that is
# it's not the central atom of the bend, we create a
# proper dihedral. Before we create any improper dihedrals we
# create these proper dihedrals.
if central not in bond_set:
# The new terminal atom in the dihedral is the one that
# doesn't intersect.
terminal = tuple(bond_set - intersect)[0]
intersecting_atom = tuple(intersect)[0]
if intersecting_atom == bend[0]:
dihedral_ind = [terminal] + bend.tolist()
else:
dihedral_ind = bend.tolist() + [terminal]
set_dihedral_index(dihedral_ind)
# If the common atom is the central atom we try to form an out
# of plane bend / improper torsion. They may be created later on.
else:
fourth_atom = list(bond_set - intersect)
dihedral_ind = bend.tolist() + fourth_atom
# This way dihedrals may be generated that contain linear
# atoms and these would be undefined. So we check for this.
dihed = self.calc_dihedral(coords3d, dihedral_ind)
if not np.isnan(dihed):
improper_dihedrals.append(dihedral_ind)
else:
self.log(f"Dihedral {dihedral_ind} is undefinied. Skipping it!")
# Now try to create the remaining improper dihedrals.
if (len(self.atoms) >= 4) and (len(self.dihedral_indices) == 0):
for improp in improper_dihedrals:
set_dihedral_index(improp)
self.log("Permutational symmetry not considerd in "
"generation of improper dihedrals.")
self.dihedral_indices = np.array(self.dihedral_indices)
if define_dihedrals:
dis = np.concatenate((self.dihedral_indices, define_dihedrals), axis=0)
self.dihedral_indices = dis
def sort_by_prim_type(self, to_sort):
by_prim_type = [[], [], []]
if to_sort is None:
to_sort = list()
for item in to_sort:
len_ = len(item)
by_prim_type[len_-2].append(item)
return by_prim_type
def set_primitive_indices(self, define_prims=None):
stretches, bends, dihedrals = self.sort_by_prim_type(define_prims)
self.set_bond_indices(stretches)
self.set_bending_indices(bends)
self.set_dihedral_indices(dihedrals)
def calculate(self, coords, attr=None):
coords3d = coords.reshape(-1, 3)
def per_type(func, ind):
val, grad = func(coords3d, ind, True)
return PrimitiveCoord(ind, val, grad)
self.bonds = list()
self.bends = list()
self.dihedrals = list()
for ind in self.bond_indices:
bonds = per_type(self.calc_stretch, ind)
self.bonds.append(bonds)
for ind in self.bending_indices:
bend = per_type(self.calc_bend, ind)
self.bends.append(bend)
for ind in self.dihedral_indices:
dihedral = per_type(self.calc_dihedral, ind)
self.dihedrals.append(dihedral)
int_coords = self.bonds + self.bends + self.dihedrals
if attr:
return np.array([getattr(ic,attr) for ic in int_coords])
return int_coords
def calc_stretch(self, coords3d, bond_ind, grad=False):
n, m = bond_ind
bond = coords3d[m] - coords3d[n]
bond_length = np.linalg.norm(bond)
if grad:
bond_normed = bond / bond_length
row = np.zeros_like(coords3d)
# 1 / -1 correspond to the sign factor [1] Eq. 18
row[m,:] = bond_normed
row[n,:] = -bond_normed
row = row.flatten()
return bond_length, row
return bond_length
def calc_bend(self, coords3d, angle_ind, grad=False):
m, o, n = angle_ind
u_dash = coords3d[m] - coords3d[o]
v_dash = coords3d[n] - coords3d[o]
u_norm = np.linalg.norm(u_dash)
v_norm = np.linalg.norm(v_dash)
u = u_dash / u_norm
v = v_dash / v_norm
angle_rad = np.arccos(u.dot(v))
if grad:
# Eq. (24) in [1]
if self.are_parallel(u, v, angle_ind):
tmp_vec = np.array((1, -1, 1))
par = self.are_parallel(u, tmp_vec) and self.are_parallel(v, tmp_vec)
tmp_vec = np.array((-1, 1, 1)) if par else tmp_vec
w_dash = np.cross(u, tmp_vec)
else:
w_dash = np.cross(u, v)
w_norm = np.linalg.norm(w_dash)
w = w_dash / w_norm
uxw = np.cross(u, w)
wxv = np.cross(w, v)
row = np.zeros_like(coords3d)
# | m | n | o |
# -----------------------------------
# sign_factor(amo) | 1 | 0 | -1 | first_term
# sign_factor(ano) | 0 | 1 | -1 | second_term
first_term = uxw / u_norm
second_term = wxv / v_norm
row[m,:] = first_term
row[o,:] = -first_term - second_term
row[n,:] = second_term
row = row.flatten()
return angle_rad, row
return angle_rad
def calc_dihedral(self, coords3d, dihedral_ind, grad=False, cos_tol=1e-9):
m, o, p, n = dihedral_ind
u_dash = coords3d[m] - coords3d[o]
v_dash = coords3d[n] - coords3d[p]
w_dash = coords3d[p] - coords3d[o]
u_norm = np.linalg.norm(u_dash)
v_norm = np.linalg.norm(v_dash)
w_norm = np.linalg.norm(w_dash)
u = u_dash / u_norm
v = v_dash / v_norm
w = w_dash / w_norm
phi_u = np.arccos(u.dot(w))
phi_v = np.arccos(-w.dot(v))
uxw = np.cross(u, w)
vxw = np.cross(v, w)
cos_dihed = uxw.dot(vxw)/(np.sin(phi_u)*np.sin(phi_v))
# Restrict cos_dihed to [-1, 1]
if cos_dihed >= 1 - cos_tol:
dihedral_rad = 0
elif cos_dihed <= -1 + cos_tol:
dihedral_rad = np.arccos(-1)
else:
dihedral_rad = np.arccos(cos_dihed)
if dihedral_rad != np.pi:
# wxv = np.cross(w, v)
# if wxv.dot(u) < 0:
if vxw.dot(u) < 0:
dihedral_rad *= -1
if grad:
row = np.zeros_like(coords3d)
# | m | n | o | p |
# ------------------------------------------
# sign_factor(amo) | 1 | 0 | -1 | 0 | 1st term
# sign_factor(apn) | 0 | -1 | 0 | 1 | 2nd term
# sign_factor(aop) | 0 | 0 | 1 | -1 | 3rd term
# sign_factor(apo) | 0 | 0 | -1 | 1 | 4th term
sin2_u = np.sin(phi_u)**2
sin2_v = np.sin(phi_v)**2
first_term = uxw/(u_norm*sin2_u)
second_term = vxw/(v_norm*sin2_v)
third_term = uxw*np.cos(phi_u)/(w_norm*sin2_u)
fourth_term = -vxw*np.cos(phi_v)/(w_norm*sin2_v)
row[m,:] = first_term
row[n,:] = -second_term
row[o,:] = -first_term + third_term - fourth_term
row[p,:] = second_term - third_term + fourth_term
row = row.flatten()
return dihedral_rad, row
return dihedral_rad
def update_internals(self, new_cartesians, prev_internals):
new_internals = self.calculate(new_cartesians, attr="val")
internal_diffs = np.array(new_internals - prev_internals)
_, _, dihedrals = self.prim_indices
dihedral_diffs = internal_diffs[-len(dihedrals):]
# Find differences that are shifted by 2*pi
shifted_by_2pi = np.abs(np.abs(dihedral_diffs) - 2*np.pi) < np.pi/2
org = dihedral_diffs.copy()
new_dihedrals = new_internals[-len(dihedrals):]
new_dihedrals[shifted_by_2pi] -= 2*np.pi * np.sign(dihedral_diffs[shifted_by_2pi])
new_internals[-len(dihedrals):] = new_dihedrals
return new_internals
def dihedrals_are_valid(self, cart_coords):
_, _, dihedrals = self.prim_indices
def collinear(v1, v2, thresh=1e-4):
# ~4e-5 corresponds to 179.5°
return 1 - abs(v1.dot(v2)) <= thresh
coords3d = cart_coords.reshape(-1, 3)
def check(indices):
m, o, p, n = indices
u_dash = coords3d[m] - coords3d[o]
v_dash = coords3d[n] - coords3d[p]
w_dash = coords3d[p] - coords3d[o]
u_norm = np.linalg.norm(u_dash)
v_norm = np.linalg.norm(v_dash)
w_norm = np.linalg.norm(w_dash)
u = u_dash / u_norm
v = v_dash / v_norm
w = w_dash / w_norm
valid = not (collinear(u, w) or collinear(v, w))
return valid
all_valid = all([check(indices) for indices in dihedrals])
return all_valid
def transform_int_step(self, step, cart_rms_thresh=1e-6):
"""This is always done in primitive internal coordinates so care
has to be taken that the supplied step is given in primitive internal
coordinates."""
remaining_int_step = step
cur_cart_coords = self.cart_coords.copy()
cur_internals = self.prim_coords
target_internals = cur_internals + step
B_prim = self.B_prim
# Bt_inv may be overridden in other coordinate systems so we
# calculate it 'manually' here.
Bt_inv_prim = np.linalg.pinv(B_prim.dot(B_prim.T)).dot(B_prim)
last_rms = 9999
prev_internals = cur_internals
self.backtransform_failed = True
for i in range(25):
cart_step = Bt_inv_prim.T.dot(remaining_int_step)
# Recalculate exact Bt_inv every cycle. Costly.
# cart_step = self.Bt_inv.T.dot(remaining_int_step)
cart_rms = np.sqrt(np.mean(cart_step**2))
# Update cartesian coordinates
cur_cart_coords += cart_step
# Determine new internal coordinates
new_internals = self.update_internals(cur_cart_coords, prev_internals)
remaining_int_step = target_internals - new_internals
internal_rms = np.sqrt(np.mean(remaining_int_step**2))
self.log(f"Cycle {i}: rms(Δcart)={cart_rms:1.4e}, "
f"rms(Δinternal) = {internal_rms:1.5e}"
)
# This assumes the first cart_rms won't be > 9999 ;)
if (cart_rms < last_rms):
# Store results of the conversion cycle for later use, if
# the internal-cartesian-transformation goes bad.
best_cycle = (cur_cart_coords.copy(), new_internals.copy())
best_cycle_ind = i
elif i != 0:
# If the conversion somehow fails we return the step
# saved above.
self.log( "Internal to cartesian failed! Using from step "
f"from cycle {best_cycle_ind}."
)
cur_cart_coords, new_internals = best_cycle
break
else:
raise Exception("Internal-cartesian back-transformation already "
"failed in the first step. Aborting!"
)
prev_internals = new_internals
last_rms = cart_rms
if cart_rms < cart_rms_thresh:
self.log("Internal to cartesian transformation converged!")
self.backtransform_failed = False
break
self._prim_coords = np.array(new_internals)
if self.check_dihedrals and (not self.dihedrals_are_valid(cur_cart_coords)):
raise NeedNewInternalsException(cur_cart_coords)
self.log("")
# Return the difference between the new cartesian coordinates that yield
# the desired internal coordinates and the old cartesian coordinates.
return cur_cart_coords - self.cart_coords
def __str__(self):
bonds = len(self.bond_indices)
bends = len(self.bending_indices)
dihedrals = len(self.dihedral_indices)
name = self.__class__.__name__
return f"{name}({bonds} bonds, {bends} bends, {dihedrals} dihedrals)"
|
eljost/pysisyphus
|
deprecated/intcoords/InternalCoordinatesOld.py
|
Python
|
gpl-3.0
| 29,577
|
[
"Gaussian"
] |
63fdb965159c68d5f5e7ccde73d56110cd5bcee015424a9a88cdca2370c1faf9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("parilis.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
primoz-k/parilis
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,229
|
[
"VisIt"
] |
5a078ee8a3b36ff80fef8c88cfb67e6feb0575b0ea734d5444854a5dbfeea7cc
|
from __future__ import annotations
import collections
import itertools
import math
import types
import wx
from wx.lib.intctrl import IntCtrl
from cctbx import crystal, uctbx
from cctbx.miller import index_generator
from dxtbx.imageset import ImageSet
from dxtbx.model.detector_helpers import get_detector_projection_2d_axes
from dxtbx.model.experiment_list import ExperimentList, ExperimentListFactory
from libtbx.utils import flat_list
from scitbx import matrix
from wxtbx import bitmaps, icons
from wxtbx.phil_controls import EVT_PHIL_CONTROL
from wxtbx.phil_controls.floatctrl import FloatCtrl
from wxtbx.phil_controls.intctrl import IntCtrl as PhilIntCtrl
from wxtbx.phil_controls.ints import IntsCtrl
from wxtbx.phil_controls.strctrl import StrCtrl
from dials.algorithms.image.threshold import (
DispersionExtendedThresholdDebug,
DispersionThresholdDebug,
)
from dials.algorithms.shoebox import MaskCode
from dials.array_family import flex
from dials.command_line.find_spots import phil_scope as find_spots_phil_scope
from dials.extensions import SpotFinderThreshold
from dials.util import masking
from dials.util.image_viewer.mask_frame import MaskSettingsFrame
from dials.util.image_viewer.spotfinder_wrap import chooser_wrapper
from .slip_viewer import pyslip
from .slip_viewer.frame import MASK_VAL, XrayFrame
from .viewer_tools import (
EVT_ZEROMQ_EVENT,
ImageChooserControl,
ImageCollectionWithSelection,
LegacyChooserAdapter,
)
try:
from typing import Optional
except ImportError:
pass
SpotfinderData = collections.namedtuple(
"SpotfinderData",
[
"all_pix_data",
"all_foreground_circles",
"ctr_mass_data",
"max_pix_data",
"miller_indices_data",
"predictions_data",
"shoebox_data",
"vector_data",
"vector_text_data",
],
)
myEVT_LOADIMG = wx.NewEventType()
EVT_LOADIMG = wx.PyEventBinder(myEVT_LOADIMG, 1)
class LoadImageEvent(wx.PyCommandEvent):
"""Event to signal that an image should be loaded"""
def __init__(self, etype, eid, filename=None):
"""Creates the event object"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._filename = filename
def get_filename(self):
return self._filename
def create_load_image_event(destination, filename):
wx.PostEvent(destination, LoadImageEvent(myEVT_LOADIMG, -1, filename))
class RadialProfileThresholdDebug:
# The radial_profile threshold algorithm does not have an associated
# 'Debug' class. It does not create the same set of intermediate images
# as the dispersion algorithms, so we can delegate to a
# DispersionThresholdDebug object for those, while overriding the final_mask
# method. This wrapper class handles that.
def __init__(self, imageset, n_iqr, blur, n_bins):
self.imageset = imageset
params = find_spots_phil_scope.extract()
params.spotfinder.threshold.radial_profile.blur = blur
params.spotfinder.threshold.radial_profile.n_bins = n_bins
self.radial_profile = SpotFinderThreshold.load("radial_profile")(params)
self._i_panel = 0
def __call__(self, *args):
dispersion = DispersionThresholdDebug(*args)
image = args[0]
mask = args[1]
dispersion._final_mask = self.radial_profile.compute_threshold(
image, mask, imageset=self.imageset, i_panel=self._i_panel
)
dispersion.final_mask = types.MethodType(lambda x: x._final_mask, dispersion)
self._i_panel += 1
return dispersion
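# Hypothetical usage sketch (not part of the original viewer code): the
# wrapper is constructed once per imageset and then called, once per panel,
# with the same positional arguments a DispersionThresholdDebug would
# receive (the image as args[0] and the mask as args[1], as used above).
#
#     debug_factory = RadialProfileThresholdDebug(imageset, n_iqr=6.0,
#                                                 blur=None, n_bins=100)
#     debug = debug_factory(image, mask, *other_dispersion_args)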
class SpotFrame(XrayFrame):
def __init__(self, *args, **kwds):
self.experiments = kwds.pop("experiments")
self.reflections = kwds.pop("reflections")
self.imagesets = list(
itertools.chain(*[x.imagesets() for x in self.experiments])
)
self.crystals = list(itertools.chain(*[x.crystals() for x in self.experiments]))
if len(self.imagesets) == 0:
raise RuntimeError("No imageset could be constructed")
# Store the list of images we can view
self.images = ImageCollectionWithSelection()
super().__init__(*args, **kwds)
# Precalculate best-fit frame for image display if required
for experiment_list in self.experiments:
for experiment in experiment_list:
detector = experiment.detector
if not detector:
self.params.projection = None
continue
if detector.has_projection_2d():
self.params.projection = None
continue
else:
detector.projection_2d_axes = get_detector_projection_2d_axes(
detector
)
detector.projection = self.params.projection
self.viewing_stills = True
for experiment_list in self.experiments:
if any(exp.scan or exp.goniometer for exp in experiment_list):
self.viewing_stills = False
break
if self.viewing_stills:
is_multi_shot_exp = any(len(exp_list) > 1 for exp_list in self.experiments)
if is_multi_shot_exp:
assert len(self.experiments) == 1
if self.reflections:
assert len(self.reflections) == 1
assert len(self.experiments[0]) == len(
set(self.reflections[0]["id"])
)
new_experiments = []
new_reflections = []
for i_expt, expt in enumerate(self.experiments[0]):
print(
"Perparing experiments (%d / %d)"
% (i_expt + 1, len(self.experiments[0]))
)
exp_list = ExperimentList()
exp_list.append(expt)
new_experiments.append(exp_list)
if self.reflections:
refls = self.reflections[0].select(
self.reflections[0]["id"] == i_expt
)
refls["id"] = flex.int(len(refls), 0)
new_reflections.append(refls)
self.experiments = new_experiments
self.reflections = new_reflections
else:
new_reflections = []
for refls in self.reflections:
refls["id"] = flex.int(len(refls), 0)
new_reflections.append(refls)
self.reflections = new_reflections
# If we have only one imageset, unindexed filtering becomes easier
self.have_one_imageset = len(set(self.imagesets)) <= 1
if self.viewing_stills:
self.have_one_imageset = True
self.viewer.reflections = self.reflections
self.viewer.frames = self.imagesets
self.dials_spotfinder_layers = []
self.shoebox_layer = None
self.ctr_mass_layer = None
self.max_pix_layer = None
self.predictions_layer = None
self.miller_indices_layer = None
self.vector_layer = None
self.vector_text_layer = None
self._ring_layer = None
self._resolution_text_layer = None
self.sel_image_polygon_layer = None
self.sel_image_circle_layers = []
self.mask_input = self.params.mask
self.mask_image_viewer = None
self._mask_frame = None
self.display_foreground_circles_patch = False # hard code this option, for now
self._dispersion_debug_list_hash = 0
if (
self.experiments is not None
and not self.reflections
and self.params.predict_reflections
):
self.reflections = self.predict()
if self.params.d_min is not None and len(self.reflections):
reflections = []
for expt, refl in zip(self.experiments, self.reflections):
if "rlp" not in refl:
if "xyzobs.mm.value" not in refl:
if "xyzobs.px.value" not in refl:
refl["xyzobs.px.value"] = refl["xyzcal.px"]
refl["xyzobs.px.variance"] = flex.vec3_double(
len(refl), (1, 1, 1)
)
refl.centroid_px_to_mm(ExperimentList([expt]))
refl.map_centroids_to_reciprocal_space(ExperimentList([expt]))
d_spacings = 1 / refl["rlp"].norms()
refl = refl.select(d_spacings > self.params.d_min)
reflections.append(refl)
self.reflections = reflections
self.Bind(EVT_LOADIMG, self.load_file_event)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIMask, id=self._id_mask)
self.Bind(EVT_ZEROMQ_EVENT, self.OnZeroMQEvent)
def setup_toolbar(self):
btn = self.toolbar.AddTool(
toolId=-1,
label="Load file",
bitmap=icons.hkl_file.GetBitmap(),
shortHelp="Load file",
kind=wx.ITEM_NORMAL,
)
self.Bind(wx.EVT_MENU, self.OnLoadFile, btn)
# btn = self.toolbar.AddTool(toolId=-1,
# label="Settings",
# bitmap=icons.advancedsettings.GetBitmap(),
# shortHelp="Settings",
# kind=wx.ITEM_NORMAL)
# self.Bind(wx.EVT_MENU, self.OnShowSettings, btn)
# btn = self.toolbar.AddTool(toolId=-1,
# label="Zoom",
# bitmap=icons.search.GetBitmap(),
# shortHelp="Zoom",
# kind=wx.ITEM_NORMAL)
# self.Bind(wx.EVT_MENU, self.OnZoom, btn)
btn = self.toolbar.AddTool(
toolId=wx.ID_SAVEAS,
label="Save As...",
bitmap=bitmaps.fetch_icon_bitmap("actions", "save_all", 32),
shortHelp="Save As...",
kind=wx.ITEM_NORMAL,
)
self.Bind(wx.EVT_MENU, self.OnSaveAs, btn)
txt = wx.StaticText(self.toolbar, -1, "Image:")
self.toolbar.AddControl(txt)
# Because parent classes (e.g. XrayFrame) depend on this control being
# explicitly created in this subclass, we create an adapter to connect it to the new design
self.image_chooser = LegacyChooserAdapter(self.images, self.load_image)
# Create a sub-control with our image selection slider and label
# Manually tune the height for now - don't understand toolbar sizing
panel = ImageChooserControl(self.toolbar, size=(300, 40))
# The Toolbar doesn't call layout for its children?!
panel.Layout()
# Platform support for slider events seems a little inconsistent
# with wxPython 3, so we just trap all EVT_SLIDER events.
panel.Bind(wx.EVT_SLIDER, self.OnChooseImage)
# These events appear to be a more reliable indicator?
panel.Bind(wx.EVT_SCROLL_CHANGED, self.OnChooseImage)
# Finally, add our new control to the toolbar
self.toolbar.AddControl(panel)
self.image_chooser_panel = panel
btn = self.toolbar.AddTool(
toolId=wx.ID_BACKWARD,
label="Previous",
bitmap=bitmaps.fetch_icon_bitmap("actions", "1leftarrow"),
shortHelp="Previous",
kind=wx.ITEM_NORMAL,
)
self.Bind(wx.EVT_MENU, self.OnPrevious, btn)
btn = self.toolbar.AddTool(
toolId=wx.ID_FORWARD,
label="Next",
bitmap=bitmaps.fetch_icon_bitmap("actions", "1rightarrow"),
shortHelp="Next",
kind=wx.ITEM_NORMAL,
)
self.Bind(wx.EVT_MENU, self.OnNext, btn)
txt = wx.StaticText(self.toolbar, -1, "Jump:")
self.toolbar.AddControl(txt)
self.jump_to_image = PhilIntCtrl(self.toolbar, -1, name="image", size=(65, -1))
self.jump_to_image.SetMin(1)
self.jump_to_image.SetValue(1)
self.toolbar.AddControl(self.jump_to_image)
self.Bind(EVT_PHIL_CONTROL, self.OnJumpToImage, self.jump_to_image)
txt = wx.StaticText(self.toolbar, -1, "Stack:")
self.toolbar.AddControl(txt)
self.stack = PhilIntCtrl(self.toolbar, -1, name="stack", size=(65, -1))
self.stack.SetMin(1)
self.stack.SetValue(self.params.stack_images)
self.toolbar.AddControl(self.stack)
self.Bind(EVT_PHIL_CONTROL, self.OnStack, self.stack)
def setup_menus(self):
super().setup_menus()
# XXX Placement
self._id_mask = wx.NewId()
item = self._actions_menu.Append(self._id_mask, " ")
self.Bind(wx.EVT_MENU, self.OnMask, source=item)
def OnMask(self, event):
if not self._mask_frame:
self._mask_frame = MaskSettingsFrame(
self,
wx.ID_ANY,
"Mask tool",
style=wx.CAPTION | wx.CLOSE_BOX | wx.RESIZE_BORDER,
)
self._mask_frame.Show()
self._mask_frame.Raise()
else:
self._mask_frame.Destroy()
def OnUpdateUIMask(self, event):
# Toggle the menu item text depending on the state of the tool.
if self._mask_frame:
event.SetText("Hide mask tool")
else:
event.SetText("Show mask tool")
def OnChooseImage(self, event):
# Whilst scrolling and choosing, show what we are looking at
selected_image = self.images[self.image_chooser_panel.GetValue() - 1]
# Always show the current 'loaded' image as such
if selected_image == self.images.selected:
self.image_chooser_panel.set_label(self.get_key(selected_image))
else:
self.image_chooser_panel.set_temporary_label(self.get_key(selected_image))
# Don't update whilst dragging the slider
if event.EventType == wx.EVT_SLIDER.typeId:
if wx.GetMouseState().LeftIsDown():
return
# Once we've stopped scrolling, load the selected item
self.load_image(selected_image)
def OnPrevious(self, event):
super().OnPrevious(event)
# Parent function moves - now update the UI to match
self.jump_to_image.SetValue(self.images.selected_index + 1)
def OnNext(self, event):
super().OnNext(event)
# Parent function moves - now update the UI to match
self.jump_to_image.SetValue(self.images.selected_index + 1)
def OnJumpToImage(self, event):
phil_value = self.jump_to_image.GetPhilValue()
if self.images.selected_index != (phil_value - 1):
self.load_image(self.images[phil_value - 1])
def OnStack(self, event):
value = self.stack.GetPhilValue()
if value == 1:
for button in self.settings_frame.panel.dispersion_buttons:
button.Enable()
else:
for button in self.settings_frame.panel.dispersion_buttons:
button.Disable()
if value != self.params.stack_images:
self.params.stack_images = value
self.reload_image()
def GetBoxCorners(self, layer, p1, p2):
"""Get list of points inside box.
layer reference to layer object we are working on
p1 one corner point of selection box
p2 opposite corner point of selection box
We have to figure out which corner is which.
Return a list of (lon, lat) of points inside box.
Return None (no selection) or list [((lon, lat), data), ...]
of points inside the selection box.
"""
# TODO: speed this up? Do we need to??
# get canonical box limits
(p1x, p1y) = p1
(p2x, p2y) = p2
lx = min(p1x, p2x) # left x coord
rx = max(p1x, p2x)
ty = max(p1y, p2y) # top y coord
by = min(p1y, p2y)
return [(lx, by), (lx, ty), (rx, ty), (rx, by)]
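# A worked example of the canonicalisation above: for a drag from
# p1 = (10, 2) to p2 = (4, 8) we get lx = 4, rx = 10, by = 2, ty = 8, so
# the method returns [(4, 2), (4, 8), (10, 8), (10, 2)] regardless of
# which corner the drag started from.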
def boxSelect(self, event):
"""Select event from pyslip."""
point = event.point
assert event.evtype == pyslip.EventBoxSelect
if point:
assert len(point) == 4
x0, y0 = point[0]
x1, y1 = point[2]
assert point == [(x0, y0), (x0, y1), (x1, y1), (x1, y0)]
point = [
self.pyslip.tiles.map_relative_to_picture_fast_slow(*p) for p in point
]
point_ = []
panel_id = None
for p in point:
p1, p0, p_id = self.pyslip.tiles.flex_image.picture_to_readout(
p[1], p[0]
)
assert p_id >= 0, "Point must be within a panel"
if panel_id is not None:
assert (
panel_id == p_id
), "All points must be contained within a single panel"
panel_id = p_id
point_.append((p0, p1))
point = point_
region = masking.phil_scope.extract().untrusted[0]
region.polygon = flat_list(point)
region.panel = panel_id
self.settings.untrusted.append(region)
self.drawUntrustedPolygons()
return True
def drawUntrustedPolygons(self):
# remove any previous selection
if self.sel_image_polygon_layer:
self.pyslip.DeleteLayer(self.sel_image_polygon_layer)
self.sel_image_polygon_layer = None
for layer in self.sel_image_circle_layers:
self.pyslip.DeleteLayer(layer)
self.sel_image_circle_layers = []
if not len(self.settings.untrusted):
return
polygon_data = []
circle_data = []
d = {}
for region in self.settings.untrusted:
polygon = None
circle = None
if region.rectangle is not None:
x0, x1, y0, y1 = region.rectangle
polygon = [x0, y0, x1, y0, x1, y1, x0, y1]
elif region.polygon is not None:
polygon = region.polygon
elif region.circle is not None:
circle = region.circle
if polygon is not None:
assert len(polygon) % 2 == 0, "Polygon must contain 2D coords"
vertices = []
for i in range(len(polygon) // 2):
x = polygon[2 * i]
y = polygon[2 * i + 1]
vertices.append((x, y))
vertices = [
self.pyslip.tiles.flex_image.tile_readout_to_picture(
int(region.panel), v[1], v[0]
)
for v in vertices
]
vertices = [(v[1], v[0]) for v in vertices]
points_rel = [
self.pyslip.tiles.picture_fast_slow_to_map_relative(*v)
for v in vertices
]
points_rel.append(points_rel[0])
for i in range(len(points_rel) - 1):
polygon_data.append(((points_rel[i], points_rel[i + 1]), d))
if circle is not None:
x, y, r = circle
y, x = self.pyslip.tiles.flex_image.tile_readout_to_picture(
int(region.panel), y, x
)
x, y = self.pyslip.tiles.picture_fast_slow_to_map_relative(x, y)
center = matrix.col((x, y))
e1 = matrix.col((1, 0))
e2 = matrix.col((0, 1))
circle_data.append(
(
center + r * (e1 + e2),
center + r * (e1 - e2),
center + r * (-e1 - e2),
center + r * (-e1 + e2),
center + r * (e1 + e2),
)
)
if polygon_data:
self.sel_image_polygon_layer = self.pyslip.AddPolygonLayer(
polygon_data,
map_rel=True,
color="#00ffff",
radius=5,
visible=True,
# show_levels=[3,4],
name="<boxsel_pt_layer>",
)
if circle_data:
for circle in circle_data:
self.sel_image_circle_layers.append(
self.pyslip.AddEllipseLayer(
circle,
map_rel=True,
color="#00ffff",
radius=5,
visible=True,
# show_levels=[3,4],
name="<boxsel_pt_layer>",
)
)
def add_file_name_or_data(self, image_data):
"""
Adds an image to the viewer's list of images.
This just adds the image to the virtual list, and updates the UI
where necessary e.g. image chooser maximums. If the image already
exists, will not add a duplicate.
This would be better named something like 'add_image', but we can't
rename it whilst tied to rstbx.
:param image_data: The image metadata object
:type image_data: chooser_wrapper
:returns: The index of the image in the list of images
:rtype: int
"""
assert isinstance(image_data, chooser_wrapper)
# If this is already loaded, then return the index
if image_data in self.images:
return self.images.index(image_data)
self.images.add(image_data)
self.image_chooser_panel.SetMax(len(self.images))
self.jump_to_image.SetMax(len(self.images))
self.stack.SetMax(len(self.images))
return len(self.images) - 1
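# A hedged usage sketch (the `frame` and `imageset` names are
# illustrative): images are registered once and addressed by index, so
# repeated adds are cheap no-ops:
#
#   wrapped = chooser_wrapper(imageset, imageset.indices()[0])
#   idx = frame.add_file_name_or_data(wrapped)
#   assert frame.add_file_name_or_data(wrapped) == idx  # no duplicate added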
def load_file_event(self, evt):
self.load_image(evt.get_filename())
def reload_image(self):
"""Re-load the currently displayed image"""
with wx.BusyCursor():
self.load_image(self.images.selected, refresh=True)
def load_image(self, file_name_or_data, refresh=False):
"""
Load and display an image.
Given either a filename or a pre-existing image data object, loads the
image from disk, displays it, and updates the UI to reflect the new image.
:param file_name_or_data: The image item to load
:type file_name_or_data: str or chooser_wrapper
:param refresh: Should the image be reloaded if currently selected?
"""
# If this image is already loaded, then don't reload it
if not refresh and file_name_or_data == self.images.selected:
return
# If given a string, we need to load and convert to a chooser_wrapper
if isinstance(file_name_or_data, str):
experiments = ExperimentListFactory.from_filenames([file_name_or_data])
assert len(experiments) == 1
imagesets = experiments.imagesets()
imageset = imagesets[0]
file_name_or_data = chooser_wrapper(imageset, imageset.indices()[0])
self.add_file_name_or_data(file_name_or_data)
assert isinstance(file_name_or_data, chooser_wrapper)
# We should never be called without add_file_name_or_data having been called first
assert file_name_or_data in self.images
show_untrusted = False
if self.params.show_mask:
show_untrusted = True
previously_selected_image = self.images.selected
self.images.selected = file_name_or_data
# Do the actual data/image loading and update the viewer
super().load_image(
file_name_or_data,
get_image_data=self.get_image_data,
show_untrusted=show_untrusted,
)
# Update the navigation UI controls to reflect this loaded image
self.image_chooser_panel.SetValue(self.images.selected_index + 1)
self.image_chooser_panel.set_label(self.get_key(file_name_or_data))
self.jump_to_image.SetValue(self.images.selected_index + 1)
# Destroy the cached data for the previous image
if (
previously_selected_image
and previously_selected_image != self.images.selected
):
previously_selected_image.set_image_data(None)
def OnShowSettings(self, event):
if self.settings_frame is None:
frame_rect = self.GetRect()
display_rect = wx.GetClientDisplayRect()
x_start = frame_rect[0] + frame_rect[2]
if x_start > (display_rect[2] - 400):
x_start = display_rect[2] - 400
y_start = frame_rect[1]
self.settings_frame = SpotSettingsFrame(
self,
-1,
"Settings",
style=wx.CAPTION | wx.MINIMIZE_BOX,
pos=(x_start, y_start),
)
self.settings_frame.Show()
def _choose_text_colour(self):
"""Choose a text colour contrasting with the image pixels"""
if self.settings.color_scheme > 1: # heatmap or invert
return "#cdcdcd" # light grey
else:
return "#3b3b3b" # dark grey
def _draw_resolution_polygons(
self, twotheta, spacings, beamvec, bor1, bor2, detector, unit_cell, space_group
):
"""Draw resolution rings for arbitrary detector geometry using a polygon path"""
resolution_text_data = []
ring_data = []
n_rays = 720
for tt, d in zip(twotheta, spacings):
# Generate rays at 2θ
cone_base_centre = beamvec * math.cos(tt)
cone_base_radius = (beamvec * math.sin(tt)).length()
rad1 = bor1.normalize() * cone_base_radius
rad2 = bor2.normalize() * cone_base_radius
ticks = (2 * math.pi / n_rays) * flex.double_range(n_rays)
offset1 = flex.vec3_double(n_rays, rad1) * flex.cos(ticks)
offset2 = flex.vec3_double(n_rays, rad2) * flex.sin(ticks)
rays = flex.vec3_double(n_rays, cone_base_centre) + offset1 + offset2
# Get the ray intersections. Need to set a dummy phi value
rt = flex.reflection_table.empty_standard(n_rays)
rt["s1"] = rays
rt["phi"] = flex.double(n_rays, 0)
from dials.algorithms.spot_prediction import ray_intersection
intersect = ray_intersection(detector, rt)
rt = rt.select(intersect)
if len(rt) == 0:
continue
curr_panel_id = rt[0]["panel"]
panel = detector[curr_panel_id]
# Split the intersections into sets of vertices in separate paths
paths = []
vertices = []
for ref in rt.rows():
if ref["panel"] != curr_panel_id:
# close off the current path and reset the vertices
paths.append(vertices)
vertices = []
curr_panel_id = ref["panel"]
panel = detector[curr_panel_id]
x, y = panel.millimeter_to_pixel(ref["xyzcal.mm"][0:2])
vertices.append(self.map_coords(x, y, curr_panel_id))
paths.append(vertices)
# For each path, convert vertices to segments and add to the ring data
segments = []
for vertices in paths:
for i in range(len(vertices) - 1):
segments.append(
(
(vertices[i], vertices[i + 1]),
{
"width": self.pyslip.DefaultPolygonWidth,
"color": "red",
"closed": False,
},
)
)
ring_data.extend(segments)
# Add labels to the iso-resolution lines
if unit_cell is None and space_group is None:
cb1 = beamvec.rotate_around_origin(axis=bor1, angle=tt)
for angle in (45, 135, 225, 315):
txtvec = cb1.rotate_around_origin(
axis=beamvec, angle=math.radians(angle)
)
try:
panel_id, txtpos = detector.get_ray_intersection(txtvec)
except RuntimeError:
continue
txtpos = detector[panel_id].millimeter_to_pixel(txtpos)
txtpos = self.pyslip.tiles.flex_image.tile_readout_to_picture(
panel_id, txtpos[1], txtpos[0]
)[::-1]
x, y = self.pyslip.tiles.picture_fast_slow_to_map_relative(
txtpos[0], txtpos[1]
)
resolution_text_data.append(
(
x,
y,
f"{d:.2f}",
{
"placement": "cc",
"colour": "red",
},
)
)
# Remove the old ring layer, and draw a new one.
if hasattr(self, "_ring_layer") and self._ring_layer is not None:
self.pyslip.DeleteLayer(self._ring_layer, update=False)
self._ring_layer = None
self._ring_layer = self.pyslip.AddPolygonLayer(
ring_data,
name="<ring_layer>",
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
# Remove the old resolution text layer, and draw a new one.
if (
hasattr(self, "_resolution_text_layer")
and self._resolution_text_layer is not None
):
self.pyslip.DeleteLayer(self._resolution_text_layer, update=False)
self._resolution_text_layer = None
if resolution_text_data:
self._resolution_text_layer = self.pyslip.AddTextLayer(
resolution_text_data,
map_rel=True,
visible=True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
name="<resolution_text_layer>",
fontsize=self.settings.fontsize,
textcolour=self._choose_text_colour(),
update=False,
)
return
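# The ring construction above parameterises the scattering cone for a given
# 2θ: each ray is cone_base_centre + cos(t)·rad1 + sin(t)·rad2 for t in
# [0, 2π). A minimal pure-Python sketch of the same idea (standard library
# only; assumes unit-length beamvec, bor1 and bor2, vectors as 3-tuples):
#
#   import math
#   def cone_rays(beamvec, bor1, bor2, tt, n_rays=720):
#       centre = tuple(c * math.cos(tt) for c in beamvec)
#       radius = math.sin(tt)
#       rays = []
#       for k in range(n_rays):
#           t = 2 * math.pi * k / n_rays
#           rays.append(tuple(
#               centre[i] + radius * (math.cos(t) * bor1[i] + math.sin(t) * bor2[i])
#               for i in range(3)
#           ))
#       return rays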
def draw_resolution_rings(self, unit_cell=None, space_group=None):
image = self.image_chooser.GetClientData(
self.image_chooser.GetSelection()
).image_set
detector = image.get_detector()
beam = image.get_beam()
d_min = detector.get_max_resolution(beam.get_s0())
d_star_sq_max = uctbx.d_as_d_star_sq(d_min)
if unit_cell is not None and space_group is not None:
unit_cell = space_group.average_unit_cell(unit_cell)
generator = index_generator(unit_cell, space_group.type(), False, d_min)
indices = generator.to_array()
spacings = flex.sorted(unit_cell.d(indices))
else:
n_rings = 5
step = d_star_sq_max / n_rings
spacings = flex.double(
[uctbx.d_star_sq_as_d((i + 1) * step) for i in range(0, n_rings)]
)
wavelength = beam.get_wavelength()
twotheta = uctbx.d_star_sq_as_two_theta(
uctbx.d_as_d_star_sq(spacings), wavelength
)
# Get beam vector and two orthogonal vectors
beamvec = matrix.col(beam.get_s0())
bor1 = beamvec.ortho()
bor2 = beamvec.cross(bor1)
# For non-coplanar detectors use a polygon method rather than ellipses
if detector.has_projection_2d():
return self._draw_resolution_polygons(
twotheta,
spacings,
beamvec,
bor1,
bor2,
detector,
unit_cell,
space_group,
)
resolution_text_data = []
ring_data = []
# FIXME Currently assuming that all panels are in same plane
p_id = detector.get_panel_intersection(beam.get_s0())
if p_id == -1:
# XXX beam doesn't intersect with any panels - is there a better solution?
p_id = 0
pan = detector[p_id]
for tt, d in zip(twotheta, spacings):
try:
# Find 4 rays for given d spacing / two theta angle
cb1 = beamvec.rotate_around_origin(axis=bor1, angle=tt)
cb2 = beamvec.rotate_around_origin(axis=bor1, angle=-tt)
cb3 = beamvec.rotate_around_origin(axis=bor2, angle=tt)
cb4 = beamvec.rotate_around_origin(axis=bor2, angle=-tt)
# Find intersection points with panel plane
dp1 = pan.get_ray_intersection_px(cb1)
dp2 = pan.get_ray_intersection_px(cb2)
dp3 = pan.get_ray_intersection_px(cb3)
dp4 = pan.get_ray_intersection_px(cb4)
# If all four points are in positive beam direction, draw an ellipse.
# Otherwise it's a hyperbola (not implemented yet)
except RuntimeError:
continue
# find ellipse centre, the only point equidistant to each axial pair
xs1 = dp1[0] + dp2[0]
xs2 = dp3[0] + dp4[0]
ys1 = dp1[1] + dp2[1]
ys2 = dp3[1] + dp4[1]
xd1 = dp2[0] - dp1[0]
xd2 = dp4[0] - dp3[0]
yd1 = dp1[1] - dp2[1]
yd2 = dp3[1] - dp4[1]
if abs(xd1) < 0.00001:
cy = ys1 / 2
elif abs(xd2) < 0.00001:
cy = ys2 / 2
else:
t2 = (xs1 - xs2 + (ys2 - ys1) * yd1 / xd1) / (yd2 - xd2 * yd1 / xd1)
t1 = (ys2 + t2 * xd2 - ys1) / xd1
cy = (ys1 + t1 * xd1) / 2
assert abs(cy - (ys2 + t2 * xd2) / 2) < 0.1
if abs(yd1) < 0.00001:
cx = xs1 / 2
elif abs(yd2) < 0.00001:
cx = xs2 / 2
else:
t2 = (xs1 - xs2 + (ys2 - ys1) * yd1 / xd1) / (yd2 - xd2 * yd1 / xd1)
t1 = (ys2 + t2 * xd2 - ys1) / xd1
cx = (xs1 + t1 * yd1) / 2
assert abs(cx - (xs2 + t2 * yd2) / 2) < 0.1
centre = self.pyslip.tiles.flex_image.tile_readout_to_picture(p_id, cy, cx)[
::-1
]
dp1 = self.pyslip.tiles.flex_image.tile_readout_to_picture(
p_id, dp1[1], dp1[0]
)[::-1]
dp3 = self.pyslip.tiles.flex_image.tile_readout_to_picture(
p_id, dp3[1], dp3[0]
)[::-1]
# translate ellipse centre and four points to map coordinates
centre = self.pyslip.tiles.picture_fast_slow_to_map_relative(*centre)
dp1 = self.pyslip.tiles.picture_fast_slow_to_map_relative(dp1[0], dp1[1])
dp3 = self.pyslip.tiles.picture_fast_slow_to_map_relative(dp3[0], dp3[1])
# Determine eccentricity, cf. https://en.wikipedia.org/wiki/Eccentricity_(mathematics)
ecc = math.sin(matrix.col(pan.get_normal()).angle(beamvec)) / math.sin(
math.pi / 2 - tt
)
# Assuming that one detector axis is aligned with a major axis of
# the ellipse, obtain the semimajor axis length a to calculate the
# semiminor axis length b using the eccentricity ecc.
ldp1 = math.hypot(dp1[0] - centre[0], dp1[1] - centre[1])
ldp3 = math.hypot(dp3[0] - centre[0], dp3[1] - centre[1])
if ldp1 >= ldp3:
major = dp1
a = ldp1
else:
major = dp3
a = ldp3
b = math.sqrt(a * a * (1 - (ecc * ecc)))
# since e = f / a and f = sqrt(a^2 - b^2), cf. https://en.wikipedia.org/wiki/Ellipse
# calculate co-vertex
minor = (
matrix.col([dp1[1] - centre[1], centre[0] - dp1[0]]).normalize() * b
)
minor = (minor[0] + centre[0], minor[1] + centre[1])
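# Worked check of the relations above: e = f/a with f = sqrt(a^2 - b^2)
# rearranges to b = a*sqrt(1 - e^2), so e.g. a = 100 px and ecc = 0.6 give
# b = 100 * sqrt(0.64) = 80 px for the co-vertex distance from the centre.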
p = (centre, major, minor)
ring_data.append(
(
p,
self.pyslip.DefaultPolygonPlacement,
self.pyslip.DefaultPolygonWidth,
"red",
True,
self.pyslip.DefaultPolygonFilled,
self.pyslip.DefaultPolygonFillcolour,
self.pyslip.DefaultPolygonOffsetX,
self.pyslip.DefaultPolygonOffsetY,
None,
)
)
if unit_cell is None and space_group is None:
for angle in (45, 135, 225, 315):
txtvec = cb1.rotate_around_origin(
axis=beamvec, angle=math.radians(angle)
)
txtpos = pan.get_ray_intersection_px(txtvec)
txtpos = self.pyslip.tiles.flex_image.tile_readout_to_picture(
p_id, txtpos[1], txtpos[0]
)[::-1]
x, y = self.pyslip.tiles.picture_fast_slow_to_map_relative(
txtpos[0], txtpos[1]
)
resolution_text_data.append(
(
x,
y,
f"{d:.2f}",
{
"placement": "cc",
"colour": "red",
},
)
)
# XXX Transparency?
# Remove the old ring layer, and draw a new one.
if hasattr(self, "_ring_layer") and self._ring_layer is not None:
self.pyslip.DeleteLayer(self._ring_layer, update=False)
self._ring_layer = None
if ring_data:
self._ring_layer = self.pyslip.AddLayer(
self.pyslip.DrawLightweightEllipticalSpline,
ring_data,
True,
True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
type=self.pyslip.TypeEllipse,
name="<ring_layer>",
update=False,
)
# Remove the old resolution text layer, and draw a new one.
if (
hasattr(self, "_resolution_text_layer")
and self._resolution_text_layer is not None
):
self.pyslip.DeleteLayer(self._resolution_text_layer, update=False)
self._resolution_text_layer = None
if resolution_text_data:
self._resolution_text_layer = self.pyslip.AddTextLayer(
resolution_text_data,
map_rel=True,
visible=True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
name="<resolution_text_layer>",
fontsize=self.settings.fontsize,
textcolour=self._choose_text_colour(),
update=False,
)
def stack_images(self):
mode = self.params.stack_mode
if self.params.stack_images > 1:
self.settings.display = "image"
image = self.pyslip.tiles.raw_image
image_data = image.get_image_data()
if not isinstance(image_data, tuple):
image_data = (image_data,)
i_frame = self.image_chooser.GetClientData(
self.image_chooser.GetSelection()
).index
imageset = self.images.selected.image_set
for i in range(1, self.params.stack_images):
if (i_frame + i) >= len(imageset):
break
image_data_i = imageset[i_frame + i]
for j, rd in enumerate(image_data):
data = image_data_i[j]
if mode == "max":
sel = data > rd
rd = rd.as_1d().set_selected(sel.as_1d(), data.as_1d())
else:
rd += data
# Divide by stack_images to put on a consistent scale with a single
# image, so that sentinel values such as -1 are handled correctly (mean mode)
if mode == "mean":
image_data = tuple(i / self.params.stack_images for i in image_data)
# Don't show summed images with overloads
self.pyslip.tiles.set_image_data(image_data, show_saturated=False)
self.pyslip.ZoomToLevel(self.pyslip.tiles.zoom_level)
self.update_statusbar() # XXX Not always working?
self.Layout()
def get_image_data(self, image):
image.set_image_data(None)
if self.settings.display == "image":
if self.settings.image_type == "corrected":
image_data = image.get_image_data()
else:
image_data = image.get_image_data(corrected=False)
if isinstance(image_data, tuple):
image_data = tuple(id.as_double() for id in image_data)
else:
image_data = (image_data.as_double(),)
else:
dispersion_debug_list = self._calculate_dispersion_debug(image)
if self.settings.display == "mean":
mean = [dispersion.mean() for dispersion in dispersion_debug_list]
image_data = mean
elif self.settings.display == "variance":
variance = [
dispersion.variance() for dispersion in dispersion_debug_list
]
image_data = variance
elif self.settings.display == "dispersion":
cv = [
dispersion.index_of_dispersion()
for dispersion in dispersion_debug_list
]
image_data = cv
elif self.settings.display == "sigma_b":
cv = [
dispersion.index_of_dispersion()
for dispersion in dispersion_debug_list
]
cv_mask = [dispersion.cv_mask() for dispersion in dispersion_debug_list]
cv_mask = [mask.as_1d().as_double() for mask in cv_mask]
for i, mask in enumerate(cv_mask):
mask.reshape(cv[i].accessor())
image_data = cv_mask
elif self.settings.display == "sigma_s":
cv = [
dispersion.index_of_dispersion()
for dispersion in dispersion_debug_list
]
value_mask = [
dispersion.value_mask() for dispersion in dispersion_debug_list
]
value_mask = [mask.as_1d().as_double() for mask in value_mask]
for i, mask in enumerate(value_mask):
mask.reshape(cv[i].accessor())
image_data = value_mask
elif self.settings.display == "global":
cv = [
dispersion.index_of_dispersion()
for dispersion in dispersion_debug_list
]
global_mask = [
dispersion.global_mask() for dispersion in dispersion_debug_list
]
global_mask = [mask.as_1d().as_double() for mask in global_mask]
for i, mask in enumerate(global_mask):
mask.reshape(cv[i].accessor())
image_data = global_mask
elif self.settings.display == "threshold":
cv = [
dispersion.index_of_dispersion()
for dispersion in dispersion_debug_list
]
final_mask = [
dispersion.final_mask() for dispersion in dispersion_debug_list
]
final_mask = [mask.as_1d().as_double() for mask in final_mask]
for i, mask in enumerate(final_mask):
mask.reshape(cv[i].accessor())
image_data = final_mask
if self.settings.display in ("sigma_b", "sigma_s", "global", "threshold"):
image_data = (500 * d for d in image_data)
image_data = tuple(image_data)
if self.params.show_mask:
self.mask_image_data(image_data)
return image_data
def _calculate_dispersion_debug(self, image):
# hash current settings
dispersion_debug_list_hash = hash(
(
image.index,
self.images.selected.image_set,
self.settings.gain,
self.settings.nsigma_b,
self.settings.nsigma_s,
self.settings.global_threshold,
self.settings.min_local,
tuple(self.settings.kernel_size),
self.settings.threshold_algorithm,
self.settings.n_iqr,
self.settings.blur,
self.settings.n_bins,
)
)
# compare current settings to last calculation of "dispersion debug" list
if dispersion_debug_list_hash == self._dispersion_debug_list_hash:
return self._dispersion_debug_list
detector = image.get_detector()
image_mask = self.get_mask(image)
image_data = image.get_image_data()
assert self.settings.gain > 0
gain_map = [
flex.double(image_data[i].accessor(), self.settings.gain)
for i in range(len(detector))
]
if self.settings.threshold_algorithm == "dispersion_extended":
algorithm = DispersionExtendedThresholdDebug
elif self.settings.threshold_algorithm == "dispersion":
algorithm = DispersionThresholdDebug
else:
algorithm = RadialProfileThresholdDebug(
image, self.settings.n_iqr, self.settings.blur, self.settings.n_bins
)
dispersion_debug_list = []
for i_panel in range(len(detector)):
dispersion_debug_list.append(
algorithm(
image_data[i_panel].as_double(),
image_mask[i_panel],
gain_map[i_panel],
self.settings.kernel_size,
self.settings.nsigma_b,
self.settings.nsigma_s,
self.settings.global_threshold,
self.settings.min_local,
)
)
self._dispersion_debug_list = dispersion_debug_list
self._dispersion_debug_list_hash = dispersion_debug_list_hash
return dispersion_debug_list
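# The method above is a settings-keyed memoisation: the expensive per-panel
# debug objects are recomputed only when an input that affects the result
# changes. A generic sketch of the same pattern (names are illustrative):
#
#   def cached_compute(self, *inputs):
#       key = hash(inputs)
#       if key != self._cache_key:
#           self._cache_value = expensive_compute(*inputs)  # hypothetical
#           self._cache_key = key
#       return self._cache_value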
def show_filters(self):
image_data = self.get_image_data(self.pyslip.tiles.raw_image)
show_saturated = (
self.settings.display == "image" and self.settings.image_type == "corrected"
)
self.pyslip.tiles.set_image_data(image_data, show_saturated)
self.pyslip.ZoomToLevel(self.pyslip.tiles.zoom_level)
self.update_statusbar() # XXX Not always working?
self.Layout()
def update_settings(self, layout=True):
# super(SpotFrame, self).update_settings(layout=layout)
new_brightness = self.settings.brightness
new_color_scheme = self.settings.color_scheme
if (
new_brightness != self.pyslip.tiles.current_brightness
or new_color_scheme != self.pyslip.tiles.current_color_scheme
):
self.pyslip.tiles.update_brightness(new_brightness, new_color_scheme)
detector = self.pyslip.tiles.raw_image.get_detector()
detector.projection = self.params.projection
if self.settings.show_beam_center:
if self.beam_layer is None and hasattr(self, "beam_center_cross_data"):
self.beam_layer = self.pyslip.AddPolygonLayer(
self.beam_center_cross_data,
name="<beam_layer>",
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
elif self.beam_layer is not None:
self.pyslip.DeleteLayer(self.beam_layer, update=False)
self.beam_layer = None
if self.settings.show_dials_spotfinder_spots:
spotfinder_data = self.get_spotfinder_data()
shoebox_data = spotfinder_data.shoebox_data
all_pix_data = spotfinder_data.all_pix_data
all_foreground_circles = spotfinder_data.all_foreground_circles
ctr_mass_data = spotfinder_data.ctr_mass_data
max_pix_data = spotfinder_data.max_pix_data
predictions_data = spotfinder_data.predictions_data
miller_indices_data = spotfinder_data.miller_indices_data
vector_data = spotfinder_data.vector_data
vector_text_data = spotfinder_data.vector_text_data
if len(self.dials_spotfinder_layers) > 0:
for layer in self.dials_spotfinder_layers:
self.pyslip.DeleteLayer(layer, update=False)
self.dials_spotfinder_layers = []
if self.shoebox_layer is not None:
self.pyslip.DeleteLayer(self.shoebox_layer, update=False)
self.shoebox_layer = None
if self.ctr_mass_layer is not None:
self.pyslip.DeleteLayer(self.ctr_mass_layer, update=False)
self.ctr_mass_layer = None
if self.max_pix_layer is not None:
self.pyslip.DeleteLayer(self.max_pix_layer, update=False)
self.max_pix_layer = None
if self.predictions_layer is not None:
self.pyslip.DeleteLayer(self.predictions_layer, update=False)
self.predictions_layer = None
if self.miller_indices_layer is not None:
self.pyslip.DeleteLayer(self.miller_indices_layer, update=False)
self.miller_indices_layer = None
if self.vector_layer is not None:
self.pyslip.DeleteLayer(self.vector_layer, update=False)
self.vector_layer = None
if self.vector_text_layer is not None:
self.pyslip.DeleteLayer(self.vector_text_layer, update=False)
self.vector_text_layer = None
if self._ring_layer is not None:
self.pyslip.DeleteLayer(self._ring_layer, update=False)
self._ring_layer = None
if self._resolution_text_layer is not None:
self.pyslip.DeleteLayer(self._resolution_text_layer, update=False)
self._resolution_text_layer = None
if self.settings.show_miller_indices and len(miller_indices_data):
self.miller_indices_layer = self.pyslip.AddTextLayer(
miller_indices_data,
map_rel=True,
visible=True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
fontsize=self.settings.fontsize,
textcolour=self._choose_text_colour(),
name="<miller_indices_layer>",
update=False,
)
if self.settings.show_predictions and len(predictions_data):
self.predictions_layer = self.pyslip.AddPointLayer(
predictions_data,
name="<predictions_layer>",
radius=3,
renderer=self.pyslip.DrawPointLayer,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
if self.settings.show_all_pix:
if len(all_pix_data) > 1:
if not self.display_foreground_circles_patch:
for key, value in all_pix_data.items():
base_color = self.prediction_colours[key][1:]
# dim the color so it stands apart from the prediction
r = base_color[0:2]
g = base_color[2:4]
b = base_color[4:6]
r = max(int(r, 16) - int("50", 16), 0)
g = max(int(g, 16) - int("50", 16), 0)
b = max(int(b, 16) - int("50", 16), 0)
color = f"#{r:02x}{g:02x}{b:02x}"
self.dials_spotfinder_layers.append(
self.pyslip.AddPointLayer(
value,
color=color,
name="<all_pix_layer_%d>" % key,
radius=2,
renderer=self.pyslip.LightweightDrawPointLayer2,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
)
else:
e1 = matrix.col((1.0, 0.0))
e2 = matrix.col((0.0, 1.0))
for key, value in all_foreground_circles.items():
base_color = self.prediction_colours[key][1:]
positions = [i["position"] for i in value]
good_radius = flex.mean(
flex.double([i["radius"] for i in value])
)
vertices = []
for model_center in positions:
for vertex in [
model_center + good_radius * (e1 + e2),
model_center + good_radius * (e1 - e2),
model_center + good_radius * (-e1 - e2),
model_center + good_radius * (-e1 + e2),
model_center + good_radius * (e1 + e2),
]:
vertices.append(vertex)
self.dials_spotfinder_layers.append(
self.pyslip.AddEllipseLayer(
vertices,
color=f"#{base_color}",
name="<all_foreground_circles_%d>" % key,
width=2,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
)
print(
"Circles: center of foreground masks for the %d spots actually integrated"
% (len(vertices) // 5)
)
else:
if len(all_pix_data) > 0:
self.dials_spotfinder_layers.append(
self.pyslip.AddPointLayer(
all_pix_data[list(all_pix_data.keys())[0]],
color="green",
name="<all_pix_layer>",
radius=2,
renderer=self.pyslip.LightweightDrawPointLayer2,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
)
if self.settings.show_shoebox and len(shoebox_data):
self.shoebox_layer = self.pyslip.AddPolygonLayer(
shoebox_data,
map_rel=True,
visible=True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
name="<shoebox_layer>",
update=False,
)
if self.settings.show_ctr_mass and len(ctr_mass_data):
self.ctr_mass_layer = self.pyslip.AddPolygonLayer(
ctr_mass_data,
map_rel=True,
visible=True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
name="<ctr_mass_layer>",
update=False,
)
if self.settings.show_max_pix and len(max_pix_data):
self.max_pix_layer = self.pyslip.AddPointLayer(
max_pix_data,
color="pink",
name="<max_pix_layer>",
radius=2,
renderer=self.pyslip.LightweightDrawPointLayer,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
if len(vector_data) and len(vector_text_data):
self.vector_layer = self.pyslip.AddPolygonLayer(
vector_data,
map_rel=True,
visible=True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
name="<vector_layer>",
update=False,
)
self.vector_text_layer = self.pyslip.AddTextLayer(
vector_text_data,
map_rel=True,
visible=True,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
selectable=False,
name="<vector_text_layer>",
textcolour=self._choose_text_colour(),
update=False,
fontsize=self.settings.fontsize,
)
self.stack_images()
# if self.params.stack_images == 1:
# self.show_filters()
if self.settings.show_threshold_pix:
image = self.pyslip.tiles.raw_image
dispersion_debug_list = self._calculate_dispersion_debug(image)
final_mask = [
dispersion.final_mask() for dispersion in dispersion_debug_list
]
value = []
for pnl, mask in enumerate(final_mask):
width = mask.all()[1]
idx = mask.iselection()
for i in idx:
y = i // width
x = i % width
y, x = self.pyslip.tiles.flex_image.tile_readout_to_picture(
pnl, y, x
)
value.append(
self.pyslip.tiles.picture_fast_slow_to_map_relative(x, y)
)
base_color = self.prediction_colours[0][1:]
# dim the color so it stands apart from the prediction
r = base_color[0:2]
g = base_color[2:4]
b = base_color[4:6]
r = max(int(r, 16) - int("50", 16), 0)
g = max(int(g, 16) - int("50", 16), 0)
b = max(int(b, 16) - int("50", 16), 0)
color = f"#{r:02x}{g:02x}{b:02x}"
self.dials_spotfinder_layers.append(
self.pyslip.AddPointLayer(
value,
color=color,
name="<thresh_pix_layer_0>",
radius=2,
renderer=self.pyslip.LightweightDrawPointLayer2,
show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
)
if self.settings.show_resolution_rings:
self.draw_resolution_rings()
elif self.settings.show_ice_rings:
unit_cell = self.settings.ice_rings.unit_cell
space_group = self.settings.ice_rings.space_group.group()
self.draw_resolution_rings(unit_cell=unit_cell, space_group=space_group)
self.drawUntrustedPolygons()
self.pyslip.Update()
def get_mask(self, image):
mask = image.get_mask()
if self.mask_input is not None:
for p1, p2 in zip(self.mask_input, mask):
p2 &= p1
if self.mask_image_viewer is not None:
for p1, p2 in zip(self.mask_image_viewer, mask):
p2 &= p1
assert mask is not None, "Mask should never be None here"
return mask
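# The in-place `p2 &= p1` above intersects the masks panel by panel, so a
# pixel survives only if it is valid in every contributing mask. A small
# sketch with scitbx flex arrays (assuming flex is importable here):
#
#   from scitbx.array_family import flex
#   a = flex.bool([True, True, False])
#   b = flex.bool([True, False, False])
#   a &= b
#   assert list(a) == [True, False, False]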
def mask_image_data(self, image_data):
mask = self.get_mask(self.pyslip.tiles.raw_image)
for rd, m in zip(image_data, mask):
rd.set_selected(~m, MASK_VAL)
def __get_imageset_filter(
self, reflections: flex.reflection_table, imageset: ImageSet
) -> Optional[flex.bool]:
"""Get a filter to ensure only reflections from an imageset.
This is not a well-defined problem, because unindexed reflections are
never assigned an experiment. The imageset_id column can be used to
disentangle this, but at integration this data is currently not copied,
which means reflections can only be separated reliably when there is a
single imageset.
Args:
reflections:
The reflections table to filter
imageset:
The imageset to filter reflections to
Returns:
The selection, or None if there is nothing to select.
"""
reflections_id = self.reflections.index(reflections)
experimentlist = self.experiments[reflections_id]
# If this imageset is not in this experiment, then skip
if imageset not in experimentlist.imagesets():
return None
if "imageset_id" in reflections:
# Only choose reflections that match this imageset
imageset_id = experimentlist.imagesets().index(imageset)
selection = reflections["imageset_id"] == imageset_id
elif self.have_one_imageset:
# If one imageset, no filtering is necessary
selection = flex.bool(len(reflections), True)
else:
# Fallback:
# Do filtering in a way that cannot handle complex unindexed reflections
# Get the experiment IDs of every experiment with this imageset
exp_ids = [
i for i, exp in enumerate(experimentlist) if exp.imageset == imageset
]
# No way to tell - don't show any unindexed
selection = flex.bool(len(reflections), False)
# OR together selections for all ids that have this imageset
for eid in exp_ids:
selection = selection | (reflections["id"] == eid)
return selection
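# A hedged sketch of the imageset_id path above (table contents are
# illustrative): reflections carrying an imageset_id column can be split
# cleanly,
#
#   imageset_id = experimentlist.imagesets().index(imageset)  # e.g. 1
#   selection = reflections["imageset_id"] == imageset_id     # flex.bool
#   subset = reflections.select(selection)
#
# whereas the experiment-id fallback necessarily drops unindexed
# reflections (id == -1), since they match no experiment.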
def map_coords(self, x, y, p):
"""Convert coordinates in pixel, pixel, panel to picture coordinates
required for correct positioning of overlays"""
y, x = self.pyslip.tiles.flex_image.tile_readout_to_picture(p, y - 0.5, x - 0.5)
return self.pyslip.tiles.picture_fast_slow_to_map_relative(x, y)
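# map_coords chains two conversions: (pixel x, pixel y, panel) -> picture
# coordinates (note the fast/slow axis swap and the half-pixel shift so
# that overlays address pixel centres) -> map-relative coordinates used by
# pyslip. For example, the centre of pixel (0, 0) on panel p passes through
# tile_readout_to_picture(p, -0.5, -0.5) before the final conversion.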
def _rotation_axis_overlay_data(self):
imageset = self.images.selected.image_set
detector = self.pyslip.tiles.raw_image.get_detector()
scan = imageset.get_scan()
beam = imageset.get_beam()
gonio = imageset.get_goniometer()
still = scan is None or gonio is None
if still:
return
axis = gonio.get_rotation_axis()
try:
panel, beam_centre = detector.get_ray_intersection(beam.get_s0())
except RuntimeError as e:
if "DXTBX_ASSERT(w_max > 0)" in str(e):
# direct beam didn't hit a panel
panel = 0
beam_centre = detector[panel].get_ray_intersection(beam.get_s0())
else:
raise
beam_x, beam_y = detector[panel].millimeter_to_pixel(beam_centre)
beam_x, beam_y = self.map_coords(beam_x, beam_y, panel)
# Find the plane containing the rotation axis and s0
normal = matrix.col(beam.get_unit_s0()).cross(matrix.col(axis))
# Find scattering angle at max inscribed resolution
d_min = detector.get_max_inscribed_resolution(beam.get_s0())
theta = math.asin(beam.get_wavelength() / (2.0 * d_min))
# Rotate s0 in the plane so as to point to the inscribed circle
# along the rotation axis
a = matrix.col(beam.get_s0()).rotate(normal, 2.0 * theta)
b = matrix.col(beam.get_s0()).rotate(normal, -2.0 * theta)
panel_a = detector.get_panel_intersection(a)
if panel_a < 0:
return
panel_b = detector.get_panel_intersection(b)
if panel_b < 0:
return
x_a, y_a = detector[panel_a].get_ray_intersection_px(a)
x_a, y_a = self.map_coords(x_a, y_a, panel_a)
x_b, y_b = detector[panel_b].get_ray_intersection_px(b)
x_b, y_b = self.map_coords(x_b, y_b, panel_b)
result = []
result.append(
(
((x_b, y_b), (x_a, y_a)),
{"width": 4, "color": "#1776f6", "closed": False},
)
)
result.append(
(
x_a,
y_a,
"axis",
{
"placement": "ne",
"fontsize": self.settings.fontsize,
"textcolor": "#1776f6",
},
)
)
return result
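# The scattering angle above follows Bragg's law: λ = 2d·sin(θ), so
# θ = asin(λ / (2·d_min)) and the drawn rays are s0 rotated by ±2θ.
# E.g. λ = 1.0 Å and d_min = 2.0 Å give θ = asin(0.25) ≈ 14.5°,
# i.e. 2θ ≈ 29°.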
def _reflection_overlay_data(self, i_frame):
fg_code = MaskCode.Valid | MaskCode.Foreground
strong_code = MaskCode.Valid | MaskCode.Strong
shoebox_dict = {"width": 2, "color": "#0000FFA0", "closed": False}
ctr_mass_dict = {"width": 2, "color": "#FF0000", "closed": False}
shoebox_data = []
all_pix_data = {}
all_foreground_circles = {}
overlapped_data = []
ctr_mass_data = []
max_pix_data = []
predictions_data = []
miller_indices_data = []
for ref_list_id, ref_list in enumerate(self.reflections):
if self.viewing_stills and ref_list_id != i_frame:
continue
# If we have more than one imageset, then we could be on the wrong one
if not self.have_one_imageset:
exp_filter = self.__get_imageset_filter(
ref_list, self.images.selected.image_set
)
if exp_filter is None:
continue
ref_list = ref_list.select(exp_filter)
if self.settings.show_indexed:
indexed_sel = ref_list.get_flags(ref_list.flags.indexed, all=False)
ref_list = ref_list.select(indexed_sel)
if self.settings.show_integrated:
integrated_sel = ref_list.get_flags(
ref_list.flags.integrated, all=False
)
ref_list = ref_list.select(integrated_sel)
# Fast-fail if there's no reflections after filtering
if len(ref_list) == 0:
continue
if "bbox" in ref_list:
bbox = ref_list["bbox"]
x0, x1, y0, y1, z0, z1 = bbox.parts()
# ticket #107
n = self.params.stack_images - 1
if self.viewing_stills:
selected = ref_list
else:
bbox_sel = ~((i_frame >= z1) | ((i_frame + n) < z0))
selected = ref_list.select(bbox_sel)
for reflection in selected.rows():
x0, x1, y0, y1, z0, z1 = reflection["bbox"]
panel = reflection["panel"]
nx = x1 - x0 # size of reflection box in x-direction
ny = y1 - y0 # size of reflection box in y-direction
# nz = z1 - z0 # number of frames this spot appears on
if (
self.settings.show_all_pix
and "shoebox" in reflection
and reflection["shoebox"].mask.size() > 0
and n == 0
):
shoebox = reflection["shoebox"]
iz = i_frame - z0 if not self.viewing_stills else 0
if not reflection["id"] in all_pix_data:
all_pix_data[reflection["id"]] = []
all_foreground_circles[reflection["id"]] = []
this_spot_foreground_pixels = []
for ix in range(nx):
for iy in range(ny):
mask_value = shoebox.mask[iz, iy, ix]
if (mask_value == strong_code) or (
mask_value == fg_code
):
x_, y_ = self.map_coords(
ix + x0 + 0.5, iy + y0 + 0.5, panel
)
this_spot_foreground_pixels.append(
matrix.col((x_, y_))
)
if len(all_pix_data) > 1:
# look for overlapped pixels
found_it = False
for key, value in all_pix_data.items():
if (x_, y_) in value:
value.pop(value.index((x_, y_)))
found_it = True
if found_it:
overlapped_data.append((x_, y_))
else:
all_pix_data[reflection["id"]].append(
(x_, y_)
)
else:
all_pix_data[reflection["id"]].append((x_, y_))
if (
self.display_foreground_circles_patch
and len(this_spot_foreground_pixels) > 1
):
per_spot_mean = matrix.col((0.0, 0.0))
for pxl in this_spot_foreground_pixels:
per_spot_mean += pxl
per_spot_mean /= len(this_spot_foreground_pixels)
all_foreground_circles[reflection["id"]].append(
{
"position": per_spot_mean,
"radius": max(
[
(t - per_spot_mean).length()
for t in this_spot_foreground_pixels
]
),
}
)
if self.settings.show_shoebox:
x0y0 = self.map_coords(x0, y0, panel)
x0y1 = self.map_coords(x0, y1, panel)
x1y0 = self.map_coords(x1, y0, panel)
x1y1 = self.map_coords(x1, y1, panel)
# Change shoebox colour depending on index id
my_attrs = dict(shoebox_dict)
# Reflections with *only* strong set should get default
if not (reflection["flags"] == ref_list.flags.strong):
my_attrs["color"] = self.prediction_colours[
reflection["id"]
]
lines = [
((x0y0, x0y1), my_attrs),
((x0y1, x1y1), my_attrs),
((x1y1, x1y0), my_attrs),
((x1y0, x0y0), my_attrs),
]
shoebox_data.extend(lines)
if (
self.settings.show_max_pix
and "shoebox" in reflection
and reflection["shoebox"].data.size() > 0
):
shoebox = reflection["shoebox"].data
offset = flex.max_index(shoebox)
offset, k = divmod(offset, shoebox.all()[2])
offset, j = divmod(offset, shoebox.all()[1])
offset, i = divmod(offset, shoebox.all()[0])
max_index = (i, j, k)
if z0 + max_index[0] == i_frame or self.viewing_stills:
x, y = self.map_coords(
x0 + max_index[2] + 0.5,
y0 + max_index[1] + 0.5,
reflection["panel"],
)
max_pix_data.append((x, y))
if self.settings.show_ctr_mass and "xyzobs.px.value" in reflection:
centroid = reflection["xyzobs.px.value"]
# ticket #107
if self.viewing_stills or (
i_frame
<= centroid[2]
<= (i_frame + self.params.stack_images)
):
x, y = self.map_coords(
centroid[0], centroid[1], reflection["panel"]
)
xm1, ym1 = self.map_coords(
centroid[0] - 1, centroid[1] - 1, reflection["panel"]
)
xp1, yp1 = self.map_coords(
centroid[0] + 1, centroid[1] + 1, reflection["panel"]
)
lines = [
(((x, ym1), (x, yp1)), ctr_mass_dict),
(((xm1, y), (xp1, y)), ctr_mass_dict),
]
ctr_mass_data.extend(lines)
if ("xyzcal.px" in ref_list or "xyzcal.mm" in ref_list) and (
self.settings.show_predictions
or (self.settings.show_miller_indices and "miller_index" in ref_list)
):
if "xyzcal.px" in ref_list:
frame_numbers = ref_list["xyzcal.px"].parts()[2]
else:
phi = ref_list["xyzcal.mm"].parts()[2]
scan = self.pyslip.tiles.raw_image.get_scan()
frame_numbers = scan.get_array_index_from_angle(math.degrees(phi))
n = self.params.stack_images
for i_expt in range(flex.max(ref_list["id"]) + 1):
expt_sel = ref_list["id"] == i_expt
frame_predictions_sel = (frame_numbers >= (i_frame)) & (
frame_numbers < (i_frame + n)
)
sel = expt_sel
if not self.viewing_stills:
sel = frame_predictions_sel & expt_sel
selected = ref_list.select(sel)
for reflection in selected.rows():
if (
self.settings.show_predictions
or self.settings.show_miller_indices
):
x = None
if "xyzcal.px" in reflection:
x, y = self.map_coords(
reflection["xyzcal.px"][0],
reflection["xyzcal.px"][1],
reflection["panel"],
)
elif "xyzcal.mm" in reflection:
detector = self.pyslip.tiles.raw_image.get_detector()
x, y = detector[
reflection["panel"]
].millimeter_to_pixel(reflection["xyzcal.mm"][:2])
x, y = self.map_coords(x, y, reflection["panel"])
if x is None:
continue
if self.settings.show_predictions:
predictions_data.append(
(x, y, {"colour": self.prediction_colours[i_expt]})
)
if (
self.settings.show_miller_indices
and "miller_index" in reflection
and reflection["miller_index"] != (0, 0, 0)
):
miller_indices_data.append(
(
x,
y,
str(reflection["miller_index"]),
{
"placement": "ne",
"radius": 0,
},
)
)
if len(overlapped_data) > 0:
# show overlapped pixels in a different color
all_pix_data[max(all_pix_data.keys()) + 1] = overlapped_data
return {
"shoebox_data": shoebox_data,
"all_pix_data": all_pix_data,
"all_foreground_circles": all_foreground_circles,
"overlapped_data": overlapped_data,
"ctr_mass_data": ctr_mass_data,
"max_pix_data": max_pix_data,
"predictions_data": predictions_data,
"miller_indices_data": miller_indices_data,
}
def _basis_vector_overlay_data(self, i_expt, i_frame, experiment):
imageset = self.images.selected.image_set
detector = self.pyslip.tiles.raw_image.get_detector()
crystal_model = experiment.crystal
cs = crystal.symmetry(
unit_cell=crystal_model.get_unit_cell(),
space_group=crystal_model.get_space_group(),
)
cb_op = cs.change_of_basis_op_to_reference_setting()
crystal_model = crystal_model.change_basis(cb_op)
A = matrix.sqr(crystal_model.get_A())
scan = imageset.get_scan()
beam = imageset.get_beam()
gonio = imageset.get_goniometer()
still = scan is None or gonio is None
if not still:
phi = scan.get_angle_from_array_index(
i_frame - imageset.get_array_range()[0], deg=True
)
axis = matrix.col(imageset.get_goniometer().get_rotation_axis())
try:
panel, beam_centre = detector.get_ray_intersection(beam.get_s0())
except RuntimeError as e:
if "DXTBX_ASSERT(w_max > 0)" in str(e):
# direct beam didn't hit a panel
panel = 0
beam_centre = detector[panel].get_ray_intersection(beam.get_s0())
else:
raise
beam_x, beam_y = detector[panel].millimeter_to_pixel(beam_centre)
beam_x, beam_y = self.map_coords(beam_x, beam_y, panel)
vector_data = []
label_data = []
for i, h in enumerate(((1, 0, 0), (0, 1, 0), (0, 0, 1))):
r = A * matrix.col(h) * self.settings.basis_vector_scale
if still:
s1 = matrix.col(beam.get_s0()) + r
else:
r_phi = r.rotate_around_origin(axis, phi, deg=True)
s1 = matrix.col(beam.get_s0()) + r_phi
panel = detector.get_panel_intersection(s1)
if panel < 0:
continue
x, y = detector[panel].get_ray_intersection_px(s1)
x, y = self.map_coords(x, y, panel)
vector_data.append(
(
((beam_x, beam_y), (x, y)),
{
"width": 4,
"color": self.prediction_colours[i_expt],
"closed": False,
},
),
)
label_data.append(
(
x,
y,
("a*", "b*", "c*")[i],
{
"placement": "ne",
"fontsize": self.settings.fontsize,
"textcolor": self.prediction_colours[i_expt],
},
)
)
return vector_data, label_data
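# For each reciprocal basis vector the method computes r = A·h (h one of
# the unit Miller triples), rotates it to the current frame's goniometer
# angle for rotation data, and intersects s1 = s0 + r with the detector,
# mirroring how a reflection at h would be projected. The basis_vector_scale
# setting only lengthens the drawn arrow; it does not change its direction.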
def get_spotfinder_data(self):
self.prediction_colours = [
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00",
"#ffff33",
"#a65628",
"#f781bf",
"#999999",
] * 10
if self.viewing_stills:
i_frame = self.images.selected_index  # NOTE the underscore: selected_index, not selected.index
else:
i_frame = self.images.selected.index
imageset = self.images.selected.image_set
if imageset.get_scan() is not None:
i_frame += imageset.get_scan().get_array_range()[0]
refl_data = self._reflection_overlay_data(i_frame)
vector_data = []
vector_text_data = []
if self.settings.show_rotation_axis:
axis_data = self._rotation_axis_overlay_data()
if axis_data:
vector_data.append(axis_data[0])
vector_text_data.append(axis_data[1])
if (
self.settings.show_basis_vectors
and self.crystals is not None
and self.crystals[0] is not None
):
for experiments in self.experiments:
for i_expt, experiment in enumerate(experiments):
if experiment.imageset != imageset:
continue
basis_vector_data = self._basis_vector_overlay_data(
i_expt, i_frame, experiment
)
vector_data.extend(basis_vector_data[0])
vector_text_data.extend(basis_vector_data[1])
return SpotfinderData(
all_pix_data=refl_data["all_pix_data"],
all_foreground_circles=refl_data["all_foreground_circles"],
shoebox_data=refl_data["shoebox_data"],
ctr_mass_data=refl_data["ctr_mass_data"],
max_pix_data=refl_data["max_pix_data"],
predictions_data=refl_data["predictions_data"],
miller_indices_data=refl_data["miller_indices_data"],
vector_data=vector_data,
vector_text_data=vector_text_data,
)
def get_detector(self):
return self.imagesets[0].get_detector()
def get_beam(self):
return self.imagesets[0].get_beam()
def predict(self):
predicted_all = []
for experiments in self.experiments:
this_predicted = flex.reflection_table()
for i_expt, expt in enumerate(experiments):
# Populate the reflection table with predictions
params = self.params.prediction
predicted = flex.reflection_table.from_predictions(
expt, force_static=params.force_static, dmin=params.d_min
)
predicted["id"] = flex.int(len(predicted), i_expt)
if expt.profile is not None:
expt.profile.params = self.params.profile
try:
predicted.compute_bbox(ExperimentList([expt]))
except Exception:
pass
this_predicted.extend(predicted)
predicted_all.append(this_predicted)
return predicted_all
def OnZeroMQEvent(self, event):
message = event.message
print("ZMQ Event received by gui:", message)
try:
if message["command"] == "load_image":
filename = message["image"]
self.load_image(filename)
except Exception:
print("Error parsing zeromq message")
raise
class SpotSettingsFrame(wx.MiniFrame):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
self.settings = self.GetParent().settings
self.params = self.GetParent().params
szr = wx.BoxSizer(wx.VERTICAL)
panel = SpotSettingsPanel(self, -1)
self.SetSizer(szr)
szr.Add(panel, 1, wx.EXPAND)
szr.Fit(panel)
self.panel = panel
self.sizer = szr
self.Fit()
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
def OnDestroy(self, event):
# Allow the panel to cleanup when destroying the Frame
self.panel.OnDestroy(event)
class SpotSettingsPanel(wx.Panel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.settings = self.GetParent().settings
self.params = self.GetParent().params
# CONTROLS 4: additional settings for derived class
self.settings.image_type = "corrected"
self.settings.brightness = self.params.brightness
self.settings.color_scheme = self.params.color_scheme
self.settings.projection = self.params.projection
self.settings.show_spotfinder_spots = False
self.settings.show_dials_spotfinder_spots = True
self.settings.show_resolution_rings = self.params.show_resolution_rings
self.settings.untrusted = self.params.masking.untrusted
self.settings.show_ice_rings = self.params.show_ice_rings
self.settings.ice_rings = self.params.masking.ice_rings
self.settings.show_ctr_mass = self.params.show_ctr_mass
self.settings.show_max_pix = self.params.show_max_pix
self.settings.show_all_pix = self.params.show_all_pix
self.settings.show_threshold_pix = self.params.show_threshold_pix
self.settings.show_shoebox = self.params.show_shoebox
self.settings.show_indexed = self.params.show_indexed
self.settings.show_integrated = self.params.show_integrated
self.settings.show_predictions = self.params.show_predictions
self.settings.show_miller_indices = self.params.show_miller_indices
self.settings.fontsize = 10
self.settings.basis_vector_scale = self.params.basis_vector_scale
self.settings.show_mask = self.params.show_mask
self.settings.show_basis_vectors = self.params.show_basis_vectors
self.settings.show_rotation_axis = self.params.show_rotation_axis
self.settings.display = self.params.display
if self.settings.display == "global_threshold":
self.settings.display = "global"
self.settings.threshold_algorithm = "dispersion_extended"
self.settings.nsigma_b = self.params.nsigma_b
self.settings.nsigma_s = self.params.nsigma_s
self.settings.global_threshold = self.params.global_threshold
self.settings.kernel_size = self.params.kernel_size
self.settings.min_local = self.params.min_local
self.settings.gain = self.params.gain
self.settings.n_iqr = self.params.n_iqr
self.settings.blur = self.params.blur
self.settings.n_bins = self.params.n_bins
self.settings.find_spots_phil = "find_spots.phil"
self._sizer = wx.BoxSizer(wx.VERTICAL)
s = self._sizer
self.SetSizer(self._sizer)
grid = wx.FlexGridSizer(cols=2, rows=3, vgap=0, hgap=0)
s.Add(grid)
txt1 = wx.StaticText(self, -1, "Zoom level:")
grid.Add(txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.levels = self.GetParent().GetParent().pyslip.tiles.levels
# from scitbx.math import continued_fraction as cf
# choices = ["%s" %(cf.from_real(2**l).as_rational()) for l in self.levels]
choices = [f"{100 * 2 ** l:g}%" for l in self.levels]
self.zoom_ctrl = wx.Choice(self, -1, choices=choices)
self.zoom_ctrl.SetSelection(self.settings.zoom_level)
grid.Add(self.zoom_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
txt11 = wx.StaticText(self, -1, "Color scheme:")
grid.Add(txt11, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
color_schemes = ["grayscale", "rainbow", "heatmap", "invert"]
self.color_ctrl = wx.Choice(self, -1, choices=color_schemes)
self.color_ctrl.SetSelection(color_schemes.index(self.params.color_scheme))
grid.Add(self.color_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self._sizer.Fit(self)
txt12 = wx.StaticText(self, -1, "Projection:")
projection_choices = ["lab", "image"]
self.projection_ctrl = wx.Choice(self, -1, choices=projection_choices)
if self.params.projection is None:
self.projection_ctrl.SetSelection(1)
self.projection_ctrl.Enable(False)
txt12.Enable(False)
else:
self.projection_ctrl.SetSelection(
projection_choices.index(self.params.projection)
)
grid.Add(txt12, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
grid.Add(self.projection_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self._sizer.Fit(self)
box = wx.BoxSizer(wx.HORIZONTAL)
s.Add(box)
grid = wx.FlexGridSizer(cols=1, rows=2, vgap=0, hgap=0)
box.Add(grid)
txt2 = wx.StaticText(self, -1, "Brightness:")
grid.Add(txt2, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Add a textual brightness control
self.brightness_txt_ctrl = IntCtrl(
self,
value=self.settings.brightness,
min=1,
max=1000,
name="brightness",
style=wx.TE_PROCESS_ENTER,
)
grid.Add(self.brightness_txt_ctrl, 0, wx.ALL, 5)
# Add a slider brightness control
self.brightness_ctrl = wx.Slider(
self, -1, size=(150, -1), style=wx.SL_AUTOTICKS | wx.SL_LABELS
)
self.brightness_ctrl.SetMin(1)
self.brightness_ctrl.SetMax(1000)
self.brightness_ctrl.SetValue(self.settings.brightness)
self.brightness_ctrl.SetTickFreq(25)
box.Add(self.brightness_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
grid = wx.FlexGridSizer(cols=2, rows=2, vgap=0, hgap=0)
s.Add(grid)
# Font size control
txt = wx.StaticText(self, -1, "Font size:")
grid.Add(txt, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        # Add a textual font size control
self.fontsize_ctrl = IntCtrl(
self,
value=self.settings.fontsize,
min=8,
max=32,
name="Font size",
style=wx.TE_PROCESS_ENTER,
)
grid.Add(self.fontsize_ctrl, 0, wx.ALL, 5)
# Basis vector scale control
txt = wx.StaticText(self, -1, "Basis scale:")
grid.Add(txt, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.basis_vector_scale_ctrl = IntCtrl(
self,
value=self.settings.basis_vector_scale,
min=1,
max=20,
name="Basis scale",
style=wx.TE_PROCESS_ENTER,
)
grid.Add(self.basis_vector_scale_ctrl, 0, wx.ALL, 5)
grid = wx.FlexGridSizer(cols=2, rows=9, vgap=0, hgap=0)
s.Add(grid)
# Resolution rings control
self.resolution_rings_ctrl = wx.CheckBox(self, -1, "Show resolution rings")
self.resolution_rings_ctrl.SetValue(self.settings.show_resolution_rings)
grid.Add(self.resolution_rings_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Ice rings control
self.ice_rings_ctrl = wx.CheckBox(self, -1, "Show ice rings")
self.ice_rings_ctrl.SetValue(self.settings.show_ice_rings)
grid.Add(self.ice_rings_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Center control
self.center_ctrl = wx.CheckBox(self, -1, "Mark beam center")
self.center_ctrl.SetValue(self.settings.show_beam_center)
grid.Add(self.center_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Center of mass control
self.ctr_mass = wx.CheckBox(self, -1, "Mark centers of mass")
self.ctr_mass.SetValue(self.settings.show_ctr_mass)
grid.Add(self.ctr_mass, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Max pixel control
self.max_pix = wx.CheckBox(self, -1, "Spot max pixels")
self.max_pix.SetValue(self.settings.show_max_pix)
grid.Add(self.max_pix, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Spot pixels control
self.all_pix = wx.CheckBox(self, -1, "Spot all pixels")
self.all_pix.SetValue(self.settings.show_all_pix)
grid.Add(self.all_pix, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Threshold control
self.thresh_pix = wx.CheckBox(self, -1, "Threshold pixels")
self.thresh_pix.SetValue(self.settings.show_threshold_pix)
grid.Add(self.thresh_pix, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Spot shoebox control
self.shoebox = wx.CheckBox(self, -1, "Draw reflection shoebox")
self.shoebox.SetValue(self.settings.show_shoebox)
grid.Add(self.shoebox, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Spot predictions control
self.predictions = wx.CheckBox(self, -1, "Show predictions")
self.predictions.SetValue(self.settings.show_predictions)
grid.Add(self.predictions, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        # Miller indices control
self.miller_indices = wx.CheckBox(self, -1, "Show hkl")
self.miller_indices.SetValue(self.settings.show_miller_indices)
grid.Add(self.miller_indices, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        # Mask display control
self.show_mask = wx.CheckBox(self, -1, "Show mask")
self.show_mask.SetValue(self.settings.show_mask)
grid.Add(self.show_mask, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Toggle basis vector display
self.show_basis_vectors = wx.CheckBox(self, -1, "Basis vectors")
self.show_basis_vectors.SetValue(self.settings.show_basis_vectors)
grid.Add(self.show_basis_vectors, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        # Indexed reflections only
self.indexed = wx.CheckBox(self, -1, "Indexed only")
self.indexed.SetValue(self.settings.show_indexed)
grid.Add(self.indexed, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        # Integrated reflections only
self.integrated = wx.CheckBox(self, -1, "Integrated only")
self.integrated.SetValue(self.settings.show_integrated)
grid.Add(self.integrated, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
# Toggle rotation axis display
self.show_rotation_axis = wx.CheckBox(self, -1, "Rotation axis")
self.show_rotation_axis.SetValue(self.settings.show_rotation_axis)
grid.Add(self.show_rotation_axis, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
grid = wx.FlexGridSizer(cols=2, rows=1, vgap=0, hgap=0)
self.clear_all_button = wx.Button(self, -1, "Clear all")
grid.Add(self.clear_all_button, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(wx.EVT_BUTTON, self.OnClearAll, self.clear_all_button)
s.Add(grid)
# Minimum spot area control
# box = wx.BoxSizer(wx.HORIZONTAL)
# self.minspotarea_ctrl = PhilIntCtrl(self, -1, pos=(300,180), size=(80,-1),
# value=self.GetParent().GetParent().horizons_phil.distl.minimum_spot_area,
# name="Minimum spot area (pxls)")
# self.minspotarea_ctrl.SetOptional(False)
# box.Add(self.minspotarea_ctrl, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
# txtd = wx.StaticText(self, -1, "Minimum spot area (pxls)",)
# box.Add(txtd, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
# s.Add(box)
# Stack type choice
grid = wx.FlexGridSizer(cols=2, rows=1, vgap=0, hgap=0)
txt1 = wx.StaticText(self, -1, "Stack type:")
grid.Add(txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.stack_modes = ["max", "mean", "sum"]
self.stack_mode_ctrl = wx.Choice(self, -1, choices=self.stack_modes)
self.stack_mode_ctrl.SetSelection(
self.stack_modes.index(self.params.stack_mode)
)
grid.Add(self.stack_mode_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
s.Add(grid)
# Image type choice
grid = wx.FlexGridSizer(cols=2, rows=1, vgap=0, hgap=0)
txt1 = wx.StaticText(self, -1, "Image type:")
grid.Add(txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.image_types = ["corrected", "raw"]
self.image_type_ctrl = wx.Choice(self, -1, choices=self.image_types)
self.image_type_ctrl.SetSelection(
self.image_types.index(self.settings.image_type)
)
grid.Add(self.image_type_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
s.Add(grid)
# Choice of thresholding algorithm
grid = wx.FlexGridSizer(cols=2, rows=1, vgap=0, hgap=0)
txt1 = wx.StaticText(self, -1, "Threshold algorithm:")
grid.Add(txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.threshold_algorithm_types = [
"dispersion",
"dispersion_extended",
"radial_profile",
]
self.threshold_algorithm_ctrl = wx.Choice(
self, -1, choices=self.threshold_algorithm_types
)
self.threshold_algorithm_ctrl.SetSelection(
self.threshold_algorithm_types.index(self.settings.threshold_algorithm)
)
grid.Add(self.threshold_algorithm_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
s.Add(grid)
# Spotfinding parameters relevant to dispersion algorithms
self.dispersion_params_grid = wx.FlexGridSizer(cols=2, rows=6, vgap=0, hgap=0)
s.Add(self.dispersion_params_grid)
txt1 = wx.StaticText(self, -1, "Sigma background")
self.dispersion_params_grid.Add(txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.nsigma_b_ctrl = FloatCtrl(
self, value=self.settings.nsigma_b, name="sigma_background"
)
self.nsigma_b_ctrl.SetMin(0)
self.dispersion_params_grid.Add(self.nsigma_b_ctrl, 0, wx.ALL, 5)
txt2 = wx.StaticText(self, -1, "Sigma strong")
self.dispersion_params_grid.Add(txt2, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.nsigma_s_ctrl = FloatCtrl(
self, value=self.settings.nsigma_s, name="sigma_strong"
)
self.nsigma_s_ctrl.SetMin(0)
self.dispersion_params_grid.Add(self.nsigma_s_ctrl, 0, wx.ALL, 5)
txt1 = wx.StaticText(self, -1, "Global Threshold")
self.dispersion_params_grid.Add(txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.global_threshold_ctrl = FloatCtrl(
self, value=self.settings.global_threshold, name="global_threshold"
)
self.global_threshold_ctrl.SetMin(0)
self.dispersion_params_grid.Add(self.global_threshold_ctrl, 0, wx.ALL, 5)
txt4 = wx.StaticText(self, -1, "Min. local")
self.dispersion_params_grid.Add(txt4, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.min_local_ctrl = PhilIntCtrl(
self, value=self.settings.min_local, name="min_local"
)
self.min_local_ctrl.SetMin(0)
self.dispersion_params_grid.Add(self.min_local_ctrl, 0, wx.ALL, 5)
txt4 = wx.StaticText(self, -1, "Gain")
self.dispersion_params_grid.Add(txt4, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.gain_ctrl = FloatCtrl(self, value=self.settings.gain, name="gain")
self.gain_ctrl.SetMin(1e-6)
self.dispersion_params_grid.Add(self.gain_ctrl, 0, wx.ALL, 5)
txt3 = wx.StaticText(self, -1, "Kernel size")
self.dispersion_params_grid.Add(txt3, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.kernel_size_ctrl = IntsCtrl(
self, value=self.settings.kernel_size, name="kernel_size"
)
self.kernel_size_ctrl.SetSize(2)
self.kernel_size_ctrl.SetMin(1)
self.dispersion_params_grid.Add(self.kernel_size_ctrl, 0, wx.ALL, 5)
self.Bind(
EVT_PHIL_CONTROL, self.OnUpdateThresholdParameters, self.nsigma_b_ctrl
)
self.Bind(
EVT_PHIL_CONTROL, self.OnUpdateThresholdParameters, self.nsigma_s_ctrl
)
self.Bind(
EVT_PHIL_CONTROL,
self.OnUpdateThresholdParameters,
self.global_threshold_ctrl,
)
self.Bind(
EVT_PHIL_CONTROL,
self.OnUpdateThresholdParameters,
self.kernel_size_ctrl,
)
self.Bind(
EVT_PHIL_CONTROL, self.OnUpdateThresholdParameters, self.min_local_ctrl
)
self.Bind(EVT_PHIL_CONTROL, self.OnUpdateThresholdParameters, self.gain_ctrl)
# Spotfinding parameters relevant to the radial_profile algorithm
self.radial_profile_params_grid = wx.FlexGridSizer(
cols=2, rows=3, vgap=0, hgap=0
)
s.Add(self.radial_profile_params_grid)
txt1 = wx.StaticText(self, -1, "IQR multiplier")
self.radial_profile_params_grid.Add(
txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5
)
self.n_iqr_ctrl = FloatCtrl(
self, value=self.settings.n_iqr, name="iqr_multiplier"
)
self.n_iqr_ctrl.SetMin(0)
self.radial_profile_params_grid.Add(self.n_iqr_ctrl, 0, wx.ALL, 5)
txt1 = wx.StaticText(self, -1, "Blur")
self.radial_profile_params_grid.Add(
txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5
)
self.blur_choices = [
"None",
"narrow",
"wide",
]
self.blur_ctrl = wx.Choice(self, -1, choices=self.blur_choices)
self.blur_ctrl.SetSelection(self.blur_choices.index(str(self.settings.blur)))
self.radial_profile_params_grid.Add(
self.blur_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5
)
txt1 = wx.StaticText(self, -1, "N bins")
self.radial_profile_params_grid.Add(
txt1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5
)
self.n_bins_ctrl = PhilIntCtrl(self, value=self.settings.n_bins, name="n_bins")
self.n_bins_ctrl.SetMin(10)
self.radial_profile_params_grid.Add(self.n_bins_ctrl, 0, wx.ALL, 5)
self.Bind(EVT_PHIL_CONTROL, self.OnUpdateThresholdParameters, self.n_iqr_ctrl)
self.Bind(wx.EVT_CHOICE, self.OnUpdateThresholdParameters, self.blur_ctrl)
self.Bind(
EVT_PHIL_CONTROL,
self.OnUpdateThresholdParameters,
self.n_bins_ctrl,
)
# Save spotfinding PHIL control
grid1 = wx.FlexGridSizer(cols=2, rows=1, vgap=0, hgap=0)
s.Add(grid1)
self.save_params_txt_ctrl = StrCtrl(
self, value=self.settings.find_spots_phil, name="find_spots_phil"
)
grid1.Add(self.save_params_txt_ctrl, 0, wx.ALL, 5)
self.Bind(EVT_PHIL_CONTROL, self.OnUpdate, self.save_params_txt_ctrl)
self.save_params_button = wx.Button(self, -1, "Save")
grid1.Add(self.save_params_button, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(wx.EVT_BUTTON, self.OnSaveFindSpotsParams, self.save_params_button)
grid2 = wx.FlexGridSizer(cols=4, rows=2, vgap=0, hgap=0)
s.Add(grid2)
self.dispersion_buttons = []
self.dispersion_labels = [
"image",
"mean",
"variance",
"dispersion",
"sigma_b",
"sigma_s",
"global",
"threshold",
]
for label in self.dispersion_labels:
btn = wx.ToggleButton(self, -1, label)
self.dispersion_buttons.append(btn)
grid2.Add(btn, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnDispersionThresholdDebug, btn)
        if self.params.stack_images > 1:
            for button in self.dispersion_buttons:
                button.Disable()
for button in self.dispersion_buttons:
if button.GetLabelText() == self.settings.display:
button.SetValue(True)
break
self.collect_values()
# Hide parameters for deselected threshold algorithm
self._toggle_params_grid(self.settings.threshold_algorithm)
# CONTROLS 3: Bind events to actions
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.fontsize_ctrl)
self.fontsize_ctrl.Bind(wx.EVT_KILL_FOCUS, self.OnUpdate)
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.basis_vector_scale_ctrl)
self.basis_vector_scale_ctrl.Bind(wx.EVT_KILL_FOCUS, self.OnUpdate)
# Brightness-related events
self.Bind(wx.EVT_SCROLL_CHANGED, self.OnUpdateBrightness, self.brightness_ctrl)
self.Bind(wx.EVT_SLIDER, self.OnUpdateBrightness, self.brightness_ctrl)
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdateBrightness, self.brightness_txt_ctrl)
self.brightness_txt_ctrl.Bind(wx.EVT_KILL_FOCUS, self.OnUpdateBrightness)
self.Bind(wx.EVT_CHOICE, self.OnUpdateZoomLevel, self.zoom_ctrl)
self.Bind(wx.EVT_CHOICE, self.OnUpdateImage, self.image_type_ctrl)
self.Bind(
wx.EVT_CHOICE,
self.OnUpdateThresholdAlgorithm,
self.threshold_algorithm_ctrl,
)
self.Bind(wx.EVT_CHOICE, self.OnUpdateImage, self.stack_mode_ctrl)
self.Bind(wx.EVT_CHOICE, self.OnUpdate, self.color_ctrl)
self.Bind(wx.EVT_CHOICE, self.OnUpdateProjection, self.projection_ctrl)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.resolution_rings_ctrl)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.ice_rings_ctrl)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.center_ctrl)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.ctr_mass)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.max_pix)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.all_pix)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.thresh_pix)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.shoebox)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.predictions)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.miller_indices)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.indexed)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.integrated)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.show_basis_vectors)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.show_rotation_axis)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdateShowMask, self.show_mask)
self.Bind(wx.EVT_UPDATE_UI, self.UpdateZoomCtrl)
def OnDestroy(self, event):
"Handle any cleanup when the windows is being destroyed. Manually Called."
# If we don't remove this here, then we can get called after destroy
self.brightness_txt_ctrl.Unbind(wx.EVT_KILL_FOCUS)
# CONTROLS 2: Fetch values from widgets
def collect_values(self):
if self.settings.enable_collect_values:
self.settings.image_type = self.image_types[
self.image_type_ctrl.GetSelection()
]
self.params.stack_mode = self.stack_modes[
self.stack_mode_ctrl.GetSelection()
]
self.settings.show_resolution_rings = self.resolution_rings_ctrl.GetValue()
self.settings.show_ice_rings = self.ice_rings_ctrl.GetValue()
self.settings.zoom_level = self.levels[self.zoom_ctrl.GetSelection()]
# Brightness has its own handler, so just make sure the controls are synced
if self.brightness_txt_ctrl.GetValue() != self.settings.brightness:
try:
self.brightness_txt_ctrl.ChangeValue(self.settings.brightness)
except Exception: # workaround for wxPython 2.8
self.brightness_txt_ctrl.ChangeValue(str(self.settings.brightness))
if self.brightness_ctrl.GetValue() != self.settings.brightness:
self.brightness_ctrl.SetValue(self.settings.brightness)
self.settings.show_beam_center = self.center_ctrl.GetValue()
self.settings.show_ctr_mass = self.ctr_mass.GetValue()
self.settings.show_max_pix = self.max_pix.GetValue()
self.settings.show_all_pix = self.all_pix.GetValue()
self.settings.show_threshold_pix = self.thresh_pix.GetValue()
self.settings.show_shoebox = self.shoebox.GetValue()
self.settings.show_indexed = self.indexed.GetValue()
self.settings.show_integrated = self.integrated.GetValue()
self.settings.show_predictions = self.predictions.GetValue()
self.settings.show_miller_indices = self.miller_indices.GetValue()
self.settings.fontsize = self.fontsize_ctrl.GetValue()
self.settings.basis_vector_scale = self.basis_vector_scale_ctrl.GetValue()
self.settings.show_mask = self.show_mask.GetValue()
self.settings.show_basis_vectors = self.show_basis_vectors.GetValue()
self.settings.show_rotation_axis = self.show_rotation_axis.GetValue()
self.settings.threshold_algorithm = self.threshold_algorithm_types[
self.threshold_algorithm_ctrl.GetSelection()
]
self.settings.color_scheme = self.color_ctrl.GetSelection()
self.settings.projection = self.projection_ctrl.GetSelection()
self.settings.nsigma_b = self.nsigma_b_ctrl.GetPhilValue()
self.settings.nsigma_s = self.nsigma_s_ctrl.GetPhilValue()
self.settings.global_threshold = self.global_threshold_ctrl.GetPhilValue()
self.settings.kernel_size = self.kernel_size_ctrl.GetPhilValue()
self.settings.min_local = self.min_local_ctrl.GetPhilValue()
self.settings.gain = self.gain_ctrl.GetPhilValue()
self.settings.n_iqr = self.n_iqr_ctrl.GetPhilValue()
self.settings.blur = self.blur_choices[self.blur_ctrl.GetSelection()]
self.settings.n_bins = self.n_bins_ctrl.GetPhilValue()
self.settings.find_spots_phil = self.save_params_txt_ctrl.GetPhilValue()
def UpdateZoomCtrl(self, event):
self.settings.zoom_level = self.levels.index(
self.GetParent().GetParent().pyslip.level
)
self.zoom_ctrl.SetSelection(self.settings.zoom_level)
def OnUpdate(self, event):
"""Collects all settings from the GUI and forwards to the viewer"""
self.collect_values()
self.GetParent().GetParent().update_settings()
def OnUpdateImage(self, event):
"""Forces an update of the image"""
self.OnUpdate(event)
self.GetParent().GetParent().reload_image()
def OnUpdateBrightness(self, event):
"""Handle updates from the brightness-related controls"""
# Don't update whilst dragging the slider
if event.GetEventType() == wx.EVT_SLIDER.typeId:
if wx.GetMouseState().LeftIsDown():
return
# For e.g. IntCtrl check the value is valid
if hasattr(event.EventObject, "IsInBounds"):
if not event.EventObject.IsInBounds():
return
# Read the new value then update everything if we need to
if self.settings.brightness != event.EventObject.GetValue():
self.settings.brightness = event.EventObject.GetValue()
self.OnUpdate(event)
def OnUpdateShowMask(self, event):
self.OnUpdate(event)
self.params.show_mask = self.settings.show_mask
self.GetParent().GetParent().reload_image()
def OnClearAll(self, event):
for btn in (
self.center_ctrl,
self.ctr_mass,
self.max_pix,
self.all_pix,
self.thresh_pix,
self.shoebox,
self.predictions,
self.miller_indices,
self.show_mask,
self.show_basis_vectors,
self.show_rotation_axis,
self.ice_rings_ctrl,
self.resolution_rings_ctrl,
):
btn.SetValue(False)
self.OnUpdate(event)
def OnUpdateZoomLevel(self, event):
self.collect_values()
pyslip = self.GetParent().GetParent().pyslip
# get center of view in map coords
x, y = pyslip.view_width / 2, pyslip.view_height / 2
center = pyslip.ConvertView2Geo((x, y))
pyslip.ZoomToLevel(self.settings.zoom_level)
pyslip.ZoomIn((x, y), update=False)
pyslip.GotoPosition(center)
def _toggle_params_grid(self, to_show):
if to_show == "radial_profile":
self.dispersion_params_grid.ShowItems(False)
self.radial_profile_params_grid.ShowItems(True)
else:
self.dispersion_params_grid.ShowItems(True)
self.radial_profile_params_grid.ShowItems(False)
self._sizer.Layout()
def OnUpdateThresholdAlgorithm(self, event):
self._toggle_params_grid(event.GetString())
self.OnUpdateThresholdParameters(event)
def OnUpdateProjection(self, event):
self.params.projection = event.GetString()
self.OnUpdateImage(event)
def OnSaveFindSpotsParams(self, event):
params = find_spots_phil_scope.extract()
threshold = params.spotfinder.threshold
threshold.algorithm = self.settings.threshold_algorithm
dispersion = threshold.dispersion
dispersion.gain = self.settings.gain
dispersion.global_threshold = self.settings.global_threshold
dispersion.kernel_size = self.settings.kernel_size
dispersion.min_local = self.settings.min_local
dispersion.sigma_background = self.settings.nsigma_b
dispersion.sigma_strong = self.settings.nsigma_s
radial_profile = threshold.radial_profile
radial_profile.n_iqr = self.settings.n_iqr
if self.settings.blur == "None":
radial_profile.blur = None
else:
radial_profile.blur = self.settings.blur
radial_profile.n_bins = self.settings.n_bins
print(f"Saving parameters to {self.settings.find_spots_phil}")
with open(self.settings.find_spots_phil, "w") as f:
find_spots_phil_scope.fetch_diff(find_spots_phil_scope.format(params)).show(
f
)
def OnUpdateThresholdParameters(self, event):
self.GetParent().GetParent().show_filters()
self.OnUpdateImage(event)
def OnDispersionThresholdDebug(self, event):
button = event.GetEventObject()
selected = button.GetLabelText()
self.settings.display = selected
# Disable corrected/raw selection when showing dispersion debug images
if self.settings.display != "image":
self.image_type_ctrl.SetSelection(0)
self.image_type_ctrl.Disable()
else:
self.image_type_ctrl.Enable()
# reset buttons
for btn in self.dispersion_buttons:
if btn.GetLabelText() == selected:
btn.SetValue(True)
else:
btn.SetValue(False)
self.GetParent().GetParent().show_filters()
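# --- Illustrative sketch (not part of the original file) ---
# The dispersion debug buttons above act as a radio group built from
# wx.ToggleButton widgets: OnDispersionThresholdDebug activates the pressed
# button and resets all the others. A minimal, GUI-free sketch of that
# bookkeeping (select_dispersion_display is a hypothetical helper):
#
#   def select_dispersion_display(selected, labels):
#       """Return a {label: bool} state with only `selected` active."""
#       return {label: label == selected for label in labels}
#
#   state = select_dispersion_display("sigma_b", ["image", "mean", "variance",
#                                                 "dispersion", "sigma_b",
#                                                 "sigma_s", "global",
#                                                 "threshold"])
#   assert state["sigma_b"] and not state["image"]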
|
dials/dials
|
util/image_viewer/spotfinder_frame.py
|
Python
|
bsd-3-clause
| 116,312
|
[
"CRYSTAL"
] |
3ab9850899fec2857fe31cb86e2157ebd65afc5954edef25244cb2957e84fddc
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Gregory Bowman
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import tempfile
import unittest
import numpy as np
import tables
from mdtraj import io
from mdtraj.testing import eq
fd, temp = tempfile.mkstemp(suffix='.h5')
os.close(fd)
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by pytest"""
os.unlink(temp)
def test_overwrite_1():
fid, fn = tempfile.mkstemp()
try:
a = np.arange(10)
b = a + 1
io.saveh(fn, a=a)
io.saveh(fn, b=b)
eq(io.loadh(fn, 'a'), a)
eq(io.loadh(fn, 'b'), b)
finally:
if os.path.exists(fn):
os.close(fid)
os.unlink(fn)
def test_overwrite_2():
fid, fn = tempfile.mkstemp()
try:
a = np.arange(10)
b = a + 1
io.saveh(fn, a=a)
io.saveh(fn, a=b)
eq(io.loadh(fn, 'a'), b)
finally:
if os.path.exists(fn):
os.close(fid)
os.unlink(fn)
class test_io(unittest.TestCase):
def setUp(self):
        # setUp() is called before every test and just creates a
        # temporary work space for reading/writing files.
fid, self.filename1 = tempfile.mkstemp()
fid, self.filename2 = tempfile.mkstemp()
self.data = np.arange(10000, dtype=np.float32)
# Write Data to an HDF5 file as a compressed CArray.
hdf_file = tables.open_file(self.filename1, 'a')
hdf_file.create_carray("/", "arr_0", tables.Float32Atom(),
self.data.shape, filters=io.COMPRESSION)
hdf_file.root.arr_0[:] = self.data[:]
hdf_file.flush()
hdf_file.close()
def test_load_1(self):
# Load by specifying array name
TestData = io.loadh(self.filename1, 'arr_0')
eq(TestData, self.data)
def test_load_2(self):
# load using deferred=False
TestData = io.loadh(self.filename1, deferred=False)['arr_0']
eq(TestData, self.data)
def test_load_3(self):
# load using deferred=True
deferred = io.loadh(self.filename1, deferred=True)
eq(deferred['arr_0'], self.data)
deferred.close()
def test_save(self):
# Save HDF5 to disk and load it back up
io.saveh(self.filename2, self.data)
TestData = io.loadh(self.filename2, 'arr_0')
eq(TestData, self.data)
    def tearDown(self):
os.remove(self.filename1)
os.remove(self.filename2)
class test_io_int(test_io):
"Run the same test as the class above, but using int64 data"
def setUp(self):
        # setUp() is called before every test and just creates
        # a temporary work space for reading/writing files.
fid, self.filename1 = tempfile.mkstemp()
fid, self.filename2 = tempfile.mkstemp()
self.data = np.arange(10000, dtype=np.int64)
# Write Data to an HDF5 file as a compressed CArray.
hdf_file = tables.open_file(self.filename1, 'a')
hdf_file.create_carray("/", "arr_0", tables.Int64Atom(),
self.data.shape, filters=io.COMPRESSION)
hdf_file.root.arr_0[:] = self.data[:]
hdf_file.flush()
hdf_file.close()
def test_groups():
# Test to ensure that files are loaded correctly even if they contain
# nested groups and stuff
x = np.random.randn(10)
y = np.random.randn(11)
f = tables.open_file(temp, 'w')
f.create_group(where='/', name='mygroup')
f.create_array(where='/mygroup', name='myarray', obj=x)
f.create_array(where='/', name='mya2', obj=y)
f.close()
assert eq(io.loadh(temp)['mygroup/myarray'], x)
assert eq(io.loadh(temp)['mya2'], y)
assert eq(io.loadh(temp, deferred=False)['mygroup/myarray'], x)
assert eq(io.loadh(temp, deferred=False)['mya2'], y)
assert eq(io.loadh(temp, 'mygroup/myarray'), x)
assert eq(io.loadh(temp, 'mya2'), y)
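# --- Illustrative sketch (not part of the original test file) ---
# The tests above exercise mdtraj.io round-trips. A minimal usage sketch of
# the same API ('example.h5' is a hypothetical file name):
#
#   import numpy as np
#   from mdtraj import io
#
#   data = np.arange(5)
#   io.saveh('example.h5', my_array=data)         # write an array by key
#   arr = io.loadh('example.h5', 'my_array')      # eager load of one array
#   handle = io.loadh('example.h5', deferred=True)
#   arr2 = handle['my_array']                     # lazy access; file stays open
#   handle.close()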
|
rmcgibbo/mdtraj
|
tests/test_io.py
|
Python
|
lgpl-2.1
| 4,978
|
[
"MDTraj"
] |
69e8cf7f4f9ca135bfe9c387fd76ff81577729726398ab37b827e5cebf77f2a8
|
"""
Support code for the freedesktop.org basedir spec.
This module provides functions for locating configuration files.
@see: U{http://freedesktop.org/wiki/Standards/basedir-spec}
@var home: The value of $HOME (or '/' if not set). If we're running as root and
$HOME isn't owned by root, then this will be root's home from /etc/passwd
instead.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os
home = os.environ.get('HOME', '/')
try:
_euid = os.geteuid()
except AttributeError:
pass # Windows?
else:
if _euid == 0:
# We're running as root. Ensure that $HOME really is root's home,
        # not the user's home, or we're likely to fill it with unreadable
# root-owned files.
home_owner = os.stat(home).st_uid
if home_owner != 0:
import pwd
from logging import info
old_home = home
home = pwd.getpwuid(0).pw_dir or '/'
info(_("$HOME (%(home)s) is owned by user %(user)d, but we are root (0). Using %(root_home)s instead."), {'old_home': old_home, 'user': home_owner, 'root_home': home})
del old_home
del home_owner
if os.name == "nt":
from win32com.shell import shell, shellcon
appData = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
localAppData = shell.SHGetFolderPath(0, shellcon.CSIDL_LOCAL_APPDATA, 0, 0)
commonAppData = shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_APPDATA, 0, 0)
xdg_data_home = appData
xdg_data_dirs = [xdg_data_home, commonAppData]
xdg_cache_home = localAppData
xdg_cache_dirs = [xdg_cache_home, commonAppData]
xdg_config_home = appData
xdg_config_dirs = [xdg_config_home, commonAppData]
else:
xdg_data_home = os.environ.get('XDG_DATA_HOME',
os.path.join(home, '.local', 'share'))
xdg_data_dirs = [xdg_data_home] + \
os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')
xdg_cache_home = os.environ.get('XDG_CACHE_HOME',
os.path.join(home, '.cache'))
xdg_cache_dirs = [xdg_cache_home] + \
os.environ.get('XDG_CACHE_DIRS', '/var/cache').split(':')
xdg_config_home = os.environ.get('XDG_CONFIG_HOME',
os.path.join(home, '.config'))
xdg_config_dirs = [xdg_config_home] + \
os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg').split(':')
# use lists (not one-shot filter iterators on Python 3) so these can be
# iterated repeatedly
xdg_data_dirs = [x for x in xdg_data_dirs if x]
xdg_cache_dirs = [x for x in xdg_cache_dirs if x]
xdg_config_dirs = [x for x in xdg_config_dirs if x]
def save_config_path(*resource):
"""Ensure $XDG_CONFIG_HOME/<resource>/ exists, and return its path.
'resource' should normally be the name of your application. Use this
when SAVING configuration settings. Use the xdg_config_dirs variable
for loading."""
resource = os.path.join(*resource)
assert not os.path.isabs(resource)
path = os.path.join(xdg_config_home, resource)
if not os.path.isdir(path):
        os.makedirs(path, 0o700)
return path
def load_config_paths(*resource):
"""Returns an iterator which gives each directory named 'resource' in the
configuration search path. Information provided by earlier directories should
take precedence over later ones (ie, the user's config dir comes first)."""
resource = os.path.join(*resource)
for config_dir in xdg_config_dirs:
path = os.path.join(config_dir, resource)
if os.path.exists(path): yield path
def load_first_config(*resource):
"""Returns the first result from load_config_paths, or None if there is nothing
to load."""
for x in load_config_paths(*resource):
return x
return None
def save_cache_path(*resource):
"""Ensure $XDG_CACHE_HOME/<resource>/ exists, and return its path.
'resource' should normally be the name of your application."""
resource = os.path.join(*resource)
assert not os.path.isabs(resource)
path = os.path.join(xdg_cache_home, resource)
if not os.path.isdir(path):
        os.makedirs(path, 0o700)
return path
def load_cache_paths(*resource):
"""Returns an iterator which gives each directory named 'resource' in the
cache search path. Information provided by earlier directories should
take precedence over later ones (ie, the user's cache dir comes first)."""
resource = os.path.join(*resource)
for cache_dir in xdg_cache_dirs:
path = os.path.join(cache_dir, resource)
if os.path.exists(path): yield path
def load_first_cache(*resource):
"""Returns the first result from load_cache_paths, or None if there is nothing
to load."""
for x in load_cache_paths(*resource):
return x
return None
def load_data_paths(*resource):
"""Returns an iterator which gives each directory named 'resource' in the
shared data search path. Information provided by earlier directories should
take precedence over later ones.
@since: 0.28"""
resource = os.path.join(*resource)
for data_dir in xdg_data_dirs:
path = os.path.join(data_dir, resource)
if os.path.exists(path): yield path
def load_first_data(*resource):
"""Returns the first result from load_data_paths, or None if there is nothing
to load.
@since: 0.28"""
for x in load_data_paths(*resource):
return x
return None
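# --- Illustrative sketch (not part of the original module) ---
# Typical basedir usage for an application's configuration ('MyApp' is a
# hypothetical resource name):
#
#   conf_dir = save_config_path('MyApp')      # ensure the save dir exists
#   existing = load_first_config('MyApp')     # highest-priority dir, or None
#   for d in load_config_paths('MyApp'):      # all matching dirs, user's first
#       print(d)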
|
pombredanne/zero-install
|
zeroinstall/support/basedir.py
|
Python
|
lgpl-2.1
| 5,004
|
[
"VisIt"
] |
99329136c3457280c598edf5f483b39bfff25d869333d95415ea6994f5173516
|
import pytest
from lissero.scripts.Blast import Blast
run_blast = Blast()
def test_blast_version(blast_version):
run_blast.version()
expected = tuple(blast_version.split("."))
assert expected == run_blast.version_no
|
MDU-PHL/LisSero
|
tests/test_BlastClass.py
|
Python
|
gpl-2.0
| 232
|
[
"BLAST"
] |
e3b43526332753d2f349640623b05200978a574a37f9e1deb9310b5cc94357d1
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent2000A import *
import numpy as np
import struct
from .. import ivi
from .. import fgen
OutputMode = set(['function', 'arbitrary'])
StandardWaveformMapping = {
'sine': 'sin',
'square': 'squ',
#'triangle': 'tri',
'ramp_up': 'ramp',
#'ramp_down',
#'dc'
'pulse': 'puls',
'noise': 'nois',
'dc': 'dc',
'sinc': 'sinc',
'exprise': 'expr',
'expfall': 'expf',
'cardiac': 'card',
'gaussian': 'gaus'
}
class agilent3000A(agilent2000A, fgen.ArbWfm, fgen.ArbFrequency,
fgen.ArbChannelWfm):
"Agilent InfiniiVision 3000A series IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilent3000A, self).__init__(*args, **kwargs)
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._horizontal_divisions = 10
self._vertical_divisions = 8
# wavegen option
self._output_count = 1
self._output_standard_waveform_mapping = StandardWaveformMapping
self._output_mode_list = OutputMode
self._arbitrary_sample_rate = 0
self._arbitrary_waveform_number_waveforms_max = 0
self._arbitrary_waveform_size_max = 8192
self._arbitrary_waveform_size_min = 2
self._arbitrary_waveform_quantum = 1
self._identity_description = "Agilent InfiniiVision 3000A X-series IVI oscilloscope driver"
self._identity_supported_instrument_models = ['DSOX3012A','DSOX3014A','DSOX3024A',
'DSOX3032A','DSOX3034A','DSOX3052A','DSOX3054A','DSOX3104A','MSOX3012A','MSOX3014A',
'MSOX3024A','MSOX3032A','MSOX3034A','MSOX3052A','MSOX3054A','MSOX3104A']
self._init_outputs()
self._init_channels()
def _get_output_arbitrary_gain(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_gain[index]
def _set_output_arbitrary_gain(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_arbitrary_gain[index] = value
def _get_output_arbitrary_offset(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_offset[index]
def _set_output_arbitrary_offset(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_arbitrary_offset[index] = value
def _get_output_arbitrary_waveform(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_waveform[index]
def _set_output_arbitrary_waveform(self, index, value):
index = ivi.get_index(self._output_name, index)
value = str(value)
self._output_arbitrary_waveform[index] = value
def _get_arbitrary_sample_rate(self):
return self._arbitrary_sample_rate
def _set_arbitrary_sample_rate(self, value):
value = float(value)
self._arbitrary_sample_rate = value
def _get_arbitrary_waveform_number_waveforms_max(self):
return self._arbitrary_waveform_number_waveforms_max
def _get_arbitrary_waveform_size_max(self):
return self._arbitrary_waveform_size_max
def _get_arbitrary_waveform_size_min(self):
return self._arbitrary_waveform_size_min
def _get_arbitrary_waveform_quantum(self):
return self._arbitrary_waveform_quantum
def _arbitrary_waveform_clear(self, handle):
pass
def _arbitrary_waveform_configure(self, index, handle, gain, offset):
self._set_output_arbitrary_waveform(index, handle)
self._set_output_arbitrary_gain(index, gain)
self._set_output_arbitrary_offset(index, offset)
def _arbitrary_waveform_create(self, data):
return "handle"
def _get_output_arbitrary_frequency(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_frequency[index]
def _set_output_arbitrary_frequency(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_arbitrary_frequency[index] = value
def _arbitrary_waveform_create_channel_waveform(self, index, data):
y = None
x = None
if type(data) == list and type(data[0]) == float:
# list
            y = np.array(data)
elif type(data) == np.ndarray and len(data.shape) == 1:
# 1D array
y = data
elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[0] == 1:
            # 2D array, height 1
y = data[0]
elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[1] == 1:
# 2D array, width 1
y = data[:,0]
else:
x, y = ivi.get_sig(data)
if len(y) % self._arbitrary_waveform_quantum != 0:
raise ivi.ValueNotSupportedException()
raw_data = b''
for f in y:
# clip at -1 and 1
if f > 1.0: f = 1.0
if f < -1.0: f = -1.0
raw_data = raw_data + struct.pack('<f', f)
self._write_ieee_block(raw_data, ':%s:arbitrary:data ' % self._output_name[index])
return self._output_name[index]
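# --- Illustrative sketch (not part of the original driver) ---
# _arbitrary_waveform_create_channel_waveform above clips each sample to
# [-1, 1] and packs it as a little-endian 32-bit float. The same
# transformation, vectorized with NumPy (standalone; no instrument needed):
#
#   import struct
#   import numpy as np
#
#   samples = np.linspace(-1.5, 1.5, 8)            # hypothetical waveform data
#   clipped = np.clip(samples, -1.0, 1.0)
#   raw = struct.pack('<%df' % len(clipped), *clipped)
#   assert len(raw) == 4 * len(clipped)            # 4 bytes per float32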
|
python-ivi/python-ivi
|
ivi/agilent/agilent3000A.py
|
Python
|
mit
| 6,792
|
[
"Gaussian"
] |
7f8099e2822c33603176c3d271b1ae79002c3a345df5a9c39d6211a186af1084
|
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert
import datetime
import glob
import logging
import os
from logging import Logger
from typing import Tuple, Optional, Union, Dict, Any, List
import coloredlogs
import numpy as np
import yaml
from termcolor import colored
from .. import global_params
__all__ = ['DynConfig', 'generate_default_conf', 'initialize_logging']
class Config(object):
"""
Basic config object based on yaml. If no ``config.yml`` file exists
    in `working_dir`, :py:attr:`~initialized` will be False without raising an
error.
"""
def __init__(self, working_dir):
self._config = None
self._configspec = None
self._working_dir = working_dir
self.initialized = False
if self._working_dir is not None and len(self._working_dir) > 0:
self._working_dir = os.path.abspath(self._working_dir)
self.parse_config()
def __eq__(self, other: 'Config') -> bool:
return other.entries == self.entries and \
other.path_config == self.path_config
@property
def entries(self) -> dict:
"""
Entries stored in the ``config.yml`` file.
Returns:
All entries.
"""
if not self.initialized:
raise ValueError('Config object was not initialized. "entries" '
'are not available.')
return self._config
@property
def working_dir(self) -> str:
"""
Returns:
Path to working directory.
"""
return self._working_dir
@property
def path_config(self) -> str:
"""
Returns:
Path to config file (``config.yml``).
"""
return self._working_dir + "/config.yml"
@property
def config_exists(self):
"""
Returns:
``True`` if config file exists,
``False`` otherwise.
"""
return os.path.exists(self.path_config)
@property
def sections(self) -> List[str]:
"""
Returns:
Keys to all sections present in the config file.
"""
return list(self.entries.keys())
def parse_config(self):
"""
Reads the content stored in the config file.
"""
try:
with open(self.path_config, 'r') as f:
self._config = yaml.load(f, Loader=yaml.FullLoader)
self.initialized = True
except FileNotFoundError:
pass
def write_config(self, target_dir=None):
"""
Write config and configspec to disk.
Args:
target_dir: If None, write config to
:py:attr:`~path_config`. Else,
writes it to ``target_dir + 'config.yml'``
"""
if self._config is None:
raise ValueError('ConfigObj not yet parsed.')
if target_dir is None:
fname_conf = self.path_config
else:
fname_conf = target_dir + '/config.yml'
with open(fname_conf, 'w') as f:
f.write(yaml.dump(self.entries, default_flow_style=False))
@staticmethod
def version():
from syconn import __version__
return __version__
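# --- Illustrative sketch (not part of the original module) ---
# Config reads `<working_dir>/config.yml` on construction and exposes its
# content via `entries`. A minimal round-trip under an assumed temporary
# directory:
#
#   import tempfile
#   import yaml
#
#   wd = tempfile.mkdtemp()
#   with open(wd + '/config.yml', 'w') as f:
#       yaml.dump({'log_level': 'INFO'}, f, default_flow_style=False)
#   cfg = Config(wd)
#   assert cfg.initialized and cfg.entries['log_level'] == 'INFO'
#   cfg.write_config(target_dir=wd)   # rewrites config.yml in place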
class DynConfig(Config):
"""
Enables dynamic and SyConn-wide update of working directory 'wd' and provides an
interface to all working directory dependent parameters.
Notes:
* Due to sync. checks it is favorable to not use :func:`~__getitem__`
inside loops.
Todo:
* Start to use ``__getitem__`` instead of :py:attr:`~entries`.
* Adapt all ``global_params.config.`` usages accordingly.
* Do not replace any property call for now (e.g. `~allow_mesh_gen_cells`)
because they convey default parameters for old datasets in case they
are not present in the default ``config.yml``.
Examples:
To initialize a working directory at the beginning of your script, run::
from syconn import global_params
global_params.wd = '~/SyConn/example_cube1/'
cfg = global_params.config # this is the `DynConfig` object
"""
def __init__(self, wd: Optional[str] = None, log: Optional[Logger] = None, fix_config: bool = False):
"""
Args:
wd: Path to working directory
log:
fix_config: Keep config constant.
"""
verbose = False
if wd is None:
            wd = global_params.wd
            verbose = wd is not None
super().__init__(wd)
self.fix_config = fix_config
if fix_config and self.working_dir is None:
raise ValueError('Fixed config must have a valid working directory.')
self._default_conf = None
if log is None:
log = logging.getLogger('syconn')
coloredlogs.install(level=self['log_level'], logger=log)
level = logging.getLevelName(self['log_level'])
log.setLevel(level)
if not self['disable_file_logging'] and verbose:
            # create file handler; remove any stale log file before it is opened
            log_dir = os.path.expanduser('~') + "/SyConn/logs/"
            os.makedirs(log_dir, exist_ok=True)
            if os.path.isfile(log_dir + 'syconn.log'):
                os.remove(log_dir + 'syconn.log')
            fh = logging.FileHandler(log_dir + 'syconn.log')
            fh.setLevel(level)
            # add the handler to log
            log.addHandler(fh)
log.info("Initialized file logging. Log-files are stored at"
" {}.".format(log_dir))
self.log_main = log
if verbose:
self.log_main.info("Initialized stdout logging (level: {}). "
"Current working directory:"
" ".format(self['log_level']) +
colored("'{}'".format(self.working_dir), 'red'))
if self.initialized is False:
from syconn import handler
            default_conf_p = os.path.dirname(handler.__file__) + '/config.yml'
self.log_main.warning(f'Initialized working directory without '
f'existing config file at'
f' {self.path_config}. Using default '
f'parameters as defined in {default_conf_p}.')
def __getitem__(self, item: str) -> Any:
"""
If `item` is not set in this config, the return value will be taken from
the default ``config.yml``.
Args:
item: Key of the requested value.
Returns:
The value which corresponds to `item`.
"""
try:
return self.entries[item]
except (KeyError, ValueError, AttributeError):
return self.default_conf.entries[item]
def __setitem__(self, key: str, value: Any) -> Any:
"""
        Sets `value` for `key`. If `key` is not present in this config, the
        value is assigned in the default ``config.yml`` entries instead.
        Args:
            key: Key of the item.
            value: Value of the item.
"""
self.log_main.warning('Modifying DynConfig items via `__setitem__` '
'is currently experimental. To change config '
'parameters please make changes in the '
'corresponding config.yml entries.')
try:
self.entries[key] = value
except (KeyError, ValueError, AttributeError):
self.default_conf.entries[key] = value
def _check_actuality(self):
"""
Checks os.environ and global_params and triggers an update if the therein
specified WD is not the same as :py:attr:`~working dir`.
"""
if self.fix_config:
return
# first check if working directory was set in environ, else check if it was changed in memory.
new_wd = None
if 'syconn_wd' in os.environ and os.environ['syconn_wd'] is not None and len(os.environ['syconn_wd']) > 0 \
and os.environ['syconn_wd'] != "None":
if super().working_dir != os.path.abspath(os.environ['syconn_wd']):
new_wd = os.path.abspath(os.environ['syconn_wd'])
elif (global_params.wd is not None) and (len(global_params.wd) > 0) and (global_params.wd != "None") and \
(super().working_dir != os.path.abspath(global_params.wd)):
new_wd = os.path.abspath(global_params.wd)
if new_wd is None:
return
super().__init__(new_wd)
self.log_main.info("Initialized stdout logging (level: {}). "
"Current working directory:"
" ".format(self['log_level']) +
colored("'{}'".format(new_wd), 'red'))
if self.initialized is False:
from syconn import handler
default_conf_p = f'{os.path.dirname(handler.__file__)}/config.yml'
self.log_main.warning(f'Initialized working directory without '
f'existing config file at'
f' {self.path_config}. Using default '
f'parameters as defined in {default_conf_p}.')
@property
def default_conf(self) -> Config:
"""
Load default ``config.yml`` if necessary.
"""
if self._default_conf is None:
self._default_conf = Config(os.path.split(os.path.abspath(__file__))[0])
self._default_conf._working_dir = None
return self._default_conf
@property
def entries(self):
self._check_actuality()
return super().entries
@property
def working_dir(self):
"""
Returns:
Path to working directory.
"""
self._check_actuality()
return super().working_dir
@property
def kd_seg_path(self) -> str:
"""
Returns:
Path to cell supervoxel segmentation ``KnossosDataset``.
"""
return self.entries['paths']['kd_seg']
@property
def kd_sym_path(self) -> str:
"""
Returns:
Path to synaptic sym. type probability map stored as ``KnossosDataset``.
"""
return self.entries['paths']['kd_sym']
@property
def kd_asym_path(self) -> str:
"""
Returns:
Path to synaptic asym. type probability map stored as ``KnossosDataset``.
"""
return self.entries['paths']['kd_asym']
@property
def kd_sj_path(self) -> str:
"""
Returns:
Path to synaptic junction probability map or binary predictions stored as
``KnossosDataset``.
"""
return self.entries['paths']['kd_sj']
@property
def kd_vc_path(self) -> str:
"""
Returns:
Path to vesicle cloud probability map or binary predictions stored as
``KnossosDataset``.
"""
return self.entries['paths']['kd_vc']
@property
def kd_mi_path(self) -> str:
"""
Returns:
Path to mitochondria probability map or binary predictions stored as
``KnossosDataset``.
"""
return self.entries['paths']['kd_mi']
@property
def kd_er_path(self) -> str:
"""
Returns:
Path to ER probability map or binary predictions stored as
``KnossosDataset``.
"""
return self.entries['paths']['kd_er']
@property
def kd_golgi_path(self) -> str:
"""
Returns:
Path to Golgi probability map or binary predictions stored as
``KnossosDataset``.
"""
return self.entries['paths']['kd_golgi']
@property
def kd_organelles_paths(self) -> Dict[str, str]:
"""
KDs of subcell. organelle probability maps
Returns:
            Dictionary containing the paths to ``KnossosDataset`` of available
            cellular organelles in ``global_params.config['process_cell_organelles']``.
"""
path_dict = {k: self.entries['paths']['kd_{}'.format(k)] for k in
self['process_cell_organelles']}
return path_dict
@property
def kd_organelle_seg_paths(self) -> Dict[str, str]:
"""
KDs of subcell. organelle segmentations.
Returns:
Dictionary containing the paths to ``KnossosDataset`` of available
cellular organelles ``global_params.config['process_cell_organelles']``.
"""
path_dict = {k: "{}/knossosdatasets/{}_seg/".format(
self.working_dir, k) for k in self['process_cell_organelles']}
return path_dict
@property
def temp_path(self) -> str:
"""
Returns:
Path to temporary directory used to store data caches.
"""
return "{}/tmp/".format(self.working_dir)
@property
def init_svgraph_path(self) -> str:
"""
Returns:
Path to initial RAG.
"""
self._check_actuality()
p = self.entries['paths']['init_svgraph']
if p is None or len(p) == 0:
p = self.working_dir + "/rag.bz2"
return p
@property
def pruned_svgraph_path(self) -> str:
"""
Pruned SV graph. All cells or cell fragments with bounding box diagonal
of less than ``global_params.config['min_cc_size_ssv']`` are filtered.
Returns:
Path to pruned SV graph after size filtering.
"""
self._check_actuality()
return self.working_dir + '/pruned_svgraph.bz2'
@property
def pruned_svagg_list_path(self) -> str:
"""
Pruned SV lists. All cells or cell fragments with bounding box diagonal
of less than ``global_params.config['min_cc_size_ssv']`` are filtered.
Returns:
Path to pruned agglomeration list (list of SV IDs for every cell) after size filtering.
"""
self._check_actuality()
return self.working_dir + '/pruned_svagg_list.txt'
@property
def neuron_svgraph_path(self) -> str:
"""
Neuron SV graph.
Returns:
Path to neuron SV graph.
"""
self._check_actuality()
return "{}/glia/neuron_svgraph.bz2".format(self.working_dir)
@property
def neuron_svagg_list_path(self) -> str:
"""
Neuron SV lists.
Returns:
Path to agglomeration list (list of SV IDs for every cell).
"""
self._check_actuality()
return "{}/glia/neuron_svagg_list.txt".format(self.working_dir)
    @property
    def astrocyte_svgraph_path(self) -> str:
"""
Astrocyte SV graph.
Returns:
            Path to astrocyte SV graph.
"""
self._check_actuality()
return "{}/glia/astrocyte_svgraph.bz2".format(global_params.config.working_dir)
@property
def astrocyte_svagg_list_path(self) -> str:
"""
Astrocyte SV lists.
Returns:
Path to agglomeration list (list of SV IDs for every cell).
"""
self._check_actuality()
return "{}/glia/astrocyte_svagg_list.txt".format(self.working_dir)
# --------- CLASSIFICATION MODELS
@property
def model_dir(self) -> str:
"""
Returns:
Path to model directory.
"""
return self.working_dir + '/models/'
@property
def mpath_tnet(self) -> str:
"""
Returns:
Path to tCMN - an encoder network of local cell morphology trained via
triplet loss.
"""
return self.model_dir + '/tCMN/model.pts'
# return self.model_dir + '/tCMN/'
@property
def mpath_tnet_pts(self) -> str:
"""
Returns:
Path to an encoder network of local cell morphology trained via
triplet loss on point data.
"""
mpath = glob.glob(self.model_dir + '/pts/*tnet*/state_dict.pth')
if len(mpath) > 1:
ixs = [int('j0126' in os.path.split(os.path.dirname(m))[1]) for m in mpath]
if 'j0126' in global_params.config.working_dir and np.sum(ixs) == 1:
return mpath[ixs.index(1)]
ixs = [int('j0251' in os.path.split(os.path.dirname(m))[1]) for m in mpath]
if 'j0251' in global_params.config.working_dir and np.sum(ixs) == 1:
return mpath[ixs.index(1)]
            # assume it's j0126
if 'j0251' not in global_params.config.working_dir and np.sum(ixs) == 1:
mpath.pop(ixs.index(1))
assert len(mpath) == 1
return mpath[0]
@property
def mpath_tnet_pts_wholecell(self) -> str:
"""
Returns:
Path to an encoder network of local cell morphology trained via
triplet loss on point data.
"""
mpath = glob.glob(self.model_dir + '/pts/whole_cell_embedding/*tnet*/state_dict.pth')
if len(mpath) > 1:
ixs = [int('j0126' in os.path.split(os.path.dirname(m))[1]) for m in mpath]
if 'j0126' in global_params.config.working_dir and np.sum(ixs) == 1:
return mpath[ixs.index(1)]
ixs = [int('j0251' in os.path.split(os.path.dirname(m))[1]) for m in mpath]
if 'j0251' in global_params.config.working_dir and np.sum(ixs) == 1:
return mpath[ixs.index(1)]
            # assume it's j0126
if 'j0251' not in global_params.config.working_dir and np.sum(ixs) == 1:
mpath.pop(ixs.index(1))
assert len(mpath) == 1
return mpath[0]
@property
def mpath_spiness(self) -> str:
"""
Returns:
Path to model trained on detecting spine head, neck, dendritic shaft,
and ``other`` (soma and axon) via 2D projections (-> semantic segmentation).
"""
return self.model_dir + '/spiness/model.pts'
@property
def mpath_axonsem(self) -> str:
"""
Returns:
Path to model trained on detecting axon, terminal boutons and en-passant,
dendrites and somata via 2D projections.
"""
return self.model_dir + '/axoness_semseg/model.pts'
@property
def mpath_compartment_pts(self) -> str:
"""
Returns:
Path to model trained on detecting axon, terminal and en-passant boutons,
dendritic shaft, spine head and neck, and soma from point data.
"""
return self.model_dir + '/compartment_pts/'
@property
def mpath_celltype_e3(self) -> str:
"""
Returns:
Path to model trained on prediction cell types from multi-view sets.
"""
return self.model_dir + '/celltype_e3/model.pts'
@property
def mpath_celltype_pts(self) -> str:
"""
Returns:
Path to model trained on prediction cell types from point data.
"""
mpath = glob.glob(self.model_dir + '/pts/*celltype*/state_dict.pth')
if len(mpath) > 1:
mpath = [m for m in mpath if 'tnet' not in m]
ixs = [int('j0126' in os.path.split(os.path.dirname(m))[1]) for m in mpath]
if 'j0126' in global_params.config.working_dir and np.sum(ixs) == 1:
return mpath[ixs.index(1)]
ixs = [int('j0251' in os.path.split(os.path.dirname(m))[1]) for m in mpath]
if 'j0251' in global_params.config.working_dir and np.sum(ixs) == 1:
return mpath[ixs.index(1)]
            # assume it's j0126
if 'j0251' not in global_params.config.working_dir and np.sum(ixs) == 1:
mpath.pop(ixs.index(1))
assert len(mpath) == 1
return mpath[0]
@property
def mpath_glia_e3(self) -> str:
"""
Returns:
Path to model trained to classify local 2D projections into glia
vs. neuron (img2scalar).
"""
return self.model_dir + '/glia_e3/'
@property
def mpath_glia_pts(self) -> str:
"""
Returns:
Path to point-based model trained to classify local 2D projections into glia
vs. neuron.
"""
mpath = glob.glob(self.model_dir + '/pts/*glia*/state_dict.pth')
assert len(mpath) == 1
return mpath[0]
@property
def mpath_myelin(self) -> str:
"""
Returns:
Path to model trained on identifying myelinated cell parts
within 3D EM raw data.
"""
return self.model_dir + '/myelin/model.pts'
@property
def mpath_syntype(self) -> str:
"""
Returns:
Path to model trained on identifying synapse types (symmetric
vs. asymmetric) within 3D EM raw data.
"""
return self.model_dir + '/syntype/model.pts'
@property
def mpath_er(self) -> str:
"""
Returns:
Path to model trained on identifying cell parts occupied
by ER within 3D EM raw data.
"""
return self.model_dir + '/er/model.pts'
@property
def mpath_golgi(self) -> str:
"""
Returns:
Path to model trained on identifying cell parts occupied
by Golgi Apparatus within 3D EM raw data.
"""
return self.model_dir + '/golgi/model.pts'
@property
def mpath_mivcsj(self) -> str:
"""
Returns:
            Path to model trained on predicting mitochondria, vesicle clouds
            and synaptic junctions (mi/vc/sj) within 3D EM raw data.
"""
return self.model_dir + '/mivcsj/model.pt'
@property
def mpath_syn_rfc(self) -> str:
        return self.model_dir + '/conn_syn_rfc/rfc'
@property
def mpath_syn_rfc_fallback(self) -> str:
"""
Path to rfc model created with sklearn==0.21.0
"""
        return self.model_dir + '/conn_syn_rfc/rfc_fallback'
@property
def allow_mesh_gen_cells(self) -> bool:
"""
If ``True``, meshes are not provided for cell supervoxels and will be
computed from scratch, see :attr:`~syconn.handler.config.DynConf.use_new_meshing`.
"""
return bool(self['meshes']['allow_mesh_gen_cells'])
@property
def allow_ssv_skel_gen(self) -> bool:
"""
Controls whether cell supervoxel skeletons are provided a priori or
can be computed from scratch. Currently this is done via a naive sampling
procedure.
Returns:
Value stored at the config.yml file.
"""
return bool(self['skeleton']['allow_ssv_skel_gen'])
@property
def use_kimimaro(self) -> bool:
"""
        Controls whether skeletons should be generated with kimimaro.
        Returns:
            Value stored in the config.yml file.
"""
return bool(self['skeleton']['use_kimimaro'])
# New config attributes, enable backwards compat. in case these entries do not exist
@property
def syntype_available(self) -> bool:
"""
Synaptic types are available as KnossosDataset. Will be used during the
matrix generation.
Returns:
Value stored at the config.yml file.
"""
return bool(self['syntype_avail'])
@property
def use_point_models(self) -> bool:
"""
Use point cloud based models instead of multi-views.
Returns:
Value stored at the config.yml file.
"""
return bool(self['use_point_models'])
@property
def use_onthefly_views(self) -> bool:
"""
Generate views for cell type prediction on the fly.
Returns:
Value stored at the config.yml file.
"""
return bool(self['views']['use_onthefly_views'])
@property
def use_new_renderings_locs(self) -> bool:
"""
        Use new rendering locations which are faster to compute and are located
closer to the neuron surface.
Returns:
Value stored at the config.yml file.
"""
return bool(self['views']['use_new_renderings_locs'])
@property
def use_new_meshing(self) -> bool:
"""
Use new, dense meshing (``zmesh``) computed distributed on 3D sub-cubes.
If ``False`` meshes are computed sparsely, i.e. per object/supervoxel.
Returns:
Value stored at the config.yml file.
"""
return bool(self['meshes']['use_new_meshing'])
@property
def qsub_work_folder(self) -> str:
"""
Directory where intermediate batchjob results are stored.
Returns:
Path to directory.
"""
return f"{self.working_dir}/{self['batch_proc_system']}/"
@property
def prior_astrocyte_removal(self) -> bool:
"""
        If ``True``, the astrocyte separation procedure will be initiated to create an
astrocyte-separated RAG (see ``glia/neuron_svgraph.bz2`` and
``glia/astrocyte_svgraph.bz2``).
Returns:
Value stored in ``config.yml``.
"""
return self.entries['glia']['prior_astrocyte_removal']
@property
def use_new_subfold(self) -> bool:
"""
Use new subfolder hierarchy where objects with similar IDs are stored
in the same file.
Returns:
Value stored in ``config.yml``.
"""
use_new_subfold = self['paths']['use_new_subfold']
if use_new_subfold is not None:
return bool(use_new_subfold)
else:
return False
@property
def batchjob_script_folder(self) -> str:
return os.path.abspath(os.path.dirname(os.path.abspath(__file__)) +
"/../batchjob_scripts/")
@property
def ncore_total(self) -> int:
return self['nnodes_total'] * self['ncores_per_node']
@property
def ngpu_total(self) -> int:
return self['nnodes_total'] * self['ngpus_per_node']
@property
def asym_label(self) -> Optional[int]:
return self['cell_objects']['asym_label']
@property
def sym_label(self) -> Optional[int]:
return self['cell_objects']['sym_label']
def generate_default_conf(working_dir: str, scaling: Union[Tuple, np.ndarray],
syntype_avail: bool = True,
use_new_renderings_locs: bool = True,
kd_seg: Optional[str] = None, kd_sym: Optional[str] = None,
kd_asym: Optional[str] = None,
kd_sj: Optional[str] = None, kd_mi: Optional[str] = None,
kd_vc: Optional[str] = None, kd_er: Optional[str] = None,
kd_golgi: Optional[str] = None, init_svgraph_path: str = "",
prior_astrocyte_removal: bool = True,
use_new_meshing: bool = True,
allow_mesh_gen_cells: bool = True,
use_new_subfold: bool = True, force_overwrite=False,
key_value_pairs: Optional[List[tuple]] = None):
"""
Generates the default SyConn configuration file, including paths to
``KnossosDatasets`` of e.g. cellular organelle predictions/prob.
maps and the cell supervoxel segmentation, general settings for
OpenGL (egl vs osmesa), the scheduling system (SLURM vs QSUB vs None) and
various parameters for processing the data. See
``SyConn/scripts/example_run/start.py`` for an example.
``init_svgraph_path`` can optionally be set in the config file.
By default it is set to ``init_svgraph_path = working_dir + "rag.bz2"``; SyConn
then requires an edge list of the supervoxel graph, see also
``SyConn/scripts/example_run/start.py``.
Writes the file ``config.yml`` to `working_dir` after adapting the
attributes as given by the method input. This file may also contain only
the values of attributes which should differ from the default config
at ``SyConn/syconn/handler/config.yml``. SyConn falls back to the latter if
a parameter cannot be found in the config file inside the currently active
working directory.
Examples:
The default config content is located at SyConn/syconn/handler/config.yml
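A minimal call might look like this (illustrative only; the path and
key-value pairs are placeholders, ``log_level`` is an existing config key):

generate_default_conf('~/SyConn/example_cube1/', scaling=(10, 10, 25),
key_value_pairs=[('log_level', 'DEBUG')],
force_overwrite=True)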
Args:
working_dir: Folder of the working directory.
scaling: Voxel size in NM.
syntype_avail: If True, synapse objects will contain additional type
property (symmetric vs asymmetric).
use_new_renderings_locs: If True, uses new heuristic for generating
rendering locations.
kd_seg: Path to the KnossosDataset which contains the cell segmentation.
kd_sym: Path to the symmetric type prediction.
kd_asym: Path to the asymmetric type prediction.
kd_sj: Path to the synaptic junction predictions.
kd_mi: Path to the mitochondria predictions.
kd_vc: Path to the vesicle cloud predictions.
kd_er: Path to the ER predictions.
kd_golgi: Path to the Golgi-Apparatus predictions.
init_svgraph_path: Path to the initial supervoxel graph.
prior_astrocyte_removal: If True, applies astrocyte separation before analysing
cell reconstructions.
use_new_meshing: If True, uses new meshing procedure based on `zmesh`.
allow_mesh_gen_cells: If True, meshing of cell supervoxels will be
permitted.
use_new_subfold: If True, similar object IDs will be stored in the same
storage file.
force_overwrite: Will overwrite existing ``config.yml`` file.
key_value_pairs: List of key-value pairs used to modify attributes in
the config file.
"""
if kd_seg is None:
kd_seg = working_dir + 'knossosdatasets/seg/'
if kd_sym is None:
kd_sym = working_dir + 'knossosdatasets/sym/'
if kd_asym is None:
kd_asym = working_dir + 'knossosdatasets/asym/'
if kd_sj is None:
kd_sj = working_dir + 'knossosdatasets/sj/'
if kd_mi is None:
kd_mi = working_dir + 'knossosdatasets/mi/'
if kd_vc is None:
kd_vc = working_dir + 'knossosdatasets/vc/'
if kd_er is None:
kd_er = working_dir + 'knossosdatasets/er/'
if kd_golgi is None:
kd_golgi = working_dir + 'knossosdatasets/golgi/'
default_conf = Config(os.path.split(os.path.abspath(__file__))[0])
entries = default_conf.entries
entries['paths']['kd_seg'] = kd_seg
entries['paths']['kd_sym'] = kd_sym
entries['paths']['kd_asym'] = kd_asym
entries['paths']['kd_sj'] = kd_sj
entries['paths']['kd_vc'] = kd_vc
entries['paths']['kd_mi'] = kd_mi
entries['paths']['kd_er'] = kd_er
entries['paths']['kd_golgi'] = kd_golgi
entries['paths']['init_svgraph'] = init_svgraph_path
entries['paths']['use_new_subfold'] = use_new_subfold
if isinstance(scaling, np.ndarray):
scaling = scaling.tolist()
entries['scaling'] = scaling
entries['version'] = default_conf.version()
entries['syntype_avail'] = syntype_avail
entries['meshes']['allow_mesh_gen_cells'] = allow_mesh_gen_cells
entries['meshes']['use_new_meshing'] = use_new_meshing
entries['views']['use_new_renderings_locs'] = use_new_renderings_locs
entries['glia']['prior_astrocyte_removal'] = prior_astrocyte_removal
if key_value_pairs is not None:
_update_key_value_pair_rec(key_value_pairs, entries)
default_conf._working_dir = working_dir
if os.path.isfile(default_conf.path_config) and not force_overwrite:
raise ValueError(f'Overwrite attempt of existing config file at '
f'"{default_conf.path_config}".')
default_conf.write_config(working_dir)
def _update_key_value_pair_rec(key_value_pairs, entries):
for k, v in key_value_pairs:
if k not in entries:
raise KeyError(f'Key in provided key-value {k}:{v} pair '
f'does not exist in default config.')
if isinstance(v, dict):
_update_key_value_pair_rec(list(v.items()), entries[k])
else:
entries[k] = v
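# Illustrative use of ``key_value_pairs`` (keys must exist in the default
# config; the values here are placeholders): nested dicts update sub-entries
# recursively, e.g.
# _update_key_value_pair_rec([('skeleton', {'use_kimimaro': True}),
#                             ('log_level', 'INFO')], entries)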
def initialize_logging(log_name: str, log_dir: Optional[str] = None,
overwrite: bool = True):
"""
Logger for each package module. For individual processing steps separate
loggers can be defined (e.g. ``proc``, ``reps``).
Args:
log_name: Name of the logger.
log_dir: Set the log directory explicitly. A file handler will then be
created regardless of the state of
``global_params.config['disable_file_logging']``.
overwrite: Overwrite previous log file.
Returns:
The logger.
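Examples:
Minimal usage (illustrative):

log = initialize_logging('proc')
log.info('Started processing.')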
"""
if log_dir is None:
log_dir = global_params.config['default_log_dir']
level = global_params.config['log_level']
logger = logging.getLogger(log_name)
logger.setLevel(level)
coloredlogs.install(level=global_params.config['log_level'], logger=logger,
reconfigure=False) # True possibly leads to stderr output
if not global_params.config['disable_file_logging'] or log_dir is not None:
# create file handler which logs even debug messages
if log_dir is None:
log_dir = os.path.expanduser('~') + "/.SyConn/logs/"
try:
os.makedirs(log_dir, exist_ok=True)
except TypeError:
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
log_fname = log_dir + '/' + log_name + '.log'
if overwrite and os.path.isfile(log_fname):
os.remove(log_fname)
# add the handlers to logger
fh = logging.FileHandler(log_fname)
fh.setLevel(level)
formatter = logging.Formatter(
'%(asctime)s (%(relative)smin) - %(name)s - %(levelname)s - %(message)s')
fh.addFilter(TimeFilter())
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
class TimeFilter(logging.Filter):
"""https://stackoverflow.com/questions/31521859/python-logging-module-time-since-last-log"""
def filter(self, record):
try:
last = self.last
except AttributeError:
last = record.relativeCreated
delta = datetime.datetime.fromtimestamp(record.relativeCreated / 1000.0) - \
datetime.datetime.fromtimestamp(last / 1000.0)
record.relative = '{0:.1f}'.format(delta.seconds / 60.)
self.last = record.relativeCreated
return True
|
StructuralNeurobiologyLab/SyConn
|
syconn/handler/config.py
|
Python
|
gpl-2.0
| 34,923
|
[
"NEURON"
] |
5c541c00d94fb4b8392ed47121b43775ed45c4cff97c414ddf60f7c8b2500335
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for "safe" evaluation of Python expressions."""
import builtins
from textwrap import dedent
from types import CodeType
from genshi.core import Markup
from genshi.template.astutil import ASTTransformer, ASTCodeGenerator, \
_ast, parse
from genshi.template.base import TemplateRuntimeError
from genshi.util import flatten
from genshi.compat import get_code_params, build_code_chunk, IS_PYTHON2
__all__ = ['Code', 'Expression', 'Suite', 'LenientLookup', 'StrictLookup',
'Undefined', 'UndefinedError']
__docformat__ = 'restructuredtext en'
# Check for a Python 2.4 bug in the eval loop
has_star_import_bug = False
try:
class _FakeMapping(object):
__getitem__ = __setitem__ = lambda *a: None
exec('from sys import *', {}, _FakeMapping())
except SystemError:
has_star_import_bug = True
del _FakeMapping
def _star_import_patch(mapping, modname):
"""This function is used as helper if a Python version with a broken
star-import opcode is in use.
"""
module = __import__(modname, None, None, ['__all__'])
if hasattr(module, '__all__'):
members = module.__all__
else:
members = [x for x in module.__dict__ if not x.startswith('_')]
mapping.update([(name, getattr(module, name)) for name in members])
class Code(object):
"""Abstract base class for the `Expression` and `Suite` classes."""
__slots__ = ['source', 'code', 'ast', '_globals']
def __init__(self, source, filename=None, lineno=-1, lookup='strict',
xform=None):
"""Create the code object, either from a string, or from an AST node.
:param source: either a string containing the source code, or an AST
node
:param filename: the (preferably absolute) name of the file containing
the code
:param lineno: the number of the line on which the code was found
:param lookup: the lookup class that defines how variables are looked
up in the context; can be either "strict" (the default),
"lenient", or a custom lookup class
:param xform: the AST transformer that should be applied to the code;
if `None`, the appropriate transformation is chosen
depending on the mode
"""
if isinstance(source, str):
self.source = source
node = _parse(source, mode=self.mode)
else:
assert isinstance(source, _ast.AST), \
'Expected string or AST node, but got %r' % source
self.source = '?'
if self.mode == 'eval':
node = _ast.Expression()
node.body = source
else:
node = _ast.Module()
node.body = [source]
self.ast = node
self.code = _compile(node, self.source, mode=self.mode,
filename=filename, lineno=lineno, xform=xform)
if lookup is None:
lookup = LenientLookup
elif isinstance(lookup, str):
lookup = {'lenient': LenientLookup, 'strict': StrictLookup}[lookup]
self._globals = lookup.globals
def __getstate__(self):
state = {'source': self.source, 'ast': self.ast,
'lookup': self._globals.__self__}
state['code'] = get_code_params(self.code)
return state
def __setstate__(self, state):
self.source = state['source']
self.ast = state['ast']
self.code = CodeType(0, *state['code'])
self._globals = state['lookup'].globals
def __eq__(self, other):
return (type(other) == type(self)) and (self.code == other.code)
def __hash__(self):
return hash(self.code)
def __ne__(self, other):
return not self == other
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.source)
class Expression(Code):
"""Evaluates Python expressions used in templates.
>>> data = dict(test='Foo', items=[1, 2, 3], dict={'some': 'thing'})
>>> Expression('test').evaluate(data)
'Foo'
>>> Expression('items[0]').evaluate(data)
1
>>> Expression('items[-1]').evaluate(data)
3
>>> Expression('dict["some"]').evaluate(data)
'thing'
Similar to e.g. JavaScript, expressions in templates can use the dot
notation for attribute access to access items in mappings:
>>> Expression('dict.some').evaluate(data)
'thing'
This also works the other way around: item access can be used to access
any object attribute:
>>> class MyClass(object):
... myattr = 'Bar'
>>> data = dict(mine=MyClass(), key='myattr')
>>> Expression('mine.myattr').evaluate(data)
'Bar'
>>> Expression('mine["myattr"]').evaluate(data)
'Bar'
>>> Expression('mine[key]').evaluate(data)
'Bar'
All of the standard Python operators are available to template expressions.
Built-in functions such as ``len()`` are also available in template
expressions:
>>> data = dict(items=[1, 2, 3])
>>> Expression('len(items)').evaluate(data)
3
"""
__slots__ = []
mode = 'eval'
def evaluate(self, data):
"""Evaluate the expression against the given data dictionary.
:param data: a mapping containing the data to evaluate against
:return: the result of the evaluation
"""
__traceback_hide__ = 'before_and_this'
_globals = self._globals(data)
return eval(self.code, _globals, {'__data__': data})
class Suite(Code):
"""Executes Python statements used in templates.
>>> data = dict(test='Foo', items=[1, 2, 3], dict={'some': 'thing'})
>>> Suite("foo = dict['some']").execute(data)
>>> data['foo']
'thing'
"""
__slots__ = []
mode = 'exec'
def execute(self, data):
"""Execute the suite in the given data dictionary.
:param data: a mapping containing the data to execute in
"""
__traceback_hide__ = 'before_and_this'
_globals = self._globals(data)
exec(self.code, _globals, data)
UNDEFINED = object()
class UndefinedError(TemplateRuntimeError):
"""Exception thrown when a template expression attempts to access a variable
not defined in the context.
:see: `LenientLookup`, `StrictLookup`
"""
def __init__(self, name, owner=UNDEFINED):
if owner is not UNDEFINED:
message = '%s has no member named "%s"' % (repr(owner), name)
else:
message = '"%s" not defined' % name
TemplateRuntimeError.__init__(self, message)
class Undefined(object):
"""Represents a reference to an undefined variable.
Unlike the Python runtime, template expressions can refer to an undefined
variable without causing a `NameError` to be raised. The result will be an
instance of the `Undefined` class, which is treated the same as ``False`` in
conditions, but raise an exception on any other operation:
>>> foo = Undefined('foo')
>>> bool(foo)
False
>>> list(foo)
[]
>>> print(foo)
undefined
However, calling an undefined variable, or trying to access an attribute
of that variable, will raise an exception that includes the name used to
reference that undefined variable.
>>> try:
... foo('bar')
... except UndefinedError as e:
... print(e.msg)
"foo" not defined
>>> try:
... foo.bar
... except UndefinedError as e:
... print(e.msg)
"foo" not defined
:see: `LenientLookup`
"""
__slots__ = ['_name', '_owner']
def __init__(self, name, owner=UNDEFINED):
"""Initialize the object.
:param name: the name of the reference
:param owner: the owning object, if the variable is accessed as a member
"""
self._name = name
self._owner = owner
def __iter__(self):
return iter([])
def __bool__(self):
return False
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self._name)
def __str__(self):
return 'undefined'
def _die(self, *args, **kwargs):
"""Raise an `UndefinedError`."""
__traceback_hide__ = True
raise UndefinedError(self._name, self._owner)
__call__ = __getattr__ = __getitem__ = _die
# Hack around some behavior introduced in Python 2.6.2
# http://genshi.edgewall.org/ticket/324
__length_hint__ = None
class LookupBase(object):
"""Abstract base class for variable lookup implementations."""
@classmethod
def globals(cls, data):
"""Construct the globals dictionary to use as the execution context for
the expression or suite.
"""
return {
'__data__': data,
'_lookup_name': cls.lookup_name,
'_lookup_attr': cls.lookup_attr,
'_lookup_item': cls.lookup_item,
'_star_import_patch': _star_import_patch,
'UndefinedError': UndefinedError,
}
@classmethod
def lookup_name(cls, data, name):
__traceback_hide__ = True
val = data.get(name, UNDEFINED)
if val is UNDEFINED:
val = BUILTINS.get(name, val)
if val is UNDEFINED:
val = cls.undefined(name)
return val
@classmethod
def lookup_attr(cls, obj, key):
__traceback_hide__ = True
try:
val = getattr(obj, key)
except AttributeError:
if hasattr(obj.__class__, key):
raise
else:
try:
val = obj[key]
except (KeyError, TypeError):
val = cls.undefined(key, owner=obj)
return val
@classmethod
def lookup_item(cls, obj, key):
__traceback_hide__ = True
if len(key) == 1:
key = key[0]
try:
return obj[key]
except (AttributeError, KeyError, IndexError, TypeError) as e:
if isinstance(key, str):
val = getattr(obj, key, UNDEFINED)
if val is UNDEFINED:
val = cls.undefined(key, owner=obj)
return val
raise
@classmethod
def undefined(cls, key, owner=UNDEFINED):
"""Can be overridden by subclasses to specify behavior when undefined
variables are accessed.
:param key: the name of the variable
:param owner: the owning object, if the variable is accessed as a member
"""
raise NotImplementedError
class LenientLookup(LookupBase):
"""Default variable lookup mechanism for expressions.
When an undefined variable is referenced using this lookup style, the
reference evaluates to an instance of the `Undefined` class:
>>> expr = Expression('nothing', lookup='lenient')
>>> undef = expr.evaluate({})
>>> undef
<Undefined 'nothing'>
The same will happen when a non-existing attribute or item is accessed on
an existing object:
>>> expr = Expression('something.nil', lookup='lenient')
>>> expr.evaluate({'something': dict()})
<Undefined 'nil'>
See the documentation of the `Undefined` class for details on the behavior
of such objects.
:see: `StrictLookup`
"""
@classmethod
def undefined(cls, key, owner=UNDEFINED):
"""Return an ``Undefined`` object."""
__traceback_hide__ = True
return Undefined(key, owner=owner)
class StrictLookup(LookupBase):
"""Strict variable lookup mechanism for expressions.
Referencing an undefined variable using this lookup style will immediately
raise an ``UndefinedError``:
>>> expr = Expression('nothing', lookup='strict')
>>> try:
... expr.evaluate({})
... except UndefinedError as e:
... print(e.msg)
"nothing" not defined
The same happens when a non-existing attribute or item is accessed on an
existing object:
>>> expr = Expression('something.nil', lookup='strict')
>>> try:
... expr.evaluate({'something': dict()})
... except UndefinedError as e:
... print(e.msg)
{} has no member named "nil"
"""
@classmethod
def undefined(cls, key, owner=UNDEFINED):
"""Raise an ``UndefinedError`` immediately."""
__traceback_hide__ = True
raise UndefinedError(key, owner=owner)
def _parse(source, mode='eval'):
source = source.strip()
if mode == 'exec':
lines = [line.expandtabs() for line in source.splitlines()]
if lines:
first = lines[0]
rest = dedent('\n'.join(lines[1:])).rstrip()
if first.rstrip().endswith(':') and not rest[:1].isspace():
rest = '\n'.join([' %s' % line for line in rest.splitlines()])
source = '\n'.join([first, rest])
if isinstance(source, str):
source = ('\ufeff' + source).encode('utf-8')
return parse(source, mode)
def _compile(node, source=None, mode='eval', filename=None, lineno=-1,
xform=None):
if not filename:
filename = '<string>'
if IS_PYTHON2:
# Python 2 requires non-unicode filenames
if isinstance(filename, str):
filename = filename.encode('utf-8', 'replace')
else:
# Python 3 requires unicode filenames
if not isinstance(filename, str):
filename = filename.decode('utf-8', 'replace')
if lineno <= 0:
lineno = 1
if xform is None:
xform = {
'eval': ExpressionASTTransformer
}.get(mode, TemplateASTTransformer)
tree = xform().visit(node)
if mode == 'eval':
name = '<Expression %r>' % (source or '?')
else:
lines = source.splitlines()
if not lines:
extract = ''
else:
extract = lines[0]
if len(lines) > 1:
extract += ' ...'
name = '<Suite %r>' % (extract)
new_source = ASTCodeGenerator(tree).code
code = compile(new_source, filename, mode)
try:
# We'd like to just set co_firstlineno, but it's readonly. So we need
# to clone the code object while adjusting the line number
return build_code_chunk(code, filename, name, lineno)
except RuntimeError:
return code
def _new(class_, *args, **kwargs):
ret = class_()
for attr, value in zip(ret._fields, args):
if attr in kwargs:
raise ValueError('Field set both in args and kwargs')
setattr(ret, attr, value)
for attr, value in kwargs.items():
setattr(ret, attr, value)
return ret
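# Illustrative example: _new(_ast.Name, '_lookup_name', _ast.Load()) builds an
# _ast.Name node with id='_lookup_name' and ctx=Load(), i.e. it fills the
# node's _fields from the positional arguments.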
BUILTINS = builtins.__dict__.copy()
BUILTINS.update({'Markup': Markup, 'Undefined': Undefined})
CONSTANTS = frozenset(['False', 'True', 'None', 'NotImplemented', 'Ellipsis'])
class TemplateASTTransformer(ASTTransformer):
"""Concrete AST transformer that implements the AST transformations needed
for code embedded in templates.
"""
def __init__(self):
self.locals = [CONSTANTS]
def _extract_names(self, node):
names = set()
def _process(node):
if not IS_PYTHON2 and isinstance(node, _ast.arg):
names.add(node.arg)
if isinstance(node, _ast.Name):
names.add(node.id)
elif isinstance(node, _ast.alias):
names.add(node.asname or node.name)
elif isinstance(node, _ast.Tuple):
for elt in node.elts:
_process(elt)
if hasattr(node, 'args'):
for arg in node.args:
_process(arg)
if hasattr(node, 'vararg'):
names.add(node.vararg)
if hasattr(node, 'kwarg'):
names.add(node.kwarg)
elif hasattr(node, 'names'):
for elt in node.names:
_process(elt)
return names
def visit_Str(self, node):
if not isinstance(node.s, str):
try: # If the string is ASCII, return a `str` object
node.s.decode('ascii')
except ValueError: # Otherwise return a `unicode` object
return _new(_ast.Str, node.s.decode('utf-8'))
return node
def visit_ClassDef(self, node):
if len(self.locals) > 1:
self.locals[-1].add(node.name)
self.locals.append(set())
try:
return ASTTransformer.visit_ClassDef(self, node)
finally:
self.locals.pop()
def visit_Import(self, node):
if len(self.locals) > 1:
self.locals[-1].update(self._extract_names(node))
return ASTTransformer.visit_Import(self, node)
def visit_ImportFrom(self, node):
if [a.name for a in node.names] == ['*']:
if has_star_import_bug:
# This is a Python 2.4 bug. Only if we have a broken Python
# version do we need to apply this hack
node = _new(_ast.Expr, _new(_ast.Call,
_new(_ast.Name, '_star_import_patch'), [
_new(_ast.Name, '__data__'),
_new(_ast.Str, node.module)
], (), ()))
return node
if len(self.locals) > 1:
self.locals[-1].update(self._extract_names(node))
return ASTTransformer.visit_ImportFrom(self, node)
def visit_FunctionDef(self, node):
if len(self.locals) > 1:
self.locals[-1].add(node.name)
self.locals.append(self._extract_names(node.args))
try:
return ASTTransformer.visit_FunctionDef(self, node)
finally:
self.locals.pop()
# GeneratorExp(expr elt, comprehension* generators)
def visit_GeneratorExp(self, node):
gens = []
for generator in node.generators:
# comprehension = (expr target, expr iter, expr* ifs)
self.locals.append(set())
gen = _new(_ast.comprehension, self.visit(generator.target),
self.visit(generator.iter),
[self.visit(if_) for if_ in generator.ifs])
gens.append(gen)
# use node.__class__ to make it reusable as ListComp
ret = _new(node.__class__, self.visit(node.elt), gens)
# delete the locals inserted for the comprehension generators
del self.locals[-len(node.generators):]
return ret
# ListComp(expr elt, comprehension* generators)
visit_ListComp = visit_GeneratorExp
def visit_Lambda(self, node):
self.locals.append(self._extract_names(node.args))
try:
return ASTTransformer.visit_Lambda(self, node)
finally:
self.locals.pop()
def visit_Name(self, node):
# If the name refers to a local inside a lambda, list comprehension, or
# generator expression, leave it alone
if isinstance(node.ctx, _ast.Load) and \
node.id not in flatten(self.locals):
# Otherwise, translate the name ref into a context lookup
name = _new(_ast.Name, '_lookup_name', _ast.Load())
namearg = _new(_ast.Name, '__data__', _ast.Load())
strarg = _new(_ast.Str, node.id)
node = _new(_ast.Call, name, [namearg, strarg], [])
elif isinstance(node.ctx, _ast.Store):
if len(self.locals) > 1:
self.locals[-1].add(node.id)
return node
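# For example (illustrative): with the transformation above, the template
# expression ``foo`` compiles to roughly ``_lookup_name(__data__, 'foo')``,
# so unresolved names go through the configured lookup class instead of
# raising a NameError at evaluation time.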
class ExpressionASTTransformer(TemplateASTTransformer):
"""Concrete AST transformer that implements the AST transformations needed
for code embedded in templates.
"""
def visit_Attribute(self, node):
if not isinstance(node.ctx, _ast.Load):
return ASTTransformer.visit_Attribute(self, node)
func = _new(_ast.Name, '_lookup_attr', _ast.Load())
args = [self.visit(node.value), _new(_ast.Str, node.attr)]
return _new(_ast.Call, func, args, [])
def visit_Subscript(self, node):
if not isinstance(node.ctx, _ast.Load) or \
not isinstance(node.slice, _ast.Index):
return ASTTransformer.visit_Subscript(self, node)
func = _new(_ast.Name, '_lookup_item', _ast.Load())
args = [
self.visit(node.value),
_new(_ast.Tuple, (self.visit(node.slice.value),), _ast.Load())
]
return _new(_ast.Call, func, args, [])
|
Lyleo/OmniMarkupPreviewer
|
OmniMarkupLib/Renderers/libs/python3/genshi/template/eval.py
|
Python
|
mit
| 21,040
|
[
"VisIt"
] |
538bb7c1e4460ffd237ee4969124e5e340a2ccf4b3d0d1b6689e068cd252bf08
|
"""A simple example of how to use IPython.config.application.Application.
This should serve as a simple example that shows how the IPython config
system works. The main classes are:
* IPython.config.configurable.Configurable
* IPython.config.configurable.SingletonConfigurable
* IPython.config.loader.Config
* IPython.config.application.Application
To see the command line option help, run this program from the command line::
$ python appconfig.py -h
To make one of your classes configurable (from the command line and config
files) inherit from Configurable and declare class attributes as traits (see
classes Foo and Bar below). To make the traits configurable, you will need
to set the following options:
* ``config``: set to ``True`` to make the attribute configurable.
* ``shortname``: by default, configurable attributes are set using the syntax
"Classname.attributename". At the command line, this is a bit verbose, so
we allow "shortnames" to be declared. Setting a shortname is optional, but
when you do this, you can set the option at the command line using the
syntax: "shortname=value".
* ``help``: set the help string to display a help message when the ``-h``
option is given at the command line. The help string should be valid ReST.
When the config attribute of an Application is updated, it will fire all of
the trait's events for all of the config=True attributes.
"""
from IPython.config.configurable import Configurable
from IPython.config.application import Application
from IPython.utils.traitlets import (
Bool, Unicode, Int, List, Dict
)
class Foo(Configurable):
"""A class that has configurable, typed attributes.
"""
i = Int(0, config=True, help="The integer i.")
j = Int(1, config=True, help="The integer j.")
name = Unicode(u'Brian', config=True, help="First name.")
class Bar(Configurable):
enabled = Bool(True, config=True, help="Enable bar.")
class MyApp(Application):
name = Unicode(u'myapp')
running = Bool(False, config=True,
help="Is the app running?")
classes = List([Bar, Foo])
config_file = Unicode(u'', config=True,
help="Load this config file")
aliases = Dict(dict(i='Foo.i',j='Foo.j',name='Foo.name', running='MyApp.running',
enabled='Bar.enabled', log_level='MyApp.log_level'))
flags = Dict(dict(enable=({'Bar': {'enabled' : True}}, "Enable Bar"),
disable=({'Bar': {'enabled' : False}}, "Disable Bar"),
debug=({'MyApp':{'log_level':10}}, "Set loglevel to DEBUG")
))
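# Illustrative invocations using the aliases/flags declared above
# (syntax per the module docstring; values are placeholders):
# $ python appconfig.py i=10 name=Alice --disable
# $ python appconfig.py Foo.j=20 log_level=10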
def init_foo(self):
# Pass config to other classes for them to inherit the config.
self.foo = Foo(config=self.config)
def init_bar(self):
# Pass config to other classes for them to inherit the config.
self.bar = Bar(config=self.config)
def initialize(self, argv=None):
self.parse_command_line(argv)
if self.config_file:
self.load_config_file(self.config_file)
self.init_foo()
self.init_bar()
def start(self):
print("app.config:")
print(self.config)
def main():
app = MyApp()
app.initialize()
app.start()
if __name__ == "__main__":
main()
|
pioneers/topgear
|
ipython-in-depth/examples/Customization/appconfig.py
|
Python
|
apache-2.0
| 3,290
|
[
"Brian"
] |
12734d94645cb21d94adf0f7b65c94958336b4b496bb5d336ca76a7fb6c2ffb4
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
#
#
##############################################################################################
# preamble
import os
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used may not be the first in STASH order
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960/0.5x0.5/combined_sources_C3H8_lumped_1960_360d.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i306: C3H8 surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i306'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='C3H8'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
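# Optional sanity check (illustrative): area-weighted regridding should
# approximately conserve the global emission total, e.g. compare
# (nems.data * iris.analysis.cartography.area_weights(nems)).sum() with the
# corresponding sum over the regridded cube.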
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name=str.strip(species_name)+' surf emissions'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['lumped_species']='C3H8 and C3H6' # lumping of species
ocube.attributes['emission_type']='2' # periodic time series
ocube.attributes['update_type']='2' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_C3H8_lumped_1960_360d.nc'
ocube.attributes['title']='Monthly surface emissions of propane, lumped with propene, for 1960'
ocube.attributes['File_version']='v1'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['NCO']
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month points for Jan-Dec 1960 (360-day calendar)
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345])
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345], dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be an unlimited (concatenatable) dimension, as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name','lumped_species'])
# end of script
|
acsis-project/emissions
|
emissions/python/periodic_1960/regrid_C3H8_emissions_n96e_360d_1960.py
|
Python
|
gpl-3.0
| 7,136
|
[
"NetCDF"
] |
39632054268ee6dc0dea91fef40e078d7416f732180456e60d36ef4e317305fd
|
from UTILS import *
from UTILS.BED import maskChr,mask,BED
from VCF import gz,VCF
def loadPiarPop(f,pop,popxp,negate=False):
load=pd.read_pickle
if f[-3:]=='.gz':load=gz.load
try: return load(f.format(pop, popxp))
except Exception:
alpha = (1, -1)[negate]
return load(f.format(popxp, pop)) * alpha
class GENOME:
@staticmethod
def after(x, pos=1e7):
if len(x.index.names)==1:
x.index.name='POS'
if pos > 0:
return x[x.index.get_level_values('POS') > pos]
else:
return x[x.index.get_level_values('POS') < abs(pos)]
def __init__(self,assembly=38,dmel=False,faPath='{}storage/Data/Human/ref/'.format(home)):
"conda install -c bioconda pysam "
import pysam
self.assembly=assembly
organism=('hg','dmel')[dmel]
self.name='{}{}'.format(organism,self.assembly)
GENOMEFA = '{}{}.fa'.format(faPath,self.name)
self.g = pysam.Fastafile(GENOMEFA)
def chrom(self, a,CHROM):
return pd.DataFrame(a.groupby(level=0).apply(lambda x: self.base(CHROM,x.name)))#.loc[CHROM]#.rename(self.name)
def base(self,CHROM,POS):
return self.g.fetch('chr{}'.format(CHROM), POS - 1, POS).upper()
def genome(self,a,join=True):
b=a.groupby(level=0).apply(lambda x: self.chrom(a.loc[x.name], x.name))[0].rename(self.name)
if join:
b=GENOME.join(pd.DataFrame(b),a,CHROMS=a.index.get_level_values(0).unique().tolist())
b.index.names = ["CHROM", "POS"]
return b
@staticmethod
def mergeCHROM(a, verbose=False, keys=None):
"""
:param a: list of series each of which is a chromosome
:return:
"""
a = [x for x in a if x is not None]
if not len(a): return None
CHROM = a[0].index[0][0]
if verbose: print(CHROM)
b = pd.concat([pd.concat([dedup(x.loc[CHROM]) for x in a], 1, keys=keys)], keys=[CHROM])
b.index.names = ['CHROM', 'POS']
return b
@staticmethod
def merge(a, CHROMS=range(1, 23), keys=None):
if CHROMS is None: CHROMS = a[0].index.levels[0]
def xs(x, c):
try:
return x.loc[[c]]
except:
pass
a = [GENOME.mergeCHROM([xs(x, c) for x in a], keys=keys) for c in CHROMS]
a = [x for x in a if x is not None]
return pd.concat(a)
@staticmethod
def joinCHROM(a, b, how, verbose=False):
"""
:param a: frame/series for a single chromosome
:return:
"""
if a is None: return
if a.shape[0] == 0: return
CHROM = a.index[0][0]
return pd.concat([a.loc[CHROM].join(b.loc[CHROM], how=how)], keys=[CHROM])
@staticmethod
def join(a, b, CHROMS=range(1, 23), how='inner'):
if CHROMS is None: CHROMS = a.index.get_level_values(0).unique().tolist()
if how == 'inner': CHROMS = b.index.get_level_values(0).unique().tolist()
def xs(x, c):
try:
return x.loc[[c]]
except:
pass
a = [GENOME.joinCHROM(xs(a, c), xs(b, c), how=how) for c in CHROMS]
a = [x for x in a if x is not None]
a = pd.concat(a)
a.index.names = ['CHROM', 'POS']
return a
@staticmethod
def safeConcat(a, keys=None):
return pd.concat([x for x in a if x is not None], keys=keys)
@staticmethod
def filterGapChr(a, CHROM, gap):
b = a.loc[CHROM]
return b.drop(pd.concat([maskChr(b, i) for _, i in gap.loc[CHROM].iterrows()]).index)
@staticmethod
def filterGap(a, assembly=19, pad=200000):
CHROMS = a.index.get_level_values('CHROM').unique()
gap = loadGap(assembly, pad)
return pd.concat([GENOME.filterGapChr(a, chr, gap) for chr in CHROMS], keys=CHROMS)
class scan:
@staticmethod
def cdf(x):
import pylab as plt
ax=plt.subplots(1,2,figsize=(8,3),dpi=1)[1]
# sns.distplot(x,ax=ax[0])
CDF(x).plot(label='CDF',lw=4,c='k',alpha=0.75, ax=ax[1]);
c='darkblue'
ax[1].axvline(x.quantile(0.5),c=c,alpha=0.5,label='Median={}'.format(x.quantile(0.5)));
ax[1].axvline(x.quantile(0.95),c=c,ls='--',alpha=0.5,label='Q95 ={}'.format(x.quantile(0.95)));
ax[1].axvline(x.quantile(0.99), c=c,ls='-.', alpha=0.5, label='Q99 ={}'.format(x.quantile(0.99)));
ax[1].legend();
@staticmethod
def topK(x, k=2000):
return x.sort_values(ascending=False).iloc[:k]
@staticmethod
def idf(a, winSize=50000, names=None):
if names == None: names = [a.name, 'n']
x=scan.Genome(a.dropna(), f={names[0]: np.mean, names[1]: len}, winSize=winSize)
x.columns=[0,'n']
return x
@staticmethod
def Genome(genome, f=lambda x: x.mean(), uf=None,winSize=50000, step=None, nsteps=5, minSize=None):
"""
Args:
genome: series to scan over the genome; CHROM and POS are its index levels
winSize: window size in bp
step: step size of the sliding window in bp
f: a SCALAR function or dict of SCALAR functions, e.g. f={'Mean': np.mean, 'Max': np.max, 'Custom': np.min}.
Only suitable for scanning a series with a dictionary of scalar functions.
uf: a universal function which returns a dataframe, e.g. uf=lambda x: pd.DataFrame(np.random.rand(2,3));
suitable for scanning a dataframe (each column is scanned) with a scalar or universal function
Returns:
Windowed statistics indexed by (CHROM, POS of window center).
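Example (illustrative; ``snp_stat`` stands for any pd.Series indexed by CHROM/POS):
win = scan.Genome(snp_stat, f=np.mean, winSize=50000)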
"""
if len(genome.shape)>1:
return genome.apply(lambda x: scan.Genome(x,f=f,uf=uf,winSize=winSize,step=step,nsteps=nsteps))
if step is None:step=winSize/nsteps
df = genome.groupby(level='CHROM').apply(lambda ch: scan.Chromosome(ch.loc[ch.name],f,uf,winSize,step))
if minSize is not None:
n=scan.Genome(genome, f=lambda x: x.size, winSize=winSize, step=step, minSize=None)
if f==np.sum:
df=df.loc[TI(n>=minSize)]
else:
df=df[n>=minSize]
return df
@staticmethod
def Chromosome(x,f=np.mean,uf=None,winSize=50000,step=10000):
"""
Args:
x: series containing one chromosome; positions are the index and the index name should be set
winSize: window size in bp
step: step size of the sliding window in bp
f: a SCALAR function or dict of SCALAR functions, e.g. f={'Mean': np.mean, 'Max': np.max, 'Custom': np.min}
uf: a universal function which returns a dataframe, e.g. uf=lambda x: pd.DataFrame(np.random.rand(2,3))
Returns:
"""
# print 'Chromosome',x.name
if x.index[-1] - x.index[0] < winSize:
f=(f,uf)[uf is not None]
i= roundto(((x.index[-1] + x.index[0]) / 2.),10000)+5000
z=pd.DataFrame([f(x)], index=[i])
z.index.name='POS'
return z
POS=x.index.get_level_values('POS')
res=[]
# Bins=np.arange(max(0,roundto(POS.min()-winSize,base=step)), roundto(POS.max(),base=step),winSize)
Bins = np.arange(0, roundto(POS.max(), base=step), winSize)
for i in range(int(winSize/step)):
bins=i*step +Bins
windows=pd.cut( POS, bins,labels=(bins[:-1] + winSize/2).astype(int))
if uf is None:
tmp=x.groupby(windows).agg(f)
tmp.index=tmp.index.astype(int);
tmp.index.name='POS'
else:
tmp=x.groupby(windows).apply(uf)
tmp=tmp.reset_index()
tmp.iloc[:,0]=tmp.iloc[:,0].astype(int)
tmp.columns=['POS']+tmp.columns[1:].tolist()
tmp= tmp.set_index(tmp.columns[:-1].tolist()).iloc[:,0]
res+=[tmp]
df=pd.concat(res).sort_index().dropna()
# if minSize is not None:
# df[df.COUNT < minSize] = None
# df = df.loc[:, df.columns != 'COUNT'].dropna()
return df
@staticmethod
def scanGenomeSNP(genome, f=np.mean, winSize=300,skipFromFirst=0,step=None):
if step is None:step=int(winSize/5)
return genome.groupby(level=0).apply(lambda x: scan.scanChromosomeSNP(x.iloc[skipFromFirst:],f,winSize,step))
@staticmethod
def scanChromosomeSNP(x,f,winSize,step):
"""
Args:
x: series containing one chromosome; positions are the index and the index name should be set
winSize: window size in number of SNPs
step: step size of the sliding window
f: a function or dict of functions, e.g. f={'Mean': np.mean, 'Max': np.max, 'Custom': np.min}
Returns:
"""
BinsStart=pd.Series(np.arange(0, roundto(x.size,base=step),winSize),name='start')
def createBins(i):
bins=pd.DataFrame(i*step +BinsStart)
bins['end'] = bins.start+ winSize
bins.index=((bins.start+bins.end)/2).astype(int)
return bins
bins=pd.concat(map(createBins,range(int(winSize/step)))).sort_index()
bins[bins>x.size]=None
bins=bins.dropna().astype(int)
bins=bins.apply(lambda bin: f(x.iloc[range(bin.start,bin.end)]),axis=1)
bins.index=x.index[bins.index]
if bins.shape[0]:return bins.loc[x.name]
@staticmethod
def smooth(a, winsize, normalize=True):
if normalize:
f = lambda x: x / x.sum()
else:
f = lambda x: x
return scan.scan3way(f(a), winsize, np.mean)
@staticmethod
def threeWay(a, winsize, f):
return pd.concat([a.rolling(window=winsize).apply(f),
a.rolling(window=winsize, center=True).apply(f),
a.iloc[::-1].rolling(window=winsize).apply(f).iloc[::-1]],
axis=1)
@staticmethod
def scan3way(a, winsize, f):
return scan.threeWay(a, winsize, f).apply(lambda x: np.mean(x), axis=1)
@staticmethod
def scan2wayLeft(a, winsize, f):
"""Moving average with left ellements and centered"""
X = scan.threeWay(a, winsize, f)
x = X[[0, 1]].mean(1)
x[x.isnull] = x[2]
return x
@staticmethod
def scan2wayRight(a, winsize, f):
"""Moving average with left ellements and centered"""
return scan.threeWay(a, winsize, f).iloc[:, 1:].apply(lambda x: np.mean(x), axis=1)
@staticmethod
def plotBestFly(windowStat, X, pad=30000, i=None, mann=True, foldOn=None,rep=None):
# i0 = (x.sum(1) > 0.05) & (x.sum(1) < 6.95)
if rep is None: x=X
else: x=X.xs(rep,1,1)
if i is None:
i = BED.intervali(windowStat.dropna().sort_values().index[-1], pad);
import UTILS.Plots as pplt
pplt.Trajectory.Fly(mask(x, i), subsample=2000, reps=[1, 2, 3], foldOn=foldOn);
# plt.title('Rep {}, {} '.format(rep, utl.BED.strMbp(i)));plt.show()
if mann: pplt.Manhattan(windowStat, top_k=1)
return BED.str(i)
def scanXPSFS(pops=['CEU','CHB'],nProc=8):
from itertools import product
from multiprocessing import Pool
try:
return loadPiarPop(PATH.scan + 'SFS/{}.{}.df', pops[0], pops[1])
except Exception:
fname = PATH.scan + 'SFS/{}.{}.df'.format(pops[0], pops[1])
CHROMS=range(1,23)
pool = Pool(nProc)
a=pd.concat(pool.map(scanXPSFSChr,product([pops],CHROMS))).sort_index()
pool.terminate()
a.to_pickle(fname)
return a
def scanXPSFSChr(args):
pops, CHROM=args
import UTILS.Estimate as est
df = gz.loadFreqChrom(pops, str(CHROM))
N=pd.concat(map(lambda x: pd.Series({x:len(VCF.ID(x))}),pops))*2
w=N/N.sum()
df=df.join(df.dot(w).rename('all'))
N['all']=N.sum()
N = (1 / df[df > 0].min()).astype(int)
removeFixedSites = False;
winSize = 5e4
f = lambda x: pd.DataFrame(scan.Genome(x[x.name],
uf=lambda X: est.Estimate.getEstimate(X.dropna(), n=N[x.name], bins=20,
removeFixedSites=removeFixedSites,
normalizeTajimaD=False),
winSize=int(winSize)))
a=df.groupby(level=0, axis=1).apply(f).T.reset_index(level=0, drop=True).T
n = df[(df > 0) & (df < 1)].apply(lambda x: scan.Genome(x.dropna(), len))
n['stat'] = 'n'
a = pd.concat([n.set_index('stat', append=True), a]).sort_index()
return a
|
airanmehr/Utils
|
Genome.py
|
Python
|
mit
| 12,815
|
[
"Bioconda",
"pysam"
] |
3859aabe755e12cb76f4798ca10a73614960e7831a77ac83df41bab736986418
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# _generate_pyx.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from typing import Dict
docdict: Dict[str, str] = {}
def get(name):
return docdict.get(name)
def add_newdoc(name, doc):
docdict[name] = doc
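# Illustrative usage of the dictionary above: at build time, _generate_pyx.py
# can fetch a docstring via e.g.
#     doc = get("airy")
# and attach it to the generated ufunc.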
add_newdoc("_sf_error_test_function",
"""
Private function; do not use.
""")
add_newdoc("_cosine_cdf",
"""
_cosine_cdf(x)
Cumulative distribution function (CDF) of the cosine distribution::
         {             0,               x < -pi
cdf(x) = { (pi + x + sin(x))/(2*pi),  -pi <= x <= pi
         {             1,               x > pi
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
The cosine distribution CDF evaluated at `x`.
""")
add_newdoc("_cosine_invcdf",
"""
_cosine_invcdf(p)
Inverse of the cumulative distribution function (CDF) of the cosine
distribution.
The CDF of the cosine distribution is::
cdf(x) = (pi + x + sin(x))/(2*pi)
This function computes the inverse of cdf(x).
Parameters
----------
p : array_like
`p` must contain real numbers in the interval ``0 <= p <= 1``.
`nan` is returned for values of `p` outside the interval [0, 1].
Returns
-------
float
The inverse of the cosine distribution CDF evaluated at `p`.
""")
add_newdoc("sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
https://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("voigt_profile",
r"""
voigt_profile(x, sigma, gamma, out=None)
Voigt profile.
The Voigt profile is a convolution of a 1-D Normal distribution with
standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at
half-maximum ``gamma``.
If ``sigma = 0``, the PDF of the Cauchy distribution is returned.
Conversely, if ``gamma = 0``, the PDF of the Normal distribution is returned.
If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``, and ``0`` for all other ``x``.
Parameters
----------
x : array_like
Real argument
sigma : array_like
The standard deviation of the Normal distribution part
gamma : array_like
The half-width at half-maximum of the Cauchy distribution part
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The Voigt profile at the given arguments
Notes
-----
It can be expressed in terms of the Faddeeva function
.. math:: V(x; \sigma, \gamma) = \frac{Re[w(z)]}{\sigma\sqrt{2\pi}},
.. math:: z = \frac{x + i\gamma}{\sqrt{2}\sigma}
where :math:`w(z)` is the Faddeeva function.
See Also
--------
wofz : Faddeeva function
References
----------
.. [1] https://en.wikipedia.org/wiki/Voigt_profile
""")
add_newdoc("wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
""")
add_newdoc("agm",
"""
agm(a, b)
Compute the arithmetic-geometric mean of `a` and `b`.
Start with a_0 = a and b_0 = b and iteratively compute::
a_{n+1} = (a_n + b_n)/2
b_{n+1} = sqrt(a_n*b_n)
a_n and b_n converge to the same limit as n increases; their common
limit is agm(a, b).
Parameters
----------
a, b : array_like
Real values only. If the values are both negative, the result
is negative. If one value is negative and the other is positive,
`nan` is returned.
Returns
-------
float
The arithmetic-geometric mean of `a` and `b`.
Examples
--------
>>> from scipy.special import agm
>>> a, b = 24.0, 6.0
>>> agm(a, b)
13.458171481725614
Compare that result to the iteration:
>>> while a != b:
... a, b = (a + b)/2, np.sqrt(a*b)
... print("a = %19.16f b=%19.16f" % (a, b))
...
a = 15.0000000000000000 b=12.0000000000000000
a = 13.5000000000000000 b=13.4164078649987388
a = 13.4582039324993694 b=13.4581390309909850
a = 13.4581714817451772 b=13.4581714817060547
a = 13.4581714817256159 b=13.4581714817256159
When array-like arguments are given, broadcasting applies:
>>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).
>>> b = np.array([6, 12, 24, 48]) # b has shape (4,).
>>> agm(a, b)
array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],
[ 4.37037309, 6.72908574, 10.84726853, 18.11597502],
[ 6. , 8.74074619, 13.45817148, 21.69453707]])
""")
add_newdoc("airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compute the Airy functions on the interval [-15, 5].
>>> from scipy import special
>>> x = np.linspace(-15, 5, 201)
>>> ai, aip, bi, bip = special.airy(x)
Plot Ai(x) and Bi(x).
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, ai, 'r', label='Ai(x)')
>>> plt.plot(x, bi, 'b--', label='Bi(x)')
>>> plt.ylim(-0.5, 1.0)
>>> plt.grid()
>>> plt.legend(loc='upper left')
>>> plt.show()
""")
add_newdoc("airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Exponentially scaled Airy functions eAi and eBi, and their derivatives
eAip and eBip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
We can compute exponentially scaled Airy functions and their derivatives:
>>> from scipy.special import airye
>>> import matplotlib.pyplot as plt
>>> z = np.linspace(0, 50, 500)
>>> eAi, eAip, eBi, eBip = airye(z)
>>> f, ax = plt.subplots(2, 1, sharex=True)
>>> for ind, data in enumerate([[eAi, eAip, ["eAi", "eAip"]],
... [eBi, eBip, ["eBi", "eBip"]]]):
... ax[ind].plot(z, data[0], "-r", z, data[1], "-b")
... ax[ind].legend(data[2])
... ax[ind].grid(True)
>>> plt.show()
We can compute these using usual non-scaled Airy functions by:
>>> from scipy.special import airy
>>> Ai, Aip, Bi, Bip = airy(z)
>>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
True
>>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
True
>>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
True
>>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
True
Comparing non-scaled and exponentially scaled ones, the usual non-scaled
function quickly underflows for large values, whereas the exponentially
scaled function does not.
>>> airy(200)
(0.0, 0.0, nan, nan)
>>> airye(200)
(0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093)
""")
add_newdoc("bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `floor(k)` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^{\lfloor k \rfloor} {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (double), rounded down to the nearest integer.
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `floor(k)` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - \lfloor k \rfloor, \lfloor k \rfloor + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
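Examples
--------
A quick check of the defining sum (illustrative values):
>>> import numpy as np
>>> from scipy.special import bdtr, comb
>>> k, n, p = 2, 4, 0.35
>>> direct = sum(comb(n, j) * p**j * (1 - p)**(n - j) for j in range(k + 1))
>>> np.allclose(bdtr(k, n, p), direct)
True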
""")
add_newdoc("bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `floor(k) + 1` through `n` of the binomial probability
density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=\lfloor k \rfloor +1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (double), rounded down to nearest integer.
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `floor(k) + 1` or more successes in `n` independent
events with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(\lfloor k \rfloor + 1, n - \lfloor k \rfloor).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
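Examples
--------
Since `bdtr` and `bdtrc` sum complementary ranges of terms, the two
add up to one (illustrative values):
>>> import numpy as np
>>> from scipy.special import bdtr, bdtrc
>>> k, n, p = 2, 4, 0.35
>>> np.allclose(bdtr(k, n, p) + bdtrc(k, n, p), 1.0)
True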
""")
add_newdoc("bdtri",
r"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float), rounded down to the nearest integer.
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that ``bdtr(floor(k), n, p) = y``.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
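Examples
--------
A round trip with `bdtr` recovers the event probability (illustrative
values):
>>> import numpy as np
>>> from scipy.special import bdtr, bdtri
>>> k, n, p = 2, 4, 0.35
>>> y = bdtr(k, n, p)
>>> np.allclose(bdtri(k, n, y), p)
True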
""")
add_newdoc("bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
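Examples
--------
A round trip with `bdtr` recovers the number of successes
(illustrative values):
>>> import numpy as np
>>> from scipy.special import bdtr, bdtrik
>>> k, n, p = 2.0, 4.0, 0.35
>>> y = bdtr(k, n, p)
>>> np.allclose(bdtrik(y, n, p), k)
True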
""")
add_newdoc("bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
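Examples
--------
A round trip with `bdtr` recovers the number of events (illustrative
values):
>>> import numpy as np
>>> from scipy.special import bdtr, bdtrin
>>> k, n, p = 2.0, 4.0, 0.35
>>> y = bdtr(k, n, p)
>>> np.allclose(bdtrin(k, y, p), n)
True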
""")
add_newdoc("binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
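Examples
--------
For non-negative integer arguments it agrees with the exact integer
coefficient (illustrative values):
>>> import numpy as np
>>> from math import comb
>>> from scipy.special import binom
>>> np.allclose(binom(4, 2), comb(4, 2))
True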
""")
add_newdoc("btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
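Examples
--------
A round trip with `btdtr` recovers the shape parameter `a`
(illustrative values):
>>> import numpy as np
>>> from scipy.special import btdtr, btdtria
>>> a, b, x = 1.5, 2.5, 0.3
>>> p = btdtr(a, b, x)
>>> np.allclose(btdtria(p, b, x), a)
True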
""")
add_newdoc("btdtrib",
r"""
btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
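Examples
--------
A round trip with `btdtr` recovers the shape parameter `b`
(illustrative values):
>>> import numpy as np
>>> from scipy.special import btdtr, btdtrib
>>> a, b, x = 1.5, 2.5, 0.3
>>> p = btdtr(a, b, x)
>>> np.allclose(btdtrib(a, p, x), b)
True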
""")
add_newdoc("bei",
r"""
bei(x, out=None)
Kelvin function bei.
Defined as
.. math::
\mathrm{bei}(x) = \Im[J_0(x e^{3 \pi i / 4})]
where :math:`J_0` is the Bessel function of the first kind of
order zero (see `jv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
ber : the corresponding real part
beip : the derivative of bei
jv : Bessel function of the first kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using Bessel functions.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).imag
array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
>>> sc.bei(x)
array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
""")
add_newdoc("beip",
r"""
beip(x, out=None)
Derivative of the Kelvin function bei.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of bei.
See Also
--------
bei
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
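Examples
--------
A central-difference check that `beip` is the derivative of `bei`
(illustrative step size and points):
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0])
>>> h = 1e-6
>>> np.allclose((sc.bei(x + h) - sc.bei(x - h)) / (2 * h), sc.beip(x))
True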
""")
add_newdoc("ber",
r"""
ber(x, out=None)
Kelvin function ber.
Defined as
.. math::
\mathrm{ber}(x) = \Re[J_0(x e^{3 \pi i / 4})]
where :math:`J_0` is the Bessel function of the first kind of
order zero (see `jv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
bei : the corresponding imaginary part
berp : the derivative of ber
jv : Bessel function of the first kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using Bessel functions.
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).real
array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656])
>>> sc.ber(x)
array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656])
""")
add_newdoc("berp",
r"""
berp(x, out=None)
Derivative of the Kelvin function ber.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of ber.
See Also
--------
ber
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
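Examples
--------
A central-difference check that `berp` is the derivative of `ber`
(illustrative step size and points):
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0])
>>> h = 1e-6
>>> np.allclose((sc.ber(x + h) - sc.ber(x - h)) / (2 * h), sc.berp(x))
True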
""")
add_newdoc("besselpoly",
r"""
besselpoly(a, lmb, nu, out=None)
Weighted integral of the Bessel function of the first kind.
Computes
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
Parameters
----------
a : array_like
Scale factor inside the Bessel function.
lmb : array_like
Power of `x`
nu : array_like
Order of the Bessel function.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Value of the integral.
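Examples
--------
A comparison against direct numerical quadrature of the integrand
(illustrative values):
>>> import numpy as np
>>> from scipy.integrate import quad
>>> from scipy.special import besselpoly, jv
>>> a, lmb, nu = 1.5, 2.0, 1.0
>>> integral, _ = quad(lambda x: x**lmb * jv(nu, 2 * a * x), 0, 1)
>>> np.allclose(besselpoly(a, lmb, nu), integral)
True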
""")
add_newdoc("beta",
r"""
beta(a, b, out=None)
Beta function.
This function is defined in [1]_ as
.. math::
B(a, b) = \int_0^1 t^{a-1}(1-t)^{b-1}dt
= \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)},
where :math:`\Gamma` is the gamma function.
Parameters
----------
a, b : array-like
Real-valued arguments
out : ndarray, optional
Optional output array for the function result
Returns
-------
scalar or ndarray
Value of the beta function
See Also
--------
gamma : the gamma function
betainc : the incomplete beta function
betaln : the natural logarithm of the absolute
value of the beta function
References
----------
.. [1] NIST Digital Library of Mathematical Functions,
Eq. 5.12.1. https://dlmf.nist.gov/5.12
Examples
--------
>>> import scipy.special as sc
The beta function relates to the gamma function by the
definition given above:
>>> sc.beta(2, 3)
0.08333333333333333
>>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)
0.08333333333333333
As this relationship demonstrates, the beta function
is symmetric:
>>> sc.beta(1.7, 2.4)
0.16567527689031739
>>> sc.beta(2.4, 1.7)
0.16567527689031739
This function satisfies :math:`B(1, b) = 1/b`:
>>> sc.beta(1, 4)
0.25
""")
add_newdoc("betainc",
r"""
betainc(a, b, x, out=None)
Incomplete beta function.
Computes the incomplete beta function, defined as [1]_:
.. math::
I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x
t^{a-1}(1-t)^{b-1}dt,
for :math:`0 \leq x \leq 1`.
Parameters
----------
a, b : array-like
Positive, real-valued parameters
x : array-like
Real-valued such that :math:`0 \leq x \leq 1`,
the upper limit of integration
out : ndarray, optional
Optional output array for the function values
Returns
-------
array-like
Value of the incomplete beta function
See Also
--------
beta : beta function
betaincinv : inverse of the incomplete beta function
Notes
-----
The incomplete beta function is also sometimes defined
without the `gamma` terms, in which case the above
definition is the so-called regularized incomplete beta
function. Under that convention, the non-regularized incomplete
beta function is obtained by multiplying the result of the SciPy
function by ``beta(a, b)``.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
Let :math:`B(a, b)` be the `beta` function.
>>> import scipy.special as sc
The coefficient in terms of `gamma` is equal to
:math:`1/B(a, b)`. Also, when :math:`x=1`
the integral is equal to :math:`B(a, b)`.
Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.
>>> sc.betainc(0.2, 3.5, 1.0)
1.0
It satisfies
:math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,
where :math:`F` is the hypergeometric function `hyp2f1`:
>>> a, b, x = 1.4, 3.1, 0.5
>>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))
0.8148904036225295
>>> sc.betainc(a, b, x)
0.8148904036225296
This function satisfies the relationship
:math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:
>>> sc.betainc(2.2, 3.1, 0.4)
0.49339638807619446
>>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)
0.49339638807619446
""")
add_newdoc("betaincinv",
r"""
betaincinv(a, b, y, out=None)
Inverse of the incomplete beta function.
Computes :math:`x` such that:
.. math::
y = I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}
\int_0^x t^{a-1}(1-t)^{b-1}dt,
where :math:`I_x` is the normalized incomplete beta
function `betainc` and
:math:`\Gamma` is the `gamma` function [1]_.
Parameters
----------
a, b : array-like
Positive, real-valued parameters
y : array-like
Real-valued input
out : ndarray, optional
Optional output array for function values
Returns
-------
array-like
Value of the inverse of the incomplete beta function
See Also
--------
betainc : incomplete beta function
gamma : gamma function
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
>>> import scipy.special as sc
This function is the inverse of `betainc` for fixed
values of :math:`a` and :math:`b`.
>>> a, b = 1.2, 3.1
>>> y = sc.betainc(a, b, 0.2)
>>> sc.betaincinv(a, b, y)
0.2
>>>
>>> a, b = 7.5, 0.4
>>> x = sc.betaincinv(a, b, 0.5)
>>> sc.betainc(a, b, x)
0.5
""")
add_newdoc("betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
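Examples
--------
It agrees with the logarithm of `beta` where the latter does not
overflow (illustrative values):
>>> import numpy as np
>>> import scipy.special as sc
>>> np.allclose(sc.betaln(2, 3), np.log(abs(sc.beta(2, 3))))
True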
""")
add_newdoc("boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("btdtr",
r"""
btdtr(a, b, x)
Cumulative distribution function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative distribution function of the beta distribution with
parameters `a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
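Examples
--------
As noted above, it agrees with `betainc` (illustrative values):
>>> import numpy as np
>>> from scipy.special import btdtr, betainc
>>> np.allclose(btdtr(1.5, 2.5, 0.3), betainc(1.5, 2.5, 0.3))
True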
""")
add_newdoc("btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
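Examples
--------
A round trip with `btdtr` recovers the quantile (illustrative values):
>>> import numpy as np
>>> from scipy.special import btdtr, btdtri
>>> a, b, x = 1.5, 2.5, 0.3
>>> p = btdtr(a, b, x)
>>> np.allclose(btdtri(a, b, p), x)
True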
""")
add_newdoc("cbrt",
"""
cbrt(x)
Element-wise cube root of `x`.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
scalar or ndarray
The cube root of each value in `x`.
Examples
--------
>>> from scipy.special import cbrt
>>> cbrt(8)
2.0
>>> cbrt([-8, -3, 0.125, 1.331])
array([-2. , -1.44224957, 0.5 , 1.1 ])
""")
add_newdoc("chdtr",
r"""
chdtr(v, x, out=None)
Chi square cumulative distribution function.
Returns the area under the left tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom:
.. math::
\frac{1}{2^{v/2} \Gamma(v/2)} \int_0^x t^{v/2 - 1} e^{-t/2} dt
Here :math:`\Gamma` is the Gamma function; see `gamma`. This
integral can be expressed in terms of the regularized lower
incomplete gamma function `gammainc` as
``gammainc(v / 2, x / 2)``. [1]_
Parameters
----------
v : array_like
Degrees of freedom.
x : array_like
Upper bound of the integral.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the cumulative distribution function.
See Also
--------
chdtrc, chdtri, chdtriv, gammainc
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It can be expressed in terms of the regularized lower incomplete
gamma function.
>>> v = 1
>>> x = np.arange(4)
>>> sc.chdtr(v, x)
array([0. , 0.68268949, 0.84270079, 0.91673548])
>>> sc.gammainc(v / 2, x / 2)
array([0. , 0.68268949, 0.84270079, 0.91673548])
""")
add_newdoc("chdtrc",
r"""
chdtrc(v, x, out=None)
Chi square survival function.
Returns the area under the right hand tail (from `x` to infinity)
of the Chi square probability density function with `v` degrees of
freedom:
.. math::
\frac{1}{2^{v/2} \Gamma(v/2)} \int_x^\infty t^{v/2 - 1} e^{-t/2} dt
Here :math:`\Gamma` is the Gamma function; see `gamma`. This
integral can be expressed in terms of the regularized upper
incomplete gamma function `gammaincc` as
``gammaincc(v / 2, x / 2)``. [1]_
Parameters
----------
v : array_like
Degrees of freedom.
x : array_like
Lower bound of the integral.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the survival function.
See Also
--------
chdtr, chdtri, chdtriv, gammaincc
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It can be expressed in terms of the regularized upper incomplete
gamma function.
>>> v = 1
>>> x = np.arange(4)
>>> sc.chdtrc(v, x)
array([1. , 0.31731051, 0.15729921, 0.08326452])
>>> sc.gammaincc(v / 2, x / 2)
array([1. , 0.31731051, 0.15729921, 0.08326452])
""")
add_newdoc("chdtri",
"""
chdtri(v, p, out=None)
Inverse to `chdtrc` with respect to `x`.
Returns `x` such that ``chdtrc(v, x) == p``.
Parameters
----------
v : array_like
Degrees of freedom.
p : array_like
Probability.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
x : scalar or ndarray
Value so that the probability a Chi square random variable
with `v` degrees of freedom is greater than `x` equals `p`.
See Also
--------
chdtrc, chdtr, chdtriv
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It inverts `chdtrc`.
>>> v, p = 1, 0.3
>>> sc.chdtrc(v, sc.chdtri(v, p))
0.3
>>> x = 1
>>> sc.chdtri(v, sc.chdtrc(v, x))
1.0
""")
add_newdoc("chdtriv",
"""
chdtriv(p, x, out=None)
Inverse to `chdtr` with respect to `v`.
Returns `v` such that ``chdtr(v, x) == p``.
Parameters
----------
p : array_like
Probability that the Chi square random variable is less than
or equal to `x`.
x : array_like
Nonnegative input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Degrees of freedom.
See Also
--------
chdtr, chdtrc, chdtri
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It inverts `chdtr`.
>>> p, x = 0.5, 1
>>> sc.chdtr(sc.chdtriv(p, x), x)
0.5000000000202172
>>> v = 1
>>> sc.chdtriv(sc.chdtr(v, x), v)
1.0000000000000013
""")
add_newdoc("chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
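Examples
--------
A round trip with its inverse `chndtrix` (illustrative values):
>>> import numpy as np
>>> from scipy.special import chndtr, chndtrix
>>> x, df, nc = 2.0, 3.0, 1.0
>>> p = chndtr(x, df, nc)
>>> np.allclose(chndtrix(p, df, nc), x)
True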
""")
add_newdoc("chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("cosdg",
"""
cosdg(x, out=None)
Cosine of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Cosine of the input.
See Also
--------
sindg, tandg, cotdg
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than using cosine directly.
>>> x = 90 + 180 * np.arange(3)
>>> sc.cosdg(x)
array([-0., 0., -0.])
>>> np.cos(x * np.pi / 180)
array([ 6.1232340e-17, -1.8369702e-16, 3.0616170e-16])
""")
add_newdoc("cosm1",
"""
cosm1(x, out=None)
cos(x) - 1 for use when `x` is near zero.
Parameters
----------
x : array_like
Real valued argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of ``cos(x) - 1``.
See Also
--------
expm1, log1p
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than computing ``cos(x) - 1`` directly for
``x`` around 0.
>>> x = 1e-30
>>> np.cos(x) - 1
0.0
>>> sc.cosm1(x)
-5.0000000000000005e-61
""")
add_newdoc("cotdg",
"""
cotdg(x, out=None)
Cotangent of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Cotangent at the input.
See Also
--------
sindg, cosdg, tandg
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than using cotangent directly.
>>> x = 90 + 180 * np.arange(3)
>>> sc.cotdg(x)
array([0., 0., 0.])
>>> 1 / np.tan(x * np.pi / 180)
array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])
""")
add_newdoc("dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("ellipe",
r"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \sqrt{1-m}
is used.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre E integral is related to Carlson's symmetric R_D or R_G
functions in multiple ways [3]_. For example,
.. math:: E(m) = 2 R_G(0, 1-k^2, 1) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprd : Symmetric elliptic integral of the second kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [3] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
Examples
--------
This function is used in finding the circumference of an
ellipse with semi-major axis `a` and semi-minor axis `b`.
>>> import numpy as np
>>> from scipy import special
>>> a = 3.5
>>> b = 2.1
>>> e_sq = 1.0 - b**2/a**2 # eccentricity squared
Then the circumference is found using the following:
>>> C = 4*a*special.ellipe(e_sq) # circumference formula
>>> C
17.868899204378693
When `a` and `b` are the same (meaning eccentricity is 0),
this reduces to the circumference of a circle.
>>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b
21.991148575128552
>>> 2*np.pi*a # formula for circle of radius a
21.991148575128552
""")
add_newdoc("ellipeinc",
r"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre E incomplete integral can be related to combinations
of Carlson's symmetric integrals R_D, R_F, and R_G in multiple
ways [3]_. For example, with :math:`c = \csc^2\phi`,
.. math::
E(\phi, m) = R_F(c-1, c-k^2, c)
- \frac{1}{3} k^2 R_D(c-1, c-k^2, c) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
elliprd : Symmetric elliptic integral of the second kind.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [3] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
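Examples
--------
At ``phi = pi/2`` it reduces to the complete integral `ellipe`
(illustrative values):
>>> import numpy as np
>>> from scipy.special import ellipe, ellipeinc
>>> m = 0.5
>>> np.allclose(ellipeinc(np.pi / 2, m), ellipe(m))
True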
""")
add_newdoc("ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
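Examples
--------
A check of the relation to `ellipkinc` stated above (illustrative
values):
>>> import numpy as np
>>> from scipy.special import ellipj, ellipkinc
>>> phi, m = 0.5, 0.7
>>> u = ellipkinc(phi, m)
>>> sn, cn, dn, ph = ellipj(u, m)
>>> np.allclose([sn, cn, ph], [np.sin(phi), np.cos(phi), phi])
True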
""")
add_newdoc("ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprf : Completely-symmetric elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
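Examples
--------
By definition, it agrees with `ellipk` evaluated at ``m = 1 - p``
(illustrative values):
>>> import numpy as np
>>> from scipy.special import ellipk, ellipkm1
>>> p = 0.5
>>> np.allclose(ellipkm1(p), ellipk(1 - p))
True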
""")
add_newdoc("ellipk",
r"""
ellipk(m)
Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around the point ``m = 1``, use `ellipkm1`, which this
function calls.
The parameterization in terms of :math:`m` follows that of section
17.2 in [1]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre K integral is related to Carlson's symmetric R_F
function by [2]_:
.. math:: K(m) = R_F(0, 1-k^2, 1) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprf : Completely-symmetric elliptic integral of the first kind.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
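Examples
--------
A check of the relation to `elliprf` stated above (illustrative
values):
>>> import numpy as np
>>> from scipy.special import ellipk, elliprf
>>> m = 0.5
>>> np.allclose(ellipk(m), elliprf(0, 1 - m, 1))
True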
""")
add_newdoc("ellipkinc",
r"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
This function is also called :math:`F(\phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre K incomplete integral (or F integral) is related to
Carlson's symmetric R_F function [3]_.
Setting :math:`c = \csc^2\phi`,
.. math:: F(\phi, m) = R_F(c-1, c-k^2, c) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprf : Completely-symmetric elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [3] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
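Examples
--------
At ``phi = pi/2`` it reduces to the complete integral `ellipk`
(illustrative values):
>>> import numpy as np
>>> from scipy.special import ellipk, ellipkinc
>>> m = 0.5
>>> np.allclose(ellipkinc(np.pi / 2, m), ellipk(m))
True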
""")
add_newdoc(
"elliprc",
r"""
elliprc(x, y)
Degenerate symmetric elliptic integral.
The function RC is defined as [1]_
.. math::
R_{\mathrm{C}}(x, y) =
\frac{1}{2} \int_0^{+\infty} (t + x)^{-1/2} (t + y)^{-1} dt
= R_{\mathrm{F}}(x, y, y)
Parameters
----------
x, y : array_like
Real or complex input parameters. `x` can be any number in the
complex plane cut along the negative real axis. `y` must be non-zero.
Returns
-------
R : ndarray
Value of the integral. If `y` is real and negative, the Cauchy
principal value is returned. If both of `x` and `y` are real, the
return value is real. Otherwise, the return value is complex.
Notes
-----
RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) ==
elliprf(x, y, y)``. It is an elementary function rather than an elliptic
integral.
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order. [2]_
.. versionadded:: 1.8.0
See Also
--------
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprd : Symmetric elliptic integral of the second kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E6
.. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
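Examples
--------
A check of the degenerate relation to `elliprf` stated above
(illustrative values):
>>> import numpy as np
>>> from scipy.special import elliprc, elliprf
>>> x, y = 1.2, 3.4
>>> np.allclose(elliprc(x, y), elliprf(x, y, y))
True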
""")
add_newdoc(
"elliprd",
r"""
elliprd(x, y, z)
Symmetric elliptic integral of the second kind.
The function RD is defined as [1]_
.. math::
R_{\mathrm{D}}(x, y, z) =
\frac{3}{2} \int_0^{+\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2}
dt
Parameters
----------
x, y, z : array_like
Real or complex input parameters. `x` or `y` can be any number in the
complex plane cut along the negative real axis, but at most one of them
can be zero, while `z` must be non-zero.
Returns
-------
R : ndarray
Value of the integral. If all of `x`, `y`, and `z` are real, the
return value is real. Otherwise, the return value is complex.
Notes
-----
RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) ==
elliprj(x, y, z, z)``.
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order. [2]_
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric elliptic integral.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E5
.. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
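Examples
--------
A check of the degenerate relation to `elliprj` stated above
(illustrative values):
>>> import numpy as np
>>> from scipy.special import elliprd, elliprj
>>> np.allclose(elliprd(1.0, 2.0, 3.0), elliprj(1.0, 2.0, 3.0, 3.0))
True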
""")
add_newdoc(
"elliprf",
r"""
elliprf(x, y, z)
Completely-symmetric elliptic integral of the first kind.
The function RF is defined as [1]_
.. math::
R_{\mathrm{F}}(x, y, z) =
\frac{1}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt
Parameters
----------
x, y, z : array_like
Real or complex input parameters. `x`, `y`, or `z` can be any number in
the complex plane cut along the negative real axis, but at most one of
them can be zero.
Returns
-------
R : ndarray
Value of the integral. If all of `x`, `y`, and `z` are real, the return
value is real. Otherwise, the return value is complex.
Notes
-----
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order (cf.:
https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete
integral. [2]_
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric integral.
elliprd : Symmetric elliptic integral of the second kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E1
.. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
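Examples
--------
When all three arguments coincide, the integral reduces to
:math:`x^{-1/2}` (a known special value; illustrative check):
>>> import numpy as np
>>> from scipy.special import elliprf
>>> x = 4.0
>>> np.allclose(elliprf(x, x, x), 1 / np.sqrt(x))
True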
""")
add_newdoc(
"elliprg",
r"""
elliprg(x, y, z)
Completely-symmetric elliptic integral of the second kind.
The function RG is defined as [1]_
.. math::
R_{\mathrm{G}}(x, y, z) =
\frac{1}{4} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2}
\left(\frac{x}{t + x} + \frac{y}{t + y} + \frac{z}{t + z}\right) t
dt
Parameters
----------
x, y, z : array_like
Real or complex input parameters. `x`, `y`, or `z` can be any number in
the complex plane cut along the negative real axis.
Returns
-------
R : ndarray
Value of the integral. If all of `x`, `y`, and `z` are real, the return
value is real. Otherwise, the return value is complex.
Notes
-----
The implementation uses the relation [1]_
.. math::
2 R_{\mathrm{G}}(x, y, z) =
z R_{\mathrm{F}}(x, y, z) -
\frac{1}{3} (x - z) (y - z) R_{\mathrm{D}}(x, y, z) +
\sqrt{\frac{x y}{z}}
and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can
be chosen as the pivot. When one of the arguments is close to zero, the AGM
method is applied instead. Other special cases are computed following Ref.
[2]_.
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric integral.
elliprd : Symmetric elliptic integral of the second kind.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
.. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E1
https://dlmf.nist.gov/19.20.ii
Examples
--------
The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and
``c`` is given by
.. math::
S = 4 \pi a b c R_{\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2).
>>> import numpy as np
>>> from scipy.special import elliprg
>>> def ellipsoid_area(a, b, c):
... r = 4.0 * np.pi * a * b * c
... return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c))
>>> print(ellipsoid_area(1, 3, 5))
108.62688289491807
""")
add_newdoc(
"elliprj",
r"""
elliprj(x, y, z, p)
Symmetric elliptic integral of the third kind.
The function RJ is defined as [1]_
.. math::
R_{\mathrm{J}}(x, y, z, p) =
\frac{3}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2}
(t + p)^{-1} dt
.. warning::
This function should be considered experimental when the inputs are
unbalanced. Check correctness with another independent implementation.
Parameters
----------
x, y, z, p : array_like
Real or complex input parameters. `x`, `y`, or `z` are numbers in
the complex plane cut along the negative real axis (subject to further
constraints, see Notes), and at most one of them can be zero. `p` must
be non-zero.
Returns
-------
R : ndarray
Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the
return value is real. Otherwise, the return value is complex.
If `p` is real and negative, while `x`, `y`, and `z` are real,
non-negative, and at most one of them is zero, the Cauchy principal
value is returned. [1]_ [2]_
Notes
-----
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order. [3]_ The algorithm is slightly
different from its earlier incarnation as it appears in [1]_, in that the
call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in
the inner loop. Asymptotic approximations are used where arguments differ
widely in the order of magnitude. [5]_
The input values are subject to certain sufficient but not necessary
constraints when the input arguments are complex. Notably, ``x``, ``y``, and
``z`` must have non-negative real parts, unless two of them are
non-negative and complex-conjugates to each other while the other is a real
non-negative number. [1]_ If the inputs do not satisfy the sufficient
condition described in Ref. [1]_ they are rejected outright with the output
set to NaN.
In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the
function ``elliprd`` should be preferred because of its less restrictive
domain.
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric integral.
elliprd : Symmetric elliptic integral of the second kind.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
References
----------
.. [1] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
.. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.20.iii
.. [3] B. C. Carlson, J. FitzSimmons, "Reduction Theorems for Elliptic
Integrands with the Square Root of Two Quadratic Factors," J.
Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000.
https://doi.org/10.1016/S0377-0427(00)00282-X
.. [4] F. Johansson, "Numerical Evaluation of Elliptic Functions, Elliptic
Integrals and Modular Forms," in J. Blumlein, C. Schneider, P.
Paule, eds., "Elliptic Integrals, Elliptic Functions and Modular
Forms in Quantum Field Theory," pp. 269-293, 2019 (Cham,
Switzerland: Springer Nature Switzerland)
https://arxiv.org/abs/1806.06725
https://doi.org/10.1007/978-3-030-04480-0
.. [5] B. C. Carlson, J. L. Gustafson, "Asymptotic Approximations for
Symmetric Elliptic Integrals," SIAM J. Math. Anls., vol. 25, no. 2,
pp. 288-303, 1994.
https://arxiv.org/abs/math/9310223
https://doi.org/10.1137/S0036141092228477
""")
add_newdoc("entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
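Examples
--------
A check of the defining formula for positive `x` (illustrative
values):
>>> import numpy as np
>>> from scipy.special import entr
>>> x = np.array([1.0, 0.5, 0.1])
>>> np.allclose(entr(x), -x * np.log(x))
True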
""")
add_newdoc("erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
z : array_like
Real or complex valued argument.
Returns
-------
res : ndarray
The values of the error function at the given points `z`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("erfc",
"""
erfc(x, out=None)
Complementary error function, ``1 - erf(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the complementary error function
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("erfi",
"""
erfi(z, out=None)
Imaginary error function, ``-i erf(i z)``.
Parameters
----------
z : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the imaginary error function
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("erfcx",
"""
erfcx(x, out=None)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the scaled complementary error function
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("erfinv",
"""Inverse of the error function.
Computes the inverse of the error function.
In the complex domain, there is no unique complex number w satisfying
erf(w) = z, so a true inverse function would be multivalued. When the
domain is restricted to the reals, -1 < x < 1, there is a unique real
number satisfying erf(erfinv(x)) = x.
Parameters
----------
y : ndarray
Argument at which to evaluate. Domain: [-1, 1]
Returns
-------
erfinv : ndarray
The inverse of erf of y, element-wise.
See Also
--------
erf : Error function of a complex argument
erfc : Complementary error function, ``1 - erf(x)``
erfcinv : Inverse of the complementary error function
Examples
--------
1) evaluating a float number
>>> from scipy import special
>>> special.erfinv(0.5)
0.4769362762044698
2) evaluating an ndarray
>>> import numpy as np
>>> from scipy import special
>>> y = np.linspace(-1.0, 1.0, num=10)
>>> special.erfinv(y)
array([ -inf, -0.86312307, -0.5407314 , -0.30457019, -0.0987901 ,
0.0987901 , 0.30457019, 0.5407314 , 0.86312307, inf])
""")
add_newdoc("erfcinv",
"""Inverse of the complementary error function.
Computes the inverse of the complementary error function.
In the complex domain, there is no unique complex number w satisfying
erfc(w) = z, so a true inverse function would be multivalued. When the
domain is restricted to the reals, 0 < x < 2, there is a unique real
number satisfying erfc(erfcinv(x)) = x.
It is related to the inverse of the error function by
``erfcinv(1 - x) = erfinv(x)``.
Parameters
----------
y : ndarray
Argument at which to evaluate. Domain: [0, 2]
Returns
-------
erfcinv : ndarray
The inverse of erfc of y, element-wise
See Also
--------
erf : Error function of a complex argument
erfc : Complementary error function, ``1 - erf(x)``
erfinv : Inverse of the error function
Examples
--------
1) evaluating a float number
>>> from scipy import special
>>> special.erfcinv(0.5)
0.4769362762044698
2) evaluating an ndarray
>>> import numpy as np
>>> from scipy import special
>>> y = np.linspace(0.0, 2.0, num=11)
>>> special.erfcinv(y)
array([ inf, 0.9061938 , 0.59511608, 0.37080716, 0.17914345,
-0. , -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,
-inf])
""")
add_newdoc("eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.42 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
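Examples
--------
A check of the hypergeometric representation above (illustrative
values):
>>> import numpy as np
>>> import scipy.special as sc
>>> n, alpha, beta, x = 2, 0.5, 1.5, 0.3
>>> coef = sc.poch(alpha + 1, n) / sc.gamma(n + 1)
>>> hyp = sc.hyp2f1(-n, 1 + alpha + beta + n, alpha + 1, (1 - x) / 2)
>>> np.allclose(sc.eval_jacobi(n, alpha, beta, x), coef * hyp)
True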
""")
add_newdoc("eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi
polynomial. See 22.5.2 in [AS]_ for details.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
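Examples
--------
A check of the defining relation to `eval_jacobi` (illustrative
values):
>>> import numpy as np
>>> import scipy.special as sc
>>> n, p, q, x = 2, 3.0, 2.0, 0.4
>>> direct = sc.eval_jacobi(n, p - q, q - 1, 2 * x - 1) / sc.binom(2 * n + p - 1, n)
>>> np.allclose(sc.eval_sh_jacobi(n, p, q, x), direct)
True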
""")
add_newdoc("eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.46 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
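Examples
--------
For ``alpha = 1`` the Gegenbauer polynomials reduce to the Chebyshev
polynomials of the second kind (a standard identity; illustrative
check):
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.linspace(-1, 1, 5)
>>> np.allclose(sc.eval_gegenbauer(3, 1.0, x), sc.eval_chebyu(3, x))
True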
""")
add_newdoc("eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.47 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
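Examples
--------
A check of the trigonometric identity
:math:`T_n(\cos\theta) = \cos(n\theta)` (illustrative values):
>>> import numpy as np
>>> import scipy.special as sc
>>> theta = np.linspace(0, np.pi, 5)
>>> np.allclose(sc.eval_chebyt(4, np.cos(theta)), np.cos(4 * theta))
True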
""")
add_newdoc("eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.48 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
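Examples
--------
>>> import scipy.special as sc
A quick check against the trigonometric form
:math:`U_n(\cos\theta) = \sin((n+1)\theta)/\sin\theta` (a standard
identity, not stated above); the endpoints are avoided so that
:math:`\sin\theta \ne 0`.
>>> n = 3
>>> theta = np.linspace(0.1, np.pi - 0.1, 5)
>>> np.allclose(sc.eval_chebyu(n, np.cos(theta)),
...             np.sin((n + 1)*theta) / np.sin(theta))
True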
""")
add_newdoc("eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second
kind. See 22.5.13 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import scipy.special as sc
They are a scaled version of the Chebyshev polynomials of the
second kind.
>>> x = np.linspace(-2, 2, 6)
>>> sc.eval_chebys(3, x)
array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])
>>> sc.eval_chebyu(3, x / 2)
array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])
""")
add_newdoc("eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind. See
22.5.11 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import scipy.special as sc
They are a scaled version of the Chebyshev polynomials of the
first kind.
>>> x = np.linspace(-2, 2, 6)
>>> sc.eval_chebyc(3, x)
array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])
>>> 2 * sc.eval_chebyt(3, x / 2)
array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])
""")
add_newdoc("eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind. See
22.5.14 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
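Examples
--------
>>> import scipy.special as sc
The defining relation :math:`T_n^*(x) = T_n(2x - 1)` can be
checked directly against `eval_chebyt`.
>>> x = np.linspace(0, 1, 5)
>>> np.allclose(sc.eval_sh_chebyt(3, x), sc.eval_chebyt(3, 2*x - 1))
True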
""")
add_newdoc("eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind. See
22.5.15 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebyshev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
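Examples
--------
>>> import scipy.special as sc
The defining relation :math:`U_n^*(x) = U_n(2x - 1)` can be
checked directly against `eval_chebyu`.
>>> x = np.linspace(0, 1, 5)
>>> np.allclose(sc.eval_sh_chebyu(3, x), sc.eval_chebyu(3, 2*x - 1))
True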
""")
add_newdoc("eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.49 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy.special import eval_legendre
Evaluate the zero-order Legendre polynomial at x = 0
>>> eval_legendre(0, 0)
1.0
Evaluate the first-order Legendre polynomial between -1 and 1
>>> X = np.linspace(-1, 1, 5) # Domain of Legendre polynomials
>>> eval_legendre(1, X)
array([-1. , -0.5, 0. , 0.5, 1. ])
Evaluate Legendre polynomials of order 0 through 4 at x = 0
>>> N = range(0, 5)
>>> eval_legendre(N, 0)
array([ 1. , 0. , -0.5 , 0. , 0.375])
Plot Legendre polynomials of order 0 through 4
>>> X = np.linspace(-1, 1)
>>> import matplotlib.pyplot as plt
>>> for n in range(0, 5):
... y = eval_legendre(n, X)
... plt.plot(X, y, label=r'$P_{}(x)$'.format(n))
>>> plt.title("Legendre Polynomials")
>>> plt.xlabel("x")
>>> plt.ylabel(r'$P_n(x)$')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial. See 22.2.11 in [AS]_
for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
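Examples
--------
>>> import scipy.special as sc
The defining relation :math:`P_n^*(x) = P_n(2x - 1)` can be
checked directly against `eval_legendre`.
>>> x = np.linspace(0, 1, 5)
>>> np.allclose(sc.eval_sh_legendre(2, x), sc.eval_legendre(2, 2*x - 1))
True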
""")
add_newdoc("eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre
polynomials are the special case where :math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
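Examples
--------
>>> import scipy.special as sc
As noted above, the Laguerre polynomials are the special case
:math:`\alpha = 0`, which gives a simple consistency check against
`eval_laguerre`.
>>> x = np.array([0.5, 1.0, 2.0])
>>> np.allclose(sc.eval_genlaguerre(3, 0, x), sc.eval_laguerre(3, x))
True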
""")
add_newdoc("eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
See 22.5.16 and 22.5.54 in [AS]_ for details. When :math:`n` is an
integer the result is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
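Examples
--------
>>> import scipy.special as sc
The hypergeometric representation :math:`L_n(x) = {}_1F_1(-n, 1, x)`
given above can be checked against `hyp1f1`; agreement is expected
only up to floating-point round-off.
>>> n = 2
>>> x = np.array([0.5, 1.0, 2.0])
>>> np.allclose(sc.eval_laguerre(n, x), sc.hyp1f1(-n, 1, x))
True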
""")
add_newdoc("eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`. See 22.11.7 in
[AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
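Examples
--------
>>> import scipy.special as sc
A spot check against the explicit low-order polynomial
:math:`H_2(x) = 4x^2 - 2` (a standard result that follows from the
Rodrigues-type formula above):
>>> x = np.linspace(-2, 2, 5)
>>> np.allclose(sc.eval_hermite(2, x), 4*x**2 - 2)
True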
""")
add_newdoc("eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in
[AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
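Examples
--------
>>> import scipy.special as sc
A spot check against the explicit low-order polynomial
:math:`He_2(x) = x^2 - 1` (a standard result that follows from the
Rodrigues-type formula above):
>>> x = np.linspace(-2, 2, 5)
>>> np.allclose(sc.eval_hermitenorm(2, x), x**2 - 1)
True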
""")
add_newdoc("exp1",
r"""
exp1(z, out=None)
Exponential integral E1.
For complex :math:`z \ne 0` the exponential integral can be defined as
[1]_
.. math::
E_1(z) = \int_z^\infty \frac{e^{-t}}{t} dt,
where the path of the integral does not cross the negative real
axis or pass through the origin.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral E1
See Also
--------
expi : exponential integral :math:`Ei`
expn : generalization of :math:`E_1`
Notes
-----
For :math:`x > 0` it is related to the exponential integral
:math:`Ei` (see `expi`) via the relation
.. math::
E_1(x) = -Ei(-x).
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.1
https://dlmf.nist.gov/6.2#E1
Examples
--------
>>> import scipy.special as sc
It has a pole at 0.
>>> sc.exp1(0)
inf
It has a branch cut on the negative real axis.
>>> sc.exp1(-1)
nan
>>> sc.exp1(complex(-1, 0))
(-1.8951178163559368-3.141592653589793j)
>>> sc.exp1(complex(-1, -0.0))
(-1.8951178163559368+3.141592653589793j)
It approaches 0 along the positive real axis.
>>> sc.exp1([1, 10, 100, 1000])
array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00])
It is related to `expi`.
>>> x = np.array([1, 2, 3, 4])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc("exp10",
"""
exp10(x)
Compute ``10**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``10**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp10
>>> exp10(3)
1000.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp10(x)
array([[ 0.1 , 0.31622777, 1. ],
[ 3.16227766, 10. , 31.6227766 ]])
""")
add_newdoc("exp2",
"""
exp2(x)
Compute ``2**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``2**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp2
>>> exp2(3)
8.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp2(x)
array([[ 0.5 , 0.70710678, 1. ],
[ 1.41421356, 2. , 2.82842712]])
""")
add_newdoc("expi",
r"""
expi(x, out=None)
Exponential integral Ei.
For real :math:`x`, the exponential integral is defined as [1]_
.. math::
Ei(x) = \int_{-\infty}^x \frac{e^t}{t} dt.
For :math:`x > 0` the integral is understood as a Cauchy principal
value.
It is extended to the complex plane by analytic continuation of
the function on the interval :math:`(0, \infty)`. The complex
variant has a branch cut on the negative real axis.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral
Notes
-----
The exponential integrals :math:`E_1` and :math:`Ei` satisfy the
relation
.. math::
E_1(x) = -Ei(-x)
for :math:`x > 0`.
See Also
--------
exp1 : Exponential integral :math:`E_1`
expn : Generalized exponential integral :math:`E_n`
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.5
https://dlmf.nist.gov/6.2#E5
Examples
--------
>>> import scipy.special as sc
It is related to `exp1`.
>>> x = np.array([1, 2, 3, 4])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
The complex variant has a branch cut on the negative real axis.
>>> import scipy.special as sc
>>> sc.expi(-1 + 1e-12j)
(-0.21938393439552062+3.1415926535894254j)
>>> sc.expi(-1 - 1e-12j)
(-0.21938393439552062-3.1415926535894254j)
As the complex variant approaches the branch cut, the real parts
approach the value of the real variant.
>>> sc.expi(-1)
-0.21938393439552062
The SciPy implementation returns the real variant for complex
values on the branch cut.
>>> sc.expi(complex(-1, 0.0))
(-0.21938393439552062-0j)
>>> sc.expi(complex(-1, -0.0))
(-0.21938393439552062-0j)
""")
add_newdoc('expit',
"""
expit(x)
Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.
The expit function, also known as the logistic sigmoid function, is
defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the
logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are `expit` of the corresponding entry of x.
See Also
--------
logit
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import expit, logit
>>> expit([-np.inf, -1.5, 0, 1.5, np.inf])
array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ])
`logit` is the inverse of `expit`:
>>> logit(expit([-2.5, 0, 3.1, 5.0]))
array([-2.5, 0. , 3.1, 5. ])
Plot expit(x) for x in [-6, 6]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-6, 6, 121)
>>> y = expit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.xlim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('expit(x)')
>>> plt.show()
""")
add_newdoc("expm1",
"""
expm1(x)
Compute ``exp(x) - 1``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``expm1(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``exp(x) - 1`` computed element-wise.
Examples
--------
>>> from scipy.special import expm1
>>> expm1(1.0)
1.7182818284590451
>>> expm1([-0.2, -0.1, 0, 0.1, 0.2])
array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])
The exact value of ``exp(7.5e-13) - 1`` is::
7.5000000000028125000000007031250000001318...*10**-13.
Here is what ``expm1(7.5e-13)`` gives:
>>> expm1(7.5e-13)
7.5000000000028135e-13
Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in
a "catastrophic" loss of precision:
>>> np.exp(7.5e-13) - 1
7.5006667543675576e-13
""")
add_newdoc("expn",
r"""
expn(n, x, out=None)
Generalized exponential integral En.
For integer :math:`n \geq 0` and real :math:`x \geq 0` the
generalized exponential integral is defined as [dlmf]_
.. math::
E_n(x) = x^{n - 1} \int_x^\infty \frac{e^{-t}}{t^n} dt.
Parameters
----------
n : array_like
Non-negative integers
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the generalized exponential integral
See Also
--------
exp1 : special case of :math:`E_n` for :math:`n = 1`
expi : related to :math:`E_n` when :math:`n = 1`
References
----------
.. [dlmf] Digital Library of Mathematical Functions, 8.19.2
https://dlmf.nist.gov/8.19#E2
Examples
--------
>>> import scipy.special as sc
Its domain is nonnegative n and x.
>>> sc.expn(-1, 1.0), sc.expn(1, -1.0)
(nan, nan)
It has a pole at ``x = 0`` for ``n = 0, 1``; for larger ``n`` it
is equal to ``1 / (n - 1)``.
>>> sc.expn([0, 1, 2, 3, 4], 0)
array([ inf, inf, 1. , 0.5 , 0.33333333])
For n equal to 0 it reduces to ``exp(-x) / x``.
>>> x = np.array([1, 2, 3, 4])
>>> sc.expn(0, x)
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
>>> np.exp(-x) / x
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
For n equal to 1 it reduces to `exp1`.
>>> sc.expn(1, x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc("exprel",
r"""
exprel(x)
Relative error exponential, ``(exp(x) - 1)/x``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``exprel(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : ndarray
Input array. `x` must contain real numbers.
Returns
-------
float
``(exp(x) - 1)/x``, computed element-wise.
See Also
--------
expm1
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.special import exprel
>>> exprel(0.01)
1.0050167084168056
>>> exprel([-0.25, -0.1, 0, 0.1, 0.25])
array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167])
Compare ``exprel(5e-9)`` to the naive calculation. The exact value
is ``1.00000000250000000416...``.
>>> exprel(5e-9)
1.0000000025
>>> (np.exp(5e-9) - 1)/5e-9
0.99999999392252903
""")
add_newdoc("fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative distribution function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
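Examples
--------
>>> import scipy.special as sc
The relation to the regularized incomplete beta function stated in
the Notes can be verified with `betainc`; the two computations
should agree up to floating-point round-off.
>>> dfn, dfd, x = 3.0, 10.0, 2.0
>>> lhs = sc.fdtr(dfn, dfd, x)
>>> rhs = sc.betainc(dfn/2, dfd/2, dfn*x/(dfd + dfn*x))
>>> bool(np.isclose(lhs, rhs))
True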
""")
add_newdoc("fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
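Examples
--------
>>> import scipy.special as sc
Since `fdtrc` is the complement of `fdtr`, the two should sum to 1
(up to floating-point round-off).
>>> dfn, dfd, x = 3.0, 10.0, 2.0
>>> bool(np.isclose(sc.fdtr(dfn, dfd, x) + sc.fdtrc(dfn, dfd, x), 1.0))
True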
""")
add_newdoc("fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
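Examples
--------
>>> import scipy.special as sc
A round trip through `fdtr` recovers the original quantile, up to
the accuracy of the numerical inversion.
>>> dfn, dfd, x = 3.0, 10.0, 2.0
>>> p = sc.fdtr(dfn, dfd, x)
>>> bool(np.isclose(sc.fdtri(dfn, dfd, p), x))
True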
""")
add_newdoc("fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
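Examples
--------
>>> import scipy.special as sc
A round trip through `fdtr` recovers `dfd`, up to the accuracy of
the numerical inversion.
>>> p = sc.fdtr(3.0, 10.0, 2.0)
>>> bool(np.isclose(sc.fdtridfd(3.0, p, 2.0), 10.0))
True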
""")
add_newdoc("fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("fresnel",
r"""
fresnel(z, out=None)
Fresnel integrals.
The Fresnel integrals are defined as
.. math::
S(z) &= \int_0^z \sin(\pi t^2 /2) dt \\
C(z) &= \int_0^z \cos(\pi t^2 /2) dt.
See [dlmf]_ for details.
Parameters
----------
z : array_like
Real or complex valued argument
out : 2-tuple of ndarrays, optional
Optional output arrays for the function results
Returns
-------
S, C : 2-tuple of scalar or ndarray
Values of the Fresnel integrals
See Also
--------
fresnel_zeros : zeros of the Fresnel integrals
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/7.2#iii
Examples
--------
>>> import scipy.special as sc
As z goes to infinity along the real axis, S and C converge to 0.5.
>>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])
>>> S
array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ])
>>> C
array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ])
They are related to the error function `erf`.
>>> z = np.array([1, 2, 3, 4])
>>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z
>>> S, C = sc.fresnel(z)
>>> C + 1j*S
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
>>> 0.5 * (1 + 1j) * sc.erf(zeta)
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
""")
add_newdoc("gamma",
r"""
gamma(z)
Gamma function.
The gamma function is defined as
.. math::
\Gamma(z) = \int_0^\infty t^{z-1} e^{-t} dt
for :math:`\Re(z) > 0` and is extended to the rest of the complex
plane by analytic continuation. See [dlmf]_ for more details.
Parameters
----------
z : array_like
Real or complex valued argument
Returns
-------
scalar or ndarray
Values of the gamma function
Notes
-----
The gamma function is often referred to as the generalized
factorial since :math:`\Gamma(n + 1) = n!` for natural numbers
:math:`n`. More generally it satisfies the recurrence relation
:math:`\Gamma(z + 1) = z \cdot \Gamma(z)` for complex :math:`z`,
which, combined with the fact that :math:`\Gamma(1) = 1`, implies
the above identity for :math:`z = n`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> from scipy.special import gamma, factorial
>>> gamma([0, 0.5, 1, 5])
array([ inf, 1.77245385, 1. , 24. ])
>>> z = 2.5 + 1j
>>> gamma(z)
(0.77476210455108352+0.70763120437959293j)
>>> gamma(z+1), z*gamma(z) # Recurrence property
((1.2292740569981171+2.5438401155000685j),
(1.2292740569981158+2.5438401155000658j))
>>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi)
3.1415926535897927
Plot gamma(x) for real x
>>> x = np.linspace(-3.5, 5.5, 2251)
>>> y = gamma(x)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')
>>> k = np.arange(1, 7)
>>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,
... label='(x-1)!, x = 1, 2, ...')
>>> plt.xlim(-3.5, 5.5)
>>> plt.ylim(-10, 25)
>>> plt.grid()
>>> plt.xlabel('x')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
It is defined as
.. math::
P(a, x) = \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
Returns
-------
scalar or ndarray
Values of the lower incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the CDF of the gamma distribution, so it starts at 0 and
monotonically increases to 1.
>>> sc.gammainc(0.5, [0, 1, 10, 100])
array([0. , 0.84270079, 0.99999226, 1. ])
It is equal to one minus the upper incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammainc(a, x)
0.6289066304773024
>>> 1 - sc.gammaincc(a, x)
0.6289066304773024
""")
add_newdoc("gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
It is defined as
.. math::
Q(a, x) = \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
Returns
-------
scalar or ndarray
Values of the upper incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammainc` is the regularized lower
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the survival function of the gamma distribution, so it
starts at 1 and monotonically decreases to 0.
>>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])
array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,
0.00000000e+00])
It is equal to one minus the lower incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammaincc(a, x)
0.37109336952269756
>>> 1 - sc.gammainc(a, x)
0.37109336952269756
""")
add_newdoc("gammainccinv",
"""
gammainccinv(a, y)
Inverse of the regularized upper incomplete gamma function.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper
incomplete gamma function; see `gammaincc`. This is well-defined
because the upper incomplete gamma function is monotonic as can
be seen from its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Argument between 0 and 1, inclusive
Returns
-------
scalar or ndarray
Values of the inverse of the upper incomplete gamma function
See Also
--------
gammaincc : regularized upper incomplete gamma function
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at infinity and monotonically decreases to 0.
>>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])
array([ inf, 1.35277173, 0.22746821, 0. ])
It inverts the upper incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammaincc(a, sc.gammainccinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 50]
>>> sc.gammainccinv(a, sc.gammaincc(a, x))
array([ 0., 10., 50.])
""")
add_newdoc("gammaincinv",
"""
gammaincinv(a, y)
Inverse to the regularized lower incomplete gamma function.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower
incomplete gamma function; see `gammainc`. This is well-defined
because the lower incomplete gamma function is monotonic as can be
seen from its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Parameter between 0 and 1, inclusive
Returns
-------
scalar or ndarray
Values of the inverse of the lower incomplete gamma function
See Also
--------
gammainc : regularized lower incomplete gamma function
gammaincc : regularized upper incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at 0 and monotonically increases to infinity.
>>> sc.gammaincinv(0.5, [0, 0.1, 0.5, 1])
array([0. , 0.00789539, 0.22746821, inf])
It inverts the lower incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammainc(a, sc.gammaincinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 25]
>>> sc.gammaincinv(a, sc.gammainc(a, x))
array([ 0. , 10. , 25.00001465])
""")
add_newdoc("gammaln",
r"""
gammaln(x, out=None)
Logarithm of the absolute value of the gamma function.
Defined as
.. math::
\ln(\lvert\Gamma(x)\rvert)
where :math:`\Gamma` is the gamma function. For more details on
the gamma function, see [dlmf]_.
Parameters
----------
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the log of the absolute value of gamma
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
It is the same function as the Python standard library function
:func:`math.lgamma`.
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal
with complex numbers via the relation ``exp(gammaln(x)) =
gammasgn(x) * gamma(x)``.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
Examples
--------
>>> import scipy.special as sc
It has two positive zeros.
>>> sc.gammaln([1, 2])
array([0., 0.])
It has poles at nonpositive integers.
>>> sc.gammaln([0, -1, -2, -3, -4])
array([inf, inf, inf, inf, inf])
It asymptotically approaches ``x * log(x)`` (Stirling's formula).
>>> x = np.array([1e10, 1e20, 1e40, 1e80])
>>> sc.gammaln(x)
array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82])
>>> x * np.log(x)
array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82])
""")
add_newdoc("gammasgn",
r"""
gammasgn(x)
Sign of the gamma function.
It is defined as
.. math::
\text{gammasgn}(x) =
\begin{cases}
+1 & \Gamma(x) > 0 \\
-1 & \Gamma(x) < 0
\end{cases}
where :math:`\Gamma` is the gamma function; see `gamma`. This
definition is complete since the gamma function is never zero;
see the discussion after [dlmf]_.
Parameters
----------
x : array_like
Real argument
Returns
-------
scalar or ndarray
Sign of the gamma function
Notes
-----
The gamma function can be computed as ``gammasgn(x) *
np.exp(gammaln(x))``.
See Also
--------
gamma : the gamma function
gammaln : log of the absolute value of the gamma function
loggamma : analytic continuation of the log of the gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> import scipy.special as sc
It is 1 for `x > 0`.
>>> sc.gammasgn([1, 2, 3, 4])
array([1., 1., 1., 1.])
It alternates between -1 and 1 for negative integers.
>>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])
array([-1., 1., -1., 1.])
It can be used to compute the gamma function.
>>> x = [1.5, 0.5, -0.5, -1.5]
>>> sc.gammasgn(x) * np.exp(sc.gammaln(x))
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
>>> sc.gamma(x)
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
""")
add_newdoc("gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
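Examples
--------
>>> import scipy.special as sc
As the Notes indicate, `gdtr` reduces to the regularized lower
incomplete gamma function after rescaling the argument; this can
be checked against `gammainc`.
>>> a, b, x = 1.2, 3.4, 5.6
>>> bool(np.isclose(sc.gdtr(a, b, x), sc.gammainc(b, a*x)))
True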
""")
add_newdoc("gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtrix
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
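Examples
--------
>>> import scipy.special as sc
Analogously to `gdtr`, the survival function reduces to the
regularized upper incomplete gamma function `gammaincc` after
rescaling the argument.
>>> a, b, x = 1.2, 3.4, 5.6
>>> bool(np.isclose(sc.gdtrc(a, b, x), sc.gammaincc(b, a*x)))
True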
""")
add_newdoc("gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the pth quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
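Examples
--------
>>> import scipy.special as sc
For real argument, :math:`H^{(1)}_v(z) = J_v(z) + \imath Y_v(z)`
(a standard identity, not stated above), so the result can be
cross-checked against `jv` and `yv`.
>>> v, z = 1, 2.0
>>> bool(np.isclose(sc.hankel1(v, z), sc.jv(v, z) + 1j*sc.yv(v, z)))
True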
""")
add_newdoc("hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
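Examples
--------
>>> import scipy.special as sc
For real argument, :math:`H^{(2)}_v(z) = J_v(z) - \imath Y_v(z)`
(a standard identity, not stated above), so the result can be
cross-checked against `jv` and `yv`.
>>> v, z = 1, 2.0
>>> bool(np.isclose(sc.hankel2(v, z), sc.jv(v, z) - 1j*sc.yv(v, z)))
True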
""")
add_newdoc("hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("hyp0f1",
r"""
hyp0f1(v, z, out=None)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v : array_like
Real-valued parameter
z : array_like
Real- or complex-valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The confluent hypergeometric limit function
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`zf''(z) + vf'(z) =
f(z)`. See [1]_ for more information.
References
----------
.. [1] Wolfram MathWorld, "Confluent Hypergeometric Limit Function",
http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html
Examples
--------
>>> import scipy.special as sc
It is one when `z` is zero.
>>> sc.hyp0f1(1, 0)
1.0
It is the limit of the confluent hypergeometric function as `q`
goes to infinity.
>>> q = np.array([1, 10, 100, 1000])
>>> v = 1
>>> z = 1
>>> sc.hyp1f1(q, v, z / q)
array([2.71828183, 2.31481985, 2.28303778, 2.27992985])
>>> sc.hyp0f1(v, z)
2.2795853023360673
It is related to Bessel functions.
>>> n = 1
>>> x = np.linspace(0, 1, 5)
>>> sc.jv(n, x)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
>>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
""")
add_newdoc("hyp1f1",
r"""
hyp1f1(a, b, x, out=None)
Confluent hypergeometric function 1F1.
The confluent hypergeometric function is defined by the series
.. math::
{}_1F_1(a; b; x) = \sum_{k = 0}^\infty \frac{(a)_k}{(b)_k k!} x^k.
See [dlmf]_ for more details. Here :math:`(\cdot)_k` is the
Pochhammer symbol; see `poch`.
Parameters
----------
a, b : array_like
Real parameters
x : array_like
Real or complex argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the confluent hypergeometric function
See also
--------
hyperu : another confluent hypergeometric function
hyp0f1 : confluent hypergeometric limit function
hyp2f1 : Gaussian hypergeometric function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/13.2#E2
Examples
--------
>>> import scipy.special as sc
It is one when `x` is zero:
>>> sc.hyp1f1(0.5, 0.5, 0)
1.0
It is singular when `b` is a nonpositive integer.
>>> sc.hyp1f1(0.5, -1, 0)
inf
It is a polynomial when `a` is a nonpositive integer.
>>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.hyp1f1(a, b, x)
array([-1., -3., -5., -7.])
>>> 1 + (a / b) * x
array([-1., -3., -5., -7.])
It reduces to the exponential function when `a = b`.
>>> sc.hyp1f1(2, 2, [1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
>>> np.exp([1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
""")
add_newdoc("hyp2f1",
r"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z)
Parameters
----------
a, b, c : array_like
Arguments, should be real-valued.
z : array_like
Argument, real or complex.
Returns
-------
hyp2f1 : scalar or ndarray
The values of the gaussian hypergeometric function.
See also
--------
hyp0f1 : confluent hypergeometric limit function.
hyp1f1 : Kummer's (confluent hypergeometric) function.
Notes
-----
This function is defined for :math:`|z| < 1` as
.. math::
\mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
\frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
and defined on the rest of the complex z-plane by analytic
continuation [1]_.
Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree :math:`n`.
The implementation for complex values of ``z`` is described in [2]_,
except for ``z`` in the region defined by
.. math::
0.9 \le \left|z\right| < 1.1,
\left|1 - z\right| \ge 0.9,
\mathrm{Re}(z) \ge 0
in which the implementation follows [4]_.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/15.2
.. [2] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
.. [3] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [4] J.L. Lopez and N.M. Temme, "New series expansions of the Gauss
hypergeometric function", Adv Comput Math 39, 349-365 (2013).
https://doi.org/10.1007/s10444-012-9283-y
Examples
--------
>>> import scipy.special as sc
It has poles when `c` is a negative integer.
>>> sc.hyp2f1(1, 1, -2, 1)
inf
It is a polynomial when `a` or `b` is a negative integer.
>>> a, b, c = -1, 1, 1.5
>>> z = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, c, z)
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
>>> 1 + a * b * z / c
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
It is symmetric in `a` and `b`.
>>> a = np.linspace(0, 1, 5)
>>> b = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
>>> sc.hyp2f1(b, a, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
It contains many other functions as special cases.
>>> z = 0.5
>>> sc.hyp2f1(1, 1, 2, z)
1.3862943611198901
>>> -np.log(1 - z) / z
1.3862943611198906
>>> sc.hyp2f1(0.5, 1, 1.5, z**2)
1.098612288668109
>>> np.log((1 + z) / (1 - z)) / (2 * z)
1.0986122886681098
>>> sc.hyp2f1(0.5, 1, 1.5, -z**2)
0.9272952180016117
>>> np.arctan(z) / z
0.9272952180016123
""")
add_newdoc("hyperu",
r"""
hyperu(a, b, x, out=None)
Confluent hypergeometric function U
It is defined as the solution to the equation
.. math::
x \frac{d^2w}{dx^2} + (b - x) \frac{dw}{dx} - aw = 0
which satisfies the property
.. math::
U(a, b, x) \sim x^{-a}
as :math:`x \to \infty`. See [dlmf]_ for more details.
Parameters
----------
a, b : array_like
Real-valued parameters
x : array_like
Real-valued argument
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of `U`
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/13.2#E6
Examples
--------
>>> import scipy.special as sc
It has a branch cut along the negative `x` axis.
>>> x = np.linspace(-0.1, -10, 5)
>>> sc.hyperu(1, 1, x)
array([nan, nan, nan, nan, nan])
It approaches zero as `x` goes to infinity.
>>> x = np.array([1, 10, 100])
>>> sc.hyperu(1, 1, x)
array([0.59634736, 0.09156333, 0.00990194])
It satisfies Kummer's transformation.
>>> a, b, x = 2, 1, 1
>>> sc.hyperu(a, b, x)
0.1926947246463881
>>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)
0.1926947246463881
""")
add_newdoc("i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
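Examples
--------
>>> import scipy.special as sc
`i0` is the order-0 special case of `iv`, which provides a simple
consistency check (agreement up to floating-point round-off).
>>> bool(np.isclose(sc.i0(1.0), sc.iv(0, 1.0)))
True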
""")
add_newdoc("i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
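Examples
--------
>>> import scipy.special as sc
The defining relation ``i0e(x) = exp(-abs(x)) * i0(x)`` can be
checked directly for moderate arguments.
>>> x = np.array([0.5, 1.0, 5.0])
>>> np.allclose(sc.i0e(x), np.exp(-np.abs(x)) * sc.i0(x))
True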
""")
add_newdoc("i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("it2i0k0",
r"""
it2i0k0(x, out=None)
Integrals related to modified Bessel functions of order 0.
Computes the integrals
.. math::
\int_0^x \frac{I_0(t) - 1}{t} dt \\
\int_x^\infty \frac{K_0(t)}{t} dt.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ii0 : scalar or ndarray
The integral for `i0`
ik0 : scalar or ndarray
The integral for `k0`
""")
add_newdoc("it2j0y0",
r"""
it2j0y0(x, out=None)
Integrals related to Bessel functions of the first kind of order 0.
Computes the integrals
.. math::
\int_0^x \frac{1 - J_0(t)}{t} dt \\
\int_x^\infty \frac{Y_0(t)}{t} dt.
For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ij0 : scalar or ndarray
The integral for `j0`
iy0 : scalar or ndarray
The integral for `y0`
""")
add_newdoc("it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
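Examples
--------
A sketch of a cross-check against direct numerical quadrature of
`airy` (the upper limit is chosen arbitrarily):
>>> from scipy.integrate import quad
>>> from scipy.special import airy, itairy
>>> apt, bpt, ant, bnt = itairy(1.)
>>> np.isclose(apt, quad(lambda t: airy(t)[0], 0, 1)[0])
True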
""")
add_newdoc("iti0k0",
r"""
iti0k0(x, out=None)
Integrals of modified Bessel functions of order 0.
Computes the integrals
.. math::
\int_0^x I_0(t) dt \\
\int_0^x K_0(t) dt.
For more on :math:`I_0` and :math:`K_0` see `i0` and `k0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ii0 : scalar or ndarray
The integral for `i0`
ik0 : scalar or ndarray
The integral for `k0`
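Examples
--------
A sketch of a cross-check of the first integral against direct
quadrature of `i0` (the upper limit is chosen arbitrarily):
>>> from scipy.integrate import quad
>>> from scipy.special import iti0k0, i0
>>> ii0, ik0 = iti0k0(2.)
>>> np.isclose(ii0, quad(i0, 0, 2)[0])
True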
""")
add_newdoc("itj0y0",
r"""
itj0y0(x, out=None)
Integrals of Bessel functions of the first kind of order 0.
Computes the integrals
.. math::
\int_0^x J_0(t) dt \\
\int_0^x Y_0(t) dt.
For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ij0 : scalar or ndarray
The integral of `j0`
iy0 : scalar or ndarray
The integral of `y0`
""")
add_newdoc("itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
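Examples
--------
A sketch of a cross-check against direct quadrature of `struve`
(the upper limit is chosen arbitrarily):
>>> from scipy.integrate import quad
>>> from scipy.special import itstruve0, struve
>>> np.isclose(itstruve0(2.), quad(lambda t: struve(0, t), 0, 2)[0])
True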
""")
add_newdoc("iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
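Examples
--------
A quick numerical check of the negative-order formula quoted above
(order and test points chosen arbitrarily):
>>> import scipy.special as sc
>>> v, x = 0.5, np.array([1.0, 2.0, 3.0])
>>> np.allclose(sc.iv(-v, x),
...             sc.iv(v, x) + 2/np.pi * np.sin(np.pi*v) * sc.kv(v, x))
True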
""")
add_newdoc("ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
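Examples
--------
For real arguments the defining relation can be verified directly,
and the scaled form stays finite where `iv` overflows (values
chosen arbitrarily):
>>> import scipy.special as sc
>>> x = np.array([1.0, 10.0, 100.0])
>>> np.allclose(sc.ive(1, x), sc.iv(1, x) * np.exp(-np.abs(x)))
True
>>> np.isinf(sc.iv(1, 1000.)) and np.isfinite(sc.ive(1, 1000.))
True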
""")
add_newdoc("j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv : Bessel function of real order and complex argument.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
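Examples
--------
A small sanity check at the origin and at the first zero of
:math:`J_0` (obtained from `jn_zeros`):
>>> import scipy.special as sc
>>> sc.j0(0.)
1.0
>>> np.isclose(sc.j0(sc.jn_zeros(0, 1)[0]), 0.0)
True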
""")
add_newdoc("j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
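Examples
--------
A minimal check that `jn` and `jv` agree at integer order (test
points chosen arbitrarily):
>>> from scipy.special import jn, jv
>>> x = np.linspace(0, 5, 6)
>>> np.allclose(jn(2, x), jv(2, x))
True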
""")
add_newdoc("jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
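Examples
--------
At negative integer orders the reflection formula above reduces to
:math:`J_{-n}(z) = (-1)^n J_n(z)`, which can be checked numerically
(test points chosen arbitrarily):
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0])
>>> np.allclose(sc.jv(-2, x), sc.jv(2, x))
True
>>> np.allclose(sc.jv(-3, x), -sc.jv(3, x))
True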
""")
add_newdoc("jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of the first kind of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
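Examples
--------
For a complex argument the scaling in the definition can be
verified directly (the argument is chosen arbitrarily):
>>> import scipy.special as sc
>>> z = 1 + 50j
>>> np.isclose(sc.jve(1, z), sc.jv(1, z) * np.exp(-abs(z.imag)))
True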
""")
add_newdoc("k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
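Examples
--------
A minimal illustration of the scaling: `k0` underflows to zero for
large arguments while `k0e` remains usable (the argument is chosen
arbitrarily):
>>> import scipy.special as sc
>>> sc.k0(800.)
0.0
>>> np.isfinite(sc.k0e(800.)) and sc.k0e(800.) > 0
True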
""")
add_newdoc("k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("kei",
r"""
kei(x, out=None)
Kelvin function kei.
Defined as
.. math::
\mathrm{kei}(x) = \Im[K_0(x e^{\pi i / 4})]
where :math:`K_0` is the modified Bessel function of the second
kind (see `kv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
ker : the corresponding real part
keip : the derivative of kei
kv : modified Bessel function of the second kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using the modified Bessel function of the
second kind.
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).imag
array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])
>>> sc.kei(x)
array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])
""")
add_newdoc("keip",
r"""
keip(x, out=None)
Derivative of the Kelvin function kei.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of kei.
See Also
--------
kei
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
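Examples
--------
A sketch of a central finite-difference check of the derivative
(point and step chosen arbitrarily):
>>> import scipy.special as sc
>>> x, h = 2.0, 1e-6
>>> np.isclose(sc.keip(x), (sc.kei(x + h) - sc.kei(x - h)) / (2*h))
True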
""")
add_newdoc("kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
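Examples
--------
A quick consistency check against the individual Kelvin functions
`ber` and `bei` (the argument is chosen arbitrarily):
>>> from scipy.special import kelvin, ber, bei
>>> Be, Ke, Bep, Kep = kelvin(2.)
>>> np.isclose(Be.real, ber(2.)) and np.isclose(Be.imag, bei(2.))
True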
""")
add_newdoc("ker",
r"""
ker(x, out=None)
Kelvin function ker.
Defined as
.. math::
\mathrm{ker}(x) = \Re[K_0(x e^{\pi i / 4})]
where :math:`K_0` is the modified Bessel function of the second
kind (see `kv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
kei : the corresponding imaginary part
kerp : the derivative of ker
kv : modified Bessel function of the second kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using the modified Bessel function of the
second kind.
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real
array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
>>> sc.ker(x)
array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
""")
add_newdoc("kerp",
r"""
kerp(x, out=None)
Derivative of the Kelvin function ker.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the derivative of ker.
See Also
--------
ker
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("kl_div",
r"""
kl_div(x, y, out=None)
Elementwise function for computing Kullback-Leibler divergence.
.. math::
\mathrm{kl\_div}(x, y) =
\begin{cases}
x \log(x / y) - x + y & x > 0, y > 0 \\
y & x = 0, y \ge 0 \\
\infty & \text{otherwise}
\end{cases}
Parameters
----------
x, y : array_like
Real arguments
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the Kullback-Leibler divergence.
See Also
--------
entr, rel_entr
Notes
-----
.. versionadded:: 0.15.0
This function is non-negative and is jointly convex in `x` and `y`.
The origin of this function is in convex programming; see [1]_ for
details. This is why the function contains the extra :math:`-x
+ y` terms over what might be expected from the Kullback-Leibler
divergence. For a version of the function without the extra terms,
see `rel_entr`.
References
----------
.. [1] Grant, Boyd, and Ye, "CVX: Matlab Software for Disciplined Convex
Programming", http://cvxr.com/cvx/
""")
add_newdoc("kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `x`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("kolmogi",
"""
kolmogi(p)
Inverse Survival Function of Kolmogorov distribution
It is the inverse function to `kolmogorov`.
Returns y such that ``kolmogorov(y) == p``.
Parameters
----------
p : float array_like
Probability
Returns
-------
float
The value(s) of kolmogi(p)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogorov : The Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
>>> from scipy.special import kolmogi
>>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769,
0.57117327, 0. ])
""")
add_newdoc("kolmogorov",
r"""
kolmogorov(y)
Complementary cumulative distribution (Survival Function) function of
Kolmogorov distribution.
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
of a two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that ``sqrt(n) * max absolute deviation > y``.
Parameters
----------
y : float array_like
Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
multiplied by sqrt(n).
Returns
-------
float
The value(s) of kolmogorov(y)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogi : The Inverse Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
Show the probability of a gap at least as big as 0, 0.5 and 1.0.
>>> from scipy.special import kolmogorov
>>> from scipy.stats import kstwobign
>>> kolmogorov([0, 0.5, 1.0])
array([ 1. , 0.96394524, 0.26999967])
Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
the target distribution, a Normal(0, 1) distribution.
>>> from scipy.stats import norm, laplace
>>> rng = np.random.default_rng()
>>> n = 1000
>>> lap01 = laplace(0, 1)
>>> x = np.sort(lap01.rvs(n, random_state=rng))
>>> np.mean(x), np.std(x)
(-0.05841730131499543, 1.3968109101997568)
Construct the Empirical CDF and the K-S statistic Dn.
>>> target = norm(0,1) # Normal mean 0, stddev 1
>>> cdfs = target.cdf(x)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> Dn = np.max(gaps)
>>> Kn = np.sqrt(n) * Dn
>>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))
Dn=0.043363, sqrt(n)*Dn=1.371265
>>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',
... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' % (Kn, kolmogorov(Kn)),
... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' % (Kn, kstwobign.cdf(Kn))]))
For a sample of size n drawn from a N(0, 1) distribution:
the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533
the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467
Plot the Empirical CDF against the target N(0, 1) CDF.
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
>>> # Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("_kolmogc",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogci",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogp",
r"""
Internal function, do not use.
""")
add_newdoc("kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
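Examples
--------
For real arguments the scaling relation can be verified directly;
the scaled form also avoids the underflow of `kv` at large
arguments (values chosen arbitrarily):
>>> import scipy.special as sc
>>> x = np.array([1.0, 10.0, 100.0])
>>> np.allclose(sc.kve(1, x), sc.kv(1, x) * np.exp(x))
True
>>> sc.kv(1, 800.), np.isfinite(sc.kve(1, 800.))
(0.0, True)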
""")
add_newdoc("_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("log1p",
"""
log1p(x, out=None)
Calculates log(1 + x) for use when `x` is near zero.
Parameters
----------
x : array_like
Real or complex valued input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of ``log(1 + x)``.
See Also
--------
expm1, cosm1
Examples
--------
>>> import scipy.special as sc
It is more accurate than using ``log(1 + x)`` directly for ``x``
near 0. Note that in the example below ``1 + 1e-17 == 1`` to
double precision.
>>> sc.log1p(1e-17)
1e-17
>>> np.log(1 + 1e-17)
0.0
""")
add_newdoc("_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('log_expit',
"""
log_expit(x)
Logarithm of the logistic sigmoid function.
The SciPy implementation of the logistic sigmoid function is
`scipy.special.expit`, so this function is called ``log_expit``.
The function is mathematically equivalent to ``log(expit(x))``, but
is formulated to avoid loss of precision for inputs with large
(positive or negative) magnitude.
Parameters
----------
x : array_like
The values to apply ``log_expit`` to element-wise.
Returns
-------
out : ndarray
The computed values, an ndarray of the same shape as ``x``.
See Also
--------
expit
Notes
-----
As a ufunc, ``log_expit`` takes a number of optional keyword arguments.
For more information see
`ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 1.8.0
Examples
--------
>>> from scipy.special import log_expit, expit
>>> log_expit([-3.0, 0.25, 2.5, 5.0])
array([-3.04858735, -0.57593942, -0.07888973, -0.00671535])
Large negative values:
>>> log_expit([-100, -500, -1000])
array([ -100., -500., -1000.])
Note that ``expit(-1000)`` returns 0, so the naive implementation
``log(expit(-1000))`` returns ``-inf``.
Large positive values:
>>> log_expit([29, 120, 400])
array([-2.54366565e-013, -7.66764807e-053, -1.91516960e-174])
Compare that to the naive implementation:
>>> np.log(expit([29, 120, 400]))
array([-2.54463117e-13, 0.00000000e+00, 0.00000000e+00])
The first value is accurate to only 3 digits, and the larger inputs
lose all precision and return 0.
""")
add_newdoc('logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
See Also
--------
expit
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import logit, expit
>>> logit([0, 0.25, 0.5, 0.75, 1])
array([ -inf, -1.09861229, 0. , 1.09861229, inf])
`expit` is the inverse of `logit`:
>>> expit(logit([0.1, 0.75, 0.999]))
array([ 0.1 , 0.75 , 0.999])
Plot logit(x) for x in [0, 1]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 1, 501)
>>> y = logit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.ylim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('logit(x)')
>>> plt.show()
""")
add_newdoc("lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
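Examples
--------
For ``m = 0`` the function reduces to the Legendre polynomial
:math:`P_v(x)`, and the Condon-Shortley phase appears for
``m = 1`` (the point is chosen arbitrarily):
>>> import scipy.special as sc
>>> np.isclose(sc.lpmv(0, 1, 0.5), 0.5)
True
>>> np.isclose(sc.lpmv(1, 1, 0.5), -np.sqrt(1 - 0.5**2))
True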
""")
add_newdoc("mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
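Examples
--------
For ``q = 0`` Mathieu's equation reduces to the harmonic equation,
whose characteristic value is ``m**2`` (a minimal sanity check):
>>> import scipy.special as sc
>>> np.isclose(sc.mathieu_a(2, 0), 4.0)
True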
""")
add_newdoc("mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
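Examples
--------
For ``q = 0`` and ``m >= 1`` the even Mathieu function reduces to
``cos(m*x)`` with `x` in degrees, a sketch of a sanity check:
>>> import scipy.special as sc
>>> y, yp = sc.mathieu_cem(1, 0, 60)
>>> np.isclose(y, 0.5)
True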
""")
add_newdoc("mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
add_newdoc("modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|x| < |v| + 20`)
- asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
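Examples
--------
A sketch of a small-argument check: the leading term of the power
series gives :math:`L_0(x) \approx 2x/\pi` (the point is chosen
arbitrarily):
>>> import scipy.special as sc
>>> np.isclose(sc.modstruve(0, 1e-4), 2e-4 / np.pi)
True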
""")
add_newdoc("nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
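Examples
--------
A quick check of the incomplete-beta identity quoted in the Notes,
using `betainc` (parameters chosen arbitrarily):
>>> import scipy.special as sc
>>> k, n, p = 5, 10, 0.5
>>> np.isclose(sc.nbdtr(k, n, p), sc.betainc(n, k + 1, p))
True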
""")
add_newdoc("nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
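Examples
--------
`nbdtr` and `nbdtrc` sum to one, a minimal consistency check
(parameters chosen arbitrarily):
>>> import scipy.special as sc
>>> k, n, p = 5, 10, 0.3
>>> np.isclose(sc.nbdtr(k, n, p) + sc.nbdtrc(k, n, p), 1.0)
True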
""")
add_newdoc("nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
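Examples
--------
A sketch of a round-trip check with `nbdtr` (parameters chosen
arbitrarily):
>>> import scipy.special as sc
>>> k, n, p = 5, 10, 0.3
>>> np.isclose(sc.nbdtri(k, n, sc.nbdtr(k, n, p)), p)
True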
""")
add_newdoc("nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
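Examples
--------
A sketch of a round-trip check with `nbdtr` (parameters chosen
arbitrarily):
>>> import scipy.special as sc
>>> y = sc.nbdtr(5, 10, 0.3)
>>> np.isclose(sc.nbdtrik(y, 10, 0.3), 5.0)
True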
""")
add_newdoc("nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("ncfdtri",
"""
ncfdtri(dfn, dfd, nc, p)
Inverse with respect to `f` of the CDF of the non-central F distribution.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
Returns
-------
f : float
Quantiles, i.e., the upper limit of integration.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtri
Compute the CDF for several values of `f`:
>>> f = [0.5, 1, 1.5]
>>> p = ncfdtr(2, 3, 1.5, f)
>>> p
array([ 0.20782291, 0.36107392, 0.47345752])
Compute the inverse. We recover the values of `f`, as expected:
>>> ncfdtri(2, 3, 1.5, p)
array([ 0.5, 1. , 1.5])
""")
add_newdoc("ncfdtridfd",
"""
ncfdtridfd(dfn, p, nc, f)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
This is the inverse with respect to `dfd` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e., the upper limit of integration.
Returns
-------
dfd : float
Degrees of freedom of the denominator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfd
Compute the CDF for several values of `dfd`:
>>> dfd = [1, 2, 3]
>>> p = ncfdtr(2, dfd, 0.25, 15)
>>> p
array([ 0.8097138 , 0.93020416, 0.96787852])
Compute the inverse. We recover the values of `dfd`, as expected:
>>> ncfdtridfd(2, p, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("ncfdtridfn",
"""
ncfdtridfn(p, dfd, nc, f)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
This is the inverse with respect to `dfn` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : float
Quantiles, i.e., the upper limit of integration.
Returns
-------
dfn : float
Degrees of freedom of the numerator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfn
Compute the CDF for several values of `dfn`:
>>> dfn = [1, 2, 3]
>>> p = ncfdtr(dfn, 2, 0.25, 15)
>>> p
array([ 0.92562363, 0.93020416, 0.93188394])
Compute the inverse. We recover the values of `dfn`, as expected:
>>> ncfdtridfn(p, 2, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("ncfdtrinc",
"""
ncfdtrinc(dfn, dfd, p, f)
Calculate non-centrality parameter for non-central F distribution.
This is the inverse with respect to `nc` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
f : array_like
Quantiles, i.e., the upper limit of integration.
Returns
-------
nc : float
Noncentrality parameter.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtrinc
Compute the CDF for several values of `nc`:
>>> nc = [0.5, 1.5, 2.0]
>>> p = ncfdtr(2, 3, nc, 15)
>>> p
array([ 0.96309246, 0.94327955, 0.93304098])
Compute the inverse. We recover the values of `nc`, as expected:
>>> ncfdtrinc(2, 3, p, 15)
array([ 0.5, 1.5, 2. ])
""")
add_newdoc("nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e., the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise, it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e., the upper limit of integration.
""")
add_newdoc("nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e., the upper limit of integration.
""")
add_newdoc("nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
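Examples
--------
A sketch of a round-trip check with `nctdtr` (parameters chosen
arbitrarily):
>>> import scipy.special as sc
>>> p = sc.nctdtr(10, 1.0, 2.0)
>>> np.isclose(sc.nctdtrit(10, 1.0, p), 2.0)
True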
""")
add_newdoc("ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
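Examples
--------
By symmetry, the standard normal CDF is one half at zero and
satisfies ``ndtr(x) + ndtr(-x) == 1`` (the point is chosen
arbitrarily):
>>> import scipy.special as sc
>>> sc.ndtr(0.)
0.5
>>> np.isclose(sc.ndtr(1.) + sc.ndtr(-1.), 1.0)
True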
""")
add_newdoc("nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate the mean of a normal distribution given the other parameters.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate the standard deviation of a normal distribution given the other
parameters.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
ndtr
""")
add_newdoc("log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
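Examples
--------
A minimal agreement check with `ndtr` in a regime where the naive
formula is still accurate (test points chosen arbitrarily):
>>> import scipy.special as sc
>>> x = np.array([-1.0, 0.0, 2.0])
>>> np.allclose(sc.log_ndtr(x), np.log(sc.ndtr(x)))
True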
""")
add_newdoc("ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
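Examples
--------
`ndtri` inverts `ndtr`; a minimal round-trip check (test points
chosen arbitrarily):
>>> import scipy.special as sc
>>> sc.ndtri(0.5)
0.0
>>> np.allclose(sc.ndtri(sc.ndtr(np.array([-1.5, 0.0, 2.0]))),
...             [-1.5, 0.0, 2.0])
True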
""")
add_newdoc("obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("pbwa",
r"""
pbwa(a, x)
Parabolic cylinder function W.
The function is a particular solution to the differential equation
.. math::
y'' + \left(\frac{1}{4}x^2 - a\right)y = 0,
for a full definition see section 12.14 in [1]_.
Parameters
----------
a : array_like
Real parameter
x : array_like
Real argument
Returns
-------
w : scalar or ndarray
Value of the function
wp : scalar or ndarray
Value of the derivative in x
Notes
-----
The function is a wrapper for a Fortran routine by Zhang and Jin
[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and
returns NaN outside that range.
References
----------
.. [1] Digital Library of Mathematical Functions, 12.14.
https://dlmf.nist.gov/12.14
.. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("pdtr",
r"""
pdtr(k, m, out=None)
Poisson cumulative distribution function.
Defined as the probability that a Poisson-distributed random
variable with event rate :math:`m` is less than or equal to
:math:`k`. More concretely, this works out to be [1]_
.. math::
\exp(-m) \sum_{j = 0}^{\lfloor{k}\rfloor} \frac{m^j}{j!}.
Parameters
----------
k : array_like
Nonnegative real argument
m : array_like
Nonnegative real shape parameter
out : ndarray
Optional output array for the function results
See Also
--------
pdtrc : Poisson survival function
pdtrik : inverse of `pdtr` with respect to `k`
pdtri : inverse of `pdtr` with respect to `m`
Returns
-------
scalar or ndarray
Values of the Poisson cumulative distribution function
References
----------
.. [1] https://en.wikipedia.org/wiki/Poisson_distribution
Examples
--------
>>> import scipy.special as sc
It is a cumulative distribution function, so it converges to 1
monotonically as `k` goes to infinity.
>>> sc.pdtr([1, 10, 100, np.inf], 1)
array([0.73575888, 0.99999999, 1. , 1. ])
It is discontinuous at integers and constant between integers.
>>> sc.pdtr([1, 1.5, 1.9, 2], 1)
array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ])
""")
add_newdoc("pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(k+1, m).
Arguments must both be non-negative doubles.
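Examples
--------
>>> import scipy.special as sc
It is the complement of the Poisson CDF `pdtr`.
>>> k, m = 3, 2.5
>>> np.allclose(sc.pdtr(k, m) + sc.pdtrc(k, m), 1)
True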
""")
add_newdoc("pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
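Examples
--------
>>> import scipy.special as sc
It inverts `pdtr` with respect to `m`.
>>> m = 2.5
>>> y = sc.pdtr(3, m)
>>> np.allclose(sc.pdtri(3, y), m)
True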
""")
add_newdoc("pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
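Examples
--------
>>> import scipy.special as sc
Round trip with `pdtr`:
>>> p = sc.pdtr(5, 2)
>>> np.allclose(sc.pdtrik(p, 2), 5)
True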
""")
add_newdoc("poch",
r"""
poch(z, m)
Pochhammer symbol.
The Pochhammer symbol (rising factorial) is defined as
.. math::
(z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}
For positive integer `m` it reads
.. math::
(z)_m = z (z + 1) ... (z + m - 1)
See [dlmf]_ for more details.
Parameters
----------
z, m : array_like
Real-valued arguments.
Returns
-------
scalar or ndarray
The value of the function.
References
----------
.. [dlmf] Nist, Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#iii
Examples
--------
>>> import scipy.special as sc
It is 1 when m is 0.
>>> sc.poch([1, 2, 3, 4], 0)
array([1., 1., 1., 1.])
For z equal to 1 it reduces to the factorial function.
>>> sc.poch(1, 5)
120.0
>>> 1 * 2 * 3 * 4 * 5
120
It can be expressed in terms of the gamma function.
>>> z, m = 3.7, 2.1
>>> sc.poch(z, m)
20.529581933776953
>>> sc.gamma(z + m) / sc.gamma(z)
20.52958193377696
""")
add_newdoc("pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis, ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis, the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis, the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
Examples
--------
>>> from scipy.special import psi
>>> z = 3 + 4j
>>> psi(z)
(1.55035981733341+1.0105022091860445j)
Verify psi(z) = psi(z + 1) - 1/z:
>>> psi(z + 1) - 1/z
(1.55035981733341+1.0105022091860445j)
""")
add_newdoc("radian",
"""
radian(d, m, s, out=None)
Convert from degrees to radians.
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
Parameters
----------
d : array_like
Degrees, can be real-valued.
m : array_like
Minutes, can be real-valued.
s : array_like
Seconds, can be real-valued.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the inputs in radians.
Examples
--------
>>> import scipy.special as sc
There are many ways to specify an angle.
>>> sc.radian(90, 0, 0)
1.5707963267948966
>>> sc.radian(0, 60 * 90, 0)
1.5707963267948966
>>> sc.radian(0, 0, 60**2 * 90)
1.5707963267948966
The inputs can be real-valued.
>>> sc.radian(1.5, 0, 0)
0.02617993877991494
>>> sc.radian(1, 30, 0)
0.02617993877991494
""")
add_newdoc("rel_entr",
r"""
rel_entr(x, y, out=None)
Elementwise function for computing relative entropy.
.. math::
\mathrm{rel\_entr}(x, y) =
\begin{cases}
x \log(x / y) & x > 0, y > 0 \\
0 & x = 0, y \ge 0 \\
\infty & \text{otherwise}
\end{cases}
Parameters
----------
x, y : array_like
Input arrays
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Relative entropy of the inputs
See Also
--------
entr, kl_div
Notes
-----
.. versionadded:: 0.15.0
This function is jointly convex in x and y.
The origin of this function is in convex programming; see
[1]_. Given two discrete probability distributions :math:`p_1,
\ldots, p_n` and :math:`q_1, \ldots, q_n`, to get the relative
entropy of statistics compute the sum
.. math::
\sum_{i = 1}^n \mathrm{rel\_entr}(p_i, q_i).
See [2]_ for details.
References
----------
.. [1] Grant, Boyd, and Ye, "CVX: Matlab Software for Disciplined Convex
Programming", http://cvxr.com/cvx/
.. [2] Kullback-Leibler divergence,
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
""")
add_newdoc("rgamma",
r"""
rgamma(z, out=None)
Reciprocal of the gamma function.
Defined as :math:`1 / \Gamma(z)`, where :math:`\Gamma` is the
gamma function. For more on the gamma function see `gamma`.
Parameters
----------
z : array_like
Real or complex valued input
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Function results
Notes
-----
The gamma function has no zeros and has simple poles at
nonpositive integers, so `rgamma` is an entire function with zeros
at the nonpositive integers. See the discussion in [dlmf]_ for
more details.
See Also
--------
gamma, gammaln, loggamma
References
----------
.. [dlmf] Nist, Digital Library of Mathematical functions,
https://dlmf.nist.gov/5.2#i
Examples
--------
>>> import scipy.special as sc
It is the reciprocal of the gamma function.
>>> sc.rgamma([1, 2, 3, 4])
array([1. , 1. , 0.5 , 0.16666667])
>>> 1 / sc.gamma([1, 2, 3, 4])
array([1. , 1. , 0.5 , 0.16666667])
It is zero at nonpositive integers.
>>> sc.rgamma([0, -1, -2, -3])
array([0., 0., 0., 0.])
It rapidly underflows to zero along the positive real axis.
>>> sc.rgamma([10, 100, 179])
array([2.75573192e-006, 1.07151029e-156, 0.00000000e+000])
""")
add_newdoc("round",
"""
round(x, out=None)
Round to the nearest integer.
Returns the nearest integer to `x`. If `x` ends in 0.5 exactly,
the nearest even integer is chosen.
Parameters
----------
x : array_like
Real valued input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The nearest integers to the elements of `x`. The result is of
floating type, not integer type.
Examples
--------
>>> import scipy.special as sc
It rounds to even.
>>> sc.round([0.5, 1.5])
array([0., 2.])
""")
add_newdoc("shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("sindg",
"""
sindg(x, out=None)
Sine of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Sine at the input.
See Also
--------
cosdg, tandg, cotdg
Examples
--------
>>> import scipy.special as sc
It is more accurate than using sine directly.
>>> x = 180 * np.arange(3)
>>> sc.sindg(x)
array([ 0., -0., 0.])
>>> np.sin(x * np.pi / 180)
array([ 0.0000000e+00, 1.2246468e-16, -2.4492936e-16])
""")
add_newdoc("smirnov",
r"""
smirnov(n, d)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (aka the Survival Function) of Dn+ (or Dn-)
for a one-sided test of equality between an empirical and a
theoretical distribution. It is equal to the probability that the
maximum difference between a theoretical distribution and an empirical
one based on `n` samples is greater than d.
Parameters
----------
n : int
Number of samples
d : float array_like
Deviation between the Empirical CDF (ECDF) and the target CDF.
Returns
-------
float
The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
See Also
--------
smirnovi : The Inverse Survival Function for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi : Functions for the two-sided distribution
Examples
--------
>>> from scipy.special import smirnov
Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a sample of size 5
>>> smirnov(5, [0, 0.5, 1.0])
array([ 1. , 0.056, 0. ])
Compare a sample of size 5 drawn from a source N(0.5, 1) distribution against
a target N(0, 1) CDF.
>>> from scipy.stats import norm
>>> rng = np.random.default_rng()
>>> n = 5
>>> gendist = norm(0.5, 1) # Normal distribution, mean 0.5, stddev 1
>>> x = np.sort(gendist.rvs(size=n, random_state=rng))
>>> x
array([-1.3922078 , -0.13526532, 0.1371477 , 0.18981686, 1.81948167])
>>> target = norm(0, 1)
>>> cdfs = target.cdf(x)
>>> cdfs
array([0.08192974, 0.44620105, 0.55454297, 0.57527368, 0.96558101])
# Construct the Empirical CDF and the K-S statistics (Dn+, Dn-, Dn)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> np.set_printoptions(precision=3)
>>> cols
array([[-1.392, 0.2 , 0.082, 0.082, 0.118],
[-0.135, 0.4 , 0.446, 0.246, -0.046],
[ 0.137, 0.6 , 0.555, 0.155, 0.045],
[ 0.19 , 0.8 , 0.575, -0.025, 0.225],
[ 1.819, 1. , 0.966, 0.166, 0.034]])
>>> gaps = cols[:, -2:]
>>> Dnpm = np.max(gaps, axis=0)
>>> print('Dn-=%f, Dn+=%f' % (Dnpm[0], Dnpm[1]))
Dn-=0.246201, Dn+=0.224726
>>> probs = smirnov(n, Dnpm)
>>> print(chr(10).join(['For a sample of size %d drawn from a N(0, 1) distribution:' % n,
... ' Smirnov n=%d: Prob(Dn- >= %f) = %.4f' % (n, Dnpm[0], probs[0]),
... ' Smirnov n=%d: Prob(Dn+ >= %f) = %.4f' % (n, Dnpm[1], probs[1])]))
For a sample of size 5 drawn from a N(0, 1) distribution:
Smirnov n=5: Prob(Dn- >= 0.246201) = 0.4713
Smirnov n=5: Prob(Dn+ >= 0.224726) = 0.5243
Plot the Empirical CDF against the target N(0, 1) CDF
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
# Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("smirnovi",
"""
smirnovi(n, p)
Inverse to `smirnov`
Returns `d` such that ``smirnov(n, d) == p``, the critical value
corresponding to `p`.
Parameters
----------
n : int
Number of samples
p : float array_like
Probability
Returns
-------
float
The value(s) of smirnovi(n, p), the critical values.
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
See Also
--------
smirnov : The Survival Function (SF) for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi, scipy.stats.kstwobign : Functions for the two-sided distribution
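Examples
--------
>>> from scipy.special import smirnov, smirnovi
Round trip with `smirnov`:
>>> n = 5
>>> np.allclose(smirnov(n, smirnovi(n, 0.5)), 0.5)
True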
""")
add_newdoc("_smirnovc",
"""
_smirnovc(n, d)
Internal function, do not use.
""")
add_newdoc("_smirnovci",
"""
Internal function, do not use.
""")
add_newdoc("_smirnovp",
"""
_smirnovp(n, p)
Internal function, do not use.
""")
add_newdoc("spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_1^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
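Examples
--------
>>> import scipy.special as sc
Special values: the integral vanishes at ``z = 1`` and equals
``pi**2/6`` at ``z = 0``.
>>> sc.spence(1.0)
0.0
>>> sc.spence(0.0)
1.6449340668482264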
""")
add_newdoc("stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
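Examples
--------
>>> import scipy.special as sc
The distribution is symmetric, so the CDF at ``t = 0`` is one half
for any number of degrees of freedom.
>>> sc.stdtr([1, 10, 100], 0)
array([0.5, 0.5, 0.5])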
""")
add_newdoc("stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
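Examples
--------
>>> import scipy.special as sc
Round trip with `stdtr`:
>>> p = sc.stdtr(5, 2)
>>> np.allclose(sc.stdtridf(p, 2), 5)
True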
""")
add_newdoc("stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
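Examples
--------
>>> import scipy.special as sc
Round trip with `stdtr`:
>>> p = sc.stdtr(5, 2)
>>> np.allclose(sc.stdtrit(5, p), 2)
True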
""")
add_newdoc("struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
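Examples
--------
>>> import scipy.special as sc
The function vanishes at the origin for orders ``v > -1``.
>>> sc.struve(0, 0)
0.0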
""")
add_newdoc("tandg",
"""
tandg(x, out=None)
Tangent of angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Tangent at the input.
See Also
--------
sindg, cosdg, cotdg
Examples
--------
>>> import scipy.special as sc
It is more accurate than using tangent directly.
>>> x = 180 * np.arange(3)
>>> sc.tandg(x)
array([0., 0., 0.])
>>> np.tan(x * np.pi / 180)
array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16])
""")
add_newdoc("tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
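Examples
--------
>>> import scipy.special as sc
The distribution is symmetric about zero for every shape parameter
`lmbda`, so the CDF at zero is one half.
>>> np.allclose(sc.tklmbda(0, [0.0, 0.5, 2.0]), 0.5)
True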
""")
add_newdoc("wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> z = special.wofz(x)
>>> plt.plot(x, z.real, label='wofz(x).real')
>>> plt.plot(x, z.imag, label='wofz(x).imag')
>>> plt.xlabel('$x$')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
""")
add_newdoc("xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
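Examples
--------
>>> import scipy.special as sc
The indeterminate form ``0 * log(0)`` is resolved to 0.
>>> sc.xlogy(0, 0)
0.0
>>> sc.xlogy(2, np.e)
2.0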
""")
add_newdoc("xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
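Examples
--------
>>> import scipy.special as sc
The limit at ``x = 0`` is returned even where ``log1p(y)`` diverges.
>>> sc.xlog1py(0, -1)
0.0
>>> sc.xlog1py(2, np.e - 1)
2.0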
""")
add_newdoc("y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``. For ``x < 1`` the analytic continuation is
computed. For more information on the Riemann zeta function, see
[dlmf]_.
Parameters
----------
x : array_like of float
Values at which to compute zeta(x) - 1 (must be real).
Returns
-------
out : array_like
Values of zeta(x) - 1.
See Also
--------
zeta
Examples
--------
>>> from scipy.special import zetac, zeta
Some special values:
>>> zetac(2), np.pi**2/6 - 1
(0.64493406684822641, 0.6449340668482264)
>>> zetac(-1), -1.0/12 - 1
(-1.0833333333333333, -1.0833333333333333)
Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:
>>> zetac(60), zeta(60) - 1
(8.673617380119933e-19, 0.0)
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/25
""")
add_newdoc("_riemann_zeta",
"""
Internal function, use `zeta` instead.
""")
add_newdoc("_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array_like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining `loggamma` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas `loggamma` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make `loggamma` useful for working in complex logspace.
On the real line `loggamma` is related to `gammaln` via
``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to
rounding error.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("_cospi",
"""
Internal function, do not use.
""")
add_newdoc("owens_t",
"""
owens_t(h, a)
Owen's T Function.
The function T(h, a) gives the probability of the event
(X > h and 0 < Y < a * X) where X and Y are independent
standard normal random variables.
Parameters
----------
h : array_like
Input value.
a : array_like
Input value.
Returns
-------
t : scalar or ndarray
Probability of the event (X > h and 0 < Y < a * X),
where X and Y are independent standard normal random variables.
Examples
--------
>>> from scipy import special
>>> a = 3.5
>>> h = 0.78
>>> special.owens_t(h, a)
0.10877216734852274
References
----------
.. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of
Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000.
""")
add_newdoc("_factorial",
"""
Internal function, do not use.
""")
add_newdoc("wright_bessel",
r"""
wright_bessel(a, b, x)
Wright's generalized Bessel function.
Wright's generalized Bessel function is an entire function and defined as
.. math:: \Phi(a, b; x) = \sum_{k=0}^\infty \frac{x^k}{k! \Gamma(a k + b)}
See also [1]_.
Parameters
----------
a : array_like of float
a >= 0
b : array_like of float
b >= 0
x : array_like of float
x >= 0
Notes
-----
Due to the complexity of the function with its three parameters, only
non-negative arguments are implemented.
Examples
--------
>>> from scipy.special import wright_bessel
>>> a, b, x = 1.5, 1.1, 2.5
>>> wright_bessel(a, b-1, x)
4.5314465939443025
Now, let us verify the relation
.. math:: \Phi(a, b-1; x) = a x \Phi(a, b+a; x) + (b-1) \Phi(a, b; x)
>>> a * x * wright_bessel(a, b+a, x) + (b-1) * wright_bessel(a, b, x)
4.5314465939443025
References
----------
.. [1] Digital Library of Mathematical Functions, 10.46.
https://dlmf.nist.gov/10.46.E1
""")
add_newdoc("ndtri_exp",
r"""
ndtri_exp(y)
Inverse of `log_ndtr` vs x. Allows for greater precision than
`ndtri` composed with `numpy.exp` for very small values of y and for
y close to 0.
Parameters
----------
y : array_like of float
Returns
-------
scalar or ndarray
Inverse of the log CDF of the standard normal distribution, evaluated
at y.
Examples
--------
>>> import scipy.special as sc
`ndtri_exp` agrees with the naive implementation when the latter does
not suffer from underflow.
>>> sc.ndtri_exp(-1)
-0.33747496376420244
>>> sc.ndtri(np.exp(-1))
-0.33747496376420244
For extreme values of y, the naive approach fails
>>> sc.ndtri(np.exp(-800))
-inf
>>> sc.ndtri(np.exp(-1e-20))
inf
whereas `ndtri_exp` is still able to compute the result to high precision.
>>> sc.ndtri_exp(-800)
-39.88469483825668
>>> sc.ndtri_exp(-1e-20)
9.262340089798409
See Also
--------
log_ndtr, ndtri, ndtr
""")
|
tylerjereddy/scipy
|
scipy/special/_add_newdocs.py
|
Python
|
bsd-3-clause
| 258,381
|
[
"Gaussian"
] |
ec39b5cad61b108c247317ab1128a71c02d08fe3110665dc02ef715dbe7fef91
|
import vtk
import matplotlib.cm as cm
import sys
LABEL_NAMES = [\
'olfactory bulb',
'cerebral cortex',
'lateral septal nuclei',
'striatum',
'globus pallidus',
'thalamus',
'hypothalamus',
'hippocampal formation',
'superior colliculus',
'inferior colliculus',
'cerebellum',
'fimbria',
'internal capsule',
'ventricle',
'ventricle',
'corpus callosum',
'subcommissural organ',
'anterior commissure',
'paraflocculus',
'deep mesencephalic nucleus',
'fornix',
'aqueaduct',
'pineal gland',
'substantia nigra',
'brainstem (remainder)',
'pontine gray',
'fasciculus retroflexus',
'amygdala',
'interpeduncular nucleus',
'periacueductal gray',
'nucleus accumbens',
'optic chiasm',
'supraoptic decussation',
'optic tract',
'lateral lemniscus',
'epithalamus',
'mammillary nucleus',
'cochlear nuclei and nerve'
]
def read_intensity_data(filename):
int_data = {}
fp = open(filename, 'r')
lines = fp.readlines()
for line in lines:
splited = line.strip().split(',')
int_data[int(splited[0])] = float(splited[1])
return int_data
def draw_scene(int_filename, color_mode=0, screen_name=None):
###############################################################################
# read polydata file
#
offscreen = False
draw_axes = False
segs = []
segs_mapper = []
segs_actor = []
transforms = []
transforms_filter = []
seg_fileformat = '/mnt/data1/bah2015/seg2/seg%05d.vtk'
#seg_fileformat = '/media/nebula/data/bah/vtk/seg%05d.vtk'
#int_filename = '../matching_area/result.txt'
int_data = read_intensity_data(int_filename)
int_sum = sum(int_data)
int_data_sorted = {}
transform = vtk.vtkTransform()
transform.RotateWXYZ(90, 0, 1, 0)
transformFilter = vtk.vtkTransformPolyDataFilter()
transformFilter.SetTransform(transform)
for i in range(1, 39):
segs.append(vtk.vtkPolyDataReader())
segs[-1].SetFileName(seg_fileformat % i)
transforms.append(vtk.vtkTransform())
#transforms[-1].RotateWXYZ(90., 0, 1, 0)
transforms_filter.append(vtk.vtkTransformPolyDataFilter())
transforms_filter[-1].SetTransform(transforms[-1])
transforms_filter[-1].SetInputConnection(segs[-1].GetOutputPort())
transforms_filter[-1].Update()
segs_mapper.append(vtk.vtkPolyDataMapper())
#segs_mapper[-1].SetInputConnection(segs[-1].GetOutputPort())
segs_mapper[-1].SetInputConnection(transforms_filter[-1].GetOutputPort())
segs_actor.append(vtk.vtkActor())
segs_actor[-1].SetMapper(segs_mapper[-1])
segs_actor[-1].GetProperty().SetOpacity(0.1)
#color = cm.jet(i/39.)
#segs_actor[-1].GetProperty().SetColor(color[0], color[1], color[2])
i = 0
for k, v in sorted(int_data.items(), key=lambda x:x[1], reverse=True):
if i < 9:
segs_actor[k-1].GetProperty().SetOpacity(0.8)
if color_mode == 0:
color = ((v-40) / 20.)
segs_actor[k-1].GetProperty().SetColor(color, 0, 0)
elif color_mode == 1:
color = cm.jet( (v-float(int_sum/40)) / float(int_sum) * 10.)
segs_actor[k-1].GetProperty().SetColor(color[0], color[1], color[2])
print ' Rank %d : %s (%d) = %d' % (i+1, LABEL_NAMES[k-1], k, v)
#print color
i += 1
#int_data_sorted[k] = v
#print 'Lank %5d : %d (%d)' % (i, k, v)
###############################################################################
# draw axis
#
if draw_axes:
axesActor = vtk.vtkAxesActor()
lut = vtk.vtkLookupTable()
lut.Build()
scalar_bar = vtk.vtkScalarBarActor()
scalar_bar.SetLookupTable(lut)
###############################################################################
# prepare rendering
#
ren = vtk.vtkRenderer()
ren.SetBackground(0.0, 0.0, 0.0)
if draw_axes:
ren.AddActor(axesActor)
for seg in segs_actor:
ren.AddActor(seg)
ren.AddActor(scalar_bar)
renWin = vtk.vtkRenderWindow()
if offscreen:
renWin.SetOffScreenRendering(True)
renWin.AddRenderer(ren)
renWin.SetWindowName('Mouse Brain Viewer 2 + (%s)' % screen_name)
renWin.SetSize(1600, 1600)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
'''
get_screenshot(renWin, screen_name + '_1.png')
for trans in transforms:
trans.RotateWXYZ(90., 0, 1, 0)
for trans_filter in transforms_filter:
trans_filter.Update()
renWin.Render()
get_screenshot(renWin, screen_name + '_2.png')
for trans in transforms:
trans.RotateWXYZ(90., 0, 1, 0)
for trans_filter in transforms_filter:
trans_filter.Update()
renWin.Render()
get_screenshot(renWin, screen_name + '_3.png')
'''
iren.Start()
def get_screenshot(renWin, filename):
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(filename)
writer.SetInput(w2if.GetOutput())
writer.Write()
renWin.Render()
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
filename = ''
if(argc >= 2):
filename = argvs[1]
else:
filename = '../matching_area/result.txt'
if(argc >= 3):
color_mode = int(argvs[2])
else:
color_mode = 1
if(argc >= 4):
gene_name = argvs[3]
else:
gene_name = 'GENE_NAME'
print '************ %s ************' % gene_name
draw_scene(filename, color_mode, gene_name)
|
neuroinformatics/bah2015_registration
|
vtk_test/interactive_intensity.py
|
Python
|
mit
| 5,692
|
[
"VTK"
] |
35b9c923028a54deed3686bd93b226b42b0ff88c72fd64dedb52371dd6e3dad8
|
"""Integration test using a simple dummy model definition."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import join as pjoin
import shutil
import sys
from tempfile import NamedTemporaryFile, TemporaryDirectory
import unittest
from scmtiles.task import TileTask
from scmtiles.runner import TileRunner, CellResult
from scmtiles.test import _get_test_data_path
class SimpleRunner(TileRunner):
"""
A simple implementation of a `TileRunner` class that doesn't do much
other than write an input file and an output file.
"""
__version__ = '1.0'
def run_cell(self, cell, logger):
"""Run the model for the given cell."""
# Create a run directory and create links to the template.
run_directory = self.create_run_directory()
self.link_template(run_directory)
# Write the input netcdf file.
cell_ds = self.get_cell(cell)
cell_ds.to_netcdf(pjoin(run_directory, 'input.nc'))
# Simulate a model doing something by writing an output text file
# consisting of the xarray representation of the input dataset and
# the single required input file.
output_file = pjoin(run_directory, 'output.txt')
with open(output_file, 'w') as fh:
fh.write('{!s}\n'.format(cell_ds))
with open(pjoin(run_directory, 'required.txt'), 'r') as rh:
fh.write(rh.read())
# Archive the run output to the output directory.
outputs = self.archive_output(cell, run_directory)
return CellResult(cell, outputs)
def archive_output(self, cell, run_directory):
"""Archive the model output."""
# Construct an output file path including the run start time and
# the cell's location in the full grid.
time_portion = self.config.start_time.strftime('%Y%m')
grid_portion = 'y{:02d}x{:02d}'.format(cell.y_global, cell.x_global)
archive_file_name = 'output.{!s}.{!s}.txt'.format(time_portion,
grid_portion)
archive_file_path = pjoin(self.config.output_directory,
archive_file_name)
# Move the model output file to the output directory.
shutil.move(pjoin(run_directory, 'output.txt'), archive_file_path)
return [archive_file_path]
#: A string template for an SCM Tiles configuration file, to be filled
#: in using string formatting.
_CONFIG_TEMPLATE = """
[default]
start_time = 2016-01-01T00:00:00
forcing_step_seconds = 1
forcing_num_steps = 1
xname = lon
yname = lat
xsize = 28
ysize = 6
input_directory = {input_directory:s}
template_directory = {template_directory:s}
work_directory = {work_directory:s}
output_directory = {output_directory:s}
input_file_pattern = grid.{{time.year:04d}}{{time.month:02d}}.nc
archive_directory = /dev/null
"""
class Test_SimpleRunner(unittest.TestCase):
def setUp(self):
# Create the run sandbox in the user's home directory.
self.sandbox_dir = TemporaryDirectory(dir=os.path.expanduser('~'),
prefix='scmtiles.test_simple.')
base_path = self.sandbox_dir.name
self.input_directory = pjoin(base_path, 'inputs')
self.template_directory = pjoin(base_path, 'template')
self.work_directory = pjoin(base_path, 'work')
self.output_directory = pjoin(base_path, 'run')
os.mkdir(self.input_directory)
os.mkdir(self.template_directory)
os.mkdir(self.work_directory)
os.mkdir(self.output_directory)
# Symlink the reference input file.
os.symlink(_get_test_data_path('grid.201601.nc'),
pjoin(self.input_directory, 'grid.201601.nc'))
# Create a dummy template.
with open(pjoin(self.template_directory, 'required.txt'), 'w') as f:
f.write('This file is needed for a run.')
def tearDown(self):
# Delete the sandbox directory.
self.sandbox_dir.cleanup()
def write_config_file(self):
# Write a configuration file in /tmp.
config_text = _CONFIG_TEMPLATE.format(
input_directory=self.input_directory,
template_directory=self.template_directory,
work_directory=self.work_directory,
output_directory=self.output_directory,
)
config_file = NamedTemporaryFile(mode='w', buffering=1)
config_file.write(config_text)
return config_file
def verify_run(self):
# Verify that all the expected output files were written, and that
# they contain the expected information.
for y in range(6):
for x in range(28):
filename = pjoin(self.output_directory,
'output.201601.y{:02d}x{:02d}.txt'
''.format(y, x))
# The expected output file must exist.
self.assertTrue(os.path.exists(filename))
# The file should contain a dataset description and the line
# from required.txt. Check that the number of lines is > 2,
# which is a crude way of checking there is something else in
# the file other than the contents of required.txt. Then check
# that the specific text from required.txt is present.
with open(filename, 'r') as fh:
lines = fh.readlines()
self.assertTrue(len(lines) > 2)
self.assertTrue('This file is needed for a run.' in lines)
def test_rows(self):
config_file = self.write_config_file()
cliargs = ['simple', config_file.name]
task = TileTask(SimpleRunner, decompose_mode='rows',
logname='all_rows')
# Catch both stdout and stderr while doing this.
task.initialize(cliargs=cliargs)
task.run()
status = task.finalize()
# Close the config file object which deletes it.
config_file.close()
self.assertEqual(status, 0)
self.verify_run()
def test_cells(self):
config_file = self.write_config_file()
cliargs = ['simple', config_file.name]
task = TileTask(SimpleRunner, decompose_mode='cells',
logname='all_cells')
# Catch both stdout and stderr while doing this.
task.initialize(cliargs=cliargs)
task.run()
status = task.finalize()
# Close the config file object which deletes it.
config_file.close()
self.assertEqual(status, 0)
self.verify_run()
|
aopp-pred/scmtiles
|
scmtiles/test/integration/test_simple.py
|
Python
|
apache-2.0
| 7,178
|
[
"NetCDF"
] |
9c536f194956b2a611205a8cf6a74fcb26b67ab54f5127f914ba1ea874c2b551
|
""" AlwaysDegradedPolicy module
"""
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
class AlwaysDegradedPolicy(PolicyBase):
"""
The AlwaysDegradedPolicy is a dummy module that can be used as example, it
always returns Degraded status.
"""
@staticmethod
def _evaluate(commandResult):
"""
It always returns Degraded status; the default command is evaluated,
but its output is completely ignored.
"""
policyResult = {"Status": "Degraded", "Reason": "AlwaysDegraded"}
return S_OK(policyResult)
|
ic-hep/DIRAC
|
src/DIRAC/ResourceStatusSystem/Policy/AlwaysDegradedPolicy.py
|
Python
|
gpl-3.0
| 606
|
[
"DIRAC"
] |
f47bdddd6117cf2d020a5ffa8c6089be671dc62b940df6193f1d588d95a6f288
|
import os
import time
import yaml
import json
import datetime
import numpy as np
import pandas as pd
from time import strftime
from pprint import pprint
import matplotlib.pyplot as plt
from diffpy.Structure import loadStructure
from diffpy.Structure import StructureFormatError
from diffpy.srreal.structureadapter import nosymmetry
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
from diffpy.srreal.pdfcalculator import PDFCalculator
from pymatgen.io.cif import CifParser
from pymatgen.analysis.diffraction.xrd import XRDCalculator
# pymatgen syntax
"""
struc = CifParser(<fp>).get_structures().pop()
meta_tuple = struc.lattice.abc + struc.lattice.angles
volume = struc.volume
spacegroup_info = struc.get_space_group_info()
element_info = struc.species # a list
"""
#from pdf_lib.glbl import glbl
from .glbl import glbl
def _makedirs(path_name):
'''create the directory if it does not exist (os.makedirs on Python 2 has no exist_ok)'''
if os.path.isdir(path_name):
pass
else:
os.makedirs(path_name)
def _timestampstr(timestamp):
''' convert timestamp to strftime formate '''
timestring = datetime.datetime.fromtimestamp(\
float(timestamp)).strftime('%Y%m%d-%H%M')
return timestring
def find_nearest(std_array, val):
"""function to find the index of nearest value"""
idx = (np.abs(std_array-val)).argmin()
return idx
def theta2q(theta, wavelength):
"""transform from 2theta to Q(A^-1)"""
_theta = theta.astype(float)
rad = np.deg2rad(_theta)
q = 4*np.pi/wavelength*np.sin(rad/2)
return q
def assign_nearest(std_array, q_array, iq_val):
"""assign value to nearest grid"""
idx_list = []
interp_iq = np.zeros_like(std_array)
for val in q_array:
idx_list.append(find_nearest(std_array, val))
interp_iq[idx_list]=iq_val
return interp_iq
class PDFLibBuilder:
''' a class that loads .cif files from a given directory and computes
the corresponding learning library.
Features computed are:
1. (a, b ,c, alpha, beta, gamma)
2. chemical composition
3. RDF (unormalized)
4. XRD peak positions + intensity
Labels computed are:
1.SpaceGroup Label + space group order
2.volume of unit cell
Parameters:
-----------
input_dir : str
optional. path to where you stores cif files. default is current
directory.
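Examples
--------
A typical workflow; ``'./cif_dir'`` stands in for a real directory
of cif files.
>>> builder = PDFLibBuilder(input_dir='./cif_dir')
>>> builder.learninglib_build(xrd=True)
>>> builder.save_data()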
'''
def __init__(self, input_dir=None):
# set up API_key
if input_dir is None:
input_dir = os.getcwd()
print('=== Input dir set to {}, change it if needed ==='
.format(input_dir))
self.input_dir = input_dir
#get the stem
full_path = os.path.abspath(input_dir)
stem, tail = os.path.split(full_path)
self.stem = stem
self.output_dir = None # overwrite it later
# diffpy
self.r_grid = None
self.gr_array = None
self.rdf_array = None
self.density_list = []
# pymatgen
self.xrd_array = None
self.std_q = None
self.fail_list = None
self.calculate_params = {}
cif_list = sorted([ f for f in os.listdir(self.input_dir) if
f.endswith('.cif')])
print("INFO: there are {} structures in input_dir"
.format(len(cif_list)))
self.cif_list = cif_list
def learninglib_build(self, output_dir=None, pdfcal_cfg=None,
rdf=True, xrd=False, Bisoequiv=0.1,
rstep=None, DebyeCal=False, nosymmetry=False,
tth_range=None, wavelength=0.5):
""" method to build learning lib with diffpy based on path
of the cif library. Parameters of the G(r) calculation are set
via glbl.<attribute>. "PDFCal_config.txt" file with PDFCalculator
configuration will also be output
Parameters
----------
pdfcal_cfg : dict, optional
configuration of PDF calculator, default is the one defined
inside glbl class.
rdf : bool, optional
option to compute RDF or not. default to True, if not,
compute pdf
xrd : bool, optional
option to compute XRD (which is slow). default to False.
Bisoequiv : float, optional
value of isotropic thermal parameter. default is 0.1.
scientific equation: Biso = 8 (pi**2) Uiso
rstep : float, optional
r-grid spacing of the PDF. default is pi/qmax.
DebyeCal : bool, optional
option to use Debye calculator. default is False.
nosymmetry : bool, optional
DEPRECATED for now. option to apply no symmetry.
default is False.
tth_range : ndarray, optional
range of 2theta. default is [0:0.1:90]
wavelength : float, optional
wavelength in angstroms, default to 0.5 A which corresponds
to Qmax ~= 17
"""
# setup output dir
timestr = _timestampstr(time.time())
if output_dir is None:
tail = "LearningLib_{}".format(timestr)
output_dir = os.path.join(os.getcwd(), tail)
print('=== output dir would be {} ==='.format(output_dir))
self.output_dir = output_dir
if tth_range is None:
tth_range = np.arange(0, 90, 0.1)
self.tth_range = tth_range
self.wavelength = wavelength
self.std_q = theta2q(self.tth_range, self.wavelength)
####### configure pymatgen XRD calculator #####
# instantiate calculators
xrd_cal = XRDCalculator()
xrd_cal.wavelength = self.wavelength
xrd_cal.TWO_THETA_TOL = 10**-2
self.calculate_params.update({'xrd_wavelength':
xrd_cal.wavelength})
xrd_list = []
sg_list = []
# (a,b,c, alpha, beta, gamma, volume)
structure_list_1 = [] # primitive cell
structure_list_2 = [] # ordinary cell
# chemical element
composition_list_1 = [] # primitive cell
composition_list_2 = [] # ordinary cell
fail_list = []
####### configure diffpy PDF calculator ######
if DebyeCal:
cal = DebyePDFCalculator()
self.calculator_type = 'Debye'
else:
cal = PDFCalculator()
self.calculator_type = 'PDF'
self.calculator = cal
self.calculate_params.update({'calculator_type':
self.calculator_type})
# setup calculator parameters
if rstep is None:
rstep = glbl.rstep
self.rstep = rstep
self.calculator.rstep = rstep # annoying fact
self.calculate_params.update({'rstep':rstep})
if pdfcal_cfg is None:
pdfcal_cfg = glbl.cfg
self.pdfcal_cfg = pdfcal_cfg
self.calculate_params.update(self.pdfcal_cfg)
# configure calculator
for k,v in self.pdfcal_cfg.items():
setattr(self.calculator, k, v)
# empty list to store results
gr_list = []
rdf_list = []
print("====== INFO: calculation parameters:====\n{}"
.format(self.calculate_params))
struc_df = pd.DataFrame()
############# loop through cifs #################
for cif in sorted(self.cif_list):
_cif = os.path.join(self.input_dir, cif)
try:
# diffpy structure
struc = loadStructure(_cif)
struc.Bisoequiv = Bisoequiv
## calculate PDF/RDF with diffpy ##
if nosymmetry:
struc = nosymmetry(struc)
cal.setStructure(struc)
cal.eval()
# pymatgen structure
struc_meta = CifParser(_cif)
## calculate XRD with pymatgen ##
if xrd:
xrd = xrd_cal.get_xrd_data(struc_meta\
.get_structures(False).pop())
_xrd = np.asarray(xrd)[:,:2]
q, iq = _xrd.T
interp_q = assign_nearest(self.std_q, q, iq)
xrd_list.append(interp_q)
else:
pass
## test space group info ##
_sg = struc_meta.get_structures(False).pop()\
.get_space_group_info()
except:
print("{} fail".format(_cif))
fail_list.append(cif)
else:
# no error for both pymatgen and diffpy
gr_list.append(cal.pdf)
rdf_list.append(cal.rdf)
self.density_list.append(cal.slope)
print('=== Finished evaluating PDF from structure {} ==='
.format(cif))
## update features ##
flag = ['primitive', 'ordinary']
option = [True, False]
compo_list = [composition_list_1, composition_list_2]
struc_fields = ['a','b','c','alpha','beta','gamma', 'volume']
for f, op, compo in zip(flag, option, compo_list):
rv_dict = {}
struc = struc_meta.get_structures(op).pop()
a, b, c = struc.lattice.abc
aa, bb, cc = struc.lattice.angles
volume = struc.volume
for k, v in zip(struc_fields,
[a, b, c, aa, bb, cc, volume]):
rv_dict.update({"{}_{}".format(f, k) : v})
compo.append(struc.composition.as_dict())
struc_df = struc_df.append(rv_dict,
ignore_index=True)
# sg info, use the ordinary setup
sg_list.append(struc.get_space_group_info())
print('=== Finished evaluating XRD from structure {} ==='
.format(cif))
# finally, store crucial calculation results as attributes
self.r_grid = cal.rgrid
#4*pi * r^2 * rho(r) = R(r) -> RDF to density
self.gr_array = np.asarray(gr_list)/4/np.pi/self.r_grid**2
self.rdf_array = np.asarray(rdf_list)
self.density_list = np.asarray(self.density_list)
self.xrd_info = np.asarray(xrd_list)
self.sg_list = sg_list
# 1 -> primitive , 2 -> ordinary
self.composition_list_1 = composition_list_1
self.composition_list_2 = composition_list_2
self.struc_df = struc_df
self.fail_list = fail_list
def save_data(self):
""" a method to save outputs """
output_dir = self.output_dir
_makedirs(output_dir)
# save config of calculator
with open(os.path.join(output_dir, \
'learninglib_config.txt'), 'w') as f:
para_dict = dict(self.calculate_params)
f.write(str(para_dict))
# save gr, r, composition and fail list
gr_array_name = 'Gr'
gr_array_w_name = os.path.join(output_dir, gr_array_name)
np.save(gr_array_w_name, self.gr_array)
# rgrid
r_grid_name ='rgrid'
r_grid_w_name = os.path.join(output_dir, r_grid_name)
np.save(r_grid_w_name, self.r_grid)
# std_q
q_grid_name = 'qgrid'
q_grid_w_name = os.path.join(output_dir, q_grid_name)
np.save(q_grid_w_name, self.std_q)
# density_list
f_name = 'density'
w_name = os.path.join(output_dir, f_name)
np.save(w_name, self.density_list)
# sg_list
sg_list_name = 'sg_list.json'
sg_list_w_name = os.path.join(output_dir, sg_list_name)
with open(sg_list_w_name, 'w') as f:
json.dump(self.sg_list, f)
# xrd_info
xrd_list_name = 'xrd_info'
xrd_w_name = os.path.join(output_dir, xrd_list_name)
np.save(xrd_w_name, self.xrd_info)
#TODO: simplify saving code
# composition
fn_stem_list = ['primitive', 'ordinary']
for ind, compo in enumerate([self.composition_list_1,
self.composition_list_2]):
f_name = "{}_composition_list.json".format(fn_stem_list[ind])
w_name = os.path.join(output_dir,f_name)
if compo:
print('INFO: saving {}'.format(w_name))
with open(w_name, 'w') as f:
json.dump(compo, f)
else:
raise RuntimeError("{} is empty".format(f_name))
# structure_meta
f_name = "struc_df.json"
w_name = os.path.join(output_dir, f_name)
self.struc_df.to_json(w_name)
# fail_list
f_name = "fail_list.json"
w_name = os.path.join(output_dir,f_name)
print('INFO: saving {}'.format(w_name))
with open(w_name, 'w') as f:
json.dump(self.fail_list, f)
print("======== SUMMARY ======== ")
print("Number of fature calculated is {}"
.format(np.shape(self.gr_array)[0]))
|
chiahaoliu/pdf_lib
|
pdf_lib/calculate.py
|
Python
|
mit
| 13,026
|
[
"pymatgen"
] |
8dd8e8ea8d711f6999ee21b0a515958e7cfd0dde906721607995339e765aa71d
|
#!/usr/bin/python
# coding=utf-8
from wikitools import wiki, wikifile
""" Because the Veekun sprite rips of gen1/2 are non-transparent, they must be downloaded from Bulbapedia instead """
def main():
site = wiki.Wiki("http://bulbapedia.bulbagarden.net/w/api.php")
img_root = "/var/projects/namerater/assets/pokemon/"
for num in xrange(1,252): # National Dex numbers 1-251 (Gen 1 and 2)
dex = ("00"+str(num))[-3:]
print num
# Gen 1
if num <= 151:
# R/G front
f = wikifile.File(site, "File:Spr_1g_"+dex+".png")
f.download(location=img_root+"red-green/"+str(num)+".png")
# R/G back
f = wikifile.File(site, "File:Spr_b_g1_"+dex+".png")
f.download(location=img_root+"red-green/back/"+str(num)+".png")
# R/B front
f = wikifile.File(site, "File:Spr_1b_"+dex+".png")
f.download(location=img_root+"red-blue/"+str(num)+".png")
# R/B back
f = wikifile.File(site, "File:Spr_b_g1_"+dex+".png")
f.download(location=img_root+"red-blue/back/"+str(num)+".png")
# Y front
f = wikifile.File(site, "File:Spr_1y_"+dex+".png")
f.download(location=img_root+"yellow/"+str(num)+".png")
# Y back
f = wikifile.File(site, "File:Spr_b_g1_"+dex+".png")
f.download(location=img_root+"yellow/back/"+str(num)+".png")
# Gen 2
# Gold front
f = wikifile.File(site, "File:Spr_2g_"+dex+".png")
f.download(location=img_root+"gold/"+str(num)+".png")
# Gold back
f = wikifile.File(site, "File:Spr_b_2g_"+dex+".png")
f.download(location=img_root+"gold/back/"+str(num)+".png")
# Gold shiny front
f = wikifile.File(site, "File:Spr_2g_"+dex+"_s.png")
f.download(location=img_root+"gold/shiny/"+str(num)+".png")
# Gold shiny back
f = wikifile.File(site, "File:Spr_b_2g_"+dex+"_s.png")
f.download(location=img_root+"gold/back/shiny/"+str(num)+".png")
# Silver front
f = wikifile.File(site, "File:Spr_2s_"+dex+".png")
f.download(location=img_root+"silver/"+str(num)+".png")
# Silver back
f = wikifile.File(site, "File:Spr_b_2g_"+dex+".png")
f.download(location=img_root+"silver/back/"+str(num)+".png")
# Silver shiny front
f = wikifile.File(site, "File:Spr_2s_"+dex+"_s.png")
f.download(location=img_root+"silver/shiny/"+str(num)+".png")
# Silver shiny back
f = wikifile.File(site, "File:Spr_b_2g_"+dex+"_s.png")
f.download(location=img_root+"silver/back/shiny/"+str(num)+".png")
# Crystal front
f = wikifile.File(site, "File:Spr_2c_"+dex+".gif")
f.download(location=img_root+"crystal/"+str(num)+".png")
# Crystal back
f = wikifile.File(site, "File:Spr_b_2c_"+dex+".png")
f.download(location=img_root+"crystal/back/"+str(num)+".png")
# Crystal shiny front
f = wikifile.File(site, "File:Spr_2c_"+dex+"_s.gif")
f.download(location=img_root+"crystal/shiny/"+str(num)+".png")
# Crystal shiny back
f = wikifile.File(site, "File:Spr_b_2g_"+dex+"_s.png")
f.download(location=img_root+"crystal/back/shiny/"+str(num)+".png")
return
if __name__ == "__main__": main()
|
DrDos0016/namerater
|
tools/download_gen1-2.py
|
Python
|
mit
| 3,435
|
[
"CRYSTAL"
] |
6a271f4b8adaa34fa245897935d9ba114e6ad9034689fa9d05a6078c1b4e92a8
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "北京微纳精密机械" # (translatable)
BLOG_TITLE = "北京微纳精密机械" # (translatable)
BLOG_EMAIL = "test@bjwnjm.com"
BLOG_DESCRIPTION = "北京微纳精密机械有限公司" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link. Don't forget the protocol (http/https)!
SITE_URL = "http://bjwnjm.github.io/"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://"
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
(
(
("/intro/about/", "企业简介"),
("/intro/reword/", "荣誉资质"),
("/intro/partner/", "合作伙伴"),
), "公司概况"
),
(
(
("/news/", "公司动态"),
("/news/", "行业资讯"),
("/news/", "通知公告"),
), "企业新闻"
),
(
(
("/product/cnc_machine/", "数控加工机床"),
("/product/measuring_instrument/", "测量仪"),
("/product/unit_components/", "机床单元部件"),
), "产品中心"
),
(
(
("/solution/flat_components/", "平面元件加工"),
("/solution/car_piston/", "汽车活塞加工"),
), "市场解决方案"
),
(
(
("/service/complaints/", "留言和投诉"),
("/service/customer_service/", "联系客服"),
), "售后服务"
),
(
(
("/research/scientific_research_achievements/", "科研成果展示"),
("/research/academic_exchange/", "学术交流"),
), "科研和交流"
),
("/recruit/", "人才招聘"),
("/contact/", "联系我们"),
),
}
# POSTS and PAGES contains (wildcard, destination, template) tuples.
POSTS = (
# 2 Company news
("posts/*.rst", "news", "post.tmpl"),
("posts/*.md", "news", "post.tmpl"),
("posts/*.txt", "news", "post.tmpl"),
("posts/*.html", "news", "post.tmpl"),
("posts/*.org", "news", "post.tmpl"),
)
# And to avoid a conflict because blogs try to generate /index.html
INDEX_PATH = "news"
PAGES = (
# index page (homepage)
("pages/index.html", "", "page.tmpl"),
# 1 Company overview
("intro/about.org", "intro", "intro.tmpl"),
("intro/reword.org", "intro", "intro.tmpl"),
("intro/partner.org", "intro", "intro.tmpl"),
# 3 Product center
("product/*.org", "product", "page.tmpl"),
# 4 Market solutions
("solution/*.org", "solution", "page.tmpl"),
# 5 After-sales service
("service/*.org", "service", "page.tmpl"),
# 6 Research and exchange
("research/*.org", "research", "page.tmpl"),
# 7 Recruitment
("pages/recruit.org", "", "page.tmpl"),
# 8 Contact us
("pages/contact.org", "", "page.tmpl"),
# other pages
("pages/*.rst", "pages", "page.tmpl"),
("pages/*.md", "pages", "page.tmpl"),
("pages/*.txt", "pages", "page.tmpl"),
("pages/*.html", "pages", "page.tmpl"),
("pages/*.org", "pages", "page.tmpl"),
# archives
("archive/*.rst", "archives", "page.tmpl"),
("archive/*.md", "archives", "page.tmpl"),
("archive/*.txt", "archives", "page.tmpl"),
("archive/*.html", "archives", "page.tmpl"),
("archive/*.org", "archives", "page.tmpl"),
)
# Name of the theme to use.
#THEME = "bootstrap3"
THEME = "cerulean"
# Primary color of your theme. This will be used to customize your theme and
# auto-generate related colors in POSTS_SECTION_COLORS. Must be a HEX value.
THEME_COLOR = '#5670d4'
#THEME_COLOR = '#1d58a5'
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "Asia/Shanghai"
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates. (translatable)
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used. (translatable)
# (str used by moment.js)
# JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, bootstrap and bootstrap3 already do.
# DATE_FANCINESS = 0
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in Unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# LOCALES = {}
# LOCALE_FALLBACK = None
# LOCALE_DEFAULT = None
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing code listings to be processed and published on
# the site. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is Markdown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
"orgmode": ('.org',),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# Preferred metadata format for new posts
# "Nikola": reST comments wrapped in a comment if needed (default)
# "YAML": YAML wrapped in "---"
# "TOML": TOML wrapped in "+++"
# "Pelican": Native markdown metadata or reST docinfo fields. Nikola style for other formats.
# METADATA_FORMAT = "Nikola"
# Use date-based path when creating posts?
# Can be enabled on a per-post basis with `nikola new_post -d`.
# The setting is ignored when creating pages (`-d` still works).
# NEW_POST_DATE_PATH = False
# What format to use when creating posts with date paths?
# Default is '%Y/%m/%d', other possibilities include '%Y' or '%Y/%m'.
# NEW_POST_DATE_PATH_FORMAT = '%Y/%m/%d'
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
SHOW_BLOG_TITLE = False
# Writes tag cloud data in form of tag_cloud_data.json.
# Warning: this option will change its default value to False in v8!
WRITE_TAG_CLOUD = True
# Generate pages for each section. The site must have at least two sections
# for this option to take effect. It wouldn't build for just one section.
POSTS_SECTIONS = True
# Setting this to False generates a list page instead of an index. Indexes
# are the default and will apply GENERATE_ATOM if set.
# POSTS_SECTIONS_ARE_INDEXES = True
# Final locations are:
# output / TRANSLATION[lang] / SECTION_PATH / SECTION_NAME / index.html (list of posts for a section)
# output / TRANSLATION[lang] / SECTION_PATH / SECTION_NAME / rss.xml (RSS feed for a section)
# (translatable)
# SECTION_PATH = ""
# Each post and section page will have an associated color that can be used
# to style them with a recognizable color detail across your site. A color
# is assigned to each section based on shifting the hue of your THEME_COLOR
# at least 7.5 % while leaving the lightness and saturation untouched in the
# HUSL colorspace. You can overwrite colors by assigning them colors in HEX.
# POSTS_SECTION_COLORS = {
# DEFAULT_LANG: {
# 'posts': '#49b11b',
# 'reviews': '#ffe200',
# },
# }
# Associate a description with a section. For use in meta description on
# section index pages or elsewhere in themes.
# POSTS_SECTION_DESCRIPTIONS = {
# DEFAULT_LANG: {
# 'how-to': 'Learn how-to things properly with these amazing tutorials.',
# },
# }
# Sections are determined by their output directory as set in POSTS by default,
# but can alternatively be determined from file metadata instead.
# POSTS_SECTION_FROM_META = False
# Names are determined from the output directory name automatically or the
# metadata label. Unless overwritten below, names will be title-cased, with
# hyphens replaced by spaces.
# POSTS_SECTION_NAME = {
# DEFAULT_LANG: {
# 'posts': 'Blog Posts',
# 'uncategorized': 'Odds and Ends',
# },
# }
# Titles for per-section index pages. Can be either one string where "{name}"
# is substituted or the POSTS_SECTION_NAME, or a dict of sections. Note
# that the INDEX_PAGES option is also applied to section page titles.
# POSTS_SECTION_TITLE = {
# DEFAULT_LANG: {
# 'how-to': 'How-to and Tutorials',
# },
# }
# A list of dictionaries specifying sections which translate to each other.
# For example:
# [
# {'en': 'private', 'de': 'Privat'},
# {'en': 'work', 'fr': 'travail', 'de': 'Arbeit'},
# ]
# POSTS_SECTION_TRANSLATIONS = []
# If set to True, a section in a language will be treated as a translation
# of the literally same section in all other languages. Enable this if you
# do not translate sections, for example.
# POSTS_SECTION_TRANSLATIONS_ADD_DEFAULTS = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# (translatable)
# TAG_PATH = "categories"
# By default, the list of tags is stored in
# output / TRANSLATION[lang] / TAG_PATH / index.html
# (see explanation for TAG_PATH). This location can be changed to
# output / TRANSLATION[lang] / TAGS_INDEX_PATH
# with an arbitrary relative path TAGS_INDEX_PATH.
# (translatable)
# TAGS_INDEX_PATH = "tags.html"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for tag pages. The default is "Posts about TAG".
# TAG_PAGES_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page, the tag cloud and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ['mathjax']
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# A list of dictionaries specifying tags which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# See POSTS_SECTION_TRANSLATIONS example above.
# TAG_TRANSLATIONS = []
# If set to True, a tag in a language will be treated as a translation
# of the literally same tag in all other languages. Enable this if you
# do not translate tags, for example.
# TAG_TRANSLATIONS_ADD_DEFAULTS = True
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.xml (RSS feed for a category)
# (translatable)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# By default, the list of categories is stored in
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html
# (see explanation for CATEGORY_PATH). This location can be changed to
# output / TRANSLATION[lang] / CATEGORIES_INDEX_PATH
# with an arbitrary relative path CATEGORIES_INDEX_PATH.
# (translatable)
# CATEGORIES_INDEX_PATH = "categories.html"
# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in
# hierarchies. For a post, the whole path in the hierarchy must be specified,
# using a forward slash ('/') to separate paths. Use a backslash ('\') to escape
# a forward slash or a backslash (i.e. '\//\\' is a path specifying the
# subcategory called '\' of the top-level category called '/').
CATEGORY_ALLOW_HIERARCHIES = False
# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output
# contains only the name of the leaf category and not the whole path.
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for category pages. The default is "Posts about CATEGORY".
# CATEGORY_PAGES_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
HIDDEN_CATEGORIES = []
# A list of dictionaries specifying categories which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# See POSTS_SECTION_TRANSLATIONS example above.
# CATEGORY_TRANSLATIONS = []
# If set to True, a category in a language will be treated as a translation
# of the literally same category in all other languages. Enable this if you
# do not translate categories, for example.
# CATEGORY_TRANSLATIONS_ADD_DEFAULTS = True
# If ENABLE_AUTHOR_PAGES is set to True and there is more than one
# author, author pages are generated.
# ENABLE_AUTHOR_PAGES = True
# Path to author pages. Final locations are:
# output / TRANSLATION[lang] / AUTHOR_PATH / index.html (list of authors)
# output / TRANSLATION[lang] / AUTHOR_PATH / author.html (list of posts by an author)
# output / TRANSLATION[lang] / AUTHOR_PATH / author.xml (RSS feed for an author)
# (translatable)
# AUTHOR_PATH = "authors"
# If AUTHOR_PAGES_ARE_INDEXES is set to True, each author's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# AUTHOR_PAGES_ARE_INDEXES = False
# Set descriptions for author pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the author list or index page’s title.
# AUTHOR_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "Juanjo Conti": "Python coder and writer.",
# "Roberto Alsina": "Nikola father."
# },
# }
# If you do not want to display an author publicly, you can mark it as hidden.
# The author will not be displayed on the author list page and posts.
# Tag pages will still be generated.
HIDDEN_AUTHORS = ['Guest']
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# (translatable)
# INDEX_PATH = ""
# Optional HTML that is displayed on “main” blog index.html files.
# May be used for a greeting. (translatable)
FRONT_INDEX_HEADER = {
DEFAULT_LANG: ''
}
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Create previous, up, next navigation links for archives
# CREATE_ARCHIVE_NAVIGATION = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# If USE_BASE_TAG is True, then all HTML files will include
# something like <base href=http://foo.var.com/baz/bat> to help
# the browser resolve relative links.
# Most people don’t need this tag; major websites don’t use it. Use
# only if you know what you’re doing. If this is True, your website
# will not be fully usable by manually opening .html files in your web
# browser (`nikola serve` or `nikola auto` is mandatory). Also, if you
# have mirrors of your site, they will point to SITE_URL everywhere.
USE_BASE_TAG = False
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# (translatable)
# RSS_PATH = ""
# Slug the Tag URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# Slug the Author URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_AUTHOR_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ joe@my.site:/srv/www/site",
# ]
# }
# github_deploy configuration
# For more details, read the manual:
# https://getnikola.com/handbook.html#deploying-to-github
# You will need to configure the deployment branch on GitHub.
GITHUB_SOURCE_BRANCH = 'source'
GITHUB_DEPLOY_BRANCH = 'master'
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = 'origin'
# Whether or not github_deploy should commit to the source branch automatically
# before deploying.
GITHUB_COMMIT_SOURCE = False
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <https://getnikola.com/handbook.html#post-processing-filters>
#
# from nikola import filters
# FILTERS = {
# ".html": [filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Executable for the "yui_compressor" filter (defaults to 'yui-compressor').
# YUI_COMPRESSOR_EXECUTABLE = 'yui-compressor'
# Executable for the "closure_compiler" filter (defaults to 'closure-compiler').
# CLOSURE_COMPILER_EXECUTABLE = 'closure-compiler'
# Executable for the "optipng" filter (defaults to 'optipng').
# OPTIPNG_EXECUTABLE = 'optipng'
# Executable for the "jpegoptim" filter (defaults to 'jpegoptim').
# JPEGOPTIM_EXECUTABLE = 'jpegoptim'
# Executable for the "html_tidy_withconfig", "html_tidy_nowrap",
# "html_tidy_wrap", "html_tidy_wrap_attr" and "html_tidy_mini" filters
# (defaults to 'tidy5').
# HTML_TIDY_EXECUTABLE = 'tidy5'
# List of XPath expressions which should be used for finding headers
# ({hx} is replaced by headers h1 through h6).
# You must change this if you use a custom theme that does not use
# "e-content entry-content" as a class for post and page contents.
# HEADER_PERMALINKS_XPATH_LIST = ['*//div[@class="e-content entry-content"]//{hx}']
# Include *every* header (not recommended):
# HEADER_PERMALINKS_XPATH_LIST = ['*//{hx}']
# File blacklist for header permalinks. Contains output path
# (eg. 'output/index.html')
# HEADER_PERMALINKS_FILE_BLACKLIST = []
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.atom', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return a "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
# If set to True, EXIF data will be copied when an image is thumbnailed or
# resized. (See also EXIF_WHITELIST)
# PRESERVE_EXIF_DATA = False
# If you have enabled PRESERVE_EXIF_DATA, this option lets you choose EXIF
# fields you want to keep in images. (See also PRESERVE_EXIF_DATA)
#
# For a full list of field names, please see here:
# http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf
#
# This is a dictionary of lists. Each key in the dictionary is the
# name of an IFD, and each list item is a field you want to preserve.
# If you have an IFD with only a '*' item, *EVERY* item in it will be
# preserved. If you don't want to preserve anything in an IFD, remove it
# from the setting. By default, no EXIF information is kept.
# Setting the whitelist to anything other than {} implies
# PRESERVE_EXIF_DATA is set to True
# To preserve ALL EXIF data, set EXIF_WHITELIST to {"*": "*"}
# EXIF_WHITELIST = {}
# Some examples of EXIF_WHITELIST settings:
# Basic image information:
# EXIF_WHITELIST['0th'] = [
# "Orientation",
# "XResolution",
# "YResolution",
# ]
# If you want to keep GPS data in the images:
# EXIF_WHITELIST['GPS'] = ["*"]
# Embedded thumbnail information:
# EXIF_WHITELIST['1st'] = ["*"]
# Folders containing images to be used in normal posts or pages.
# IMAGE_FOLDERS is a dictionary of the form {"source": "destination"},
# where "source" is the folder containing the images to be published, and
# "destination" is the folder under OUTPUT_PATH containing the images copied
# to the site. Thumbnail images will be created there as well.
# To reference the images in your posts, include a leading slash in the path.
# For example, if IMAGE_FOLDERS = {'images': 'images'}, write
#
# .. image:: /images/tesla.jpg
#
# See the Nikola Handbook for details (in the “Embedding Images” and
# “Thumbnails” sections)
# Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE
# options, but will have to be referenced manually to be visible on the site
# (the thumbnail has ``.thumbnail`` added before the file extension by default,
# but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT).
IMAGE_FOLDERS = {'images': 'images'}
# IMAGE_THUMBNAIL_SIZE = 400
# IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}'
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
# INDEXES_PAGES_MAIN = False
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# prettier URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this settings are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
#
# If the following is true, a page range navigation will be inserted to indices.
# Please note that this will undo the effect of INDEXES_STATIC, as all index pages
# must be recreated whenever the number of pages changes.
# SHOW_INDEX_PAGE_NAVIGATION = False
# If the following is True, a meta name="generator" tag is added to pages. The
# generator tag is used to specify the software used to generate the page
# (it promotes Nikola).
# META_GENERATOR_TAG = True
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored. Leave empty to disable.
# Can be any of:
# algol, algol_nu, autumn, borland, bw, colorful, default, emacs, friendly,
# fruity, igor, lovelace, manni, monokai, murphy, native, paraiso-dark,
# paraiso-light, pastie, perldoc, rrt, tango, trac, vim, vs, xcode
# This list MAY be incomplete since pygments adds styles every now and then.
# Check with list(pygments.styles.get_all_styles()) in an interpreter.
# CODE_COLOR_SCHEME = 'default'
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = (
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# )
# Show teasers (instead of full posts) in indexes? Defaults to False.
INDEX_TEASERS = True
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The amount of paragraphs in the post.
# {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser.
# {post_title} The title of the post.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)
FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced
# option used for traffic source tracking.
# Minimum example for use with Piwik: "pk_campaign=feed"
# The following tags exist and are replaced for you:
# {feedRelUri} A relative link to the feed.
# {feedFormat} The name of the syndication format.
# Example using replacement for use with Google Analytics:
# "utm_source={feedRelUri}&utm_medium=nikola_feed&utm_campaign={feedFormat}_feed"
FEED_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# https://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = """
<div class="text-center">
<p> {author} 版权所有 © {date} <br>
地址: 北京市朝阳区望京新兴产业园区利泽中园二区208号 邮编: 101399 联系电话: 010-89401023</p>
</div>
"""
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello'), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# If you need to use the literal braces '{' and '}' in your footer text, use
# '{{' and '}}' to escape them (str.format is used)
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_DESCRIPTION,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
# A simple copyright tag for inclusion in RSS feeds that works just
# like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS
RSS_COPYRIGHT = 'Contents © {date} <a href="mailto:{email}">{author}</a> {license}'
RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}'
RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, googleplus, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = ""
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for page folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the PAGE_INDEX
# will not be generated for that directory.
# PAGE_INDEX = False
# Enable comments on pages (i.e. not posts)?
# COMMENTS_IN_PAGES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead strip /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
STRIP_INDEXES = True
# Should the sitemap list directories which only include other directories
# and no files.
# Default to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in <slug>/index.html.
# No web server configuration is required. Also enables STRIP_INDEXES.
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata.
PRETTY_URLS = True
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you want support for the $.$ syntax (which may conflict with running
# text!), just use this config:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'center', // Change this to 'left' if you want left-aligned equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Want to use KaTeX instead of MathJax? While KaTeX may not support every
# feature yet, it's faster and the output looks better.
# USE_KATEX = False
# KaTeX auto-render settings. If you want support for the $.$ syntax (which may
# conflict with running text!), just use this config:
# KATEX_AUTO_RENDER = """
# delimiters: [
# {left: "$$", right: "$$", display: true},
# {left: "\\\[", right: "\\\]", display: true},
# {left: "$", right: "$", display: false},
# {left: "\\\(", right: "\\\)", display: false}
# ]
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# The default is ['fenced_code', 'codehilite']
MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra']
# Extra options to pass to the pandoc command.
# By default it's empty; it is a list of strings, for example
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# Pandoc does not demote headers by default. To enable this, you can use, for example
# ['--base-header-level=2']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty (which is
# the default right now)
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
SHOW_SOURCELINK = False
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
COPY_SOURCES = False
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# By default, Nikola does not generate Atom files for indexes and links to
# them. Generate Atom for tags by setting TAG_PAGES_ARE_INDEXES to True.
# Atom feeds are built based on INDEX_DISPLAY_POST_COUNT and not FEED_LENGTH
# Switch between plain-text summaries and full HTML content using the
# FEED_TEASER option. FEED_LINKS_APPEND_QUERY is also respected. Atom feeds
# are generated even for old indexes and have pagination link relations
# between each other. Old Atom feeds with no changes are marked as archived.
# GENERATE_ATOM = False
# Only include teasers in Atom and RSS feeds. Disabling this includes the full
# content. Defaults to True.
# FEED_TEASERS = True
# Strip HTML from Atom and RSS feed summaries and content. Defaults to False.
# FEED_PLAIN = False
# Number of posts in Atom and RSS feeds.
# FEED_LENGTH = 10
# Include preview image as a <figure><img></figure> at the top of the entry.
# Requires FEED_PLAIN = False. If the preview image is found in the content,
# it will not be included again. Image will be included as-is; aim to optimize
# the image source for Feedly, Apple News, Flipboard, and other popular clients.
# FEED_PREVIEWIMAGE = True
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a FeedBurner feed or something else.
# RSS_LINK = None
# A search form to search this site, for the sidebar. You can use a Google
# custom search (https://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- DuckDuckGo custom search -->
# <form method="get" id="search" action="https://duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s">
# <input type="hidden" name="k8" value="#444444">
# <input type="hidden" name="k9" value="#D51920">
# <input type="hidden" name="kt" value="h">
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;">
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Google custom search -->
# <form method="get" action="https://www.google.com/search" class="navbar-form navbar-right" role="search">
# <div class="form-group">
# <input type="text" name="q" class="form-control" placeholder="Search">
# </div>
# <button type="submit" class="btn btn-primary">
# <span class="glyphicon glyphicon-search"></span>
# </button>
# <input type="hidden" name="sitesearch" value="%s">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
# Use content distribution networks for jQuery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# Google analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
BODY_END = """
<script>
var _hmt = _hmt || [];
(function() {
var hm = document.createElement("script");
hm.src = "https://hm.baidu.com/hm.js?01e30b6867ec146fc376c9e83a243b5d";
var s = document.getElementsByTagName("script")[0];
s.parentNode.insertBefore(hm, s);
})();
</script>
"""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '.*\/(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.rst'
# (Note the '.*\/' in the beginning -- matches source paths relative to conf.py)
# FILE_METADATA_REGEXP = None
# If enabled, extract metadata from docinfo fields in reST documents
# USE_REST_DOCINFO_METADATA = False
# If enabled, hide docinfo fields in reST document output
# HIDE_REST_DOCINFO = False
# Map metadata from other formats to Nikola names.
# Supported formats: yaml, toml, rest_docinfo, markdown_metadata
# METADATA_MAPPING = {}
#
# Example for Pelican compatibility:
# METADATA_MAPPING = {
# "rest_docinfo": {"summary": "description", "modified": "updated"},
# "markdown_metadata": {"summary": "description", "modified": "updated"}
# }
# Other examples: https://getnikola.com/handbook.html#mapping-metadata-from-other-formats
# If you hate "Filenames with Capital Letters and Spaces.md", you should
# set this to true.
UNSLUGIFY_TITLES = True
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Open Graph Protocol data for enhancing link sharing and
# discoverability of your site on Facebook, Google+, and other services.
# Open Graph is enabled by default.
# USE_OPEN_GRAPH = True
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit https://cards-dev.twitter.com/validator
#
# Uncomment and modify the following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# If webassets is installed, bundle JS and CSS into single files to make
# site loading faster in an HTTP/1.1 environment. This is not recommended
# for HTTP/2.0 when caching is used. Defaults to True.
USE_BUNDLES = False
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Special settings to disable only parts of the indexes plugin (to allow RSS
# but no blog indexes, or to allow blog indexes and Atom but no site-wide RSS).
# Use with care.
# DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED = False
# DISABLE_INDEXES_PLUGIN_RSS_FEED = False
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Add the absolute paths to directories containing themes to use them.
# For example, the `v7` directory of your clone of the Nikola themes
# repository.
# EXTRA_THEMES_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# Enabling hyphenation has been shown to break math support in some cases,
# use with caution.
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# Docutils, by default, will perform a transform in your documents
# extracting unique titles at the top of your document and turning
# them into metadata. This surprises a lot of people, and setting
# this option to True will prevent it.
# NO_DOCUTILS_TITLE_TRANSFORM = False
# If you don’t like slugified file names ([a-z0-9] and a literal dash),
# and would prefer to use all the characters your file system allows.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
|
bjwnjm/bjwnjm.github.io
|
conf.py
|
Python
|
apache-2.0
| 53,455
|
[
"VisIt"
] |
d03c526671fb3c522c4502bdf56b61ea3b822242ce7ad505055186feb584e092
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-02-12 20:46:42
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-02-22 10:40:26
from __future__ import print_function, division, absolute_import
from marvin.tests.web import MarvinWebTester
from marvin import config, marvindb
from flask import session, url_for
import unittest
class TestIndexPage(MarvinWebTester):
render_templates = False
def setUp(self):
super(TestIndexPage, self).setUp()
self.blue = 'index_page'
config.setRelease('MPL-5')
self.release = config.release
def test_assert_index_template_used(self):
url = self.get_url('Marvin:index')
self._load_page('get', url)
self.assertEqual('', self.data)
self.assert_template_used('index.html')
class TestDb(TestIndexPage):
def test_db_works(self):
url = self.get_url('Marvin:database')
self._load_page('get', url, params={'release': self.release})
data = {'plate': 7443}
self._assert_webjson_success(data)
def test_db_post_fails(self):
url = self.get_url('Marvin:database')
self._load_page('post', url, params={'release': self.release})
self.assert405(self.response, 'allowed method should be get')
class TestSelectMPL(TestIndexPage):
def _select_mpl(self, release, drpver, dapver):
url = self.get_url('selectmpl')
self._load_page('post', url, params={'release': release})
data = {'current_release': release, 'current_drpver': drpver, 'current_dapver': dapver}
self._assert_webjson_success(data)
self._release_in_session(data)
def test_select_mpl5(self):
self._select_mpl('MPL-5', 'v2_0_1', '2.0.2')
def test_select_mpl4(self):
self._select_mpl('MPL-4', 'v1_5_1', '1.1.1')
def test_select_mpl2(self):
self._select_mpl('MPL-2', 'v1_2_0', None)
def _release_in_session(self, data):
with self.client as c:
with c.session_transaction() as sess:
self.assertEqual(data['current_release'], sess['release'])
self.assertEqual(data['current_drpver'], sess['drpver'])
self.assertEqual(data['current_dapver'], sess['dapver'])
class TestGetGalIdList(TestIndexPage):
def test_getgalid_success(self):
url = self.get_url('getgalidlist')
self._load_page('post', url, params={'release': self.release})
data = ['8485', '8485-1901', '7443', '7443-12701', '1-209232', '12-98126']
self.assert200(self.response, message='response status should be 200 for ok')
self.assertListIn(data, self.json)
def test_getgalid_fail(self):
marvindb.datadb = None
url = self.get_url('getgalidlist')
self._load_page('post', url, params={'release': self.release})
data = ['']
self.assert200(self.response, message='response status should be 200 for ok')
self.assertListEqual(data, self.json)
class TestGalIdSelect(TestIndexPage):
def _get_galid(self, name, galid, redirect_url):
data = {'galid': galid, 'release': self.release}
url = self.get_url('galidselect')
self._load_page('get', url, params=data)
self.assert_redirects(self.response, redirect_url, 'page should be redirected to {0} page'.format(name))
def test_get_plate(self):
self._get_galid('plate', self.plate, url_for('plate_page.Plate:get', plateid=self.plate))
def test_get_plateifu(self):
self._get_galid('galaxy', self.plateifu, url_for('galaxy_page.Galaxy:get', galid=self.plateifu))
def test_get_mangaid(self):
self._get_galid('galaxy', self.mangaid, url_for('galaxy_page.Galaxy:get', galid=self.mangaid))
def test_get_none(self):
self._get_galid('main', 'galname', url_for('index_page.Marvin:index'))
class TestLogin(TestIndexPage):
@unittest.skip('test disabled')
def test_login_success(self):
data = {'username': 'sdss', 'password': 'password', 'release': self.release}
exp = {'ready': True, 'status': 1, 'message': 'Logged in as sdss. ', 'membername': 'SDSS User'}
self._login(data, exp)
def test_no_input(self):
data = {'username': '', 'password': '', 'release': self.release}
exp = {'ready': False, 'status': -1, 'message': ''}
self._login(data, exp)
def test_wrong_password(self):
data = {'username': 'sdss', 'password': 'password', 'release': self.release}
exp = {'ready': False, 'status': 0, 'message': 'Failed login for sdss. Please retry.', 'membername': 'Unknown user'}
self._login(data, exp)
def test_wrong_username(self):
data = {'username': 'bac29', 'password': 'password', 'release': self.release}
exp = {'ready': False, 'status': 0, 'message': 'Failed login for bac29. Username unrecognized.', 'membername': 'Unknown user'}
self._login(data, exp)
def _login(self, data, exp):
url = self.get_url('login')
self._load_page('post', url, params=data)
self.assert200(self.response, 'response status should be 200 for ok')
self.assertEqual(exp['status'], self.response.json['result']['status'])
self.assertEqual(exp['message'], self.response.json['result']['message'])
if 'membername' in exp:
self.assertEqual(exp['membername'], self.response.json['result']['membername'])
if __name__ == '__main__':
verbosity = 2
unittest.main(verbosity=verbosity)
|
bretthandrews/marvin
|
python/marvin/tests/web/test_index.py
|
Python
|
bsd-3-clause
| 5,525
|
[
"Brian",
"Galaxy"
] |
fc42bac1b8eb73d30310bc67cf17ebec07144be9fcb75176a2b61ec468cae188
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""The mdtraj package contains tools for loading and saving molecular dynamics
trajectories in a variety of formats, including Gromacs XTC & TRR, CHARMM/NAMD
DCD, AMBER BINPOS, PDB, and HDF5.
"""
from mdtraj.formats.registry import FormatRegistry
from mdtraj.formats.xtc import load_xtc
from mdtraj.formats.trr import load_trr
from mdtraj.formats.hdf5 import load_hdf5
from mdtraj.formats.lh5 import load_lh5
from mdtraj.formats.netcdf import load_netcdf
from mdtraj.formats.mdcrd import load_mdcrd
from mdtraj.formats.dcd import load_dcd
from mdtraj.formats.binpos import load_binpos
from mdtraj.formats.pdb import load_pdb
from mdtraj.formats.arc import load_arc
from mdtraj.formats.openmmxml import load_xml
from mdtraj.formats.prmtop import load_prmtop
from mdtraj.formats.psf import load_psf
from mdtraj.formats.mol2 import load_mol2
from mdtraj.formats.amberrst import load_restrt, load_ncrestrt
from mdtraj.formats.lammpstrj import load_lammpstrj
from mdtraj.formats.dtr import load_dtr
from mdtraj.formats.xyzfile import load_xyz
from mdtraj.formats.hoomdxml import load_hoomdxml
from mdtraj.core import element
from mdtraj._rmsd import rmsd
from mdtraj._lprmsd import lprmsd
from mdtraj.core.topology import Topology
from mdtraj.geometry import *
from mdtraj.core.trajectory import *
from mdtraj.nmr import *
import mdtraj.reporters
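# Illustrative usage only (file names are hypothetical): the format-specific
# loaders imported above can be called directly once files exist on disk.
#   import mdtraj as md
#   traj = md.load_xtc('traj.xtc', top='topology.pdb')
#   print(traj.n_frames)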
def test(label='full', verbose=2, extra_argv=None, doctests=False):
"""Run tests for mdtraj using nose.
Parameters
----------
label : {'fast', 'full'}
Identifies the tests to run. The fast tests take about 10 seconds,
and the full test suite takes about two minutes (as of this writing).
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 2.
extra_argv : list, optional
Extra arguments to pass through to nose.
doctests : bool, optional
If True, also run the doctests. Default is False.
"""
import mdtraj
from mdtraj.testing.nosetester import MDTrajTester
tester = MDTrajTester(mdtraj)
return tester.test(label=label, verbose=verbose, extra_argv=extra_argv, doctests=doctests)
# Prevent nose from discovering this function; otherwise it would run
# the test suite in an infinite loop.
test.__test__ = False
def capi():
import os
import sys
module_path = sys.modules['mdtraj'].__path__[0]
return {
'lib_dir': os.path.join(module_path, 'core', 'lib'),
'include_dir': os.path.join(module_path, 'core', 'lib'),
}
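# Illustrative use of capi() (a sketch, not part of the original module):
#   info = capi()
#   # pass info['include_dir'] as a -I flag and info['lib_dir'] as a -L flag
#   # when compiling extensions against the mdtraj core library.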
|
swails/mdtraj
|
mdtraj/__init__.py
|
Python
|
lgpl-2.1
| 3,351
|
[
"Amber",
"CHARMM",
"Gromacs",
"MDTraj",
"NAMD",
"NetCDF"
] |
4422f12207c7984c99a934941666bd739d121a4a26ab29d2931be4a537e902ea
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
All the pure-Python 'helper' functions which were previously included in the
Pyke rules database 'fc_rules_cf.krb'.
The 'action' routines now call these, as the rules used to do.
They have not changed, **except** that the 'build_coordinate_system' routine
acquired an extra initial 'engine' argument, purely for consistency with
other build routines; it does not use it.
"""
import warnings
import cf_units
import numpy as np
import numpy.ma as ma
import iris.aux_factory
from iris.common.mixin import _get_valid_standard_name
import iris.coord_systems
import iris.coords
import iris.exceptions
import iris.fileformats.cf as cf
import iris.fileformats.netcdf
from iris.fileformats.netcdf import (
UnknownCellMethodWarning,
_get_cf_var_data,
parse_cell_methods,
)
import iris.std_names
import iris.util
#
# UD Units Constants (based on Unidata udunits.dat definition file)
#
UD_UNITS_LAT = [
"degrees_north",
"degree_north",
"degree_n",
"degrees_n",
"degreen",
"degreesn",
"degrees",
"degrees north",
"degree north",
"degree n",
"degrees n",
]
UD_UNITS_LON = [
"degrees_east",
"degree_east",
"degree_e",
"degrees_e",
"degreee",
"degreese",
"degrees",
"degrees east",
"degree east",
"degree e",
"degrees e",
]
UNKNOWN_UNIT_STRING = "?"
NO_UNIT_STRING = "-"
#
# CF Dimensionless Vertical Coordinates
#
CF_COORD_VERTICAL = {
"atmosphere_ln_pressure_coordinate": ["p0", "lev"],
"atmosphere_sigma_coordinate": ["sigma", "ps", "ptop"],
"atmosphere_hybrid_sigma_pressure_coordinate": ["a", "b", "ps", "p0"],
"atmosphere_hybrid_height_coordinate": ["a", "b", "orog"],
"atmosphere_sleve_coordinate": [
"a",
"b1",
"b2",
"ztop",
"zsurf1",
"zsurf2",
],
"ocean_sigma_coordinate": ["sigma", "eta", "depth"],
"ocean_s_coordinate": ["s", "eta", "depth", "a", "b", "depth_c"],
"ocean_sigma_z_coordinate": [
"sigma",
"eta",
"depth",
"depth_c",
"nsigma",
"zlev",
],
"ocean_double_sigma_coordinate": [
"sigma",
"depth",
"z1",
"z2",
"a",
"href",
"k_c",
],
"ocean_s_coordinate_g1": ["s", "eta", "depth", "depth_c", "C"],
"ocean_s_coordinate_g2": ["s", "eta", "depth", "depth_c", "C"],
}
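# Illustrative reading of CF_COORD_VERTICAL (comment added for clarity): for
# each dimensionless vertical coordinate standard_name, the list holds the
# term names expected in the variable's "formula_terms" attribute, per the
# CF conventions (Appendix D).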
#
# CF Grid Mappings
#
CF_GRID_MAPPING_ALBERS = "albers_conical_equal_area"
CF_GRID_MAPPING_AZIMUTHAL = "azimuthal_equidistant"
CF_GRID_MAPPING_LAMBERT_AZIMUTHAL = "lambert_azimuthal_equal_area"
CF_GRID_MAPPING_LAMBERT_CONFORMAL = "lambert_conformal_conic"
CF_GRID_MAPPING_LAMBERT_CYLINDRICAL = "lambert_cylindrical_equal_area"
CF_GRID_MAPPING_LAT_LON = "latitude_longitude"
CF_GRID_MAPPING_MERCATOR = "mercator"
CF_GRID_MAPPING_ORTHO = "orthographic"
CF_GRID_MAPPING_POLAR = "polar_stereographic"
CF_GRID_MAPPING_ROTATED_LAT_LON = "rotated_latitude_longitude"
CF_GRID_MAPPING_STEREO = "stereographic"
CF_GRID_MAPPING_TRANSVERSE = "transverse_mercator"
CF_GRID_MAPPING_VERTICAL = "vertical_perspective"
CF_GRID_MAPPING_GEOSTATIONARY = "geostationary"
#
# CF Attribute Names.
#
CF_ATTR_AXIS = "axis"
CF_ATTR_BOUNDS = "bounds"
CF_ATTR_CALENDAR = "calendar"
CF_ATTR_CLIMATOLOGY = "climatology"
CF_ATTR_GRID_INVERSE_FLATTENING = "inverse_flattening"
CF_ATTR_GRID_EARTH_RADIUS = "earth_radius"
CF_ATTR_GRID_MAPPING_NAME = "grid_mapping_name"
CF_ATTR_GRID_NORTH_POLE_LAT = "grid_north_pole_latitude"
CF_ATTR_GRID_NORTH_POLE_LON = "grid_north_pole_longitude"
CF_ATTR_GRID_NORTH_POLE_GRID_LON = "north_pole_grid_longitude"
CF_ATTR_GRID_SEMI_MAJOR_AXIS = "semi_major_axis"
CF_ATTR_GRID_SEMI_MINOR_AXIS = "semi_minor_axis"
CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN = "latitude_of_projection_origin"
CF_ATTR_GRID_LON_OF_PROJ_ORIGIN = "longitude_of_projection_origin"
CF_ATTR_GRID_STANDARD_PARALLEL = "standard_parallel"
CF_ATTR_GRID_FALSE_EASTING = "false_easting"
CF_ATTR_GRID_FALSE_NORTHING = "false_northing"
CF_ATTR_GRID_SCALE_FACTOR_AT_PROJ_ORIGIN = "scale_factor_at_projection_origin"
CF_ATTR_GRID_SCALE_FACTOR_AT_CENT_MERIDIAN = "scale_factor_at_central_meridian"
CF_ATTR_GRID_LON_OF_CENT_MERIDIAN = "longitude_of_central_meridian"
CF_ATTR_GRID_PERSPECTIVE_HEIGHT = "perspective_point_height"
CF_ATTR_GRID_SWEEP_ANGLE_AXIS = "sweep_angle_axis"
CF_ATTR_POSITIVE = "positive"
CF_ATTR_STD_NAME = "standard_name"
CF_ATTR_LONG_NAME = "long_name"
CF_ATTR_UNITS = "units"
CF_ATTR_CELL_METHODS = "cell_methods"
#
# CF Attribute Value Constants.
#
# Attribute - axis.
CF_VALUE_AXIS_X = "x"
CF_VALUE_AXIS_Y = "y"
CF_VALUE_AXIS_T = "t"
CF_VALUE_AXIS_Z = "z"
# Attribute - positive.
CF_VALUE_POSITIVE = ["down", "up"]
# Attribute - standard_name.
CF_VALUE_STD_NAME_LAT = "latitude"
CF_VALUE_STD_NAME_LON = "longitude"
CF_VALUE_STD_NAME_GRID_LAT = "grid_latitude"
CF_VALUE_STD_NAME_GRID_LON = "grid_longitude"
CF_VALUE_STD_NAME_PROJ_X = "projection_x_coordinate"
CF_VALUE_STD_NAME_PROJ_Y = "projection_y_coordinate"
################################################################################
def build_cube_metadata(engine):
"""Add the standard meta data to the cube."""
cf_var = engine.cf_var
cube = engine.cube
# Determine the cube's name attributes
cube.var_name = cf_var.cf_name
standard_name = getattr(cf_var, CF_ATTR_STD_NAME, None)
long_name = getattr(cf_var, CF_ATTR_LONG_NAME, None)
cube.long_name = long_name
if standard_name is not None:
try:
cube.standard_name = _get_valid_standard_name(standard_name)
except ValueError:
if cube.long_name is not None:
cube.attributes["invalid_standard_name"] = standard_name
else:
cube.long_name = standard_name
# Determine the cube units.
attr_units = get_attr_units(cf_var, cube.attributes)
cube.units = attr_units
# Incorporate cell methods
nc_att_cell_methods = getattr(cf_var, CF_ATTR_CELL_METHODS, None)
with warnings.catch_warnings(record=True) as warning_records:
cube.cell_methods = parse_cell_methods(nc_att_cell_methods)
# Filter to get the warning we are interested in.
warning_records = [
record
for record in warning_records
if issubclass(record.category, UnknownCellMethodWarning)
]
if len(warning_records) > 0:
# Output an enhanced warning message.
warn_record = warning_records[0]
name = "{}".format(cf_var.cf_name)
msg = warn_record.message.args[0]
msg = msg.replace("variable", "variable {!r}".format(name))
warnings.warn(message=msg, category=UnknownCellMethodWarning)
# Set the cube global attributes.
for attr_name, attr_value in cf_var.cf_group.global_attributes.items():
try:
cube.attributes[str(attr_name)] = attr_value
except ValueError as e:
msg = "Skipping global attribute {!r}: {}"
warnings.warn(msg.format(attr_name, str(e)))
################################################################################
def _get_ellipsoid(cf_grid_var):
"""Return the ellipsoid definition."""
major = getattr(cf_grid_var, CF_ATTR_GRID_SEMI_MAJOR_AXIS, None)
minor = getattr(cf_grid_var, CF_ATTR_GRID_SEMI_MINOR_AXIS, None)
inverse_flattening = getattr(
cf_grid_var, CF_ATTR_GRID_INVERSE_FLATTENING, None
)
# Avoid over-specification exception.
if major is not None and minor is not None:
inverse_flattening = None
# Check for a default spherical earth.
if major is None and minor is None and inverse_flattening is None:
major = getattr(cf_grid_var, CF_ATTR_GRID_EARTH_RADIUS, None)
return major, minor, inverse_flattening
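# Note (illustrative, not in the original source): supplying all three of
# semi-major axis, semi-minor axis and inverse flattening would trigger the
# over-specification exception in GeogCS, hence inverse_flattening is
# dropped above whenever both axes are present.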
################################################################################
def build_coordinate_system(engine, cf_grid_var):
"""Create a coordinate system from the CF-netCDF grid mapping variable."""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
return iris.coord_systems.GeogCS(major, minor, inverse_flattening)
################################################################################
def build_rotated_coordinate_system(engine, cf_grid_var):
"""Create a rotated coordinate system from the CF-netCDF grid mapping variable."""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
north_pole_latitude = getattr(
cf_grid_var, CF_ATTR_GRID_NORTH_POLE_LAT, 90.0
)
north_pole_longitude = getattr(
cf_grid_var, CF_ATTR_GRID_NORTH_POLE_LON, 0.0
)
if north_pole_latitude is None or north_pole_longitude is None:
warnings.warn("Rotated pole position is not fully specified")
north_pole_grid_lon = getattr(
cf_grid_var, CF_ATTR_GRID_NORTH_POLE_GRID_LON, 0.0
)
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
rcs = iris.coord_systems.RotatedGeogCS(
north_pole_latitude,
north_pole_longitude,
north_pole_grid_lon,
ellipsoid,
)
return rcs
################################################################################
def build_transverse_mercator_coordinate_system(engine, cf_grid_var):
"""
Create a transverse Mercator coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
latitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN, None
)
longitude_of_central_meridian = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_CENT_MERIDIAN, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
scale_factor_at_central_meridian = getattr(
cf_grid_var, CF_ATTR_GRID_SCALE_FACTOR_AT_CENT_MERIDIAN, None
)
# The following accounts for the inconsistency in the transverse
# Mercator description within the CF spec.
if longitude_of_central_meridian is None:
longitude_of_central_meridian = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_PROJ_ORIGIN, None
)
if scale_factor_at_central_meridian is None:
scale_factor_at_central_meridian = getattr(
cf_grid_var, CF_ATTR_GRID_SCALE_FACTOR_AT_PROJ_ORIGIN, None
)
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.TransverseMercator(
latitude_of_projection_origin,
longitude_of_central_meridian,
false_easting,
false_northing,
scale_factor_at_central_meridian,
ellipsoid,
)
return cs
################################################################################
def build_lambert_conformal_coordinate_system(engine, cf_grid_var):
"""
Create a Lambert conformal conic coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
latitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN, None
)
longitude_of_central_meridian = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_CENT_MERIDIAN, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
standard_parallel = getattr(
cf_grid_var, CF_ATTR_GRID_STANDARD_PARALLEL, None
)
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.LambertConformal(
latitude_of_projection_origin,
longitude_of_central_meridian,
false_easting,
false_northing,
standard_parallel,
ellipsoid,
)
return cs
################################################################################
def build_stereographic_coordinate_system(engine, cf_grid_var):
"""
Create a stereographic coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
latitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN, None
)
longitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_PROJ_ORIGIN, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
# Iris currently only supports Stereographic projections with a scale
# factor of 1.0. This is checked elsewhere.
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.Stereographic(
latitude_of_projection_origin,
longitude_of_projection_origin,
false_easting,
false_northing,
true_scale_lat=None,
ellipsoid=ellipsoid,
)
return cs
################################################################################
def build_mercator_coordinate_system(engine, cf_grid_var):
"""
Create a Mercator coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
longitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_PROJ_ORIGIN, None
)
standard_parallel = getattr(
cf_grid_var, CF_ATTR_GRID_STANDARD_PARALLEL, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
# Iris currently only supports Mercator projections with specific
# scale_factor_at_projection_origin. This is checked elsewhere.
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.Mercator(
longitude_of_projection_origin,
ellipsoid=ellipsoid,
standard_parallel=standard_parallel,
false_easting=false_easting,
false_northing=false_northing,
)
return cs
################################################################################
def build_lambert_azimuthal_equal_area_coordinate_system(engine, cf_grid_var):
"""
Create a Lambert azimuthal equal-area coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
latitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN, None
)
longitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_PROJ_ORIGIN, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.LambertAzimuthalEqualArea(
latitude_of_projection_origin,
longitude_of_projection_origin,
false_easting,
false_northing,
ellipsoid,
)
return cs
################################################################################
def build_albers_equal_area_coordinate_system(engine, cf_grid_var):
"""
Create an Albers conical equal-area coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
latitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN, None
)
longitude_of_central_meridian = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_CENT_MERIDIAN, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
standard_parallels = getattr(
cf_grid_var, CF_ATTR_GRID_STANDARD_PARALLEL, None
)
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.AlbersEqualArea(
latitude_of_projection_origin,
longitude_of_central_meridian,
false_easting,
false_northing,
standard_parallels,
ellipsoid,
)
return cs
################################################################################
def build_vertical_perspective_coordinate_system(engine, cf_grid_var):
"""
Create a vertical perspective coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
latitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN, None
)
longitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_PROJ_ORIGIN, None
)
perspective_point_height = getattr(
cf_grid_var, CF_ATTR_GRID_PERSPECTIVE_HEIGHT, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.VerticalPerspective(
latitude_of_projection_origin,
longitude_of_projection_origin,
perspective_point_height,
false_easting,
false_northing,
ellipsoid,
)
return cs
################################################################################
def build_geostationary_coordinate_system(engine, cf_grid_var):
"""
Create a geostationary coordinate system from the CF-netCDF
grid mapping variable.
"""
major, minor, inverse_flattening = _get_ellipsoid(cf_grid_var)
latitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LAT_OF_PROJ_ORIGIN, None
)
longitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_PROJ_ORIGIN, None
)
perspective_point_height = getattr(
cf_grid_var, CF_ATTR_GRID_PERSPECTIVE_HEIGHT, None
)
false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
sweep_angle_axis = getattr(
cf_grid_var, CF_ATTR_GRID_SWEEP_ANGLE_AXIS, None
)
ellipsoid = None
if (
major is not None
or minor is not None
or inverse_flattening is not None
):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.Geostationary(
latitude_of_projection_origin,
longitude_of_projection_origin,
perspective_point_height,
sweep_angle_axis,
false_easting,
false_northing,
ellipsoid,
)
return cs
################################################################################
def get_attr_units(cf_var, attributes):
attr_units = getattr(cf_var, CF_ATTR_UNITS, UNKNOWN_UNIT_STRING)
if not attr_units:
attr_units = UNKNOWN_UNIT_STRING
# Sanitise lat/lon units.
if attr_units in UD_UNITS_LAT or attr_units in UD_UNITS_LON:
attr_units = "degrees"
# Graceful loading of invalid units.
try:
cf_units.as_unit(attr_units)
except ValueError:
# Using converted unicode message. Can be reverted with Python 3.
msg = "Ignoring netCDF variable {!r} invalid units {!r}".format(
cf_var.cf_name, attr_units
)
warnings.warn(msg)
attributes["invalid_units"] = attr_units
attr_units = UNKNOWN_UNIT_STRING
if np.issubdtype(cf_var.dtype, np.str_):
attr_units = NO_UNIT_STRING
if any(
hasattr(cf_var.cf_data, name)
for name in ("flag_values", "flag_masks", "flag_meanings")
):
attr_units = cf_units._NO_UNIT_STRING
# Get any associated calendar for a time reference coordinate.
if cf_units.as_unit(attr_units).is_time_reference():
attr_calendar = getattr(cf_var, CF_ATTR_CALENDAR, None)
if attr_calendar:
attr_units = cf_units.Unit(attr_units, calendar=attr_calendar)
return attr_units
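# Illustrative example (not part of the original source): a variable with
# units "days since 1970-01-01" and a calendar attribute of "360_day" yields
# cf_units.Unit('days since 1970-01-01', calendar='360_day') above.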
################################################################################
def get_names(cf_coord_var, coord_name, attributes):
"""Determine the standard_name, long_name and var_name attributes."""
standard_name = getattr(cf_coord_var, CF_ATTR_STD_NAME, None)
long_name = getattr(cf_coord_var, CF_ATTR_LONG_NAME, None)
cf_name = str(cf_coord_var.cf_name)
if standard_name is not None:
try:
standard_name = _get_valid_standard_name(standard_name)
except ValueError:
if long_name is not None:
attributes["invalid_standard_name"] = standard_name
if coord_name is not None:
standard_name = coord_name
else:
standard_name = None
else:
if coord_name is not None:
attributes["invalid_standard_name"] = standard_name
standard_name = coord_name
else:
standard_name = None
else:
if coord_name is not None:
standard_name = coord_name
# Last attempt to set the standard name to something meaningful.
if standard_name is None:
if cf_name in iris.std_names.STD_NAMES:
standard_name = cf_name
return (standard_name, long_name, cf_name)
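# Summary of the fallback order above (comment added for clarity): a valid
# standard_name wins; an invalid one is stashed in
# attributes['invalid_standard_name'] (when there is a long_name or a
# coord_name to fall back on) and coord_name, if given, replaces it; with no
# standard_name at all, cf_name is promoted only when it is itself a
# recognised standard name.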
################################################################################
def get_cf_bounds_var(cf_coord_var):
"""
Return the CF variable representing the bounds of a coordinate
variable.
"""
attr_bounds = getattr(cf_coord_var, CF_ATTR_BOUNDS, None)
attr_climatology = getattr(cf_coord_var, CF_ATTR_CLIMATOLOGY, None)
# Determine bounds, preferring standard bounds over climatology.
# NB. No need to raise a warning if the bounds/climatology
# variable is missing, as that will already have been done by
# iris.fileformats.cf.
cf_bounds_var = None
climatological = False
if attr_bounds is not None:
bounds_vars = cf_coord_var.cf_group.bounds
if attr_bounds in bounds_vars:
cf_bounds_var = bounds_vars[attr_bounds]
elif attr_climatology is not None:
climatology_vars = cf_coord_var.cf_group.climatology
if attr_climatology in climatology_vars:
cf_bounds_var = climatology_vars[attr_climatology]
climatological = True
if attr_bounds is not None and attr_climatology is not None:
warnings.warn(
"Ignoring climatology in favour of bounds attribute "
"on NetCDF variable {!r}.".format(cf_coord_var.cf_name)
)
return cf_bounds_var, climatological
################################################################################
def reorder_bounds_data(bounds_data, cf_bounds_var, cf_coord_var):
"""
Return a bounds_data array with the vertex dimension as the most
rapidly varying.
.. note::
This function assumes the dimension names of the coordinate
variable match those of the bounds variable in order to determine
which is the vertex dimension.
"""
vertex_dim_names = set(cf_bounds_var.dimensions).difference(
cf_coord_var.dimensions
)
if len(vertex_dim_names) != 1:
msg = (
"Too many dimension names differ between coordinate "
"variable {!r} and the bounds variable {!r}. "
"Expected 1, got {}."
)
raise ValueError(
msg.format(
str(cf_coord_var.cf_name),
str(cf_bounds_var.cf_name),
len(vertex_dim_names),
)
)
vertex_dim = cf_bounds_var.dimensions.index(*vertex_dim_names)
bounds_data = np.rollaxis(
bounds_data.view(), vertex_dim, len(bounds_data.shape)
)
return bounds_data
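# Worked example (illustrative): a coordinate with dimensions ('y', 'x') and
# a bounds variable with dimensions ('nv', 'y', 'x') gives vertex_dim == 0;
# np.rollaxis then moves that axis to the end, yielding bounds shaped
# (y, x, nv) as expected downstream.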
################################################################################
def build_dimension_coordinate(
engine, cf_coord_var, coord_name=None, coord_system=None
):
"""Create a dimension coordinate (DimCoord) and add it to the cube."""
cf_var = engine.cf_var
cube = engine.cube
attributes = {}
attr_units = get_attr_units(cf_coord_var, attributes)
points_data = cf_coord_var[:]
# Gracefully fill points masked array.
if ma.is_masked(points_data):
points_data = ma.filled(points_data)
msg = "Gracefully filling {!r} dimension coordinate masked points"
warnings.warn(msg.format(str(cf_coord_var.cf_name)))
# Get any coordinate bounds.
cf_bounds_var, climatological = get_cf_bounds_var(cf_coord_var)
if cf_bounds_var is not None:
bounds_data = cf_bounds_var[:]
# Gracefully fill bounds masked array.
if ma.is_masked(bounds_data):
bounds_data = ma.filled(bounds_data)
msg = "Gracefully filling {!r} dimension coordinate masked bounds"
warnings.warn(msg.format(str(cf_coord_var.cf_name)))
# Handle transposed bounds where the vertex dimension is not
# the last one. Test based on shape to support different
# dimension names.
if cf_bounds_var.shape[:-1] != cf_coord_var.shape:
bounds_data = reorder_bounds_data(
bounds_data, cf_bounds_var, cf_coord_var
)
else:
bounds_data = None
# Determine whether the coordinate is circular.
circular = False
if (
points_data.ndim == 1
and coord_name in [CF_VALUE_STD_NAME_LON, CF_VALUE_STD_NAME_GRID_LON]
and cf_units.Unit(attr_units)
in [cf_units.Unit("radians"), cf_units.Unit("degrees")]
):
modulus_value = cf_units.Unit(attr_units).modulus
circular = iris.util._is_circular(
points_data, modulus_value, bounds=bounds_data
)
# Determine the name of the dimension/s shared between the CF-netCDF data variable
# and the coordinate being built.
common_dims = [
dim for dim in cf_coord_var.dimensions if dim in cf_var.dimensions
]
data_dims = None
if common_dims:
# Calculate the offset of each common dimension.
data_dims = [cf_var.dimensions.index(dim) for dim in common_dims]
# Determine the standard_name, long_name and var_name
standard_name, long_name, var_name = get_names(
cf_coord_var, coord_name, attributes
)
# Create the coordinate.
try:
coord = iris.coords.DimCoord(
points_data,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
units=attr_units,
bounds=bounds_data,
attributes=attributes,
coord_system=coord_system,
circular=circular,
climatological=climatological,
)
except ValueError as e_msg:
# Attempt graceful loading.
coord = iris.coords.AuxCoord(
points_data,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
units=attr_units,
bounds=bounds_data,
attributes=attributes,
coord_system=coord_system,
climatological=climatological,
)
cube.add_aux_coord(coord, data_dims)
msg = (
"Failed to create {name!r} dimension coordinate: {error}\n"
"Gracefully creating {name!r} auxiliary coordinate instead."
)
warnings.warn(msg.format(name=str(cf_coord_var.cf_name), error=e_msg))
else:
# Add the dimension coordinate to the cube.
if data_dims:
cube.add_dim_coord(coord, data_dims)
else:
# Scalar coords are placed in the aux_coords container.
cube.add_aux_coord(coord, data_dims)
# Update the coordinate to CF-netCDF variable mapping.
engine.cube_parts["coordinates"].append((coord, cf_coord_var.cf_name))
################################################################################
def build_auxiliary_coordinate(
engine, cf_coord_var, coord_name=None, coord_system=None
):
"""Create an auxiliary coordinate (AuxCoord) and add it to the cube."""
cf_var = engine.cf_var
cube = engine.cube
attributes = {}
# Get units
attr_units = get_attr_units(cf_coord_var, attributes)
# Get any coordinate point data.
if isinstance(cf_coord_var, cf.CFLabelVariable):
points_data = cf_coord_var.cf_label_data(cf_var)
else:
points_data = _get_cf_var_data(cf_coord_var, engine.filename)
# Get any coordinate bounds.
cf_bounds_var, climatological = get_cf_bounds_var(cf_coord_var)
if cf_bounds_var is not None:
bounds_data = _get_cf_var_data(cf_bounds_var, engine.filename)
# Handle transposed bounds where the vertex dimension is not
# the last one. Test based on shape to support different
# dimension names.
if cf_bounds_var.shape[:-1] != cf_coord_var.shape:
# Resolving the data to a numpy array (i.e. *not* masked) for
# compatibility with array creators (i.e. dask)
bounds_data = np.asarray(bounds_data)
bounds_data = reorder_bounds_data(
bounds_data, cf_bounds_var, cf_coord_var
)
else:
bounds_data = None
# Determine the name of the dimension/s shared between the CF-netCDF data variable
# and the coordinate being built.
common_dims = [
dim for dim in cf_coord_var.dimensions if dim in cf_var.dimensions
]
data_dims = None
if common_dims:
# Calculate the offset of each common dimension.
data_dims = [cf_var.dimensions.index(dim) for dim in common_dims]
# Determine the standard_name, long_name and var_name
standard_name, long_name, var_name = get_names(
cf_coord_var, coord_name, attributes
)
# Create the coordinate
coord = iris.coords.AuxCoord(
points_data,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
units=attr_units,
bounds=bounds_data,
attributes=attributes,
coord_system=coord_system,
climatological=climatological,
)
# Add it to the cube
cube.add_aux_coord(coord, data_dims)
# Make a list with names, stored on the engine, so we can find them all later.
engine.cube_parts["coordinates"].append((coord, cf_coord_var.cf_name))
################################################################################
def build_cell_measures(engine, cf_cm_var):
"""Create a CellMeasure instance and add it to the cube."""
cf_var = engine.cf_var
cube = engine.cube
attributes = {}
# Get units
attr_units = get_attr_units(cf_cm_var, attributes)
# Get (lazy) content array
data = _get_cf_var_data(cf_cm_var, engine.filename)
# Determine the name of the dimension/s shared between the CF-netCDF data variable
# and the coordinate being built.
common_dims = [
dim for dim in cf_cm_var.dimensions if dim in cf_var.dimensions
]
data_dims = None
if common_dims:
# Calculate the offset of each common dimension.
data_dims = [cf_var.dimensions.index(dim) for dim in common_dims]
# Determine the standard_name, long_name and var_name
standard_name, long_name, var_name = get_names(cf_cm_var, None, attributes)
# Obtain the cf_measure.
measure = cf_cm_var.cf_measure
# Create the CellMeasure
cell_measure = iris.coords.CellMeasure(
data,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
units=attr_units,
attributes=attributes,
measure=measure,
)
# Add it to the cube
cube.add_cell_measure(cell_measure, data_dims)
# Make a list with names, stored on the engine, so we can find them all later.
engine.cube_parts["cell_measures"].append(
(cell_measure, cf_cm_var.cf_name)
)
################################################################################
def build_ancil_var(engine, cf_av_var):
"""Create an AncillaryVariable instance and add it to the cube."""
cf_var = engine.cf_var
cube = engine.cube
attributes = {}
# Get units
attr_units = get_attr_units(cf_av_var, attributes)
# Get (lazy) content array
data = _get_cf_var_data(cf_av_var, engine.filename)
# Determine the name of the dimension/s shared between the CF-netCDF data variable
# and the AV being built.
common_dims = [
dim for dim in cf_av_var.dimensions if dim in cf_var.dimensions
]
data_dims = None
if common_dims:
# Calculate the offset of each common dimension.
data_dims = [cf_var.dimensions.index(dim) for dim in common_dims]
# Determine the standard_name, long_name and var_name
standard_name, long_name, var_name = get_names(cf_av_var, None, attributes)
# Create the AncillaryVariable
av = iris.coords.AncillaryVariable(
data,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
units=attr_units,
attributes=attributes,
)
# Add it to the cube
cube.add_ancillary_variable(av, data_dims)
# Make a list with names, stored on the engine, so we can find them all later.
engine.cube_parts["ancillary_variables"].append((av, cf_av_var.cf_name))
################################################################################
def _is_lat_lon(
cf_var, ud_units, std_name, std_name_grid, axis_name, prefixes
):
"""
Determine whether the CF coordinate variable is a latitude/longitude variable.
Ref: [CF] Section 4.1 Latitude Coordinate.
[CF] Section 4.2 Longitude Coordinate.
"""
is_valid = False
attr_units = getattr(cf_var, CF_ATTR_UNITS, None)
if attr_units is not None:
attr_units = attr_units.lower()
is_valid = attr_units in ud_units
# Special case - Check for rotated pole.
if attr_units == "degrees":
attr_std_name = getattr(cf_var, CF_ATTR_STD_NAME, None)
if attr_std_name is not None:
is_valid = attr_std_name.lower() == std_name_grid
else:
is_valid = False
# TODO: check that this interpretation of axis is correct.
attr_axis = getattr(cf_var, CF_ATTR_AXIS, None)
if attr_axis is not None:
is_valid = attr_axis.lower() == axis_name
else:
# Alternative is to check standard_name or axis.
attr_std_name = getattr(cf_var, CF_ATTR_STD_NAME, None)
if attr_std_name is not None:
attr_std_name = attr_std_name.lower()
is_valid = attr_std_name in [std_name, std_name_grid]
if not is_valid:
is_valid = any(
[attr_std_name.startswith(prefix) for prefix in prefixes]
)
else:
attr_axis = getattr(cf_var, CF_ATTR_AXIS, None)
if attr_axis is not None:
is_valid = attr_axis.lower() == axis_name
return is_valid
################################################################################
def is_latitude(engine, cf_name):
"""Determine whether the CF coordinate variable is a latitude variable."""
cf_var = engine.cf_var.cf_group[cf_name]
return _is_lat_lon(
cf_var,
UD_UNITS_LAT,
CF_VALUE_STD_NAME_LAT,
CF_VALUE_STD_NAME_GRID_LAT,
CF_VALUE_AXIS_Y,
["lat", "rlat"],
)
################################################################################
def is_longitude(engine, cf_name):
"""Determine whether the CF coordinate variable is a longitude variable."""
cf_var = engine.cf_var.cf_group[cf_name]
return _is_lat_lon(
cf_var,
UD_UNITS_LON,
CF_VALUE_STD_NAME_LON,
CF_VALUE_STD_NAME_GRID_LON,
CF_VALUE_AXIS_X,
["lon", "rlon"],
)
################################################################################
def is_projection_x_coordinate(engine, cf_name):
"""
Determine whether the CF coordinate variable is a
projection_x_coordinate variable.
"""
cf_var = engine.cf_var.cf_group[cf_name]
attr_name = getattr(cf_var, CF_ATTR_STD_NAME, None) or getattr(
cf_var, CF_ATTR_LONG_NAME, None
)
return attr_name == CF_VALUE_STD_NAME_PROJ_X
################################################################################
def is_projection_y_coordinate(engine, cf_name):
"""
Determine whether the CF coordinate variable is a
projection_y_coordinate variable.
"""
cf_var = engine.cf_var.cf_group[cf_name]
attr_name = getattr(cf_var, CF_ATTR_STD_NAME, None) or getattr(
cf_var, CF_ATTR_LONG_NAME, None
)
return attr_name == CF_VALUE_STD_NAME_PROJ_Y
################################################################################
def is_time(engine, cf_name):
"""
Determine whether the CF coordinate variable is a time variable.
Ref: [CF] Section 4.4 Time Coordinate.
"""
cf_var = engine.cf_var.cf_group[cf_name]
attr_units = getattr(cf_var, CF_ATTR_UNITS, None)
attr_std_name = getattr(cf_var, CF_ATTR_STD_NAME, None)
attr_axis = getattr(cf_var, CF_ATTR_AXIS, "")
try:
is_time_reference = cf_units.Unit(attr_units or 1).is_time_reference()
except ValueError:
is_time_reference = False
return is_time_reference and (
attr_std_name == "time" or attr_axis.lower() == CF_VALUE_AXIS_T
)
################################################################################
def is_time_period(engine, cf_name):
"""Determine whether the CF coordinate variable represents a time period."""
is_valid = False
cf_var = engine.cf_var.cf_group[cf_name]
attr_units = getattr(cf_var, CF_ATTR_UNITS, None)
if attr_units is not None:
try:
is_valid = cf_units.is_time(attr_units)
except ValueError:
is_valid = False
return is_valid
################################################################################
def is_grid_mapping(engine, cf_name, grid_mapping):
"""Determine whether the CF grid mapping variable is of the appropriate type."""
is_valid = False
cf_var = engine.cf_var.cf_group[cf_name]
attr_mapping_name = getattr(cf_var, CF_ATTR_GRID_MAPPING_NAME, None)
if attr_mapping_name is not None:
is_valid = attr_mapping_name.lower() == grid_mapping
return is_valid
################################################################################
def _is_rotated(engine, cf_name, cf_attr_value):
"""Determine whether the CF coordinate variable is rotated."""
is_valid = False
cf_var = engine.cf_var.cf_group[cf_name]
attr_std_name = getattr(cf_var, CF_ATTR_STD_NAME, None)
if attr_std_name is not None:
is_valid = attr_std_name.lower() == cf_attr_value
else:
attr_units = getattr(cf_var, CF_ATTR_UNITS, None)
if attr_units is not None:
is_valid = attr_units.lower() == "degrees"
return is_valid
################################################################################
def is_rotated_latitude(engine, cf_name):
"""Determine whether the CF coodinate variable is rotated latitude."""
return _is_rotated(engine, cf_name, CF_VALUE_STD_NAME_GRID_LAT)
###############################################################################
def is_rotated_longitude(engine, cf_name):
"""Determine whether the CF coordinate variable is rotated longitude."""
return _is_rotated(engine, cf_name, CF_VALUE_STD_NAME_GRID_LON)
################################################################################
def has_supported_mercator_parameters(engine, cf_name):
"""Determine whether the CF grid mapping variable has the supported
values for the parameters of the Mercator projection."""
is_valid = True
cf_grid_var = engine.cf_var.cf_group[cf_name]
scale_factor_at_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_SCALE_FACTOR_AT_PROJ_ORIGIN, None
)
if (
scale_factor_at_projection_origin is not None
and scale_factor_at_projection_origin != 1
):
warnings.warn(
"Scale factors other than 1.0 not yet supported for "
"Mercator projections"
)
is_valid = False
return is_valid
################################################################################
def has_supported_stereographic_parameters(engine, cf_name):
"""Determine whether the CF grid mapping variable has a value of 1.0
for the scale_factor_at_projection_origin attribute."""
is_valid = True
cf_grid_var = engine.cf_var.cf_group[cf_name]
scale_factor_at_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_SCALE_FACTOR_AT_PROJ_ORIGIN, None
)
if (
scale_factor_at_projection_origin is not None
and scale_factor_at_projection_origin != 1
):
warnings.warn(
"Scale factors other than 1.0 not yet supported for "
"stereographic projections"
)
is_valid = False
return is_valid
|
SciTools/iris
|
lib/iris/fileformats/_nc_load_rules/helpers.py
|
Python
|
lgpl-3.0
| 42,793
|
[
"NetCDF"
] |
992be2f10c33fc9e8c786824be416e95bff78ab7cebfd0727f0470094061cd8a
|
from numpy import *
from refractive_index import *
# function to compute the soil dielectric constant using Mironov et al. 2009
# Brian Hornbuckle, May 25, 2010.
# Converted to Python by Jason Patton, January 10, 2012
# uses function "refractive_index.m"
# output: n = n' - j*n" = moist soil index of refraction, ejwt time dependence
# inputs:
# f = frequency, Hz
# c = clay content, %
# mv = volumetric water content, m^3 m^-3
def n_soil(f,c,mv):
# ------ questions
# eou is set to 100,
# but the Debye water model has eou = 88.045 - 0.4147*Tc + 6.295e-4*Tc**2 + 1.075e-5*Tc**3, where Tc = Celsius temperature
# and this yields eou = 76 to 86 for Tc = 5 to 30 deg C
# relaxation time tu agrees with water model
# ------- notes
# temperature dependence was not tested,
# so best if used near 293 to 295 K,
# the temperature at which data was collected
eo = 8.854e-12 # permittivity of free space, F m^-1
# ----- empirical models
nd = 1.634 - 0.539e-2*c + 0.2748e-4*c**2 # Re(dry soil refractive index) = n'_dry
kd = 0.03952 - 0.04038e-2*c # Im(dry soil refractive index) = n"_dry
mvt = 0.02863 + 0.30673e-2*c # max bound water volumetric water content, m^3 m^-3
eob = 79.8 - 85.4e-2*c + 32.7e-4*c**2 # low frequency limit of bound water dielectric constant
tb = 1.062e-11 + 3.450e-12*c*1e-2 # relaxation time for bound water
sb = 0.3112 + 0.467e-2*c # bound water conductivity
su = 0.3631 + 1.217e-2*c # free (unbound) water conductivity
eou = 100 # low frequency limit of free (unbound) water dielectric constant
tu = 8.5e-12 # relaxation time for free (unbound) water, s
# -------- bound water Debye model
einf = 4.9 # high-frequency (optical) limit of relative permittivity
erprime = einf + (eob-einf)/(1+((2*pi)*f*tb)**2) # real part of relative permittivity
erdoubleprime = (eob-einf)/(1+((2*pi)*f*tb)**2)*(2*pi)*f*tb + sb/((2*pi*eo)*f) # imaginary part of relative permittivity
erb = erprime + 1j*erdoubleprime # bound water relative permittivity or dielectric constant
bound = refractive_index(erb)
nb = real(bound)
kb = abs(imag(bound)) # bound water index of refraction
# -------- free (unbound) water Debye model
erprime = einf + (eou-einf)/(1+((2*pi)*f*tu)**2) # real part of relative permittivity
erdoubleprime = (eou-einf)/(1+((2*pi)*f*tu)**2)*(2*pi)*f*tu + su/((2*pi*eo)*f) # imaginary part of relative permittivity
eru = erprime + 1j*erdoubleprime # free (unbound) water relative permittivity or dielectric constant
free = refractive_index(eru)
nu = real(free)
ku = abs(imag(free)) # free (unbound) water index of refraction
# ------- moist soil index of refraction
if(mv < mvt): # all soil water is bound
nm = nd + (nb-1)*mv # real part of refractive index of moist soil
km = kd + kb*mv # imaginary part of moist soil refractive index
else: # some free (unbound) water exists
nm = nd + (nb-1)*mvt + (nu-1)*(mv-mvt) # real part of refractive index of moist soil
km = kd + kb*mvt + ku*(mv-mvt) # imaginary part of moist soil refractive index
return nm - 1j*km # moist soil refractive index, ejwt time dependence
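# Illustrative usage (hypothetical values): L-band (1.4 GHz), 20% clay,
# 0.25 m^3 m^-3 volumetric water content.
# n = n_soil(1.4e9, 20., 0.25)
# n.real is n'; -n.imag (= km) is n" in the ejwt convention above.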
|
jasoncpatton/TbSim
|
n_soil.py
|
Python
|
mit
| 3,176
|
[
"Brian"
] |
1f0dca5855ac9730b9df8d0f78760e912b0fc91b014c6821456e22c427e09f7f
|
#!/usr/bin/env python
# B a r a K u d a
#
# Budget and other stuffs on rectangular boxes!
#
# L. Brodeau, 2013
import sys
import numpy as nmp
from netCDF4 import Dataset
from os.path import basename
import barakuda_tool as bt
import barakuda_ncio as bnc
from barakuda_physics import sigma0
#next = 10
next = 10
# The density of seawater is about 1025 kg/m^3 and the specific heat is about 3850 J/(kg C)
#rho0 = 1025. ; # kg/m^3
#rCp = 3850. ; # 3850 J/kg/deg.C
rho0 = 1000. ; # kg/m^3 => to stay consistent with cdftransportiz.f90 of CDFTOOLS...
rCp = 4000. ; # 4000 J/kg/deg.C => to stay consistent with cdftransportiz.f90 of CDFTOOLS...
#l_plot_debug = True
l_plot_debug = False
venv_needed = {'ORCA','EXP','CPREF','DIAG_D','MM_FILE','FILE_DEF_BOXES','NN_T','NN_S', \
'NN_SST','NN_SSS','NN_SSH'}
vdic = bt.check_env_var(sys.argv[0], venv_needed)
corca = vdic['ORCA']
CONFEXP = corca+'-'+vdic['EXP']
cname_script = basename(sys.argv[0])
print '\n'+cname_script
narg = len(sys.argv)
if narg < 3 or narg > 4:
print 'Usage: '+cname_script+' <year> <depth for surface properties> (uv)'
print ' by specifying "uv" as 3rd argument, budget will be extended to'
print ' some variables found in grid_U and grid_V files such as wind stress\n'
sys.exit(0)
cy = sys.argv[1] ; jy=int(cy)
czs= sys.argv[2] ; zs = float(int(czs))
# Shall we need U and V files???
luv = False
if narg == 4 and sys.argv[3] == 'uv':
luv = True
venv_uv = {'NN_TAUX','NN_TAUY'}
vdic_uv = bt.check_env_var(sys.argv[0], venv_uv)
path_fig = vdic['DIAG_D']+'/'
# Image type? eps, png, jpg...
#FIG_FORM = 'pdf'
FIG_FORM = 'png'
# First will read name and coordinates of rectangular boxes to treat into file FILE_DEF_BOXES
##############################################################################################
vboxes, vi1, vj1, vi2, vj2 = bt.read_coor(vdic['FILE_DEF_BOXES'])
nbb = len(vboxes)
print ''
# Checking presence of NEMO files:
cfroot = vdic['CPREF']+cy+'0101_'+cy+'1231'
cf_in_T = cfroot+'_grid_T.nc'; bt.chck4f(cf_in_T, script_name=cname_script)
if luv:
cf_in_U = cfroot+'_grid_U.nc'; bt.chck4f(cf_in_U, script_name=cname_script)
cf_in_V = cfroot+'_grid_V.nc'; bt.chck4f(cf_in_V, script_name=cname_script)
# Coordinates, mask and metrics:
bt.chck4f(vdic['MM_FILE'], script_name=cname_script)
id_mm = Dataset(vdic['MM_FILE'])
Xmask = id_mm.variables['tmask'] [0,:,:,:]
ze1t = id_mm.variables['e1t'] [0,:,:]
ze2t = id_mm.variables['e2t'] [0,:,:]
ve3t = id_mm.variables['e3t_1d'] [0,:]
zlon = id_mm.variables['nav_lon'] [:,:]
zlat = id_mm.variables['nav_lat'] [:,:]
vzt = id_mm.variables['gdept_1d'][0,:]
vzw = id_mm.variables['gdepw_1d'][0,:]
id_mm.close()
lqnet = False ; lqsw = False ; lpme = False; ltau = False
# NEMO output, Grid T
# ~~~~~~~~~~~~~~~~~~~
id_in_T = Dataset(cf_in_T)
list_variables = id_in_T.variables.keys()
Vtime = id_in_T.variables['time_counter'][:]
if vdic['NN_SST'] == 'thetao':
Zsst = id_in_T.variables[vdic['NN_SST']][:,0,:,:]
else:
Zsst = id_in_T.variables[vdic['NN_SST']][:,:,:]
if vdic['NN_SSS'] == 'so':
Zsss = id_in_T.variables[vdic['NN_SSS']][:,0,:,:]
else:
Zsss = id_in_T.variables[vdic['NN_SSS']][:,:,:]
Zssh = id_in_T.variables[vdic['NN_SSH']][:,:,:]
Xtemp = id_in_T.variables[vdic['NN_T']][:,:,:,:]
Xsali = id_in_T.variables[vdic['NN_S']][:,:,:,:]
if 'sohefldo' in list_variables[:]:
Zqnet = id_in_T.variables['sohefldo'] [:,:,:] ; # Net Downward Heat Flux
lqnet = True
if 'tohfls' in list_variables[:]:
Zqnet = id_in_T.variables['tohfls'] [:,:,:] ; # Net Downward Heat Flux
lqnet = True
if 'soshfldo' in list_variables[:]:
Zqsw = id_in_T.variables['soshfldo'] [:,:,:] ; # Shortwave Radiation
lqsw = True
if 'rsntds' in list_variables[:]:
Zqsw = id_in_T.variables['rsntds'] [:,:,:] ; # Shortwave Radiation
lqsw = True
# Want PmE (positive when ocean gains FW); in NEMO files it's the opposite, EmP...
if 'wfo' in list_variables[:]:
Zpme = -id_in_T.variables['wfo'] [:,:,:] ; # wfo, same as below = EmP, > 0 when the ocean is losing water
lpme = True
if 'sowaflup' in list_variables[:]:
Zpme = -id_in_T.variables['sowaflup'] [:,:,:] ; # Net Upward Water Flux (EmP)
# # sowaflup = EmP (>0 if more evaporation than P)
lpme = True
print '(has ',Xtemp.shape[0],' time snapshots)\n'
id_in_T.close()
[ Nt, nk, nj, ni ] = Xtemp.shape ; print 'Nt, nk, nj, ni =', Nt, nk, nj, ni
Zss0 = sigma0( Zsst, Zsss )
print len(ve3t[:])
if len(ve3t[:]) != nk: print 'Problem with nk!!!'; sys.exit(0)
# NEMO output, Grid U and V
# ~~~~~~~~~~~~~~~~~~~~~~~~~
if luv:
id_in_U = Dataset(cf_in_U)
list_variables = id_in_U.variables.keys()
if vdic_uv['NN_TAUX'] in list_variables[:]:
Ztaux = id_in_U.variables[vdic_uv['NN_TAUX']][:,:,:] ; # zonal wind stress component
ltau = True
print vdic_uv['NN_TAUX']+' found in '+cf_in_U
else:
print vdic_uv['NN_TAUX']+' NOT found in '+cf_in_U
id_in_U.close()
id_in_V = Dataset(cf_in_V)
list_variables = id_in_V.variables.keys()
if ltau and vdic_uv['NN_TAUY'] in list_variables[:]:
Ztauy = id_in_V.variables[vdic_uv['NN_TAUY']][:,:,:] ; # meridional wind stress component
print vdic_uv['NN_TAUY']+' found in '+cf_in_V+'\n'
else:
print vdic_uv['NN_TAUY']+' NOT found in '+cf_in_V
ltau = False
id_in_V.close()
if ltau:
# Must interpolate Taux and Tauy on T-grid:
Ztau = nmp.zeros(Nt*nj*ni) ; Ztau.shape = [ Nt, nj, ni ]
xtmp1 = nmp.zeros(nj*ni) ; xtmp1.shape = [ nj, ni ]
xtmp2 = nmp.zeros(nj*ni) ; xtmp2.shape = [ nj, ni ]
for jm in range(Nt):
xtmp1[:,1:] = 0.5*(Ztaux[jm,:,1:]+Ztaux[jm,:,:ni-1]) ; # u on Tgrid
xtmp2[1:,:] = 0.5*(Ztauy[jm,1:,:]+Ztauy[jm,:nj-1,:]) ; # v on Tgrid
Ztau[jm,:,:] = nmp.sqrt(xtmp1*xtmp1 + xtmp2*xtmp2)
#print Ztau[3,100,:]
#del Ztaux, Ztaux, xtmp1, xtmp2
jks = 0
for jk in range(nk-1):
if zs >= vzw[jk] and zs < vzw[jk+1]: jks = jk+1
czs = str(int(round(vzw[jks],0))) ; print czs
print ' *** for depth '+czs+': jks = '+str(jks)+', depthw = '+str(vzw[jks])+' => '+czs+'m'
print ' => will average from jk=0 to jk='+str(jks)+'-1 on T-points => X[:jks-1]'
jks = jks - 1
print ' => that\'s on T-points from jk=0 to jk='+str(jks)+' (depth of deepest T-point used ='+str(vzt[jks])+'m)'
# Loop along boxes:
for jb in range(nbb):
cbox = vboxes[jb]
i1 = vi1[jb]
j1 = vj1[jb]
i2 = vi2[jb]+1
j2 = vj2[jb]+1
print '\n *** Treating box '+cbox+' => ', i1, j1, i2-1, j2-1
# Filling box arrays:
# ~~~~~~~~~~~~~~~~~~~
nx_b = i2 - i1
ny_b = j2 - j1
shape_array = [ Nt, nk, ny_b, nx_b ]
XVolu = nmp.zeros(nk*ny_b*nx_b) ; XVolu.shape = [ nk, ny_b, nx_b ]
ZArea = nmp.zeros( ny_b*nx_b) ; ZArea.shape = [ ny_b, nx_b ]
Ztmp = nmp.zeros( ny_b*nx_b) ; Ztmp.shape = [ ny_b, nx_b ]
Xs0 = nmp.zeros(nk*ny_b*nx_b) ; Xs0.shape = [ nk, ny_b, nx_b ]
ssh_m = nmp.zeros(Nt) ; ssh_m.shape = [ Nt ]
sst_m = nmp.zeros(Nt) ; sst_m.shape = [ Nt ]
sss_m = nmp.zeros(Nt) ; sss_m.shape = [ Nt ]
ss0_m = nmp.zeros(Nt) ; ss0_m.shape = [ Nt ]
surf_T_m = nmp.zeros(Nt) ; surf_T_m.shape = [ Nt ]
surf_S_m = nmp.zeros(Nt) ; surf_S_m.shape = [ Nt ]
surf_s0_m = nmp.zeros(Nt) ; surf_s0_m.shape = [ Nt ]
T_m = nmp.zeros(Nt) ; T_m.shape = [ Nt ]
Tau_m = nmp.zeros(Nt) ; Tau_m.shape = [ Nt ]
Qnet_m = nmp.zeros(Nt) ; Qnet_m.shape = [ Nt ]
Qnet_x_S_m = nmp.zeros(Nt) ; Qnet_x_S_m.shape = [ Nt ]
Qsw_m = nmp.zeros(Nt) ; Qsw_m.shape = [ Nt ]
Qsw_x_S_m = nmp.zeros(Nt) ; Qsw_x_S_m.shape = [ Nt ]
PmE_m = nmp.zeros(Nt) ; PmE_m.shape = [ Nt ]
H_m = nmp.zeros(Nt) ; H_m.shape = [ Nt ] ; # Heat content
S_m = nmp.zeros(Nt) ; S_m.shape = [ Nt ]
Vol_m = nmp.zeros(Nt) ; Vol_m.shape = [ Nt ] ; # Volume derived from SSH!
# On the sea of the box only:
ZArea[:,:] = ze1t[j1:j2, i1:i2]*ze2t[j1:j2, i1:i2]
for jk in range(nk): XVolu[jk,:,:] = Xmask[jk, j1:j2, i1:i2]*ZArea[:,:]*ve3t[jk]
ZArea[:,:] = Xmask[0, j1:j2, i1:i2]*ZArea[:,:]
#if l_plot_debug: bp.check_with_fig_2(ZArea, ZArea*0.+1., 'ZArea', fig_type=FIG_FORM)
Tot_area = nmp.sum(ZArea) ; print 'Total area of '+cbox+' (m^2): ', Tot_area
Tot_vol = nmp.sum(XVolu)
Tot_vol_jks = nmp.sum(XVolu[:jks,:,:])
for jm in range(Nt):
# 3D sigma0 density for current month
Xs0[:,:,:] = 0.
Xs0[:,:,:] = sigma0( Xtemp[jm,:, j1:j2, i1:i2], Xsali[jm,:, j1:j2, i1:i2] )
# Mean SSH
ssh_m[jm] = nmp.sum(Zssh[jm, j1:j2, i1:i2]*ZArea) / Tot_area
# Mean SST
sst_m[jm] = nmp.sum(Zsst[jm, j1:j2, i1:i2]*ZArea) / Tot_area
# Mean SSS
sss_m[jm] = nmp.sum(Zsss[jm, j1:j2, i1:i2]*ZArea) / Tot_area
# Mean SS0
ss0_m[jm] = nmp.sum(Zss0[jm, j1:j2, i1:i2]*ZArea) / Tot_area
# Mean surface temp (first Xm)
surf_T_m[jm] = nmp.sum(Xtemp[jm, :jks, j1:j2, i1:i2]*XVolu[:jks,:,:]) / Tot_vol_jks
# Mean temperature
T_m[jm] = nmp.sum(Xtemp[jm, : , j1:j2, i1:i2]*XVolu[:,:,:]) / Tot_vol
# Heat content in Peta Joules:
H_m[jm] = nmp.sum(Xtemp[jm,:, j1:j2, i1:i2]*(XVolu/1.E6))*rho0*rCp * 1.E-9 ; # => PJ (E15)
            # Mean surface salinity (top layer: 0 to czs m)
surf_S_m[jm] = nmp.sum(Xsali[jm,:jks, j1:j2, i1:i2]*XVolu[:jks,:,:]) / Tot_vol_jks
# Mean salinity
S_m[jm] = nmp.sum(Xsali[jm,:, j1:j2, i1:i2]*XVolu) / Tot_vol
            # Mean surface sigma0 density (top layer: 0 to czs m)
surf_s0_m[jm] = nmp.sum(Xs0[:jks,:,:]*XVolu[:jks,:,:]) / Tot_vol_jks
# Sea-ice area
#Aice_m[jm] = nmp.sum(Zicec[jm,:,:]*ZArea) * 1.E-12; # Million km^2
# For open-sea:
#Ztmp[:,:] = 1. - Zicec[jm,:,:] ; # 1 => 100% open sea / 0 => 100% ice
Ztmp[:,:] = 1.
# ZArea is in m^2
if ltau:
# Surface wind stress:
Tau_m[jm] = nmp.sum(Ztau[jm, j1:j2, i1:i2]*ZArea) / Tot_area
# Surface heat flux
if lqnet:
rr = nmp.sum(Zqnet[jm, j1:j2, i1:i2]*ZArea)
Qnet_m[jm] = rr / Tot_area # W/m^2
Qnet_x_S_m[jm] = rr * 1.E-15 # PW
# Shortwave heat flux
if lqsw:
rr = nmp.sum(Zqsw[jm, j1:j2, i1:i2]*ZArea)
Qsw_m[jm] = rr / Tot_area # W/m^2
Qsw_x_S_m[jm] = rr * 1.E-15 # PW
# PmE
            if lpme: PmE_m[jm] = nmp.sum( Zpme[jm, j1:j2, i1:i2]*ZArea) * 1.E-9; # (Sv): kg/m^2/s x m^2 => kg/s ; /1000 kg/m^3 => m^3/s
            #                                                                    # and 1 Sv = 1E6 m^3/s => overall factor 1.E-9
# Volume associated with SSH
Vol_m[jm] = nmp.sum(Zssh[jm, j1:j2, i1:i2]*ZArea) * 1.E-9; # km^3
if l_plot_debug:
VMN = [ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ]
print ' *** month ', str(jm+1), '***'
print 'Mean sst = ', sst_m[jm]
print 'Mean ssh = ', ssh_m[jm]
print 'Mean sss (0-'+czs+'m) = ', sss_m[jm]
print 'Mean ss0 (0-'+czs+'m) = ', ss0_m[jm]
print 'Mean T = ', T_m[jm]
print 'Heat content (PJ) / ref T=0C => ', H_m[jm]
if jm>0: print 'Volume heat flux (PW) / ref T=0C => ', (H_m[jm] - H_m[jm-1]) / (VMN[jm-1]*24.*3600.)
print 'Mean S = ', S_m[jm]
#print 'Sea-ice (10^6 km^2) = ', Aice_m[jm]
print 'Shortwave Radiation (PW) = ', Qsw_m[jm]
print 'Net surface heat flux (PW) = ', Qnet_m[jm], '\n'
print 'Surface freshwater flux (Sv) = ', PmE_m[jm], '\n'
print 'Volume associated with SSH (km^3) = ', Vol_m[jm], '\n'
Vtime = nmp.zeros(Nt)
for jm in range(Nt): Vtime[jm] = float(jy) + (float(jm)+0.5)/12.
cc = 'Box-averaged '
cf_out = vdic['DIAG_D']+'/budget_'+CONFEXP+'_box_'+cbox+'.nc'
bnc.wrt_appnd_1d_series(Vtime, ssh_m, cf_out, 'ssh', cu_t='year', cu_d='m', cln_d=cc+'sea surface height',
vd2=sst_m, cvar2='sst', cln_d2=cc+'sea surface temperature', cun2='deg.C',
vd3=sss_m, cvar3='sss', cln_d3=cc+'sea surface salinity', cun3='PSU',
vd4=surf_T_m, cvar4='surf_T', cln_d4=cc+'Temperature (first '+czs+'m)', cun4='deg.C',
vd5=T_m, cvar5='theta', cln_d5=cc+'potential temperature', cun5='deg.C',
vd6=H_m, cvar6='HC', cln_d6=cc+'heat content', cun6='PJ',
vd7=surf_S_m, cvar7='surf_S', cln_d7=cc+'salinity (first '+czs+'m)', cun7='PSU',
vd8=S_m, cvar8='S', cln_d8=cc+'salinity', cun8='PSU',
vd9=ss0_m, cvar9='SSs0', cln_d9=cc+'sea surface sigma0 (sst&sss)', cun9='',
vd10=surf_s0_m,cvar10='surf_s0', cln_d10=cc+'surface sigma0 (first '+czs+'m)', cun10=''
)
cf_out = vdic['DIAG_D']+'/budget_srf_flx_'+CONFEXP+'_box_'+cbox+'.nc'
bnc.wrt_appnd_1d_series(Vtime, Qnet_m, cf_out, 'Qnet', cu_t='year', cu_d='W/m^2', cln_d=cc+'net heat flux',
vd2=Qnet_x_S_m, cvar2='Qnet_x_S', cln_d2=cc+'net heat flux x Surface', cun2='PW',
vd3=Qsw_m, cvar3='Qsw', cln_d3=cc+'solar radiation', cun3='W/m^2',
vd4=Qsw_x_S_m, cvar4='Qsw_x_S', cln_d4=cc+'solar radiation x Surface', cun4='PW',
vd5=PmE_m, cvar5='PmE', cln_d5=cc+'net freshwater flux', cun5='Sv',
vd6=Tau_m, cvar6='Tau', cln_d6=cc+'wind stress module', cun6='N/m^2'
)
|
plesager/barakuda
|
python/exec/budget_rectangle_box.py
|
Python
|
gpl-2.0
| 14,348
|
[
"ORCA"
] |
058ca02d70663da4562acdc0238d4a2bc8988fa7d3ee16e825e9d56ee70d3156
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# A list of all valid configuration keywords for a measurement.
# The variable names correspond to the section titles in
# a configuration file, e.g.
#
# [setup]
# channel width = 20
# chip region = channel
# ...
# [imaging]
# exposure time = 20
# ...
# etc.
meta = {
# All parameters related to the actual experiment
"experiment": [
# no correspondence
["date", str, "Date of measurement ('YYYY-MM-DD')"],
# no correspondence
["event count", int, "Number of recorded events"],
# General - Measurement Number
["run index", int, "Index of measurement run"],
# no correspondence
["sample", str, "Measured sample or user-defined reference"],
# no correspondence
["time", str, "Start time of measurement ('HH:MM:SS')"],
],
# All special keywords related to RT-FDC
"fluorescence": [
# FLUOR - Bitdepthraw = 16
["bit depth", int, "Trace bit depth"],
# FLUOR - FL-Channels = 1
["channel count", int, "Number of channels"],
# FLUOR - Laser Power 488 [mW] = 0
["laser 1 power", float, "Laser 1 output power [mW]"],
# FLUOR - Laser Power 561 [mW] = 0
["laser 2 power", float, "Laser 2 output power [mW]"],
# FLUOR - Laser Power 640 [mW] = 0
["laser 3 power", float, "Laser 3 output power [mW]"],
# no correspondence
["laser 1 lambda", float, "Laser 1 wavelength [nm]"],
# no correspondence
["laser 2 lambda", float, "Laser 2 wavelength [nm]"],
# no correspondence
["laser 3 lambda", float, "Laser 3 wavelength [nm]"],
# FLUOR - Samplerate = 1000000
["sample rate", float, "Trace sample rate [Hz]"],
# FLUOR - ADCmax = 1
["signal max", float, "Upper voltage detection limit [V]"],
# FLUOR - ADCmin = -1
["signal min", float, "Lower voltage detection limit [V]"],
# no correspondence
["trace median", int, "Rolling median filter size for traces"],
],
# All tdms-related parameters
"fmt_tdms": [
["video frame offset", int, "Missing events at beginning of video"],
],
# All imaging-related keywords
"imaging": [
# Framerate - Shutter Time
["exposure time", float, "Sensor exposure time [µs]"],
# General - Current LED [A]
["flash current", float, "Light source current [A]"],
# no correspondence
["flash device", str, "Light source device type (e.g. green LED)"],
# General - Shutter Time LED [us]
["flash duration", float, "Light source flash duration [µs]"],
# Framerate - Frame Rate
["frame rate", float, "Imaging frame rate [Hz]"],
# Image - Pix Size
["pixel size", float, "Pixel size [µm]"],
# ROI - x-pos
["roi position x", float, "Image x coordinate on sensor [px]"],
# ROI - y-pos
["roi position y", float, "Image y coordinate on sensor [px]"],
# ROI - width
["roi size x", int, "Image width [px]"],
# ROI - height
["roi size y", int, "Image height [px]"],
],
# All parameters for online contour extraction from the event images
"online_contour": [
# Image - Trig Thresh = 50
["bin area min", int, "Minium pixel area of binary image event"],
# Image - Bin Ops = 5
["bin kernel", int, "Odd ellipse kernel size, binary image morphing"],
# Image - Margin = 0
["bin margin", int, "Remove margin in x for contour detection"],
# Image - Thresh = 6
["bin threshold", int, "Binary threshold for avg-bg-corrected image"],
# Image - Blur = 0
["image blur", int, "Odd sigma for Gaussian blur (21x21 kernel)"],
# Image - Diff_Method = 1
["no absdiff", bool, "Avoid OpenCV 'absdiff' for avg-bg-correction"],
],
# All online filters
"online_filter": [
# Image - Cell Aspect Min
["aspect min", float, "Minimum aspect ratio of bounding box"],
# Image - Cell Aspect Max
["aspect max", float, "Maximum aspect ratio of bounding box"],
# Image - Cell Max Length = 80.000000
["size_x max", int, "Maximum bounding box size x [µm]"],
# Image - Cell Max Height = 20.000000
["size_y max", int, "Maximum bounding box size y [µm]"],
# no correspondence
["size_x min", int, "Minimum bounding box size x [µm]"],
# no correspondence
["size_y min", int, "Minimum bounding box size y [µm]"],
],
# All setup-related keywords, except imaging
"setup": [
# General - Channel width
["channel width", float, "Width of microfluidic channel [µm]"],
# General - Region
["chip region", str, "Imaged chip region (channel or reservoir)"],
# General - Flow Rate [ul/s]
["flow rate", float, "Flow rate in channel [µL/s]"],
# General - Sample Flow Rate [ul/s]
["flow rate sample", float, "Sample flow rate [µL/s]"],
# General - Sheath Flow Rate [ul/s]
["flow rate sheath", float, "Sheath flow rate [µL/s]"],
# no correspondence
["medium", str, "The medium used (e.g. CellCarrierB, water)"],
# Image - Setup
["module composition", str, "Comma-separated list of modules used"],
# no correspondence
["software version", str, "Acquisition software with version"],
# FLUOR - Ambient Temperature
["temperature", float, "Chip temperature [°C]"],
# no correspondence
["viscosity", float, "Medium viscosity [Pa*s], if 'medium' not given"]
],
}
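# --- Hypothetical usage sketch (added; not part of the original file): the
# "meta" dictionary can be rendered as a blank configuration-file template,
# mirroring the [section]/keyword layout shown in the header comment.
def print_config_template(meta_dict=meta):
    for section in sorted(meta_dict):
        print("[{}]".format(section))
        for key, dtype, doc in meta_dict[section]:
            # one keyword per line, documented with its expected type
            print("{} =   ; {} ({})".format(key, doc, dtype.__name__))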
|
ZELLMECHANIK-DRESDEN/rtdc_hdf5
|
Python/meta.py
|
Python
|
bsd-3-clause
| 5,818
|
[
"Gaussian"
] |
c155fbfaab14234f8b9850c9db6aa0896e2642612a2381701128a2418616e3db
|
# Copyright 2012, 2013 by the Micromagnum authors.
#
# This file is part of MicroMagnum.
#
# MicroMagnum is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MicroMagnum is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MicroMagnum. If not, see <http://www.gnu.org/licenses/>.
from .vtk import VtkFile, VtkRectilinearGrid, VtkFloat64
import struct
def writeVTK(filename, field):
mesh = field.mesh
n = mesh.num_nodes
d = mesh.delta
# I. Describe data entries in file
start, end = (0, 0, 0), (n[0], n[1], n[2])
w = VtkFile(filename, VtkRectilinearGrid)
w.openGrid(start = start, end = end)
w.openPiece(start = start, end = end)
# - Magnetization data
w.openData("Cell", vectors = "M")
w.addData("M", VtkFloat64, field.size(), 3)
w.closeData("Cell")
# - Coordinate data
w.openElement("Coordinates")
w.addData("x_coordinate", VtkFloat64, n[0] + 1, 1)
w.addData("y_coordinate", VtkFloat64, n[1] + 1, 1)
w.addData("z_coordinate", VtkFloat64, n[2] + 1, 1)
w.closeElement("Coordinates")
w.closePiece()
w.closeGrid()
# II. Append binary parts to file
def coordRange(start, step, n):
result = bytearray(0)
for i in range(0, n+1):
result = result + struct.pack('d', start + step * i)
return result
w.appendData(field.toByteArray())
w.appendData(coordRange(0.0, d[0], n[0]))
w.appendData(coordRange(0.0, d[1], n[1]))
w.appendData(coordRange(0.0, d[2], n[2]))
# III. Save & close
w.save()
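# --- Hypothetical usage sketch (added; the exact field API is an assumption,
# not taken from this file): writeVTK() only relies on field.mesh.num_nodes,
# field.mesh.delta, field.size() and field.toByteArray(), e.g. with a
# MicroMagnum magnetization field:
#
#   from magnum import RectangularMesh, VectorField
#   mesh = RectangularMesh((32, 32, 1), (5e-9, 5e-9, 3e-9))
#   M = VectorField(mesh)
#   writeVTK("m0.vtr", M)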
|
MicroMagnum/MicroMagnum
|
src/magnum/micromagnetics/io/write_vtk.py
|
Python
|
gpl-3.0
| 1,978
|
[
"VTK"
] |
000a0f1c3bfee6062ebe1a4dfe3961e73e0d8c3f9446eeb11bef3acf842346c9
|
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
from ContourFilter import ContourFilter
from TransformFilter import TransformFilter
from TubeFilter import TubeFilter
from ClipperFilterBase import ClipperFilterBase
from PlaneClipper import PlaneClipper
from BoxClipper import BoxClipper
from ChiggerFilterBase import ChiggerFilterBase
def create_basic_filter(vtkfilter_type):
"""
Function for creating meta filter objects.
"""
class ChiggerMetaFilter(ChiggerFilterBase):
"""
Meta object for generating chigger filter objects.
"""
def __init__(self, **kwargs):
super(ChiggerMetaFilter, self).__init__(vtkfilter_type=vtkfilter_type, **kwargs)
return ChiggerMetaFilter
GeometryFilter = create_basic_filter(vtk.vtkCompositeDataGeometryFilter)
IdFilter = create_basic_filter(vtk.vtkIdFilter)
CellCenters = create_basic_filter(vtk.vtkCellCenters)
SelectVisiblePoints = create_basic_filter(vtk.vtkSelectVisiblePoints)
CompositeDataProbeFilter = create_basic_filter(vtk.vtkCompositeDataProbeFilter)
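# --- Hypothetical usage sketch (added; vtkOutlineFilter is a standard VTK
# class, but its use here is illustrative, not from the original file): any
# further single-input VTK filter can be wrapped the same way,
#
#   OutlineFilter = create_basic_filter(vtk.vtkOutlineFilter)
#   outline = OutlineFilter()  # a ChiggerFilterBase wrapping vtkOutlineFilter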
|
liuwenf/moose
|
python/chigger/filters/__init__.py
|
Python
|
lgpl-2.1
| 1,914
|
[
"MOOSE",
"VTK"
] |
144709923087a91aa761df76db18d6c327e19f1d2ca84bb95c7c4af83e239013
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ..gloo import Texture2D, VertexBuffer
from ..color import get_colormap
from .shaders import Function, FunctionChain
from .transforms import NullTransform
from .visual import Visual
from ..io import load_spatial_filters
VERT_SHADER = """
uniform int method; // 0=subdivide, 1=impostor
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = $transform(vec4(a_position, 0., 1.));
}
"""
FRAG_SHADER = """
uniform vec2 image_size;
uniform int method; // 0=subdivide, 1=impostor
uniform sampler2D u_texture;
varying vec2 v_texcoord;
vec4 map_local_to_tex(vec4 x) {
// Cast ray from 3D viewport to surface of image
// (if $transform does not affect z values, then this
// can be optimized as simply $transform.map(x) )
vec4 p1 = $transform(x);
vec4 p2 = $transform(x + vec4(0, 0, 0.5, 0));
p1 /= p1.w;
p2 /= p2.w;
vec4 d = p2 - p1;
float f = p2.z / d.z;
vec4 p3 = p2 - d * f;
// finally map local to texture coords
return vec4(p3.xy / image_size, 0, 1);
}
void main()
{
vec2 texcoord;
if( method == 0 ) {
texcoord = v_texcoord;
}
else {
// vertex shader outputs clip coordinates;
// fragment shader maps to texture coordinates
texcoord = map_local_to_tex(vec4(v_texcoord, 0, 1)).xy;
}
gl_FragColor = $color_transform($get_data(texcoord));
}
""" # noqa
_interpolation_template = """
#include "misc/spatial-filters.frag"
vec4 texture_lookup_filtered(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return %s($texture, $shape, texcoord);
}"""
_texture_lookup = """
vec4 texture_lookup(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return texture2D($texture, texcoord);
}"""
_apply_clim_float = """
float apply_clim(float data) {
data = data - $clim.x;
data = data / ($clim.y - $clim.x);
return max(data, 0);
}"""
_apply_clim = """
vec4 apply_clim(vec4 color) {
color.rgb = color.rgb - $clim.x;
color.rgb = color.rgb / ($clim.y - $clim.x);
return max(color, 0);
}
"""
_apply_gamma_float = """
float apply_gamma(float data) {
return pow(data, $gamma);
}"""
_apply_gamma = """
vec4 apply_gamma(vec4 color) {
color.rgb = pow(color.rgb, vec3($gamma));
return color;
}
"""
_null_color_transform = 'vec4 pass(vec4 color) { return color; }'
_c2l = 'float cmap(vec4 color) { return (color.r + color.g + color.b) / 3.; }'
def _build_color_transform(data, clim, gamma, cmap):
if data.ndim == 2 or data.shape[2] == 1:
fclim = Function(_apply_clim_float)
fgamma = Function(_apply_gamma_float)
fun = FunctionChain(
None, [Function(_c2l), fclim, fgamma, Function(cmap.glsl_map)]
)
else:
fclim = Function(_apply_clim)
fgamma = Function(_apply_gamma)
fun = FunctionChain(None, [Function(_null_color_transform), fclim, fgamma])
fclim['clim'] = clim
fgamma['gamma'] = gamma
return fun
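# (Added note: for a 2-D luminance image the resulting chain is
#  rgba -> channel average -> clim rescale -> gamma -> colormap lookup,
#  while RGB(A) data skip the colormap step and apply clim/gamma per channel.)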
class ImageVisual(Visual):
"""Visual subclass displaying an image.
Parameters
----------
data : ndarray
ImageVisual data. Can be shape (M, N), (M, N, 3), or (M, N, 4).
method : str
Selects method of rendering image in case of non-linear transforms.
Each method produces similar results, but may trade efficiency
and accuracy. If the transform is linear, this parameter is ignored
and a single quad is drawn around the area of the image.
* 'auto': Automatically select 'impostor' if the image is drawn
with a nonlinear transform; otherwise select 'subdivide'.
* 'subdivide': ImageVisual is represented as a grid of triangles
with texture coordinates linearly mapped.
* 'impostor': ImageVisual is represented as a quad covering the
entire view, with texture coordinates determined by the
transform. This produces the best transformation results, but may
be slow.
grid: tuple (rows, cols)
If method='subdivide', this tuple determines the number of rows and
columns in the image grid.
cmap : str | ColorMap
Colormap to use for luminance images.
clim : str | tuple
Limits to use for the colormap. Can be 'auto' to auto-set bounds to
the min and max of the data.
gamma : float
Gamma to use during colormap lookup. Final color will be cmap(val**gamma).
by default: 1.
interpolation : str
Selects method of image interpolation. Makes use of the two Texture2D
interpolation methods and the available interpolation methods defined
in vispy/gloo/glsl/misc/spatial_filters.frag
* 'nearest': Default, uses 'nearest' with Texture2D interpolation.
* 'bilinear': uses 'linear' with Texture2D interpolation.
* 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'bicubic',
'catrom', 'mitchell', 'spline16', 'spline36', 'gaussian',
'bessel', 'sinc', 'lanczos', 'blackman'
**kwargs : dict
Keyword arguments to pass to `Visual`.
Notes
-----
The colormap functionality through ``cmap`` and ``clim`` are only used
if the data are 2D.
"""
def __init__(self, data=None, method='auto', grid=(1, 1),
cmap='viridis', clim='auto', gamma=1.0,
interpolation='nearest', **kwargs):
self._data = None
self._gamma = gamma
# load 'float packed rgba8' interpolation kernel
# to load float interpolation kernel use
# `load_spatial_filters(packed=False)`
kernel, self._interpolation_names = load_spatial_filters()
self._kerneltex = Texture2D(kernel, interpolation='nearest')
# The unpacking can be debugged by changing "spatial-filters.frag"
# to have the "unpack" function just return the .r component. That
# combined with using the below as the _kerneltex allows debugging
# of the pipeline
# self._kerneltex = Texture2D(kernel, interpolation='linear',
# internalformat='r32f')
# create interpolation shader functions for available
# interpolations
fun = [Function(_interpolation_template % n)
for n in self._interpolation_names]
self._interpolation_names = [n.lower()
for n in self._interpolation_names]
self._interpolation_fun = dict(zip(self._interpolation_names, fun))
self._interpolation_names.sort()
self._interpolation_names = tuple(self._interpolation_names)
# overwrite "nearest" and "bilinear" spatial-filters
# with "hardware" interpolation _data_lookup_fn
self._interpolation_fun['nearest'] = Function(_texture_lookup)
self._interpolation_fun['bilinear'] = Function(_texture_lookup)
if interpolation not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
self._interpolation = interpolation
# check texture interpolation
if self._interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
texture_interpolation = 'nearest'
self._method = method
self._grid = grid
self._texture_limits = None
self._need_texture_upload = True
self._need_vertex_update = True
self._need_colortransform_update = True
self._need_interpolation_update = True
self._texture = Texture2D(np.zeros((1, 1, 4)),
interpolation=texture_interpolation)
self._subdiv_position = VertexBuffer()
self._subdiv_texcoord = VertexBuffer()
# impostor quad covers entire viewport
vertices = np.array([[-1, -1], [1, -1], [1, 1],
[-1, -1], [1, 1], [-1, 1]],
dtype=np.float32)
self._impostor_coords = VertexBuffer(vertices)
self._null_tr = NullTransform()
self._init_view(self)
super(ImageVisual, self).__init__(vcode=VERT_SHADER, fcode=FRAG_SHADER)
self.set_gl_state('translucent', cull_face=False)
self._draw_mode = 'triangles'
# define _data_lookup_fn as None, will be setup in
# self._build_interpolation()
self._data_lookup_fn = None
self.clim = clim
self.cmap = cmap
if data is not None:
self.set_data(data)
self.freeze()
def set_data(self, image):
"""Set the data
Parameters
----------
image : array-like
The image data.
"""
data = np.asarray(image)
if self._data is None or self._data.shape != data.shape:
self._need_vertex_update = True
self._data = data
self._need_texture_upload = True
def view(self):
v = Visual.view(self)
self._init_view(v)
return v
def _init_view(self, view):
# Store some extra variables per-view
view._need_method_update = True
view._method_used = None
@property
def clim(self):
return (self._clim if isinstance(self._clim, str) else
tuple(self._clim))
@clim.setter
def clim(self, clim):
if isinstance(clim, str):
if clim != 'auto':
raise ValueError('clim must be "auto" if a string')
self._need_texture_upload = True
else:
clim = np.array(clim, float)
if clim.shape != (2,):
raise ValueError('clim must have two elements')
if self._texture_limits is not None and (
(clim[0] < self._texture_limits[0])
or (clim[1] > self._texture_limits[1])
):
self._need_texture_upload = True
self._clim = clim
if self._texture_limits is not None:
self.shared_program.frag['color_transform'][1]['clim'] = self.clim_normalized
self.update()
@property
def clim_normalized(self):
"""Normalize current clims between 0-1 based on last-used texture data range.
In _build_texture(), the data is normalized (on the CPU) to 0-1 using ``clim``.
During rendering, the frag shader will apply the final contrast adjustment based on
the current ``clim``.
"""
range_min, range_max = self._texture_limits
clim_min, clim_max = self.clim
clim_min = (clim_min - range_min) / (range_max - range_min)
clim_max = (clim_max - range_min) / (range_max - range_min)
return clim_min, clim_max
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self._need_colortransform_update = True
self.update()
@property
def gamma(self):
"""The gamma used when rendering the image."""
return self._gamma
@gamma.setter
def gamma(self, value):
"""Set gamma used when rendering the image."""
if value <= 0:
raise ValueError("gamma must be > 0")
self._gamma = float(value)
self.shared_program.frag['color_transform'][2]['gamma'] = self._gamma
self.update()
@property
def method(self):
return self._method
@method.setter
def method(self, m):
if self._method != m:
self._method = m
self._need_vertex_update = True
self.update()
@property
def size(self):
return self._data.shape[:2][::-1]
@property
def interpolation(self):
return self._interpolation
@interpolation.setter
def interpolation(self, i):
if i not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
if self._interpolation != i:
self._interpolation = i
self._need_interpolation_update = True
self.update()
@property
def interpolation_functions(self):
return self._interpolation_names
# The interpolation code could be transferred to a dedicated filter
# function in visuals/filters as discussed in #1051
def _build_interpolation(self):
"""Rebuild the _data_lookup_fn using different interpolations within
the shader
"""
interpolation = self._interpolation
self._data_lookup_fn = self._interpolation_fun[interpolation]
self.shared_program.frag['get_data'] = self._data_lookup_fn
# only 'bilinear' uses 'linear' texture interpolation
if interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
# 'nearest' (and also 'bilinear') doesn't use spatial_filters.frag
# so u_kernel and shape setting is skipped
texture_interpolation = 'nearest'
if interpolation != 'nearest':
self.shared_program['u_kernel'] = self._kerneltex
self._data_lookup_fn['shape'] = self._data.shape[:2][::-1]
if self._texture.interpolation != texture_interpolation:
self._texture.interpolation = texture_interpolation
self._data_lookup_fn['texture'] = self._texture
self._need_interpolation_update = False
def _build_vertex_data(self):
"""Rebuild the vertex buffers used for rendering the image when using
the subdivide method.
"""
grid = self._grid
w = 1.0 / grid[1]
h = 1.0 / grid[0]
quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
[0, 0, 0], [w, h, 0], [0, h, 0]],
dtype=np.float32)
quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
quads[:] = quad
mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
mgrid = mgrid[:, :, np.newaxis, :]
mgrid[..., 0] *= w
mgrid[..., 1] *= h
quads[..., :2] += mgrid
tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
tex_coords = np.ascontiguousarray(tex_coords[:, :2])
vertices = tex_coords * self.size
self._subdiv_position.set_data(vertices.astype('float32'))
self._subdiv_texcoord.set_data(tex_coords.astype('float32'))
self._need_vertex_update = False
def _update_method(self, view):
"""Decide which method to use for *view* and configure it accordingly.
"""
method = self._method
if method == 'auto':
if view.transforms.get_transform().Linear:
method = 'subdivide'
else:
method = 'impostor'
view._method_used = method
if method == 'subdivide':
view.view_program['method'] = 0
view.view_program['a_position'] = self._subdiv_position
view.view_program['a_texcoord'] = self._subdiv_texcoord
elif method == 'impostor':
view.view_program['method'] = 1
view.view_program['a_position'] = self._impostor_coords
view.view_program['a_texcoord'] = self._impostor_coords
else:
raise ValueError("Unknown image draw method '%s'" % method)
self.shared_program['image_size'] = self.size
view._need_method_update = False
self._prepare_transforms(view)
def _build_texture(self):
data = self._data
if data.dtype == np.float64:
data = data.astype(np.float32)
if data.ndim == 2 or data.shape[2] == 1:
# deal with clim on CPU b/c of texture depth limits :(
# can eventually do this by simulating 32-bit float... maybe
clim = self._clim
if isinstance(clim, str) and clim == 'auto':
clim = np.min(data), np.max(data)
clim = np.asarray(clim, dtype=np.float32)
data = data - clim[0] # not inplace so we don't modify orig data
if clim[1] - clim[0] > 0:
data /= clim[1] - clim[0]
else:
data[:] = 1 if data[0, 0] != 0 else 0
self._clim = np.array(clim)
else:
# assume that RGB data is already scaled (0, 1)
if isinstance(self._clim, str) and self._clim == 'auto':
self._clim = (0, 1)
self._texture_limits = np.array(self._clim)
self._need_colortransform_update = True
self._texture.set_data(data)
self._need_texture_upload = False
def _compute_bounds(self, axis, view):
if axis > 1:
return (0, 0)
else:
return (0, self.size[axis])
def _prepare_transforms(self, view):
trs = view.transforms
prg = view.view_program
method = view._method_used
if method == 'subdivide':
prg.vert['transform'] = trs.get_transform()
prg.frag['transform'] = self._null_tr
else:
prg.vert['transform'] = self._null_tr
prg.frag['transform'] = trs.get_transform().inverse
def _prepare_draw(self, view):
if self._data is None:
return False
if self._need_interpolation_update:
self._build_interpolation()
if self._need_texture_upload:
self._build_texture()
if self._need_colortransform_update:
prg = view.view_program
self.shared_program.frag['color_transform'] = _build_color_transform(
self._data, self.clim_normalized, self.gamma, self.cmap
)
self._need_colortransform_update = False
prg['texture2D_LUT'] = self.cmap.texture_lut() \
if (hasattr(self.cmap, 'texture_lut')) else None
if self._need_vertex_update:
self._build_vertex_data()
if view._need_method_update:
self._update_method(view)
|
Eric89GXL/vispy
|
vispy/visuals/image.py
|
Python
|
bsd-3-clause
| 18,615
|
[
"Gaussian"
] |
5c0b455d25edb3b8b295a1093fc2f5e6cb7ee61d9628dabbcf7c0bb62f224f45
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import array2d
from ..utils.extmath import fast_logdet, pinvh
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
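# --- Hypothetical sanity check (added; not part of the original file): with
# emp_cov = precision = I_p the expression above reduces to
# (-p + 0 - p*log(2*pi)) / 2:
#
#   p = 3
#   eye = np.eye(p)
#   assert np.isclose(log_likelihood(eye, eye), (-p - p * np.log(2 * np.pi)) / 2.)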
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
return covariance
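# --- Hypothetical sanity check (added; not part of the original file): in the
# default (not assume_centered) branch the estimator is exactly the biased
# sample covariance,
#
#   X = np.random.RandomState(0).randn(200, 4)
#   assert np.allclose(empirical_covariance(X), np.cov(X.T, bias=1))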
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
`covariance_` : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = array2d(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
`precision_` : array-like,
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
        y : not used, present for API consistency purposes.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A)))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
        # optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the Mahalanobis distances of given observations.
The provided observations are assumed to be centered. One may want to
center them using a location estimate first.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
            The observations, the Mahalanobis distances of which we
            compute. Observations are assumed to be drawn from the same
            distribution as the data used in fit (including centering).
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
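# --- Hypothetical usage sketch (added; not part of the original file):
#
#   X = np.random.RandomState(0).randn(500, 3)
#   est = EmpiricalCovariance().fit(X)
#   d2 = est.mahalanobis(X)  # squared distances; centering uses est.location_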
|
Tong-Chen/scikit-learn
|
sklearn/covariance/empirical_covariance_.py
|
Python
|
bsd-3-clause
| 9,099
|
[
"Gaussian"
] |
5c13fe98f1b78b2a79a090d89c9e98e9f2d8691285c0d124fd858596afba84e4
|
# $Id$
#
# Copyright (C) 2000-2006 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Matrix operations which may or may not come in handy some day
**NOTE**: the two functions defined here have been moved to ML.Data.Stats
"""
from __future__ import print_function
from rdkit.ML.Data import Stats
FormCovarianceMatrix = Stats.FormCovarianceMatrix
PrincipalComponents = Stats.PrincipalComponents
if __name__ == '__main__':
import sys
import files
fileN = sys.argv[1]
iV,dV = files.ReadDataFile(fileN)
eVals,eVects=PrincipalComponents(iV)
print('eVals: ', eVals)
print('eVects:', eVects)
|
adalke/rdkit
|
rdkit/ML/MatOps.py
|
Python
|
bsd-3-clause
| 805
|
[
"RDKit"
] |
de1bbeda4a94aeee3f3914e31fe19a855b7aad8fde2fe97193982eb06366ec1e
|
"""Analyse the polarity of cells using tensors.
This script makes use of a Gaussian projection as a pre-processing step prior
to segmentation of the cell wall and marker channels.
"""
import os
import os.path
import argparse
import logging
import PIL
import numpy as np
import skimage.feature
import skimage.filters
from jicbioimage.core.util.array import pretty_color_array
from jicbioimage.core.transform import transformation
from jicbioimage.core.io import (
AutoName,
AutoWrite,
)
from jicbioimage.transform import (
invert,
dilate_binary,
remove_small_objects,
)
from jicbioimage.segment import connected_components, watershed_with_seeds
from utils import (
get_microscopy_collection,
threshold_abs,
identity,
remove_large_segments,
)
from tensor import get_tensors
from annotate import make_transparent
from gaussproj import (
generate_surface_from_stack,
projection_from_stack_and_surface,
)
AutoName.prefix_format = "{:03d}_"
@transformation
def threshold_adaptive_median(image, block_size):
return skimage.filters.threshold_adaptive(image, block_size=block_size)
@transformation
def marker_in_wall(marker, wall):
return marker * wall
def segment_cells(image, max_cell_size):
"""Return segmented cells."""
image = identity(image)
wall = threshold_adaptive_median(image, block_size=101)
seeds = remove_small_objects(wall, min_size=100)
seeds = dilate_binary(seeds)
seeds = invert(seeds)
seeds = remove_small_objects(seeds, min_size=5)
seeds = connected_components(seeds, background=0)
segmentation = watershed_with_seeds(-image, seeds=seeds)
segmentation = remove_large_segments(segmentation, max_cell_size)
return segmentation, wall
def segment_markers(image, wall, threshold):
"""Return segmented markers."""
image = threshold_abs(image, threshold)
image = marker_in_wall(image, wall)
image = remove_small_objects(image, min_size=10)
segmentation = connected_components(image, background=0)
return segmentation
def analyse(microscopy_collection, wall_channel, marker_channel,
threshold, max_cell_size):
"""Do the analysis."""
# Prepare the input data for the segmentations.
cell_wall_stack = microscopy_collection.zstack_array(c=wall_channel)
marker_stack = microscopy_collection.zstack_array(c=marker_channel)
surface = generate_surface_from_stack(cell_wall_stack)
cell_wall_projection = projection_from_stack_and_surface(cell_wall_stack,
surface, 1, 9)
marker_projection = projection_from_stack_and_surface(marker_stack,
surface, 1, 9)
# Perform the segmentation.
cells, wall = segment_cells(cell_wall_projection, max_cell_size)
markers = segment_markers(marker_projection, wall, threshold)
# Get tensors.
tensors = get_tensors(cells, markers)
# Write out tensors to a text file.
fpath = os.path.join(AutoName.directory, "raw_tensors.txt")
with open(fpath, "w") as fh:
tensors.write_raw_tensors(fh)
# Write out intensity images.
fpath = os.path.join(AutoName.directory, "wall_intensity.png")
with open(fpath, "wb") as fh:
fh.write(cell_wall_projection.png())
fpath = os.path.join(AutoName.directory, "marker_intensity.png")
marker_im = marker_in_wall(marker_projection, wall)
with open(fpath, "wb") as fh:
fh.write(marker_im.png())
# Shrink the segments to make them clearer.
for i in cells.identifiers:
region = cells.region_by_identifier(i)
mask = region - region.inner.inner
cells[mask] = 0
colorful = pretty_color_array(cells)
pil_im = PIL.Image.fromarray(colorful.view(dtype=np.uint8))
pil_im = make_transparent(pil_im, 60)
fpath = os.path.join(AutoName.directory, "segmentation.png")
pil_im.save(fpath)
def main():
"""Run the analysis on an individual image."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input_file", help="Path to input tiff file")
parser.add_argument("output_dir", help="Output directory")
parser.add_argument("-w", "--wall-channel",
default=1, type=int,
help="Wall channel (zero indexed)")
parser.add_argument("-m", "--marker-channel",
default=0, type=int,
help="Marker channel (zero indexed)")
parser.add_argument("-t", "--threshold",
default=60, type=int,
help="Marker threshold")
parser.add_argument("-s", "--max-cell-size",
default=10000, type=int,
help="Maximum cell size (pixels)")
parser.add_argument("--debug",
default=False, action="store_true")
args = parser.parse_args()
if not os.path.isfile(args.input_file):
parser.error("No such file: {}".format(args.input_file))
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
AutoName.directory = args.output_dir
AutoWrite.on = args.debug
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
logging.info("Input file: {}".format(args.input_file))
logging.info("Wall channel: {}".format(args.wall_channel))
logging.info("Marker channel: {}".format(args.marker_channel))
logging.info("Marker threshold: {}".format(args.threshold))
logging.info("Max cell size: {}".format(args.max_cell_size))
microscopy_collection = get_microscopy_collection(args.input_file)
analyse(microscopy_collection,
wall_channel=args.wall_channel,
marker_channel=args.marker_channel,
threshold=args.threshold,
max_cell_size=args.max_cell_size)
if __name__ == "__main__":
main()
|
JIC-Image-Analysis/leaf-cell-polarisation-tensors
|
scripts/automated_gaussproj_analysis.py
|
Python
|
mit
| 5,901
|
[
"Gaussian"
] |
57f28755a387a80b1e0d1faf0f5ac14692d5dc3b74bc0a81c9a59fd520ffc765
|
#************************************************************************
#
# PathFinder: finding a series of labeled nodes within a
# two-layer directed, cyclic graph.
# Copyright (2013) Sandia Corporation
#
# Copyright (2013) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
# retains certain rights in this software.
#
# This file is part of PathFinder.
#
# PathFinder is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# PathFinder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PathFinder. If not, see <http://www.gnu.org/licenses/>.
#
# Questions? Contact J. Brian Rigdon (jbrigdo@sandia.gov)
import networkx, random, copy, os
#import api
random.seed()
def gen_super_structure(max):
num_nodes = max
max_edges = min(10, num_nodes-2)
nodes = [x * 100000 for x in xrange(num_nodes)]
g = networkx.DiGraph()
g.add_nodes_from(nodes)
added = []
not_added = set(nodes)
first = random.choice(list(not_added))
added.append(first)
not_added.remove(first)
while added:
n = added.pop()
max_edges = min(max_edges, len(not_added))
num_edges = random.randint(0,max_edges)
outgoing = random.sample(not_added, num_edges)
for e in outgoing:
g.add_edge(n,e)
added.append(e)
not_added.difference_update(outgoing)
num_random_edges = random.randint(min(5,len(nodes)-1),len(nodes))
for i in xrange(num_random_edges):
x = random.choice(list(nodes))
y = random.choice(list(nodes))
g.add_edge(x,y)
return g
def new_gen_super_structure(max):
num_nodes = max
max_edges = min(num_nodes/2, num_nodes-2)
nodes = [x * 100000 for x in xrange(num_nodes)]
g = networkx.DiGraph()
g.add_nodes_from(nodes)
added = []
not_added = set(nodes)
first = random.choice(list(not_added))
added.append(first)
not_added.remove(first)
while added:
n = added.pop()
max_edges = min(max_edges, len(not_added))
num_edges = random.randint(0,max_edges)
outgoing = random.sample(not_added, num_edges)
for e in outgoing:
g.add_edge(n,e)
added.append(e)
not_added.difference_update(outgoing)
num_random_edges = random.randint(min(5,len(nodes)-1),len(nodes))
for i in xrange(num_random_edges):
x = random.choice(list(nodes))
y = random.choice(list(nodes))
g.add_edge(x,y)
return g
def gen_sub_structure(top_node, super):
g = networkx.DiGraph()
label = top_node
stack = [label]
g.add_node(label)
forward_edges = []
min_nodes = len(super.successors(top_node))
while stack:
n = stack.pop()
nodes_with_few_successors = 0
for node in g.nodes():
if len(g.successors(node)) < 2 and node not in forward_edges:
nodes_with_few_successors += 1
if min_nodes > nodes_with_few_successors:
need_more_nodes = True
else:
need_more_nodes = False
if random.random() > 0.2 or need_more_nodes:
new_node = label + g.number_of_nodes()
g.add_node(new_node)
g.add_edge(n, new_node)
stack.append(new_node)
if random.random() > 0.5: # Add another edge
if random.random() > 0.5: # New edge is a back edge
target = random.choice(g.nodes())
g.add_edge(n, target)
else: # New edge is potentially a forward edge
forward_edges.append(n)
for e in forward_edges:
target = random.choice(g.nodes())
g.add_edge(e, target)
return g
def add_calls(super, subs):
edges = super.edges()
new_edges = set()
picked = set()
for e in edges:
source = e[0]
target = e[1]
n = random.choice(subs[source].nodes())
while len(subs[source].successors(n)) > 1 or n in picked:
n = random.choice(subs[source].nodes())
picked.add(n)
new_edges.add((n, target))
return new_edges
def add_labels(sub_nodes, num_labels, total_labels):
labels = []
label_assignments = {}
for i in xrange(num_labels):
labels.append("label_" + str(i))
if num_labels >= len(sub_nodes):
num_labels = len(sub_nodes)
nodes = copy.copy(sub_nodes)
for l in labels:
choice = random.choice(list(nodes))
label_assignments[choice] = l
remaining_labels = total_labels - num_labels
for i in xrange(remaining_labels):
choice = random.choice(list(nodes))
label_assignments[choice] = random.choice(labels)
return label_assignments
def build_graphs(num_nodes, num_labels, total_labels):
g = new_gen_super_structure(num_nodes)
subs = {}
for n in g.nodes():
subs[n] = gen_sub_structure(n, g)
calls = add_calls(g, subs)
basic_blocks = set()
for n in subs:
basic_blocks.update(subs[n].nodes())
labels = add_labels(basic_blocks, num_labels, total_labels)
return {"super":g, "subs":subs, "calls":calls, "labels":labels}
def build_graph(args, opts):
""" Plugin: builds an artificial graph resembling a control flow graph. n is the
number of functions, labels is the number of unique labels.
Syntax: build_graph --n=100 --labels=50 --total_labels=100
"""
if not "n" in opts or not "labels" in opts:
raise ShellSyntaxError("You must specify n and labels.")
n = opts["n"]
labels = opts["labels"]
total_labels = opts["total_labels"]
return [build_graphs(n, labels, total_labels)]
def write_graph(args, opts):
""" Plugin: writes the graph created by build_graph to a file specified by name.
Syntax: build_graph --n=100 --labels=50 | write_graph --name=myfile
"""
if not "name" in opts:
raise ShellSyntaxError("You must specify a file name.")
name = opts["name"]
graph = args[0]
cg = graph["super"]
cfgs = graph["subs"]
calls = dict(graph["calls"])
labels = graph["labels"]
fpath = name ; # making this a standalone script... os.path.join(api.scratch_dir, name)
fd = file(fpath, 'wb')
fd.write("Functions: " + str(cg.number_of_nodes()) + "\n")
num_bbs = 0
for bb in cfgs:
num_bbs += len(cfgs[bb].nodes())
fd.write("Basic blocks: " + str(num_bbs) + "\n\n")
for n in cg.nodes():
edges = cg.successors(n)
s = len(edges)
outstr = str(n) + " " + str(s) + " "
for e in edges:
outstr += str(e) + " "
fd.write(outstr + "\n")
fd.write("\n--------------------------------------------\n")
for cfg in cfgs:
fd.write(str(cfg) + "\n")
if not cfgs[cfg]:
fd.write("\n--------------------------------------------\n")
continue
for n in cfgs[cfg].nodes():
edges = cfgs[cfg].successors(n)
s = len(edges)
if n in calls:
s += 1
outstr = str(n) + " " + str(s) + " "
for e in edges:
outstr += str(e) + " "
if n in calls:
outstr += str(calls[n]) + " "
fd.write(outstr + "\n")
fd.write("\n--------------------------------------------\n")
fd.write("SYSTEM CALLS \n\n")
for s in labels:
fd.write(str(s) + " " + labels[s] + "\n")
fd.close()
return graph
def write_sample_signatures(args, opts):
""" Plugin: writes the graph created by build_graph to a file specified by name.
Syntax: build_graph ... | write_graph ... | write_sample_signatures --name=myfile --signatures=100 --longest=20
"""
if not "signatures" in opts:
raise ShellSyntaxError("You must specify how many signatures to generate.")
sigs = opts["signatures"]
if not "longest" in opts:
raise ShellSyntaxError("You must specify longest signature possible.")
longest = opts["longest"]
if not "name" in opts:
raise ShellSyntaxError("You must specify a file name.")
name = opts["name"]
graph = args[0]
labels = graph["labels"].values() ; # we only want the actual labels
fpath = name ; # making this a standalone script... os.path.join(api.scratch_dir, name)
fd = file(fpath, 'wb')
for i in xrange(sigs):
signature = ""
for j in xrange(random.randrange(2,longest+1)):
signature += random.choice(labels) + " "
fd.write(signature + "\n")
fd.close()
# The exports variable is used when this module is loaded into an SNL product
# called "Oxide".
exports = [build_graph, write_graph, write_sample_signatures]
if __name__ == "__main__":
print "How big should the graph be?:"
num_nodes = input("(number of nodes) > ")
print "How many labels should there be?:"
num_labels = input("(number of labels) > ")
print "How many nodes should have labels?:"
prompt = "(number bigger than " + str(num_labels) + ") > "
total_labels = input(prompt)
graph = build_graphs(num_nodes, num_labels, total_labels)
print "\n\tGraph has been built\n"
print "Output directory?:"
dir = raw_input("(relative or absolute path) > ")
print "Output filename?:"
name = raw_input("(.adj_list will be added) > ")
adj_file_name = dir + "/" + name + ".adj_list"
args = [graph]
opts = {}
opts["name"] = adj_file_name
write_graph(args, opts)
print "\n\tAdjacency list file written as " + adj_file_name + ".\n"
print "Generate random signatures?:"
sig_yes_no = raw_input("(y/n) > ")
if sig_yes_no == 'y' or sig_yes_no == 'Y' or sig_yes_no == "yes":
print "How many signatures do you want?:"
signatures = input(" > ")
print "How long should the longest one be?"
sig_max = input(" > ")
sig_file_name = dir + "/" + name + ".sig"
opts["name"] = sig_file_name
opts["signatures"] = signatures
opts["longest"] = sig_max
write_sample_signatures(args,opts)
print "\n\tSignature file written as " + sig_file_name + ".\n"
print "...and done!\n\n"
|
Mantevo/PathFinder
|
ref/graph_gen.py
|
Python
|
lgpl-3.0
| 10,752
|
[
"Brian"
] |
83675ee1d81a046c219976a9ea959ede8686959411522282ce46802be6f316f0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the bencode parser plugin for Transmission BitTorrent files."""
import unittest
from plaso.lib import definitions
from plaso.parsers import bencode_parser
from tests.parsers.bencode_plugins import test_lib
class TransmissionPluginTest(test_lib.BencodePluginTestCase):
"""Tests for bencode parser plugin for Transmission BitTorrent files."""
def testProcess(self):
"""Tests the Process function."""
parser = bencode_parser.BencodeParser()
storage_writer = self._ParseFile(['bencode', 'transmission'], parser)
self.assertEqual(storage_writer.number_of_events, 3)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# The order in which BencodeParser generates events is nondeterministic
# hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'data_type': 'p2p:bittorrent:transmission',
'date_time': '2013-11-08 15:31:20',
'destination': '/Users/brian/Downloads',
'seedtime': 4,
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Test on second event of first torrent.
expected_event_values = {
'data_type': 'p2p:bittorrent:transmission',
'date_time': '2013-11-08 18:24:24',
'destination': '/Users/brian/Downloads',
'seedtime': 4,
'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
kiddinn/plaso
|
tests/parsers/bencode_plugins/transmission.py
|
Python
|
apache-2.0
| 1,725
|
[
"Brian"
] |
e9f579cee466b1b66823e2e6af30a49af31c056007c570426ac7a0ae767487a6
|
"""
Regularized Regression Example
------------------------------
This performs regularized regression on a gaussian basis function model.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from astroML.cosmology import Cosmology
from astroML.datasets import generate_mu_z
from astroML.density_estimation import FunctionDistribution
#----------------------------------------------------------------------
# generate data
np.random.seed(0)
z_sample, mu_sample, dmu = generate_mu_z(100, random_state=0)
cosmo = Cosmology()
z = np.linspace(0.01, 2, 1000)
mu = np.asarray(map(cosmo.mu, z))
#------------------------------------------------------------
# Manually convert data to a gaussian basis
# note that we're ignoring errors here, for the sake of example.
def gaussian_basis(x, mu, sigma):
return np.exp(-0.5 * ((x - mu) / sigma) ** 2)
centers = np.linspace(0, 1.8, 100)
widths = 0.2
X = gaussian_basis(z_sample[:, np.newaxis], centers, widths)
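# (Added note: the (100, 1) column z_sample[:, np.newaxis] broadcasts against
#  the 100 centers, so X is a (100, 100) design matrix with one Gaussian
#  basis function per center.)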
#------------------------------------------------------------
# Set up the figure to plot the results
fig = plt.figure(figsize=(12, 7))
fig.subplots_adjust(left=0.07, right=0.95,
bottom=0.08, top=0.95,
hspace=0.1, wspace=0.15)
classifier = [LinearRegression, Ridge, Lasso]
kwargs = [dict(), dict(alpha=0.005), dict(alpha=0.001)]
labels = ['Linear Regression', 'Ridge Regression', 'Lasso Regression']
for i in range(3):
clf = classifier[i](fit_intercept=True, **kwargs[i])
clf.fit(X, mu_sample)
w = clf.coef_
fit = clf.predict(gaussian_basis(z[:, None], centers, widths))
# plot fit
ax = fig.add_subplot(231 + i)
ax.xaxis.set_major_formatter(plt.NullFormatter())
# plot curves for regularized fits
if i == 0:
ax.set_ylabel('$\mu$')
else:
ax.yaxis.set_major_formatter(plt.NullFormatter())
curves = 37 + w * gaussian_basis(z[:, np.newaxis], centers, widths)
curves = curves[:, abs(w) > 0.01]
ax.plot(z, curves,
c='gray', lw=1, alpha=0.5)
ax.plot(z, fit, '-k')
ax.plot(z, mu, '--', c='gray')
ax.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray', lw=1)
ax.set_xlim(0.001, 1.8)
ax.set_ylim(36, 48)
ax.text(0.05, 0.95, labels[i],
ha='left', va='top',
transform=ax.transAxes)
# plot weights
ax = plt.subplot(234 + i)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.set_xlabel('z')
if i == 0:
ax.set_ylabel(r'$\theta$')
w *= 1E-12
ax.text(0, 1, r'$\rm \times 10^{12}$',
transform=ax.transAxes, fontsize=16)
ax.scatter(centers, w, s=9, lw=0, c='k')
ax.set_xlim(-0.05, 1.8)
if i == 1:
ax.set_ylim(-2, 4)
elif i == 2:
ax.set_ylim(-0.5, 2)
ax.text(0.05, 0.95, labels[i],
ha='left', va='top',
transform=ax.transAxes)
plt.show()
|
nhuntwalker/astroML
|
paper_figures/CIDU2012/fig_rbf_ridge_mu_z.py
|
Python
|
bsd-2-clause
| 3,158
|
[
"Gaussian"
] |
e0159de826c10d0b7c5b66e0dacc67c6ffd777cfbc68d653bc9158a50e1d5019
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Jul 30, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 30, 2012"
import unittest
import os
from pymatgen.io.smartio import read_structure, read_mol, write_structure, \
write_mol
from pymatgen.core.structure import Structure, Molecule
from pymatgen.analysis.structure_matcher import StructureMatcher
try:
import openbabel as ob
except ImportError:
ob = None
class MethodsTest(unittest.TestCase):
def test_read_structure(self):
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
for fname in ("Li2O.cif", "vasprun.xml",
"vasprun_Si_bands.xml", "Si.cssr"):
filename = os.path.join(test_dir, fname)
struct = read_structure(filename)
self.assertIsInstance(struct, Structure)
prim = read_structure(filename, primitive=True)
self.assertLessEqual(len(prim), len(struct))
sorted_s = read_structure(filename, sort=True)
self.assertEqual(sorted_s, sorted_s.get_sorted_structure())
m = StructureMatcher()
for ext in [".cif", ".json", ".cssr"]:
fn = "smartio_structure_test" + ext
write_structure(struct, fn)
back = read_structure(fn)
self.assertTrue(m.fit(back, struct))
os.remove(fn)
def test_read_mol(self):
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules")
for fname in ("methane.log", "c60.xyz", "ethane.gjf"):
filename = os.path.join(test_dir, fname)
mol = read_mol(filename)
self.assertIsInstance(mol, Molecule)
for ext in [".xyz", ".json", ".gjf"]:
fn = "smartio_mol_test" + ext
write_mol(mol, fn)
back = read_mol(fn)
self.assertEqual(back, mol)
os.remove(fn)
@unittest.skipIf(ob is None, "No openbabel")
def test_read_mol_babel(self):
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules")
for fname in ("ethane.mol", ):
filename = os.path.join(test_dir, fname)
mol = read_mol(filename)
self.assertIsInstance(mol, Molecule)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Dioptas/pymatgen
|
pymatgen/io/tests/test_smartio.py
|
Python
|
mit
| 2,675
|
[
"pymatgen"
] |
4573d4b8e636fc7c738ab47324d924a74c01dfe079306326b4e1cf7096c97486
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import numpy
from functools import reduce
from pyscf import gto, lib
from pyscf import scf, dft
from pyscf import cc
from pyscf.cc import dfccsd, eom_rccsd
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol).density_fit(auxbasis='weigend')
mf.conv_tol_grad = 1e-8
mf.kernel()
cc1 = dfccsd.RCCSD(mf).run(conv_tol=1e-10)
mycc = cc.ccsd.CCSD(mf).density_fit().set(max_memory=0)
mycc.__dict__.update(cc1.__dict__)
def make_mycc1():
mf1 = copy.copy(mf)
no = mol.nelectron // 2
n = mol.nao_nr()
nv = n - no
mf1.mo_occ = numpy.zeros(mol.nao_nr())
mf1.mo_occ[:no] = 2
numpy.random.seed(12)
mf1.mo_coeff = numpy.random.random((n,n))
dm = mf1.make_rdm1(mf1.mo_coeff, mf1.mo_occ)
fockao = mf1.get_hcore() + mf1.get_veff(mol, dm)
mf1.mo_energy = numpy.einsum('pi,pq,qi->i', mf1.mo_coeff, fockao, mf1.mo_coeff)
idx = numpy.hstack([mf1.mo_energy[:no].argsort(), no+mf1.mo_energy[no:].argsort()])
mf1.mo_coeff = mf1.mo_coeff[:,idx]
mycc1 = dfccsd.RCCSD(mf1)
eris1 = mycc1.ao2mo()
numpy.random.seed(12)
r1 = numpy.random.random((no,nv)) - .9
r2 = numpy.random.random((no,no,nv,nv)) - .9
r2 = r2 + r2.transpose(1,0,3,2)
mycc1.t1 = r1*1e-5
mycc1.t2 = r2*1e-5
return mf1, mycc1, eris1
mf1, mycc1, eris1 = make_mycc1()
no, nv = mycc1.t1.shape
def tearDownModule():
global mol, mf, cc1, mycc, mf1, mycc1, eris1
mol.stdout.close()
del mol, mf, cc1, mycc, mf1, mycc1, eris1
class KnownValues(unittest.TestCase):
def test_with_df(self):
self.assertAlmostEqual(cc1.e_tot, -76.118403942938741, 7)
numpy.random.seed(1)
mo_coeff = numpy.random.random(mf.mo_coeff.shape)
eris = cc.ccsd.CCSD(mf).ao2mo(mo_coeff)
self.assertAlmostEqual(lib.finger(numpy.array(eris.oooo)), 4.962033460861587 , 12)
self.assertAlmostEqual(lib.finger(numpy.array(eris.ovoo)),-1.3666078517246127, 12)
self.assertAlmostEqual(lib.finger(numpy.array(eris.oovv)), 55.122525571320871, 12)
self.assertAlmostEqual(lib.finger(numpy.array(eris.ovvo)), 133.48517302161068, 12)
self.assertAlmostEqual(lib.finger(numpy.array(eris.ovvv)), 59.418747028576142, 12)
self.assertAlmostEqual(lib.finger(numpy.array(eris.vvvv)), 43.562457227975969, 12)
def test_df_ipccsd(self):
e,v = mycc.ipccsd(nroots=1)
self.assertAlmostEqual(e, 0.42788191082629801, 6)
e,v = mycc.ipccsd(nroots=3)
self.assertAlmostEqual(e[0], 0.42788191082629801, 6)
self.assertAlmostEqual(e[1], 0.50229582430658171, 6)
self.assertAlmostEqual(e[2], 0.68557652412060088, 6)
myeom = eom_rccsd.EOMIP(mycc)
lv = myeom.ipccsd(nroots=3, left=True)[1]
e = myeom.ipccsd_star_contract(e, v, lv)
self.assertAlmostEqual(e[0], 0.43584093045349137, 6)
self.assertAlmostEqual(e[1], 0.50959675100507518, 6)
self.assertAlmostEqual(e[2], 0.69021193094404043, 6)
def test_df_ipccsd_koopmans(self):
e,v = mycc.ipccsd(nroots=3, koopmans=True)
self.assertAlmostEqual(e[0], 0.42788191082629801, 6)
self.assertAlmostEqual(e[1], 0.50229582430658171, 6)
self.assertAlmostEqual(e[2], 0.68557652412060088, 6)
e,v = mycc.ipccsd(nroots=3, guess=v[:3])
self.assertAlmostEqual(e[0], 0.42788191082629801, 6)
self.assertAlmostEqual(e[1], 0.50229582430658171, 6)
self.assertAlmostEqual(e[2], 0.68557652412060088, 6)
def test_df_ipccsd_partition(self):
e,v = mycc.ipccsd(nroots=3, partition='mp')
self.assertAlmostEqual(e[0], 0.42183117410776649, 6)
self.assertAlmostEqual(e[1], 0.49650713906402066, 6)
self.assertAlmostEqual(e[2], 0.6808175428439881 , 6)
e,v = mycc.ipccsd(nroots=3, partition='full')
self.assertAlmostEqual(e[0], 0.41392302194803809, 6)
self.assertAlmostEqual(e[1], 0.49046066205501643, 6)
self.assertAlmostEqual(e[2], 0.67472905602747868, 6)
def test_df_eaccsd(self):
self.assertAlmostEqual(mycc.e_tot, -76.118403942938741, 7)
e,v = mycc.eaccsd(nroots=1)
self.assertAlmostEqual(e, 0.1903885587959659, 6)
e,v = mycc.eaccsd(nroots=3)
self.assertAlmostEqual(e[0], 0.1903885587959659, 6)
self.assertAlmostEqual(e[1], 0.2833972143749155, 6)
self.assertAlmostEqual(e[2], 0.5222497886685452, 6)
myeom = eom_rccsd.EOMEA(mycc)
lv = myeom.eaccsd(nroots=3, left=True)[1]
e = myeom.eaccsd_star_contract(e, v, lv)
self.assertAlmostEqual(e[0], 0.18931289565459147, 6)
self.assertAlmostEqual(e[1], 0.28204643613789027, 6)
self.assertAlmostEqual(e[2], 0.457836723621172 , 6)
def test_df_eaccsd_koopmans(self):
e,v = mycc.eaccsd(nroots=3, koopmans=True)
self.assertAlmostEqual(e[0], 0.19038860392603385, 6)
self.assertAlmostEqual(e[1], 0.28339727115722535, 6)
self.assertAlmostEqual(e[2], 1.0215547528836946 , 6)
e,v = mycc.eaccsd(nroots=3, guess=v[:3])
self.assertAlmostEqual(e[0], 0.19038860392603385, 6)
self.assertAlmostEqual(e[1], 0.28339727115722535, 6)
self.assertAlmostEqual(e[2], 1.0215547528836946 , 6)
def test_df_eaccsd_partition(self):
e,v = mycc.eaccsd(nroots=3, partition='mp')
self.assertAlmostEqual(e[0], 0.19324341795558322, 6)
self.assertAlmostEqual(e[1], 0.28716776030933833, 6)
self.assertAlmostEqual(e[2], 0.90836050326011419, 6)
e,v = mycc.eaccsd(nroots=3, partition='full')
self.assertAlmostEqual(e[0], 0.18750981070399036, 6)
self.assertAlmostEqual(e[1], 0.27959207345640869, 6)
self.assertAlmostEqual(e[2], 0.57042043243953111, 6)
def test_df_eeccsd(self):
e,v = mycc.eeccsd(nroots=1)
self.assertAlmostEqual(e, 0.28107576276117063, 6)
e,v = mycc.eeccsd(nroots=4)
self.assertAlmostEqual(e[0], 0.28107576276117063, 6)
self.assertAlmostEqual(e[1], 0.28107576276117063, 6)
self.assertAlmostEqual(e[2], 0.28107576276117063, 6)
self.assertAlmostEqual(e[3], 0.30810935900155312, 6)
def test_df_eeccsd_koopmans(self):
e,v = mycc.eeccsd(nroots=4, koopmans=True)
self.assertAlmostEqual(e[0], 0.28107576276117063, 6)
self.assertAlmostEqual(e[1], 0.28107576276117063, 6)
self.assertAlmostEqual(e[2], 0.28107576276117063, 6)
self.assertAlmostEqual(e[3], 0.30810935900155312, 6)
e,v = mycc.eeccsd(nroots=4, guess=v[:4])
self.assertAlmostEqual(e[0], 0.28107576276117063, 6)
self.assertAlmostEqual(e[1], 0.28107576276117063, 6)
self.assertAlmostEqual(e[2], 0.28107576276117063, 6)
self.assertAlmostEqual(e[3], 0.30810935900155312, 6)
def test_df_eomee_ccsd_matvec_singlet(self):
numpy.random.seed(10)
r1 = numpy.random.random((no,nv)) - .9
r2 = numpy.random.random((no,no,nv,nv)) - .9
r2 = r2 + r2.transpose(1,0,3,2)
myeom = eom_rccsd.EOMEESinglet(mycc1)
vec = myeom.amplitudes_to_vector(r1,r2)
imds = myeom.make_imds(eris1)
vec1 = myeom.matvec(vec, imds)
r1, r2 = myeom.vector_to_amplitudes(vec1)
self.assertAlmostEqual(lib.finger(r1), -11001.96269563921, 8)
self.assertAlmostEqual(lib.finger(r2), 10145.408880409095, 8)
def test_df_eomee_ccsd_matvec_triplet(self):
numpy.random.seed(10)
r1 = numpy.random.random((no,nv)) - .9
r2 = numpy.random.random((2,no,no,nv,nv)) - .9
r2[0] = r2[0] - r2[0].transpose(0,1,3,2)
r2[0] = r2[0] - r2[0].transpose(1,0,2,3)
r2[1] = r2[1] - r2[1].transpose(1,0,3,2)
myeom = eom_rccsd.EOMEETriplet(mycc1)
vec = myeom.amplitudes_to_vector(r1, r2)
imds = myeom.make_imds(eris1)
vec1 = myeom.matvec(vec, imds)
r1, r2 = myeom.vector_to_amplitudes(vec1)
self.assertAlmostEqual(lib.finger(r1 ), 214.90035498814302, 9)
self.assertAlmostEqual(lib.finger(r2[0]), 37033.183886562998, 8)
self.assertAlmostEqual(lib.finger(r2[1]), 4164.1657912277242, 8)
def test_df_eomsf_ccsd_matvec(self):
numpy.random.seed(10)
r1 = numpy.random.random((no,nv)) - .9
r2 = numpy.random.random((2,no,no,nv,nv)) - .9
myeom = eom_rccsd.EOMEESpinFlip(mycc1)
vec = myeom.amplitudes_to_vector(r1,r2)
imds = myeom.make_imds(eris1)
vec1 = myeom.matvec(vec, imds)
r1, r2 = myeom.vector_to_amplitudes(vec1)
self.assertAlmostEqual(lib.finger(r1 ), 1929.9270950777639, 8)
self.assertAlmostEqual(lib.finger(r2[0]), 15571.714806853948, 8)
self.assertAlmostEqual(lib.finger(r2[1]),-12949.619613624538, 8)
def test_df_eomee_diag(self):
vec1S, vec1T, vec2 = eom_rccsd.EOMEE(mycc1).get_diag()
self.assertAlmostEqual(lib.finger(vec1S), 213.16715890265095, 9)
self.assertAlmostEqual(lib.finger(vec1T),-857.23800705535234, 9)
self.assertAlmostEqual(lib.finger(vec2) , 14.360296355284504, 9)
if __name__ == "__main__":
print("Full Tests for DFCCSD")
unittest.main()
|
gkc1000/pyscf
|
pyscf/cc/test/test_dfccsd.py
|
Python
|
apache-2.0
| 9,940
|
[
"PySCF"
] |
484f12f9a7bd0ab39ead153a4fdd03e01e0677a20c2cbeb8af1598118db4d066
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RawIncomingEmail'
db.create_table(u'mailit_rawincomingemail', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'mailit', ['RawIncomingEmail'])
def backwards(self, orm):
# Deleting model 'RawIncomingEmail'
db.delete_table(u'mailit_rawincomingemail')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contactos.contact': {
'Meta': {'object_name': 'Contact'},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bounced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': u"orm['auth.User']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'contactos.contacttype': {
'Meta': {'object_name': 'ContactType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'mailit.bouncedmessagerecord': {
'Meta': {'object_name': 'BouncedMessageRecord'},
'bounce_text': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'outbound_message': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['nuntium.OutboundMessage']", 'unique': 'True'})
},
u'mailit.mailittemplate': {
'Meta': {'object_name': 'MailItTemplate'},
'content_html_template': ('django.db.models.fields.TextField', [], {'default': "'Hello {{ person }}: <br />\\nYou have a new message: <br />\\n<strong>subject:</strong> {{ subject }} <br />\\n<strong>content:</strong> {{ content }} <br />\\n\\n\\nIf you want to see all the other messages please visit {{ writeit_url }}.<br />\\nSeeya<br />\\n--<br /><br />\\nYou writeIt and we deliverit.'"}),
'content_template': ('django.db.models.fields.TextField', [], {'default': "'Hello {{ person }}:\\nYou have a new message:\\nsubject: {{ subject }} \\ncontent: {{ content }}\\n\\n\\nIf you want to see all the other messages please visit {{ writeit_url }}.\\nSeeya\\n--\\nYou writeIt and we deliverit.'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject_template': ('django.db.models.fields.CharField', [], {'default': "'[WriteIT] Message: %(subject)s'", 'max_length': '255'}),
'writeitinstance': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'mailit_template'", 'unique': 'True', 'to': u"orm['nuntium.WriteItInstance']"})
},
u'mailit.rawincomingemail': {
'Meta': {'object_name': 'RawIncomingEmail'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'nuntium.membership': {
'Meta': {'object_name': 'Membership'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
},
u'nuntium.message': {
'Meta': {'ordering': "['-created']", 'object_name': 'Message'},
'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'confirmated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderated': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
},
u'nuntium.outboundmessage': {
'Meta': {'object_name': 'OutboundMessage'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.Contact']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.Message']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': "'10'"})
},
u'nuntium.writeitinstance': {
'Meta': {'object_name': 'WriteItInstance'},
'allow_messages_using_form': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'autoconfirm_api_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderation_needed_in_all_messages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notify_owner_when_new_answer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writeitinstances'", 'to': u"orm['auth.User']"}),
'persons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'writeit_instances'", 'symmetrical': 'False', 'through': u"orm['nuntium.Membership']", 'to': u"orm['popit.Person']"}),
'rate_limiter': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'})
},
u'popit.apiinstance': {
'Meta': {'object_name': 'ApiInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
},
u'popit.person': {
'Meta': {'object_name': 'Person'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'popit_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['mailit']
|
TEDICpy/write-it
|
mailit/migrations/0004_auto__add_rawincomingemail.py
|
Python
|
gpl-3.0
| 11,701
|
[
"VisIt"
] |
713167e4c8e932e448a97f3b602f014b80f305b5a80147ec22842c396b56c3c2
|
import time
import json
import cgi
import threading
import os.path
import re
import sys
import logging
import lesscpy
from six import StringIO
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from http.cookies import SimpleCookie
from urllib.parse import unquote, quote
def get_cookies(headers):
"""
Convert cookies string to dict
"""
cookies_res = {}
try:
cookies = headers['Cookie'].split(';')
for cookie in cookies:
c = cookie.split('=')
cookies_res[c[0].strip()] = unquote(c[1].strip())
except Exception as e:
logging.debug('get_cookies() %s' % e)
return cookies_res
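# Worked example (sketch, not in the original source): with
# headers['Cookie'] == 'sid=abc; lang=en', get_cookies(headers)
# returns {'sid': 'abc', 'lang': 'en'}.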
def get_params(path):
"""
Convert params from path to dict
    ex: '?page=1&language=en'
"""
query_res = {}
if path.find('?') != -1:
query = path[path.find('?')+1:]
if query.find('&') != -1:
query_arr = query.split('&')
for q in query_arr:
v = q.split('=')
if len(v) == 2:
query_res[v[0].strip()] = unquote(v[1].strip())
else:
v = query.split('=')
if len(v) == 2:
query_res[v[0].strip()] = unquote(v[1].strip())
return query_res
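# Worked example (sketch): get_params('/users?page=1&language=en')
# returns {'page': '1', 'language': 'en'}; a path without '?' returns {}.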
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
request_queue_size = 1000
class Middleware:
"""
Singleton
    Abstract layer between the request and a custom handler.
    For example:
    protect auth-only pages, throttle ddos attempts, add some data (like BEFORE REQUEST)
"""
def __init__(self):
self.middlewares = []
def add(self, class_obj):
self.middlewares.append(class_obj)
def use(self, req):
req_result = req
for m in self.middlewares:
req_result = m(req_result)
if not req_result:
break
return req_result
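# Usage sketch (hypothetical middleware, not part of Thorin itself): a
# middleware takes the request and returns it, possibly modified, or returns
# None to stop the chain before any route handler runs:
#
# def require_session(req):
#     return req if req.cookies.get('sid') else None
#
# thorinService.add_middleware(require_session)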
class StaticContentHandler:
"""
Return static files: js, css, images
    CSS: by default StaticContentHandler uses LESS, but you can use raw css.
    This class can handle css files like some.css and less files like some.less.
    (prod mode) less files are compiled once, then cached in memory for as long as Thorin runs.
    (dev mode) less files are recompiled on each request.
    You can use GULP, GRUNT or another build system to merge and create less, styl, jade or something else.
"""
def __init__(self, req):
self.req = req
mimetype_res = self.mimetype()
if mimetype_res['send_reply']:
try:
ext_list = ['jpg', 'gif', 'png']
if mimetype_res['mimetype'] == 'text/css':
content = self.css()
if not content:
raise IOError
else:
if any(mimetype_res['mimetype'].find(ext) != -1 for ext in ext_list):
f = open('.'+self.req.path, 'rb')
else:
f = open('.'+self.req.path)
content = f.read()
f.close()
self.req.send_response(200)
self.req.send_header("Content-type", mimetype_res['mimetype'])
self.req.end_headers()
if any(mimetype_res['mimetype'].find(ext) != -1 for ext in ext_list):
self.req.wfile.write(content)
else:
self.req.wfile.write(bytes(content, "utf-8"))
except IOError:
self.req.send_error(404)
else:
self.req.send_error(404)
def css(self):
content = None
path = '.'+self.req.path
if os.path.isfile(path):
f = open(path)
content = f.read()
f.close()
else:
            path = re.sub(r'(\.css)', '.less', path, flags=re.IGNORECASE)
if os.path.isfile(path):
f = open(path)
f_content = f.read()
f.close()
for l in thorinService.less_list:
if l['path'] == path:
content = l['css']
break
if not content:
content = lesscpy.compile(StringIO(f_content), minify=True)
if thorinService.env.get('location') and thorinService.env['location'] == 'prod':
thorinService.less_list.append({
'path': path,
'css': content
})
return content
def mimetype(self):
mimetype = 'text/plain'
send_reply = False
if self.req.path.endswith(".html"):
mimetype = 'text/html'
send_reply = True
if self.req.path.endswith(".jpg"):
mimetype = 'image/jpg'
send_reply = True
if self.req.path.endswith(".gif"):
mimetype = 'image/gif'
send_reply = True
if self.req.path.endswith(".png"):
mimetype = 'image/png'
send_reply = True
if self.req.path.endswith(".js"):
mimetype = 'application/javascript'
send_reply = True
if self.req.path.endswith(".css"):
mimetype = 'text/css'
send_reply = True
return {
'mimetype': mimetype,
'send_reply': send_reply
}
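# Sketch: the css() caching above is only active in prod mode; an example of
# enabling it (the 'location' key is what css() checks):
# thorinService.env['location'] = 'prod'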
class Router:
"""
Singleton
Router can handle http requests like this:
GET /user/:id # in req.params you can get user_id req.params['id']
GET /user/:id? # if you add "?" this param optional
POST /event/create
PUT /events/type/:type?/page/:page you can use optional param anywhere
I was insperied expressjs(nodejs) framework and get simillar format
"""
def __init__(self):
self.routes = []
def add(self, method, path, handler, action, middleware = None):
self.routes.append({
'path': path, # route path. Ex. /user/:id
'method': method, # GET, POST, etc
'handler': handler, # controller name. Ex. IndexController
'action': action, # method name of controller (string), 'get_user'
'middleware': middleware # method or function in list. Ex. [IndexController.is_user_auth]
})
def show_error_page(self, req, code):
req.send_response(code)
req.send_header("Content-type", "text/html")
req.end_headers()
try:
f = open(thorinService.error_folder+'/'+str(code)+'.html')
html = f.read()
req.wfile.write(bytes(html, "utf-8"))
f.close()
except:
pass
def get_params(self, path, route_path):
"""
get all values from path
return dict { param_name: value, ... }
"""
def get_clean_key(key):
            return re.sub(r'\?', '', key).strip()
params = {}
path = re.sub(':', ':', path)
path_list = path.split('/')
route_path_list = route_path.split('/')
index = 0
for r in route_path_list:
if r.find(':') != -1:
key = get_clean_key(r[1:])
try:
params[key] = path_list[index]
except IndexError:
pass
index += 1
return params
def is_param_in_another_route(self, index, param):
res = False
for r in self.routes:
try:
path = r['path'].split('/')
if path[index] == param:
res = True
break
except:
pass
return res
def get_current_route(self, req):
""" find and get current route """
current_route = None
params = {}
        req.path = re.sub(r'\:', ':', req.path)
if len(req.path) > 1 and req.path[-1:] == '/':
req.path = req.path[:-1]
for route in self.routes:
found = True
# if route equal path (doesn`t has params in route)
if req.path == route['path'] and req.command == route['method']:
current_route = route
break
# if route has params
elif ':' in route['path']:
route_path = route['path'].split('/')
req_path = req.path.split('/')
req_path_index = 0
for route_param in route_path:
try:
# route has optional param
if '?' in route_param:
continue
elif route_param != req_path[req_path_index]:
if ':' not in route_param:
found = False
break
else:
if self.is_param_in_another_route(req_path_index, req_path[req_path_index]):
found = False
break
req_path_index += 1
except Exception as e:
logging.debug('Route error %s' % e)
found = False
break
# found route and method(get,post,etc)
if found and req.command == route['method']:
current_route = route
break
if current_route:
logging.debug('current_route %s %s' % (current_route, req.path))
params = self.get_params(req.path, current_route['path'])
return {
'route': current_route,
'params': params
}
def use_middlewares(self, req, original_req, current_route):
"""
        run the middlewares attached to the current route
        main feature - if (request == None) after executing a middleware,
        it's a protected middleware and we send a 403 error to the client
"""
protected = False
for mid in current_route['middleware']:
req = mid(req)
if not req:
protected = True
break
if not protected:
r = current_route['handler'](req)
getattr(r, current_route['action'])()
else:
self.show_error_page(original_req, 403)
def start_handler(self, req):
if not req:
return None
# save original request
        # if a middleware returns None, our request would be overridden
original_req = req
current_route = self.get_current_route(req)
if current_route['route']:
req.params = current_route['params']
if not current_route['route']['middleware']:
r = current_route['route']['handler'](req)
getattr(r, current_route['route']['action'])()
else:
self.use_middlewares(req, original_req, current_route['route'])
else:
self.show_error_page(original_req, 404)
def handler(self, req):
"""
        first method called from the MainHandler class.
        we create a new thread to process the client request;
        each request gets its own thread.
"""
t = threading.Thread(target=self.start_handler, args=(req,))
t.start()
        # wait for the handler thread to finish processing the request
t.join()
class MainHandler(BaseHTTPRequestHandler):
""" Using BaseHTTPRequestHandler from default Python3 box """
def __init__(self, request, client_address, server):
"""
override default baseHTTP info
add some variables like: cookies, query, path, etc.
"""
self.server_version = 'Thorin/1.0.3'
self.request_version = 'HTTP/1.1'
self.sys_version = ''
self.response_time = time.time()
self.cookies = {}
self.query = {}
self.path = {}
self.remote_ip = ''
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def add_request_data(self, s):
# Get real ip from headers
if s.headers.get(thorinService.remote_real_ip_header):
s.remote_ip = s.headers[thorinService.remote_real_ip_header]
# Convert cookies to dict
s.cookies = get_cookies(s.headers)
# Convert params to dict
s.query = get_params(s.path)
# Remove params from request path.
        # Because we want a clean path to match against the paths defined in routes
if s.path.find('?') != -1:
s.path = s.path[0:s.path.find('?')]
def do_GET(self):
self.add_request_data(self)
# if this static folder. Call StaticContentHandler
if self.path.find(thorinService.static_folder) == 0:
StaticContentHandler(self)
else:
router.handler(middleware.use(self))
def do_POST(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def do_PUT(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def do_PATCH(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def do_DELETE(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def log_message(self, format, *args):
self.response_time = round(time.time() - self.response_time, 3)
logging.info('%s - [%s] %s - %sms' % (self.remote_ip, self.log_date_time_string(), format%args, self.response_time))
class ThorinServer:
""" Main Init Server Class """
def __init__(self):
self.my_server = None
def start(self, host_name='localhost', port_number='9000'):
""" start listen host:port """
self.host_name = host_name
self.port_number = port_number
# start threaded server. Each request processing by new thread.
# start MainHandler class
self.my_server = ThreadedHTTPServer((self.host_name, self.port_number), MainHandler)
logging.info("%s Server Starts - %s:%s" % (time.asctime(), self.host_name, self.port_number))
try:
self.my_server.serve_forever()
except KeyboardInterrupt:
pass
self.my_server.server_close()
logging.info("%s Server Stops - %s:%s" % (time.asctime(), self.host_name, self.port_number))
class ThorinUtils:
"""
    custom controllers extend this class,
    like this:
class IndexController(ThorinUtils):
def __init__(self):
...
...
ThorinUtils can:
post_data - return post data from forms, POST ajax, etc.
send - return template with data, json format or text/html, etc.
    redirect - redirect user to another page
set_cookie - set cookie :)
remove_cookie - delete cookie :)
"""
def __init__(self, req):
self.req = req
self.cookies_list = []
def post_data(self):
""" post_data return data from Forms, post ajax, etc """
form = cgi.FieldStorage(
fp=self.req.rfile,
headers=self.req.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.req.headers['Content-Type'],
}
)
return form
def send(self, data={}, code=200, content_type='text/html', path_to_template=''):
""" send data with template to client or send text/html, application/json """
# send cookies
self.req.send_response(code)
self.req.send_header("Content-type", content_type)
for c in self.cookies_list:
self.req.send_header('Set-Cookie', c.output(header=''))
self.req.end_headers()
try:
if content_type == 'text/html':
                # if you have connected a templater such as Jinja2
if thorinService.t_engine and thorinService.t_engine_render:
                    # static_data holds environment variables, paths to css, js, etc
data['static_data'] = thorinService.static_data
# Access to cookies. You can call cookie variable in template
data['cookies'] = self.req.cookies
# Access to params. All sended params you can show in template
data['params'] = self.req.params
result_data = thorinService.t_engine_render(path_to_template, data)
self.req.wfile.write(bytes(result_data, "utf-8"))
else:
# send raw text/html data
# example: '<b>hello</b>'
self.req.wfile.write(bytes(data, "utf-8"))
elif content_type == 'application/json':
# send json string to client
json_str = json.dumps(data, ensure_ascii=False)
self.req.wfile.write(bytes(json_str, "utf-8"))
except BrokenPipeError as e:
print('########################################')
logging.debug('BrokenPipeError. Connection was broken. %s' % e)
def redirect(self, url):
""" redirect to another page """
self.req.send_response(301)
for c in self.cookies_list:
self.req.send_header('Set-Cookie', c.output(header=''))
self.req.send_header('Location', url)
self.req.end_headers()
def set_cookie(self, name, value, path='/', expires='Wed, 13 Jan 2020 10:00:00 GMT'):
""" set cookie with SimpleCookie() standart python3 class """
c = SimpleCookie()
c[name] = quote(value)
c[name]['path'] = path
c[name]['expires'] = expires
self.cookies_list.append(c)
def remove_cookie(self, name):
c = SimpleCookie()
c[name] = 'deleted'
c[name]['expires'] = 'Thu, 01 Jan 1970 00:00:00 GMT'
self.cookies_list.append(c)
class ThorinService:
"""
Singleton
wrapper for creating middlewares and routes
storage for db connection, env variables, path to static folder, etc
"""
def __init__(self):
# you can save all env right here or use self.glob
self.env = {}
# default language project
self.lang = 'ru'
# database dict. You can create many different links to DB
# example:
        # thorinService.db['mysql'] = connect_to_mysql()
        # thorinService.db['mongo'] = connect_to_mongo()
self.db = {}
        # if you are working behind Nginx you should specify the real user ip.
        # the nginx directive which is responsible for the ip address:
        # proxy_set_header X-Real-IP $remote_addr;
        # Why "X-Real-IP" - I don't know. I took this a long time ago from a stackoverflow discussion
self.remote_real_ip_header = 'X-Real-IP'
# you can set any variable in dict static_data
self.static_data = {
'domain': ''
}
# storage for global variables
        # the project serphi.com stores cached events, prices, etc. there
self.glob = {}
# path to static folder (js, css, fonts)
self.static_folder = '/static'
# template engine
self.t_engine = None
# template engine render
self.t_engine_render = None
# less (css) files list
self.less_list = []
# error folder
# example: 404.html, 502.html, etc
self.error_folder = './templates/errors'
# settings for cookies
self.cookies = {
'httpOnly': True,
'Secure': False
}
# wrapper to add middleware
def add_middleware(self, class_obj):
middleware.add(class_obj)
# wrapper to add route
def add_route(self, method, path, handler, action, middleware = None):
router.add(method, path, handler, action, middleware)
router = Router()
middleware = Middleware()
thorinService = ThorinService()
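# Usage sketch (hypothetical controller and route; Thorin ships none itself):
#
# class IndexController(ThorinUtils):
#     def get_user(self):
#         # ':id' from the route is exposed by the Router as req.params['id']
#         self.send({'id': self.req.params['id']},
#                   content_type='application/json')
#
# thorinService.add_route('GET', '/user/:id', IndexController, 'get_user')
# ThorinServer().start('localhost', 9000)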
|
philsitumorang/thorin
|
thorin/server.py
|
Python
|
mit
| 19,933
|
[
"GULP"
] |
8056ff271ccbc17f7babf739380c53791d01ae1cdd57bbd7b4b8d6b7320a069e
|
#!/usr/bin/env python
############################################################
from vtk import *
############################################################
# Create pole for camera aim and polar axes pole
pole = [1., 12., 3.]
# Create camera
camera = vtkCamera()
camera.SetClippingRange( 1.0, 100.0 )
camera.SetFocalPoint( pole )
camera.SetPosition( 10., 10., 13. )
# Create cylinder
cylinder = vtkCylinderSource()
cylinder.SetRadius( 6. )
cylinder.SetCenter( 1., 2., 3. )
cylinder.SetHeight( 15 )
cylinder.SetResolution( 32 )
cylinder.Update()
# Create mappers for surface and wireframe representations
mapperS = vtkPolyDataMapper()
mapperS.SetInputConnection( cylinder.GetOutputPort() )
mapperS.SetResolveCoincidentTopologyPolygonOffsetParameters( 0, 1 )
mapperS.SetResolveCoincidentTopologyToPolygonOffset()
mapperW = vtkPolyDataMapper()
mapperW.SetInputConnection( cylinder.GetOutputPort() )
mapperW.SetResolveCoincidentTopologyPolygonOffsetParameters( 1, 1 )
mapperW.SetResolveCoincidentTopologyToPolygonOffset()
# Create actor for surface representation
surfactor = vtkActor()
surfactor.SetMapper( mapperS )
surfactor.GetProperty().SetColor( .89, .66, .41 )
surfactor.SetOrigin( pole )
surfactor.RotateX( 90. )
# Create actor for wireframe representation
wireactor = vtkActor()
wireactor.SetMapper( mapperW )
wireactor.GetProperty().SetColor( .1, .1 , .1 )
wireactor.GetProperty().SetRepresentationToWireframe()
wireactor.SetOrigin( pole )
wireactor.RotateX( 90. )
# Create renderer
renderer = vtkRenderer()
renderer.SetActiveCamera( camera )
renderer.GradientBackgroundOn()
renderer.SetBackground( .2, .2 ,.2 )
renderer.SetBackground2( .8, .8 ,.8 )
# Create polar axes
polaxes = vtkPolarAxesActor()
polaxes.SetPole( pole )
polaxes.SetAutoScaleRadius( 0 )
polaxes.SetMaximumRadius( 4.5 )
polaxes.SetMinimumAngle( -60. )
polaxes.SetMaximumAngle( 210. )
polaxes.SetNumberOfRadialAxes( 10 )
polaxes.AutoSubdividePolarAxisOff()
polaxes.SetNumberOfPolarAxisTicks( 5 )
polaxes.SetCamera( renderer.GetActiveCamera() )
polaxes.SetPolarLabelFormat( "%6.1f" )
polaxes.GetRadialAxesProperty().SetColor( .0, .0, 1. )
polaxes.GetPolarArcsProperty().SetColor( 1., .0, 0. )
polaxes.GetPolarAxisProperty().SetColor( 0., 0, 0. )
polaxes.GetPolarAxisTitleTextProperty().SetColor( 0., 0., 0. )
polaxes.GetPolarAxisLabelTextProperty().SetColor( 0., 0., 0. )
polaxes.SetEnableDistanceLOD( 1 )
polaxes.SetDistanceLODThreshold( .6 )
polaxes.SetEnableViewAngleLOD( 1 )
polaxes.SetViewAngleLODThreshold( .4 )
polaxes.SetScreenSize( 8. )
# Create render window
window = vtkRenderWindow()
renderer.AddViewProp( surfactor )
renderer.AddViewProp( wireactor )
renderer.AddViewProp( polaxes )
window.AddRenderer( renderer )
window.SetSize( 500, 500 )
# Create interactor
interactor = vtkRenderWindowInteractor()
interactor.SetRenderWindow( window )
# Start interaction
window.Render()
polaxes.SetMinimumAngle( 40. )
polaxes.SetMaximumAngle( 220. )
polaxes.SetNumberOfRadialAxes( 10 )
polaxes.SetNumberOfPolarAxisTicks( 10 )
interactor.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/LIC/Python/CylinderAndPolarAxes.py
|
Python
|
bsd-3-clause
| 3,133
|
[
"VTK"
] |
dc3c43c6a0715c97ba65fb9ea03c36a718caeeb927c4a8993caa85c8362910fb
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Andrew I Baznikin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Alex Roitman, largely based on relationship.py by Don Allingham
# and on valuable input from Frode Jemtland
"""
Norwegian-Specific classes for relationships.
"""
from __future__ import unicode_literals
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
import gramps.gen.relationship
#-------------------------------------------------------------------------
_cousin_level = [ "", "", #brother/sister, fetter/kusine -- these are taken care of separately
"tremenning", "firemenning", "femmenning",
"seksmenning", "sjumenning", "åttemenning",
"nimenning", "timenning", "elvemenning",
"tolvmenning", "tretenmenning", "fjortenmenning",
"femtenmenning", "sekstenmenning", "syttenmenning",
"attenmenning", "nittenmenning", "tyvemenning" ]
_cousin_terms = _cousin_level + ["fetter", "kusine"]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
def __init__(self):
gramps.gen.relationship.RelationshipCalculator.__init__(self)
def get_parents(self, level):
if level == 0:
return "forelder"
else:
return "ane i %d-te generationen" % (level+1)
def get_cousin(self, level):
if level > len(_cousin_level)-1:
            # FIXME: use the proper Norwegian term here.
            # UPDATED by Frode: unsure about the expression "en fjern slektning", should it be just "fjern slektning"?
            # Need to see it used in the program to get the understanding.
return "en fjern slektning"
else:
return _cousin_level[level]
def pair_up(self, rel_list):
result = []
item = ""
for word in rel_list[:]:
if not word:
continue
if word in _cousin_terms:
if item:
result.append(item)
item = ""
result.append(word)
continue
if item:
if word == 'sønne':
word = 'sønn'
result.append(item + word)
item = ""
else:
item = word
if item:
result.append(item)
gen_result = [ item + 's' for item in result[0:-1] ]
return ' '.join(gen_result+result[-1:])
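    # Worked example (sketch): pair_up(['far', 'far', 'bror']) pairs the two
    # ancestor words into 'farfar', keeps the trailing 'bror', and joins all
    # but the last element with a genitive 's' -> 'farfars bror'.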
def get_direct_ancestor(self, person, rel_string):
result = []
for ix in range(len(rel_string)):
if rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
return self.pair_up(result)
def get_direct_descendant(self, person, rel_string):
result = []
for ix in range(len(rel_string)-2, -1, -1):
if rel_string[ix] == 'f':
result.append('sønne')
else:
result.append('datter')
if person == Person.MALE:
result.append('sønn')
else:
result.append('datter')
return self.pair_up(result)
def get_ancestors_cousin(self, person, rel_string_long, rel_string_short):
result = []
removed = len(rel_string_long)-len(rel_string_short)
level = len(rel_string_short)-1
for ix in range(removed):
if rel_string_long[ix] == 'f':
result.append('far')
else:
result.append('mor')
if level > 1:
result.append(self.get_cousin(level))
elif person == Person.MALE:
result.append('fetter')
else:
result.append('kusine')
main_string = self.get_two_way_rel(person, rel_string_short, rel_string_long)
aux_string = self.pair_up(result)
return "%s (%s)" % (main_string, aux_string)
def get_cousins_descendant(self, person, rel_string_long, rel_string_short):
result = []
aux_string = ""
removed = len(rel_string_long)-len(rel_string_short)-1
level = len(rel_string_short)-1
if level > 1: # Cousin terms without gender
result.append(self.get_cousin(level))
elif level == 1: # gender-dependent fetter/kusine
if rel_string_long[removed] == 'f':
result.append('fetter')
else:
result.append('kusine')
elif rel_string_long[removed] == 'f':
result.append('bror')
else:
result.append('søster')
for ix in range(removed-1, -1, -1):
if rel_string_long[ix] == 'f':
result.append('sønn')
else:
result.append('datter')
if person == Person.MALE:
result.append('sønn')
else:
result.append('datter')
main_string = self.get_two_way_rel(person, rel_string_long, rel_string_short)
if level:
aux_string = " (%s)" % self.pair_up(result)
return "%s%s" % (main_string, aux_string)
def get_ancestors_brother(self, rel_string):
result = []
for ix in range(len(rel_string)-1):
if rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
result.append('bror')
return self.pair_up(result)
def get_ancestors_sister(self, rel_string):
result = []
for ix in range(len(rel_string)-1):
if rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
result.append('søster')
return self.pair_up(result)
def get_two_way_rel(self, person, first_rel_string, second_rel_string):
result = []
for ix in range(len(second_rel_string)-1):
if second_rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
if len(first_rel_string)>1:
if first_rel_string[-2] == 'f':
result.append('bror')
else:
result.append('søster')
for ix in range(len(first_rel_string)-3, -1, -1):
if first_rel_string[ix] == 'f':
result.append('sønne')
else:
result.append('datter')
if person == Person.MALE:
result.append('sønn')
else:
result.append('datter')
else:
if person == Person.MALE:
result.append('bror')
else:
result.append('søster')
return self.pair_up(result)
def get_relationship(self,
secondRel, firstRel, orig_person, other_person):
common = ""
if not firstRel:
if not secondRel:
return ('', common)
else:
return (self.get_direct_ancestor(other_person, secondRel), common)
elif not secondRel:
return (self.get_direct_descendant(other_person, firstRel), common)
elif len(firstRel) == 1:
if other_person == Person.MALE:
return (self.get_ancestors_brother(secondRel), common)
else:
return (self.get_ancestors_sister(secondRel), common)
elif len(secondRel) >= len(firstRel):
return (self.get_ancestors_cousin(other_person, secondRel, firstRel), common)
else:
return (self.get_cousins_descendant(other_person, firstRel, secondRel), common)
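    # Dispatch sketch: the rel strings are 'f'/'m' paths from the common
    # ancestor; e.g. firstRel == '' and secondRel == 'ff' takes the direct
    # ancestor branch and yields 'farfar' (paternal grandfather).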
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
        return self.get_relationship(reltocommon_a, reltocommon_b, gender_a, gender_b)[0]
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
return self.get_two_way_rel(gender_b, "", "")
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_no.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gramps.gen.relationship import test
RC = RelationshipCalculator()
test(RC, True)
|
pmghalvorsen/gramps_branch
|
gramps/plugins/rel/rel_no.py
|
Python
|
gpl-2.0
| 9,799
|
[
"Brian"
] |
1af2a0ffb80982bec274a81234d9b22f3f749f69118fdc77af584807f1100c85
|
import pysam
import pandas as pd
import collections
import copy
class granges:
    def __init__(self, lengths = None):
        # Read in chromosome lengths. Check for None first so the
        # endswith() calls below never see a missing argument.
        if lengths is None:
            self.lengths = None
        elif lengths.endswith('.fa') or lengths.endswith('.fasta'):
            in_file = pysam.FastaFile(lengths)
            self.lengths = collections.OrderedDict(
                zip(in_file.references, in_file.lengths)
            )
            in_file.close()
        elif lengths.endswith('.bam'):
            # pysam exposes BAM files through AlignmentFile (pysam.BamFile
            # does not exist)
            in_file = pysam.AlignmentFile(lengths, 'rb')
            self.lengths = collections.OrderedDict(
                zip(in_file.references, in_file.lengths)
            )
            in_file.close()
        else:
            # Treat anything else as a two-column text file of name/length;
            # split() splits on any whitespace, unlike split('\s') which
            # splits on the literal characters backslash and s
            self.lengths = collections.OrderedDict()
            with open(lengths) as in_file:
                for line in in_file:
                    chrom, length = line.strip().split()[0:2]
                    self.lengths[chrom] = int(length)
if self.lengths:
self.intervals = collections.OrderedDict()
for chrom in self.lengths:
self.intervals[chrom] = pd.DataFrame()
def add_bed_intervals(self, bed):
# Read in bed data and add column names
bed_data = pd.DataFrame.from_csv(path = bed, sep = '\t',
header = None, index_col = False
)
if bed_data.shape[1] == 3:
bed_data.columns = ['chr', 'start', 'end']
bed_data['strand'] = '*'
elif bed_data.shape[1] == 6:
bed_data.columns = ['chr', 'start', 'end', 'name', 'score',
'strand']
else:
raise ValueError('bed files must have 3 or 6 columns')
if (bed_data['start'] > bed_data['end']).any():
raise ValueError('start greater than end')
# Add bed data to interval list and sort
bed_data = bed_data.groupby('chr')
for chrom, chrom_data in bed_data:
if self.lengths:
if chrom not in self.lengths:
raise ValueError('chromosome not recognised')
if (chrom_data['end'].max() > self.lengths[chrom]
or chrom_data['start'].min() < 0):
raise ValueError('interval not within chromosome')
chrom_data.drop('chr', axis=1, inplace = True)
self.intervals[chrom] = pd.concat(
(self.intervals[chrom], chrom_data),
axis = 0
)
self.sort_intervals()
def sort_intervals(self):
count = 0
for chrom in self.intervals:
nrow = self.intervals[chrom].shape[0]
if nrow == 0:
continue
self.intervals[chrom].sort_values(['start', 'end'], inplace=True)
self.intervals[chrom].index = range(count, count + nrow)
count += nrow
    def merge_overlaps(self, intervals):
        # Group intervals into overlapping groups
        intervals = intervals[['start', 'end']].copy()
        intervals['separate'] = intervals['start'] >= intervals['end'].shift(1)
        intervals['group'] = intervals['separate'].cumsum() - 1
        intervals = intervals.groupby('group')
        # Create and return output data frame
        output = pd.DataFrame()
        output['start'] = intervals['start'].min()
        output['end'] = intervals['end'].max()
        return(output)
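    # Worked example (sketch): sorted start/end pairs (1,5), (3,8), (10,12)
    # give separate = [False, False, True] -> groups [-1, -1, 0], so the
    # merged output rows are (1,8) and (10,12).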
    def reduce_intervals(self, merge = False, ignore_strand = False):
        # NB: the original body here was incomplete; this reconstruction
        # reuses merge_overlaps() above for the actual merging step.
        for chrom in self.intervals:
            nrow = self.intervals[chrom].shape[0]
            if nrow == 0:
                continue
            # Merge all overlapping intervals regardless of strand
            if ignore_strand:
                merged = self.merge_overlaps(self.intervals[chrom])
                merged['strand'] = '*'
                self.intervals[chrom] = merged
            # Merge intervals on one strand at a time
            else:
                pieces = []
                for strand, strand_data in self.intervals[chrom].groupby('strand'):
                    strand_merged = self.merge_overlaps(strand_data)
                    strand_merged['strand'] = strand
                    pieces.append(strand_merged)
                self.intervals[chrom] = pd.concat(pieces, axis=0)
        self.sort_intervals()
def extract_overlaps(self, other):
output = copy.deepcopy(self)
for chrom in self.intervals:
# Extract chromosome data for both object or skip
try:
self_data = self.intervals[chrom]
other_data = other.intervals[chrom]
except KeyError:
continue
try:
self_index = self_data.index[0]
other_index = other_data.index[0]
except IndexError:
continue
# Find overlaps or advance indices
output_indices = []
output_start = []
output_end = []
while True:
try:
self_start, self_end = self_data.loc[
self_index, ['start', 'end']
]
other_start, other_end = other_data.loc[
other_index, ['start', 'end']
]
except KeyError:
break
if self_end <= other_start:
self_index += 1
elif other_end <= self_start:
other_index += 1
elif self_end > other_start:
output_indices.append(self_index)
output_start.append(max(self_start, other_start))
output_end.append(min(self_end, other_end))
self_index += 1
if len(output_indices) > 0:
output.intervals[chrom] = output.intervals[chrom].loc[
output_indices
]
output.intervals[chrom]['start'] = output_start
output.intervals[chrom]['end'] = output_end
return(output)
test1 = granges('/farm/scratch/rs-bio-lif/rabino01/Elza/genome/mm10.fa')
test1.add_bed_intervals('/farm/home/rabino01/test1.bed')
test2 = granges('/farm/scratch/rs-bio-lif/rabino01/Elza/genome/mm10.fa')
test2.add_bed_intervals('/farm/home/rabino01/test2.bed')
overlap = test1.extract_overlaps(test2)
print(overlap.intervals['chr1'])
print(test1.intervals['chr1'])
|
adam-rabinowitz/ngs_python
|
ranges/granges.py
|
Python
|
gpl-2.0
| 6,114
|
[
"pysam"
] |
d733c9720babde94299e9e5e91c776f0e0b5f32d3f11ec3b5d07dd8db602e4bc
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_applicationpersistenceprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ApplicationPersistenceProfile Avi RESTful Object
description:
- This module is used to configure ApplicationPersistenceProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
app_cookie_persistence_profile:
description:
- Specifies the application cookie persistence profile parameters.
description:
description:
- User defined description for the object.
hdr_persistence_profile:
description:
- Specifies the custom http header persistence profile parameters.
http_cookie_persistence_profile:
description:
- Specifies the http cookie persistence profile parameters.
ip_persistence_profile:
description:
- Specifies the client ip persistence profile parameters.
is_federated:
description:
- This field describes the object's replication scope.
- If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines.
- If the field is set to true, then the object is replicated across the federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.4"
name:
description:
- A user-friendly name for the persistence profile.
required: true
persistence_type:
description:
- Method used to persist clients to the same server for a duration of time or a session.
- Enum options - PERSISTENCE_TYPE_CLIENT_IP_ADDRESS, PERSISTENCE_TYPE_HTTP_COOKIE, PERSISTENCE_TYPE_TLS, PERSISTENCE_TYPE_CLIENT_IPV6_ADDRESS,
- PERSISTENCE_TYPE_CUSTOM_HTTP_HEADER, PERSISTENCE_TYPE_APP_COOKIE, PERSISTENCE_TYPE_GSLB_SITE.
- Default value when not specified in API or module is interpreted by Avi Controller as PERSISTENCE_TYPE_CLIENT_IP_ADDRESS.
required: true
server_hm_down_recovery:
description:
- Specifies behavior when a persistent server has been marked down by a health monitor.
- Enum options - HM_DOWN_PICK_NEW_SERVER, HM_DOWN_ABORT_CONNECTION, HM_DOWN_CONTINUE_PERSISTENT_SERVER.
- Default value when not specified in API or module is interpreted by Avi Controller as HM_DOWN_PICK_NEW_SERVER.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the persistence profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create an Application Persistence setting using http cookie.
avi_applicationpersistenceprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
http_cookie_persistence_profile:
always_send_cookie: false
cookie_name: My-HTTP
key:
- aes_key: ShYGZdMks8j6Bpvm2sCvaXWzvXms2Z9ob+TTjRy46lQ=
name: c1276819-550c-4adf-912d-59efa5fd7269
- aes_key: OGsyVk84VCtyMENFOW0rMnRXVnNrb0RzdG5mT29oamJRb0dlbHZVSjR1az0=
name: a080de57-77c3-4580-a3ea-e7a6493c14fd
- aes_key: UVN0cU9HWmFUM2xOUzBVcmVXaHFXbnBLVUUxMU1VSktSVU5HWjJOWmVFMTBUMUV4UmxsNk4xQmFZejA9
name: 60478846-33c6-484d-868d-bbc324fce4a5
timeout: 15
name: My-HTTP-Cookie
persistence_type: PERSISTENCE_TYPE_HTTP_COOKIE
server_hm_down_recovery: HM_DOWN_PICK_NEW_SERVER
tenant_ref: Demo
"""
RETURN = '''
obj:
description: ApplicationPersistenceProfile (api/applicationpersistenceprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
app_cookie_persistence_profile=dict(type='dict',),
description=dict(type='str',),
hdr_persistence_profile=dict(type='dict',),
http_cookie_persistence_profile=dict(type='dict',),
ip_persistence_profile=dict(type='dict',),
is_federated=dict(type='bool',),
name=dict(type='str', required=True),
persistence_type=dict(type='str', required=True),
server_hm_down_recovery=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'applicationpersistenceprofile',
set([]))
if __name__ == '__main__':
main()
|
le9i0nx/ansible
|
lib/ansible/modules/network/avi/avi_applicationpersistenceprofile.py
|
Python
|
gpl-3.0
| 7,203
|
[
"VisIt"
] |
0acdfde783027229a7231eb99accef16ecf181af44a97f3cb25dff764465cbbb
|
#!/usr/bin/python
"""Test of sayAll."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(KeyComboAction("<Shift>Tab"))
sequence.append(KeyComboAction("<Shift>Tab"))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Add"))
sequence.append(utils.AssertPresentationAction(
"KP_Add to do a SayAll",
["SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Back to the Gnome Bugzilla home page'",
"SPEECH OUTPUT: 'Bugzilla'",
"SPEECH OUTPUT: 'New bug'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Browse'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Search'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Reports'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Account'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Admin'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Help'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Logged In joanmarie.diggs@gmail.com'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Log Out'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Short Bug Search Form'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Complicated Bug Search Form'",
"SPEECH OUTPUT: 'Give me some help'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '(reloads page.)'",
"SPEECH OUTPUT: 'form'",
"SPEECH OUTPUT: 'table with 5 rows 4 columns'",
"SPEECH OUTPUT: 'Summary:'",
"SPEECH OUTPUT: 'row header'",
"SPEECH OUTPUT: 'contains all of the words/strings'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Search'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Classification:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 8 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Product:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 379 items'",
"SPEECH OUTPUT: 'Component'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 1248 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Version:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 857 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Target Milestone:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 555 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'A Comment:'",
"SPEECH OUTPUT: 'row header'",
"SPEECH OUTPUT: 'contains the string'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Whiteboard:'",
"SPEECH OUTPUT: 'row header'",
"SPEECH OUTPUT: 'contains all of the words/strings'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Keywords'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'row header'",
"SPEECH OUTPUT: 'contains all of the keywords'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Status:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'UNCONFIRMED'",
"SPEECH OUTPUT: 'NEW'",
"SPEECH OUTPUT: 'ASSIGNED'",
"SPEECH OUTPUT: 'REOPENED'",
"SPEECH OUTPUT: 'NEEDINFO'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 8 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Resolution:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 12 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Severity:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 7 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Priority:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 5 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'OS:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 21 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'Email and Numbering'",
"SPEECH OUTPUT: 'panel'",
"SPEECH OUTPUT: 'Any one of:'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'checked'",
"SPEECH OUTPUT: 'the bug assignee'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'not checked'",
"SPEECH OUTPUT: 'the reporter'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'not checked'",
"SPEECH OUTPUT: 'the QA contact'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'not checked'",
"SPEECH OUTPUT: 'a CC list member'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'not checked'",
"SPEECH OUTPUT: 'a commenter'",
"SPEECH OUTPUT: 'contains'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Any one of:'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'checked'",
"SPEECH OUTPUT: 'the bug assignee'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'checked'",
"SPEECH OUTPUT: 'the reporter'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'checked'",
"SPEECH OUTPUT: 'the QA contact'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'checked'",
"SPEECH OUTPUT: 'a CC list member'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'not checked'",
"SPEECH OUTPUT: 'a commenter'",
"SPEECH OUTPUT: 'contains'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'Only include'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'bugs numbered:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: '(comma-separated list)'",
"SPEECH OUTPUT: 'leaving panel.'",
"SPEECH OUTPUT: 'Bug Changes'",
"SPEECH OUTPUT: 'panel'",
"SPEECH OUTPUT: 'Only bugs changed between:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'and'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Now'",
"SPEECH OUTPUT: '(YYYY-MM-DD or relative dates)'",
"SPEECH OUTPUT: 'where one or more of the following changed:'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 26 items'",
"SPEECH OUTPUT: 'and the new value was:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'leaving panel.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'GNOME version:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 14 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'GNOME target:'",
"SPEECH OUTPUT: 'column header'",
"SPEECH OUTPUT: 'multi-select'",
"SPEECH OUTPUT: 'List with 12 items'",
"SPEECH OUTPUT: 'leaving table.'",
"SPEECH OUTPUT: 'Sort results by:'",
"SPEECH OUTPUT: 'Reuse same sort as last time'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'Search'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'not checked'",
"SPEECH OUTPUT: 'and remember these as my default search options'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'Advanced Searching Using Boolean Charts:'",
"SPEECH OUTPUT: 'check box'",
"SPEECH OUTPUT: 'not checked'",
"SPEECH OUTPUT: 'Not (negate this whole chart)'",
"SPEECH OUTPUT: '---'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: '---'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Or'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'And'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'Add another boolean chart'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'leaving form.'",
"SPEECH OUTPUT: 'form'",
"SPEECH OUTPUT: 'Saved Searches:'",
"SPEECH OUTPUT: 'My Bugs and Patches'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'All Orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Firefox'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'open orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Open RFEs'",
"SPEECH OUTPUT: 'link'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
GNOME/orca
|
test/keystrokes/firefox/say_all_bugzilla_search.py
|
Python
|
lgpl-2.1
| 9,712
|
[
"ORCA"
] |
2177a2bd0dc025eedab3d7a1ee15fb89099be613efad5641ea96f39b7abf442b
|
# Copyright (C) 2001-2021 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""basic unit testing code for the molecule boost wrapper
"""
import unittest, os
import pickle
from rdkit import RDConfig
from rdkit import Chem
class TestCase(unittest.TestCase):
def setUp(self):
self.bigSmiList = [
"CC1=CC(=O)C=CC1=O",
"S(SC1=NC2=CC=CC=C2S1)C3=NC4=C(S3)C=CC=C4",
"OC1=C(Cl)C=C(C=C1[N+]([O-])=O)[N+]([O-])=O",
"[O-][N+](=O)C1=CNC(=N)S1",
"NC1=CC2=C(C=C1)C(=O)C3=C(C=CC=C3)C2=O",
"OC(=O)C1=C(C=CC=C1)C2=C3C=CC(=O)C(=C3OC4=C2C=CC(=C4Br)O)Br",
"CN(C)C1=C(Cl)C(=O)C2=C(C=CC=C2)C1=O",
"CC1=C(C2=C(C=C1)C(=O)C3=CC=CC=C3C2=O)[N+]([O-])=O",
"CC(=NO)C(C)=NO",
"C1=CC=C(C=C1)P(C2=CC=CC=C2)C3=CC=CC=C3",
"CC(C)(C)C1=C(O)C=C(C(=C1)O)C(C)(C)C",
"CC1=NN(C(=O)C1)C2=CC=CC=C2",
"NC1=CC=NC2=C1C=CC(=C2)Cl",
"CCCCCC[CH]1CCCCN1",
"O=CC1=C2C=CC=CC2=CC3=C1C=CC=C3",
"BrN1C(=O)CCC1=O",
"CCCCCCCCCCCCCCCC1=C(N)C=CC(=C1)O",
"C(COC1=C(C=CC=C1)C2=CC=CC=C2)OC3=CC=CC=C3C4=CC=CC=C4",
"CCCCSCC",
"CC(=O)NC1=NC2=C(C=C1)C(=CC=N2)O",
"CC1=C2C=CC(=NC2=NC(=C1)O)N",
"CCOC(=O)C1=CN=C2N=C(N)C=CC2=C1O",
"CC1=CC(=NC=C1)N=CC2=CC=CC=C2",
"C[N+](C)(C)CC1=CC=CC=C1",
"C[N+](C)(C)C(=O)C1=CC=CC=C1",
"ICCC(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3",
"CC1=CC(=C(C[N+](C)(C)C)C(=C1)C)C",
"C[C](O)(CC(O)=O)C1=CC=C(C=C1)[N+]([O-])=O",
"CC1=CC=C(C=C1)C(=O)C2=CC=C(Cl)C=C2",
"ON=CC1=CC=C(O)C=C1",
"CC1=CC(=C(N)C(=C1)C)C",
"CC1=CC=C(C=C1)C(=O)C2=CC=C(C=C2)[N+]([O-])=O",
"CC(O)(C1=CC=CC=C1)C2=CC=CC=C2",
"ON=CC1=CC(=CC=C1)[N+]([O-])=O",
"OC1=C2C=CC(=CC2=NC=C1[N+]([O-])=O)Cl",
"CC1=CC=CC2=NC=C(C)C(=C12)Cl",
"CCC(CC)([CH](OC(N)=O)C1=CC=CC=C1)C2=CC=CC=C2",
"ON=C(CC1=CC=CC=C1)[CH](C#N)C2=CC=CC=C2",
"O[CH](CC1=CC=CC=C1)C2=CC=CC=C2",
"COC1=CC=C(CC2=CC=C(OC)C=C2)C=C1",
"CN(C)[CH](C1=CC=CC=C1)C2=C(C)C=CC=C2",
"COC1=CC(=C(N)C(=C1)[N+]([O-])=O)[N+]([O-])=O",
"NN=C(C1=CC=CC=C1)C2=CC=CC=C2",
"COC1=CC=C(C=C1)C=NO",
"C1=CC=C(C=C1)C(N=C(C2=CC=CC=C2)C3=CC=CC=C3)C4=CC=CC=C4",
"C1=CC=C(C=C1)N=C(C2=CC=CC=C2)C3=CC=CC=C3",
"CC1=C(C2=CC=CC=C2)C(=C3C=CC=CC3=N1)O",
"CCC1=[O+][Cu]2([O+]=C(CC)C1)[O+]=C(CC)CC(=[O+]2)CC",
"OC(=O)[CH](CC1=CC=CC=C1)C2=CC=CC=C2",
"CCC1=C(N)C=C(C)N=C1",
]
def _testPkl10(self):
" testing 5k molecule pickles "
with open('%s/NCI/first_5K.smi' % (RDConfig.RDDataDir), 'r') as inF:
smis = [line.split('\t')[0] for line in inF]
for smi in smis:
m = Chem.MolFromSmiles(smi)
newM1 = pickle.loads(pickle.dumps(m))
newSmi1 = Chem.MolToSmiles(newM1)
newM2 = pickle.loads(pickle.dumps(newM1))
newSmi2 = Chem.MolToSmiles(newM2)
assert newM1.GetNumAtoms() == m.GetNumAtoms(), 'num atoms comparison failed'
assert newM2.GetNumAtoms() == m.GetNumAtoms(), 'num atoms comparison failed'
assert len(newSmi1) > 0, 'empty smi1'
assert len(newSmi2) > 0, 'empty smi2'
assert newSmi1 == newSmi2, 'string compare failed:\n%s\n\t!=\n%s\norig smiles:\n%s' % (
newSmi1, newSmi2, smi)
def testPkl1(self):
" testing single molecule pickle "
m = Chem.MolFromSmiles('CCOC')
outS = Chem.MolToSmiles(m)
m2 = pickle.loads(pickle.dumps(m))
outS2 = Chem.MolToSmiles(m2)
assert outS == outS2, "bad pickle: %s != %s" % (outS, outS2)
def testPkl2(self):
""" further pickle tests """
smis = self.bigSmiList
for smi in smis:
m = Chem.MolFromSmiles(smi)
newM1 = pickle.loads(pickle.dumps(m))
newM2 = pickle.loads(pickle.dumps(newM1))
oldSmi = Chem.MolToSmiles(newM1)
newSmi = Chem.MolToSmiles(newM2)
assert newM1.GetNumAtoms() == m.GetNumAtoms(), 'num atoms comparison failed'
assert newM2.GetNumAtoms() == m.GetNumAtoms(), 'num atoms comparison failed'
assert oldSmi == newSmi, 'string compare failed: %s != %s' % (oldSmi, newSmi)
def testPkl(self):
" testing molecule pickle "
import tempfile
fd, self.fName = tempfile.mkstemp('.pkl')
os.close(fd)  # close the low-level handle; the file is reopened by name below
self.m = Chem.MolFromSmiles('CC(=O)CC')
outF = open(self.fName, 'wb+')
pickle.dump(self.m, outF)
outF.close()
inF = open(self.fName, 'rb')
m2 = pickle.load(inF)
inF.close()
try:
os.unlink(self.fName)
except Exception:
pass
oldSmi = Chem.MolToSmiles(self.m)
newSmi = Chem.MolToSmiles(m2)
assert oldSmi == newSmi, 'string compare failed'
def testRings(self):
" testing SSSR handling "
m = Chem.MolFromSmiles('OC1C(O)C2C1C(O)C2O')
for i in range(m.GetNumAtoms()):
at = m.GetAtomWithIdx(i)
n = at.GetAtomicNum()
if n == 8:
assert not at.IsInRingSize(4), 'atom %d improperly in ring' % (i)
else:
assert at.IsInRingSize(4), 'atom %d not in ring of size 4' % (i)
@unittest.skipIf(not hasattr(Chem, "MolToJSON"), "MolInterchange support not enabled")
def testJSON1(self):
""" JSON test1 """
for smi in self.bigSmiList:
m = Chem.MolFromSmiles(smi)
json = Chem.MolToJSON(m)
nm = Chem.JSONToMols(json)[0]
self.assertEqual(Chem.MolToSmiles(m), Chem.MolToSmiles(nm))
@unittest.skipIf(not hasattr(Chem, "MolToJSON"), "MolInterchange support not enabled")
def testJSON2(self):
""" JSON test2 """
ms = [Chem.MolFromSmiles(smi) for smi in self.bigSmiList]
json = Chem.MolsToJSON(ms)
nms = Chem.JSONToMols(json)
#for nm in nms:
# Chem.SanitizeMol(nm)
self.assertEqual(len(ms), len(nms))
smis1 = [Chem.MolToSmiles(x) for x in ms]
smis2 = [Chem.MolToSmiles(x) for x in nms]
for i, (smi1, smi2) in enumerate(zip(smis1, smis2)):
if smi1 != smi2:
print(self.bigSmiList[i])
print(smi1)
print(smi2)
print("-------")
self.assertEqual(smis1, smis2)
def testGithub4144(self):
""" github4144: EnumerateStereiosomers clearing ring info """
from rdkit.Chem import EnumerateStereoisomers
m = Chem.MolFromSmiles('CSCc1cnc(C=Nn2c(C)nc3sc4c(c3c2=O)CCCCC4)s1')
sssr = [list(x) for x in Chem.GetSymmSSSR(m)]
sms = EnumerateStereoisomers.EnumerateStereoisomers(m)
for sm in sms:
sssr2 = [list(x) for x in Chem.GetSymmSSSR(sm)]
self.assertEqual(sssr, sssr2)
if __name__ == '__main__':
unittest.main()
|
greglandrum/rdkit
|
rdkit/Chem/UnitTestChem.py
|
Python
|
bsd-3-clause
| 6,638
|
[
"RDKit"
] |
dfcb365a226d4833254ce95ec04fdf3be340b98cd4af9e0b7e9f51738421386a
|
import os
from paraview.simple import OpenDataFile, RenameSource, Show
# Directory where OpenFOAM writes sampled surfaces, one subdirectory per time
surfdir = "postProcessing/surfaces"
times = sorted(os.listdir(surfdir))
# Assume every time directory contains the same set of surface files
files = os.listdir(os.path.join(surfdir, times[0]))
for f in files:
# Open each surface as a file series spanning all time directories
file_list = [os.path.join(surfdir, t, f) for t in times]
OpenDataFile(file_list)
RenameSource(f.replace(".vtk", ""))
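# Usage note (illustrative, not in the original script): run from the top of an
# OpenFOAM case directory, e.g. inside ParaView's Python shell or via pvpython.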
|
petebachant/foamPy
|
scripts/pvloadsurf.py
|
Python
|
mit
| 345
|
[
"ParaView",
"VTK"
] |
89941a69ec955e56b871ecb844619f76c765376baab4d9301b05d029f13078d7
|
from __future__ import division
import tornado.ioloop
import tornado.web
import tornado.httputil
import tornado.gen
import tornado.escape
import numpy as np
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
from netCDF4 import Dataset
import datetime
import glob
import io
import os
import pyproj
import sys
from scipy import interpolate
from scipy.interpolate import interp2d
from scipy import ndimage as nd
from mpl_toolkits.basemap import Basemap
from PIL import Image, ImageDraw
import shapefile
#import osgeo
import ConfigParser
from mappy.WMS import get_capabilities
from mappy.WMS import WMSGetMapRequest
from mappy.WMS.style import StyleReader
from mappy.Data import DataCollection
TEST_NC_FILE = '/share/data/gwrf/fc_northsea/netcdf/gwrf2016071100/wrf.ns.24km.*'
TEST_VAR = 'mean_sea_level_pressure'
TEST_VAR = 'water_temperature'
TEST_SHAPEFILE = '/share/data/GEOG/gshhs/GSHHS_shp/i/GSHHS_i_L1.shp'
class Server(object):
def __init__(self):
self.contact_person = 'Dave Sproson'
self.contact_organization = 'A Company'
self.contact_position = 'A job title'
self.address = 'Street Address'
self.city = 'Anyton'
self.state_or_province = 'Someshire'
self.postcode = 'AB12 1AB'
self.country = 'UK'
self.contact_voice_telephone = '0123456789'
self.contact_electronic_mail_address = 'My@Email.com'
self.fees = 'None'
self.access_constraints = 'Commercial and Restricted'
self.ip_address = 'fgwfcluster3'
self.port = '8888'
self.wms_version = '1.3.0'
self.projections = dict()
self.__init_projections()
def __init_projections(self):
config = ConfigParser.SafeConfigParser()
config.read('projections.ini')
for section in config.sections():
if section == 'projections':
for key, value in config.items(section):
self.projections.update({key.replace('_',':'): value})
print self.projections
server = Server()
styles = StyleReader('styles.ini').styles
def PIL2np(img):
return np.array(img.getdata(),
np.uint8).reshape(img.size[1], img.size[0], 3)
def bboxes_intersect(b1, b2):
def intervals_intersect(x1, x2):
return x1[1] >= x2[0] and x2[1] >= x1[0]
return (intervals_intersect((b1[0], b1[2]), (b2[0], b2[2])) and
intervals_intersect((b1[1], b1[3]), (b2[1], b2[3])))
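# Quick sanity check (illustrative, not part of the original source); boxes are
# given as (minx, miny, maxx, maxy):
# bboxes_intersect((0, 0, 2, 2), (1, 1, 3, 3))  # -> True (overlapping boxes)
# bboxes_intersect((0, 0, 1, 1), (2, 2, 3, 3))  # -> False (disjoint boxes)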
def mask_data(lon, lat, data, proj, shapes, layer):
# if layer.mask is not None:
# print "USING SAVED MASK"
# return np.ma.masked_where(np.logical_or(layer.mask>0, np.isnan(data)), data)
# Size of the domain in the requested CRS
latlon = pyproj.Proj('+proj=latlong +a=6378137 +b=6378137')
llon, llat = pyproj.transform(proj, latlon, lon, lat)
lon = lon[0, :]
lat = lat[:, 0]
llon = llon[0, :]
llat = llat[:, 0]
# print "lon = {}".format(lon)
# print('lat = {}'.format(lat))
# print "llon = {}".format(llon)
# print('llat = {}'.format(llat))
xdist = lon[-1] - lon[0] #bbox[2] - bbox[0]
ydist = lat[-1] - lat[0] #bbox[3] - bbox[1]
bbox = [llon[0], llat[0], llon[-1], llat[-1]]
# print(bbox)
# Image width & height
iwidth = np.shape(data)[1]
iheight = np.shape(data)[0]
xratio = iwidth/xdist
yratio = iheight/ydist
pixels = []
img = Image.new("RGB", (iwidth, iheight), "white")
draw = ImageDraw.Draw(img)
def nearest(array, val):
temp = np.abs(array - val)
return temp.tolist().index(np.min(temp))
for shape in shapes:
if not bboxes_intersect(shape.bbox, bbox):
continue
pixels = []
for p_x, p_y in shape.points:
x, y = pyproj.transform(latlon, proj, p_x, p_y)
# px = int(iwidth - ((bbox[2] - x) * xratio))
px = int(iwidth - ((lon[-1] - x) * xratio))
# px = nearest(lon, x)
# py = int((bbox[3] - y) * yratio)
py = int((lat[-1] - y) * yratio)
# py = nearest(lat, y)
pixels.append((px,py))
draw.polygon(pixels, outline="rgb(0,0,0)", fill="rgb(0,0,0)")
mdata = np.flipud(np.mean(PIL2np(img), axis=2).astype(int))
# plt.pcolor(mdata)
# plt.show()
layer.mask = mdata
data = np.ma.masked_where(np.logical_or(mdata>0, np.isnan(data)), data)
return data
class Layer(object):
def __init__(self, data_source=None, crop=False, crop_inverse=False,
crop_file=None, colormap=None, refine_data=0,
gshhs_resolution=None, var_name=None,
native_projection=None, style=None, enable_time=False):
self.data_source = data_source
self.crop = crop
self.var_name = var_name
self.crop_inverse = crop_inverse
self.crop_file = crop_file
self.colormap = colormap
self.refine_data = refine_data
self.gshhs_resolution = gshhs_resolution
self.native_projection = pyproj.Proj('+init=EPSG:4326')
self.shapes = []
self.style = style
self.enable_time = enable_time
self.mask = None
self.bbox = [-20.358, 39.419, 35.509, 64.749]
if crop and crop_inverse:
raise ValueError('crop and crop_inverse cannot both be True')
def set_shapes(self):
nc = Dataset(glob.glob(TEST_NC_FILE)[0])
lat = nc['latitude'][:]
lon = nc['longitude'][:]
nc.close()
bbox = [lon[0], lat[0], lon[-1], lat[-1]]
r = shapefile.Reader(TEST_SHAPEFILE)
for shape in r.shapes():
if not(bboxes_intersect(shape.bbox, bbox)):
continue
self.shapes.append(shape)
print "In-memory caching of shapefiles for layer..."
print "Size: {}".format(sys.getsizeof(self.shapes))
print "Shapes: {}".format(len(self.shapes))
data = DataCollection(file_glob=TEST_NC_FILE,
lat_var='latitude', lon_var='longitude',
elevation_var='elevation', time_var='time',
data_type='netcdf')
test_layer = Layer(crop=False, refine_data=0, gshhs_resolution='i',
var_name=TEST_VAR, style=styles[2],
data_source=data, enable_time=True)
layers = [test_layer]
def refine_data(lon, lat, f, refine):
lon = lon[0, :]
lat = lat[:, 0]
dlon = lon[1] - lon[0]
dlat = lat[1] - lat[0]
lat_hi = np.arange(lat[0],lat[-1],dlat/refine)
lon_hi = np.arange(lon[0],lon[-1],dlon/refine)
nx = len(lon_hi)
ny = len(lat_hi)
a = np.array(f.mask).astype(int)
f[np.isnan(f)] = 100000
ipol = interp2d(lon, lat, f)
apol = interp2d(lon, lat, a)
f = ipol(lon_hi, lat_hi)
a = apol(lon_hi, lat_hi)
f = np.ma.masked_where(a>.2, f)
lon_hi, lat_hi = np.meshgrid(lon_hi, lat_hi)
return lon_hi, lat_hi, f
def crop_to_bbox(lon, lat, data, bbox, nx=200, ny=200):
# lati = np.where(np.logical_and(lat>=bbox[0]-1, lat<=bbox[2]+1))[0]
# loni = np.where(np.logical_and(lon>=bbox[1]-1, lon<=bbox[3]+1))[0]
# lon = lon[loni[0]:loni[-1]]
# lat = lat[lati[0]:lati[-1]]
# data = data[lati[0]:lati[-1], loni[0]:loni[-1]]
# return lon, lat, data
def __fill(data):
invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid,
return_distances=False,
return_indices=True)
return data[tuple(ind)]
lat_min = bbox[0]
lat_max = bbox[2]
lon_min = bbox[1]
lon_max = bbox[3]
a = np.array(data.mask).astype(int)
print a
a[:, 0] = 1
a[-1, :] = 1
print(a)
data = __fill(data)
# data[np.isnan(data)] = 9e99
dlat = (lat_max - lat_min) / ny
dlon = (lon_max - lon_min) / nx
lat_hi = np.arange(lat_min, lat_max + dlat, dlat)
lon_hi = np.arange(lon_min, lon_max + dlon, dlon)
ipol = interp2d(lon, lat, data)
apol = interp2d(lon, lat, a)
data_hi = ipol(lon_hi, lat_hi)
mask_hi = apol(lon_hi, lat_hi)
data_hi = np.ma.masked_where(mask_hi>.2, data_hi)
return lon_hi, lat_hi, data_hi
@tornado.gen.coroutine
def render(layer, width=100, height=100, request=None):
# Get the lat/lon variables for the dataset
nc = Dataset(glob.glob(TEST_NC_FILE)[0], 'r')
lon = np.squeeze(nc['longitude'][:])
lat = np.squeeze(nc['latitude'][:])
nc.close()
# Get the data from the layer's data_source
w = layer.data_source.get_data_layer(var_name=TEST_VAR,
time=datetime.datetime(2016,7,11)+datetime.timedelta(hours=100))
print "*** shape(data) = {}".format(np.shape(w))
# lon, lat = np.meshgrid(lon,lat)
# Save out some useful stuff from the request
bbox = request.bbox # BBOX in the requested CRS
wgs84_bbox = request.wgs84_bbox # BBOX in WGS84 CRS
crs = request.crs # The requested CRS
# Initialise pyproj projections for the requested and WGS84 CRS's
proj_to = pyproj.Proj(server.projections[crs.lower()])
wgs84 = pyproj.Proj(server.projections['epsg:4326'])
latlon = pyproj.Proj('+proj=latlong +a=6378137 +b=6378137')
latlon_bbox = [0, 0, 0, 0]
latlon_bbox[0], latlon_bbox[2] = pyproj.transform(wgs84, latlon, wgs84_bbox[0], wgs84_bbox[2])
latlon_bbox[1], latlon_bbox[3] = pyproj.transform(wgs84, latlon, wgs84_bbox[1], wgs84_bbox[3])
# Build a bounding box for the data (assume this is in WGS84)
# lon = lon[0, :]
# lat = lat[:, 0]
data_bbox = [lon[0], lat[0], lon[-1], lat[-1]]
# lon,lat = np.meshgrid(lon,lat)
# Supersample the data, if requested
# if layer.refine_data:
# lon, lat, w = refine_data(lon, lat, w, layer.refine_data)
# if True:
# lon, lat, w = crop_to_bbox(lon, lat, w, wgs84_bbox, nx=50, ny=50)
if True: #layer.crop or layer.crop_inverse:
if not layer.shapes:
layer.set_shapes()
# w = mask_data(lon[0, :], lat[:, 0], w, layer.shapes)
# Reproject to requested CRS
if crs != 'EPSG:4326':
lon, lat = np.meshgrid(lon, lat)
p_lon, p_lat = pyproj.transform(wgs84, proj_to, lon, lat)
minx, miny = pyproj.transform(wgs84, proj_to, lon[0], lat[0])
maxx, maxy = pyproj.transform(wgs84, proj_to, lon[-1], lat[-1])
else:
# p_lon, p_lat = lon[0, :], lat[:, 0]
minx, miny = lon[0], lat[0]
maxx, maxy = lon[-1], lat[-1]
p_lon, p_lat = np.meshgrid(lon, lat)
# if layer.crop:
# w = mask_data(p_lon[0, :], p_lat[:, 0], w, layer.shapes, wgs84, proj_to, layer)
if True:
lon, lat, w = crop_to_bbox(p_lon[0, :], p_lat[:, 0], w, bbox, nx=400, ny=200)
p_lon, p_lat = np.meshgrid(lon, lat)
if True: #layer.crop:
w = mask_data(p_lon, p_lat, w, proj_to, layer.shapes, layer)
fig = plt.figure(frameon=False)
# fig.set_size_inches(width/100, height/100)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
print "***w = {}".format(np.shape(w))
function_args = {}
for key, value in layer.style.render_args.iteritems():
if 'fn:' in value:
fn = ''.join(value.split(':')[1:])
value = eval(fn)
function_args.update({key: value})
print function_args
# lon, lat = np.meshgrid(lon, lat)
getattr(plt, layer.style.render_function)(
p_lon, p_lat, w, **function_args)
# plt.pcolormesh(p_lon, p_lat, w)
if '+proj=longlat' in server.projections[crs.lower()]:
ax.set_xlim([bbox[0], bbox[2]])
ax.set_ylim([bbox[1], bbox[3]])
else:
ax.set_xlim([bbox[1], bbox[3]])
ax.set_ylim([bbox[0], bbox[2]])
# print " ...done"
memdata = io.BytesIO()
plt.savefig(memdata, format='png', dpi=100, transparent=True)
plt.close()
image = memdata.getvalue()
memdata.close()
print "request complete"
raise tornado.gen.Return(image)
def nan_helper(y):
return np.isnan(y), lambda z: z.nonzero()[0]
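# Typical use of nan_helper (illustrative, not part of the original source):
# nans, idx = nan_helper(y)
# y[nans] = np.interp(idx(nans), idx(~nans), y[~nans])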
class MainHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self):
s = datetime.datetime.utcnow()
print self.request.query_arguments
try:
request = self.get_argument('REQUEST')
except:
request = self.get_argument('request')
if request == 'GetCapabilities':
capes = get_capabilities(server, layers)
self.set_header('Content-type', 'text/xml')
self.write(capes)
return
if request == 'GetMap':
map_request = WMSGetMapRequest(**self.request.query_arguments)
image = yield render(test_layer,
width=map_request.width,
height=map_request.height,
request=map_request)
self.set_header('Content-type', 'image/png')
self.write(image)
print (datetime.datetime.utcnow() - s).total_seconds()
def make_app():
return tornado.web.Application([
(r'/wms', MainHandler),
(r'', MainHandler),
])
if __name__ == '__main__':
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
|
davesproson/mappy
|
app.py
|
Python
|
gpl-3.0
| 13,410
|
[
"NetCDF"
] |
9b2344e64a5241428710bd7e422b59bb81660cb558a8271e4067bc8296a0b7b5
|
import numpy
import mcfit
from scipy.interpolate import InterpolatedUnivariateSpline
from .power.zeldovich import ZeldovichPower
NUM_PTS = 1024
def xi_to_pk(r, xi, ell=0, extrap=False):
r"""
Return a callable function returning the power spectrum multipole of degree
:math:`\ell`, as computed from the Fourier transform of the input :math:`r`
and :math:`\xi_\ell(r)` arrays.
This uses the :mod:`mcfit` package to perform the FFT.
Parameters
----------
r : array_like
separation values where ``xi`` is evaluated
xi : array_like
the array holding the correlation function multipole values
ell : int
multipole degree of the input correlation function and the output power
spectrum; monopole by default
extrap : bool, optional
whether to extrapolate the power spectrum with a power law; can improve
the smoothness of the FFT
Returns
-------
InterpolatedUnivariateSpline :
a spline holding the interpolated power spectrum values
"""
P = mcfit.xi2P(r, l=ell)
kk, Pk = P(xi, extrap=extrap)
return InterpolatedUnivariateSpline(kk, Pk)
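# Illustrative usage (a sketch, not from the original source; `r_arr` and
# `xi_arr` are hypothetical inputs):
# r_arr = numpy.logspace(-2, 3, 1024)
# xi_arr = (r_arr / 5.0) ** -1.8   # a toy power-law correlation function
# Pk = xi_to_pk(r_arr, xi_arr, ell=0)
# Pk(0.1)                          # interpolated P(k) at k = 0.1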
def pk_to_xi(k, Pk, ell=0, extrap=True):
r"""
Return a callable function returning the correlation function multipole of
degree :math:`\ell`, as computed from the Fourier transform of the input
:math:`k` and :math:`P_\ell(k)` arrays.
This uses the :mod:`mcfit` package to perform the FFT.
Parameters
----------
k : array_like
wavenumbers where ``Pk`` is evaluated
Pk : array_like
the array holding the power spectrum multipole values
ell : int
multipole degree of the input power spectrum and the output correlation
function; monopole by default
extrap : bool, optional
whether to extrapolate the power spectrum with a power law; can improve
the smoothness of the FFT
Returns
-------
InterpolatedUnivariateSpline :
a spline holding the interpolated correlation function values
"""
xi = mcfit.P2xi(k, l=ell)
rr, CF = xi(Pk, extrap=extrap)
return InterpolatedUnivariateSpline(rr, CF)
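# Illustrative usage (a sketch, not from the original source; `k_arr` and
# `Pk_arr` are hypothetical inputs):
# k_arr = numpy.logspace(-5, 1, 1024)
# Pk_arr = 1e4 * k_arr / (1.0 + (k_arr / 0.1) ** 3)   # a toy P(k) shape
# xi = pk_to_xi(k_arr, Pk_arr, ell=0)
# xi(10.0)                                            # interpolated xi(r) at r = 10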
class CorrelationFunction(object):
"""
Evaluate the correlation function by Fourier transforming
a power spectrum object, with automatic re-scaling with redshift and sigma8.
Parameters
----------
power : callable
a callable power spectrum that returns the power at a given ``k``;
this should have ``redshift``, ``sigma8``, and ``cosmo`` attributes
"""
def __init__(self, power):
self.power = power
# check required attributes
for attr in ['redshift', 'sigma8', 'cosmo']:
if not hasattr(power, attr):
raise AttributeError("input power spectrum object must have a ``%s`` attribute" %attr)
# meta-data
self._attrs = {}
self._attrs.update(getattr(self.power, 'attrs', {}))
@property
def attrs(self):
self._attrs['redshift'] = self.redshift
self._attrs['sigma8'] = self.sigma8
return self._attrs
@property
def redshift(self):
return self.power.redshift
@redshift.setter
def redshift(self, value):
self.power.redshift = value
@property
def sigma8(self):
return self.power.sigma8
@sigma8.setter
def sigma8(self, value):
self.power.sigma8 = value
@property
def cosmo(self):
return self.power.cosmo
def __call__(self, r, smoothing=0., kmin=1e-5, kmax=10.):
"""
Return the correlation function (dimensionless) for separations ``r``
Parameters
----------
r : float, array_like
the separation array in units of :math:`h^{-1} \mathrm{Mpc}`
smoothing : float, optional
the std deviation of the Fourier space Gaussian smoothing to apply
to P(k) before taking the FFT
kmin : float, optional
the minimum ``k`` value to compute P(k) for before taking the FFT
kmax : float, optional
the maximum ``k`` value to compute P(k) for before taking the FFT
"""
k = numpy.logspace(numpy.log10(kmin), numpy.log10(kmax), NUM_PTS)
# power with smoothing
Pk = self.power(k)
Pk *= numpy.exp(-(k*smoothing)**2)
# only extrap if not zeldovich
extrap = not isinstance(self.power, ZeldovichPower)
return pk_to_xi(k, Pk, extrap=extrap)(r)
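# Illustrative usage (a sketch, not from the original source; `plin` stands in
# for any power object exposing ``redshift``, ``sigma8`` and ``cosmo``):
# cf = CorrelationFunction(plin)
# r = numpy.logspace(0, 2, 100)
# xi_of_r = cf(r, smoothing=1.0)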
|
nickhand/nbodykit
|
nbodykit/cosmology/correlation.py
|
Python
|
gpl-3.0
| 4,517
|
[
"Gaussian"
] |
4639a13f4648e647c70291d067beb7f7963e8a206332b2e2bd0ff458519b8695
|
'''
Created on Feb 1, 2017
@author: Alexandre Day
Purpose:
Perform density clustering on a Gaussian mixture
'''
from fdc import FDC
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import normalized_mutual_info_score as nmi
from fdc import plotting
import pickle
import numpy as np
from matplotlib import pyplot as plt
n_true_center = 15
np.random.seed(0)
print("------> Example with %i true cluster centers <-------"%n_true_center)
X, y = make_blobs(10000, 2, n_true_center) # Generate a random Gaussian mixture
X = StandardScaler().fit_transform(X) # always normalize your data :)
# set eta=0.0 if you have an excellent density profile fit (e.g. when you have lots of data)
model = FDC(eta = 0.01)#, atol=0.0001, rtol=0.0001)
model.fit(X) # performing the clustering
x = np.linspace(-0.5, 0.6,200)
y = 1.5*x+0.15
X_2 = np.vstack([x,y]).T
xy2 = X_2[65]
b=xy2[0]/1.5+xy2[1]
y2 = -x/1.5+b
#rho = np.exp(model.density_model.evaluate_density(X_2))
#plt.plot(rho)
#plt.show()
#exit()
plt.scatter(x, y, c="green", zorder=2)
plt.scatter(x, y2, c="green", zorder=2)
#plt.scatter(X[:, 0], X[:,1], c= model.rho, cmap="coolwarm")
plt.scatter(X[:, 0],X[:,1], c=model.cluster_label, cmap="jet", alpha=0.1)
plt.xlim([-0.7, 0.7])
plt.ylim([-0.7, 0.7])
plt.show()
""" print("Normalized mutual information = %.4f"%nmi(y, model.cluster_label))
plotting.set_nice_font() # nicer plotting font !
plotting.summary_model(model, ytrue=y, show=True, savefile="result.png")
"""
|
alexandreday/fast_density_clustering
|
example/example_test.py
|
Python
|
bsd-3-clause
| 1,525
|
[
"Gaussian"
] |
e0e490451a923b97f329859323e39e67af50386ee85d3bfdab9601bfd684e967
|