# -*- coding: utf-8 -*-
import itertools
import os
import re
import urllib
import logging
import datetime
import urlparse
from collections import OrderedDict
import warnings
import pytz
from flask import request
from django.core.urlresolvers import reverse
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from api.base.utils import absolute_reverse
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.auth import signals as auth_signals
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo, to_mongo_key, unique_on
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.utils import iso8601format
from website import language, mails, settings, tokens
from website.util import web_url_for
from website.util import api_url_for
from website.util import sanitize
from website.exceptions import (
NodeStateError,
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
)
from website.citations.utils import datetime_to_csl
from website.identifiers.model import IdentifierMixin
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS, DEFAULT_CONTRIBUTOR_PERMISSIONS, ADMIN
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project import signals as project_signals
logger = logging.getLogger(__name__)
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
def has_anonymous_link(node, auth):
"""check if the node is anonymous to the user
:param Node node: Node which the user wants to visit
:param str link: any view-only link in the current url
:return bool anonymous: Whether the node is anonymous to the user or not
"""
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
if not view_only_link:
return False
if node.is_public:
return False
return any(
link.anonymous
for link in node.private_links_active
if link.key == view_only_link
)
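# A minimal usage sketch (hypothetical key value; exact Auth constructor
# arguments may differ): given a request carrying ?view_only=<key>, decide
# whether contributor identities should be anonymized before rendering:
#
#     auth = Auth(user=None, private_key='hypothetical-link-key')
#     if has_anonymous_link(node, auth):
#         # hide contributor names in the serialized payload
#         ...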
class MetaSchema(StoredObject):
_id = fields.StringField(default=lambda: str(ObjectId()))
name = fields.StringField()
schema = fields.DictionaryField()
category = fields.StringField()
# Version of the Knockout metadata renderer to use (e.g. if data binds
# change)
metadata_version = fields.IntegerField()
# Version of the schema to use (e.g. if questions, responses change)
schema_version = fields.IntegerField()
def ensure_schemas(clear=True):
"""Import meta-data schemas from JSON to database, optionally clearing
database first.
:param clear: Clear schema database before import
"""
if clear:
try:
MetaSchema.remove()
except AttributeError:
if not settings.DEBUG_MODE:
raise
for schema in OSF_META_SCHEMAS:
try:
MetaSchema.find_one(
Q('name', 'eq', schema['name']) &
Q('schema_version', 'eq', schema['schema_version'])
)
except NoResultsFound:
schema['name'] = schema['name'].replace(' ', '_')
schema_obj = MetaSchema(**schema)
schema_obj.save()
class MetaData(GuidStoredObject):
_id = fields.StringField(primary=True)
target = fields.AbstractForeignField(backref='metadata')
data = fields.DictionaryField()
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
for key, val in value.iteritems():
if not User.load(key):
raise ValidationValueError('Keys must be user IDs')
if not isinstance(val, dict):
raise ValidationTypeError('Values must be dictionaries')
if 'category' not in val or 'text' not in val:
raise ValidationValueError(
'Values must include `category` and `text` keys'
)
class Comment(GuidStoredObject):
_id = fields.StringField(primary=True)
user = fields.ForeignField('user', required=True, backref='commented')
node = fields.ForeignField('node', required=True, backref='comment_owner')
target = fields.AbstractForeignField(required=True, backref='commented')
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
modified = fields.BooleanField()
is_deleted = fields.BooleanField(default=False)
content = fields.StringField()
# Dictionary field mapping user IDs to dictionaries of report details:
# {
# 'icpnw': {'category': 'hate', 'text': 'offensive'},
# 'cdi38': {'category': 'spam', 'text': 'godwins law'},
# }
reports = fields.DictionaryField(validate=validate_comment_reports)
@classmethod
def create(cls, auth, **kwargs):
comment = cls(**kwargs)
comment.save()
comment.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': comment.node.parent_id,
'node': comment.node._id,
'user': comment.user._id,
'comment': comment._id,
},
auth=auth,
save=False,
)
comment.node.save()
return comment
def edit(self, content, auth, save=False):
self.content = content
self.modified = True
self.node.add_log(
NodeLog.COMMENT_UPDATED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def delete(self, auth, save=False):
self.is_deleted = True
self.node.add_log(
NodeLog.COMMENT_REMOVED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def undelete(self, auth, save=False):
self.is_deleted = False
self.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def report_abuse(self, user, save=False, **kwargs):
"""Report that a comment is abuse.
:param User user: User submitting the report
:param bool save: Save changes
:param dict kwargs: Report details
:raises: ValueError if the user submitting abuse is the same as the
user who posted the comment
"""
if user == self.user:
raise ValueError('User cannot report own comment')
self.reports[user._id] = kwargs
if save:
self.save()
def unreport_abuse(self, user, save=False):
"""Revoke report of abuse.
:param User user: User who submitted the report
:param bool save: Save changes
:raises: ValueError if user has not reported comment as abuse
"""
try:
self.reports.pop(user._id)
except KeyError:
raise ValueError('User has not reported comment as abuse')
if save:
self.save()
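# A hedged sketch of the abuse-report round trip; the report details are
# hypothetical and `commenter`/`reader` stand in for real User records:
#
#     comment = Comment.create(auth, user=commenter, node=node,
#                              target=node, content='...')
#     comment.report_abuse(reader, category='spam', text='looks like spam',
#                          save=True)
#     comment.unreport_abuse(reader, save=True)
#
# Note that comment.report_abuse(commenter, ...) raises ValueError, since a
# user may not report their own comment, and the stored report must include
# `category` and `text` keys to pass validate_comment_reports on save.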
@unique_on(['params.node', '_id'])
class NodeLog(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True)
action = fields.StringField(index=True)
params = fields.DictionaryField()
should_hide = fields.BooleanField(default=False)
was_connected_to = fields.ForeignField('node', list=True)
user = fields.ForeignField('user', backref='created')
foreign_user = fields.StringField()
DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
# Log action constants -- NOTE: templates stored in log_templates.mako
CREATED_FROM = 'created_from'
PROJECT_CREATED = 'project_created'
PROJECT_REGISTERED = 'project_registered'
PROJECT_DELETED = 'project_deleted'
NODE_CREATED = 'node_created'
NODE_FORKED = 'node_forked'
NODE_REMOVED = 'node_removed'
POINTER_CREATED = 'pointer_created'
POINTER_FORKED = 'pointer_forked'
POINTER_REMOVED = 'pointer_removed'
WIKI_UPDATED = 'wiki_updated'
WIKI_DELETED = 'wiki_deleted'
WIKI_RENAMED = 'wiki_renamed'
MADE_WIKI_PUBLIC = 'made_wiki_public'
MADE_WIKI_PRIVATE = 'made_wiki_private'
CONTRIB_ADDED = 'contributor_added'
CONTRIB_REMOVED = 'contributor_removed'
CONTRIB_REORDERED = 'contributors_reordered'
PERMISSIONS_UPDATED = 'permissions_updated'
MADE_PRIVATE = 'made_private'
MADE_PUBLIC = 'made_public'
TAG_ADDED = 'tag_added'
TAG_REMOVED = 'tag_removed'
EDITED_TITLE = 'edit_title'
EDITED_DESCRIPTION = 'edit_description'
UPDATED_FIELDS = 'updated_fields'
FILE_MOVED = 'addon_file_moved'
FILE_COPIED = 'addon_file_copied'
FILE_RENAMED = 'addon_file_renamed'
FOLDER_CREATED = 'folder_created'
FILE_ADDED = 'file_added'
FILE_UPDATED = 'file_updated'
FILE_REMOVED = 'file_removed'
FILE_RESTORED = 'file_restored'
ADDON_ADDED = 'addon_added'
ADDON_REMOVED = 'addon_removed'
COMMENT_ADDED = 'comment_added'
COMMENT_REMOVED = 'comment_removed'
COMMENT_UPDATED = 'comment_updated'
MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
EXTERNAL_IDS_ADDED = 'external_ids_added'
EMBARGO_APPROVED = 'embargo_approved'
EMBARGO_CANCELLED = 'embargo_cancelled'
EMBARGO_COMPLETED = 'embargo_completed'
EMBARGO_INITIATED = 'embargo_initiated'
RETRACTION_APPROVED = 'retraction_approved'
RETRACTION_CANCELLED = 'retraction_cancelled'
RETRACTION_INITIATED = 'retraction_initiated'
REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
def __repr__(self):
return ('<NodeLog({self.action!r}, params={self.params!r}) '
'with id {self._id!r}>').format(self=self)
@property
def node(self):
"""Return the :class:`Node` associated with this log."""
return (
Node.load(self.params.get('node')) or
Node.load(self.params.get('project'))
)
@property
def tz_date(self):
'''Return the timezone-aware date.
'''
# Date should always be defined, but a few logs in production are
# missing dates; return None and log error if date missing
if self.date:
return self.date.replace(tzinfo=pytz.UTC)
logger.error('Date missing on NodeLog {}'.format(self._primary_key))
@property
def formatted_date(self):
'''Return the timezone-aware, ISO-formatted string representation of
this log's date.
'''
if self.tz_date:
return self.tz_date.isoformat()
def resolve_node(self, node):
"""A single `NodeLog` record may be attached to multiple `Node` records
(parents, forks, registrations, etc.), so the node that the log refers
to may not be the same as the node the user is viewing. Use
`resolve_node` to determine the relevant node to use for permission
checks.
:param Node node: Node being viewed
"""
if self.node == node or self.node in node.nodes:
return self.node
if node.is_fork_of(self.node) or node.is_registration_of(self.node):
return node
for child in node.nodes:
if child.is_fork_of(self.node) or child.is_registration_of(self.node):
return child
return False
def can_view(self, node, auth):
node_to_check = self.resolve_node(node)
if node_to_check:
return node_to_check.can_view(auth)
return False
def _render_log_contributor(self, contributor, anonymous=False):
user = User.load(contributor)
if not user:
return None
if self.node:
fullname = user.display_full_name(node=self.node)
else:
fullname = user.fullname
return {
'id': privacy_info_handle(user._primary_key, anonymous),
'fullname': privacy_info_handle(fullname, anonymous, name=True),
'registered': user.is_registered,
}
class Tag(StoredObject):
_id = fields.StringField(primary=True, validate=MaxLengthValidator(128))
def __repr__(self):
return '<Tag() with id {self._id!r}>'.format(self=self)
@property
def url(self):
return '/search/?tags={}'.format(self._id)
class Pointer(StoredObject):
"""A link to a Node. The Pointer delegates all but a few methods to its
contained Node. Forking and registration are overridden such that the
link is cloned, but its contained Node is not.
"""
#: Whether this is a pointer or not
primary = False
_id = fields.StringField()
node = fields.ForeignField('node', backref='_pointed')
_meta = {'optimistic': True}
def _clone(self):
if self.node:
clone = self.clone()
clone.node = self.node
clone.save()
return clone
def fork_node(self, *args, **kwargs):
return self._clone()
def register_node(self, *args, **kwargs):
return self._clone()
def use_as_template(self, *args, **kwargs):
return self._clone()
def resolve(self):
return self.node
def __getattr__(self, item):
"""Delegate attribute access to the node being pointed to."""
# Prevent backref lookups from being overridden by proxied node
try:
return super(Pointer, self).__getattr__(item)
except AttributeError:
pass
if self.node:
return getattr(self.node, item)
raise AttributeError(
'Pointer object has no attribute {0}'.format(
item
)
)
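# Attribute delegation in practice (a sketch; `project` and `component` are
# hypothetical nodes): a Pointer reads like the node it wraps, while
# fork/register/template operations clone only the link, not the node:
#
#     pointer = project.add_pointer(component, auth)
#     pointer.title == component.title   # delegated via __getattr__
#     pointer.primary                    # False: class attribute, never delegated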
def get_pointer_parent(pointer):
"""Given a `Pointer` object, return its parent node.
"""
# The `parent_node` property of the `Pointer` schema refers to the parents
# of the pointed-at `Node`, not the parents of the `Pointer`; use the
# back-reference syntax to find the parents of the `Pointer`.
parent_refs = pointer.node__parent
assert len(parent_refs) == 1, 'Pointer must have exactly one parent.'
return parent_refs[0]
def validate_category(value):
"""Validator for Node#category. Makes sure that the value is one of the
categories defined in CATEGORY_MAP.
"""
if value not in Node.CATEGORY_MAP.keys():
raise ValidationValueError('Invalid value for category.')
return True
def validate_title(value):
"""Validator for Node#title. Makes sure that the value exists and is not
above 200 characters.
"""
if value is None or not value.strip():
raise ValidationValueError('Title cannot be blank.')
if len(value) > 200:
raise ValidationValueError('Title cannot exceed 200 characters.')
return True
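# Behavior sketch for the two validators above (inputs hypothetical):
#
#     validate_title('My Project')   # returns True
#     validate_title('   ')          # raises ValidationValueError (blank)
#     validate_title('x' * 201)      # raises ValidationValueError (too long)
#     validate_category('data')      # returns True
#     validate_category('bogus')     # raises ValidationValueError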
def validate_user(value):
if value != {}:
user_id = value.iterkeys().next()
if User.find(Q('_id', 'eq', user_id)).count() != 1:
raise ValidationValueError('User does not exist.')
return True
class NodeUpdateError(Exception):
def __init__(self, reason, key=None, *args, **kwargs):
super(NodeUpdateError, self).__init__(*args, **kwargs)
self.key = key
self.reason = reason
class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin):
#: Whether this is a pointer or not
primary = True
# Node fields that trigger an update to Solr on save
SOLR_UPDATE_FIELDS = {
'title',
'category',
'description',
'visible_contributor_ids',
'tags',
'is_fork',
'is_registration',
'retraction',
'embargo',
'is_public',
'is_deleted',
'wiki_pages_current',
'is_retracted',
}
# Maps category identifier => Human-readable representation for use in
# titles, menus, etc.
# Use an OrderedDict so that menu items show in the correct order
CATEGORY_MAP = OrderedDict([
('', 'Uncategorized'),
('project', 'Project'),
('hypothesis', 'Hypothesis'),
('methods and measures', 'Methods and Measures'),
('procedure', 'Procedure'),
('instrumentation', 'Instrumentation'),
('data', 'Data'),
('analysis', 'Analysis'),
('communication', 'Communication'),
('other', 'Other'),
])
WRITABLE_WHITELIST = [
'title',
'description',
'category',
]
_id = fields.StringField(primary=True)
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True)
# Privacy
is_public = fields.BooleanField(default=False, index=True)
# User mappings
permissions = fields.DictionaryField()
visible_contributor_ids = fields.StringField(list=True)
# Project Organization
is_dashboard = fields.BooleanField(default=False, index=True)
is_folder = fields.BooleanField(default=False, index=True)
# Expanded: Dictionary field mapping user IDs to expand state of this node:
# {
# 'icpnw': True,
# 'cdi38': False,
# }
expanded = fields.DictionaryField(default={}, validate=validate_user)
is_deleted = fields.BooleanField(default=False, index=True)
deleted_date = fields.DateTimeField(index=True)
is_registration = fields.BooleanField(default=False, index=True)
registered_date = fields.DateTimeField(index=True)
registered_user = fields.ForeignField('user', backref='registered')
registered_schema = fields.ForeignField('metaschema', backref='registered')
registered_meta = fields.DictionaryField()
registration_approval = fields.ForeignField('registrationapproval')
retraction = fields.ForeignField('retraction')
embargo = fields.ForeignField('embargo')
is_fork = fields.BooleanField(default=False, index=True)
forked_date = fields.DateTimeField(index=True)
title = fields.StringField(validate=validate_title)
description = fields.StringField()
category = fields.StringField(validate=validate_category, index=True)
# One of 'public', 'private'
# TODO: Add validator
comment_level = fields.StringField(default='private')
wiki_pages_current = fields.DictionaryField()
wiki_pages_versions = fields.DictionaryField()
# Dictionary field mapping node wiki page to sharejs private uuid.
# {<page_name>: <sharejs_id>}
wiki_private_uuids = fields.DictionaryField()
file_guid_to_share_uuids = fields.DictionaryField()
creator = fields.ForeignField('user', backref='created')
contributors = fields.ForeignField('user', list=True, backref='contributed')
users_watching_node = fields.ForeignField('user', list=True, backref='watched')
logs = fields.ForeignField('nodelog', list=True, backref='logged')
tags = fields.ForeignField('tag', list=True, backref='tagged')
# Tags for internal use
system_tags = fields.StringField(list=True)
nodes = fields.AbstractForeignField(list=True, backref='parent')
forked_from = fields.ForeignField('node', backref='forked', index=True)
registered_from = fields.ForeignField('node', backref='registrations', index=True)
# The node (if any) used as a template for this node's creation
template_node = fields.ForeignField('node', backref='template_node', index=True)
piwik_site_id = fields.StringField()
# Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for
# {<User.id>: [<Node._id>, <Node2._id>, ...] }
child_node_subscriptions = fields.DictionaryField(default=dict)
_meta = {
'optimistic': True,
}
def __init__(self, *args, **kwargs):
super(Node, self).__init__(*args, **kwargs)
if kwargs.get('_is_loaded', False):
return
if self.creator:
self.contributors.append(self.creator)
self.set_visible(self.creator, visible=True, log=False)
# Add default creator permissions
for permission in CREATOR_PERMISSIONS:
self.add_permission(self.creator, permission, save=False)
def __repr__(self):
return ('<Node(title={self.title!r}, category={self.category!r}) '
'with _id {self._id!r}>').format(self=self)
# For Django compatibility
@property
def pk(self):
return self._id
@property
def category_display(self):
"""The human-readable representation of this node's category."""
return self.CATEGORY_MAP[self.category]
@property
def sanction(self):
sanction = self.registration_approval or self.embargo or self.retraction
if sanction:
return sanction
elif self.parent_node:
return self.parent_node.sanction
else:
return None
@property
def is_pending_registration(self):
if not self.is_registration:
return False
if self.registration_approval is None:
if self.parent_node:
return self.parent_node.is_pending_registration
return False
return self.registration_approval.pending_approval
@property
def is_registration_approved(self):
if self.registration_approval is None:
if self.parent_node:
return self.parent_node.is_registration_approved
return False
return self.registration_approval.is_approved
@property
def is_retracted(self):
if self.retraction is None:
if self.parent_node:
return self.parent_node.is_retracted
return False
return self.retraction.is_approved
@property
def is_pending_retraction(self):
if self.retraction is None:
if self.parent_node:
return self.parent_node.is_pending_retraction
return False
return self.retraction.pending_approval
@property
def embargo_end_date(self):
if self.embargo is None:
if self.parent_node:
return self.parent_node.embargo_end_date
return False
return self.embargo.embargo_end_date
@property
def is_pending_embargo(self):
if self.embargo is None:
if self.parent_node:
return self.parent_node.is_pending_embargo
return False
return self.embargo.pending_approval
@property
def is_pending_embargo_for_existing_registration(self):
""" Returns True if Node has an Embargo pending approval for an
existing registrations. This is used specifically to ensure
registrations pre-dating the Embargo feature do not get deleted if
their respective Embargo request is rejected.
"""
if self.embargo is None:
if self.parent_node:
return self.parent_node.is_pending_embargo_for_existing_registration
return False
return self.embargo.pending_registration
@property
def private_links(self):
return self.privatelink__shared
@property
def private_links_active(self):
return [x for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_active(self):
return [x.key for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_deleted(self):
return [x.key for x in self.private_links if x.is_deleted]
def path_above(self, auth):
parents = self.parents
return '/' + '/'.join([p.title if p.can_view(auth) else '-- private project --' for p in reversed(parents)])
@property
def ids_above(self):
parents = self.parents
return {p._id for p in parents}
@property
def nodes_active(self):
return [x for x in self.nodes if not x.is_deleted]
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this node.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this node.
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
if auth:
is_api_node = auth.api_node == self
else:
is_api_node = False
return (
(user and self.has_permission(user, 'write'))
or is_api_node
)
def active_contributors(self, include=lambda n: True):
for contrib in self.contributors:
if contrib.is_active and include(contrib):
yield contrib
def is_admin_parent(self, user):
if self.has_permission(user, 'admin', check_parent=False):
return True
if self.parent_node:
return self.parent_node.is_admin_parent(user)
return False
def can_view(self, auth):
if not auth and not self.is_public:
return False
return (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read')) or
auth.private_key in self.private_link_keys_active or
self.is_admin_parent(auth.user)
)
def is_expanded(self, user=None):
"""Return if a user is has expanded the folder in the dashboard view.
Must specify one of (`auth`, `user`).
:param User user: User object to check
:returns: Boolean if the folder is expanded.
"""
if user._id in self.expanded:
return self.expanded[user._id]
else:
return False
def expand(self, user=None):
self.expanded[user._id] = True
self.save()
def collapse(self, user=None):
self.expanded[user._id] = False
self.save()
def is_derived_from(self, other, attr):
derived_from = getattr(self, attr)
while True:
if derived_from is None:
return False
if derived_from == other:
return True
derived_from = getattr(derived_from, attr)
def is_fork_of(self, other):
return self.is_derived_from(other, 'forked_from')
def is_registration_of(self, other):
return self.is_derived_from(other, 'registered_from')
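# A sketch of how derivation chains resolve (hypothetical nodes): because
# is_derived_from walks the attribute chain until it hits None, a fork of a
# fork still reports descent from the original:
#
#     fork = original.fork_node(auth)
#     fork_of_fork = fork.fork_node(auth)
#     fork_of_fork.is_fork_of(original)   # True: follows forked_from twice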
@property
def forks(self):
"""List of forks of this node"""
return list(self.node__forked.find(Q('is_deleted', 'eq', False) &
Q('is_registration', 'ne', True)))
def add_permission(self, user, permission, save=False):
"""Grant permission to a user.
:param str permission: Permission to grant
:param bool save: Save changes
:raises: ValueError if user already has permission
"""
if user._id not in self.permissions:
self.permissions[user._id] = [permission]
else:
if permission in self.permissions[user._id]:
raise ValueError('User already has permission {0}'.format(permission))
self.permissions[user._id].append(permission)
if save:
self.save()
def remove_permission(self, user, permission, save=False):
"""Revoke permission from a user.
:param User user: User to revoke permission from
:param str permission: Permission to revoke
:param bool save: Save changes
:raises: ValueError if user does not have permission
"""
try:
self.permissions[user._id].remove(permission)
except (KeyError, ValueError):
raise ValueError('User does not have permission {0}'.format(permission))
if save:
self.save()
def clear_permission(self, user, save=False):
"""Clear all permissions for a user.
:param User user: User to revoke permission from
:param bool save: Save changes
:raises: ValueError if user not in permissions
"""
try:
self.permissions.pop(user._id)
except KeyError:
raise ValueError(
'User {0} not in permissions list for node {1}'.format(
user._id, self._id,
)
)
if save:
self.save()
def set_permissions(self, user, permissions, save=False):
self.permissions[user._id] = permissions
if save:
self.save()
def has_permission(self, user, permission, check_parent=True):
"""Check whether user has permission.
:param User user: User to test
:param str permission: Required permission
:returns: User has required permission
"""
if user is None:
logger.warn('User is ``None``.')
return False
if permission in self.permissions.get(user._id, []):
return True
if permission == 'read' and check_parent:
return self.is_admin_parent(user)
return False
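# Permission lookup sketch (hypothetical users and ids): permissions are
# stored per user id as expanded lists, and 'read' falls back to admin
# status on a parent node:
#
#     node.permissions == {'abc12': ['read', 'write', 'admin']}
#     node.has_permission(admin_user, 'write')   # True (in the list)
#     node.has_permission(other_user, 'read')    # True only if other_user
#                                                # is an admin on a parent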
def has_permission_on_children(self, user, permission):
"""Checks if the given user has a given permission on any child nodes
that are not registrations or deleted
"""
if self.has_permission(user, permission):
return True
for node in self.nodes:
if not node.primary or node.is_deleted:
continue
if node.has_permission_on_children(user, permission):
return True
return False
def has_addon_on_children(self, addon):
"""Checks if a given node has a specific addon on child nodes
that are not registrations or deleted
"""
if self.has_addon(addon):
return True
for node in self.nodes:
if not node.primary or node.is_deleted:
continue
if node.has_addon_on_children(addon):
return True
return False
def get_permissions(self, user):
"""Get list of permissions for user.
:param User user: User to check
:returns: List of permissions
:raises: ValueError if user not found in permissions
"""
return self.permissions.get(user._id, [])
def adjust_permissions(self):
for key in self.permissions.keys():
if key not in self.contributors:
self.permissions.pop(key)
@property
def visible_contributors(self):
return [
User.load(_id)
for _id in self.visible_contributor_ids
]
@property
def parents(self):
if self.parent_node:
return [self.parent_node] + self.parent_node.parents
return []
@property
def admin_contributor_ids(self):
contributor_ids = self.contributors._to_primary_keys()
admin_ids = set()
for parent in self.parents:
admins = [
user for user, perms in parent.permissions.iteritems()
if 'admin' in perms
]
admin_ids.update(set(admins).difference(contributor_ids))
return admin_ids
@property
def admin_contributors(self):
return sorted(
[User.load(_id) for _id in self.admin_contributor_ids],
key=lambda user: user.family_name,
)
def get_visible(self, user):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
return user._id in self.visible_contributor_ids
def update_visible_ids(self, save=False):
"""Update the order of `visible_contributor_ids`. Updating on making
a contributor visible is more efficient than recomputing order on
accessing `visible_contributors`.
"""
self.visible_contributor_ids = [
contributor._id
for contributor in self.contributors
if contributor._id in self.visible_contributor_ids
]
if save:
self.save()
def set_visible(self, user, visible, log=True, auth=None, save=False):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
if visible and user._id not in self.visible_contributor_ids:
self.visible_contributor_ids.append(user._id)
self.update_visible_ids(save=False)
elif not visible and user._id in self.visible_contributor_ids:
if len(self.visible_contributor_ids) == 1:
raise ValueError('Must have at least one visible contributor')
self.visible_contributor_ids.remove(user._id)
else:
return
message = (
NodeLog.MADE_CONTRIBUTOR_VISIBLE
if visible
else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
)
if log:
self.add_log(
message,
params={
'parent': self.parent_id,
'node': self._id,
'contributors': [user._id],
},
auth=auth,
save=False,
)
if save:
self.save()
def can_comment(self, auth):
if self.comment_level == 'public':
return auth.logged_in and (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read'))
)
return self.is_contributor(auth.user)
def update(self, fields, auth=None, save=True):
if self.is_registration:
raise NodeUpdateError(reason="Registered content cannot be updated")
values = {}
for key, value in fields.iteritems():
if key not in self.WRITABLE_WHITELIST:
continue
with warnings.catch_warnings():
warnings.simplefilter('error')  # raise warnings as errors so the handler below fires
try:
# This is in place because historically projects and components
# lived on different ElasticSearch indexes, and at the time of Node.save
# there is no reliable way to check what the old Node.category
# value was. When the category changes it is possible to have duplicate/dead
# search entries, so always delete the ES doc on category change
# TODO: consolidate Node indexes into a single index, refactor search
if key == 'category':
self.delete_search_entry()
###############
values[key] = {
'old': getattr(self, key),
'new': value,
}
setattr(self, key, value)
except AttributeError:
raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key)
except warnings.Warning:
raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key)
if save:
updated = self.save()
else:
updated = []
for key in values:
values[key]['new'] = getattr(self, key)
self.add_log(NodeLog.UPDATED_FIELDS,
params={
'node': self._id,
'updated_fields': {
key: {
'old': values[key]['old'],
'new': values[key]['new']
}
for key in values
}
},
auth=auth)
return updated
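# A hedged example of a whitelisted bulk update (field values hypothetical);
# keys outside WRITABLE_WHITELIST are silently skipped:
#
#     node.update(
#         {'title': 'New Title', 'category': 'data', 'is_public': True},
#         auth=auth,
#     )
#     # 'title' and 'category' are applied and logged as UPDATED_FIELDS;
#     # 'is_public' is ignored because it is not in WRITABLE_WHITELIST.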
def save(self, *args, **kwargs):
update_piwik = kwargs.pop('update_piwik', True)
self.adjust_permissions()
first_save = not self._is_loaded
if first_save and self.is_dashboard:
existing_dashboards = self.creator.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
if existing_dashboards.count() > 0:
raise NodeStateError("Only one dashboard allowed per user.")
is_original = not self.is_registration and not self.is_fork
suppress_log = kwargs.pop('suppress_log', False)
saved_fields = super(Node, self).save(*args, **kwargs)
if first_save and is_original and not suppress_log:
# TODO: This logic also exists in self.use_as_template()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
self.add_addon(addon.short_name, auth=None, log=False)
# Define log fields for non-component project
log_action = NodeLog.PROJECT_CREATED
log_params = {
'node': self._primary_key,
}
if getattr(self, 'parent', None):
# Append log to parent
self.parent.nodes.append(self)
self.parent.save()
log_params.update({'parent_node': self.parent._primary_key})
# Add log with appropriate fields
self.add_log(
log_action,
params=log_params,
auth=Auth(user=self.creator),
log_date=self.date_created,
save=True,
)
# Only update Solr if at least one field relevant to search has changed;
# skip private nodes unless the privacy setting itself just changed
need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
if not self.is_public:
if first_save or 'is_public' not in saved_fields:
need_update = False
if self.is_folder or self.archiving:
need_update = False
if need_update:
self.update_search()
# piwik_tasks.update_node inspects saved_fields to see what has changed.
if settings.PIWIK_HOST and update_piwik:
piwik_tasks.update_node(self._id, saved_fields)
# Return expected value for StoredObject::save
return saved_fields
######################################
# Methods that return a new instance #
######################################
def use_as_template(self, auth, changes=None, top_level=True):
"""Create a new project, using an existing project as a template.
:param auth: The user to be assigned as creator
:param changes: A dictionary of changes, keyed by node id, which
override the attributes of the template project or its
children.
:return: The `Node` instance created.
"""
changes = changes or dict()
# build the dict of attributes to change for the new node
try:
attributes = changes[self._id]
# TODO: explicitly define attributes which may be changed.
except (AttributeError, KeyError):
attributes = dict()
new = self.clone()
# clear permissions, which are not cleared by the clone method
new.permissions = {}
new.visible_contributor_ids = []
# Clear quasi-foreign fields
new.wiki_pages_current = {}
new.wiki_pages_versions = {}
new.wiki_private_uuids = {}
new.file_guid_to_share_uuids = {}
# set attributes which may be overridden by `changes`
new.is_public = False
new.description = None
# apply `changes`
for attr, val in attributes.iteritems():
setattr(new, attr, val)
# set attributes which may NOT be overridden by `changes`
new.creator = auth.user
new.template_node = self
new.add_contributor(contributor=auth.user, permissions=CREATOR_PERMISSIONS, log=False, save=False)
new.is_fork = False
new.is_registration = False
new.piwik_site_id = None
# If the title hasn't been changed, apply the default prefix (once)
if (new.title == self.title
and top_level
and language.TEMPLATED_FROM_PREFIX not in new.title):
new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
# Slight hack - date_created is a read-only field.
new._fields['date_created'].__set__(
new,
datetime.datetime.utcnow(),
safe=True
)
new.save(suppress_log=True)
# Log the creation
new.add_log(
NodeLog.CREATED_FROM,
params={
'node': new._primary_key,
'template_node': {
'id': self._primary_key,
'url': self.url,
},
},
auth=auth,
log_date=new.date_created,
save=False,
)
# add mandatory addons
# TODO: This logic also exists in self.save()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
new.add_addon(addon.short_name, auth=None, log=False)
# deal with the children of the node, if any
new.nodes = [
x.use_as_template(auth, changes, top_level=False)
for x in self.nodes
if x.can_view(auth)
]
new.save()
return new
############
# Pointers #
############
def add_pointer(self, node, auth, save=True):
"""Add a pointer to a node.
:param Node node: Node to add
:param Auth auth: Consolidated authorization
:param bool save: Save changes
:return: Created pointer
"""
# Fail if node already in nodes / pointers. Note: cast nodes and pointers
# to primary keys to test for conflicts with both nodes and pointers
# contained in `self.nodes`.
if node._id in self.node_ids:
raise ValueError(
'Pointer to node {0} already in list'.format(node._id)
)
# If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
# Also, no pointers to the dashboard project, which could cause loops as well.
already_pointed = node.pointed
if node.is_folder and len(already_pointed) > 0:
raise ValueError(
'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
)
if node.is_dashboard:
raise ValueError(
'Pointer to dashboard ({0}) not allowed.'.format(node._id)
)
# Append pointer
pointer = Pointer(node=node)
pointer.save()
self.nodes.append(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_CREATED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
return pointer
def rm_pointer(self, pointer, auth):
"""Remove a pointer.
:param Pointer pointer: Pointer to remove
:param Auth auth: Consolidated authorization
"""
if pointer not in self.nodes:
raise ValueError('Node link does not belong to the requested node.')
# Remove `Pointer` object; will also remove self from `nodes` list of
# parent node
Pointer.remove_one(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
@property
def node_ids(self):
return [
node._id if node.primary else node.node._id
for node in self.nodes
]
@property
def nodes_primary(self):
return [
node
for node in self.nodes
if node.primary
]
def node_and_primary_descendants(self):
"""Return an iterator for a node and all of its primary (non-pointer) descendants.
:param node Node: target Node
"""
return itertools.chain([self], self.get_descendants_recursive(lambda n: n.primary))
@property
def depth(self):
return len(self.parents)
def next_descendants(self, auth, condition=lambda auth, node: True):
"""
Recursively find the first set of descendants under a given node that
meet a given condition.
Returns a list of [(node, [children]), ...]
"""
ret = []
for node in self.nodes:
if condition(auth, node):
# base case
ret.append((node, []))
else:
ret.append((node, node.next_descendants(auth, condition)))
ret = [item for item in ret if item[1] or condition(auth, item[0])] # prune empty branches
return ret
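# Shape of the next_descendants return value (a sketch with hypothetical
# nodes A, B, C, where A and B are children of root, C is a child of A,
# and only C satisfies the condition):
#
#     root.next_descendants(auth, condition)
#     # => [(A, [(C, [])])]   -- B is pruned because nothing below it matched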
def get_descendants_recursive(self, include=lambda n: True):
for node in self.nodes:
if include(node):
yield node
if node.primary:
for descendant in node.get_descendants_recursive(include):
if include(descendant):
yield descendant
def get_aggregate_logs_queryset(self, auth):
ids = [self._id] + [n._id
for n in self.get_descendants_recursive()
if n.can_view(auth)]
query = Q('__backrefs.logged.node.logs', 'in', ids)
return NodeLog.find(query).sort('-_id')
@property
def nodes_pointer(self):
return [
node
for node in self.nodes
if not node.primary
]
@property
def has_pointers_recursive(self):
"""Recursively checks whether the current node or any of its nodes
contains a pointer.
"""
if self.nodes_pointer:
return True
for node in self.nodes_primary:
if node.has_pointers_recursive:
return True
return False
@property
def pointed(self):
return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
"""This node is pointed at another node.
:param Node pointed_node_id: The node id of the node being pointed at.
:return: pointer_id
"""
for pointer in self.nodes_pointer:
node_id = pointer.node._id
if node_id == pointed_node_id:
return pointer._id
return None
def get_points(self, folders=False, deleted=False, resolve=True):
ret = []
for each in self.pointed:
pointer_node = get_pointer_parent(each)
if not folders and pointer_node.is_folder:
continue
if not deleted and pointer_node.is_deleted:
continue
if resolve:
ret.append(pointer_node)
else:
ret.append(each)
return ret
def resolve(self):
return self
def fork_pointer(self, pointer, auth, save=True):
"""Replace a pointer with a fork. If the pointer points to a project,
fork the project and replace the pointer with a new pointer pointing
to the fork. If the pointer points to a component, fork the component
and add it to the current node.
:param Pointer pointer:
:param Auth auth:
:param bool save:
:return: Forked node
"""
# Fail if pointer not contained in `nodes`
try:
index = self.nodes.index(pointer)
except ValueError:
raise ValueError('Pointer {0} not in list'.format(pointer._id))
# Get pointed node
node = pointer.node
# Fork into current node and replace pointer with forked component
forked = node.fork_node(auth)
if forked is None:
raise ValueError('Could not fork node')
self.nodes[index] = forked
# Add log
self.add_log(
NodeLog.POINTER_FORKED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
# Garbage-collect pointer. Note: Must save current node before
# removing pointer, else remove will fail when trying to remove
# backref from self to pointer.
Pointer.remove_one(pointer)
# Return forked content
return forked
def get_recent_logs(self, n=10):
"""Return a list of the n most recent logs, in reverse chronological
order.
:param int n: Number of logs to retrieve
"""
return list(reversed(self.logs))[:n]
@property
def date_modified(self):
'''The most recent datetime when this node was modified, based on
the logs.
'''
try:
return self.logs[-1].date
except IndexError:
return self.date_created
def set_title(self, title, auth, save=False):
"""Set the title of this Node and log it.
:param str title: The new title.
:param auth: All the auth information including user, API key.
"""
# Called so validation does not have to wait until save.
validate_title(title)
original_title = self.title
self.title = title
self.add_log(
action=NodeLog.EDITED_TITLE,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'title_new': self.title,
'title_original': original_title,
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def set_description(self, description, auth, save=False):
"""Set the description and log the event.
:param str description: The new description
:param auth: All the auth information including user, API key.
:param bool save: Save self after updating.
"""
original = self.description
self.description = description
self.add_log(
action=NodeLog.EDITED_DESCRIPTION,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'description_new': self.description,
'description_original': original
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def update_search(self):
from website import search
try:
search.search.update_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def delete_search_entry(self):
from website import search
try:
search.search.delete_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def delete_registration_tree(self, save=False):
self.is_deleted = True
if not getattr(self.embargo, 'for_existing_registration', False):
self.registered_from = None
if save:
self.save()
self.update_search()
for child in self.nodes_primary:
child.delete_registration_tree(save=save)
def remove_node(self, auth, date=None):
"""Marks a node as deleted.
TODO: Call a hook on addons
Adds a log to the parent node if applicable
:param auth: an instance of :class:`Auth`.
:param date: Date node was removed
:type date: `datetime.datetime` or `None`
"""
# TODO: rename "date" param - it's shadowing a global
if self.is_dashboard:
raise NodeStateError("Dashboards may not be deleted.")
if not self.can_edit(auth):
raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
# If this is a folder, remove all the folders that it points at.
if self.is_folder:
for pointed in self.nodes_pointer:
if pointed.node.is_folder:
pointed.node.remove_node(auth=auth)
if [x for x in self.nodes_primary if not x.is_deleted]:
raise NodeStateError("Any child components must be deleted prior to deleting this project.")
# After delete callback
for addon in self.get_addons():
message = addon.after_delete(self, auth.user)
if message:
status.push_status_message(message, kind='info', trust=False)
log_date = date or datetime.datetime.utcnow()
# Add log to parent
if self.node__parent:
self.node__parent[0].add_log(
NodeLog.NODE_REMOVED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
else:
self.add_log(
NodeLog.PROJECT_DELETED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
self.is_deleted = True
self.deleted_date = date
self.save()
auth_signals.node_deleted.send(self)
return True
def fork_node(self, auth, title='Fork of '):
"""Recursively fork a node.
:param Auth auth: Consolidated authorization
:param str title: Optional text to prepend to forked title
:return: Forked node
"""
user = auth.user
# Non-contributors can't fork private nodes
if not (self.is_public or self.has_permission(user, 'read')):
raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
if original.is_deleted:
raise NodeStateError('Cannot fork deleted node.')
# Note: Cloning a node copies its `wiki_pages_current` and
# `wiki_pages_versions` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its wiki objects to build the
# correct URLs to that content.
forked = original.clone()
forked.logs = self.logs
forked.tags = self.tags
# Recursively fork child nodes
for node_contained in original.nodes:
if not node_contained.is_deleted:
forked_node = None
try: # Catch the potential PermissionsError above
forked_node = node_contained.fork_node(auth=auth, title='')
except PermissionsError:
pass # If this exception is thrown omit the node from the result set
if forked_node is not None:
forked.nodes.append(forked_node)
forked.title = title + forked.title
forked.is_fork = True
forked.is_registration = False
forked.forked_date = when
forked.forked_from = original
forked.creator = user
forked.piwik_site_id = None
# Forks default to private status
forked.is_public = False
# Clear permissions before adding users
forked.permissions = {}
forked.visible_contributor_ids = []
forked.add_contributor(
contributor=user,
permissions=CREATOR_PERMISSIONS,
log=False,
save=False
)
forked.add_log(
action=NodeLog.NODE_FORKED,
params={
'parent_node': original.parent_id,
'node': original._primary_key,
'registration': forked._primary_key,
},
auth=auth,
log_date=when,
save=False,
)
forked.save()
# After fork callback
for addon in original.get_addons():
_, message = addon.after_fork(original, forked, user)
if message:
status.push_status_message(message, kind='info', trust=True)
return forked
def register_node(self, schema, auth, template, data, parent=None):
"""Make a frozen copy of a node.
:param schema: Schema object
:param auth: All the auth information including user, API key.
:param template: Template name
:param data: Form data
:param parent Node: parent registration of registration to be created
"""
# NOTE: Admins can register child nodes even if they don't have write access to them
if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
raise PermissionsError(
'User {} does not have permission '
'to register this node'.format(auth.user._id)
)
if self.is_folder:
raise NodeStateError("Folders may not be registered")
template = urllib.unquote_plus(template)
template = to_mongo(template)
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
# Note: Cloning a node copies its `wiki_pages_current` and
# `wiki_pages_versions` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its wiki objects to build the
# correct URLs to that content.
if original.is_deleted:
raise NodeStateError('Cannot register deleted node.')
registered = original.clone()
registered.is_registration = True
registered.registered_date = when
registered.registered_user = auth.user
registered.registered_schema = schema
registered.registered_from = original
if not registered.registered_meta:
registered.registered_meta = {}
registered.registered_meta[template] = data
registered.contributors = self.contributors
registered.forked_from = self.forked_from
registered.creator = self.creator
registered.logs = self.logs
registered.tags = self.tags
registered.piwik_site_id = None
registered.save()
if parent:
registered.parent_node = parent
# After register callback
for addon in original.get_addons():
_, message = addon.after_register(original, registered, auth.user)
if message:
status.push_status_message(message, kind='info', trust=False)
for node_contained in original.nodes:
if not node_contained.is_deleted:
child_registration = node_contained.register_node(
schema, auth, template, data, parent=registered
)
if child_registration and not child_registration.primary:
registered.nodes.append(child_registration)
registered.save()
if settings.ENABLE_ARCHIVER:
project_signals.after_create_registration.send(self, dst=registered, user=auth.user)
return registered
def remove_tag(self, tag, auth, save=True):
if tag in self.tags:
self.tags.remove(tag)
self.add_log(
action=NodeLog.TAG_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_tag(self, tag, auth, save=True):
if tag not in self.tags:
new_tag = Tag.load(tag)
if not new_tag:
new_tag = Tag(_id=tag)
new_tag.save()
self.tags.append(new_tag)
self.add_log(
action=NodeLog.TAG_ADDED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
user = auth.user if auth else None
params['node'] = params.get('node') or params.get('project')
log = NodeLog(
action=action,
user=user,
foreign_user=foreign_user,
params=params,
)
if log_date:
log.date = log_date
log.save()
self.logs.append(log)
if save:
self.save()
if user:
increment_user_activity_counters(user._primary_key, action, log.date)
return log
@property
def url(self):
return '/{}/'.format(self._primary_key)
def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs)
@property
def absolute_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
@property
def api_v2_url(self):
return reverse('nodes:node-detail', kwargs={'node_id': self._id})
@property
def absolute_api_v2_url(self):
return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id})
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def api_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
return '/project/{}/'.format(self._primary_key)
@property
def csl(self):  # formats node information into CSL format for citation parsing
"""Return a dict of this node's metadata in CSL-JSON schema.
For details on this schema, see:
https://github.com/citation-style-language/schema#csl-json-schema
"""
csl = {
'id': self._id,
'title': sanitize.unescape_entities(self.title),
'author': [
contributor.csl_name # method in auth/model.py which parses the names of authors
for contributor in self.visible_contributors
],
'publisher': 'Open Science Framework',
'type': 'webpage',
'URL': self.display_absolute_url,
}
doi = self.get_identifier_value('doi')
if doi:
csl['DOI'] = doi
if self.logs:
csl['issued'] = datetime_to_csl(self.logs[-1].date)
return csl
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
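# Formatting behavior of author_list (hypothetical names):
#
#     1 author:   'Smith'
#     2 authors:  'Smith, & Jones'
#     3 authors:  'Smith, Jones, & Lee'
#     8+ authors: first seven names comma-joined, then 'et al.':
#                 'A, B, C, D, E, F, G, et al.'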
@property
def templated_list(self):
return [
x
for x in self.node__template_node
if not x.is_deleted
]
@property
def parent_node(self):
"""The parent node, if it exists, otherwise ``None``. Note: this
property is named `parent_node` rather than `parent` to avoid a
conflict with the `parent` back-reference created by the `nodes`
field on this schema.
"""
try:
if not self.node__parent[0].is_deleted:
return self.node__parent[0]
except IndexError:
pass
return None
@parent_node.setter
def parent_node(self, parent):
parent.nodes.append(self)
parent.save()
@property
def root(self):
if self.parent_node:
return self.parent_node.root
else:
return self
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def archive_job(self):
return self.archivejob__active[0] if self.archivejob__active else None
@property
def registrations(self):
return self.node__registrations.find(Q('archiving', 'eq', False))
@property
def watch_url(self):
return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
"""Add an add-on to the node. Do nothing if the addon is already
enabled.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool log: Add a log after adding the add-on
:return: A boolean, whether the addon was added
"""
ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
*args, **kwargs)
if ret and log:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save() # TODO: here, or outside the conditional? @mambocab
return ret
def delete_addon(self, addon_name, auth, _force=False):
"""Delete an add-on from the node.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool _force: For migration testing ONLY. Do not set to True
in the application, or else projects will be allowed to delete
mandatory add-ons!
:return bool: Add-on was deleted
"""
ret = super(Node, self).delete_addon(addon_name, auth, _force)
if ret:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save()
# TODO: save here or outside the conditional? @mambocab
return ret
def callback(self, callback, recursive=False, *args, **kwargs):
"""Invoke callbacks of attached add-ons and collect messages.
:param str callback: Name of callback method to invoke
:param bool recursive: Apply callback recursively over nodes
:return list: List of callback messages
"""
messages = []
for addon in self.get_addons():
method = getattr(addon, callback)
message = method(self, *args, **kwargs)
if message:
messages.append(message)
if recursive:
for child in self.nodes:
if not child.is_deleted:
messages.extend(
child.callback(
callback, recursive, *args, **kwargs
)
)
return messages
def replace_contributor(self, old, new):
for i, contrib in enumerate(self.contributors):
if contrib._primary_key == old._primary_key:
self.contributors[i] = new
# Remove unclaimed record for the project
if self._primary_key in old.unclaimed_records:
del old.unclaimed_records[self._primary_key]
old.save()
for permission in self.get_permissions(old):
self.add_permission(new, permission)
self.permissions.pop(old._id)
if old._id in self.visible_contributor_ids:
self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id
return True
return False
def remove_contributor(self, contributor, auth, log=True):
"""Remove a contributor from this node.
:param contributor: User object, the contributor to be removed
:param auth: All the auth information including user, API key.
"""
# remove unclaimed record if necessary
if self._primary_key in contributor.unclaimed_records:
del contributor.unclaimed_records[self._primary_key]
self.contributors.remove(contributor._id)
self.clear_permission(contributor)
if contributor._id in self.visible_contributor_ids:
self.visible_contributor_ids.remove(contributor._id)
if not self.visible_contributor_ids:
return False
# Node must have at least one registered admin user
# TODO: Move to validator or helper
admins = [
user for user in self.contributors
if self.has_permission(user, 'admin')
and user.is_registered
]
if not admins:
return False
# Clear permissions for removed user
self.permissions.pop(contributor._id, None)
# After remove callback
for addon in self.get_addons():
message = addon.after_remove_contributor(self, contributor, auth)
if message:
status.push_status_message(message, kind='info', trust=True)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributor': contributor._id,
},
auth=auth,
save=False,
)
self.save()
        # Send signal to remove this user from project subscriptions
auth_signals.contributor_removed.send(contributor, node=self)
return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
results = []
removed = []
for contrib in contributors:
outcome = self.remove_contributor(
contributor=contrib, auth=auth, log=False,
)
results.append(outcome)
removed.append(contrib._id)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': removed,
},
auth=auth,
save=False,
)
if save:
self.save()
        # True only if every individual removal succeeded
        return all(results)
def manage_contributors(self, user_dicts, auth, save=False):
"""Reorder and remove contributors.
:param list user_dicts: Ordered list of contributors represented as
dictionaries of the form:
{'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
:param Auth auth: Consolidated authentication information
:param bool save: Save changes
:raises: ValueError if any users in `users` not in contributors or if
no admin contributors remaining
"""
with TokuTransaction():
users = []
user_ids = []
permissions_changed = {}
visibility_removed = []
to_retain = []
to_remove = []
for user_dict in user_dicts:
user = User.load(user_dict['id'])
if user is None:
raise ValueError('User not found')
if user not in self.contributors:
raise ValueError(
'User {0} not in contributors'.format(user.fullname)
)
permissions = expand_permissions(user_dict['permission'])
if set(permissions) != set(self.get_permissions(user)):
self.set_permissions(user, permissions, save=False)
permissions_changed[user._id] = permissions
                # Visible contributors must be added before any are removed so
                # that the visible-contributor list validates properly
if user_dict['visible']:
self.set_visible(user,
visible=True,
auth=auth)
else:
visibility_removed.append(user)
users.append(user)
user_ids.append(user_dict['id'])
for user in visibility_removed:
self.set_visible(user,
visible=False,
auth=auth)
for user in self.contributors:
if user._id in user_ids:
to_retain.append(user)
else:
to_remove.append(user)
# TODO: Move to validator or helper @jmcarp
admins = [
user for user in users
if self.has_permission(user, 'admin')
and user.is_registered
]
            if not users or not admins:
raise ValueError(
'Must have at least one registered admin contributor'
)
if to_retain != users:
self.add_log(
action=NodeLog.CONTRIB_REORDERED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': [
user._id
for user in users
],
},
auth=auth,
save=False,
)
if to_remove:
self.remove_contributors(to_remove, auth=auth, save=False)
self.contributors = users
if permissions_changed:
self.add_log(
action=NodeLog.PERMISSIONS_UPDATED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': permissions_changed,
},
auth=auth,
save=False,
)
# Update list of visible IDs
self.update_visible_ids()
if save:
self.save()
with TokuTransaction():
            if to_remove or (permissions_changed and ['read'] in permissions_changed.values()):
project_signals.write_permissions_revoked.send(self)
def add_contributor(self, contributor, permissions=None, visible=True,
auth=None, log=True, save=False):
"""Add a contributor to the project.
:param User contributor: The contributor to be added
:param list permissions: Permissions to grant to the contributor
:param bool visible: Contributor is visible in project dashboard
:param Auth auth: All the auth information including user, API key
:param bool log: Add log to self
:param bool save: Save after adding contributor
:returns: Whether contributor was added
"""
MAX_RECENT_LENGTH = 15
# If user is merged into another account, use master account
contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
if contrib_to_add not in self.contributors:
self.contributors.append(contrib_to_add)
if visible:
self.set_visible(contrib_to_add, visible=True, log=False)
# Add default contributor permissions
permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
for permission in permissions:
self.add_permission(contrib_to_add, permission, save=False)
# Add contributor to recently added list for user
if auth is not None:
user = auth.user
if contrib_to_add in user.recently_added:
user.recently_added.remove(contrib_to_add)
user.recently_added.insert(0, contrib_to_add)
while len(user.recently_added) > MAX_RECENT_LENGTH:
user.recently_added.pop()
if log:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [contrib_to_add._primary_key],
},
auth=auth,
save=False,
)
if save:
self.save()
project_signals.contributor_added.send(self, contributor=contributor)
return True
        # Permissions must be overridden if changed when a contributor is added
        # to a parent node on whose child they are already a contributor.
elif contrib_to_add in self.contributors and permissions is not None:
self.set_permissions(contrib_to_add, permissions)
if save:
self.save()
return False
else:
return False
def add_contributors(self, contributors, auth=None, log=True, save=False):
"""Add multiple contributors
:param list contributors: A list of dictionaries of the form:
{
'user': <User object>,
'permissions': <Permissions list, e.g. ['read', 'write']>,
'visible': <Boolean indicating whether or not user is a bibliographic contributor>
}
:param auth: All the auth information including user, API key.
:param log: Add log to self
:param save: Save after adding contributor
"""
for contrib in contributors:
self.add_contributor(
contributor=contrib['user'], permissions=contrib['permissions'],
visible=contrib['visible'], auth=auth, log=False, save=False,
)
if log and contributors:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [
contrib['user']._id
for contrib in contributors
],
},
auth=auth,
save=False,
)
if save:
self.save()
def add_unregistered_contributor(self, fullname, email, auth,
permissions=None, save=False):
"""Add a non-registered contributor to the project.
:param str fullname: The full name of the person.
:param str email: The email address of the person.
:param Auth auth: Auth object for the user adding the contributor.
:returns: The added contributor
:raises: DuplicateEmailError if user with given email is already in the database.
"""
# Create a new user record
contributor = User.create_unregistered(fullname=fullname, email=email)
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
try:
contributor.save()
except ValidationValueError: # User with same email already exists
contributor = get_user(email=email)
# Unregistered users may have multiple unclaimed records, so
# only raise error if user is registered.
if contributor.is_registered or self.is_contributor(contributor):
raise
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
contributor.save()
self.add_contributor(
contributor, permissions=permissions, auth=auth,
log=True, save=False,
)
self.save()
return contributor
def set_privacy(self, permissions, auth=None, log=True, save=True):
"""Set the permissions for this node.
:param permissions: A string, either 'public' or 'private'
:param auth: All the auth information including user, API key.
:param bool log: Whether to add a NodeLog for the privacy change.
"""
if permissions == 'public' and not self.is_public:
if self.is_registration:
if self.is_pending_embargo:
raise NodeStateError("A registration with an unapproved embargo cannot be made public.")
if self.embargo_end_date and not self.is_pending_embargo:
self.embargo.state = Embargo.REJECTED
self.embargo.save()
self.is_public = True
elif permissions == 'private' and self.is_public:
if self.is_registration and not self.is_pending_embargo:
raise NodeStateError("Public registrations must be retracted, not made private.")
else:
self.is_public = False
else:
return False
# After set permissions callback
for addon in self.get_addons():
message = addon.after_set_privacy(self, permissions)
if message:
status.push_status_message(message, kind='info', trust=False)
if log:
action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
self.add_log(
action=action,
params={
'project': self.parent_id,
'node': self._primary_key,
},
auth=auth,
save=False,
)
if save:
self.save()
return True
def admin_public_wiki(self, user):
return (
self.has_addon('wiki') and
self.has_permission(user, 'admin') and
self.is_public
)
def include_wiki_settings(self, user):
"""Check if node meets requirements to make publicly editable."""
return (
self.admin_public_wiki(user) or
any(
each.admin_public_wiki(user)
for each in self.get_descendants_recursive()
)
)
# TODO: Move to wiki add-on
def get_wiki_page(self, name=None, version=None, id=None):
from website.addons.wiki.model import NodeWikiPage
if name:
name = (name or '').strip()
key = to_mongo_key(name)
try:
if version and (isinstance(version, int) or version.isdigit()):
id = self.wiki_pages_versions[key][int(version) - 1]
elif version == 'previous':
id = self.wiki_pages_versions[key][-2]
elif version == 'current' or version is None:
id = self.wiki_pages_current[key]
else:
return None
except (KeyError, IndexError):
return None
return NodeWikiPage.load(id)
# TODO: Move to wiki add-on
def update_node_wiki(self, name, content, auth):
"""Update the node's wiki page with new content.
        :param name: A string, the page's name, e.g. ``"home"``.
:param content: A string, the posted content.
:param auth: All the auth information including user, API key.
"""
from website.addons.wiki.model import NodeWikiPage
name = (name or '').strip()
key = to_mongo_key(name)
if key not in self.wiki_pages_current:
if key in self.wiki_pages_versions:
version = len(self.wiki_pages_versions[key]) + 1
else:
version = 1
else:
current = NodeWikiPage.load(self.wiki_pages_current[key])
current.is_current = False
version = current.version + 1
current.save()
new_page = NodeWikiPage(
page_name=name,
version=version,
user=auth.user,
is_current=True,
node=self,
content=content
)
new_page.save()
# check if the wiki page already exists in versions (existed once and is now deleted)
if key not in self.wiki_pages_versions:
self.wiki_pages_versions[key] = []
self.wiki_pages_versions[key].append(new_page._primary_key)
self.wiki_pages_current[key] = new_page._primary_key
self.add_log(
action=NodeLog.WIKI_UPDATED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': new_page.page_name,
'page_id': new_page._primary_key,
'version': new_page.version,
},
auth=auth,
log_date=new_page.date,
save=False,
)
self.save()
# TODO: Move to wiki add-on
def rename_node_wiki(self, name, new_name, auth):
"""Rename the node's wiki page with new name.
:param name: A string, the page's name, e.g. ``"My Page"``.
:param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
:param auth: All the auth information including user, API key.
"""
# TODO: Fix circular imports
from website.addons.wiki.exceptions import (
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
)
name = (name or '').strip()
key = to_mongo_key(name)
new_name = (new_name or '').strip()
new_key = to_mongo_key(new_name)
page = self.get_wiki_page(name)
if key == 'home':
raise PageCannotRenameError('Cannot rename wiki home page')
if not page:
raise PageNotFoundError('Wiki page not found')
if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
raise PageConflictError(
'Page already exists with name {0}'.format(
new_name,
)
)
# rename the page first in case we hit a validation exception.
old_name = page.page_name
page.rename(new_name)
# TODO: merge historical records like update (prevents log breaks)
# transfer the old page versions/current keys to the new name.
if key != new_key:
self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
del self.wiki_pages_versions[key]
self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
del self.wiki_pages_current[key]
if key in self.wiki_private_uuids:
self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
del self.wiki_private_uuids[key]
self.add_log(
action=NodeLog.WIKI_RENAMED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
'old_page': old_name,
'version': page.version,
},
auth=auth,
save=False,
)
self.save()
def delete_node_wiki(self, name, auth):
name = (name or '').strip()
key = to_mongo_key(name)
        page = self.get_wiki_page(name)
del self.wiki_pages_current[key]
self.add_log(
action=NodeLog.WIKI_DELETED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
},
auth=auth,
save=False,
)
self.save()
def get_stats(self, detailed=False):
if detailed:
raise NotImplementedError(
'Detailed stats exist, but are not yet implemented.'
)
else:
return get_basic_counters('node:%s' % self._primary_key)
# TODO: Deprecate this; it duplicates much of what serialize_project already
# does
def serialize(self, auth=None):
"""Dictionary representation of node that is nested within a NodeLog's
representation.
"""
# TODO: incomplete implementation
return {
'id': str(self._primary_key),
'category': self.category_display,
'node_type': self.project_or_component,
'url': self.url,
# TODO: Titles shouldn't contain escaped HTML in the first place
'title': sanitize.unescape_entities(self.title),
'path': self.path_above(auth),
'api_url': self.api_url,
'is_public': self.is_public,
'is_registration': self.is_registration,
}
def _initiate_retraction(self, user, justification=None):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param justification: Justification, if given, for retraction
"""
retraction = Retraction(
initiated_by=user,
justification=justification or None, # make empty strings None
state=Retraction.UNAPPROVED
)
retraction.save() # Save retraction so it has a primary key
self.retraction = retraction
self.save() # Set foreign field reference Node.retraction
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
retraction.add_authorizer(admin)
retraction.save() # Save retraction approval state
return retraction
def retract_registration(self, user, justification=None, save=True):
"""Retract public registration. Instantiate new Retraction object
and associate it with the respective registration.
"""
if not self.is_registration or (not self.is_public and not (self.embargo_end_date or self.is_pending_embargo)):
raise NodeStateError('Only public or embargoed registrations may be retracted.')
if self.root is not self:
raise NodeStateError('Retraction of non-parent registrations is not permitted.')
retraction = self._initiate_retraction(user, justification)
self.registered_from.add_log(
action=NodeLog.RETRACTION_INITIATED,
params={
'node': self._id,
'retraction_id': retraction._id,
},
auth=Auth(user),
)
self.retraction = retraction
if save:
self.save()
    def _is_embargo_date_valid(self, end_date):
        today = datetime.datetime.utcnow()
        return (settings.EMBARGO_END_DATE_MIN <= (end_date - today)
                <= settings.EMBARGO_END_DATE_MAX)
def _initiate_embargo(self, user, end_date, for_existing_registration=False):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param end_date: Date when the registration should be made public
"""
embargo = Embargo(
initiated_by=user,
end_date=datetime.datetime.combine(end_date, datetime.datetime.min.time()),
for_existing_registration=for_existing_registration
)
embargo.save() # Save embargo so it has a primary key
self.embargo = embargo
self.save() # Set foreign field reference Node.embargo
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
embargo.add_authorizer(admin)
embargo.save() # Save embargo's approval_state
return embargo
def embargo_registration(self, user, end_date, for_existing_registration=False):
"""Enter registration into an embargo period at end of which, it will
be made public
:param user: User initiating the embargo
:param end_date: Date when the registration should be made public
:raises: NodeStateError if Node is not a registration
:raises: PermissionsError if user is not an admin for the Node
:raises: ValidationValueError if end_date is not within time constraints
"""
if not self.is_registration:
raise NodeStateError('Only registrations may be embargoed')
if not self.has_permission(user, 'admin'):
raise PermissionsError('Only admins may embargo a registration')
if not self._is_embargo_date_valid(end_date):
            raise ValidationValueError('Embargo end date must fall within the window defined by EMBARGO_END_DATE_MIN and EMBARGO_END_DATE_MAX')
embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration)
self.registered_from.add_log(
action=NodeLog.EMBARGO_INITIATED,
params={
'node': self._id,
'embargo_id': embargo._id,
},
auth=Auth(user),
save=True,
)
if self.is_public:
self.set_privacy('private', Auth(user))
def _initiate_approval(self, user):
        end_date = datetime.datetime.utcnow() + settings.REGISTRATION_APPROVAL_TIME  # UTC, consistent with other timestamps
approval = RegistrationApproval(
initiated_by=user,
end_date=end_date,
)
approval.save() # Save approval so it has a primary key
self.registration_approval = approval
self.save() # Set foreign field reference Node.registration_approval
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
approval.add_authorizer(admin)
approval.save() # Save approval's approval_state
return approval
def require_approval(self, user):
        if not self.is_registration:
            raise NodeStateError('Only registrations may require approval')
        if not self.has_permission(user, 'admin'):
            raise PermissionsError('Only admins may initiate a registration approval')
approval = self._initiate_approval(user)
self.registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_INITIATED,
params={
'node': self._id,
'registration_approval_id': approval._id,
},
auth=Auth(user),
save=True,
)
# TODO make private?
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
"""Ensure that user IDs in `contributors` and `permissions` match.
"""
node = instance
contributor_ids = set([user._id for user in node.contributors])
permission_ids = set(node.permissions.keys())
mismatched_contributors = contributor_ids.difference(permission_ids)
if mismatched_contributors:
raise ValidationValueError(
'Contributors {0} missing from `permissions` on node {1}'.format(
', '.join(mismatched_contributors),
node._id,
)
)
mismatched_permissions = permission_ids.difference(contributor_ids)
if mismatched_permissions:
raise ValidationValueError(
'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
node._id,
)
)
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
"""Ensure that user IDs in `contributors` and `visible_contributor_ids`
match.
"""
node = instance
for user_id in node.visible_contributor_ids:
if user_id not in node.contributors:
raise ValidationValueError(
('User {0} is in `visible_contributor_ids` but not in '
'`contributors` on node {1}').format(
user_id,
node._id,
)
)
class WatchConfig(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
node = fields.ForeignField('Node', backref='watched')
digest = fields.BooleanField(default=False)
immediate = fields.BooleanField(default=False)
def __repr__(self):
return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
key = fields.StringField(required=True)
name = fields.StringField()
is_deleted = fields.BooleanField(default=False)
anonymous = fields.BooleanField(default=False)
nodes = fields.ForeignField('node', list=True, backref='shared')
creator = fields.ForeignField('user', backref='created')
@property
def node_ids(self):
node_ids = [node._id for node in self.nodes]
return node_ids
def node_scale(self, node):
# node may be None if previous node's parent is deleted
if node is None or node.parent_id not in self.node_ids:
return -40
else:
offset = 20 if node.parent_node is not None else 0
return offset + self.node_scale(node.parent_node)
def to_json(self):
return {
"id": self._id,
"date_created": iso8601format(self.date_created),
"key": self.key,
"name": self.name,
"creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
"nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category}
for x in self.nodes if not x.is_deleted],
"anonymous": self.anonymous
}
class Sanction(StoredObject):
"""Sanction object is a generic way to track approval states"""
abstract = True
UNAPPROVED = 'unapproved'
APPROVED = 'approved'
REJECTED = 'rejected'
DISPLAY_NAME = 'Sanction'
# SHORT_NAME must correspond with the associated foreign field to query against,
# e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
SHORT_NAME = 'sanction'
    APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}.'
    APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    REJECTION_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to reject this {DISPLAY_NAME}.'
    REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
end_date = fields.DateTimeField(default=None)
# Sanction subclasses must have an initiated_by field
# initiated_by = fields.ForeignField('user', backref='initiated')
# Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens:
# {
# 'b3k97': {
# 'has_approved': False,
# 'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
# 'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
# }
approval_state = fields.DictionaryField()
# One of 'unapproved', 'approved', or 'rejected'
state = fields.StringField(default='unapproved')
def __repr__(self):
return '<Sanction(end_date={self.end_date}) with _id {self._id}>'.format(self=self)
@property
def pending_approval(self):
return self.state == Sanction.UNAPPROVED
@property
def is_approved(self):
return self.state == Sanction.APPROVED
@property
def is_rejected(self):
return self.state == Sanction.REJECTED
def _validate_authorizer(self, user):
return True
def add_authorizer(self, user, approved=False, save=False):
valid = self._validate_authorizer(user)
if valid and user._id not in self.approval_state:
self.approval_state[user._id] = {
'has_approved': approved,
'approval_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'approve_{}'.format(self.SHORT_NAME)
}
),
'rejection_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'reject_{}'.format(self.SHORT_NAME)
}
),
}
if save:
self.save()
return True
return False
def remove_authorizer(self, user):
if user._id not in self.approval_state:
return False
del self.approval_state[user._id]
self.save()
return True
def _on_approve(self, user, token):
if all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
self.state = Sanction.APPROVED
self._on_complete(user)
def _on_reject(self, user, token):
"""Early termination of a Sanction"""
raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')
def _on_complete(self, user):
"""When a Sanction has unanimous approval"""
raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')
def approve(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
try:
if self.approval_state[user._id]['approval_token'] != token:
raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.approval_state[user._id]['has_approved'] = True
self._on_approve(user, token)
def reject(self, user, token):
"""Cancels sanction if user is admin and token verifies."""
try:
if self.approval_state[user._id]['rejection_token'] != token:
raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.state = Sanction.REJECTED
self._on_reject(user, token)
def forcibly_reject(self):
self.state = Sanction.REJECTED
def _notify_authorizer(self, user):
pass
def _notify_non_authorizer(self, user):
pass
def ask(self, group):
for contrib in group:
if contrib._id in self.approval_state:
self._notify_authorizer(contrib)
else:
self._notify_non_authorizer(contrib)
class EmailApprovableSanction(Sanction):
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
VIEW_URL_TEMPLATE = ''
APPROVE_URL_TEMPLATE = ''
REJECT_URL_TEMPLATE = ''
    # Store a persistent copy of urls for use when needed outside of a request context.
    # This field gets automatically updated whenever the model's approval_state
    # is modified and the model is saved
# {
# 'abcde': {
# 'approve': [APPROVAL_URL],
# 'reject': [REJECT_URL],
# }
# }
stashed_urls = fields.DictionaryField(default=dict)
@staticmethod
def _format_or_empty(template, context):
if context:
return template.format(**context)
return ''
def _view_url(self, user_id):
return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id))
def _view_url_context(self, user_id):
return None
def _approval_url(self, user_id):
return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))
def _approval_url_context(self, user_id):
return None
def _rejection_url(self, user_id):
return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))
def _rejection_url_context(self, user_id):
return None
def _send_approval_request_email(self, user, template, context):
mails.send_mail(
user.username,
template,
user=user,
**context
)
def _email_template_context(self, user, is_authorizer=False):
return {}
def _notify_authorizer(self, authorizer):
context = self._email_template_context(authorizer, is_authorizer=True)
if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def _notify_non_authorizer(self, user):
context = self._email_template_context(user)
if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def add_authorizer(self, user, **kwargs):
super(EmailApprovableSanction, self).add_authorizer(user, **kwargs)
self.stashed_urls[user._id] = {
'view': self._view_url(user._id),
'approve': self._approval_url(user._id),
'reject': self._rejection_url(user._id)
}
self.save()
class Embargo(EmailApprovableSanction):
"""Embargo object for registrations waiting to go public."""
COMPLETED = 'completed'
DISPLAY_NAME = 'Embargo'
SHORT_NAME = 'embargo'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='embargoed')
for_existing_registration = fields.BooleanField(default=False)
@property
def is_completed(self):
return self.state == self.COMPLETED
@property
def embargo_end_date(self):
if self.state == self.APPROVED:
return self.end_date
return False
    # NOTE(hrybacki): Old, private registrations are grandfathered and are not
    # required to be made public or embargoed. This field differentiates them
    # from new registrations entering into an embargo period, which should not
    # show up in any search-related fields.
@property
def pending_registration(self):
return not self.for_existing_registration and self.pending_approval
def __repr__(self):
parent_registration = None
try:
parent_registration = Node.find_one(Q('embargo', 'eq', self))
except NoResultsFound:
pass
return ('<Embargo(parent_registration={0}, initiated_by={1}, '
'end_date={2}) with _id {3}>').format(
parent_registration,
self.initiated_by,
self.end_date,
self._id
)
def _view_url_context(self, user_id):
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'approval_link': approval_link,
'project_name': registration.title,
'disapproval_link': disapproval_link,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
}
def _validate_authorizer(self, user):
registration = Node.find_one(Q('embargo', 'eq', self))
return registration.has_permission(user, ADMIN)
def _on_reject(self, user, token):
parent_registration = Node.find_one(Q('embargo', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(user),
)
# Remove backref to parent project if embargo was for a new registration
if not self.for_existing_registration:
parent_registration.registered_from = None
# Delete parent registration if it was created at the time the embargo was initiated
if not self.for_existing_registration:
parent_registration.is_deleted = True
parent_registration.save()
def disapprove_embargo(self, user, token):
"""Cancels retraction if user is admin and token verifies."""
self.reject(user, token)
def _on_complete(self, user):
parent_registration = Node.find_one(Q('embargo', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_APPROVED,
params={
'node': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(self.initiated_by),
)
        self.state = self.COMPLETED
self.save()
def approve_embargo(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
self.approve(user, token)
class Retraction(EmailApprovableSanction):
"""Retraction object for public registrations."""
DISPLAY_NAME = 'Retraction'
SHORT_NAME = 'retraction'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='initiated')
justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))
def __repr__(self):
parent_registration = None
try:
parent_registration = Node.find_one(Q('retraction', 'eq', self))
except NoResultsFound:
pass
return ('<Retraction(parent_registration={0}, initiated_by={1}) '
'with _id {2}>').format(
parent_registration,
self.initiated_by,
self._id
)
def _view_url_context(self, user_id):
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'project_name': registration.title,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _on_reject(self, user, token):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_CANCELLED,
params={
'node': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(user),
save=True,
)
def _on_complete(self, user):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_APPROVED,
params={
'node': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(self.initiated_by),
)
# Remove any embargoes associated with the registration
if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
parent_registration.embargo.state = self.REJECTED
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration._id,
'embargo_id': parent_registration.embargo._id,
},
auth=Auth(self.initiated_by),
)
parent_registration.embargo.save()
# Ensure retracted registration is public
if not parent_registration.is_public:
parent_registration.set_privacy('public')
parent_registration.update_search()
# Retraction status is inherited from the root project, so we
# need to recursively update search for every descendant node
        # so that retracted subprojects/components don't appear in search
for node in parent_registration.get_descendants_recursive():
node.update_search()
        self.state = self.APPROVED
self.save()
def approve_retraction(self, user, token):
self.approve(user, token)
def disapprove_retraction(self, user, token):
self.reject(user, token)
class RegistrationApproval(EmailApprovableSanction):
DISPLAY_NAME = 'Approval'
SHORT_NAME = 'registration_approval'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='registration_approved')
def _view_url_context(self, user_id):
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
'project_name': registration.title,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _add_success_logs(self, node, user):
src = node.registered_from
src.add_log(
action=NodeLog.PROJECT_REGISTERED,
params={
'parent_node': src.parent_id,
'node': src._primary_key,
'registration': node._primary_key,
},
auth=Auth(user),
save=False
)
src.save()
def _on_complete(self, user):
register = Node.find_one(Q('registration_approval', 'eq', self))
registered_from = register.registered_from
auth = Auth(self.initiated_by)
register.set_privacy('public', auth, log=False)
for child in register.get_descendants_recursive(lambda n: n.primary):
child.set_privacy('public', auth, log=False)
# Accounts for system actions where no `User` performs the final approval
auth = Auth(user) if user else None
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
params={
'node': registered_from._id,
'registration_approval_id': self._id,
},
auth=auth,
)
for node in register.root.node_and_primary_descendants():
self._add_success_logs(node, user)
node.update_search() # update search if public
self.state = self.APPROVED
self.save()
def _on_reject(self, user, token):
register = Node.find_one(Q('registration_approval', 'eq', self))
registered_from = register.registered_from
register.delete_registration_tree(save=True)
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
params={
'node': register._id,
'registration_approval_id': self._id,
},
auth=Auth(user),
)
|
sbt9uc/osf.io
|
website/project/model.py
|
Python
|
apache-2.0
| 125,658
|
[
"VisIt"
] |
64ac9fdfc07d6f607f5f804189d958df44fcd961475502ca3d6012d70eccbb67
|
#!/usr/bin/python -u
import os
import sys
import fnmatch
# do a basic check to see if pylint is even installed
try:
from pylint.__pkginfo__ import version as pylint_version
except ImportError:
print "Unable to import pylint, it may need to be installed"
sys.exit(1)
# Classes of errors we ignore on quiet runs
IGNORED_ERRORS = 'E1002,E1101,E1103,E1120,F0401,I0011'
# By default, complain about all things
LINT_VERBOSE = True
def set_verbosity(verbose):
'''
Changes the verbosity level
'''
global LINT_VERBOSE
LINT_VERBOSE = verbose
major, minor = pylint_version.split('.')[:2]
pylint_version = float("%s.%s" % (major, minor))
# patch up the logilab module lookup tools to understand autotest.* trash
import logilab.common.modutils
_ffm = logilab.common.modutils.file_from_modpath
def file_from_modpath(modpath, path=None, context_file=None):
if modpath[0] == "autotest":
if modpath[1:]:
return _ffm(modpath[1:], path, context_file)
return _ffm(modpath, path, context_file)
logilab.common.modutils.file_from_modpath = file_from_modpath
import pylint.lint
from pylint.checkers import imports
ROOT_MODULE = 'autotest.'
# need to put autotest root dir on sys.path so pylint will be happy
autotest_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, autotest_root)
# patch up pylint import checker to handle our importing magic
RealImportsChecker = imports.ImportsChecker
class CustomImportsChecker(imports.ImportsChecker):
def visit_from(self, node):
if node.modname.startswith(ROOT_MODULE):
node.modname = node.modname[len(ROOT_MODULE):]
return RealImportsChecker.visit_from(self, node)
imports.ImportsChecker = CustomImportsChecker
# some files make pylint blow up, so make sure we ignore them
blacklist = ['/contrib/*', '/frontend/afe/management.py', ]
def get_pylint_opts():
"""
If VERBOSE is set, show all complaints. If not, only errors.
    There are two major sources of E1103/E1120 false positives:
* shared.enum.Enum objects
* DB model objects (scheduler models are the worst, but Django models also
generate some errors)
"""
disable_new = ['--disable=W,R,C,%s' % IGNORED_ERRORS]
disable_old = ['--disable-msg-cat=W,R,C', '--disable-msg=%s' %
IGNORED_ERRORS]
if LINT_VERBOSE:
opts = []
else:
if pylint_version >= 0.21:
opts = disable_new
else:
opts = disable_old
opts += ['--reports=no', '--rcfile=/dev/null',
'--good-names=i,j,k,Run,_,vm']
if pylint_version < 1.0:
fmt_opt = '--include-ids=y'
else:
fmt_opt = '--msg-template="{msg_id}:{line:3d},{column}: {obj}: {msg}"'
opts.append(fmt_opt)
return opts
def check_file(file_path):
if not file_path.endswith('.py'):
return 0
for blacklist_pattern in blacklist:
if fnmatch.fnmatch(os.path.abspath(file_path),
'*' + blacklist_pattern):
return 0
pylint_opts = get_pylint_opts()
if pylint_version >= 0.21:
runner = pylint.lint.Run(pylint_opts + [file_path], exit=False)
else:
runner = pylint.lint.Run(pylint_opts + [file_path])
return runner.linter.msg_status
def visit(arg, dirname, filenames):
for filename in filenames:
check_file(os.path.join(dirname, filename))
def check_dir(dir_path):
os.path.walk(dir_path, visit, None)
if __name__ == "__main__":
import optparse
usage = "usage: %prog [options] [list of files]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Ignore pylint errors %s" % IGNORED_ERRORS)
options, args = parser.parse_args()
verbose = not options.quiet
set_verbosity(verbose)
file_list = args
pylint_base_opts = get_pylint_opts()
if '--' in file_list:
index = file_list.index('--')
pylint_base_opts.extend(file_list[index + 1:])
file_list = file_list[:index]
if len(file_list) > 0:
for path in file_list:
if os.path.isdir(path):
check_dir(path)
else:
check_file(path)
else:
check_dir('.')
|
kylazhang/virt-test
|
tools/run_pylint.py
|
Python
|
gpl-2.0
| 4,357
|
[
"VisIt"
] |
bf32b0c7ec3dc5a6d7a7f7ebed7aeb43d0faf235184efca88e617dbb0007a64e
|
"""Grow preprocessors."""
import json
from protorpc import protojson
from grow.preprocessors import blogger
from grow.preprocessors import google_drive
from grow.preprocessors import gulp
from grow.preprocessors import webpack
from grow.common import extensions
_preprocessor_kinds_to_classes = {}
_builtins = (
blogger.BloggerPreprocessor,
google_drive.GoogleDocsPreprocessor,
google_drive.GoogleSheetsPreprocessor,
gulp.GulpPreprocessor,
webpack.WebpackPreprocessor,
)
def register_preprocessor(class_obj):
_preprocessor_kinds_to_classes[class_obj.KIND] = class_obj
def config_from_json(preprocessor_class, content):
config_class = preprocessor_class.Config
return protojson.decode_message(config_class, content)
def make_preprocessor(kind, config, pod):
autorun = config.pop('autorun', True)
name = config.pop('name', None)
tags = config.pop('tags', None)
inject = config.pop('inject', False)
class_obj = _preprocessor_kinds_to_classes.get(kind)
if class_obj is None:
raise ValueError('No legacy preprocessor for "{}".'.format(kind))
if isinstance(config, dict):
config = json.dumps(config)
config = config_from_json(class_obj, config)
return class_obj(pod, config, autorun=autorun, name=name, tags=tags, inject=inject)
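# Hypothetical usage sketch (`pod` is illustrative, and the kind string and
# config keys are assumptions rather than values taken from the preprocessor
# classes):
#     preprocessor = make_preprocessor(
#         'gulp',
#         {'autorun': True, 'name': 'assets'},
#         pod,
#     )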
def register_builtins():
for builtin in _builtins:
register_preprocessor(builtin)
def register_extensions(extension_paths, pod_root):
for path in extension_paths:
cls = extensions.import_extension(path, [pod_root])
register_preprocessor(cls)
register_builtins()
|
grow/pygrow
|
grow/preprocessors/preprocessors.py
|
Python
|
mit
| 1,620
|
[
"GULP"
] |
475b7217ba2d68f3c4088876a1039ed3bca65ce350317a8410ff249ae8f83c2a
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 6 20:43:23 2017
@author: zqwu
"""
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from deepchem.models.tensorgraph import activations
from deepchem.models.tensorgraph import initializations
from deepchem.models.tensorgraph import model_ops
from deepchem.models.tensorgraph.layers import Layer
from deepchem.models.tensorgraph.layers import convert_to_layers
from deepchem.metrics import to_one_hot
class DistanceMatrix(Layer):
def __init__(self, max_atoms, **kwargs):
"""
Parameters
----------
max_atoms: int
Maximum number of atoms in the dataset
"""
self.max_atoms = max_atoms
super(DistanceMatrix, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Generate distance matrix for BPSymmetryFunction with trainable cutoff """
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
max_atoms = self.max_atoms
atom_coordinates = in_layers[0].out_tensor
atom_flags = in_layers[1].out_tensor
tensor1 = tf.tile(
tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1))
tensor2 = tf.tile(
tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))
# Calculate pairwise distance
d = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=3))
# Masking for valid atom index
self.out_tensor = d * tf.to_float(atom_flags)
class DistanceCutoff(Layer):
def __init__(self, max_atoms, cutoff=6 / 0.52917721092, **kwargs):
"""
Parameters
----------
cutoff: float, optional
      cutoff threshold for distance, in Bohr (1 Bohr is about 0.529 Angstrom)
"""
self.max_atoms = max_atoms
self.cutoff = cutoff
super(DistanceCutoff, self).__init__(**kwargs)
def build(self):
self.Rc = tf.Variable(tf.constant(self.cutoff))
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Generate distance matrix for BPSymmetryFunction with trainable cutoff """
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self.build()
d = in_layers[0].out_tensor
d_flag = in_layers[1].out_tensor
# Cutoff with threshold Rc
d_flag = d_flag * tf.nn.relu(tf.sign(self.Rc - d))
d = 0.5 * (tf.cos(np.pi * d / self.Rc) + 1)
out_tensor = d * d_flag
out_tensor = out_tensor * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
self.out_tensor = out_tensor
class RadialSymmetry(Layer):
""" Radial Symmetry Function """
def __init__(self,
max_atoms,
Rs_init=None,
ita_init=None,
atomic_number_differentiated=False,
atom_numbers=[1, 6, 7, 8],
**kwargs):
self.max_atoms = max_atoms
self.atomic_number_differentiated = atomic_number_differentiated
self.atom_number_cases = atom_numbers
if Rs_init is None:
self.Rs_init = np.array([0.5, 1.17, 1.83, 2.5, 3.17, 3.83, 4.5])
self.Rs_init = self.Rs_init / 0.52917721092
else:
self.Rs_init = np.array(Rs_init)
if ita_init is None:
self.ita_init = np.array([1.12])
else:
self.ita_init = np.array(ita_init)
super(RadialSymmetry, self).__init__(**kwargs)
def build(self):
""" Parameters for the Gaussian """
len_Rs = len(self.Rs_init)
len_ita = len(self.ita_init)
self.length = len_Rs * len_ita
Rs_init, ita_init = np.meshgrid(self.Rs_init, self.ita_init)
self.Rs = tf.constant(Rs_init.flatten(), dtype=tf.float32)
self.ita = tf.constant(ita_init.flatten(), dtype=tf.float32)
self.atom_number_embedding = tf.eye(max(self.atom_number_cases) + 1)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Generate Radial Symmetry Function """
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self.build()
d_cutoff = in_layers[0].out_tensor
d = in_layers[1].out_tensor
if self.atomic_number_differentiated:
atom_numbers = in_layers[2].out_tensor
atom_number_embedded = tf.nn.embedding_lookup(self.atom_number_embedding,
atom_numbers)
d_cutoff = tf.stack([d_cutoff] * self.length, axis=3)
d = tf.stack([d] * self.length, axis=3)
Rs = tf.reshape(self.Rs, (1, 1, 1, -1))
ita = tf.reshape(self.ita, (1, 1, 1, -1))
out_tensor = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
if self.atomic_number_differentiated:
out_tensors = []
for atom_type in self.atom_number_cases:
selected_atoms = tf.expand_dims(
tf.expand_dims(atom_number_embedded[:, :, atom_type], axis=1),
axis=3)
out_tensors.append(tf.reduce_sum(out_tensor * selected_atoms, axis=2))
self.out_tensor = tf.concat(out_tensors, axis=2)
else:
self.out_tensor = tf.reduce_sum(out_tensor, axis=2)
class AngularSymmetry(Layer):
""" Angular Symmetry Function """
def __init__(self,
max_atoms,
lambd_init=None,
ita_init=None,
zeta_init=None,
**kwargs):
self.max_atoms = max_atoms
if lambd_init is None:
self.lambd_init = np.array([1., -1.])
else:
self.lambd_init = np.array(lambd_init)
if ita_init is None:
self.ita_init = np.array([4.])
else:
self.ita_init = np.array(ita_init)
if zeta_init is None:
self.zeta_init = np.array([2., 4., 8.])
else:
self.zeta_init = np.array(zeta_init)
super(AngularSymmetry, self).__init__(**kwargs)
def build(self):
len_lambd = len(self.lambd_init)
len_ita = len(self.ita_init)
len_zeta = len(self.zeta_init)
self.length = len_lambd * len_ita * len_zeta
lambd_init, ita_init, zeta_init = np.meshgrid(self.lambd_init,
self.ita_init, self.zeta_init)
self.lambd = tf.constant(lambd_init.flatten(), dtype=tf.float32)
self.ita = tf.constant(ita_init.flatten(), dtype=tf.float32)
self.zeta = tf.constant(zeta_init.flatten(), dtype=tf.float32)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Generate Angular Symmetry Function """
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self.build()
max_atoms = self.max_atoms
d_cutoff = in_layers[0].out_tensor
d = in_layers[1].out_tensor
atom_coordinates = in_layers[2].out_tensor
vector_distances = tf.tile(tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1)) - \
tf.tile(tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))
R_ij = tf.tile(tf.expand_dims(d, axis=3), (1, 1, 1, max_atoms))
R_ik = tf.tile(tf.expand_dims(d, axis=2), (1, 1, max_atoms, 1))
R_jk = tf.tile(tf.expand_dims(d, axis=1), (1, max_atoms, 1, 1))
f_R_ij = tf.tile(tf.expand_dims(d_cutoff, axis=3), (1, 1, 1, max_atoms))
f_R_ik = tf.tile(tf.expand_dims(d_cutoff, axis=2), (1, 1, max_atoms, 1))
f_R_jk = tf.tile(tf.expand_dims(d_cutoff, axis=1), (1, max_atoms, 1, 1))
# Define angle theta = R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance)
theta = tf.reduce_sum(tf.tile(tf.expand_dims(vector_distances, axis=3), (1, 1, 1, max_atoms, 1)) * \
tf.tile(tf.expand_dims(vector_distances, axis=2), (1, 1, max_atoms, 1, 1)), axis=4)
theta = tf.div(theta, R_ij * R_ik + 1e-5)
R_ij = tf.stack([R_ij] * self.length, axis=4)
R_ik = tf.stack([R_ik] * self.length, axis=4)
R_jk = tf.stack([R_jk] * self.length, axis=4)
f_R_ij = tf.stack([f_R_ij] * self.length, axis=4)
f_R_ik = tf.stack([f_R_ik] * self.length, axis=4)
f_R_jk = tf.stack([f_R_jk] * self.length, axis=4)
theta = tf.stack([theta] * self.length, axis=4)
lambd = tf.reshape(self.lambd, (1, 1, 1, 1, -1))
zeta = tf.reshape(self.zeta, (1, 1, 1, 1, -1))
ita = tf.reshape(self.ita, (1, 1, 1, 1, -1))
out_tensor = tf.pow(1 + lambd * tf.cos(theta), zeta) * \
tf.exp(-ita * (tf.square(R_ij) + tf.square(R_ik) + tf.square(R_jk))) * \
f_R_ij * f_R_ik * f_R_jk
self.out_tensor = tf.reduce_sum(out_tensor, axis=[2, 3]) * \
tf.pow(tf.constant(2.), 1 - tf.reshape(self.zeta, (1, 1, -1)))
class AngularSymmetryMod(Layer):
""" Angular Symmetry Function """
def __init__(self,
max_atoms,
lambd_init=None,
ita_init=None,
zeta_init=None,
Rs_init=None,
thetas_init=None,
atomic_number_differentiated=False,
atom_numbers=[1, 6, 7, 8],
**kwargs):
self.max_atoms = max_atoms
self.atomic_number_differentiated = atomic_number_differentiated
self.atom_number_cases = atom_numbers
if lambd_init is None:
self.lambd_init = np.array([1., -1.])
else:
self.lambd_init = np.array(lambd_init)
if ita_init is None:
self.ita_init = np.array([1.12])
else:
self.ita_init = np.array(ita_init)
if zeta_init is None:
self.zeta_init = np.array([4.])
else:
self.zeta_init = np.array(zeta_init)
if Rs_init is None:
self.Rs_init = np.array([0.5, 1.17, 1.83, 2.5, 3.17])
self.Rs_init = self.Rs_init / 0.52917721092
else:
self.Rs_init = np.array(Rs_init)
if thetas_init is None:
self.thetas_init = np.array([0., 1.57, 3.14, 4.71])
else:
self.thetas_init = np.array(thetas_init)
super(AngularSymmetryMod, self).__init__(**kwargs)
def build(self):
len_lambd = len(self.lambd_init)
len_ita = len(self.ita_init)
len_zeta = len(self.zeta_init)
len_Rs = len(self.Rs_init)
len_thetas = len(self.thetas_init)
self.length = len_lambd * len_ita * len_zeta * len_Rs * len_thetas
lambd_init, ita_init, zeta_init, Rs_init, thetas_init = \
np.meshgrid(self.lambd_init, self.ita_init, self.zeta_init, self.Rs_init, self.thetas_init)
self.lambd = tf.constant(lambd_init.flatten(), dtype=tf.float32)
self.ita = tf.constant(ita_init.flatten(), dtype=tf.float32)
self.zeta = tf.constant(zeta_init.flatten(), dtype=tf.float32)
self.Rs = tf.constant(Rs_init.flatten(), dtype=tf.float32)
self.thetas = tf.constant(thetas_init.flatten(), dtype=tf.float32)
self.atom_number_embedding = tf.eye(max(self.atom_number_cases) + 1)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Generate Angular Symmetry Function """
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self.build()
max_atoms = self.max_atoms
d_cutoff = in_layers[0].out_tensor
d = in_layers[1].out_tensor
atom_coordinates = in_layers[2].out_tensor
if self.atomic_number_differentiated:
atom_numbers = in_layers[3].out_tensor
atom_number_embedded = tf.nn.embedding_lookup(self.atom_number_embedding,
atom_numbers)
vector_distances = tf.tile(tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1)) - \
tf.tile(tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))
R_ij = tf.tile(tf.expand_dims(d, axis=3), (1, 1, 1, max_atoms))
R_ik = tf.tile(tf.expand_dims(d, axis=2), (1, 1, max_atoms, 1))
f_R_ij = tf.tile(tf.expand_dims(d_cutoff, axis=3), (1, 1, 1, max_atoms))
f_R_ik = tf.tile(tf.expand_dims(d_cutoff, axis=2), (1, 1, max_atoms, 1))
# Define angle theta = R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance)
theta = tf.reduce_sum(tf.tile(tf.expand_dims(vector_distances, axis=3), (1, 1, 1, max_atoms, 1)) * \
tf.tile(tf.expand_dims(vector_distances, axis=2), (1, 1, max_atoms, 1, 1)), axis=4)
theta = tf.div(theta, R_ij * R_ik + 1e-5)
R_ij = tf.stack([R_ij] * self.length, axis=4)
R_ik = tf.stack([R_ik] * self.length, axis=4)
f_R_ij = tf.stack([f_R_ij] * self.length, axis=4)
f_R_ik = tf.stack([f_R_ik] * self.length, axis=4)
theta = tf.stack([theta] * self.length, axis=4)
lambd = tf.reshape(self.lambd, (1, 1, 1, 1, -1))
zeta = tf.reshape(self.zeta, (1, 1, 1, 1, -1))
ita = tf.reshape(self.ita, (1, 1, 1, 1, -1))
Rs = tf.reshape(self.Rs, (1, 1, 1, 1, -1))
thetas = tf.reshape(self.thetas, (1, 1, 1, 1, -1))
out_tensor = tf.pow(1 + lambd * tf.cos(theta - thetas), zeta) * \
tf.exp(-ita * tf.square((R_ij + R_ik) / 2 - Rs)) * \
f_R_ij * f_R_ik * tf.pow(tf.constant(2.), 1 - zeta)
if self.atomic_number_differentiated:
out_tensors = []
for atom_type_j in self.atom_number_cases:
for atom_type_k in self.atom_number_cases:
selected_atoms = tf.stack([atom_number_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
tf.stack([atom_number_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
selected_atoms = tf.expand_dims(
tf.expand_dims(selected_atoms, axis=1), axis=4)
out_tensors.append(
tf.reduce_sum(out_tensor * selected_atoms, axis=[2, 3]))
self.out_tensor = tf.concat(out_tensors, axis=2)
else:
self.out_tensor = tf.reduce_sum(out_tensor, axis=[2, 3])
class BPFeatureMerge(Layer):
def __init__(self, max_atoms, **kwargs):
self.max_atoms = max_atoms
super(BPFeatureMerge, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Merge features together """
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
atom_embedding = in_layers[0].out_tensor
radial_symmetry = in_layers[1].out_tensor
angular_symmetry = in_layers[2].out_tensor
atom_flags = in_layers[3].out_tensor
out_tensor = tf.concat(
[atom_embedding, radial_symmetry, angular_symmetry], axis=2)
self.out_tensor = out_tensor * atom_flags[:, :, 0:1]
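    # atom_flags presumably marks real versus padded atom slots (an inference
    # from usage): multiplying by its first channel zeroes the feature rows of
    # padding atoms so they cannot contribute downstream.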
class BPGather(Layer):
def __init__(self, max_atoms, **kwargs):
self.max_atoms = max_atoms
super(BPGather, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Merge features together """
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
out_tensor = in_layers[0].out_tensor
flags = in_layers[1].out_tensor
out_tensor = tf.reduce_sum(out_tensor * flags[:, :, 0:1], axis=1)
self.out_tensor = out_tensor
class AtomicDifferentiatedDense(Layer):
""" Separate Dense module for different atoms """
def __init__(self,
max_atoms,
out_channels,
atom_number_cases=[1, 6, 7, 8],
init='glorot_uniform',
activation='ani',
**kwargs):
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.max_atoms = max_atoms
self.out_channels = out_channels
self.atom_number_cases = atom_number_cases
super(AtomicDifferentiatedDense, self).__init__(**kwargs)
@staticmethod
def ani_activate(X):
return tf.exp(-1 * tf.pow(X, 2))
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" Generate Radial Symmetry Function """
init_fn = initializations.get(self.init) # Set weight initialization
if self.activation == 'ani':
activation_fn = self.ani_activate
else:
activation_fn = activations.get(self.activation)
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
inputs = in_layers[0].out_tensor
atom_numbers = in_layers[1].out_tensor
in_channels = inputs.get_shape().as_list()[-1]
self.W = init_fn(
[len(self.atom_number_cases), in_channels, self.out_channels])
self.b = model_ops.zeros((len(self.atom_number_cases), self.out_channels))
outputs = []
for i, atom_case in enumerate(self.atom_number_cases):
      # Optimization: contract the rank-3 input with a 2-D weight matrix via a
      # reshape trick, since tf.matmul (unlike np.matmul) will not broadcast a
      # rank-3 tensor against a rank-2 one.
a = inputs # (i,j,k)
b = self.W[i, :, :] # (k, l)
ai = tf.shape(a)[0]
aj = tf.shape(a)[1]
ak = tf.shape(a)[2]
bl = tf.shape(b)[1]
output = activation_fn(
tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b), [ai, aj, bl]) +
self.b[i, :])
mask = 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool))
output = tf.reshape(output * tf.expand_dims(mask, 2),
(-1, self.max_atoms, self.out_channels))
outputs.append(output)
self.out_tensor = tf.add_n(outputs)
def none_tensors(self):
w, b, out_tensor = self.W, self.b, self.out_tensor
self.W, self.b, self.out_tensor = None, None, None
return w, b, out_tensor
def set_tensors(self, tensor):
self.W, self.b, self.out_tensor = tensor
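# A minimal, self-contained sketch of the reshape trick used in
# AtomicDifferentiatedDense above, in plain NumPy (illustrative only; the
# array sizes are arbitrary assumptions):
#
#   import numpy as np
#   a = np.random.rand(2, 5, 3)  # (batch, atoms, in_channels)
#   b = np.random.rand(3, 4)     # (in_channels, out_channels)
#   ai, aj, ak = a.shape
#   out = np.matmul(a.reshape(ai * aj, ak), b).reshape(ai, aj, b.shape[1])
#   # equivalent to np.einsum('ijk,kl->ijl', a, b); TF1's tf.matmul does not
#   # broadcast a rank-3 operand against a rank-2 one, hence the reshape.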
|
Agent007/deepchem
|
deepchem/models/tensorgraph/symmetry_functions.py
|
Python
|
mit
| 17,234
|
[
"Gaussian"
] |
a0402f850920b6f3fd768f5eecfcf5453e6e5de181abb1a16db421ec6deccea0
|
import random
import string
import unittest
from game import Game, Player
class TestUDontKnowMe(unittest.TestCase):
def setUp(self):
self.brian = Player("Brian")
self.billy = Player("Billy")
self.john = Player("John")
self.jordan = Player("Jordan")
self.rob = Player("Rob")
self.all_players = [self.brian, self.billy, self.john, self.jordan, self.rob] # convenience attribute
self.udontknowme = Game()
def test_add_player(self):
self.udontknowme.add_player(self.brian)
self.assertTrue(len(self.udontknowme.players) == 1)
self.assertTrue(self.udontknowme.players[0] is self.brian)
self.udontknowme.add_player(self.jordan)
self.assertTrue(len(self.udontknowme.players) == 2)
self.assertTrue(self.udontknowme.players[1] is self.jordan)
def test_add_players(self):
self.udontknowme.add_players([self.brian, self.billy])
self.assertTrue(len(self.udontknowme.players) == 2)
self.assertTrue(self.udontknowme.players[0] is self.brian)
self.assertTrue(self.udontknowme.players[1] is self.billy)
def test_setup_questions(self):
self.udontknowme.add_players(self.all_players)
self.udontknowme.setup_questions()
self.assertEqual(len(self.udontknowme.questions), 10) # 2 questions per person
for player in self.all_players:
self.assertEqual(
len(filter(lambda question: question.about_player is player, self.udontknowme.questions)),
2
)
def test_get_next_question(self):
self.udontknowme.add_players(self.all_players)
self.udontknowme.setup_questions()
questions = []
while True:
question = self.udontknowme.go_to_next_question()
if question is not None:
questions.append(question)
else:
break # make sure we eventually get through all our questions and return None
self.assertEqual(
len(questions),
10
)
def test_question_add_answer_and_add_guess_and_num_guesses_and_award_points(self):
""" This should probably be in a TestQuestion(TestCase) class but I'm being lazy..."""
self.udontknowme.add_players(self.all_players)
self.udontknowme.setup_questions()
question = self.udontknowme.go_to_next_question()
about_player = question.about_player
other_players = filter(lambda player: player is not about_player, self.all_players)
question.add_answer(
about_player,
''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
)
for other_player in other_players:
# http://stackoverflow.com/a/2257449/211496
random_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
question.add_answer(other_player, random_string)
        self.assertEqual(
            len(question.answers),
            5,
        )
        self.assertEqual(
            len(question.guesses.keys()),
            5,
        )
# about_player will get other_players[0] and other_players[1] to guess them
# about_player = 2000 pts
# other_players[0] = 1000 pts
# other_players[1] = 1000 pts
question.add_guess(
other_players[0], filter(lambda answer: answer.player is about_player, question.answers)[0].answer
)
question.add_guess(
other_players[1], filter(lambda answer: answer.player is about_player, question.answers)[0].answer
)
# other_players[0] will get other_players[2] to guess them
# other_players[0] = 1500 pts
question.add_guess(
other_players[2], filter(lambda answer: answer.player is other_players[0], question.answers)[0].answer
)
# other_players[2] will get other_players[3] to guess them
# other_players[2] = 500 pts
question.add_guess(
other_players[3], filter(lambda answer: answer.player is other_players[2], question.answers)[0].answer
)
        self.assertEqual(
            question.num_guesses(),
            4,
        )
question.award_points()
# make sure the players have the right pts
self.assertEqual(
about_player.points,
2000,
)
self.assertEqual(
other_players[0].points,
1500,
)
self.assertEqual(
other_players[1].points,
1000,
)
self.assertEqual(
other_players[2].points,
500,
)
self.assertEqual(
other_players[3].points,
0,
)
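    # Scoring rules inferred from the expected totals above (the game module
    # itself is not shown here, so treat this as a reading aid, not a spec):
    # the player a question is about earns 1000 points per opponent who picks
    # their real answer; an opponent earns 1000 for picking the real answer
    # and 500 each time another opponent falls for their fake answer.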
def test_full_game(self):
self.udontknowme.add_players(self.all_players)
self.udontknowme.setup_questions()
while True:
question = self.udontknowme.go_to_next_question()
if question is not None:
about_player = question.about_player
other_players = filter(lambda player: player is not about_player, self.all_players)
question.add_answer(about_player, "brains")
for other_player in other_players:
# http://stackoverflow.com/a/2257449/211496
random_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
question.add_answer(other_player, random_string)
# about_player will get other_players[0] and other_players[1] to guess them
# about_player = 2000 pts
# other_players[0] = 1000 pts
# other_players[1] = 1000 pts
question.add_guess(
other_players[0], filter(lambda answer: answer.player is about_player, question.answers)[0].answer
)
question.add_guess(
other_players[1], filter(lambda answer: answer.player is about_player, question.answers)[0].answer
)
# other_players[0] will get other_players[2] to guess them
# other_players[0] = 1500 pts
question.add_guess(
other_players[2], filter(lambda answer: answer.player is other_players[0], question.answers)[0].answer
)
# other_players[2] will get other_players[3] to guess them
# other_players[2] = 500 pts
question.add_guess(
other_players[3], filter(lambda answer: answer.player is other_players[2], question.answers)[0].answer
)
question.award_points()
else:
break
# GAME IS OVER
# TODO - Are these numbers correct? Just did a print to get the values and assumed it worked....
self.assertEqual(
self.brian.points,
16000,
)
self.assertEqual(
self.udontknowme.players_sorted_by_points()[0],
self.brian,
)
self.assertEqual(
self.billy.points,
13000,
)
self.assertEqual(
self.udontknowme.players_sorted_by_points()[1],
self.billy,
)
self.assertEqual(
self.john.points,
10000,
)
self.assertEqual(
self.udontknowme.players_sorted_by_points()[2],
self.john,
)
self.assertEqual(
self.jordan.points,
7000,
)
self.assertEqual(
self.udontknowme.players_sorted_by_points()[3],
self.jordan,
)
self.assertEqual(
self.rob.points,
4000,
)
self.assertEqual(
self.udontknowme.players_sorted_by_points()[4],
self.rob,
)
if __name__ == '__main__':
unittest.main()
|
udontknowmeapp/udontknowme
|
webapp/game/tests.py
|
Python
|
gpl-2.0
| 8,002
|
[
"Brian"
] |
a6aeeb04aacea54f463edb0199c4187a343c8309591cdca01ec3de8324ae8aa0
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import datetime
import django_tables2 as tables
import six
from django.contrib import messages
from django.contrib.auth import views
from django.http import (
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseRedirect,
QueryDict,
)
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views.decorators.debug import sensitive_post_parameters
import karaage.common as common
from karaage.common.decorators import admin_required, login_required
from karaage.institutes.tables import InstituteTable
from karaage.people.emails import (
send_bounced_warning,
send_reset_password_email,
)
from karaage.people.forms import (
AddPersonForm,
AdminPasswordChangeForm,
AdminPersonForm,
SetPasswordForm,
)
from karaage.people.models import Person
from karaage.people.tables import LeaderTable, PersonFilter, PersonTable
from karaage.projects.models import Project
from karaage.projects.tables import ProjectTable
def _add_edit_user(request, form_class, username):
person_form = form_class
if username is None:
person = None
else:
person = get_object_or_404(Person, username=username)
form = person_form(request.POST or None, instance=person)
if request.method == 'POST':
if form.is_valid():
if person:
# edit
person = form.save()
messages.success(
request, "User '%s' was edited succesfully" % person)
assert person is not None
else:
# add
person = form.save()
messages.success(
request, "User '%s' was created succesfully" % person)
assert person is not None
return HttpResponseRedirect(person.get_absolute_url())
return render(
template_name='karaage/people/person_form.html',
context={'person': person, 'form': form},
request=request)
@sensitive_post_parameters('password1', 'password2')
@admin_required
def add_user(request):
return _add_edit_user(request, AddPersonForm, None)
@admin_required
def edit_user(request, username):
return _add_edit_user(request, AdminPersonForm, username)
@login_required
def user_list(request, queryset=None, title=None):
if queryset is None:
queryset = Person.objects.all()
if not common.is_admin(request):
queryset = queryset.filter(pk=request.user.pk)
queryset = queryset.select_related()
q_filter = PersonFilter(request.GET, queryset=queryset)
table = PersonTable(q_filter.qs)
tables.RequestConfig(request).configure(table)
spec = []
for name, value in six.iteritems(q_filter.form.cleaned_data):
if value is not None and value != "":
name = name.replace('_', ' ').capitalize()
spec.append((name, value))
context = {
'table': table,
'filter': q_filter,
'spec': spec,
'title': title or "Person list",
}
return render(
template_name="karaage/people/person_list.html", context=context,
request=request)
@admin_required
def locked_list(request):
result = QueryDict("", mutable=True)
result['active'] = "locked"
url = reverse('kg_person_list') + "?" + result.urlencode()
return HttpResponseRedirect(url)
@admin_required
def struggling(request):
today = datetime.date.today()
days30 = today - datetime.timedelta(days=30)
result = QueryDict("", mutable=True)
result['active'] = "yes"
result['begin_date_approved'] = days30
result['no_last_usage'] = True
result['sort'] = "-date_approved"
url = reverse('kg_person_list') + "?" + result.urlencode()
return HttpResponseRedirect(url)
@admin_required
def delete_user(request, username):
person = get_object_or_404(Person, username=username)
if request.method == 'POST':
deleted_by = request.user
person.deactivate(deleted_by)
messages.success(request, "User '%s' was deleted succesfully" % person)
return HttpResponseRedirect(person.get_absolute_url())
return render(
template_name='karaage/people/person_confirm_delete.html',
context=locals(),
request=request)
@login_required
def user_detail(request, username):
config = tables.RequestConfig(request, paginate={"per_page": 5})
person = get_object_or_404(Person, username=username)
if not person.can_view(request):
return HttpResponseForbidden(
'<h1>Access Denied</h1>'
'<p>You do not have permission to view details '
'about this person.</p>')
leader_project_list = Project.objects.filter(
leaders=person, is_active=True)
leader_project_list = ProjectTable(
leader_project_list, prefix="leader-")
config.configure(leader_project_list)
delegate_institute_list = person.delegate_for.all()
delegate_institute_list = delegate_institute_list.select_related()
delegate_institute_list = InstituteTable(
delegate_institute_list, prefix="delegate")
config.configure(delegate_institute_list)
return render(
template_name='karaage/people/person_detail.html', context=locals(),
request=request)
@admin_required
def user_verbose(request, username):
person = get_object_or_404(Person, username=username)
from karaage.datastores import get_account_details
account_details = {}
for ua in person.account_set.filter(date_deleted__isnull=True):
details = get_account_details(ua)
account_details[ua] = details
return render(
template_name='karaage/people/person_verbose.html', context=locals(),
request=request)
@admin_required
def activate(request, username):
person = get_object_or_404(Person, username=username)
if person.is_active:
return HttpResponseBadRequest("<h1>Bad Request</h1>")
if request.method == 'POST':
approved_by = request.user
person.activate(approved_by)
return HttpResponseRedirect(
reverse('kg_person_password', args=[person.username]))
return render(
template_name='karaage/people/person_reactivate.html',
context={'person': person},
request=request)
@sensitive_post_parameters('new1', 'new2')
@admin_required
def password_change(request, username):
person = get_object_or_404(Person, username=username)
if request.POST:
form = AdminPasswordChangeForm(data=request.POST, person=person)
if form.is_valid():
form.save()
messages.success(request, "Password changed successfully")
if person.is_locked():
person.unlock()
return HttpResponseRedirect(person.get_absolute_url())
else:
form = AdminPasswordChangeForm(person=person)
return render(
template_name='karaage/people/person_password.html',
context={'person': person, 'form': form},
request=request)
@admin_required
def lock_person(request, username):
person = get_object_or_404(Person, username=username)
if request.method == 'POST':
person.lock()
messages.success(request, "%s's account has been locked" % person)
return HttpResponseRedirect(person.get_absolute_url())
return render(
template_name='karaage/people/person_confirm_lock.html',
context=locals(),
request=request)
@admin_required
def unlock_person(request, username):
person = get_object_or_404(Person, username=username)
if request.method == 'POST':
person.unlock()
messages.success(request, "%s's account has been unlocked" % person)
return HttpResponseRedirect(person.get_absolute_url())
return render(
template_name='karaage/people/person_confirm_unlock.html',
context=locals(),
request=request)
@admin_required
def bounced_email(request, username):
person = get_object_or_404(Person, username=username)
leader_list = []
for project in person.projects.filter(is_active=True):
for leader in project.leaders.filter(
is_active=True, login_enabled=True):
leader_list.append({'project': project, 'leader': leader})
if request.method == 'POST':
person.lock()
send_bounced_warning(person, leader_list)
messages.success(
request,
"%s's account has been locked and emails have been sent" % person)
common.log.change(
person,
'Emails sent to project leaders and account locked')
return HttpResponseRedirect(person.get_absolute_url())
leader_list = LeaderTable(leader_list)
tables.RequestConfig(request).configure(leader_list)
return render(
template_name='karaage/people/person_bounced_email.html',
context=locals(),
request=request)
@admin_required
def person_logs(request, username):
obj = get_object_or_404(Person, username=username)
breadcrumbs = [
("People", reverse("kg_person_list")),
(six.text_type(obj), reverse("kg_person_detail", args=[obj.username]))
]
return common.log_list(request, breadcrumbs, obj)
@admin_required
def add_comment(request, username):
obj = get_object_or_404(Person, username=username)
breadcrumbs = [
("People", reverse("kg_person_list")),
(six.text_type(obj), reverse("kg_person_detail", args=[obj.username]))
]
return common.add_comment(request, breadcrumbs, obj)
@login_required
def password_request(request, username):
person = get_object_or_404(Person, username=username)
error = None
post_reset_redirect = reverse(
'kg_person_reset_done', args=[person.username])
if not person.can_view(request):
return HttpResponseForbidden(
'<h1>Access Denied</h1>'
'<p>You do not have permission to view details '
'about this user.</p>')
elif not person.is_active:
error = "Person '%s' is deleted." % person.username
elif not person.login_enabled:
error = "Person '%s' is locked." % person.username
elif request.method == "POST":
send_reset_password_email(person)
return HttpResponseRedirect(post_reset_redirect)
var = {
'person': person,
'error': error,
}
return render(
template_name='karaage/people/person_password_request.html',
context=var,
request=request)
@login_required
def password_request_done(request, username):
person = get_object_or_404(Person, username=username)
if not person.can_view(request):
return HttpResponseForbidden(
'<h1>Access Denied</h1>'
'<p>You do not have permission to view details '
'about this user.</p>')
var = {
'person': person,
}
return render(
template_name='karaage/people/person_password_request_done.html',
context=var,
request=request)
class PasswordResetConfirmView(views.PasswordResetConfirmView):
form_class = SetPasswordForm
template_name = 'karaage/people/person_reset_confirm.html'
class PasswordResetCompleteView(views.PasswordResetCompleteView):
template_name = 'karaage/people/person_reset_complete.html'
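# Illustrative URLconf wiring for the two subclassed auth views above (the
# paths and URL names are assumptions, not taken from karaage's real urls.py):
#
# from django.urls import path
# from karaage.people.views import persons
#
# urlpatterns = [
#     path('persons/reset/<uidb64>/<token>/',
#          persons.PasswordResetConfirmView.as_view(),
#          name='password_reset_confirm'),
#     path('persons/reset/done/',
#          persons.PasswordResetCompleteView.as_view(),
#          name='password_reset_complete'),
# ]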
|
brianmay/karaage
|
karaage/people/views/persons.py
|
Python
|
gpl-3.0
| 12,094
|
[
"Brian"
] |
8054963ed7b279cd843e2273fc0714e557e13ad9c4e6c68b105cd27ffa2c528f
|
#!/usr/bin/env python
"""
This file provides a more advanced example of vtkTable access and
manipulation methods.
"""
from __future__ import print_function
from vtk import *
#------------------------------------------------------------------------------
# Script Entry Point (i.e., main() )
#------------------------------------------------------------------------------
if __name__ == "__main__":
""" Main entry point of this python script """
print("vtkTable Example 4: Accessing vtkTable data elements")
# Load our table from a CSV file (covered in table2.py)
csv_source = vtkDelimitedTextReader()
csv_source.SetFieldDelimiterCharacters(",")
csv_source.SetHaveHeaders(True)
csv_source.SetFileName("table_data.csv")
csv_source.Update()
csv_source.GetOutput().Dump(6)
T = csv_source.GetOutput()
# Print some information about the table
print("Number of Columns =", T.GetNumberOfColumns())
print("Number of Rows =", T.GetNumberOfRows())
print("Get column 1, row 4 data: ", T.GetColumn(1).GetValue(4))
# Add a new row to the table
new_row = [8, "Luis", 68]
for i in range( T.GetNumberOfColumns()):
T.GetColumn(i).InsertNextValue( str(new_row[i]) )
print("Table after new row appended:")
T.Dump(6)
print("vtkTable Example 4: Finished.")
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/Infovis/Python/tables4.py
|
Python
|
gpl-3.0
| 1,335
|
[
"VTK"
] |
f86181bc71442ae333865a4f0e3ede59a204d47d256efe4dce9b69b98787904e
|
#!/usr/bin/env python
#Dan Blankenberg
"""
Reads a list of intervals and a maf. Outputs a new set of intervals with statistics appended.
"""
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.intervals.io
from bx.bitset import BitSet
from galaxy.tools.util import maf_utilities
assert sys.version_info[:2] >= ( 2, 4 )
def __main__():
maf_source_type = sys.argv.pop( 1 )
input_maf_filename = sys.argv[1].strip()
input_interval_filename = sys.argv[2].strip()
output_filename = sys.argv[3].strip()
dbkey = sys.argv[4].strip()
try:
chr_col = int( sys.argv[5].strip() ) - 1
start_col = int( sys.argv[6].strip() ) - 1
end_col = int( sys.argv[7].strip() ) - 1
    except (IndexError, ValueError):
print >>sys.stderr, "You appear to be missing metadata. You can specify your metadata by clicking on the pencil icon associated with your interval file."
sys.exit()
summary = sys.argv[8].strip()
    summary = summary.lower() == "true"
mafIndexFile = "%s/maf_index.loc" % sys.argv[9]
try:
maf_index_filename = sys.argv[10].strip()
    except IndexError:
maf_index_filename = None
index = index_filename = None
if maf_source_type == "user":
#index maf for use here
index, index_filename = maf_utilities.open_or_build_maf_index( input_maf_filename, maf_index_filename, species = [dbkey] )
if index is None:
print >>sys.stderr, "Your MAF file appears to be malformed."
sys.exit()
elif maf_source_type == "cached":
#access existing indexes
index = maf_utilities.maf_index_by_uid( input_maf_filename, mafIndexFile )
if index is None:
print >> sys.stderr, "The MAF source specified (%s) appears to be invalid." % ( input_maf_filename )
sys.exit()
else:
print >>sys.stdout, 'Invalid source type specified: %s' % maf_source_type
sys.exit()
out = open(output_filename, 'w')
num_region = None
species_summary = {}
total_length = 0
#loop through interval file
for num_region, region in enumerate( bx.intervals.io.NiceReaderWrapper( open( input_interval_filename, 'r' ), chrom_col = chr_col, start_col = start_col, end_col = end_col, fix_strand = True, return_header = False, return_comments = False ) ):
src = "%s.%s" % ( dbkey, region.chrom )
region_length = region.end - region.start
total_length += region_length
coverage = { dbkey: BitSet( region_length ) }
for block in index.get_as_iterator( src, region.start, region.end ):
for spec in maf_utilities.get_species_in_block( block ):
if spec not in coverage: coverage[spec] = BitSet( region_length )
for block in maf_utilities.iter_blocks_split_by_species( block ):
if maf_utilities.component_overlaps_region( block.get_component_by_src( src ), region ):
#need to chop and orient the block
block = maf_utilities.orient_block_by_region( maf_utilities.chop_block_by_region( block, src, region ), src, region, force_strand = '+' )
start_offset, alignment = maf_utilities.reduce_block_by_primary_genome( block, dbkey, region.chrom, region.start )
for i in range( len( alignment[dbkey] ) ):
for spec, text in alignment.items():
if text[i] != '-':
coverage[spec].set( start_offset + i )
if summary:
#record summary
for key in coverage.keys():
if key not in species_summary: species_summary[key] = 0
species_summary[key] = species_summary[key] + coverage[key].count_range()
else:
#print coverage for interval
coverage_sum = coverage[dbkey].count_range()
out.write( "%s\t%s\t%s\t%s\n" % ( "\t".join( region.fields ), dbkey, coverage_sum, region_length - coverage_sum ) )
keys = coverage.keys()
keys.remove( dbkey )
keys.sort()
for key in keys:
coverage_sum = coverage[key].count_range()
out.write( "%s\t%s\t%s\t%s\n" % ( "\t".join( region.fields ), key, coverage_sum, region_length - coverage_sum ) )
if summary:
out.write( "#species\tnucleotides\tcoverage\n" )
for spec in species_summary:
out.write( "%s\t%s\t%.4f\n" % ( spec, species_summary[spec], float( species_summary[spec] ) / total_length ) )
out.close()
if num_region is not None:
print "%i regions were processed with a total length of %i." % ( num_region + 1, total_length )
maf_utilities.remove_temp_index_file( index_filename )
if __name__ == "__main__": __main__()
|
volpino/Yeps-EURAC
|
tools/maf/maf_stats.py
|
Python
|
mit
| 4,889
|
[
"Galaxy"
] |
a92deb5f6f599237035645b598821594635ea44682d88d665ccc10eae352ad18
|
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Invenio Authorlist Data Conversion Engine. """
import time
try:
import json
except ImportError:
import simplejson as json
from xml.dom import minidom
try:
from xml.etree import ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
from invenio.webuser import page_not_authorized
from invenio.access_control_engine import acc_authorize_action
import invenio.authorlist_config as cfg
from invenio.search_engine import perform_request_search, record_exists
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibedit_utils import get_record
# from lxml import etree
from invenio.authorlist_dblayer import get_owner
from invenio.textutils import escape_latex
# default name that will be used, when affiliation name is missing
UNKNOWN_AFFILIATION = 'Unknown Affiliation'
# Namespaces used in the xml file
NAMESPACES = {'cal': 'http://www.slac.stanford.edu/spires/hepnames/authors_xml/',
'foaf': 'http://xmlns.com/foaf/0.1/',
}
def retrieve_data_from_record(recid):
"""
Extract data from a record id in order to import it to the Author list
interface
"""
if not record_exists(recid):
return
output = {}
DEFAULT_AFFILIATION_TYPE = cfg.OPTIONS.AUTHOR_AFFILIATION_TYPE[0]
DEFAULT_IDENTIFIER = cfg.OPTIONS.IDENTIFIERS_LIST[0]
IDENTIFIERS_MAPPING = cfg.OPTIONS.IDENTIFIERS_MAPPING
bibrecord = get_record(recid)
try:
paper_title = get_fieldvalues(recid, '245__a')[0]
except IndexError:
paper_title = ""
try:
collaboration_name = get_fieldvalues(recid, '710__g')
except IndexError:
collaboration_name = ""
try:
experiment_number = get_fieldvalues(recid, '693__e')
except IndexError:
experiment_number = ""
record_authors = bibrecord.get('100', [])
record_authors.extend(bibrecord.get('700', []))
author_list = []
unique_affiliations = []
for i, field_instance in enumerate(record_authors, 1):
family_name = ""
given_name = ""
name_on_paper = ""
status = ""
affiliations = []
identifiers = []
field = field_instance[0]
for subfield_code, subfield_value in field:
if subfield_code == "a":
try:
family_name = subfield_value.split(',')[0]
given_name = subfield_value.split(',')[1].lstrip()
                except IndexError:
pass
name_on_paper = subfield_value
elif subfield_code == "u":
affiliations.append([subfield_value, DEFAULT_AFFILIATION_TYPE])
unique_affiliations.append(subfield_value)
elif subfield_code == "i":
# FIXME This will currently work only with INSPIRE IDs
id_prefix = subfield_value.split("-")[0]
if id_prefix in IDENTIFIERS_MAPPING:
identifiers.append([subfield_value, IDENTIFIERS_MAPPING[id_prefix]])
if not identifiers:
identifiers.append(['', DEFAULT_IDENTIFIER])
if not affiliations:
affiliations.append([UNKNOWN_AFFILIATION, DEFAULT_AFFILIATION_TYPE])
unique_affiliations.append(UNKNOWN_AFFILIATION)
author_list.append([
i, # Row number
'', # Place holder for the web interface
family_name,
given_name,
name_on_paper,
status,
affiliations,
identifiers
])
unique_affiliations = list(set(unique_affiliations))
output.update({'authors': author_list})
# Generate all the affiliation related information
affiliation_list = []
for i, affiliation in enumerate(unique_affiliations, 1):
institution = perform_request_search(c="Institutions", p='110__u:"' + affiliation + '"')
full_name = affiliation
if len(institution) == 1:
full_name_110_a = get_fieldvalues(institution[0], '110__a')
if full_name_110_a:
full_name = str(full_name_110_a[0])
full_name_110_b = get_fieldvalues(institution[0], '110__b')
if full_name_110_b:
full_name += ', ' + str(full_name_110_b[0])
affiliation = [i,
'',
affiliation,
'',
full_name,
'',
True,
'']
affiliation_list.append(affiliation)
output.update({'affiliations': affiliation_list})
output.update({'paper_title': paper_title,
'collaboration': collaboration_name,
'experiment_number': experiment_number,
'last_modified': int(time.time()),
'reference_ids': [],
'paper_id': '1'})
return output
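# For reference, the dictionary returned by retrieve_data_from_record has this
# shape (read directly off the assembly code above, values abbreviated):
# {'authors': [[row, '', family, given, name_on_paper, status,
#               [[affiliation, type], ...], [[identifier, scheme], ...]], ...],
#  'affiliations': [[row, '', acronym, '', full_name, '', True, ''], ...],
#  'paper_title': ..., 'collaboration': ..., 'experiment_number': ...,
#  'last_modified': <unix timestamp>, 'reference_ids': [], 'paper_id': '1'}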
def retrieve_data_from_xml(xml):
"""
Extract data from an XML file to import it to the Author list
interface
"""
def get_element_value_helper(element, tag):
"""
Helper that takes an element and returns text from the first node
of that element
"""
text = ''
elements_list = element.getElementsByTagName(tag)
if elements_list:
child = elements_list[0].firstChild
if child:
text = child.nodeValue
return text
output = {}
# Save the affiliatons variable, the default value for "Affiliation" column
# will be always first value from type_of_affiliation table
type_of_affiliation = cfg.OPTIONS.AUTHOR_AFFILIATION_TYPE
# Save the default identifier - first element from the list of identifiers
default_identifier = cfg.OPTIONS.IDENTIFIERS_LIST[0]
# Save identifiers mapping
identifiers_mapping = cfg.OPTIONS.IDENTIFIERS_MAPPING
parsed_xml = minidom.parseString(xml)
# Extract collaboration name and experiment number
collaboration_name = ''
experiment_number = ''
collaborations = parsed_xml.getElementsByTagName('cal:collaborations')
if len(collaborations) == 1:
collaboration_name = get_element_value_helper(collaborations[0], 'foaf:name')
experiment_number = get_element_value_helper(collaborations[0], 'cal:experimentNumber')
# Extract affiliations
affiliation_list = []
affiliation_id_name = {}
affiliations = parsed_xml.getElementsByTagName('foaf:Organization')
for i, affiliation in enumerate(affiliations):
affiliation_id = affiliation.getAttribute('id') or ''
affiliation_name = get_element_value_helper(affiliation, 'foaf:name')
affiliation_acronym = get_element_value_helper(affiliation, 'cal:orgName')
if not affiliation_acronym:
# No acronym ? Use the name instead
affiliation_acronym = affiliation_name
affiliation_address = get_element_value_helper(affiliation, 'cal:orgAddress')
if not affiliation_address:
affiliation_address = affiliation_name
affiliation_domain = get_element_value_helper(affiliation, 'cal:orgDomain')
# saving {id:name}, it will be needed for authors affiliations
if affiliation_id:
# According to
# http://stackoverflow.com/questions/8214932/how-to-check-if-a-value-exists-in-a-dictionary-python
# itervalues is faster than values() and viewvalues()
if affiliation_acronym in affiliation_id_name.itervalues():
# in case we have a duplicate of acronym, make it unique by
# appending the iteration number
affiliation_acronym += str(i+1)
affiliation_id_name[affiliation_id] = affiliation_acronym
affiliation_info = [long(i+1),
'',
affiliation_acronym,
'',
affiliation_address,
affiliation_domain,
True,
'']
affiliation_list.append(affiliation_info)
# Extract authors
author_list = []
authors = parsed_xml.getElementsByTagName('foaf:Person')
for i, author in enumerate(authors):
first_name = get_element_value_helper(author, 'foaf:givenName')
# In case there was no given name under previous field, we search for initials in cal:authorNamePaperGiven
if not first_name:
first_name = get_element_value_helper(author, 'cal:authorNamePaperGiven')
last_name = get_element_value_helper(author, 'foaf:familyName')
full_name = get_element_value_helper(author, 'cal:authorNamePaper')
status = get_element_value_helper(author, 'cal:authorStatus')
# Extract author affiliations
author_affiliations = []
if author.getElementsByTagName('cal:authorAffiliations'):
for afil in author.getElementsByTagName('cal:authorAffiliations')[0].getElementsByTagName('cal:authorAffiliation'):
a_id = afil.getAttribute('organizationid')
if afil.getAttribute('connection') in type_of_affiliation:
affiliation_type = afil.getAttribute('connection')
else:
affiliation_type = type_of_affiliation[0]
author_affiliations.append([affiliation_id_name.get(a_id, UNKNOWN_AFFILIATION), affiliation_type])
else:
author_affiliations = [UNKNOWN_AFFILIATION, type_of_affiliation[0]]
identifiers = []
if author.getElementsByTagName('cal:authorids'):
for author_id in author.getElementsByTagName('cal:authorids')[0].getElementsByTagName('cal:authorid'):
if author_id.getAttribute('source') in identifiers_mapping and author_id.firstChild:
identifiers.append([
author_id.firstChild.nodeValue,
identifiers_mapping[author_id.getAttribute('source')]])
if not identifiers:
identifiers.append(['', default_identifier])
author_info = [long(i+1),
'',
last_name,
first_name,
full_name,
status,
author_affiliations,
identifiers]
author_list.append(author_info)
output.update({'authors': author_list})
output.update({'affiliations': affiliation_list})
# Add generic information about the paper
output.update({'collaboration': collaboration_name,
'experiment_number': experiment_number,
'last_modified': int(time.time()),
'reference_ids': [],
'paper_id': '1',
'paper_title': ''})
return output
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runauthorlist')
if auth_code != 0:
referer = '/authorlist/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="authorlist")
else:
return None
def check_user_rights(user_id, paper_id):
"""Check if user can modify this paper"""
# if the paper_id is empty - user is trying to create new record
# we allow him, because everyone can do that
if not paper_id or (user_id == get_owner(paper_id)):
return True
return False
class Converter(object):
CONTENT_TYPE = 'text/plain'
FILE_NAME = 'converted.txt'
def __init__(self):
raise NotImplementedError
def dump(self, data):
raise NotImplementedError
def dumps(self, data):
raise NotImplementedError
class NA62Latex(Converter):
FILE_NAME = 'la.tex'
def __init__(self):
pass
def dump(self, data):
pass
def dumps(self, data):
pass
class ElsevierArticle(Converter):
CONTENT_TYPE = 'text/plain'
FILE_NAME = 'elsarticle.tex'
cal = '{http://www.slac.stanford.edu/spires/hepnames/authors_xml/}'
foaf = '{http://xmlns.com/foaf/0.1/}'
def __init__(self):
pass
def dictionary_to_list(self, node):
res = {}
res[node.tag] = []
self.xmltodict(node, res[node.tag])
reply = {}
reply[node.tag] = {'value': res[node.tag], 'attribs': node.attrib, 'tail': node.tail}
return reply
def xmltodict(self, node, res):
rep = {}
if len(node):
for n in list(node):
rep[node.tag] = []
value = self.xmltodict(n, rep[node.tag])
if len(n):
value = {'value': rep[node.tag], 'attributes': n.attrib, 'tail': n.tail}
res.append({n.tag: value})
else:
res.append(rep[node.tag][0])
else:
value = {}
value = {'value': node.text, 'attributes': node.attrib, 'tail': node.tail}
res.append({node.tag: value})
return
def get_organizations(self, organizations):
organization_dict = dict()
for orgs_element in organizations:
key = orgs_element.keys()[0]
if key == self.foaf + 'Organization':
for name_element in orgs_element[key]['value']:
value_key = name_element.keys()[0]
if value_key == self.cal + 'orgAddress':
if name_element[value_key]['value']:
organization_dict[orgs_element[key]['attributes']['id']] = name_element[value_key]['value'].encode('utf-8')
else:
organization_dict[orgs_element[key]['attributes']['id']] = ''
break
return organization_dict
def get_authors(self, authors):
author_list = []
for auth_element in authors:
key = auth_element.keys()[0]
if key == self.foaf + 'Person':
affiliation_list = []
given_name = ''
family_name = ''
for name_element in auth_element[key]['value']:
value_key = name_element.keys()[0]
if value_key == self.foaf + 'familyName' and name_element[value_key]['value']:
family_name = name_element[value_key]['value'].encode('utf-8')
elif value_key == self.foaf + 'givenName' and name_element[value_key]['value']:
given_name = name_element[value_key]['value'].encode('utf-8')
elif value_key == self.cal + 'authorAffiliations':
for aff_element in name_element[value_key]['value']:
aff_key = aff_element.keys()[0]
if aff_key == self.cal + 'authorAffiliation':
if aff_element[aff_key]['attributes']['connection'] == 'Affiliated with':
affiliation_list.append(aff_element[aff_key]['attributes']['organizationid'])
author_list.append([(given_name, family_name), tuple(affiliation_list)])
return author_list
def dump(self, data):
AuthorsXMLConverter = Converters.get('authorsxml')
AuthorsXML = dumps(data, AuthorsXMLConverter)
root = ET.fromstring(AuthorsXML)
tree = ET.ElementTree(root)
res = self.dictionary_to_list(tree.getroot())
collaboration_author_list_values = res['collaborationauthorlist']['value']
organization_dict = dict()
author_list = []
for element in collaboration_author_list_values:
key = element.keys()[0]
# if the value of the key is empty, start next loop cycle
if element[key]['value'] is None:
continue
if key == self.cal + 'organizations':
organization_dict = self.get_organizations(element[key]['value'])
elif key == self.cal + 'authors':
author_list = self.get_authors(element[key]['value'])
clusters = []
organization_codes = []
for element in author_list:
if len(element[1]) >= 1:
organization_code = element[1][0]
other_affiliations = element[1][1:]
author = [element[0]]
if other_affiliations:
author.extend(other_affiliations)
# if this organization already exists in the cluster
if organization_code in organization_codes:
for cluster in clusters:
if cluster[0] == organization_code:
cluster.append(author)
break
else:
organization_codes.append(organization_code)
clusters.append([organization_code, author])
myout = ""
myout += "\\documentclass[a4paper,12pt]{article}\r\n"
myout += "\\usepackage[utf8]{inputenc}\r\n"
myout += "\\begin{document}\r\n"
myout += "\\begin{center}\r\n"
myout += "{\\Large Collaboration}\\\\\r\n"
myout += "\\vspace{2mm}\r\n%\r\n"
primary_output_string = ""
secondary_affiliation_count = 1
secondary_affiliations = ""
secondary_affiliations_pos = {}
for data in clusters:
primary_output = []
organization_code = data[0]
for author in data[1:]:
name = " " + str(escape_latex(author[0][0])) + '~' + str(escape_latex(author[0][1]))
if len(author) > 1:
for sec_affiliation in author[1:]:
if sec_affiliation in organization_dict.keys():
if organization_dict[sec_affiliation] in secondary_affiliations_pos.keys():
name += "$\\,$\\footnotemark[" + str(secondary_affiliations_pos[organization_dict[sec_affiliation]]) + "]"
else:
name += "$\\,$\\footnotemark[" + str(secondary_affiliation_count) + "]"
secondary_affiliations += "%\r\n\\footnotetext[" + str(secondary_affiliation_count) + "]{" + str(escape_latex(organization_dict[sec_affiliation])) + "}\r\n"
secondary_affiliations_pos[organization_dict[sec_affiliation]] = secondary_affiliation_count
secondary_affiliation_count += 1
primary_output.append(name)
if organization_dict.get(data[0]):
organization = organization_dict.get(data[0])
else:
organization = UNKNOWN_AFFILIATION
primary_output_string += ',\r\n'.join(primary_output) + " \\\\\r\n{\\em \\small " + str(escape_latex(organization)) + "} \\\\[0.2cm]\r\n%\r\n"
myout += primary_output_string
myout += "\\end{center}\r\n"
myout += "\\setcounter{footnote}{0}\r\n"
myout += secondary_affiliations
myout += "\\end{document}\r\n"
return myout
def dumps(self, data):
return self.dump(data)
class APSpaper(Converter):
CONTENT_TYPE = 'text/plain'
FILE_NAME = 'APSpaper.tex'
def __init__(self):
pass
def dump(self, data):
AuthorsXMLConverter = Converters.get('authorsxml')
AuthorsXML = dumps(data, AuthorsXMLConverter)
organizations_list = []
authors_list = []
root = ET.fromstring(AuthorsXML)
# save affiliations
for organization in root.findall('{%s}organizations/{%s}Organization' % (NAMESPACES['cal'], NAMESPACES['foaf'])):
org_id = organization.attrib['id']
org_name = ''
if organization.find('{%s}name' % NAMESPACES['foaf']) is not None:
org_name = organization.find('{%s}name' % NAMESPACES['foaf']).text or ''
organizations_list.append([org_id, org_name.encode('utf-8')])
# save authors
for author in root.findall('{%s}authors/{%s}Person' % (NAMESPACES['cal'], NAMESPACES['foaf'])):
author_name = ''
author_affiliations = []
if author.find('{%s}authorNamePaper' % NAMESPACES['cal']) is not None:
author_name = author.find('{%s}authorNamePaper' % NAMESPACES['cal']).text or ''
for affil in author.findall('{%(cal)s}authorAffiliations/{%(cal)s}authorAffiliation' % {'cal': NAMESPACES['cal']}):
author_affiliations.append(affil.attrib['organizationid'])
authors_list.append([author_name.encode('utf-8'), author_affiliations])
myout = ''
for author in authors_list:
myout += '\\author{' + str(escape_latex(author[0])) + '$^{' + ','.join(author[1]) + '}$}\r\n'
for org in organizations_list:
myout += '\\affiliation{$^{' + str(org[0]) + '}$ ' + str(escape_latex(org[1])) + '}\r\n'
return myout
def dumps(self, data):
return self.dump(data)
class AuthorsXML(Converter):
CONTENT_TYPE = 'text/xml'
FILE_NAME = 'authors.xml'
def __init__(self):
pass
def create_affiliation(self, document, parsed, organization_ids):
affiliation = document.createElement('cal:authorAffiliation')
affiliation_acronym = parsed[cfg.JSON.AFFILIATION_ACRONYM]
affiliation_status = parsed[cfg.JSON.AFFILIATION_STATUS]
if affiliation_acronym not in organization_ids:
affiliation.setAttribute('organizationid',
'Error - there is no organization called ' +
affiliation_acronym)
else:
affiliation.setAttribute('organizationid',
organization_ids[affiliation_acronym])
affiliation.setAttribute('connection', affiliation_status)
return affiliation
def create_identifier(self, document, parsed):
identifier = document.createElement('cal:authorid')
identifier_number = parsed[cfg.JSON.IDENTIFIER_NUMBER]
identifier_name = parsed[cfg.JSON.IDENTIFIER_NAME]
identifier.setAttribute('source', identifier_name)
identifier_text = document.createTextNode(identifier_number)
identifier.appendChild(identifier_text)
return identifier
def create_authors(self, document, root, parsed, organization_ids):
parsed_authors = parsed[cfg.JSON.AUTHORS_KEY]
authors = document.createElement('cal:authors')
root.appendChild(authors)
for parsed_author in parsed_authors:
author = self.create_author(document, parsed_author, organization_ids)
authors.appendChild(author)
def create_author(self, document, parsed, organization_ids):
author = document.createElement('foaf:Person')
# paper name
paper_name = document.createElement('cal:authorNamePaper')
paper_name_info = parsed[cfg.JSON.PAPER_NAME]
paper_name_text = document.createTextNode(paper_name_info)
paper_name.appendChild(paper_name_text)
author.appendChild(paper_name)
# given name
given_name_info = parsed[cfg.JSON.GIVEN_NAME]
if (cfg.EMPTY.match(given_name_info) is None):
given_name = document.createElement('foaf:givenName')
given_name_text = document.createTextNode(given_name_info)
given_name.appendChild(given_name_text)
author.appendChild(given_name)
# family name
family_name_info = parsed[cfg.JSON.FAMILY_NAME]
if (cfg.EMPTY.match(family_name_info) is None):
family_name = document.createElement('foaf:familyName')
family_name_text = document.createTextNode(family_name_info)
family_name.appendChild(family_name_text)
author.appendChild(family_name)
# status
author_status_info = parsed[cfg.JSON.STATUS]
if (author_status_info):
author_status = document.createElement('cal:authorStatus')
author_status_text = document.createTextNode(author_status_info)
author_status.appendChild(author_status_text)
author.appendChild(author_status)
# collaboration
collaboration = document.createElement('cal:authorCollaboration')
collaboration.setAttribute('collaborationid', cfg.AuthorsXML.COLLABORATION_ID)
author.appendChild(collaboration)
# affiliations
affiliations = document.createElement('cal:authorAffiliations')
author.appendChild(affiliations)
for parsed_affiliation in parsed[cfg.JSON.AFFILIATIONS]:
affiliation = self.create_affiliation(document, parsed_affiliation, organization_ids)
affiliations.appendChild(affiliation)
# identifiers
identifiers = document.createElement('cal:authorids')
author.appendChild(identifiers)
for parsed_identifier in parsed[cfg.JSON.IDENTIFIERS]:
identifier = self.create_identifier(document, parsed_identifier)
identifiers.appendChild(identifier)
return author
def create_collaboration(self, document, root, parsed):
# collaborations
collaborations = document.createElement('cal:collaborations')
collaboration = document.createElement('cal:collaboration')
collaboration.setAttribute('id', cfg.AuthorsXML.COLLABORATION_ID)
collaborations.appendChild(collaboration)
# name
name = document.createElement('foaf:name')
name_info = parsed[cfg.JSON.COLLABORATION]
name_text = document.createTextNode(name_info)
name.appendChild(name_text)
collaboration.appendChild(name)
# experiment number
experiment_number_info = parsed[cfg.JSON.EXPERIMENT_NUMBER]
if (cfg.EMPTY.match(experiment_number_info) is None):
experiment_number = document.createElement('cal:experimentNumber')
experiment_number_text = document.createTextNode(experiment_number_info)
experiment_number.appendChild(experiment_number_text)
collaboration.appendChild(experiment_number)
root.appendChild(collaborations)
def create_document(self):
dom = minidom.getDOMImplementation()
document = dom.createDocument(None, 'collaborationauthorlist', None)
root = document.documentElement
root.setAttribute('xmlns:foaf', 'http://xmlns.com/foaf/0.1/')
root.setAttribute('xmlns:cal', 'http://www.slac.stanford.edu/spires/hepnames/authors_xml/')
return document, root
def create_header(self, document, root, parsed):
# creation date
creation_date = document.createElement('cal:creationDate')
creation_date_info = time.strftime(cfg.AuthorsXML.TIME_FORMAT)
creation_date_text = document.createTextNode(creation_date_info)
creation_date.appendChild(creation_date_text)
root.appendChild(creation_date)
# publication reference
for reference_info in parsed[cfg.JSON.REFERENCE_IDS]:
reference = document.createElement('cal:publicationReference')
reference_text = document.createTextNode(reference_info)
reference.appendChild(reference_text)
root.appendChild(reference)
def create_organizations(self, document, root, parsed, ids):
parsed_organizations = parsed[cfg.JSON.AFFILIATIONS_KEY]
# organizations container
organizations = document.createElement('cal:organizations')
root.appendChild(organizations)
# create individual organizations and append them
for parsed_organization in parsed_organizations:
organization = self.create_organization(document, parsed_organization, ids)
organizations.appendChild(organization)
def create_organization(self, document, parsed, ids):
acronym = parsed[cfg.JSON.ACRONYM]
organization = document.createElement('foaf:Organization')
organization.setAttribute('id', ids[acronym])
# create the domain node if field is set
domain_info = parsed[cfg.JSON.DOMAIN]
if (cfg.EMPTY.match(domain_info) is None):
domain = document.createElement('cal:orgDomain')
domain_text = document.createTextNode(domain_info)
domain.appendChild(domain_text)
organization.appendChild(domain)
# organization name, no presence check, already done on the client side
name = document.createElement('foaf:name')
name_info = parsed[cfg.JSON.NAME]
name_text = document.createTextNode(name_info)
name.appendChild(name_text)
organization.appendChild(name)
# organization acronym
org_acronym = document.createElement('cal:orgName')
org_acronym_text = document.createTextNode(acronym)
org_acronym.appendChild(org_acronym_text)
organization.appendChild(org_acronym)
# organization identifier
org_name_info = parsed[cfg.JSON.SPIRES_ID]
if (cfg.EMPTY.match(org_name_info) is None):
org_name = document.createElement('cal:orgName')
org_name.setAttribute('source', cfg.AuthorsXML.SPIRES)
org_name_text = document.createTextNode(org_name_info)
org_name.appendChild(org_name_text)
organization.appendChild(org_name)
else:
org_name_info = parsed[cfg.JSON.NAME]
org_address = document.createElement('cal:orgAddress')
org_address_text = document.createTextNode(org_name_info)
org_address.appendChild(org_address_text)
organization.appendChild(org_address)
# membership
org_status_info = parsed[cfg.JSON.MEMBER]
if (not org_status_info):
org_status_info = cfg.AuthorsXML.NONMEMBER
else:
org_status_info = cfg.AuthorsXML.MEMBER
org_status = document.createElement('cal:orgStatus')
org_status_text = document.createTextNode(org_status_info)
org_status.appendChild(org_status_text)
organization.appendChild(org_status)
# umbrella organization/group
group_info = parsed[cfg.JSON.UMBRELLA]
if (cfg.EMPTY.match(group_info) is None):
if group_info in ids.keys():
group = document.createElement('cal:group')
group.setAttribute('with', ids[group_info])
organization.appendChild(group)
return organization
def dump(self, data):
parsed = json.loads(data)
document, root = self.create_document()
affiliations = parsed[cfg.JSON.AFFILIATIONS_KEY]
organization_ids = self.generate_organization_ids(affiliations)
self.create_header(document, root, parsed)
self.create_collaboration(document, root, parsed)
self.create_organizations(document, root, parsed, organization_ids)
self.create_authors(document, root, parsed, organization_ids)
return document
def dumps(self, data):
# FIX for toprettyxml function from website:
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
def fixed_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if len(self.childNodes) == 1 and self.childNodes[0].nodeType == minidom.Node.TEXT_NODE:
writer.write(">")
self.childNodes[0].writexml(writer, "", "", "")
writer.write("</%s>%s" % (self.tagName, newl))
return
writer.write(">%s" % (newl))
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % (newl))
# replace minidom's function with ours
minidom.Element.writexml = fixed_writexml
# End of FIX
return self.dump(data).toprettyxml(indent=' ', newl='\r\n', encoding='utf-8')
def generate_organization_ids(self, organizations):
ids = {}
# Map each organization acronym to an id of the kind 'o[index]'
for index, organization in enumerate(organizations):
acronym = organization[cfg.JSON.ACRONYM]
ids[acronym] = cfg.AuthorsXML.ORGANIZATION_ID + str(index)
return ids
class Converters:
__converters__ = {'authorsxml': AuthorsXML, 'elsevier': ElsevierArticle, 'aps': APSpaper}
@classmethod
def get(cls, format):
return cls.__converters__.get(format)
def dump(data, converter):
return converter().dump(data)
def dumps(data, converter):
return converter().dumps(data)
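# Minimal usage sketch for this module (illustrative; the JSON string must
# carry the cfg.JSON keys that AuthorsXML reads, which is an assumption about
# the caller, not shown here):
#
#   converter = Converters.get('authorsxml')    # -> AuthorsXML class
#   xml_text = dumps(json_payload, converter)   # json_payload: a JSON string
#   latex_text = dumps(json_payload, Converters.get('aps'))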
|
jmartinm/invenio
|
modules/webauthorlist/lib/authorlist_engine.py
|
Python
|
gpl-2.0
| 34,338
|
[
"VisIt"
] |
f7ddcaf85bdc50a3293cb8181168586f5514d622e7436eedc797b0fd1ac74c29
|
# -*- coding: utf-8 -*-
"""
:mod:`propagator` -- Tree level propagator.
====================================================
The tree level propagator is implemented here.
"""
import numpy as np
from dirac import Pp, Pm, gamma
# Fermion propagator helper functions
def hat(p):
    return 2. * np.sin(0.5 * p)
def ring(p):
return np.sin(p);
def funsq(f, p):
assert isinstance(p, np.ndarray)
return np.sum(f(p)*f(p))
def A(p, m):
result = 1. + m
result += 0.5 * funsq(hat, p)
return result
def w(p, m):
arg = A(p, m)
arg += 1. / arg * (1. + funsq(ring, p))
return np.arccosh(0.5 * arg)
def Mp(p, m):
return A(p, m) - np.exp(w(p, m))
def Mm(p, m):
return A(p, m) - np.exp(-w(p, m))
def R(p, T, m):
arg = w(p, m) * T
return Mm(p, m) * np.exp(arg) - Mp(p, m) * np.exp(-arg)
def oneoN(p, T, m):
#print A(p,m), np.sinh(w(p,m)), R(p,T,m), w(p,m)
return 2. * A(p, m) * np.sinh(w(p, m)) * R(p, T, m)
def sqrtN(p, T, m):
#print np.sqrt(1. / oneoN(p, T, m))
return np.sqrt(1. / oneoN(p, T, m))
def f1(p, t, T, m):
arg = w(p, m) * t
if np.linalg.norm(p) < 1e-7 and m == 0:
return t
#print sqrtN(p, T, m) * (np.exp(arg) - np.exp(-arg)),p[0],t,T,m,"f1"
return sqrtN(p, T, m) * (np.exp(arg) - np.exp(-arg))
def f2(p, t, T, m):
arg = w(p, m) * t
if np.linalg.norm(p) < 1e-7 and m == 0:
return 1
#print sqrtN(p, T, m) * (Mm(p, m) * np.exp(arg) - Mp(p, m) * np.exp(-arg)), p[0],t,T,m,"f2"
return sqrtN(p, T, m) * (Mm(p, m) * np.exp(arg) - Mp(p, m) * np.exp(-arg))
def Gp(p, x, y, T, m):
if x > y:
return f1(p, (T - x), T, m) * f2(p, y, T, m)
else:
return f2(p, x, T, m) * f1(p, (T - y), T, m)
def Gm(p, x, y, T, m):
if x > y:
return f2(p, (T - x), T, m) * f1(p, y, T, m)
else:
return f1(p, x, T, m) * f2(p, (T - y), T, m)
def Hp(p, x, y, T, m):
if x >= y:
return f2(p, (T - x), T, m) * f2(p, y, T, m)
else:
return Mm(p, m) * Mp(p, m) * f1(p, x, T, m) * f1(p, (T - y), T, m)
def Hm(p, x, y, T, m):
if x > y:
        return Mm(p, m) * Mp(p, m) * f1(p, (T - x), T, m) * f1(p, y, T, m)
else:
return f2(p, x, T, m) * f2(p, (T - y), T, m)
def Sf(p, t, u, T, m = 0.):
r"""Tree level fermion propagator in the Schrödinger functional
with an Abelian background field in the time-momentum
representation
.. math::
S_{\mathbf p}(t,u) = (D^{(0)} + m)^{-1}\,,
    where :math:`D^{(0)} + m` is the free (tree level) Dirac operator.
    :param p: Spatial momentum. Phase angles must be included here.
    :param t: Initial time.
    :param u: Final time.
:param T: Lattice extent.
:param m: Bare mass.
"""
    result = Pp * Hp(p, t, u, T, m) + Pm * Hm(p, t, u, T, m)
    tmp = (Pp * Gp(p, t, u, T, m) + Pm * Gm(p, t, u, T, m))
    for k in range(1, 4):
        result -= 1J * gamma[k] * ring(p[k - 1]) * tmp
    return result
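# Quick smoke test (illustrative; assumes the companion `dirac` module exposes
# 4x4 Pp, Pm and gamma[0..3] as NumPy arrays, and the parameter values below
# are arbitrary):
if __name__ == '__main__':
    p = np.array([0.1, 0.2, 0.3])
    print(Sf(p, 2, 3, 8, 0.05))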
|
dhesse/tree-level-improve
|
propagator.py
|
Python
|
mit
| 2,903
|
[
"DIRAC"
] |
ed75db4f5bbed7fb13d3aa36a6a4854a3ef0acefe9709cbc8f49f56ce7639d7c
|
import img_scale
import pyfits as pyf
import pylab as pyl
from mpl_toolkits.axes_grid1 import axes_grid
import cPickle as pickle
import os
from scipy.stats import scoreatpercentile
def mk_image(galaxy):
base = './../../images_v5/GS_2.5as_matched/gs_all_'
i_img = pyf.getdata(base+str(galaxy)+'_I.fits')
j_img = pyf.getdata(base+str(galaxy)+'_J.fits')
h_img = pyf.getdata(base+str(galaxy)+'_H.fits')
    #clip each band at the 99th percentile of its pixel values
x = pyl.hstack(i_img)
i_lim = scoreatpercentile(x,99)
x = pyl.hstack(j_img)
j_lim = scoreatpercentile(x,99)
x = pyl.hstack(h_img)
h_lim = scoreatpercentile(x,99)
print galaxy, i_lim, j_lim, h_lim
img = pyl.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
img[:,:,0] = img_scale.asinh(h_img, scale_min=-0.1*h_lim, scale_max=h_lim,
non_linear=0.5)
img[:,:,1] = img_scale.asinh(j_img, scale_min=-0.1*j_lim, scale_max=j_lim,
non_linear=0.5)
img[:,:,2] = img_scale.asinh(i_img, scale_min=-0.1*i_lim, scale_max=i_lim,
non_linear=0.5)
return img
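# Note on mk_image above (an inference from the calls, since img_scale is not
# shown here): img_scale.asinh is assumed to apply an arcsinh stretch mapping
# [scale_min, scale_max] into [0, 1], so each band is clipped at its 99th
# percentile and softened with non_linear=0.5 before being stacked into an
# RGB cube ordered (H, J, I).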
# Get the Galaxy info
galaxies = pickle.load(open('galaxies.pickle','rb'))
galaxies = filter(lambda galaxy: galaxy.ston_I > 30. and galaxy.sfrir != None,
galaxies)
galaxies = pyl.asarray(filter(lambda galaxy: galaxy.ICD_IH < 0.5, galaxies))
# Make the low mass grid first
y = pyl.asarray([galaxy.sfrtotal / galaxy.sfr2800 for galaxy in galaxies])
x = pyl.asarray([galaxy.ICD_IH * 100 for galaxy in galaxies])  # arrays, so the binning comparisons below are elementwise
ll = 0
ul= 3
bins_y =pyl.linspace(ul, ll, 10)
bins_x = pyl.linspace(0, 50, 10)
grid = []
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
for j in range(bins_y.size-1):
ymax = bins_y[j]
ymin = bins_y[j+1]
cond=[cond1 and cond2 and cond3 and cond4 for cond1, cond2, cond3,
cond4 in zip(x>=xmin, x<xmax, y>=ymin, y<ymax)]
grid.append(galaxies.compress(cond))
# Put the grid together
F = pyl.figure(1, figsize=(4, 6))
grid1 = axes_grid.ImageGrid(F, 111, nrows_ncols=(9,9), axes_pad=0.05,
add_all=True, share_all=True, aspect=True, direction='column')
from random import choice
base = './../../images_v5/GS_2.5as/gs_all_'
for i in range(len(grid)):
print len(grid[i])
if len(grid[i]) > 1:
galaxy = choice(grid[i])
ID = int(galaxy.ID)
while not os.path.isfile(base + str(ID) + '_I.fits'):
print 'choose again', ID
galaxy = choice(grid[i])
ID = int(galaxy.ID)
elif len(grid[i]) == 1:
galaxy = grid[i][0]
else:
#grid1[i].axis('off')
grid1[i].spines['bottom'].set_color('0.8')
grid1[i].spines['top'].set_color('0.8')
grid1[i].spines['right'].set_color('0.8')
grid1[i].spines['left'].set_color('0.8')
grid1[i].set_axis_bgcolor('None')
#grid1[i].axis('off')
if len(grid[i]) != 0:
ID = int(galaxy.ID)
img = mk_image(ID)
grid1[i].imshow(img, origin='lower')
grid1[i].text(0.5, 0.5, str(ID), color='white')
grid1[i].set_xticks([])
grid1[i].set_yticks([])
else:
pass
# Label everything
#grid1[4].set_xlabel('8.75', fontsize=16)
#grid1[9].set_xlabel('9.25', fontsize=16)
#grid1[14].set_xlabel('9.75', fontsize=16)
#grid1[19].set_xlabel('10.25\nLog Mass $(M_\odot)$', fontsize=16)
#grid1[24].set_xlabel('10.75', fontsize=16)
#grid1[29].set_xlabel('11.25', fontsize=16)
#grid1[34].set_xlabel('11.75', fontsize=16)
grid1[0].set_ylabel('Log ssfr = -6', fontsize=16)
#grid1[1].set_ylabel('35%', fontsize=16)
#grid1[2].set_ylabel(r'$\xi[i_{775}, H_{160}]$ (%)'+'\n25%', fontsize=16,
# multialignment='center')
#grid1[3].set_ylabel('15%', fontsize=16)
#grid1[4].set_ylabel('5%', fontsize=16)
grid1[8].set_ylabel('Log ssfr = -10', fontsize=16)
grid1[8].set_xlabel('0% ICD', fontsize=16)
grid1[80].set_xlabel('50% ICD', fontsize=16)
pyl.show()
|
boada/ICD
|
sandbox/legacy_plot_code/plot_icd_sfr_montage.py
|
Python
|
mit
| 3,857
|
[
"Galaxy"
] |
d0712edf30a198c9a50d9ebf675085ee2cd4a10316b79695815bc0b5ecd8251f
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from .dataObject import ObjectProperty, Alias
from .connection import Connection
from .neuron import Neuron
from .biology import BiologyType
from .worm_common import WORM_RDF_TYPE
class Network(BiologyType):
""" A network of neurons """
class_context = BiologyType.class_context
synapse = ObjectProperty(value_type=Connection, multiple=True)
''' Returns a set of all synapses in the network '''
neuron = ObjectProperty(value_type=Neuron, multiple=True)
''' Returns a set of all Neuron objects in the network '''
worm = ObjectProperty(value_rdf_type=WORM_RDF_TYPE)
synapses = Alias(synapse)
neurons = Alias(neuron)
def __init__(self, worm=None, **kwargs):
super(Network, self).__init__(**kwargs)
if worm is not None:
self.worm(worm)
def neuron_names(self):
"""
Gets the complete set of neurons' names in this network.
Example::
# Grabs the representation of the neuronal network
>>> net = Worm().get_neuron_network()
#NOTE: This is a VERY slow operation right now
>>> len(set(net.neuron_names()))
302
>>> set(net.neuron_names())
set(['VB4', 'PDEL', 'HSNL', 'SIBDR', ... 'RIAL', 'MCR', 'LUAL'])
"""
return set(x.name() for x in self.neuron())
def aneuron(self, name):
"""
Get a neuron by name.
Example::
# Grabs the representation of the neuronal network
>>> net = Worm().get_neuron_network()
# Grab a specific neuron
>>> aval = net.aneuron('AVAL')
>>> aval.type()
set([u'interneuron'])
:param name: Name of a C. elegans neuron
:returns: Neuron corresponding to the name given
:rtype: PyOpenWorm.neuron.Neuron
"""
return Neuron.contextualize(self.context)(name=name, conf=self.conf)
def sensory(self):
"""
Get all sensory neurons
:returns: An iterable of all sensory neurons
:rtype: iter(Neuron)
"""
n = Neuron.contextualize(self.context)()
n.type('sensory')
self.neuron.set(n)
res = list(n.load())
self.neuron.unset(n)
return res
def interneurons(self):
"""
Get all interneurons
:returns: An iterable of all interneurons
:rtype: iter(Neuron)
"""
n = Neuron.contextualize(self.context)()
n.type('interneuron')
self.neuron.set(n)
res = list(n.load())
self.neuron.unset(n)
return res
def motor(self):
"""
Get all motor neurons
:returns: An iterable of all motor neurons
:rtype: iter(Neuron)
"""
n = Neuron.contextualize(self.context)()
n.type('motor')
self.neuron.set(n)
res = list(n.load())
self.neuron.unset(n)
return res
def identifier_augment(self):
return self.make_identifier(self.worm.defined_values[0].identifier.n3())
def defined_augment(self):
return self.worm.has_defined_value()
__yarom_mapped_classes__ = (Network,)
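# Usage sketch (assumes an active PyOpenWorm connection, as in the
# docstring examples above):
#     net = Worm().get_neuron_network()
#     names = net.neuron_names()   # set of neuron name strings
#     motor_neurons = net.motor()  # list of Neuron objects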
|
gsarma/PyOpenWorm
|
PyOpenWorm/network.py
|
Python
|
mit
| 3,251
|
[
"NEURON"
] |
15799bdf9eaa2f39c3d03ffd2ca7ebb89a528bf46530c08b4a44447aaacdb969
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import re
import argparse
import os.path
import io
parser = argparse.ArgumentParser()
parser.add_argument('version', help='file version')
parser.add_argument('outfile', help='outfile with extension .c/.h')
parser.add_argument('inputs', nargs='*', action='store', help='input filenames')
args = parser.parse_args()
outname = args.outfile.split("/")[-1]
is_c = False
if outname[-2:] == ".c":
is_c = True
pos = outname.find(".")
if pos > 0:
outname = outname[:pos]
include_re = re.compile(r'^#include (".*").*$')
guard_re = re.compile(r"^#(?:(?:ifndef|define) [A-Z_]+_H_|endif /\* [A-Z_]+_H_ \*/|endif // [A-Z_]+_H_)")
print ("Starting amalgamating file "+ args.outfile)
file = io.open(args.outfile, 'w')
file.write(u"""/* THIS IS A SINGLE-FILE DISTRIBUTION CONCATENATED FROM THE OPEN62541 SOURCES
* visit http://open62541.org/ for information about this software
* Git-Revision: %s
*/
/*
* Copyright (C) 2014-2016 the contributors as stated in the AUTHORS file
*
* This file is part of open62541. open62541 is free software: you can
* redistribute it and/or modify it under the terms of the Mozilla Public
* License v2.0 as stated in the LICENSE file provided with open62541.
*
* open62541 is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.
*/\n\n""" % args.version)
if is_c:
file.write(u'''#ifndef UA_DYNAMIC_LINKING_EXPORT
# define UA_DYNAMIC_LINKING_EXPORT
#endif
#include "%s.h"
''' % outname)
else:
file.write(u'''#ifndef %s
#define %s
#ifdef __cplusplus
extern "C" {
#endif\n''' % (outname.upper() + u"_H_", outname.upper() + u"_H_") )
for fname in args.inputs:
with io.open(fname, encoding="utf8") as infile:
file.write(u"\n/*********************************** amalgamated original file \"" + fname + u"\" ***********************************/\n\n")
print ("Integrating file '" + fname + "'...", end=""),
for line in infile:
inc_res = include_re.match(line)
guard_res = guard_re.match(line)
if not inc_res and not guard_res:
file.write(line)
# Ensure file is written to disk.
file.flush()
os.fsync(file.fileno())
print ("done."),
if not is_c:
file.write(u'''
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* %s */\n''' % (outname.upper() + u"_H_"))
# Ensure file is written to disk.
# See https://stackoverflow.com/questions/13761961/large-file-not-flushed-to-disk-immediately-after-calling-close
file.flush()
os.fsync(file.fileno())
file.close()
print ("The size of "+args.outfile+" is "+ str(os.path.getsize(args.outfile))+" Bytes.")
|
AGIsmail/open62541
|
tools/amalgamate.py
|
Python
|
mpl-2.0
| 2,960
|
[
"VisIt"
] |
6f20be4dfa22a2eb596d3a804ccfecabd7e6c10fa73bbf73ca476e19e349575b
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Provides classes for generating high-symmetry k-paths using different conventions.
"""
import abc
import itertools
import operator
from math import ceil, cos, e, pi, sin, tan
from warnings import warn
import networkx as nx
import numpy as np
import spglib
from monty.dev import requires
from scipy.linalg import sqrtm
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import MagSymmOp, SymmOp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
try:
from seekpath import get_path # type: ignore
except ImportError:
get_path = None
__author__ = "Geoffroy Hautier, Katherine Latimer, Jason Munro"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Jason Munro"
__email__ = "jmunro@lbl.gov"
__status__ = "Development"
__date__ = "March 2020"
class KPathBase(metaclass=abc.ABCMeta):
"""
This is the base class for classes used to generate high-symmetry
paths in reciprocal space (k-paths) for band structure calculations.
"""
@abc.abstractmethod
def __init__(self, structure, symprec=0.01, angle_tolerance=5, atol=1e-5, *args, **kwargs):
"""
Args:
structure (Structure): Structure object
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
atol (float): Absolute tolerance used to compare structures
and determine symmetric equivalence of points and lines
in the BZ.
"""
self._structure = structure
self._latt = self._structure.lattice
self._rec_lattice = self._structure.lattice.reciprocal_lattice
self._kpath = None
self._symprec = symprec
self._atol = atol
self._angle_tolerance = angle_tolerance
@property
def structure(self):
"""
Returns:
The input structure
"""
return self._structure
@property
def lattice(self):
"""
Returns:
The real space lattice
"""
return self._latt
@property
def rec_lattice(self):
"""
Returns:
The reciprocal space lattice
"""
return self._rec_lattice
@property
def kpath(self):
"""
Returns:
The symmetry line path in reciprocal space
"""
return self._kpath
def get_kpoints(self, line_density=20, coords_are_cartesian=True):
"""
Returns:
The kpoints along the path (in Cartesian coordinates when
coords_are_cartesian is True) together with the labels of the
symmetry points.
"""
list_k_points = []
sym_point_labels = []
for b in self.kpath["path"]:
for i in range(1, len(b)):
start = np.array(self.kpath["kpoints"][b[i - 1]])
end = np.array(self.kpath["kpoints"][b[i]])
distance = np.linalg.norm(
self._rec_lattice.get_cartesian_coords(start) - self._rec_lattice.get_cartesian_coords(end)
)
nb = int(ceil(distance * line_density))
if nb == 0:
continue
sym_point_labels.extend([b[i - 1]] + [""] * (nb - 1) + [b[i]])
list_k_points.extend(
[
self._rec_lattice.get_cartesian_coords(start)
+ float(i)
/ float(nb)
* (self._rec_lattice.get_cartesian_coords(end) - self._rec_lattice.get_cartesian_coords(start))
for i in range(0, nb + 1)
]
)
if coords_are_cartesian:
return list_k_points, sym_point_labels
frac_k_points = [self._rec_lattice.get_fractional_coords(k) for k in list_k_points]
return frac_k_points, sym_point_labels
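# Sketch of consuming get_kpoints() output (hedged example; `kpath` is an
# instance of one of the concrete subclasses defined below):
#     kpts, labels = kpath.get_kpoints(line_density=20)
#     for k, lab in zip(kpts, labels):
#         if lab:  # non-empty label marks a high-symmetry point
#             print(lab, k)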
class KPathSetyawanCurtarolo(KPathBase):
"""
This class looks for path along high symmetry lines in
the Brillouin Zone.
It is based on Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
It should be used with primitive structures that
comply with the definition from the paper.
The symmetry is determined by spglib through the
SpacegroupAnalyzer class. The analyzer can be used to
produce the correct primitive structure (method
get_primitive_standard_structure(international_monoclinic=False)).
A warning will signal possible compatibility problems
with the given structure. KPoints from get_kpoints() method
are returned in the reciprocal cell basis defined in the paper.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5, atol=1e-5):
"""
Args:
structure (Structure): Structure object
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
atol (float): Absolute tolerance used to compare the input
structure with the one expected as primitive standard.
A warning will be issued if the lattices don't match.
"""
if "magmom" in structure.site_properties.keys():
warn(
"'magmom' entry found in site properties but will be ignored \
for the Setyawan and Curtarolo convention."
)
super().__init__(structure, symprec=symprec, angle_tolerance=angle_tolerance, atol=atol)
self._sym = SpacegroupAnalyzer(structure, symprec=symprec, angle_tolerance=angle_tolerance)
self._prim = self._sym.get_primitive_standard_structure(international_monoclinic=False)
self._conv = self._sym.get_conventional_standard_structure(international_monoclinic=False)
self._rec_lattice = self._prim.lattice.reciprocal_lattice
# Note: this warning will be issued for space groups 38-41, since the primitive cell must be
# reformatted to match Setyawan/Curtarolo convention in order to work with the current k-path
# generation scheme.
if not np.allclose(self._structure.lattice.matrix, self._prim.lattice.matrix, atol=atol):
warn(
"The input structure does not match the expected standard primitive! "
"The path can be incorrect. Use at your own risk."
)
lattice_type = self._sym.get_lattice_type()
spg_symbol = self._sym.get_space_group_symbol()
if lattice_type == "cubic":
if "P" in spg_symbol:
self._kpath = self.cubic()
elif "F" in spg_symbol:
self._kpath = self.fcc()
elif "I" in spg_symbol:
self._kpath = self.bcc()
else:
warn(f"Unexpected value for spg_symbol: {spg_symbol}")
elif lattice_type == "tetragonal":
if "P" in spg_symbol:
self._kpath = self.tet()
elif "I" in spg_symbol:
a = self._conv.lattice.abc[0]
c = self._conv.lattice.abc[2]
if c < a:
self._kpath = self.bctet1(c, a)
else:
self._kpath = self.bctet2(c, a)
else:
warn(f"Unexpected value for spg_symbol: {spg_symbol}")
elif lattice_type == "orthorhombic":
a = self._conv.lattice.abc[0]
b = self._conv.lattice.abc[1]
c = self._conv.lattice.abc[2]
if "P" in spg_symbol:
self._kpath = self.orc()
elif "F" in spg_symbol:
if 1 / a**2 > 1 / b**2 + 1 / c**2:
self._kpath = self.orcf1(a, b, c)
elif 1 / a**2 < 1 / b**2 + 1 / c**2:
self._kpath = self.orcf2(a, b, c)
else:
self._kpath = self.orcf3(a, b, c)
elif "I" in spg_symbol:
self._kpath = self.orci(a, b, c)
elif "C" in spg_symbol or "A" in spg_symbol:
self._kpath = self.orcc(a, b, c)
else:
warn(f"Unexpected value for spg_symbol: {spg_symbol}")
elif lattice_type == "hexagonal":
self._kpath = self.hex()
elif lattice_type == "rhombohedral":
alpha = self._prim.lattice.parameters[3]
if alpha < 90:
self._kpath = self.rhl1(alpha * pi / 180)
else:
self._kpath = self.rhl2(alpha * pi / 180)
elif lattice_type == "monoclinic":
a, b, c = self._conv.lattice.abc
alpha = self._conv.lattice.parameters[3]
# beta = self._conv.lattice.parameters[4]
if "P" in spg_symbol:
self._kpath = self.mcl(b, c, alpha * pi / 180)
elif "C" in spg_symbol:
kgamma = self._rec_lattice.parameters[5]
if kgamma > 90:
self._kpath = self.mclc1(a, b, c, alpha * pi / 180)
if kgamma == 90:
self._kpath = self.mclc2(a, b, c, alpha * pi / 180)
if kgamma < 90:
if b * cos(alpha * pi / 180) / c + b**2 * sin(alpha * pi / 180) ** 2 / a**2 < 1:
self._kpath = self.mclc3(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c + b**2 * sin(alpha * pi / 180) ** 2 / a**2 == 1:
self._kpath = self.mclc4(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c + b**2 * sin(alpha * pi / 180) ** 2 / a**2 > 1:
self._kpath = self.mclc5(a, b, c, alpha * pi / 180)
else:
warn(f"Unexpected value for spg_symbol: {spg_symbol}")
elif lattice_type == "triclinic":
kalpha = self._rec_lattice.parameters[3]
kbeta = self._rec_lattice.parameters[4]
kgamma = self._rec_lattice.parameters[5]
if kalpha > 90 and kbeta > 90 and kgamma > 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma < 90:
self._kpath = self.trib()
if kalpha > 90 and kbeta > 90 and kgamma == 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma == 90:
self._kpath = self.trib()
else:
warn(f"Unknown lattice type {lattice_type}")
@property
def conventional(self):
"""
Returns:
The conventional cell structure
"""
return self._conv
@property
def prim(self):
"""
Returns:
The primitive cell structure
"""
return self._prim
@property
def prim_rec(self):
"""
Returns:
The primitive reciprocal cell structure
"""
return self._rec_lattice
def cubic(self):
"""
CUB Path
"""
self.name = "CUB"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"X": np.array([0.0, 0.5, 0.0]),
"R": np.array([0.5, 0.5, 0.5]),
"M": np.array([0.5, 0.5, 0.0]),
}
path = [["\\Gamma", "X", "M", "\\Gamma", "R", "X"], ["M", "R"]]
return {"kpoints": kpoints, "path": path}
def fcc(self):
"""
FCC Path
"""
self.name = "FCC"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"K": np.array([3.0 / 8.0, 3.0 / 8.0, 3.0 / 4.0]),
"L": np.array([0.5, 0.5, 0.5]),
"U": np.array([5.0 / 8.0, 1.0 / 4.0, 5.0 / 8.0]),
"W": np.array([0.5, 1.0 / 4.0, 3.0 / 4.0]),
"X": np.array([0.5, 0.0, 0.5]),
}
path = [
["\\Gamma", "X", "W", "K", "\\Gamma", "L", "U", "W", "L", "K"],
["U", "X"],
]
return {"kpoints": kpoints, "path": path}
def bcc(self):
"""
BCC Path
"""
self.name = "BCC"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"H": np.array([0.5, -0.5, 0.5]),
"P": np.array([0.25, 0.25, 0.25]),
"N": np.array([0.0, 0.0, 0.5]),
}
path = [["\\Gamma", "H", "N", "\\Gamma", "P", "H"], ["P", "N"]]
return {"kpoints": kpoints, "path": path}
def tet(self):
"""
TET Path
"""
self.name = "TET"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"A": np.array([0.5, 0.5, 0.5]),
"M": np.array([0.5, 0.5, 0.0]),
"R": np.array([0.0, 0.5, 0.5]),
"X": np.array([0.0, 0.5, 0.0]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["\\Gamma", "X", "M", "\\Gamma", "Z", "R", "A", "Z"],
["X", "R"],
["M", "A"],
]
return {"kpoints": kpoints, "path": path}
def bctet1(self, c, a):
"""
BCT1 Path
"""
self.name = "BCT1"
eta = (1 + c**2 / a**2) / 4.0
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"M": np.array([-0.5, 0.5, 0.5]),
"N": np.array([0.0, 0.5, 0.0]),
"P": np.array([0.25, 0.25, 0.25]),
"X": np.array([0.0, 0.0, 0.5]),
"Z": np.array([eta, eta, -eta]),
"Z_1": np.array([-eta, 1 - eta, eta]),
}
path = [["\\Gamma", "X", "M", "\\Gamma", "Z", "P", "N", "Z_1", "M"], ["X", "P"]]
return {"kpoints": kpoints, "path": path}
def bctet2(self, c, a):
"""
BCT2 Path
"""
self.name = "BCT2"
eta = (1 + a**2 / c**2) / 4.0
zeta = a**2 / (2 * c**2)
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"N": np.array([0.0, 0.5, 0.0]),
"P": np.array([0.25, 0.25, 0.25]),
"\\Sigma": np.array([-eta, eta, eta]),
"\\Sigma_1": np.array([eta, 1 - eta, -eta]),
"X": np.array([0.0, 0.0, 0.5]),
"Y": np.array([-zeta, zeta, 0.5]),
"Y_1": np.array([0.5, 0.5, -zeta]),
"Z": np.array([0.5, 0.5, -0.5]),
}
path = [
[
"\\Gamma",
"X",
"Y",
"\\Sigma",
"\\Gamma",
"Z",
"\\Sigma_1",
"N",
"P",
"Y_1",
"Z",
],
["X", "P"],
]
return {"kpoints": kpoints, "path": path}
def orc(self):
"""
ORC Path
"""
self.name = "ORC"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"R": np.array([0.5, 0.5, 0.5]),
"S": np.array([0.5, 0.5, 0.0]),
"T": np.array([0.0, 0.5, 0.5]),
"U": np.array([0.5, 0.0, 0.5]),
"X": np.array([0.5, 0.0, 0.0]),
"Y": np.array([0.0, 0.5, 0.0]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["\\Gamma", "X", "S", "Y", "\\Gamma", "Z", "U", "R", "T", "Z"],
["Y", "T"],
["U", "X"],
["S", "R"],
]
return {"kpoints": kpoints, "path": path}
def orcf1(self, a, b, c):
"""
ORFC1 Path
"""
self.name = "ORCF1"
zeta = (1 + a**2 / b**2 - a**2 / c**2) / 4
eta = (1 + a**2 / b**2 + a**2 / c**2) / 4
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"A": np.array([0.5, 0.5 + zeta, zeta]),
"A_1": np.array([0.5, 0.5 - zeta, 1 - zeta]),
"L": np.array([0.5, 0.5, 0.5]),
"T": np.array([1, 0.5, 0.5]),
"X": np.array([0.0, eta, eta]),
"X_1": np.array([1, 1 - eta, 1 - eta]),
"Y": np.array([0.5, 0.0, 0.5]),
"Z": np.array([0.5, 0.5, 0.0]),
}
path = [
["\\Gamma", "Y", "T", "Z", "\\Gamma", "X", "A_1", "Y"],
["T", "X_1"],
["X", "A", "Z"],
["L", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def orcf2(self, a, b, c):
"""
ORFC2 Path
"""
self.name = "ORCF2"
phi = (1 + c**2 / b**2 - c**2 / a**2) / 4
eta = (1 + a**2 / b**2 - a**2 / c**2) / 4
delta = (1 + b**2 / a**2 - b**2 / c**2) / 4
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"C": np.array([0.5, 0.5 - eta, 1 - eta]),
"C_1": np.array([0.5, 0.5 + eta, eta]),
"D": np.array([0.5 - delta, 0.5, 1 - delta]),
"D_1": np.array([0.5 + delta, 0.5, delta]),
"L": np.array([0.5, 0.5, 0.5]),
"H": np.array([1 - phi, 0.5 - phi, 0.5]),
"H_1": np.array([phi, 0.5 + phi, 0.5]),
"X": np.array([0.0, 0.5, 0.5]),
"Y": np.array([0.5, 0.0, 0.5]),
"Z": np.array([0.5, 0.5, 0.0]),
}
path = [
["\\Gamma", "Y", "C", "D", "X", "\\Gamma", "Z", "D_1", "H", "C"],
["C_1", "Z"],
["X", "H_1"],
["H", "Y"],
["L", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def orcf3(self, a, b, c):
"""
ORFC3 Path
"""
self.name = "ORCF3"
zeta = (1 + a**2 / b**2 - a**2 / c**2) / 4
eta = (1 + a**2 / b**2 + a**2 / c**2) / 4
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"A": np.array([0.5, 0.5 + zeta, zeta]),
"A_1": np.array([0.5, 0.5 - zeta, 1 - zeta]),
"L": np.array([0.5, 0.5, 0.5]),
"T": np.array([1, 0.5, 0.5]),
"X": np.array([0.0, eta, eta]),
"X_1": np.array([1, 1 - eta, 1 - eta]),
"Y": np.array([0.5, 0.0, 0.5]),
"Z": np.array([0.5, 0.5, 0.0]),
}
path = [
["\\Gamma", "Y", "T", "Z", "\\Gamma", "X", "A_1", "Y"],
["X", "A", "Z"],
["L", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def orci(self, a, b, c):
"""
ORCI Path
"""
self.name = "ORCI"
zeta = (1 + a**2 / c**2) / 4
eta = (1 + b**2 / c**2) / 4
delta = (b**2 - a**2) / (4 * c**2)
mu = (a**2 + b**2) / (4 * c**2)
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"L": np.array([-mu, mu, 0.5 - delta]),
"L_1": np.array([mu, -mu, 0.5 + delta]),
"L_2": np.array([0.5 - delta, 0.5 + delta, -mu]),
"R": np.array([0.0, 0.5, 0.0]),
"S": np.array([0.5, 0.0, 0.0]),
"T": np.array([0.0, 0.0, 0.5]),
"W": np.array([0.25, 0.25, 0.25]),
"X": np.array([-zeta, zeta, zeta]),
"X_1": np.array([zeta, 1 - zeta, -zeta]),
"Y": np.array([eta, -eta, eta]),
"Y_1": np.array([1 - eta, eta, -eta]),
"Z": np.array([0.5, 0.5, -0.5]),
}
path = [
["\\Gamma", "X", "L", "T", "W", "R", "X_1", "Z", "\\Gamma", "Y", "S", "W"],
["L_1", "Y"],
["Y_1", "Z"],
]
return {"kpoints": kpoints, "path": path}
def orcc(self, a, b, c):
"""
ORCC Path
"""
self.name = "ORCC"
zeta = (1 + a**2 / b**2) / 4
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"A": np.array([zeta, zeta, 0.5]),
"A_1": np.array([-zeta, 1 - zeta, 0.5]),
"R": np.array([0.0, 0.5, 0.5]),
"S": np.array([0.0, 0.5, 0.0]),
"T": np.array([-0.5, 0.5, 0.5]),
"X": np.array([zeta, zeta, 0.0]),
"X_1": np.array([-zeta, 1 - zeta, 0.0]),
"Y": np.array([-0.5, 0.5, 0]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
[
"\\Gamma",
"X",
"S",
"R",
"A",
"Z",
"\\Gamma",
"Y",
"X_1",
"A_1",
"T",
"Y",
],
["Z", "T"],
]
return {"kpoints": kpoints, "path": path}
def hex(self):
"""
HEX Path
"""
self.name = "HEX"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"A": np.array([0.0, 0.0, 0.5]),
"H": np.array([1.0 / 3.0, 1.0 / 3.0, 0.5]),
"K": np.array([1.0 / 3.0, 1.0 / 3.0, 0.0]),
"L": np.array([0.5, 0.0, 0.5]),
"M": np.array([0.5, 0.0, 0.0]),
}
path = [
["\\Gamma", "M", "K", "\\Gamma", "A", "L", "H", "A"],
["L", "M"],
["K", "H"],
]
return {"kpoints": kpoints, "path": path}
def rhl1(self, alpha):
"""
RHL1 Path
"""
self.name = "RHL1"
eta = (1 + 4 * cos(alpha)) / (2 + 4 * cos(alpha))
nu = 3.0 / 4.0 - eta / 2.0
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"B": np.array([eta, 0.5, 1.0 - eta]),
"B_1": np.array([1.0 / 2.0, 1.0 - eta, eta - 1.0]),
"F": np.array([0.5, 0.5, 0.0]),
"L": np.array([0.5, 0.0, 0.0]),
"L_1": np.array([0.0, 0.0, -0.5]),
"P": np.array([eta, nu, nu]),
"P_1": np.array([1.0 - nu, 1.0 - nu, 1.0 - eta]),
"P_2": np.array([nu, nu, eta - 1.0]),
"Q": np.array([1.0 - nu, nu, 0.0]),
"X": np.array([nu, 0.0, -nu]),
"Z": np.array([0.5, 0.5, 0.5]),
}
path = [
["\\Gamma", "L", "B_1"],
["B", "Z", "\\Gamma", "X"],
["Q", "F", "P_1", "Z"],
["L", "P"],
]
return {"kpoints": kpoints, "path": path}
def rhl2(self, alpha):
"""
RHL2 Path
"""
self.name = "RHL2"
eta = 1 / (2 * tan(alpha / 2.0) ** 2)
nu = 3.0 / 4.0 - eta / 2.0
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"F": np.array([0.5, -0.5, 0.0]),
"L": np.array([0.5, 0.0, 0.0]),
"P": np.array([1 - nu, -nu, 1 - nu]),
"P_1": np.array([nu, nu - 1.0, nu - 1.0]),
"Q": np.array([eta, eta, eta]),
"Q_1": np.array([1.0 - eta, -eta, -eta]),
"Z": np.array([0.5, -0.5, 0.5]),
}
path = [["\\Gamma", "P", "Z", "Q", "\\Gamma", "F", "P_1", "Q_1", "L", "Z"]]
return {"kpoints": kpoints, "path": path}
def mcl(self, b, c, beta):
"""
MCL Path
"""
self.name = "MCL"
eta = (1 - b * cos(beta) / c) / (2 * sin(beta) ** 2)
nu = 0.5 - eta * c * cos(beta) / b
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"A": np.array([0.5, 0.5, 0.0]),
"C": np.array([0.0, 0.5, 0.5]),
"D": np.array([0.5, 0.0, 0.5]),
"D_1": np.array([0.5, 0.5, -0.5]),
"E": np.array([0.5, 0.5, 0.5]),
"H": np.array([0.0, eta, 1.0 - nu]),
"H_1": np.array([0.0, 1.0 - eta, nu]),
"H_2": np.array([0.0, eta, -nu]),
"M": np.array([0.5, eta, 1.0 - nu]),
"M_1": np.array([0.5, 1 - eta, nu]),
"M_2": np.array([0.5, 1 - eta, nu]),
"X": np.array([0.0, 0.5, 0.0]),
"Y": np.array([0.0, 0.0, 0.5]),
"Y_1": np.array([0.0, 0.0, -0.5]),
"Z": np.array([0.5, 0.0, 0.0]),
}
path = [
["\\Gamma", "Y", "H", "C", "E", "M_1", "A", "X", "H_1"],
["M", "D", "Z"],
["Y", "D"],
]
return {"kpoints": kpoints, "path": path}
def mclc1(self, a, b, c, alpha):
"""
MCLC1 Path
"""
self.name = "MCLC1"
zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
psi = 0.75 - a**2 / (4 * b**2 * sin(alpha) ** 2)
phi = psi + (0.75 - psi) * b * cos(alpha) / c
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"N": np.array([0.5, 0.0, 0.0]),
"N_1": np.array([0.0, -0.5, 0.0]),
"F": np.array([1 - zeta, 1 - zeta, 1 - eta]),
"F_1": np.array([zeta, zeta, eta]),
"F_2": np.array([-zeta, -zeta, 1 - eta]),
"I": np.array([phi, 1 - phi, 0.5]),
"I_1": np.array([1 - phi, phi - 1, 0.5]),
"L": np.array([0.5, 0.5, 0.5]),
"M": np.array([0.5, 0.0, 0.5]),
"X": np.array([1 - psi, psi - 1, 0.0]),
"X_1": np.array([psi, 1 - psi, 0.0]),
"X_2": np.array([psi - 1, -psi, 0.0]),
"Y": np.array([0.5, 0.5, 0.0]),
"Y_1": np.array([-0.5, -0.5, 0.0]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["\\Gamma", "Y", "F", "L", "I"],
["I_1", "Z", "F_1"],
["Y", "X_1"],
["X", "\\Gamma", "N"],
["M", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def mclc2(self, a, b, c, alpha):
"""
MCLC2 Path
"""
self.name = "MCLC2"
zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
psi = 0.75 - a**2 / (4 * b**2 * sin(alpha) ** 2)
phi = psi + (0.75 - psi) * b * cos(alpha) / c
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"N": np.array([0.5, 0.0, 0.0]),
"N_1": np.array([0.0, -0.5, 0.0]),
"F": np.array([1 - zeta, 1 - zeta, 1 - eta]),
"F_1": np.array([zeta, zeta, eta]),
"F_2": np.array([-zeta, -zeta, 1 - eta]),
"F_3": np.array([1 - zeta, -zeta, 1 - eta]),
"I": np.array([phi, 1 - phi, 0.5]),
"I_1": np.array([1 - phi, phi - 1, 0.5]),
"L": np.array([0.5, 0.5, 0.5]),
"M": np.array([0.5, 0.0, 0.5]),
"X": np.array([1 - psi, psi - 1, 0.0]),
"X_1": np.array([psi, 1 - psi, 0.0]),
"X_2": np.array([psi - 1, -psi, 0.0]),
"Y": np.array([0.5, 0.5, 0.0]),
"Y_1": np.array([-0.5, -0.5, 0.0]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["\\Gamma", "Y", "F", "L", "I"],
["I_1", "Z", "F_1"],
["N", "\\Gamma", "M"],
]
return {"kpoints": kpoints, "path": path}
def mclc3(self, a, b, c, alpha):
"""
MCLC3 Path
"""
self.name = "MCLC3"
mu = (1 + b**2 / a**2) / 4.0
delta = b * c * cos(alpha) / (2 * a**2)
zeta = mu - 0.25 + (1 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
phi = 1 + zeta - 2 * mu
psi = eta - 2 * delta
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"F": np.array([1 - phi, 1 - phi, 1 - psi]),
"F_1": np.array([phi, phi - 1, psi]),
"F_2": np.array([1 - phi, -phi, 1 - psi]),
"H": np.array([zeta, zeta, eta]),
"H_1": np.array([1 - zeta, -zeta, 1 - eta]),
"H_2": np.array([-zeta, -zeta, 1 - eta]),
"I": np.array([0.5, -0.5, 0.5]),
"M": np.array([0.5, 0.0, 0.5]),
"N": np.array([0.5, 0.0, 0.0]),
"N_1": np.array([0.0, -0.5, 0.0]),
"X": np.array([0.5, -0.5, 0.0]),
"Y": np.array([mu, mu, delta]),
"Y_1": np.array([1 - mu, -mu, -delta]),
"Y_2": np.array([-mu, -mu, -delta]),
"Y_3": np.array([mu, mu - 1, delta]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["\\Gamma", "Y", "F", "H", "Z", "I", "F_1"],
["H_1", "Y_1", "X", "\\Gamma", "N"],
["M", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def mclc4(self, a, b, c, alpha):
"""
MCLC4 Path
"""
self.name = "MCLC4"
mu = (1 + b**2 / a**2) / 4.0
delta = b * c * cos(alpha) / (2 * a**2)
zeta = mu - 0.25 + (1 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
phi = 1 + zeta - 2 * mu
psi = eta - 2 * delta
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"F": np.array([1 - phi, 1 - phi, 1 - psi]),
"F_1": np.array([phi, phi - 1, psi]),
"F_2": np.array([1 - phi, -phi, 1 - psi]),
"H": np.array([zeta, zeta, eta]),
"H_1": np.array([1 - zeta, -zeta, 1 - eta]),
"H_2": np.array([-zeta, -zeta, 1 - eta]),
"I": np.array([0.5, -0.5, 0.5]),
"M": np.array([0.5, 0.0, 0.5]),
"N": np.array([0.5, 0.0, 0.0]),
"N_1": np.array([0.0, -0.5, 0.0]),
"X": np.array([0.5, -0.5, 0.0]),
"Y": np.array([mu, mu, delta]),
"Y_1": np.array([1 - mu, -mu, -delta]),
"Y_2": np.array([-mu, -mu, -delta]),
"Y_3": np.array([mu, mu - 1, delta]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["\\Gamma", "Y", "F", "H", "Z", "I"],
["H_1", "Y_1", "X", "\\Gamma", "N"],
["M", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def mclc5(self, a, b, c, alpha):
"""
MCLC5 Path
"""
self.name = "MCLC5"
zeta = (b**2 / a**2 + (1 - b * cos(alpha) / c) / sin(alpha) ** 2) / 4
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
mu = eta / 2 + b**2 / (4 * a**2) - b * c * cos(alpha) / (2 * a**2)
nu = 2 * mu - zeta
rho = 1 - zeta * a**2 / b**2
omega = (4 * nu - 1 - b**2 * sin(alpha) ** 2 / a**2) * c / (2 * b * cos(alpha))
delta = zeta * c * cos(alpha) / b + omega / 2 - 0.25
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"F": np.array([nu, nu, omega]),
"F_1": np.array([1 - nu, 1 - nu, 1 - omega]),
"F_2": np.array([nu, nu - 1, omega]),
"H": np.array([zeta, zeta, eta]),
"H_1": np.array([1 - zeta, -zeta, 1 - eta]),
"H_2": np.array([-zeta, -zeta, 1 - eta]),
"I": np.array([rho, 1 - rho, 0.5]),
"I_1": np.array([1 - rho, rho - 1, 0.5]),
"L": np.array([0.5, 0.5, 0.5]),
"M": np.array([0.5, 0.0, 0.5]),
"N": np.array([0.5, 0.0, 0.0]),
"N_1": np.array([0.0, -0.5, 0.0]),
"X": np.array([0.5, -0.5, 0.0]),
"Y": np.array([mu, mu, delta]),
"Y_1": np.array([1 - mu, -mu, -delta]),
"Y_2": np.array([-mu, -mu, -delta]),
"Y_3": np.array([mu, mu - 1, delta]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["\\Gamma", "Y", "F", "L", "I"],
["I_1", "Z", "H", "F_1"],
["H_1", "Y_1", "X", "\\Gamma", "N"],
["M", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def tria(self):
"""
TRI1a Path
"""
self.name = "TRI1a"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"L": np.array([0.5, 0.5, 0.0]),
"M": np.array([0.0, 0.5, 0.5]),
"N": np.array([0.5, 0.0, 0.5]),
"R": np.array([0.5, 0.5, 0.5]),
"X": np.array([0.5, 0.0, 0.0]),
"Y": np.array([0.0, 0.5, 0.0]),
"Z": np.array([0.0, 0.0, 0.5]),
}
path = [
["X", "\\Gamma", "Y"],
["L", "\\Gamma", "Z"],
["N", "\\Gamma", "M"],
["R", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
def trib(self):
"""
TRI1b Path
"""
self.name = "TRI1b"
kpoints = {
"\\Gamma": np.array([0.0, 0.0, 0.0]),
"L": np.array([0.5, -0.5, 0.0]),
"M": np.array([0.0, 0.0, 0.5]),
"N": np.array([-0.5, -0.5, 0.5]),
"R": np.array([0.0, -0.5, 0.5]),
"X": np.array([0.0, -0.5, 0.0]),
"Y": np.array([0.5, 0.0, 0.0]),
"Z": np.array([-0.5, 0.0, 0.5]),
}
path = [
["X", "\\Gamma", "Y"],
["L", "\\Gamma", "Z"],
["N", "\\Gamma", "M"],
["R", "\\Gamma"],
]
return {"kpoints": kpoints, "path": path}
class KPathSeek(KPathBase):
"""
This class looks for path along high symmetry lines in
the Brillouin Zone.
It is based on Hinuma, Y., Pizzi, G., Kumagai, Y., Oba, F.,
& Tanaka, I. (2017). Band structure diagram paths based on
crystallography. Computational Materials Science, 128, 140–184.
https://doi.org/10.1016/j.commatsci.2016.10.015
It should be used with primitive structures that
comply with the definition from the paper.
The symmetry is determined by spglib through the
SpacegroupAnalyzer class. KPoints from get_kpoints() method
are returned in the reciprocal cell basis defined in the paper.
"""
@requires(
get_path is not None,
"SeeK-path is required to use the convention by Hinuma et al.",
)
def __init__(self, structure, symprec=0.01, angle_tolerance=5, atol=1e-5, system_is_tri=True):
"""
Args:
structure (Structure): Structure object
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
atol (float): Absolute tolerance used to determine edge cases
for settings of structures.
system_is_tri (boolean): Indicates if the system is time-reversal
invariant.
"""
super().__init__(structure, symprec=symprec, angle_tolerance=angle_tolerance, atol=atol)
positions = structure.frac_coords
sp = structure.site_properties
species = [site.species for site in structure]
site_data = species
if not system_is_tri:
warn("Non-zero 'magmom' data will be used to define unique atoms in the cell.")
site_data = zip(species, [tuple(vec) for vec in sp["magmom"]])
unique_species = []
numbers = []
for species, g in itertools.groupby(site_data):
if species in unique_species:
ind = unique_species.index(species)
numbers.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
numbers.extend([len(unique_species)] * len(tuple(g)))
cell = (self._latt.matrix, positions, numbers)
lattice, scale_pos, atom_num = spglib.standardize_cell(
cell, to_primitive=False, no_idealize=True, symprec=symprec
)
spg_struct = (lattice, scale_pos, atom_num)
spath_dat = get_path(spg_struct, system_is_tri, "hpkot", atol, symprec, angle_tolerance)
self._tmat = self._trans_sc_to_Hin(spath_dat["bravais_lattice_extended"])
self._rec_lattice = Lattice(spath_dat["reciprocal_primitive_lattice"])
spath_data_formatted = [[spath_dat["path"][0][0]]]
count = 0
for pnum in range(len(spath_dat["path"]) - 1):
if spath_dat["path"][pnum][1] == spath_dat["path"][pnum + 1][0]:
spath_data_formatted[count].append(spath_dat["path"][pnum][1])
else:
spath_data_formatted[count].append(spath_dat["path"][pnum][1])
spath_data_formatted.append([])
count += 1
spath_data_formatted[count].append(spath_dat["path"][pnum + 1][0])
spath_data_formatted[-1].append(spath_dat["path"][-1][1])
self._kpath = {
"kpoints": spath_dat["point_coords"],
"path": spath_data_formatted,
}
@staticmethod
def _trans_sc_to_Hin(sub_class):
if sub_class in [
"cP1",
"cP2",
"cF1",
"cF2",
"cI1",
"tP1",
"oP1",
"hP1",
"hP2",
"tI1",
"tI2",
"oF1",
"oF3",
"oI1",
"oI3",
"oC1",
"hR1",
"hR2",
"aP1",
"aP2",
"aP3",
"oA1",
]:
return np.eye(3)
if sub_class == "oF2":
return np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
if sub_class == "oI2":
return np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
if sub_class == "oI3":
return np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
if sub_class == "oA2":
return np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
if sub_class == "oC2":
return np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
if sub_class in ["mP1", "mC1", "mC2", "mC3"]:
return np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
raise RuntimeError("Sub-classification of crystal not found!")
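# Usage sketch for the Hinuma et al. convention (requires the optional
# seekpath package; `struct` is a pymatgen Structure as above):
#     kpath = KPathSeek(struct, system_is_tri=True)
#     print(kpath.kpath["kpoints"], kpath.kpath["path"])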
class KPathLatimerMunro(KPathBase):
"""
This class looks for a path along high symmetry lines in the
Brillouin zone. It is based on the method outlined in:
npj Comput Mater 6, 112 (2020). 10.1038/s41524-020-00383-7
The user should ensure that the lattice of the input structure
is as reduced as possible, i.e. that there is no linear
combination of lattice vectors which can produce a vector of
lesser magnitude than the given set (this is required to
obtain the correct Brillouin zone within the current
implementation). This is checked during initialization and a
warning is issued if the condition is not fulfilled.
In the case of magnetic structures, care must also be taken to
provide the magnetic primitive cell (i.e. that which reproduces
the entire crystal, including the correct magnetic ordering,
upon application of lattice translations). There is no way to
programmatically check for this, so if the input structure is
incorrect, the class will output the incorrect kpath without
any warning being issued.
"""
def __init__(
self,
structure,
has_magmoms=False,
magmom_axis=None,
symprec=0.01,
angle_tolerance=5,
atol=1e-5,
):
"""
Args:
structure (Structure): Structure object
has_magmoms (boolean): Whether the input structure contains
magnetic moments as site properties with the key 'magmom.'
Values may be in the form of 3-component vectors given in
the basis of the input lattice vectors, or as scalars, in
which case the spin axis will default to a_3, the third
real-space lattice vector (this triggers a warning).
magmom_axis (list or numpy array): 3-component vector specifying
direction along which magnetic moments given as scalars
should point. If all magnetic moments are provided as
vectors then this argument is not used.
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
atol (float): Absolute tolerance used to determine symmetric
equivalence of points and lines on the BZ.
"""
super().__init__(structure, symprec=symprec, angle_tolerance=angle_tolerance, atol=atol)
# Check to see if input lattice is reducible. Ref: B Gruber in Acta. Cryst. Vol. A29,
# pp. 433-440 ('The Relationship between Reduced Cells in a General Bravais lattice').
# The correct BZ will still be obtained if the lattice vectors are reducible by any
# linear combination of themselves with coefficients of absolute value less than 2,
# hence a missing factor of 2 as compared to the reference.
reducible = []
for i in range(3):
for j in range(3):
if i != j:
if (
np.absolute(np.dot(self._latt.matrix[i], self._latt.matrix[j]))
> np.dot(self._latt.matrix[i], self._latt.matrix[i])
and np.absolute(
np.dot(self._latt.matrix[i], self._latt.matrix[j])
- np.dot(self._latt.matrix[i], self._latt.matrix[i])
)
> atol
):
reducible.append(True)
else:
reducible.append(False)
if np.any(reducible):
warn(
"The lattice of the input structure is not fully reduced! "
"The path can be incorrect. Use at your own risk."
)
if magmom_axis is None:
magmom_axis = np.array([0, 0, 1])
axis_specified = False
else:
axis_specified = True
self._kpath = self._get_ksymm_kpath(has_magmoms, magmom_axis, axis_specified, symprec, angle_tolerance, atol)
@property
def mag_type(self):
"""
Returns:
The type of magnetic space group as a string.
Current implementation does not distinguish
between types 3 and 4, so return value is '3/4'.
If has_magmoms is False, returns '0'.
"""
return self._mag_type
def _get_ksymm_kpath(self, has_magmoms, magmom_axis, axis_specified, symprec, angle_tolerance, atol):
ID = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# parity, aka the inversion operation (not calling it INV to avoid
# confusion with the np.linalg.inv() function)
PAR = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])
# 1: Get lattices of real and reciprocal structures, and reciprocal
# point group, and Brillouin zone (BZ)
V = self._latt.matrix.T # fractional real space to cartesian real space
# fractional reciprocal space to cartesian reciprocal space
W = self._rec_lattice.matrix.T
# fractional real space to fractional reciprocal space
A = np.dot(np.linalg.inv(W), V)
if has_magmoms:
grey_struct = self._structure.copy()
grey_struct.remove_site_property("magmom")
sga = SpacegroupAnalyzer(grey_struct, symprec=symprec, angle_tolerance=angle_tolerance)
grey_ops = sga.get_symmetry_operations()
self._structure = self._convert_all_magmoms_to_vectors(magmom_axis, axis_specified)
mag_ops = self._get_magnetic_symmetry_operations(self._structure, grey_ops, atol)
D = [
SymmOp.from_rotation_and_translation(
rotation_matrix=op.rotation_matrix,
translation_vec=op.translation_vector,
)
for op in mag_ops
if op.time_reversal == 1
]
fD = [
SymmOp.from_rotation_and_translation(
rotation_matrix=op.rotation_matrix,
translation_vec=op.translation_vector,
)
for op in mag_ops
if op.time_reversal == -1
]
if np.array([m == np.array([0, 0, 0]) for m in self._structure.site_properties["magmom"]]).all():
fD = D
D = []
if len(fD) == 0: # no operations contain time reversal; type 1
self._mag_type = "1"
isomorphic_point_group = [d.rotation_matrix for d in D]
recip_point_group = self._get_reciprocal_point_group(isomorphic_point_group, ID, A)
elif len(D) == 0: # all operations contain time reversal / all magmoms zero; type 2
self._mag_type = "2"
isomorphic_point_group = [d.rotation_matrix for d in fD]
recip_point_group = self._get_reciprocal_point_group(isomorphic_point_group, PAR, A)
else: # half and half; type 3 or 4
self._mag_type = "3/4"
f = self._get_coset_factor(D + fD, D)
isomorphic_point_group = [d.rotation_matrix for d in D]
recip_point_group = self._get_reciprocal_point_group(
isomorphic_point_group, np.dot(PAR, f.rotation_matrix), A
)
else:
self._mag_type = "0"
if "magmom" in self._structure.site_properties:
warn(
"The parameter has_magmoms is False, but site_properties contains the key magmom."
"This property will be removed and could result in different symmetry operations."
)
self._structure.remove_site_property("magmom")
sga = SpacegroupAnalyzer(self._structure)
ops = sga.get_symmetry_operations()
isomorphic_point_group = [op.rotation_matrix for op in ops]
recip_point_group = self._get_reciprocal_point_group(isomorphic_point_group, PAR, A)
self._rpg = recip_point_group
# 2: Get all vertices, edge- and face- center points of BZ ("key points")
key_points, bz_as_key_point_inds, face_center_inds = self._get_key_points()
# 3: Find symmetry-equivalent points, which can be mapped to each other by a combination of point group
# operations and integer translations by lattice vectors. The integers will only be -1, 0, or 1, since
# we are restricting to the BZ.
key_points_inds_orbits = self._get_key_point_orbits(key_points=key_points)
# 4: Get all lines on BZ between adjacent key points and between gamma
# and key points ("key lines")
key_lines = self._get_key_lines(key_points=key_points, bz_as_key_point_inds=bz_as_key_point_inds)
# 5: Find symmetry-equivalent key lines, defined as endpoints of first line being equivalent
# to end points of second line, and a random point in between being equivalent to the mapped
# random point.
key_lines_inds_orbits = self._get_key_line_orbits(
key_points=key_points,
key_lines=key_lines,
key_points_inds_orbits=key_points_inds_orbits,
)
# 6 & 7: Get little groups for key points (group of symmetry elements present at that point).
# Get little groups for key lines (group of symmetry elements present at every point
# along the line). This is implemented by testing the symmetry at a point e/pi of the
# way between the two endpoints.
little_groups_points, little_groups_lines = self._get_little_groups(
key_points=key_points,
key_points_inds_orbits=key_points_inds_orbits,
key_lines_inds_orbits=key_lines_inds_orbits,
)
# 8: Choose key lines for k-path. Loose criteria set: choose any points / segments
# with spatial symmetry greater than the general position (AKA more symmetry operations
# than just the identity or identity * TR in the little group).
# This function can be edited to alter high-symmetry criteria for choosing points and lines
point_orbits_in_path, line_orbits_in_path = self._choose_path(
key_points=key_points,
key_points_inds_orbits=key_points_inds_orbits,
key_lines_inds_orbits=key_lines_inds_orbits,
little_groups_points=little_groups_points,
little_groups_lines=little_groups_lines,
)
# 10: Consolidate selected segments into a single irreducible section of the Brillouin zone (as determined
# by the reciprocal point and lattice symmetries). This is accomplished by identifying the boundary
# planes of the IRBZ. Also, get labels for points according to distance away from axes.
IRBZ_points_inds = self._get_IRBZ(recip_point_group, W, key_points, face_center_inds, atol)
lines_in_path_inds = []
for ind in line_orbits_in_path:
for tup in key_lines_inds_orbits[ind]:
if tup[0] in IRBZ_points_inds and tup[1] in IRBZ_points_inds:
lines_in_path_inds.append(tup)
break
G = nx.Graph(lines_in_path_inds)
lines_in_path_inds = list(nx.edge_dfs(G))
points_in_path_inds = [ind for tup in lines_in_path_inds for ind in tup]
points_in_path_inds_unique = list(set(points_in_path_inds))
orbit_cosines = []
for i, orbit in enumerate(key_points_inds_orbits[:-1]):
orbit_cosines.append(
sorted(
sorted(
(
(
j,
np.round(
np.dot(key_points[k], self.LabelPoints(j))
/ (np.linalg.norm(key_points[k]) * np.linalg.norm(self.LabelPoints(j))),
decimals=3,
),
)
for k in orbit
for j in range(26)
),
key=operator.itemgetter(0),
),
key=operator.itemgetter(1),
reverse=True,
)
)
orbit_labels = self._get_orbit_labels(orbit_cosines, key_points_inds_orbits, atol)
key_points_labels = ["" for i in range(len(key_points))]
for i, orbit in enumerate(key_points_inds_orbits):
for point_ind in orbit:
key_points_labels[point_ind] = self.LabelSymbol(int(orbit_labels[i]))
kpoints = {}
reverse_kpoints = {}
for point_ind in points_in_path_inds_unique:
point_label = key_points_labels[point_ind]
if point_label not in kpoints.keys():
kpoints[point_label] = key_points[point_ind]
reverse_kpoints[point_ind] = point_label
else:
existing_labels = [key for key in kpoints.keys() if point_label in key]
if "'" not in point_label:
existing_labels[:] = [label for label in existing_labels if "'" not in label]
if len(existing_labels) == 1:
max_occurence = 0
else:
if "'" not in point_label:
max_occurence = max(int(label[3:-1]) for label in existing_labels[1:])
else:
max_occurence = max(int(label[4:-1]) for label in existing_labels[1:])
kpoints[point_label + "_{" + str(max_occurence + 1) + "}"] = key_points[point_ind]
reverse_kpoints[point_ind] = point_label + "_{" + str(max_occurence + 1) + "}"
path = []
i = 0
start_of_subpath = True
while i < len(points_in_path_inds):
if start_of_subpath:
path.append([reverse_kpoints[points_in_path_inds[i]]])
i += 1
start_of_subpath = False
elif points_in_path_inds[i] == points_in_path_inds[i + 1]:
path[-1].append(reverse_kpoints[points_in_path_inds[i]])
i += 2
else:
path[-1].append(reverse_kpoints[points_in_path_inds[i]])
i += 1
start_of_subpath = True
if i == len(points_in_path_inds) - 1:
path[-1].append(reverse_kpoints[points_in_path_inds[i]])
i += 1
return {"kpoints": kpoints, "path": path}
def _choose_path(
self,
key_points,
key_points_inds_orbits,
key_lines_inds_orbits,
little_groups_points,
little_groups_lines,
):
#
# This function can be edited to alter high-symmetry criteria for choosing points and lines
#
ID = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
PAR = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])
gamma_ind = len(key_points) - 1
line_orbits_in_path = []
point_orbits_in_path = []
for (i, little_group) in enumerate(little_groups_lines):
add_rep = False
nC2 = 0
nC3 = 0
nsig = 0
for j, opind in enumerate(little_group):
op = self._rpg[opind]
if not (op == ID).all():
if (np.dot(op, op) == ID).all():
if np.linalg.det(op) == 1:
nC2 += 1
break
if not (op == PAR).all():
nsig += 1
break
elif (np.dot(op, np.dot(op, op)) == ID).all():
nC3 += 1
break
if nC2 > 0 or nC3 > 0 or nsig > 0:
add_rep = True
if add_rep:
line_orbits_in_path.append(i)
l = key_lines_inds_orbits[i][0]
ind0 = l[0]
ind1 = l[1]
found0 = False
found1 = False
for (j, orbit) in enumerate(key_points_inds_orbits):
if ind0 in orbit:
point_orbits_in_path.append(j)
found0 = True
if ind1 in orbit:
point_orbits_in_path.append(j)
found1 = True
if found0 and found1:
break
point_orbits_in_path = list(set(point_orbits_in_path))
# Choose remaining unconnected key points for k-path. The ones that remain are
# those with inversion symmetry. Connect them to gamma.
unconnected = []
for i in range(len(key_points_inds_orbits)):
if i not in point_orbits_in_path:
unconnected.append(i)
for ind in unconnected:
connect = False
for op_ind in little_groups_points[ind]:
op = self._rpg[op_ind]
if (op == ID).all():
pass
elif (op == PAR).all():
connect = True
break
elif np.linalg.det(op) == 1:
if (np.dot(op, np.dot(op, op)) == ID).all():
pass
else:
connect = True
break
else:
pass
if connect:
l = (key_points_inds_orbits[ind][0], gamma_ind)
for (j, orbit) in enumerate(key_lines_inds_orbits):
if l in orbit:
line_orbits_in_path.append(j)
break
if gamma_ind not in point_orbits_in_path:
point_orbits_in_path.append(gamma_ind)
point_orbits_in_path.append(ind)
return point_orbits_in_path, line_orbits_in_path
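# The little-group classification above, illustrated (hedged numpy sketch):
#     C2 = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])  # two-fold rotation about z
#     assert np.linalg.det(C2) == 1 and (C2 @ C2 == np.eye(3)).all()
#     M = np.diag([1, 1, -1])                              # mirror plane
#     assert np.linalg.det(M) == -1 and (M @ M == np.eye(3)).all()
# Three-fold rotations instead satisfy op @ op @ op == identity.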
def _get_key_points(self):
decimals = ceil(-1 * np.log10(self._atol)) - 1
bz = self._rec_lattice.get_wigner_seitz_cell()
key_points = []
face_center_inds = []
bz_as_key_point_inds = []
# pymatgen gives BZ in cartesian coordinates; convert to fractional in
# the primitive basis for reciprocal space
for (i, facet) in enumerate(bz):
for (j, vert) in enumerate(facet):
vert = self._rec_lattice.get_fractional_coords(vert)
bz[i][j] = vert
pop = []
for i, facet in enumerate(bz):
rounded_facet = np.around(facet, decimals=decimals)
u, indices = np.unique(rounded_facet, axis=0, return_index=True)
if len(u) in [1, 2]:
pop.append(i)
else:
bz[i] = [facet[j] for j in np.sort(indices)]
bz = [bz[i] for i in range(len(bz)) if i not in pop]
# use vertex points to calculate edge- and face- centers
for (i, facet) in enumerate(bz):
bz_as_key_point_inds.append([])
for (j, vert) in enumerate(facet):
edge_center = (vert + facet[j + 1]) / 2.0 if j != len(facet) - 1 else (vert + facet[0]) / 2.0
duplicatevert = False
duplicateedge = False
for (k, point) in enumerate(key_points):
if np.allclose(vert, point, atol=self._atol):
bz_as_key_point_inds[i].append(k)
duplicatevert = True
break
for (k, point) in enumerate(key_points):
if np.allclose(edge_center, point, atol=self._atol):
bz_as_key_point_inds[i].append(k)
duplicateedge = True
break
if not duplicatevert:
key_points.append(vert)
bz_as_key_point_inds[i].append(len(key_points) - 1)
if not duplicateedge:
key_points.append(edge_center)
bz_as_key_point_inds[i].append(len(key_points) - 1)
if len(facet) == 4: # parallelogram facet
face_center = (facet[0] + facet[1] + facet[2] + facet[3]) / 4.0
key_points.append(face_center)
face_center_inds.append(len(key_points) - 1)
bz_as_key_point_inds[i].append(len(key_points) - 1)
else: # hexagonal facet
face_center = (facet[0] + facet[1] + facet[2] + facet[3] + facet[4] + facet[5]) / 6.0
key_points.append(face_center)
face_center_inds.append(len(key_points) - 1)
bz_as_key_point_inds[i].append(len(key_points) - 1)
# add gamma point
key_points.append(np.array([0, 0, 0]))
return key_points, bz_as_key_point_inds, face_center_inds
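# Sanity check for a simple-cubic lattice, whose BZ is a cube: 8 vertices
# + 12 edge centers + 6 face centers + the gamma point = 27 key points.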
def _get_key_point_orbits(self, key_points):
key_points_copy = dict(zip(range(len(key_points) - 1), key_points[0 : len(key_points) - 1]))
# gamma not equivalent to any on BZ and is last point added to
# key_points
key_points_inds_orbits = []
i = 0
while len(key_points_copy) > 0:
key_points_inds_orbits.append([])
k0ind = list(key_points_copy.keys())[0]
k0 = key_points_copy[k0ind]
key_points_inds_orbits[i].append(k0ind)
key_points_copy.pop(k0ind)
for op in self._rpg:
to_pop = []
k1 = np.dot(op, k0)
for ind_key in key_points_copy:
diff = k1 - key_points_copy[ind_key]
if self._all_ints(diff, atol=self._atol):
key_points_inds_orbits[i].append(ind_key)
to_pop.append(ind_key)
for key in to_pop:
key_points_copy.pop(key)
i += 1
key_points_inds_orbits.append([len(key_points) - 1])
return key_points_inds_orbits
@staticmethod
def _get_key_lines(key_points, bz_as_key_point_inds):
key_lines = []
gamma_ind = len(key_points) - 1
for (i, facet_as_key_point_inds) in enumerate(bz_as_key_point_inds):
facet_as_key_point_inds_bndy = facet_as_key_point_inds[: len(facet_as_key_point_inds) - 1]
# not the face center point (don't need to check it since it's not
# shared with other facets)
face_center_ind = facet_as_key_point_inds[-1]
for (j, ind) in enumerate(facet_as_key_point_inds_bndy):
if (
min(ind, facet_as_key_point_inds_bndy[j - 1]),
max(ind, facet_as_key_point_inds_bndy[j - 1]),
) not in key_lines:
key_lines.append(
(
min(ind, facet_as_key_point_inds_bndy[j - 1]),
max(ind, facet_as_key_point_inds_bndy[j - 1]),
)
)
k = j + 1 if j != len(facet_as_key_point_inds_bndy) - 1 else 0
if (
min(ind, facet_as_key_point_inds_bndy[k]),
max(ind, facet_as_key_point_inds_bndy[k]),
) not in key_lines:
key_lines.append(
(
min(ind, facet_as_key_point_inds_bndy[k]),
max(ind, facet_as_key_point_inds_bndy[k]),
)
)
if (ind, gamma_ind) not in key_lines:
key_lines.append((ind, gamma_ind))
key_lines.append((min(ind, face_center_ind), max(ind, face_center_ind)))
key_lines.append((face_center_ind, gamma_ind))
return key_lines
def _get_key_line_orbits(self, key_points, key_lines, key_points_inds_orbits):
key_lines_copy = dict(zip(range(len(key_lines)), key_lines))
key_lines_inds_orbits = []
i = 0
while len(key_lines_copy) > 0:
key_lines_inds_orbits.append([])
l0ind = list(key_lines_copy.keys())[0]
l0 = key_lines_copy[l0ind]
key_lines_inds_orbits[i].append(l0)
key_lines_copy.pop(l0ind)
to_pop = []
p00 = key_points[l0[0]]
p01 = key_points[l0[1]]
pmid0 = p00 + e / pi * (p01 - p00)
for ind_key in key_lines_copy:
l1 = key_lines_copy[ind_key]
p10 = key_points[l1[0]]
p11 = key_points[l1[1]]
equivptspar = False
equivptsperp = False
equivline = False
if (
np.array([l0[0] in orbit and l1[0] in orbit for orbit in key_points_inds_orbits]).any()
and np.array([l0[1] in orbit and l1[1] in orbit for orbit in key_points_inds_orbits]).any()
):
equivptspar = True
elif (
np.array([l0[1] in orbit and l1[0] in orbit for orbit in key_points_inds_orbits]).any()
and np.array([l0[0] in orbit and l1[1] in orbit for orbit in key_points_inds_orbits]).any()
):
equivptsperp = True
if equivptspar:
pmid1 = p10 + e / pi * (p11 - p10)
for op in self._rpg:
if not equivline:
p00pr = np.dot(op, p00)
diff0 = p10 - p00pr
if self._all_ints(diff0, atol=self._atol):
pmid0pr = np.dot(op, pmid0) + diff0
p01pr = np.dot(op, p01) + diff0
if np.allclose(p11, p01pr, atol=self._atol) and np.allclose(
pmid1, pmid0pr, atol=self._atol
):
equivline = True
elif equivptsperp:
pmid1 = p11 + e / pi * (p10 - p11)
for op in self._rpg:
if not equivline:
p00pr = np.dot(op, p00)
diff0 = p11 - p00pr
if self._all_ints(diff0, atol=self._atol):
pmid0pr = np.dot(op, pmid0) + diff0
p01pr = np.dot(op, p01) + diff0
if np.allclose(p10, p01pr, atol=self._atol) and np.allclose(
pmid1, pmid0pr, atol=self._atol
):
equivline = True
if equivline:
key_lines_inds_orbits[i].append(l1)
to_pop.append(ind_key)
for key in to_pop:
key_lines_copy.pop(key)
i += 1
return key_lines_inds_orbits
def _get_little_groups(self, key_points, key_points_inds_orbits, key_lines_inds_orbits):
little_groups_points = [] # elements are lists of indices of recip_point_group. the
# list little_groups_points[i] is the little group for the
# orbit key_points_inds_orbits[i]
for (i, orbit) in enumerate(key_points_inds_orbits):
k0 = key_points[orbit[0]]
little_groups_points.append([])
for (j, op) in enumerate(self._rpg):
gamma_to = np.dot(op, -1 * k0) + k0
check_gamma = True
if not self._all_ints(gamma_to, atol=self._atol):
check_gamma = False
if check_gamma:
little_groups_points[i].append(j)
# elements are lists of indices of recip_point_group. the list
# little_groups_lines[i] is the little group for the orbit
# key_lines_inds_orbits[i]
little_groups_lines = []
for (i, orbit) in enumerate(key_lines_inds_orbits):
l0 = orbit[0]
v = key_points[l0[1]] - key_points[l0[0]]
k0 = key_points[l0[0]] + np.e / pi * v
little_groups_lines.append([])
for (j, op) in enumerate(self._rpg):
gamma_to = np.dot(op, -1 * k0) + k0
check_gamma = True
if not self._all_ints(gamma_to, atol=self._atol):
check_gamma = False
if check_gamma:
little_groups_lines[i].append(j)
return little_groups_points, little_groups_lines
def _convert_all_magmoms_to_vectors(self, magmom_axis, axis_specified):
struct = self._structure.copy()
magmom_axis = np.array(magmom_axis)
if "magmom" not in struct.site_properties:
warn(
"The 'magmom' property is not set in the structure's site properties."
"All magnetic moments are being set to zero."
)
struct.add_site_property("magmom", [np.array([0, 0, 0]) for i in range(len(struct.sites))])
return struct
old_magmoms = struct.site_properties["magmom"]
new_magmoms = []
found_scalar = False
for magmom in old_magmoms:
if isinstance(magmom, np.ndarray):
new_magmoms.append(magmom)
elif isinstance(magmom, list):
new_magmoms.append(np.array(magmom))
else:
found_scalar = True
new_magmoms.append(magmom * magmom_axis)
if found_scalar and not axis_specified:
warn("At least one magmom had a scalar value and magmom_axis was not specified. Defaulted to z+ spinor.")
struct.remove_site_property("magmom")
struct.add_site_property("magmom", new_magmoms)
return struct
def _get_magnetic_symmetry_operations(self, struct, grey_ops, atol):
mag_ops = []
magmoms = struct.site_properties["magmom"]
nonzero_magmom_inds = [i for i in range(len(struct.sites)) if not (magmoms[i] == np.array([0, 0, 0])).all()]
init_magmoms = [site.properties["magmom"] for (i, site) in enumerate(struct.sites) if i in nonzero_magmom_inds]
sites = [site for (i, site) in enumerate(struct.sites) if i in nonzero_magmom_inds]
init_site_coords = [site.frac_coords for site in sites]
for op in grey_ops:
r = op.rotation_matrix
t = op.translation_vector
xformed_magmoms = [self._apply_op_to_magmom(r, magmom) for magmom in init_magmoms]
xformed_site_coords = [np.dot(r, site.frac_coords) + t for site in sites]
permutation = ["a" for i in range(len(sites))]
not_found = list(range(len(sites)))
for i in range(len(sites)):
xformed = xformed_site_coords[i]
for k, j in enumerate(not_found):
init = init_site_coords[j]
diff = xformed - init
if self._all_ints(diff, atol=atol):
permutation[i] = j
not_found.pop(k)
break
same = np.zeros(len(sites))
flipped = np.zeros(len(sites))
for i, magmom in enumerate(xformed_magmoms):
if (magmom == init_magmoms[permutation[i]]).all():
same[i] = 1
elif (magmom == -1 * init_magmoms[permutation[i]]).all():
flipped[i] = 1
if same.all(): # add symm op without tr
mag_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=op.translation_vector,
time_reversal=1,
)
)
if flipped.all(): # add symm op with tr
mag_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=op.translation_vector,
time_reversal=-1,
)
)
return mag_ops
@staticmethod
def _get_reciprocal_point_group(ops, R, A):
Ainv = np.linalg.inv(A)
# convert to reciprocal primitive basis
recip_point_group = [np.around(np.dot(A, np.dot(R, Ainv)), decimals=2)]
for op in ops:
op = np.around(np.dot(A, np.dot(op, Ainv)), decimals=2)
new = True
new_coset = True
for thing in recip_point_group:
if (thing == op).all():
new = False
if (thing == np.dot(R, op)).all():
new_coset = False
if new:
recip_point_group.append(op)
if new_coset:
recip_point_group.append(np.dot(R, op))
return recip_point_group
@staticmethod
def _closewrapped(pos1, pos2, tolerance):
pos1 = pos1 % 1.0
pos2 = pos2 % 1.0
if len(pos1) != len(pos2):
return False
for i in range(len(pos1)):
if tolerance[i] < abs(pos1[i] - pos2[i]) < 1.0 - tolerance[i]:
return False
return True
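# Illustrative (not part of the original module): _closewrapped compares
# fractional coordinates modulo 1, so points just inside opposite faces of
# the unit cell count as close. A doctest-style sketch, assuming this
# file's KPathLatimerMunro class:
#   >>> import numpy as np
#   >>> tol = np.ones(3) * 1e-5
#   >>> KPathLatimerMunro._closewrapped(
#   ...     np.array([0.999999, 0.5, 0.0]), np.array([0.000001, 0.5, 0.0]), tol)
#   True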
def _get_coset_factor(self, G, H):
# finds g for the left coset decomposition G = H + gH
# (H must be a subgroup of G with index two).
# in this implementation, G and H are lists of SymmOp objects
gH = []
for i, op1 in enumerate(G):
in_H = False
for op2 in H:
if np.allclose(op1.rotation_matrix, op2.rotation_matrix, atol=self._atol) and self._closewrapped(
op1.translation_vector,
op2.translation_vector,
np.ones(3) * self._atol,
):
in_H = True
break
if not in_H:
gH.append(op1)
for op in gH:
opH = [op.__mul__(h) for h in H]
is_coset_factor = True
for op1 in opH:
for op2 in H:
if np.allclose(op1.rotation_matrix, op2.rotation_matrix, atol=self._atol) and self._closewrapped(
op1.translation_vector,
op2.translation_vector,
np.ones(3) * self._atol,
):
is_coset_factor = False
break
if not is_coset_factor:
break
if is_coset_factor:
return op
return "No coset factor found."
@staticmethod
def _apply_op_to_magmom(r, magmom):
if np.linalg.det(r) == 1:
return np.dot(r, magmom)
return -1 * np.dot(r, magmom)
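# Illustrative (not in the original source): magnetic moments transform as
# pseudovectors (axial vectors), so improper operations pick up an extra
# sign; the inversion -I therefore leaves a moment unchanged. A sketch,
# assuming this file's KPathLatimerMunro class:
#   >>> import numpy as np
#   >>> m = np.array([0.0, 0.0, 2.5])
#   >>> np.allclose(KPathLatimerMunro._apply_op_to_magmom(-np.eye(3), m), m)
#   True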
@staticmethod
def _all_ints(arr, atol):
rounded_arr = np.around(arr, decimals=0)
return np.allclose(rounded_arr, arr, atol=atol)
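# Illustrative (not in the original source): _all_ints is the "same point up
# to a reciprocal lattice translation" test used throughout this class. A
# sketch, assuming this file's KPathLatimerMunro class:
#   >>> import numpy as np
#   >>> KPathLatimerMunro._all_ints(np.array([1.0, -2.0000001, 0.0]), atol=1e-5)
#   True
#   >>> KPathLatimerMunro._all_ints(np.array([0.5, 0.0, 0.0]), atol=1e-5)
#   False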
def _get_IRBZ(self, recip_point_group, W, key_points, face_center_inds, atol):
rpgdict = self._get_reciprocal_point_group_dict(recip_point_group, atol)
g = np.dot(W.T, W) # just using change of basis matrix rather than
# Lattice.get_cartesian_coordinates for conciseness
ginv = np.linalg.inv(g)
D = np.linalg.det(W)
primary_orientation = None
secondary_orientation = None
tertiary_orientation = None
planar_boundaries = []
IRBZ_points = list(enumerate(key_points))
for sigma in rpgdict["reflections"]:
norm = sigma["normal"]
if primary_orientation is None:
primary_orientation = norm
planar_boundaries.append(norm)
elif np.isclose(np.dot(primary_orientation, np.dot(g, norm)), 0, atol=atol):
if secondary_orientation is None:
secondary_orientation = norm
planar_boundaries.append(norm)
elif np.isclose(np.dot(secondary_orientation, np.dot(g, norm)), 0, atol=atol):
if tertiary_orientation is None:
tertiary_orientation = norm
planar_boundaries.append(norm)
elif np.allclose(norm, -1 * tertiary_orientation, atol=atol):
pass
elif np.dot(secondary_orientation, np.dot(g, norm)) < 0:
planar_boundaries.append(-1 * norm)
else:
planar_boundaries.append(norm)
elif np.dot(primary_orientation, np.dot(g, norm)) < 0:
planar_boundaries.append(-1 * norm)
else:
planar_boundaries.append(norm)
IRBZ_points = self._reduce_IRBZ(IRBZ_points, planar_boundaries, g, atol)
used_axes = []
# six-fold rotoinversion always comes with horizontal mirror so don't
# need to check
for rotn in rpgdict["rotations"]["six-fold"]:
ax = rotn["axis"]
op = rotn["op"]
if not np.any([np.allclose(ax, usedax, atol) for usedax in used_axes]):
if self._op_maps_IRBZ_to_self(op, IRBZ_points, atol):
face_center_found = False
for point in IRBZ_points:
if point[0] in face_center_inds:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
face_center_found = True
used_axes.append(ax)
break
if not face_center_found:
print("face center not found")
for point in IRBZ_points:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
used_axes.append(ax)
break
IRBZ_points = self._reduce_IRBZ(IRBZ_points, rot_boundaries, g, atol)
for rotn in rpgdict["rotations"]["rotoinv-four-fold"]:
ax = rotn["axis"]
op = rotn["op"]
if not np.any([np.allclose(ax, usedax, atol) for usedax in used_axes]):
if self._op_maps_IRBZ_to_self(op, IRBZ_points, atol):
face_center_found = False
for point in IRBZ_points:
if point[0] in face_center_inds:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, np.dot(op, cross)]
face_center_found = True
used_axes.append(ax)
break
if not face_center_found:
print("face center not found")
for point in IRBZ_points:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
used_axes.append(ax)
break
IRBZ_points = self._reduce_IRBZ(IRBZ_points, rot_boundaries, g, atol)
for rotn in rpgdict["rotations"]["four-fold"]:
ax = rotn["axis"]
op = rotn["op"]
if not np.any([np.allclose(ax, usedax, atol) for usedax in used_axes]):
if self._op_maps_IRBZ_to_self(op, IRBZ_points, atol):
face_center_found = False
for point in IRBZ_points:
if point[0] in face_center_inds:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
face_center_found = True
used_axes.append(ax)
break
if not face_center_found:
print("face center not found")
for point in IRBZ_points:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
used_axes.append(ax)
break
IRBZ_points = self._reduce_IRBZ(IRBZ_points, rot_boundaries, g, atol)
for rotn in rpgdict["rotations"]["rotoinv-three-fold"]:
ax = rotn["axis"]
op = rotn["op"]
if not np.any([np.allclose(ax, usedax, atol) for usedax in used_axes]):
if self._op_maps_IRBZ_to_self(op, IRBZ_points, atol):
face_center_found = False
for point in IRBZ_points:
if point[0] in face_center_inds:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [
cross,
-1 * np.dot(sqrtm(-1 * op), cross),
]
face_center_found = True
used_axes.append(ax)
break
if not face_center_found:
print("face center not found")
for point in IRBZ_points:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
used_axes.append(ax)
break
IRBZ_points = self._reduce_IRBZ(IRBZ_points, rot_boundaries, g, atol)
for rotn in rpgdict["rotations"]["three-fold"]:
ax = rotn["axis"]
op = rotn["op"]
if not np.any([np.allclose(ax, usedax, atol) for usedax in used_axes]):
if self._op_maps_IRBZ_to_self(op, IRBZ_points, atol):
face_center_found = False
for point in IRBZ_points:
if point[0] in face_center_inds:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
face_center_found = True
used_axes.append(ax)
break
if not face_center_found:
print("face center not found")
for point in IRBZ_points:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
used_axes.append(ax)
break
IRBZ_points = self._reduce_IRBZ(IRBZ_points, rot_boundaries, g, atol)
for rotn in rpgdict["rotations"]["two-fold"]:
ax = rotn["axis"]
op = rotn["op"]
if not np.any([np.allclose(ax, usedax, atol) for usedax in used_axes]):
if self._op_maps_IRBZ_to_self(op, IRBZ_points, atol):
face_center_found = False
for point in IRBZ_points:
if point[0] in face_center_inds:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
face_center_found = True
used_axes.append(ax)
break
if not face_center_found:
print("face center not found")
for point in IRBZ_points:
cross = D * np.dot(ginv, np.cross(ax, point[1]))
if not np.allclose(cross, 0, atol=atol):
rot_boundaries = [cross, -1 * np.dot(op, cross)]
used_axes.append(ax)
break
IRBZ_points = self._reduce_IRBZ(IRBZ_points, rot_boundaries, g, atol)
return [point[0] for point in IRBZ_points]
@staticmethod
def _get_reciprocal_point_group_dict(recip_point_group, atol):
PAR = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])
d = {
"reflections": [],
"rotations": {
"two-fold": [],
"three-fold": [],
"four-fold": [],
"six-fold": [],
"rotoinv-three-fold": [],
"rotoinv-four-fold": [],
"rotoinv-six-fold": [],
},
"inversion": [],
}
for i, op in enumerate(recip_point_group):
evals, evects = np.linalg.eig(op)
tr = np.trace(op)
det = np.linalg.det(op)
# Proper rotations
if np.isclose(det, 1, atol=atol):
if np.isclose(tr, 3, atol=atol):
continue
if np.isclose(tr, -1, atol=atol): # two-fold rotation
for j in range(3):
if np.isclose(evals[j], 1, atol=atol):
ax = evects[:, j]
d["rotations"]["two-fold"].append({"ind": i, "axis": ax, "op": op})
elif np.isclose(tr, 0, atol=atol): # three-fold rotation
for j in range(3):
if np.isreal(evals[j]) and np.isclose(np.absolute(evals[j]), 1, atol=atol):
ax = evects[:, j]
d["rotations"]["three-fold"].append({"ind": i, "axis": ax, "op": op})
# four-fold rotation
elif np.isclose(tr, 1, atol=atol):
for j in range(3):
if np.isreal(evals[j]) and np.isclose(np.absolute(evals[j]), 1, atol=atol):
ax = evects[:, j]
d["rotations"]["four-fold"].append({"ind": i, "axis": ax, "op": op})
elif np.isclose(tr, 2, atol=atol): # six-fold rotation
for j in range(3):
if np.isreal(evals[j]) and np.isclose(np.absolute(evals[j]), 1, atol=atol):
ax = evects[:, j]
d["rotations"]["six-fold"].append({"ind": i, "axis": ax, "op": op})
# Improper rotations
if np.isclose(det, -1, atol=atol):
if np.isclose(tr, -3, atol=atol):
d["inversion"].append({"ind": i, "op": PAR})
elif np.isclose(tr, 1, atol=atol): # two-fold rotation
for j in range(3):
if np.isclose(evals[j], -1, atol=atol):
norm = evects[:, j]
d["reflections"].append({"ind": i, "normal": norm, "op": op})
elif np.isclose(tr, 0, atol=atol): # three-fold rotoinversion
for j in range(3):
if np.isreal(evals[j]) and np.isclose(np.absolute(evals[j]), 1, atol=atol):
ax = evects[:, j]
d["rotations"]["rotoinv-three-fold"].append({"ind": i, "axis": ax, "op": op})
# four-fold rotoinversion
elif np.isclose(tr, -1, atol=atol):
for j in range(3):
if np.isreal(evals[j]) and np.isclose(np.absolute(evals[j]), 1, atol=atol):
ax = evects[:, j]
d["rotations"]["rotoinv-four-fold"].append({"ind": i, "axis": ax, "op": op})
# six-fold rotoinversion
elif np.isclose(tr, -2, atol=atol):
for j in range(3):
if np.isreal(evals[j]) and np.isclose(np.absolute(evals[j]), 1, atol=atol):
ax = evects[:, j]
d["rotations"]["rotoinv-six-fold"].append({"ind": i, "axis": ax, "op": op})
return d
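# Reference (added for clarity; summarizes the branches above): an orthogonal
# 3x3 operation is classified purely by its determinant and trace:
#   det = +1:  tr =  3 identity (skipped),  -1 two-fold,   0 three-fold,
#              tr =  1 four-fold,            2 six-fold
#   det = -1:  tr = -3 inversion,            1 reflection,  0 rotoinv-three-fold,
#              tr = -1 rotoinv-four-fold,   -2 rotoinv-six-fold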
@staticmethod
def _op_maps_IRBZ_to_self(op, IRBZ_points, atol):
point_coords = [point[1] for point in IRBZ_points]
for point in point_coords:
point_prime = np.dot(op, point)
mapped_back = False
for checkpoint in point_coords:
if np.allclose(point_prime, checkpoint, atol):
mapped_back = True
break
if not mapped_back:
return False
return True
@staticmethod
def _reduce_IRBZ(IRBZ_points, boundaries, g, atol):
in_reduced_section = []
for point in IRBZ_points:
in_reduced_section.append(
np.all(
[
(
np.dot(point[1], np.dot(g, boundary)) >= 0
or np.isclose(np.dot(point[1], np.dot(g, boundary)), 0, atol=atol)
)
for boundary in boundaries
]
)
)
return [IRBZ_points[i] for i in range(len(IRBZ_points)) if in_reduced_section[i]]
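# Illustrative (not in the original source): _reduce_IRBZ keeps only the
# points on the non-negative side of every boundary plane; the metric g
# supplies the dot product in the reciprocal primitive basis. A minimal
# sketch, assuming this file's KPathLatimerMunro class:
#   >>> import numpy as np
#   >>> pts = list(enumerate([np.array([0.25, 0.0, 0.0]),
#   ...                       np.array([-0.25, 0.0, 0.0])]))
#   >>> bounds = [np.array([1.0, 0.0, 0.0])]
#   >>> [ind for ind, _ in KPathLatimerMunro._reduce_IRBZ(pts, bounds, np.eye(3), 1e-5)]
#   [0]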
def _get_orbit_labels(self, orbit_cosines_orig, key_points_inds_orbits, atol):
orbit_cosines_copy = orbit_cosines_orig.copy()
orbit_labels_unsorted = [(len(key_points_inds_orbits) - 1, 26)]
orbit_inds_remaining = range(len(key_points_inds_orbits) - 1)
pop_orbits = []
pop_labels = []
for i, orb_cos in enumerate(orbit_cosines_copy):
if np.isclose(orb_cos[0][1], 1.0, atol=atol):
# (point orbit index, label index)
orbit_labels_unsorted.append((i, orb_cos[0][0]))
pop_orbits.append(i)
pop_labels.append(orb_cos[0][0])
orbit_cosines_copy = self._reduce_cosines_array(orbit_cosines_copy, pop_orbits, pop_labels)
orbit_inds_remaining = [i for i in orbit_inds_remaining if i not in pop_orbits]
# orbit_labels_unsorted already contains gamma orbit
while len(orbit_labels_unsorted) < len(orbit_cosines_orig) + 1:
pop_orbits = []
pop_labels = []
max_cosine_value = max(orb_cos[0][1] for orb_cos in orbit_cosines_copy)
max_cosine_value_inds = [
j for j in range(len(orbit_cosines_copy)) if orbit_cosines_copy[j][0][1] == max_cosine_value
]
max_cosine_label_inds = self._get_max_cosine_labels(
[orbit_cosines_copy[j] for j in max_cosine_value_inds],
key_points_inds_orbits,
atol,
)
for j, label_ind in enumerate(max_cosine_label_inds):
orbit_labels_unsorted.append((orbit_inds_remaining[max_cosine_value_inds[j]], label_ind))
pop_orbits.append(max_cosine_value_inds[j])
pop_labels.append(label_ind)
orbit_cosines_copy = self._reduce_cosines_array(orbit_cosines_copy, pop_orbits, pop_labels)
orbit_inds_remaining = [
orbit_inds_remaining[j] for j in range(len(orbit_inds_remaining)) if j not in pop_orbits
]
orbit_labels = np.zeros(len(key_points_inds_orbits))
for tup in orbit_labels_unsorted:
orbit_labels[tup[0]] = tup[1]
return orbit_labels
@staticmethod
def _reduce_cosines_array(orbit_cosines, pop_orbits, pop_labels):
return [
[orb_cos[i] for i in range(len(orb_cos)) if orb_cos[i][0] not in pop_labels]
for j, orb_cos in enumerate(orbit_cosines)
if j not in pop_orbits
]
def _get_max_cosine_labels(self, max_cosine_orbits_orig, key_points_inds_orbits, atol):
max_cosine_orbits_copy = max_cosine_orbits_orig.copy()
max_cosine_label_inds = np.zeros(len(max_cosine_orbits_copy))
initial_max_cosine_label_inds = [max_cos_orb[0][0] for max_cos_orb in max_cosine_orbits_copy]
u, inds, counts = np.unique(initial_max_cosine_label_inds, return_index=True, return_counts=True)
grouped_inds = [
[
i
for i in range(len(initial_max_cosine_label_inds))
if max_cosine_orbits_copy[i][0][0] == max_cosine_orbits_copy[ind][0][0]
]
for ind in inds
]
pop_orbits = []
pop_labels = []
unassigned_orbits = []
for i, ind in enumerate(inds):
if counts[i] == 1:
max_cosine_label_inds[ind] = initial_max_cosine_label_inds[ind]
pop_orbits.append(ind)
pop_labels.append(initial_max_cosine_label_inds[ind])
else:
next_choices = []
for grouped_ind in grouped_inds[i]:
j = 1
while True:
if max_cosine_orbits_copy[grouped_ind][j][0] not in initial_max_cosine_label_inds:
next_choices.append(max_cosine_orbits_copy[grouped_ind][j][1])
break
j += 1
worst_next_choice = next_choices.index(min(next_choices))
for grouped_ind in grouped_inds[i]:
if grouped_ind != worst_next_choice:
unassigned_orbits.append(grouped_ind)
max_cosine_label_inds[grouped_inds[i][worst_next_choice]] = initial_max_cosine_label_inds[
grouped_inds[i][worst_next_choice]
]
pop_orbits.append(grouped_inds[i][worst_next_choice])
pop_labels.append(initial_max_cosine_label_inds[grouped_inds[i][worst_next_choice]])
if len(unassigned_orbits) != 0:
max_cosine_orbits_copy = self._reduce_cosines_array(max_cosine_orbits_copy, pop_orbits, pop_labels)
unassigned_orbits_labels = self._get_orbit_labels(max_cosine_orbits_copy, key_points_inds_orbits, atol)
for i, unassigned_orbit in enumerate(unassigned_orbits):
max_cosine_label_inds[unassigned_orbit] = unassigned_orbits_labels[i]
return max_cosine_label_inds
@staticmethod
def LabelPoints(index):
"""
Axes used in generating labels for Latimer-Munro convention
"""
points = [
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 1, 0],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1],
[1, 2, 0],
[1, 0, 2],
[1, 2, 2],
[2, 1, 0],
[0, 1, 2],
[2, 1, 2],
[2, 0, 1],
[0, 2, 1],
[2, 2, 1],
[1, 1, 2],
[1, 2, 1],
[2, 1, 1],
[3, 3, 2],
[3, 2, 3],
[2, 3, 3],
[2, 2, 2],
[3, 2, 2],
[2, 3, 2],
[2, 2, 3],
[1e-10, 1e-10, 1e-10],
]
return points[index]
@staticmethod
def LabelSymbol(index):
"""
Letters used in generating labels for Latimer-Munro convention
"""
symbols = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"Γ",
]
return symbols[index]
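# Illustrative (not in the original source): LabelPoints and LabelSymbol are
# parallel lookup tables for the Latimer-Munro labelling scheme; index 26 is
# reserved for the near-origin point, labelled Gamma. Assuming this file's
# KPathLatimerMunro class:
#   >>> KPathLatimerMunro.LabelPoints(26)
#   [1e-10, 1e-10, 1e-10]
#   >>> KPathLatimerMunro.LabelSymbol(26)
#   'Γ'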
|
materialsproject/pymatgen
|
pymatgen/symmetry/kpath.py
|
Python
|
mit
| 94,572
|
[
"CRYSTAL",
"pymatgen"
] |
0e487d62ebcf7d5dd011d60ce0323e6185852499e2ce660e6cc3994f8e5b877d
|
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import types
import xml.etree.cElementTree as ElementTree # for VDJXML parsing
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
# ===================
# = DATA STRUCTURES =
# ===================
class ImmuneChain(SeqRecord):
"""Data structure to represent an immune chain.
It extends a biopython SeqRecord object with a simpler interface for
common analyses.
"""
def __init__(self, *args, **kw):
"""Initialize ImmuneChain
Initialization is performed either from a prebuilt SeqRecord object or
with the native SeqRecord constructor arguments.
"""
if len(args) > 0 and isinstance(args[0],SeqRecord): # pre-built SeqRecord
self._init_with_SeqRecord(args[0])
elif kw.has_key('record'): # pre-built SeqRecord
self._init_with_SeqRecord(kw['record'])
else: # native SeqRecord init
SeqRecord.__init__(self,*args,**kw)
# precompute hash on features for performance
self._update_feature_dict()
# load `source` feature qualifiers into annotations and delete `source`
# feature, if it exists
self._process_source_feature()
# define a set for uniq tags
self._tags = set(self.annotations.setdefault('tags',[]))
# force uppercase sequence
# self.seq = self.seq.upper()
def _init_with_SeqRecord(self,record):
# Check if record has phred_quality letter annotations and convert to
# ASCII string
if 'phred_quality' in record.letter_annotations and isinstance(record.letter_annotations['phred_quality'], types.ListType):
qual = ''.join([chr(q+33) for q in record.letter_annotations['phred_quality']])
record.letter_annotations['phred_quality'] = qual
# Initialize self using existing SeqRecord object
SeqRecord.__init__(self, seq=record.seq, id=record.id,
name=record.name, description=record.description,
dbxrefs=record.dbxrefs, features=record.features,
annotations=record.annotations,
letter_annotations=record.letter_annotations)
def _update_feature_dict(self):
self._features = {}
for (i,feature) in enumerate(self.features):
self._features.setdefault(feature.type,[]).append(i)
def _process_source_feature(self):
if 'source' in self._features:
if len(self._features['source']) > 1:
raise ValueError, "Found more than one `source` feature in %s" % self.id
for (k,v) in self.features[self._features['source'][0]].qualifiers.iteritems():
if k.startswith('__letter_annotations__'):
self.letter_annotations['.'.join(k.split('.')[1:])] = ''.join(v[0].split())
continue
if k != 'tags' and isinstance(v,types.ListType) and len(v) == 1: v = v[0]
# HACK: To deal with feature qualifier values that break across lines, the
# SeqIO parser loads each line separately, joins them with \n, and then
# replaces the newlines with a space. This is performed in
# `_feed_feature_table` in Scanner.py. However, this inserts spurious spaces
# in sequences that are stored along with my SeqRecord, like alignment
# annotations, so here I delete these extra spaces, manually checking for
# specific qualifiers that may be problematic.
if k == 'gapped_reference' or \
k == 'gapped_query':
self.annotations[k] = v.translate(None,string.whitespace)
continue
# for everything else
self.annotations[k] = v
self.features.pop(self._features['source'][0])
self._features.pop('source')
def __getattribute__(self,name):
"""Look for attributes in annotations and features."""
try:
return object.__getattribute__(self,name)
except AttributeError:
pass
try:
return self.annotations[name]
except KeyError:
pass
try:
if len(self._features[name]) > 1: raise AttributeError, "%s is not a unique feature" % name
return self.features[self._features[name][0]]
except KeyError:
pass
raise AttributeError, "couldn't find %s" % name
# define interface to tags object
def add_tags(self,tags):
if isinstance(tags,types.StringTypes):
tags = [tags]
elif isinstance(tags,types.ListType):
tags = list(tags)
else:
raise TypeError, "value must be string type or list type"
tags = set(tags)
self._tags.update(tags)
self.annotations['tags'] = list(self._tags)
return self
def add_tag(self,tag):
return self.add_tags(tag)
def has_tags(self,tags):
if isinstance(tags,types.StringTypes):
tags = [tags]
elif isinstance(tags,types.ListType):
tags = list(tags)
else:
raise TypeError, "value must be string type or list type"
return set(tags) <= self._tags
def has_tag(self,tag):
return self.has_tags(tag)
def del_tags(self,tags):
if isinstance(tags,types.StringTypes):
tags = [tags]
elif isinstance(tags,types.ListType):
tags = list(tags)
else:
raise TypeError, "value must be string type or list type"
for tag in tags:
self._tags.remove(tag)
self.annotations['tags'] = list(self._tags)
return self
def del_tag(self,tag):
return self.del_tags(tag)
# define some functional interface:
@property
def junction(self):
return self.junction_nt
@property
def junction_nt(self):
return self.__getattribute__('CDR3-IMGT').extract(self.seq.tostring())
@property
def junction_aa(self):
return self.__getattribute__('CDR3-IMGT').qualifiers['translation']
@property
def full_chain(self):
start = self.__getattribute__('V-REGION').location.nofuzzy_start
end = self.__getattribute__('J-REGION').location.nofuzzy_end
return self[start:end]
@property
def cdr3(self):
return len(self.junction)
@property
def v(self):
return self.__getattribute__('V-REGION').qualifiers['allele'][0]
@property
def v_seq(self):
return self.__getattribute__('V-REGION').extract(self.seq.tostring())
@property
def d(self):
# return self.features[self._features['D-REGION'][0]].qualifiers['allele']
return self.annotations['D-REGION']
@property
def j(self):
return self.__getattribute__('J-REGION').qualifiers['allele'][0]
@property
def j_seq(self):
return self.__getattribute__('J-REGION').extract(self.seq.tostring())
@property
def vj(self):
return '|'.join([self.v,self.j])
@property
def vdj(self):
return '|'.join([self.v,self.d,self.j])
@property
def num_mutations(self):
aln = self.letter_annotations['alignment']
return aln.count('S') + aln.count('I')
@property
def num_substitutions(self):
return self.letter_annotations['alignment'].count('S')
@property
def num_germline(self):
aln = self.letter_annotations['alignment']
return len(aln) - aln.count('3') - aln.count('_') - aln.count('I')
def format(self,*args,**kw):
"""Format SeqRecord using any supported format.
The only reason for redefining this is the hack related to storing
user-defined annotations in a source feature.
"""
self._update_feature_dict()
if 'source' in self._features:
# TODO: elim this whole if statement and only leave the else
raise ValueError, "I should never get here"
assert( len(self._features['source']) == 1 )
feature = self.features[ self._features['source'][0] ]
feature.qualifiers.update(self.annotations)
for (k,v) in self.letter_annotations.iteritems(): feature.qualifiers['__letter_annotations__.'+k] = v
else:
feature = SeqFeature( type='source',
location=FeatureLocation(0,len(self)),
qualifiers=self.annotations )
for (k,v) in self.letter_annotations.iteritems(): feature.qualifiers['__letter_annotations__.'+k] = v
self.features.append(feature)
output = SeqRecord.format(self,*args,**kw)
self.features.pop() # the last one, which is the 'source' feat I just added
# self._features.pop('source')
return output
def __len__(self):
return len(self.seq)
def __str__(self):
return self.__repr__()
def __repr__(self):
return self.format('imgt')
def seq2chain(*args):
"""Convert raw sequence into ImmuneChain object"""
if len(args) == 1:
name = 'seq'
seq = args[0]
elif len(args) == 2:
name = args[0]
seq = args[1]
else:
raise ValueError("Give either name,seq or just seq")
return ImmuneChain(seq=Seq(seq,generic_dna),id=name)
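# Illustrative usage (not in the original source); 'my_read' is a made-up id:
#   >>> chain = seq2chain('my_read', 'GATTACA')
#   >>> chain.id
#   'my_read'
#   >>> len(chain)
#   7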
# ================
# = INPUT/OUTPUT =
# ================
def parse_imgt(inputfile):
"""Parser for VDJXML
Really just a wrapper around SeqIO that upgrades SeqRecord to ImmuneChain
"""
for record in SeqIO.parse(inputfile,'imgt'):
yield ImmuneChain(record)
def filter_parse_imgt(inputfile,predicate):
"""Parser that takes a predicate function"""
for record in SeqIO.parse(inputfile,'imgt'):
chain = ImmuneChain(record)
if predicate(chain):
yield chain
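# Illustrative usage (not in the original source): the predicate is any
# callable taking an ImmuneChain and returning a bool; 'chains.imgt' is a
# hypothetical input file.
#   >>> coding_only = lambda chain: chain.has_tag('coding')
#   >>> for chain in filter_parse_imgt('chains.imgt', coding_only):
#   ...     print chain.id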
# ==========================================================================
# =============================
# = TO PARSE OLD VDJXML FILES =
# =============================
class ImmuneChainXML(object):
"""Data structure to represent an immune chain."""
def __init__(self,**kw):
"""Initialize ImmuneChain
seq is 5'->3'
"""
def kw_init(attrib):
if kw.has_key(attrib):
self.__setattr__(attrib,kw[attrib])
kw_init('seq')
kw_init('descr')
kw_init('locus')
kw_init('v')
kw_init('d')
kw_init('j')
kw_init('c')
kw_init('junction')
if kw.has_key('tags'):
tags = kw['tags']
if isinstance(tags,types.StringTypes): tags = [tags]
self.tags = set(tags)
else:
self.tags = set([])
def get_cdr3(self):
return len(self.junction)
def set_cdr3(self,value):
pass
cdr3 = property(fget=get_cdr3,fset=set_cdr3)
def get_vj(self):
return '|'.join([self.v,self.j])
def set_vj(self):
pass
vj = property(fget=get_vj,fset=set_vj)
def get_vdj(self):
return '|'.join([self.v,self.d,self.j])
def set_vdj(self):
pass
vdj = property(fget=get_vdj,fset=set_vdj)
def add_tags(self,tagset):
if isinstance(tagset,types.StringTypes): tagset = [tagset]
self.tags.update(tagset)
def add_tag(self,tag):
self.add_tags(tag)
def remove_tags(self,tagset):
if isinstance(tagset,types.StringTypes): tagset = [tagset]
for tag in tagset: self.tags.remove(tag)
def remove_tag(self,tag):
self.remove_tags(tag)
def has_tag(self,tag):
if tag in self.tags:
return True
else:
return False
def __len__(self):
return len(self.seq)
def __str__(self):
return self.__repr__()
def __repr__(self):
return self.get_XML()
def get_XML(self):
format_xml = lambda attrib,value: "\t<%(attrib)s>%(value)s</%(attrib)s>\n" % {'attrib':attrib,'value':value}
xmlstring = '<ImmuneChain>\n'
for (attrib,value) in self.__dict__.iteritems():
if attrib == 'tags':
for tag in self.tags:
xmlstring += format_xml('tag',tag)
else:
xmlstring += format_xml(attrib,value)
xmlstring += '</ImmuneChain>\n'
return xmlstring
class ParserVDJXML(object):
"""Parser for VDJXML"""
def __init__(self):
self.chain = None
def start_handler(self,elem):
if elem.tag == 'ImmuneChain':
self.chain = ImmuneChainXML()
def end_handler(self,elem):
if elem.tag == 'tag':
self.chain.add_tags(elem.text)
elif elem.tag == 'v_end_idx' or elem.tag == 'j_start_idx':
self.chain.__setattr__(elem.tag,int(elem.text))
else:
self.chain.__setattr__(elem.tag,elem.text)
def parse(self,inputfile):
for event, elem in ElementTree.iterparse(inputfile,events=('start','end')):
if event == 'start':
if elem.tag == 'root': # to ensure non-incorp of <root> obj in chain
pass
else:
self.start_handler(elem)
elif event == 'end':
if elem.tag == 'ImmuneChain':
yield self.chain
elif elem.tag == 'root': # to ensure clean exit at end of file
pass
else:
self.end_handler(elem)
class PredicateParserVDJXML(ParserVDJXML):
"""VDJXML Parser that takes a predicate function"""
def __init__(self,predicate):
ParserVDJXML.__init__(self)
self.predicate = predicate
def parse(self,inputfile):
for event, elem in ElementTree.iterparse(inputfile,events=('start','end')):
if event == 'start':
self.start_handler(elem)
elif event == 'end':
if elem.tag == 'ImmuneChain':
if self.predicate(self.chain) == True:
yield self.chain
else:
self.end_handler(elem)
def parse_VDJXML(inputfile):
vdjxmlparser = ParserVDJXML()
return vdjxmlparser.parse(inputfile)
def filter_parse_VDJXML(inputfile,predicate):
vdjxmlparser = PredicateParserVDJXML(predicate)
return vdjxmlparser.parse(inputfile)
def xml2imgt(chainXML):
seq = Seq(chainXML.seq,generic_dna)
chain = ImmuneChain(seq=seq,id=chainXML.descr,name=chainXML.descr,description=chainXML.descr)
chain.annotations['barcode'] = chainXML.barcode
chain.add_tags(list(chainXML.tags))
chain.annotations['clone'] = chainXML.clone
if "coding" not in chain.tags: raise ValueError, "I want coding chains only."
vfeature = SeqFeature(location=FeatureLocation(0,chainXML.v_end_idx),type='V-REGION',strand=1,qualifiers={"allele":[chainXML.v]})
jfeature = SeqFeature(location=FeatureLocation(chainXML.j_start_idx,len(seq)),type='J-REGION',strand=1,qualifiers={"allele":[chainXML.j]})
cdr3feature = SeqFeature(location=FeatureLocation(chainXML.v_end_idx+3,chainXML.j_start_idx-3),type='CDR3-IMGT',strand=1)
chain.features.append(vfeature)
chain.features.append(jfeature)
chain.features.append(cdr3feature)
chain._update_feature_dict()
chain._process_source_feature()
return chain
|
churchlab/vdj
|
__init__.py
|
Python
|
apache-2.0
| 16,592
|
[
"Biopython"
] |
7d97935d4275b3b275cb116bee59876825b3d5ef604815c490473e8cd38b0322
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'sphobjinv'
copyright = '2016-2022, Brian Skinn'
author = 'Brian Skinn'
# The full version for `release`, including alpha/beta/rc tags
from sphobjinv import __version__ as release
# Just major.minor for `version`
version = ".".join(release.split(".")[:2])
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinxcontrib.programoutput",
"sphinx_issues",
"sphinx_removed_in",
]
# napoleon configuration
napoleon_google_docstring = False
napoleon_use_rtype = False
# sphinx-issues config
issues_github_path = "bskinn/sphobjinv"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# No module name prepended to object titles in docs
add_module_names = False
# Highlighting style
pygments_style = "sphinx"
# Ignore package prefix when sorting modules
modindex_common_prefix = ["sphobjinv."]
# -- Common epilogue definition ------------------------------------------------
rst_epilog = r"""
.. |extlink| image:: /_static/extlink.svg
.. |dag| replace:: :math:`^\dagger`
.. |None| replace:: :obj:`None`
.. |True| replace:: :obj:`True`
.. |False| replace:: :obj:`False`
.. |int| replace:: :obj:`int`
.. |float| replace:: :obj:`float`
.. |list| replace:: :obj:`list`
.. |tuple| replace:: :obj:`tuple`
.. |type| replace:: :obj:`type`
.. |str| replace:: :obj:`str`
.. |bytes| replace:: :obj:`bytes`
.. |bool| replace:: :obj:`bool`
.. |dict| replace:: :obj:`dict`
.. |Path| replace:: :obj:`~pathlib.Path`
.. |re.compile| replace:: :func:`re.compile`
.. |re| replace:: :doc:`re <python:library/re>`
.. |Enum| replace:: :class:`~enum.Enum`
.. |isphx| replace:: :mod:`~sphinx.ext.intersphinx`
.. |Inventory| replace:: :class:`~sphobjinv.inventory.Inventory`
.. |DataObjStr| replace:: :class:`~sphobjinv.data.DataObjStr`
.. |DataObjBytes| replace:: :class:`~sphobjinv.data.DataObjBytes`
.. |SuperDataObj| replace:: :class:`~sphobjinv.data.SuperDataObj`
.. |license_txt| replace:: LICENSE.txt
.. _license_txt: https://github.com/bskinn/sphobjinv/blob/main/LICENSE.txt
.. |fuzzywuzzy| replace:: ``fuzzywuzzy``
.. _fuzzywuzzy: https://github.com/seatgeek/fuzzywuzzy
.. |python-Levenshtein| replace:: ``python-Levenshtein``
.. _python-Levenshtein: https://pypi.org/project/python-Levenshtein
.. |br| raw:: html
<br />
.. |cour| raw:: html
<span style="font-family:courier, monospace;font-size:90%">
.. |/cour| raw:: html
</span>
.. |objects.inv| replace:: |cour|\ objects.inv\ |/cour|
.. |objects.txt| replace:: |cour|\ objects.txt\ |/cour|
.. |str.format| replace:: :meth:`str.format`
.. |isphxmap| replace:: ``intersphinx_mapping``
.. _isphxmap: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#confval-intersphinx_mapping
.. |soi| raw:: html
<span style="font-family:courier, monospace; font-size: 90%; font-weight: bold;">sphobjinv</span>
.. |stdin| replace:: |cour|\ stdin\ |/cour|
.. |stdout| replace:: |cour|\ stdout\ |/cour|
.. |cli:ALL| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.ALL`
.. |cli:DEF_BASENAME| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.DEF_BASENAME`
.. |cli:DEF_OUT_EXT| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.DEF_OUT_EXT`
.. |cli:FOUND_URL| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.FOUND_URL`
.. |cli:INDEX| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.INDEX`
.. |cli:INFILE| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.INFILE`
.. |cli:MODE| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.MODE`
.. |cli:OUTFILE| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.OUTFILE`
.. |cli:OVERWRITE| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.OVERWRITE`
.. |cli:QUIET| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.QUIET`
.. |cli:SCORE| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.SCORE`
.. |cli:SUBPARSER_NAME| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.SUBPARSER_NAME`
.. |cli:SUGGEST_CONFIRM_LENGTH| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.SUGGEST_CONFIRM_LENGTH`
.. |cli:URL| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.URL`
.. |cli:VERSION| replace:: :attr:`~sphobjinv.cli.parser.PrsConst.VERSION`
.. |resolve_inpath| replace:: :func:`~sphobjinv.cli.paths.resolve_inpath`
"""
# -- doctest setup code --------------------------------------------
doctest_global_setup = """\
import json
import os
from pathlib import Path
import shutil as sh
import sphobjinv as soi
# Should always be the doc root
_start_dir = Path().resolve()
# Create scratch dir if missing, and bind
_work_dir = Path('scratch')
_work_dir.mkdir(exist_ok=True)
_work_dir = _work_dir.resolve()
# Link ref to the attrs inventory
_res_inv = (_start_dir.parent / 'tests' / 'resource'
/ 'objects_attrs.inv')
# Scratch-clearing helper for later use
def _clear_files():
for fp in [_ for _ in _work_dir.iterdir() if _.is_file()]:
fp.unlink()
# Move to scratch, clear it, and copy in the attrs inv
os.chdir(str(_work_dir))
_clear_files()
sh.copy(str(_res_inv), str(Path()))
# Define helper(s) for running CLI commands
def cli_run(argstr, *, inp='', head=None):
'''Run as if argstr was passed to shell.
'inp' is input to pre-load to the 'stdio_mgr' mocking
of 'stdin'.
'head' is an integer, indicating the number
of head lines to print.
Can't handle quoted arguments.
'''
import sys
import sphobjinv.cli as cli
from stdio_mgr import stdio_mgr
old_argv = sys.argv
sys.argv = argstr.strip().split()
with stdio_mgr(inp) as (i_, o_, e_):
try:
cli.main()
except SystemExit:
pass
finally:
sys.argv = old_argv
output = o_.getvalue() + e_.getvalue()
if head:
output = '\\n'.join(output.splitlines()[:head])
print(output)
def file_head(fn, *, head=None):
'''Print the first 'head' lines of file at 'fn'; all if head==None.'''
p = Path(fn)
if not p.is_file():
return "Not a file."
text = p.read_text()
# If head==None, then just returns a complete slice
lines = text.splitlines()[:head]
return "\\n".join(lines)
"""
doctest_global_cleanup = """\
_clear_files()
os.chdir(str(_start_dir))
"""
# -- Options for intersphinx ------------------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
}
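# Illustrative note (not part of the original config): with this mapping,
# cross-reference roles in the docs resolve against the listed external
# inventories, e.g. :class:`python:pathlib.Path` links to the pathlib entry
# in the CPython documentation via the "python" key.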
# -- Options for linkcheck --------------------------------------------------
linkcheck_ignore = [r"^https?://(\w+[.])?twitter[.]com.*$"]
linkcheck_anchors_ignore = [r"^L\d+$", r"^L\d+-L\d+$"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file basename
htmlhelp_basename = "sphobjinv"
# Location of the favicon and logo images
html_favicon = "_static/soi-logo.png"
html_logo = "_static/soi-logo_duo_border.png"
|
bskinn/sphobjinv
|
doc/source/conf.py
|
Python
|
mit
| 8,510
|
[
"Brian"
] |
3a652817732101ef62e31b5e46dd9b24b678f60b99f338203a5ae2f4aa72a61d
|
"""
C++ Export
----------
This module provides all necessary functionality to specify an ODE model and
generate executable C++ simulation code. The user generally won't have to
directly call any function from this module as this will be done by
:py:func:`amici.pysb_import.pysb2amici`,
:py:func:`amici.sbml_import.SbmlImporter.sbml2amici` and
:py:func:`amici.petab_import.import_model`
"""
import sympy as sp
import numpy as np
import re
import shutil
import subprocess
import sys
import os
import copy
import numbers
import logging
import itertools
import contextlib
try:
import pysb
except ImportError:
pysb = None
from typing import (
Callable, Optional, Union, List, Dict, Tuple, SupportsFloat, Sequence,
Set, Any
)
from string import Template
from sympy.printing import cxxcode
from sympy.printing.cxx import _CXXCodePrinterBase
from sympy.matrices.immutable import ImmutableDenseMatrix
from sympy.matrices.dense import MutableDenseMatrix
from sympy.logic.boolalg import BooleanAtom
from itertools import chain
from . import (
amiciSwigPath, amiciSrcPath, amiciModulePath, __version__, __commit__,
sbml_import
)
from .logging import get_logger, log_execution_time, set_log_level
from .constants import SymbolId
from .import_utils import smart_subs_dict, toposort_symbols
# Template for model simulation main.cpp file
CXX_MAIN_TEMPLATE_FILE = os.path.join(amiciSrcPath, 'main.template.cpp')
# Template for model/swig/CMakeLists.txt
SWIG_CMAKE_TEMPLATE_FILE = os.path.join(amiciSwigPath,
'CMakeLists_model.cmake')
# Template for model/CMakeLists.txt
MODEL_CMAKE_TEMPLATE_FILE = os.path.join(amiciSrcPath,
'CMakeLists.template.cmake')
# prototype for generated C++ functions, keys are the names of functions
#
# signature: defines the argument part of the function signature,
# input variables
# should have a const flag
#
# assume_pow_positivity: identifies the functions on which
# assume_pow_positivity will have an effect when specified during model
# generation. generally these are functions that are used for solving the
# ODE, where negative values may negatively affect convergence of the
# integration algorithm
#
# sparse: specifies whether the result of this function will be stored in
# sparse format. sparse format means that the function will only return an
# array of nonzero values and not a full matrix.
functions = {
'Jy': {
'signature':
'(realtype *Jy, const int iy, const realtype *p, '
'const realtype *k, const realtype *y, const realtype *sigmay, '
'const realtype *my)',
},
'dJydsigma': {
'signature':
'(realtype *dJydsigma, const int iy, const realtype *p, '
'const realtype *k, const realtype *y, const realtype *sigmay, '
'const realtype *my)',
},
'dJydy': {
'signature':
'(realtype *dJydy, const int iy, const realtype *p, '
'const realtype *k, const realtype *y, '
'const realtype *sigmay, const realtype *my)',
'flags': ['sparse']
},
'root': {
'signature':
'(realtype *root, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h)'
},
'dwdp': {
'signature':
'(realtype *dwdp, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *w, const realtype *tcl, const realtype *dtcldp)',
'flags': ['assume_pow_positivity', 'sparse']
},
'dwdx': {
'signature':
'(realtype *dwdx, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *w, const realtype *tcl)',
'flags': ['assume_pow_positivity', 'sparse']
},
'dwdw': {
'signature':
'(realtype *dwdw, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *w, const realtype *tcl)',
'flags': ['assume_pow_positivity', 'sparse']
},
'dxdotdw': {
'signature':
'(realtype *dxdotdw, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *w)',
'flags': ['assume_pow_positivity', 'sparse']
},
'dxdotdx_explicit': {
'signature':
'(realtype *dxdotdx_explicit, const realtype t, '
'const realtype *x, const realtype *p, const realtype *k, '
'const realtype *h, const realtype *w)',
'flags': ['assume_pow_positivity', 'sparse']
},
'dxdotdp_explicit': {
'signature':
'(realtype *dxdotdp_explicit, const realtype t, '
'const realtype *x, const realtype *p, const realtype *k, '
'const realtype *h, const realtype *w)',
'flags': ['assume_pow_positivity', 'sparse']
},
'dydx': {
'signature':
'(realtype *dydx, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *w, const realtype *dwdx)',
},
'dydp': {
'signature':
'(realtype *dydp, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const int ip, const realtype *w, const realtype *dtcldp)',
},
'dsigmaydp': {
'signature':
'(realtype *dsigmaydp, const realtype t, const realtype *p, '
'const realtype *k, const int ip)',
},
'sigmay': {
'signature':
'(realtype *sigmay, const realtype t, const realtype *p, '
'const realtype *k)',
},
'sroot': {
'signature':
'(realtype *stau, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *sx, const int ip, const int ie)',
'flags': ['dont_generate_body']
},
'drootdt': {
'signature': '()',
'flags': ['dont_generate_body']
},
'drootdt_total': {
'signature': '()',
'flags': ['dont_generate_body']
},
'drootdp': {
'signature': '()',
'flags': ['dont_generate_body']
},
'drootdx': {
'signature': '()',
'flags': ['dont_generate_body']
},
'stau': {
'signature':
'(realtype *stau, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *sx, const int ip, const int ie)'
},
'deltax': {
'signature':
'(double *deltax, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const int ie, const realtype *xdot, const realtype *xdot_old)'
},
'ddeltaxdx': {
'signature': '()',
'flags': ['dont_generate_body']
},
'ddeltaxdt': {
'signature': '()',
'flags': ['dont_generate_body']
},
'ddeltaxdp': {
'signature': '()',
'flags': ['dont_generate_body']
},
'deltasx': {
'signature':
'(realtype *deltasx, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *w, const int ip, const int ie, '
'const realtype *xdot, const realtype *xdot_old, '
'const realtype *sx, const realtype *stau)'
},
'w': {
'signature':
'(realtype *w, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, '
'const realtype *h, const realtype *tcl)',
'flags': ['assume_pow_positivity']
},
'x0': {
'signature':
'(realtype *x0, const realtype t, const realtype *p, '
'const realtype *k)',
},
'x0_fixedParameters': {
'signature':
'(realtype *x0_fixedParameters, const realtype t, '
'const realtype *p, const realtype *k, '
'gsl::span<const int> reinitialization_state_idxs)',
},
'sx0': {
'signature':
'(realtype *sx0, const realtype t,const realtype *x, '
'const realtype *p, const realtype *k, const int ip)',
},
'sx0_fixedParameters': {
'signature':
'(realtype *sx0_fixedParameters, const realtype t, '
'const realtype *x0, const realtype *p, const realtype *k, '
'const int ip, gsl::span<const int> reinitialization_state_idxs)',
},
'xdot': {
'signature':
'(realtype *xdot, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, const realtype *h, '
'const realtype *w)',
'flags': ['assume_pow_positivity']
},
'xdot_old': {
'signature': '()',
'flags': ['dont_generate_body'],
},
'y': {
'signature':
'(realtype *y, const realtype t, const realtype *x, '
'const realtype *p, const realtype *k, '
'const realtype *h, const realtype *w)',
},
'x_rdata': {
'signature':
'(realtype *x_rdata, const realtype *x, const realtype *tcl)',
},
'total_cl': {
'signature':
'(realtype *total_cl, const realtype *x_rdata)',
},
'x_solver': {
'signature':
'(realtype *x_solver, const realtype *x_rdata)',
},
}
# list of sparse functions
sparse_functions = [
function for function in functions
if 'sparse' in functions[function].get('flags', [])
]
# list of nobody functions
nobody_functions = [
function for function in functions
if 'dont_generate_body' in functions[function].get('flags', [])
]
# list of sensitivity functions
sensi_functions = [
function for function in functions
if 'const int ip' in functions[function]['signature']
]
# list of sparse sensitivity functions
sparse_sensi_functions = [
    function for function in functions
    if ('const int ip' not in functions[function]['signature']
        and function.endswith('dp')) or function.endswith('dp_explicit')
]
# list of event functions
event_functions = [
function for function in functions
if 'const int ie' in functions[function]['signature'] and
'const int ip' not in functions[function]['signature']
]
event_sensi_functions = [
function for function in functions
if 'const int ie' in functions[function]['signature'] and
'const int ip' in functions[function]['signature']
]
# list of multiobs functions
multiobs_functions = [
function for function in functions
if 'const int iy' in functions[function]['signature']
]
# list of equations that have ids which may not be unique
non_unique_id_symbols = [
'x_rdata', 'y'
]
# custom c++ function replacements
CUSTOM_FUNCTIONS = [
{'sympy': 'polygamma',
'c++': 'boost::math::polygamma',
'include': '#include <boost/math/special_functions/polygamma.hpp>',
'build_hint': 'Using polygamma requires libboost-math header files.'
},
{'sympy': 'Heaviside',
'c++': 'amici::heaviside'},
{'sympy': 'DiracDelta',
'c++': 'amici::dirac'}
]
# python log manager
logger = get_logger(__name__, logging.ERROR)
def var_in_function_signature(name: str, varname: str) -> bool:
"""
Checks whether the values for a symbolic variable are passed in the signature
of a function
:param name:
name of the function
:param varname:
name of the symbolic variable
:return:
boolean indicating whether the variable occurs in the function
signature
"""
return name in functions \
and re.search(
rf'const (realtype|double) \*{varname}[0]*[,)]+',
functions[name]['signature']
)
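# Illustrative (not in the original source): the regex above looks for the
# variable as a const pointer argument in the C++ signature string.
#   >>> bool(var_in_function_signature('Jy', 'p'))  # '... const realtype *p, ...'
#   True
#   >>> bool(var_in_function_signature('Jy', 'x'))  # Jy takes no state vector x
#   False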
class ModelQuantity:
"""
Base class for model components
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: Union[SupportsFloat, numbers.Number, sp.Expr]):
"""
Create a new ModelQuantity instance.
:param identifier:
unique identifier of the quantity
:param name:
individual name of the quantity (does not need to be unique)
:param value:
either formula, numeric value or initial value
"""
if not isinstance(identifier, sp.Symbol):
raise TypeError(f'identifier must be sympy.Symbol, was '
f'{type(identifier)}')
self._identifier: sp.Symbol = identifier
if not isinstance(name, str):
raise TypeError(f'name must be str, was {type(name)}')
self._name: str = name
self._value: sp.Expr = cast_to_sym(value, 'value')
def __repr__(self) -> str:
"""
Representation of the ModelQuantity object
:return:
string representation of the ModelQuantity
"""
return str(self._identifier)
def get_id(self) -> sp.Symbol:
"""
ModelQuantity identifier
:return:
identifier of the ModelQuantity
"""
return self._identifier
def get_name(self) -> str:
"""
ModelQuantity name
:return:
name of the ModelQuantity
"""
return self._name
def get_val(self) -> sp.Expr:
"""
ModelQuantity value
:return:
value of the ModelQuantity
"""
return self._value
def set_val(self, val: sp.Expr):
"""
Set ModelQuantity value
:return:
value of the ModelQuantity
"""
self._value = cast_to_sym(val, 'value')
class State(ModelQuantity):
"""
A State variable defines an entity that evolves with time according to
the provided time derivative, abbreviated by `x`
:ivar _conservation_law:
algebraic formula that allows computation of this
state according to a conservation law
:ivar _dt:
algebraic formula that defines the temporal derivative of this state
"""
_dt: Union[sp.Expr, None] = None
_conservation_law: Union[sp.Expr, None] = None
def __init__(self,
identifier: sp.Symbol,
name: str,
init: sp.Expr,
dt: sp.Expr):
"""
Create a new State instance. Extends :meth:`ModelQuantity.__init__`
by dt
:param identifier:
unique identifier of the state
:param name:
individual name of the state (does not need to be unique)
:param init:
initial value
:param dt:
time derivative
"""
super(State, self).__init__(identifier, name, init)
self._dt = cast_to_sym(dt, 'dt')
self._conservation_law = None
def set_conservation_law(self,
law: sp.Expr) -> None:
"""
Sets the conservation law of a state. If a conservation law
is set, the respective state will be replaced by an algebraic
formula according to the respective conservation law.
:param law:
linear sum of states that if added to this state remain
constant over time
"""
if not isinstance(law, sp.Expr):
raise TypeError(f'conservation law must have type sympy.Expr, '
f'was {type(law)}')
self._conservation_law = law
def set_dt(self,
dt: sp.Expr) -> None:
"""
Sets the time derivative
:param dt:
time derivative
"""
self._dt = cast_to_sym(dt, 'dt')
def get_dt(self) -> sp.Expr:
"""
Gets the time derivative
:return:
time derivative
"""
return self._dt
def get_free_symbols(self) -> Set[sp.Symbol]:
"""
Gets the set of free symbols in time derivative and initial conditions
:return:
free symbols
"""
return self._dt.free_symbols.union(self._value.free_symbols)
class ConservationLaw(ModelQuantity):
"""
A conservation law defines the total amount of a (weighted) sum of
states, which stays constant over time
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: sp.Expr):
"""
Create a new ConservationLaw instance.
:param identifier:
unique identifier of the ConservationLaw
:param name:
individual name of the ConservationLaw (does not need to be
unique)
:param value: formula (sum of states)
"""
super(ConservationLaw, self).__init__(identifier, name, value)
class Observable(ModelQuantity):
"""
An Observable links model simulations to experimental measurements,
abbreviated by `y`
:ivar _measurement_symbol:
sympy symbol used in the objective function to represent
measurements to this observable
"""
_measurement_symbol: Union[sp.Symbol, None] = None
def __init__(self,
identifier: sp.Symbol,
name: str,
value: sp.Expr,
measurement_symbol: Optional[sp.Symbol] = None):
"""
Create a new Observable instance.
:param identifier:
unique identifier of the Observable
:param name:
individual name of the Observable (does not need to be unique)
:param value:
formula
"""
super(Observable, self).__init__(identifier, name, value)
self._measurement_symbol = measurement_symbol
def get_measurement_symbol(self) -> sp.Symbol:
if self._measurement_symbol is None:
self._measurement_symbol = generate_measurement_symbol(
self.get_id()
)
return self._measurement_symbol
class SigmaY(ModelQuantity):
"""
A Standard Deviation SigmaY rescales the distance between simulations
and measurements when computing residuals or objective functions,
abbreviated by `sigmay`
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: sp.Expr):
"""
Create a new Standard Deviation instance.
:param identifier:
unique identifier of the Standard Deviation
:param name:
individual name of the Standard Deviation (does not need to
be unique)
:param value:
formula
"""
super(SigmaY, self).__init__(identifier, name, value)
class Expression(ModelQuantity):
"""
An Expression is a recurring element in symbolic formulas. Specifying
expressions may yield more compact formulas, which can lead to substantially
shorter model compilation times and may also reduce model simulation time,
abbreviated by `w`
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: sp.Expr):
"""
Create a new Expression instance.
:param identifier:
unique identifier of the Expression
:param name:
individual name of the Expression (does not need to be unique)
:param value:
formula
"""
super(Expression, self).__init__(identifier, name, value)
class Parameter(ModelQuantity):
"""
A Parameter is a free variable in the model with respect to which
sensitivities may be computed, abbreviated by `p`
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: numbers.Number):
"""
Create a new Parameter instance.
:param identifier:
unique identifier of the Parameter
:param name:
individual name of the Parameter (does not need to be
unique)
:param value:
numeric value
"""
super(Parameter, self).__init__(identifier, name, value)
class Constant(ModelQuantity):
"""
A Constant is a fixed variable in the model with respect to which
sensitivities cannot be computed, abbreviated by `k`
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: numbers.Number):
"""
Create a new Constant instance.
:param identifier:
unique identifier of the Constant
:param name:
individual name of the Constant (does not need to be unique)
:param value:
numeric value
"""
super(Constant, self).__init__(identifier, name, value)
class LogLikelihood(ModelQuantity):
"""
A LogLikelihood defines the distance between measurements and
simulations for a particular observable. The final LogLikelihood value
in the simulation will be the sum of all specified LogLikelihood
instances evaluated at all timepoints, abbreviated by `Jy`
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: sp.Expr):
"""
Create a new LogLikelihood instance.
:param identifier:
unique identifier of the LogLikelihood
:param name:
individual name of the LogLikelihood (does not need to be
unique)
:param value:
formula
"""
super(LogLikelihood, self).__init__(identifier, name, value)
class Event(ModelQuantity):
"""
    An Event defines either an SBML event or a root of the argument of a
Heaviside function. The Heaviside functions will be tracked via the
vector `h` during simulation and are needed to inform the ODE solver about
a discontinuity in either the right hand side or the states themselves,
causing a reinitialization of the solver.
"""
def __init__(self,
identifier: sp.Symbol,
name: str,
value: sp.Expr,
state_update: Union[sp.Expr, None],
event_observable: Union[sp.Expr, None]):
"""
Create a new Event instance.
:param identifier:
unique identifier of the Event
:param name:
individual name of the Event (does not need to be unique)
:param value:
formula for the root / trigger function
:param state_update:
formula for the bolus function (None for Heaviside functions,
zero vector for events without bolus)
:param event_observable:
            formula of a potential observable linked to the event
            (None for Heaviside functions, empty for events without an
            observable)
"""
super(Event, self).__init__(identifier, name, value)
# add the Event specific components
self._state_update = state_update
self._observable = event_observable
def __eq__(self, other):
"""
Check equality of events at the level of trigger/root functions, as we
need to collect unique root functions for roots.cpp
"""
return self.get_val() == other.get_val()
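    # Equality is deliberately based on the trigger formula only; a minimal
    # sketch (assumes sympy as `sp` and a time symbol `t`):
    #
    #     >>> e1 = Event(sp.Symbol('e1'), 'e1', t - 1, None, None)
    #     >>> e2 = Event(sp.Symbol('e2'), 'e2', t - 1, None, None)
    #     >>> e1 == e2  # doctest: +SKIP
    #     True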
# defines the type of some attributes in ODEModel
symbol_to_type = {
SymbolId.SPECIES: State,
SymbolId.PARAMETER: Parameter,
SymbolId.FIXED_PARAMETER: Constant,
SymbolId.OBSERVABLE: Observable,
SymbolId.SIGMAY: SigmaY,
SymbolId.LLHY: LogLikelihood,
SymbolId.EXPRESSION: Expression,
SymbolId.EVENT: Event
}
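# Example lookup (sketch): during import, the matching quantity class is
# instantiated for each symbol category, e.g.
#
#     >>> symbol_to_type[SymbolId.PARAMETER] is Parameter
#     True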
@log_execution_time('running smart_jacobian', logger)
def smart_jacobian(eq: sp.MutableDenseMatrix,
sym_var: sp.MutableDenseMatrix) -> sp.MutableDenseMatrix:
"""
Wrapper around symbolic jacobian with some additional checks that reduce
computation time for large matrices
:param eq:
equation
:param sym_var:
differentiation variable
:return:
jacobian of eq wrt sym_var
"""
if min(eq.shape) and min(sym_var.shape) \
and not smart_is_zero_matrix(eq) \
and not smart_is_zero_matrix(sym_var) \
and not sym_var.free_symbols.isdisjoint(eq.free_symbols):
return eq.jacobian(sym_var)
return sp.zeros(eq.shape[0], sym_var.shape[0])
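# Illustrative sketch of the short-circuit (assumes sympy as `sp`): if the
# equation does not depend on the differentiation variable, an explicit zero
# matrix is returned without invoking sp.Matrix.jacobian:
#
#     >>> a, b = sp.symbols('a b')
#     >>> smart_jacobian(sp.Matrix([a]), sp.Matrix([b]))  # doctest: +SKIP
#     Matrix([[0]])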
@log_execution_time('running smart_multiply', logger)
def smart_multiply(x: Union[sp.MutableDenseMatrix, sp.MutableSparseMatrix],
y: sp.MutableDenseMatrix
) -> Union[sp.MutableDenseMatrix, sp.MutableSparseMatrix]:
"""
Wrapper around symbolic multiplication with some additional checks that
reduce computation time for large matrices
    :param x:
        first factor
    :param y:
        second factor
:return:
product
"""
if not x.shape[0] or not y.shape[1] or smart_is_zero_matrix(x) or \
smart_is_zero_matrix(y):
return sp.zeros(x.shape[0], y.shape[1])
return x.multiply(y)
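# Illustrative sketch (assumes sympy as `sp`): multiplication involving a
# zero matrix short-circuits to an explicit zero matrix of the correct shape:
#
#     >>> smart_multiply(sp.zeros(2, 3), sp.ones(3, 1))  # doctest: +SKIP
#     Matrix([[0], [0]])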
def smart_is_zero_matrix(x: Union[sp.MutableDenseMatrix,
sp.MutableSparseMatrix]) -> bool:
"""A faster implementation of sympy's is_zero_matrix
Avoids repeated indexer type checks and double iteration to distinguish
False/None. Found to be about 100x faster for large matrices.
:param x: Matrix to check
"""
if isinstance(x, sp.MutableDenseMatrix):
nonzero = any(xx.is_zero is not True for xx in x._mat)
else:
nonzero = x.nnz() > 0
return not nonzero
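# Minimal sketch (assumes sympy as `sp`):
#
#     >>> smart_is_zero_matrix(sp.zeros(3, 3))  # doctest: +SKIP
#     True
#     >>> smart_is_zero_matrix(sp.Matrix([[0, 1]]))  # doctest: +SKIP
#     False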
class ODEModel:
"""
    Defines an Ordinary Differential Equation as a set of ModelQuantities.
    This class provides general purpose interfaces to compute arbitrary
    symbolic derivatives that are necessary for model simulation or
    sensitivity computation.
:ivar _states:
list of state variables
:ivar _observables:
list of observables
:ivar _sigmays:
list of sigmays
:ivar _parameters:
list of parameters
:ivar _loglikelihoods:
list of loglikelihoods
:ivar _expressions:
list of expressions instances
:ivar _conservationlaws:
list of conservation laws
:ivar _symboldim_funs:
        functions that compute model dimensions; these are provided as
        functions since the underlying symbolic expressions have not been
        populated at compile time
:ivar _eqs:
carries symbolic formulas of the symbolic variables of the model
:ivar _sparseeqs:
carries linear list of all symbolic formulas for sparsified
variables
:ivar _vals:
carries numeric values of symbolic identifiers of the symbolic
variables of the model
:ivar _names:
carries names of symbolic identifiers of the symbolic variables
of the model
:ivar _syms:
carries symbolic identifiers of the symbolic variables of the
model
:ivar _strippedsyms:
carries symbolic identifiers that were stripped of additional class
information
:ivar _sparsesyms:
carries linear list of all symbolic identifiers for sparsified
variables
:ivar _colptrs:
carries column pointers for sparsified variables. See
SUNMatrixContent_Sparse definition in <sunmatrix/sunmatrix_sparse.h>
:ivar _rowvals:
carries row values for sparsified variables. See
SUNMatrixContent_Sparse definition in <sunmatrix/sunmatrix_sparse.h>
:ivar _equation_prototype:
defines the attribute from which an equation should be generated via
list comprehension (see :meth:`ODEModel._generate_equation`)
:ivar _variable_prototype:
defines the attribute from which a variable should be generated via
list comprehension (see :meth:`ODEModel._generate_symbol`)
:ivar _value_prototype:
defines the attribute from which a value should be generated via
list comprehension (see :meth:`ODEModel._generate_value`)
:ivar _total_derivative_prototypes:
defines how a total derivative equation is computed for an equation,
key defines the name and values should be arguments for
ODEModel.totalDerivative()
:ivar _lock_total_derivative:
        add chain variables to this set when computing a total derivative
        from a partial derivative call to enforce a partial derivative in
        the next recursion. Prevents infinite recursion.
:ivar _simplify:
If not None, this function will be used to simplify symbolic
derivative expressions. Receives sympy expressions as only argument.
To apply multiple simplifications, wrap them in a lambda expression.
:ivar _x0_fixedParameters_idx:
Index list of subset of states for which x0_fixedParameters was
computed
:ivar _w_recursion_depth:
recursion depth in w, quantified as nilpotency of dwdw
:ivar _has_quadratic_nllh:
whether all observables have a gaussian noise model, i.e. whether
res and FIM make sense.
"""
def __init__(self, verbose: Optional[Union[bool, int]] = False,
simplify: Optional[Callable] = sp.powsimp):
"""
Create a new ODEModel instance.
:param verbose:
verbosity level for logging, True/False default to
``logging.DEBUG``/``logging.ERROR``
:param simplify:
see :meth:`ODEModel._simplify`
"""
self._states: List[State] = []
self._observables: List[Observable] = []
self._sigmays: List[SigmaY] = []
self._parameters: List[Parameter] = []
self._constants: List[Constant] = []
self._loglikelihoods: List[LogLikelihood] = []
self._expressions: List[Expression] = []
self._conservationlaws: List[ConservationLaw] = []
self._events: List[Event] = []
self._symboldim_funs: Dict[str, Callable[[], int]] = {
'sx': self.num_states_solver,
'v': self.num_states_solver,
'vB': self.num_states_solver,
'xB': self.num_states_solver,
'sigmay': self.num_obs,
}
self._eqs: Dict[str, Union[sp.Matrix, List[sp.Matrix]]] = dict()
self._sparseeqs: Dict[str, Union[sp.Matrix, List[sp.Matrix]]] = dict()
self._vals: Dict[str, List[float]] = dict()
self._names: Dict[str, List[str]] = dict()
self._syms: Dict[str, Union[sp.Matrix, List[sp.Matrix]]] = dict()
self._strippedsyms: Dict[str, sp.Matrix] = dict()
self._sparsesyms: Dict[str, Union[List[str], List[List[str]]]] = dict()
self._colptrs: Dict[str, Union[List[int], List[List[int]]]] = dict()
self._rowvals: Dict[str, Union[List[int], List[List[int]]]] = dict()
self._equation_prototype: Dict[str, str] = {
'total_cl': '_conservationlaws',
'x0': '_states',
'y': '_observables',
'Jy': '_loglikelihoods',
'w': '_expressions',
'root': '_events',
'sigmay': '_sigmays'
}
self._variable_prototype: Dict[str, str] = {
'tcl': '_conservationlaws',
'x_rdata': '_states',
'y': '_observables',
'p': '_parameters',
'k': '_constants',
'w': '_expressions',
'sigmay': '_sigmays',
'h': '_events'
}
self._value_prototype: Dict[str, str] = {
'p': '_parameters',
'k': '_constants',
}
self._total_derivative_prototypes: \
Dict[str, Dict[str, Union[str, List[str]]]] = {
'sx_rdata': {
'eq': 'x_rdata',
'chainvars': ['x'],
'var': 'p',
'dxdz_name': 'sx',
},
'sroot': {
'eq': 'root',
'chainvars': ['x'],
'var': 'p',
'dxdz_name': 'sx',
}
}
self._lock_total_derivative: List[str] = list()
self._simplify: Callable = simplify
self._x0_fixedParameters_idx: Union[None, Sequence[int]]
self._w_recursion_depth: int = 0
self._has_quadratic_nllh: bool = True
set_log_level(logger, verbose)
@log_execution_time('importing SbmlImporter', logger)
def import_from_sbml_importer(self,
si: 'sbml_import.SbmlImporter',
compute_cls: Optional[bool] = True) -> None:
"""
Imports a model specification from a
:class:`amici.sbml_import.SbmlImporter`
instance.
        :param si:
            imported SBML model
        :param compute_cls:
            whether to compute conservation laws
        """
# get symbolic expression from SBML importers
symbols = copy.copy(si.symbols)
nexpr = len(symbols[SymbolId.EXPRESSION])
# assemble fluxes and add them as expressions to the model
fluxes = []
for ir, flux in enumerate(si.flux_vector):
flux_id = generate_flux_symbol(ir)
fluxes.append(flux_id)
nr = len(fluxes)
# correct time derivatives for compartment changes
dxdotdw_updates = []
def transform_dxdt_to_concentration(species_id, dxdt):
"""
Produces the appropriate expression for the first derivative of a
species with respect to time, for species that reside in
compartments with a constant volume, or a volume that is defined by
an assignment or rate rule.
:param species_id:
The identifier of the species (generated in "sbml_import.py").
:param dxdt:
The element-wise product of the row in the stoichiometric
matrix that corresponds to the species (row x_index) and the
flux (kinetic laws) vector. Ignored in the case of rate rules.
"""
# The derivation of the below return expressions can be found in
# the documentation. They are found by rearranging
# $\frac{d}{dt} (vx) = Sw$ for $\frac{dx}{dt}$, where $v$ is the
# vector of species compartment volumes, $x$ is the vector of
# species concentrations, $S$ is the stoichiometric matrix, and $w$
# is the flux vector. The conditional below handles the cases of
# species in (i) compartments with a rate rule, (ii) compartments
# with an assignment rule, and (iii) compartments with a constant
# volume, respectively.
species = si.symbols[SymbolId.SPECIES][species_id]
comp = species['compartment']
x_index = species['index']
if comp in si.symbols[SymbolId.SPECIES]:
dv_dt = si.symbols[SymbolId.SPECIES][comp]['dt']
xdot = (dxdt - dv_dt * species_id) / comp
dxdotdw_updates.extend(
(x_index, w_index, xdot.diff(r_flux))
for w_index, r_flux in enumerate(fluxes)
)
return xdot
elif comp in si.compartment_assignment_rules:
v = si.compartment_assignment_rules[comp]
# we need to flatten out assignments in the compartment in
# order to ensure that we catch all species dependencies
v = smart_subs_dict(v, si.symbols[SymbolId.EXPRESSION],
'value')
dv_dt = v.diff(si.amici_time_symbol)
# we may end up with a time derivative of the compartment
# volume due to parameter rate rules
comp_rate_vars = [p for p in v.free_symbols
if p in si.symbols[SymbolId.SPECIES]]
for var in comp_rate_vars:
dv_dt += \
v.diff(var) * si.symbols[SymbolId.SPECIES][var]['dt']
dv_dx = v.diff(species_id)
xdot = (dxdt - dv_dt * species_id) / (dv_dx * species_id + v)
dxdotdw_updates.extend(
(x_index, w_index, xdot.diff(r_flux))
for w_index, r_flux in enumerate(fluxes)
)
return xdot
else:
v = si.compartments[comp]
if v == 1.0:
return dxdt
dxdotdw_updates.extend(
(x_index, w_index,
si.stoichiometric_matrix[x_index, w_index] / v)
for w_index in range(si.stoichiometric_matrix.shape[1])
if si.stoichiometric_matrix[x_index, w_index] != 0
)
return dxdt / v
# create dynamics without respecting conservation laws first
dxdt = smart_multiply(si.stoichiometric_matrix,
MutableDenseMatrix(fluxes))
for ix, ((species_id, species), formula) in enumerate(zip(
symbols[SymbolId.SPECIES].items(),
dxdt
)):
assert ix == species['index'] # check that no reordering occurred
# rate rules and amount species don't need to be updated
if 'dt' in species:
continue
if species['amount']:
species['dt'] = formula
else:
species['dt'] = transform_dxdt_to_concentration(species_id,
formula)
# create all basic components of the ODE model and add them.
for symbol_name in symbols:
# transform dict of lists into a list of dicts
args = ['name', 'identifier']
if symbol_name == SymbolId.SPECIES:
args += ['dt', 'init']
else:
args += ['value']
if symbol_name == SymbolId.EVENT:
args += ['state_update', 'event_observable']
protos = [
{
'identifier': var_id,
**{k: v for k, v in var.items() if k in args}
}
for var_id, var in symbols[symbol_name].items()
]
for proto in protos:
self.add_component(symbol_to_type[symbol_name](**proto))
# add fluxes as expressions, this needs to happen after base
# expressions from symbols have been parsed
for flux_id, flux in zip(fluxes, si.flux_vector):
self.add_component(Expression(
identifier=flux_id,
name=str(flux_id),
value=flux
))
# process conservation laws
if compute_cls:
dxdotdw_updates = si.process_conservation_laws(self,
dxdotdw_updates)
nx_solver = si.stoichiometric_matrix.shape[0]
nw = len(self._expressions)
ncl = nw - nr - nexpr
# set derivatives of xdot, if applicable. We do this as we can save
# a substantial amount of computations by exploiting the structure
# of the right hand side.
# the tricky part is that the expressions w do not only contain the
# flux entries, but also assignment rules and conservation laws.
# assignment rules are added before the fluxes and
# _process_conservation_laws is called after the fluxes,
# but conservation law expressions are inserted at the beginning
# of the self.eq['w']. Accordingly we concatenate a zero matrix (for
# rule assignments and conservation laws) with the stoichiometric
# matrix and then apply the necessary updates from
# transform_dxdt_to_concentration
if not any(s in [e.get_id() for e in self._expressions]
for s in si.stoichiometric_matrix.free_symbols):
self._eqs['dxdotdw'] = sp.zeros(nx_solver, ncl + nexpr).row_join(
si.stoichiometric_matrix
)
for ix, iw, val in dxdotdw_updates:
# offset update according to concatenated zero matrix
self._eqs['dxdotdw'][ix, ncl + nexpr + iw] = val
# fill in 'self._sym' based on prototypes and components in ode_model
self.generate_basic_variables(from_sbml=True)
# substitute 'w' expressions into event expressions now, to avoid
# rewriting '{model_name}_root.cpp' headers to include 'w.h'
w_sorted = toposort_symbols(dict(zip(self._syms['w'], self._eqs['w'])))
for index, event in enumerate(self._events):
self._events[index] = Event(
identifier=event.get_id(),
name=event.get_name(),
value=event.get_val().subs(w_sorted),
state_update=event._state_update,
event_observable=event._observable,
)
self._has_quadratic_nllh = all(
llh['dist'] in ['normal', 'lin-normal']
for llh in si.symbols[SymbolId.LLHY].values()
)
def add_component(self, component: ModelQuantity,
insert_first: Optional[bool] = False) -> None:
"""
Adds a new ModelQuantity to the model.
:param component:
model quantity to be added
:param insert_first:
whether to add quantity first or last, relevant when components
may refer to other components of the same type.
"""
for comp_type in [Observable, Expression, Parameter, Constant, State,
LogLikelihood, SigmaY, ConservationLaw, Event]:
if isinstance(component, comp_type):
component_list = getattr(
self, f'_{type(component).__name__.lower()}s'
)
if insert_first:
component_list.insert(0, component)
else:
component_list.append(component)
return
raise ValueError(f'Invalid component type {type(component)}')
def add_conservation_law(self,
state: sp.Symbol,
total_abundance: sp.Symbol,
state_expr: sp.Expr,
abundance_expr: sp.Expr) -> None:
"""
Adds a new conservation law to the model. A conservation law is defined
by the conserved quantity T = sum_i(a_i * x_i), where a_i are
coefficients and x_i are different state variables.
:param state:
symbolic identifier of the state that should be replaced by
the conservation law (x_j)
:param total_abundance:
symbolic identifier of the total abundance (T/a_j)
        :param state_expr:
            symbolic algebraic formula that replaces the state. This is
            used to compute the numeric value of `state` during simulations.
            x_j = T/a_j - sum_i≠j(a_i * x_i)/a_j
:param abundance_expr:
symbolic algebraic formula that computes the value of the
conserved quantity. This is used to update the numeric value for
`total_abundance` after (re-)initialization.
T/a_j = sum_i≠j(a_i * x_i)/a_j + x_j
"""
try:
ix = [
s.get_id()
for s in self._states
].index(state)
except ValueError:
raise ValueError(f'Specified state {state} was not found in the '
f'model states.')
state_id = self._states[ix].get_id()
self.add_component(
Expression(state_id, str(state_id), state_expr),
insert_first=True
)
self.add_component(
ConservationLaw(
total_abundance,
f'total_{state_id}',
abundance_expr
)
)
self._states[ix].set_conservation_law(state_expr)
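    # Worked sketch (identifiers hypothetical; assumes sympy as `sp` and an
    # ODEModel instance `model`): for a law T = x0 + x1 in which x1 is
    # replaced, state_expr is T - x0 and abundance_expr is x0 + x1:
    #
    #     >>> x0, x1, T = sp.symbols('x0 x1 T')
    #     >>> model.add_conservation_law(
    #     ...     state=x1, total_abundance=T,
    #     ...     state_expr=T - x0, abundance_expr=x0 + x1,
    #     ... )  # doctest: +SKIP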
def num_states_rdata(self) -> int:
"""
Number of states.
:return:
number of state variable symbols
"""
return len(self.sym('x_rdata'))
def num_states_solver(self) -> int:
"""
Number of states after applying conservation laws.
:return:
number of state variable symbols
"""
return len(self.sym('x'))
def num_cons_law(self) -> int:
"""
Number of conservation laws.
:return:
number of conservation laws
"""
return self.num_states_rdata() - self.num_states_solver()
def num_state_reinits(self) -> int:
"""
Number of solver states which would be reinitialized after
preequilibration
:return:
number of state variable symbols with reinitialization
"""
reinit_states = self.eq('x0_fixedParameters')
solver_states = self.eq('x_solver')
return sum([1 for ix in reinit_states if ix in solver_states])
def num_obs(self) -> int:
"""
Number of Observables.
:return:
number of observable symbols
"""
return len(self.sym('y'))
def num_const(self) -> int:
"""
Number of Constants.
:return:
number of constant symbols
"""
return len(self.sym('k'))
def num_par(self) -> int:
"""
Number of Parameters.
:return:
number of parameter symbols
"""
return len(self.sym('p'))
def num_expr(self) -> int:
"""
Number of Expressions.
:return:
number of expression symbols
"""
return len(self.sym('w'))
def num_events(self) -> int:
"""
Number of Events.
:return:
number of event symbols (length of the root vector in AMICI)
"""
return len(self.sym('h'))
def sym(self,
name: str,
stripped: Optional[bool] = False) -> sp.Matrix:
"""
Returns (and constructs if necessary) the identifiers for a symbolic
entity.
:param name:
name of the symbolic variable
:param stripped:
should additional class information be stripped from the
symbolic variables (default=False)
:return:
matrix of symbolic identifiers
"""
if name not in self._syms:
self._generate_symbol(name)
if stripped and name in self._variable_prototype:
return self._strippedsyms[name]
else:
return self._syms[name]
def sparsesym(self, name: str, force_generate: bool = True) -> List[str]:
"""
Returns (and constructs if necessary) the sparsified identifiers for
a sparsified symbolic variable.
:param name:
name of the symbolic variable
:param force_generate:
whether the symbols should be generated if not available
:return:
linearized Matrix containing the symbolic identifiers
"""
if name not in sparse_functions:
raise ValueError(f'{name} is not marked as sparse')
if name not in self._sparsesyms and force_generate:
self._generate_sparse_symbol(name)
return self._sparsesyms.get(name, [])
def eq(self, name: str) -> sp.Matrix:
"""
Returns (and constructs if necessary) the formulas for a symbolic
entity.
:param name:
name of the symbolic variable
:return:
matrix of symbolic formulas
"""
if name not in self._eqs:
dec = log_execution_time(f'computing {name}', logger)
dec(self._compute_equation)(name)
return self._eqs[name]
def sparseeq(self, name) -> sp.Matrix:
"""
Returns (and constructs if necessary) the sparsified formulas for a
sparsified symbolic variable.
:param name:
name of the symbolic variable
:return:
linearized matrix containing the symbolic formulas
"""
if name not in sparse_functions:
raise ValueError(f'{name} is not marked as sparse')
if name not in self._sparseeqs:
self._generate_sparse_symbol(name)
return self._sparseeqs[name]
def colptrs(self, name: str) -> Union[List[sp.Number],
List[List[sp.Number]]]:
"""
Returns (and constructs if necessary) the column pointers for
a sparsified symbolic variable.
:param name:
name of the symbolic variable
:return:
list containing the column pointers
"""
if name not in sparse_functions:
raise ValueError(f'{name} is not marked as sparse')
if name not in self._sparseeqs:
self._generate_sparse_symbol(name)
return self._colptrs[name]
def rowvals(self, name: str) -> Union[List[sp.Number],
List[List[sp.Number]]]:
"""
Returns (and constructs if necessary) the row values for a
sparsified symbolic variable.
:param name:
name of the symbolic variable
:return:
list containing the row values
"""
if name not in sparse_functions:
raise ValueError(f'{name} is not marked as sparse')
if name not in self._sparseeqs:
self._generate_sparse_symbol(name)
return self._rowvals[name]
def val(self, name: str) -> List[float]:
"""
Returns (and constructs if necessary) the numeric values of a
symbolic entity
:param name:
name of the symbolic variable
:return:
list containing the numeric values
"""
if name not in self._vals:
self._generate_value(name)
return self._vals[name]
def name(self, name: str) -> List[str]:
"""
Returns (and constructs if necessary) the names of a symbolic
variable
:param name:
name of the symbolic variable
:return:
list of names
"""
if name not in self._names:
self._generate_name(name)
return self._names[name]
def free_symbols(self) -> Set[sp.Basic]:
"""
        Returns the set of free symbols that appear in the ODE right hand
        side and initial conditions.
"""
return set(chain.from_iterable(
state.get_free_symbols()
for state in self._states
))
def _generate_symbol(self, name: str, *, from_sbml: bool = False) -> None:
"""
Generates the symbolic identifiers for a symbolic variable
:param name:
name of the symbolic variable
"""
if name in self._variable_prototype:
component = self._variable_prototype[name]
self._syms[name] = sp.Matrix([
comp.get_id()
for comp in getattr(self, component)
])
# this gives us access to the "stripped" symbols that were
# generated by pysb (if compiling a pysb model). To ensure
# correctness of derivatives, the same assumptions as in pysb
# have to be used (currently no assumptions)
# NB if we are compiling a SBML model, then it will be the same
# as the "non-stripped" in order to preserve assumptions
self._strippedsyms[name] = self._syms[name] if from_sbml \
else sp.Matrix([
sp.Symbol(comp.get_name())
for comp in getattr(self, component)
])
if name == 'y':
self._syms['my'] = sp.Matrix([
comp.get_measurement_symbol()
for comp in getattr(self, component)
])
return
elif name == 'x':
self._syms[name] = sp.Matrix([
state.get_id()
for state in self._states
if state._conservation_law is None
])
return
elif name == 'sx0':
self._syms[name] = sp.Matrix([
f's{state.get_id()}_0'
for state in self._states
if state._conservation_law is None
])
return
elif name == 'dtcldp':
            # check whether the CL consists of only one state; then
            # sensitivities drop out, otherwise generate symbols
self._syms[name] = sp.Matrix([
[sp.Symbol(f's{strip_pysb(tcl.get_id())}__'
f'{strip_pysb(par.get_id())}', real=True)
for par in self._parameters]
if self.conservation_law_has_multispecies(tcl)
else [0] * self.num_par()
for tcl in self._conservationlaws
])
return
elif name == 'xdot_old':
length = len(self.eq('xdot'))
elif name in sparse_functions:
self._generate_sparse_symbol(name)
return
elif name in self._symboldim_funs:
length = self._symboldim_funs[name]()
elif name in sensi_functions:
length = self.eq(name).shape[0]
else:
length = len(self.eq(name))
self._syms[name] = sp.Matrix([
sp.Symbol(f'{name}{i}', real=True) for i in range(length)
])
def generate_basic_variables(self, *, from_sbml: bool = False) -> None:
"""
Generates the symbolic identifiers for all variables in
ODEModel.variable_prototype
"""
# Workaround to generate `'w'` before events, such that `'w'` can be
# replaced in events, to avoid adding `w` to the header of
# "{model_name}_stau.cpp".
if 'w' not in self._syms:
self._generate_symbol('w', from_sbml=from_sbml)
# We need to process events and Heaviside functions in the ODE Model,
# before adding it to ODEExporter
self.parse_events()
for var in self._variable_prototype:
# Part of the workaround described earlier in this method.
if var == 'w':
continue
if var not in self._syms:
self._generate_symbol(var, from_sbml=from_sbml)
self._generate_symbol('x', from_sbml=from_sbml)
def parse_events(self) -> None:
"""
        This function checks the right hand side for roots of Heaviside
functions or events, collects the roots, removes redundant roots,
and replaces the formulae of the found roots by identifiers of AMICI's
Heaviside function implementation in the right hand side
"""
# Track all roots functions in the right hand side
roots = copy.deepcopy(self._events)
for state in self._states:
state.set_dt(self._process_heavisides(state.get_dt(), roots))
for expr in self._expressions:
expr.set_val(self._process_heavisides(expr.get_val(), roots))
# Now add the found roots to the model components
for root in roots:
# skip roots of SBML events, as these have already been added
if root in self._events:
continue
# add roots of heaviside functions
self.add_component(root)
def get_appearance_counts(self, idxs: List[int]) -> List[int]:
"""
        Counts how often each state appears in the time derivatives of
        other states and in expressions, for a subset of states
:param idxs:
list of state indices for which counts are to be computed
:return:
list of counts for the states ordered according to the provided
indices
"""
free_symbols_dt = list(itertools.chain.from_iterable(
[
str(symbol)
for symbol in state.get_dt().free_symbols
]
for state in self._states
))
free_symbols_expr = list(itertools.chain.from_iterable(
[
str(symbol)
for symbol in expr.get_val().free_symbols
]
for expr in self._expressions
))
return [
free_symbols_dt.count(str(self._states[idx].get_id()))
+
free_symbols_expr.count(str(self._states[idx].get_id()))
for idx in idxs
]
def _generate_sparse_symbol(self, name: str) -> None:
"""
Generates the sparse symbolic identifiers, symbolic identifiers,
sparse equations, column pointers and row values for a symbolic
variable
:param name:
name of the symbolic variable
"""
matrix = self.eq(name)
match_deriv = re.match(r'd([\w]+)d([a-z]+)', name)
if match_deriv:
rownames = self.sym(match_deriv.group(1))
colnames = self.sym(match_deriv.group(2))
if name == 'dJydy':
# One entry per y-slice
self._colptrs[name] = []
self._rowvals[name] = []
self._sparseeqs[name] = []
self._sparsesyms[name] = []
self._syms[name] = []
for iy in range(self.num_obs()):
symbol_col_ptrs, symbol_row_vals, sparse_list, symbol_list, \
sparse_matrix = csc_matrix(matrix[iy, :],
rownames=rownames,
colnames=colnames,
identifier=iy)
self._colptrs[name].append(symbol_col_ptrs)
self._rowvals[name].append(symbol_row_vals)
self._sparseeqs[name].append(sparse_list)
self._sparsesyms[name].append(symbol_list)
self._syms[name].append(sparse_matrix)
else:
symbol_col_ptrs, symbol_row_vals, sparse_list, symbol_list, \
sparse_matrix = csc_matrix(
matrix, rownames=rownames, colnames=colnames,
pattern_only=name in nobody_functions
)
self._colptrs[name] = symbol_col_ptrs
self._rowvals[name] = symbol_row_vals
self._sparseeqs[name] = sparse_list
self._sparsesyms[name] = symbol_list
self._syms[name] = sparse_matrix
def _compute_equation(self, name: str) -> None:
"""
        Computes the symbolic formula for a symbolic variable
:param name:
name of the symbolic variable
"""
# replacement ensures that we don't have to adapt name in abstract
# model and keep backwards compatibility with matlab
match_deriv = re.match(r'd([\w_]+)d([a-z_]+)',
name.replace('dJydsigma', 'dJydsigmay'))
time_symbol = sp.Matrix([symbol_with_assumptions('t')])
if name in self._equation_prototype:
self._equation_from_component(name, self._equation_prototype[name])
elif name in self._total_derivative_prototypes:
args = self._total_derivative_prototypes[name]
args['name'] = name
self._lock_total_derivative += args['chainvars']
self._total_derivative(**args)
for cv in args['chainvars']:
self._lock_total_derivative.remove(cv)
elif name == 'xdot':
self._eqs[name] = sp.Matrix([
s.get_dt() for s in self._states
if s._conservation_law is None
])
elif name == 'x_rdata':
self._eqs[name] = sp.Matrix([
state.get_id()
if state._conservation_law is None
else state._conservation_law
for state in self._states
])
elif name == 'x_solver':
self._eqs[name] = sp.Matrix([
state.get_id()
for state in self._states
if state._conservation_law is None
])
elif name == 'sx_solver':
self._eqs[name] = sp.Matrix([
self.sym('sx_rdata')[ix]
for ix, state in enumerate(self._states)
if state._conservation_law is None
])
elif name == 'sx0':
self._derivative(name[1:], 'p', name=name)
elif name == 'sx0_fixedParameters':
# deltax = -x+x0_fixedParameters if x0_fixedParameters>0 else 0
# deltasx = -sx+dx0_fixed_parametersdx*sx+dx0_fixedParametersdp
# if x0_fixedParameters>0 else 0
# sx0_fixedParameters = sx+deltasx =
# dx0_fixed_parametersdx*sx+dx0_fixedParametersdp
self._eqs[name] = smart_jacobian(
self.eq('x0_fixedParameters'), self.sym('p')
)
dx0_fixed_parametersdx = smart_jacobian(
self.eq('x0_fixedParameters'), self.sym('x')
)
if not smart_is_zero_matrix(dx0_fixed_parametersdx):
if isinstance(self._eqs[name], ImmutableDenseMatrix):
self._eqs[name] = MutableDenseMatrix(self._eqs[name])
for ip in range(self._eqs[name].shape[1]):
self._eqs[name][:, ip] += smart_multiply(
dx0_fixed_parametersdx, self.sym('sx0')
)
elif name == 'x0_fixedParameters':
k = self.sym('k')
self._x0_fixedParameters_idx = [
ix
for ix, eq in enumerate(self.eq('x0'))
if any([sym in eq.free_symbols for sym in k])
]
eq = self.eq('x0')
self._eqs[name] = sp.Matrix([eq[ix] for ix in
self._x0_fixedParameters_idx])
elif name == 'dtotal_cldx_rdata':
# not correctly parsed in regex
self._derivative('total_cl', 'x_rdata')
elif name == 'dtcldx':
# this is always zero
self._eqs[name] = \
sp.zeros(self.num_cons_law(), self.num_states_solver())
elif name == 'dtcldp':
# force symbols
self._eqs[name] = self.sym(name)
elif name == 'dxdotdx_explicit':
# force symbols
self._derivative('xdot', 'x', name=name)
elif name == 'dxdotdp_explicit':
# force symbols
self._derivative('xdot', 'p', name=name)
elif name == 'drootdt':
self._eqs[name] = smart_jacobian(self.eq('root'), time_symbol)
elif name == 'drootdt_total':
# backsubstitution of optimized right hand side terms into RHS
# calling subs() is costly. Due to looping over events though, the
# following lines are only evaluated if a model has events
w_sorted = \
toposort_symbols(dict(zip(self._syms['w'], self._eqs['w'])))
tmp_xdot = self._eqs['xdot'].subs(w_sorted)
self._eqs[name] = (
smart_multiply(self.eq('drootdx'), tmp_xdot)
+ self.eq('drootdt')
)
elif name == 'deltax':
# fill boluses for Heaviside functions, as empty state updates
# would cause problems when writing the function file later
event_eqs = []
for event in self._events:
if event._state_update is None:
event_eqs.append(sp.zeros(self.num_states_solver(), 1))
else:
event_eqs.append(event._state_update)
self._eqs[name] = event_eqs
elif name == 'ddeltaxdx':
self._eqs[name] = [
smart_jacobian(self.eq('deltax')[ie], self.sym('x'))
for ie in range(self.num_events())
]
elif name == 'ddeltaxdt':
self._eqs[name] = [
smart_jacobian(self.eq('deltax')[ie], time_symbol)
for ie in range(self.num_events())
]
elif name == 'ddeltaxdp':
self._eqs[name] = [
smart_jacobian(self.eq('deltax')[ie], self.sym('p'))
for ie in range(self.num_events())
]
elif name == 'stau':
self._eqs[name] = [
-self.eq('sroot')[ie, :] / self.eq('drootdt_total')[ie]
for ie in range(self.num_events())
]
elif name == 'deltasx':
event_eqs = []
for ie, event in enumerate(self._events):
if event._state_update is not None:
# ====== chain rule for the state variables ===============
# get xdot with expressions back-substituted
tmp_eq = smart_multiply(
(self.sym('xdot_old') - self.sym('xdot')),
self.eq('stau')[ie])
# construct an enhanced state sensitivity, which accounts
# for the time point sensitivity as well
tmp_dxdp = self.sym('sx') * sp.ones(1, self.num_par())
tmp_dxdp += smart_multiply(self.sym('xdot'),
self.eq('stau')[ie])
tmp_eq += smart_multiply(self.eq('ddeltaxdx')[ie],
tmp_dxdp)
# ====== chain rule for the time point ====================
tmp_eq += smart_multiply(self.eq('ddeltaxdt')[ie],
self.eq('stau')[ie])
# ====== partial derivative for the parameters ============
tmp_eq += self.eq('ddeltaxdp')[ie]
else:
tmp_eq = smart_multiply(
(self.eq('xdot_old') - self.eq('xdot')),
self.eq('stau')[ie])
event_eqs.append(tmp_eq)
self._eqs[name] = event_eqs
elif name == 'xdot_old':
# force symbols
self._eqs[name] = self.sym(name)
elif match_deriv:
self._derivative(match_deriv.group(1), match_deriv.group(2), name)
else:
raise ValueError(f'Unknown equation {name}')
if name == 'root':
# Events are processed after the ODE model has been set up.
# Equations are there, but symbols for roots must be added
self.sym('h')
if name in ['Jy', 'dydx']:
# do not transpose if we compute the partial derivative as part of
# a total derivative
if not len(self._lock_total_derivative):
self._eqs[name] = self._eqs[name].transpose()
if self._simplify:
dec = log_execution_time(f'simplifying {name}', logger)
if isinstance(self._eqs[name], list):
self._eqs[name] = [dec(sub_eq.applyfunc)(self._simplify)
for sub_eq in self._eqs[name]]
else:
self._eqs[name] = \
dec(self._eqs[name].applyfunc)(self._simplify)
def sym_names(self) -> List[str]:
"""
Returns a list of names of generated symbolic variables
:return:
list of names
"""
return list(self._syms.keys())
def _derivative(self, eq: str, var: str, name: str = None) -> None:
"""
Creates a new symbolic variable according to a derivative
:param eq:
name of the symbolic variable that defines the formula
:param var:
name of the symbolic variable that defines the identifiers
with respect to which the derivatives are to be computed
:param name:
name of resulting symbolic variable, default is d{eq}d{var}
"""
if not name:
name = f'd{eq}d{var}'
# automatically detect chainrule
chainvars = []
ignore_chainrule = {
('xdot', 'p'): 'w', # has generic implementation in c++ code
('xdot', 'x'): 'w', # has generic implementation in c++ code
('w', 'w'): 'tcl', # dtcldw = 0
('w', 'x'): 'tcl', # dtcldx = 0
}
for cv in ['w', 'tcl']:
if var_in_function_signature(eq, cv) \
and cv not in self._lock_total_derivative \
and var is not cv \
and min(self.sym(cv).shape) \
and (
(eq, var) not in ignore_chainrule
or ignore_chainrule[(eq, var)] != cv
):
chainvars.append(cv)
if len(chainvars):
self._lock_total_derivative += chainvars
self._total_derivative(name, eq, chainvars, var)
for cv in chainvars:
self._lock_total_derivative.remove(cv)
return
# this is the basic requirement check
needs_stripped_symbols = eq == 'xdot' and var != 'x'
# partial derivative
if eq == 'Jy':
sym_eq = self.eq(eq).transpose()
else:
sym_eq = self.eq(eq)
if pysb is not None and needs_stripped_symbols:
needs_stripped_symbols = not any(
isinstance(sym, pysb.Component)
for sym in sym_eq.free_symbols
)
# now check whether we are working with energy_modeling branch
# where pysb class info is already stripped
# TODO: fixme as soon as energy_modeling made it to the main pysb
# branch
sym_var = self.sym(var, needs_stripped_symbols)
derivative = smart_jacobian(sym_eq, sym_var)
self._eqs[name] = derivative
# compute recursion depth based on nilpotency of jacobian. computing
# nilpotency can be done more efficiently on numerical sparsity pattern
if name == 'dwdw':
nonzeros = np.asarray(
derivative.applyfunc(lambda x: int(not x.is_zero))
).astype(np.int64)
if max(nonzeros.shape):
while nonzeros.max():
nonzeros = nonzeros.dot(nonzeros)
self._w_recursion_depth += 1
if self._w_recursion_depth > len(sym_eq):
raise RuntimeError(
'dwdw is not nilpotent. Something, somewhere went '
'terribly wrong. Please file a bug report at '
'https://github.com/AMICI-dev/AMICI/issues and '
'attach this model.'
)
if name == 'dydw' and not smart_is_zero_matrix(derivative):
dwdw = self.eq('dwdw')
            # h(k) = d{eq}dw * dwdw^k  (initialized with k=1)
h = smart_multiply(derivative, dwdw)
while not smart_is_zero_matrix(h):
self._eqs[name] += h
# h(k+1) = d{eq}dw*dwdw^(k+1) = h(k)*dwdw
h = smart_multiply(h, dwdw)
def _total_derivative(self, name: str, eq: str, chainvars: List[str],
var: str, dydx_name: str = None,
dxdz_name: str = None) -> None:
"""
Creates a new symbolic variable according to a total derivative
using the chain rule
:param name:
name of resulting symbolic variable
:param eq:
name of the symbolic variable that defines the formula
        :param chainvars:
            names of the symbolic variables that define the
            identifiers with respect to which the chain rules are applied
        :param var:
            name of the symbolic variable that defines the identifiers
            with respect to which the derivatives are to be computed
:param dydx_name:
defines the name of the symbolic variable that
defines the derivative of the `eq` with respect to `chainvar`,
default is d{eq}d{chainvar}
:param dxdz_name:
defines the name of the symbolic variable that
defines the derivative of the `chainvar` with respect to `var`,
default is d{chainvar}d{var}
"""
# compute total derivative according to chainrule
# Dydz = dydx*dxdz + dydz
# initialize with partial derivative dydz without chain rule
self._eqs[name] = self.sym_or_eq(name, f'd{eq}d{var}')
if not isinstance(self._eqs[name], sp.Symbol):
# if not a Symbol, create a copy using sympy API
# NB deepcopy does not work safely, see sympy issue #7672
self._eqs[name] = self._eqs[name].copy()
for chainvar in chainvars:
if dydx_name is None:
dydx_name = f'd{eq}d{chainvar}'
if dxdz_name is None:
dxdz_name = f'd{chainvar}d{var}'
dydx = self.sym_or_eq(name, dydx_name)
dxdz = self.sym_or_eq(name, dxdz_name)
            # Save time for large models if one multiplicand is zero,
            # which is not checked for by sympy
if not smart_is_zero_matrix(dydx) and not \
smart_is_zero_matrix(dxdz):
if dxdz.shape[1] == 1 and \
self._eqs[name].shape[1] != dxdz.shape[1]:
for iz in range(self._eqs[name].shape[1]):
self._eqs[name][:, iz] += smart_multiply(dydx, dxdz)
else:
self._eqs[name] += smart_multiply(dydx, dxdz)
def sym_or_eq(self, name: str, varname: str) -> sp.Matrix:
"""
Returns symbols or equations depending on whether a given
variable appears in the function signature or not.
:param name:
name of function for which the signature should be checked
:param varname:
name of the variable which should be contained in the
function signature
:return:
the variable symbols if the variable is part of the signature and
the variable equations otherwise.
"""
# dwdx and dwdp will be dynamically computed and their ordering
# within a column may differ from the initialization of symbols here,
# so those are not safe to use. Not removing them from signature as
# this would break backwards compatibility.
if var_in_function_signature(name, varname) \
and varname not in ['dwdx', 'dwdp']:
return self.sym(varname)
else:
return self.eq(varname)
def _multiplication(self, name: str, x: str, y: str,
transpose_x: Optional[bool] = False,
sign: Optional[int] = 1):
"""
Creates a new symbolic variable according to a multiplication
:param name:
            name of resulting symbolic variable
:param x:
name of the symbolic variable that defines the first factor
:param y:
name of the symbolic variable that defines the second factor
:param transpose_x:
indicates whether the first factor should be
transposed before multiplication
:param sign:
defines the sign of the product, should be +1 or -1
"""
if sign not in [-1, 1]:
raise TypeError(f'sign must be +1 or -1, was {sign}')
variables = dict()
for varname in [x, y]:
if var_in_function_signature(name, varname):
variables[varname] = self.sym(varname)
else:
variables[varname] = self.eq(varname)
if transpose_x:
xx = variables[x].transpose()
else:
xx = variables[x]
yy = variables[y]
self._eqs[name] = sign * smart_multiply(xx, yy)
def _equation_from_component(self, name: str, component: str) -> None:
"""
Generates the formulas of a symbolic variable from the attributes
:param name:
name of resulting symbolic variable
:param component:
name of the attribute
"""
self._eqs[name] = sp.Matrix(
[comp.get_val() for comp in getattr(self, component)]
)
def get_conservation_laws(self) -> List[Tuple[sp.Symbol, sp.Basic]]:
""" Returns a list of states with conservation law set
:return:
list of state identifiers
"""
return [
(state.get_id(), state._conservation_law)
for state in self._states
if state._conservation_law is not None
]
def _generate_value(self, name: str) -> None:
"""
Generates the numeric values of a symbolic variable from value
prototypes
:param name:
name of resulting symbolic variable
"""
if name in self._value_prototype:
component = self._value_prototype[name]
else:
raise ValueError(f'No values for {name}')
self._vals[name] = [comp.get_val()
for comp in getattr(self, component)]
def _generate_name(self, name: str) -> None:
"""
Generates the names of a symbolic variable from variable prototypes or
equation prototypes
:param name:
name of resulting symbolic variable
"""
if name in self._variable_prototype:
component = self._variable_prototype[name]
elif name in self._equation_prototype:
component = self._equation_prototype[name]
else:
raise ValueError(f'No names for {name}')
self._names[name] = [comp.get_name()
for comp in getattr(self, component)]
def state_has_fixed_parameter_initial_condition(self, ix: int) -> bool:
"""
Checks whether the state at specified index has a fixed parameter
initial condition
:param ix:
state index
:return:
boolean indicating if any of the initial condition free
variables is contained in the model constants
"""
ic = self._states[ix].get_val()
if not isinstance(ic, sp.Basic):
return False
return any([
fp in [c.get_id() for c in self._constants]
for fp in ic.free_symbols
])
def state_has_conservation_law(self, ix: int) -> bool:
"""
Checks whether the state at specified index has a conservation
law set
:param ix:
state index
:return:
boolean indicating if conservation_law is not None
"""
return self._states[ix]._conservation_law is not None
def state_is_constant(self, ix: int) -> bool:
"""
Checks whether the temporal derivative of the state is zero
:param ix:
state index
:return:
boolean indicating if constant over time
"""
return self._states[ix].get_dt() == 0.0
def conservation_law_has_multispecies(self,
tcl: ConservationLaw) -> bool:
"""
        Checks whether a conservation law involves multiple species or just
        defines one constant species
        :param tcl:
            conservation law
        :return:
            boolean indicating whether the conservation law involves more
            than one species
"""
state_set = set(self.sym('x_rdata'))
n_species = len(state_set.intersection(tcl.get_val().free_symbols))
return n_species > 1
def _expr_is_time_dependent(self, expr: sp.Expr) -> bool:
"""Determine whether an expression is time-dependent.
:param expr:
The expression.
:returns:
Whether the expression is time-dependent.
"""
        # The symbols in `expr.free_symbols` differ from the state
        # identifiers in `self._states`, so it's easier to compare as `str`.
expr_syms = {str(sym) for sym in expr.free_symbols}
# Check if the time variable is in the expression.
if 't' in expr_syms:
return True
# Check if any time-dependent states are in the expression.
state_syms = [str(sym) for sym in self._states]
for state in expr_syms.intersection(state_syms):
if not self.state_is_constant(state_syms.index(state)):
return True
return False
def _get_unique_root(
self,
root_found: sp.Expr,
roots: List[Event],
) -> sp.Symbol:
"""
Collects roots of Heaviside functions and events and stores them in
the roots list. It checks for redundancy to not store symbolically
equivalent root functions more than once.
:param root_found:
equation of the root function
:param roots:
list of already known root functions with identifier
:returns:
unique identifier for root, or `None` if the root is not
time-dependent
"""
if not self._expr_is_time_dependent(root_found):
return None
for root in roots:
if sp.simplify(root_found - root.get_val()) == 0:
return root.get_id()
# create an event for a new root function
root_symstr = f'Heaviside_{len(roots)}'
roots.append(Event(
identifier=sp.Symbol(root_symstr),
name=root_symstr,
value=root_found,
state_update=None,
event_observable=None
))
return roots[-1].get_id()
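    # Illustrative sketch: a second, symbolically equivalent root reuses the
    # identifier of the first one instead of appending a new Event (assumes
    # sympy as `sp`, a time symbol `t` and an ODEModel instance `model`):
    #
    #     >>> roots = []
    #     >>> model._get_unique_root(t - 1, roots)   # doctest: +SKIP
    #     Heaviside_0
    #     >>> model._get_unique_root(-1 + t, roots)  # doctest: +SKIP
    #     Heaviside_0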
def _collect_heaviside_roots(
self,
args: Sequence[sp.Expr]
) -> List[sp.Expr]:
"""
        Recursively checks an expression for the occurrence of Heaviside
        functions and returns all roots found
:param args:
args attribute of the expanded expression
:returns:
root functions that were extracted from Heaviside function
arguments
"""
root_funs = []
for arg in args:
if arg.func == sp.Heaviside:
root_funs.append(arg.args[0])
elif arg.has(sp.Heaviside):
root_funs.extend(self._collect_heaviside_roots(arg.args))
# substitute 'w' expressions into root expressions now, to avoid
# rewriting '{model_name}_stau.cpp' headers to include 'w.h'
w_sorted = toposort_symbols(dict(zip(self._syms['w'], self.eq('w'))))
root_funs = [
r.subs(w_sorted)
for r in root_funs
]
return root_funs
def _process_heavisides(
self,
dxdt: sp.Expr,
roots: List[Event],
) -> sp.Expr:
"""
        Parses the RHS of a state variable, checks for Heaviside functions,
        collects unique root functions that can be tracked by SUNDIALS and
        replaces Heaviside functions by amici helper variables that will be
updated based on SUNDIALS root tracking.
:param dxdt:
right hand side of state variable
:param roots:
list of known root functions with identifier
:returns:
dxdt with Heaviside functions replaced by amici helper variables
"""
# expanding the rhs will in general help to collect the same
# heaviside function
dt_expanded = dxdt.expand()
# track all the old Heaviside expressions in tmp_roots_old
# replace them later by the new expressions
heavisides = []
# run through the expression tree and get the roots
tmp_roots_old = self._collect_heaviside_roots(dt_expanded.args)
for tmp_old in tmp_roots_old:
# we want unique identifiers for the roots
tmp_new = self._get_unique_root(tmp_old, roots)
# `tmp_new` is None if the root is not time-dependent.
if tmp_new is None:
continue
# For Heavisides, we need to add the negative function as well
self._get_unique_root(sp.sympify(- tmp_old), roots)
heavisides.append((sp.Heaviside(tmp_old), tmp_new))
if heavisides:
# only apply subs if necessary
for heaviside_sympy, heaviside_amici in heavisides:
dxdt = dxdt.subs(heaviside_sympy, heaviside_amici)
return dxdt
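    # Illustrative sketch (assumes sympy as `sp`, a time symbol `t`, a
    # parameter `k` and an ODEModel instance `model`): a Heaviside factor in
    # the right hand side is replaced by the identifier of the corresponding
    # root-tracking helper:
    #
    #     >>> model._process_heavisides(k * sp.Heaviside(t - 5), [])  # doctest: +SKIP
    #     Heaviside_0*k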
def _print_with_exception(math: sp.Expr) -> str:
"""
Generate C++ code for a symbolic expression
:param math:
symbolic expression
:return:
C++ code for the specified expression
"""
# get list of custom replacements
user_functions = {fun['sympy']: fun['c++'] for fun in CUSTOM_FUNCTIONS}
try:
# Required until https://github.com/sympy/sympy/pull/20558 is released
with _monkeypatched(_CXXCodePrinterBase, '_print_Max',
_custom_print_max),\
_monkeypatched(_CXXCodePrinterBase, '_print_Min',
_custom_print_min):
ret = cxxcode(math, standard='c++11',
user_functions=user_functions)
ret = re.sub(r'(^|\W)M_PI(\W|$)', r'\1amici::pi\2', ret)
return ret
except TypeError as e:
raise ValueError(
f'Encountered unsupported function in expression "{math}": '
f'{e}!'
)
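# Example (sketch, assumes sympy as `sp`): sympy expressions are rendered as
# C++11 code, with custom replacements applied where configured, e.g.
#
#     >>> _print_with_exception(sp.Symbol('x') ** 2)  # doctest: +SKIP
#     'std::pow(x, 2)'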
def _get_sym_lines_array(equations: sp.Matrix,
variable: str,
indent_level: int) -> List[str]:
"""
Generate C++ code for assigning symbolic terms in symbols to C++ array
`variable`.
:param equations:
vectors of symbolic expressions
:param variable:
name of the C++ array to assign to
:param indent_level:
indentation level (number of leading blanks)
:return:
C++ code as list of lines
"""
return [' ' * indent_level + f'{variable}[{index}] = '
f'{_print_with_exception(math)};'
for index, math in enumerate(equations)
if not (math == 0 or math == 0.0)]
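# Example (sketch, assumes symbols `a` and `b`): zero entries are skipped,
# so only nontrivial assignments are emitted:
#
#     >>> _get_sym_lines_array(sp.Matrix([0, a + b]), 'w', 4)  # doctest: +SKIP
#     ['    w[1] = a + b;']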
def _get_sym_lines_symbols(symbols: sp.Matrix,
equations: sp.Matrix,
variable: str,
indent_level: int) -> List[str]:
"""
Generate C++ code for where array elements are directly replaced with
their corresponding macro symbol
:param symbols:
vectors of symbols that equations are assigned to
:param equations:
vectors of expressions
:param variable:
name of the C++ array to assign to, only used in comments
:param indent_level:
indentation level (number of leading blanks)
:return:
C++ code as list of lines
"""
return [f'{" " * indent_level}{sym} = {_print_with_exception(math)};'
f' // {variable}[{index}]'.replace('\n',
'\n' + ' ' * indent_level)
for index, (sym, math) in enumerate(zip(symbols, equations))
if not (math == 0 or math == 0.0)]
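# Example (sketch, symbols hypothetical): here each equation is assigned to
# its macro symbol rather than to an array element, with the array position
# kept as a comment:
#
#     >>> _get_sym_lines_symbols(sp.Matrix([sp.Symbol('w0')]),
#     ...                        sp.Matrix([a + b]), 'w', 4)  # doctest: +SKIP
#     ['    w0 = a + b; // w[0]']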
class ODEExporter:
"""
    The ODEExporter class generates AMICI C++ files for an ODE model as
    defined in symbolic expressions.
:ivar model:
ODE definition
:ivar outdir:
see :meth:`amici.ode_export.ODEExporter.set_paths`
:ivar verbose:
more verbose output if True
:ivar assume_pow_positivity:
if set to true, a special pow function is
used to avoid problems with state variables that may become negative
due to numerical errors
    :ivar compiler:
        distutils/setuptools compiler selection to build the
        python extension
:ivar functions:
carries C++ function signatures and other specifications
:ivar model_name:
name of the model that will be used for compilation
:ivar model_path:
path to the generated model specific files
:ivar model_swig_path:
path to the generated swig files
:ivar allow_reinit_fixpar_initcond:
indicates whether reinitialization of
initial states depending on fixedParameters is allowed for this model
:ivar _build_hints:
If the given model uses special functions, this set contains hints for
model building.
:ivar generate_sensitivity_code:
Specifies whether code for sensitivity computation is to be generated
"""
def __init__(
self,
ode_model: ODEModel,
outdir: Optional[str] = None,
verbose: Optional[Union[bool, int]] = False,
assume_pow_positivity: Optional[bool] = False,
compiler: Optional[str] = None,
allow_reinit_fixpar_initcond: Optional[bool] = True,
generate_sensitivity_code: Optional[bool] = True
):
"""
Generate AMICI C++ files for the ODE provided to the constructor.
:param ode_model:
ODE definition
:param outdir:
see :meth:`amici.ode_export.ODEExporter.set_paths`
:param verbose:
            verbosity level for logging, True/False default to
            ``logging.DEBUG``/``logging.ERROR``
:param assume_pow_positivity:
if set to true, a special pow function is
used to avoid problems with state variables that may become
negative due to numerical errors
:param compiler: distutils/setuptools compiler selection to build the
python extension
:param allow_reinit_fixpar_initcond:
see :class:`amici.ode_export.ODEExporter`
        :param generate_sensitivity_code:
            specifies whether code required for sensitivity computation
            will be generated
"""
set_log_level(logger, verbose)
self.outdir: str = outdir
self.verbose: bool = logger.getEffectiveLevel() <= logging.DEBUG
self.assume_pow_positivity: bool = assume_pow_positivity
self.compiler: str = compiler
self.model_name: str = 'model'
output_dir = os.path.join(os.getcwd(),
f'amici-{self.model_name}')
self.model_path: str = os.path.abspath(output_dir)
self.model_swig_path: str = os.path.join(self.model_path, 'swig')
# Signatures and properties of generated model functions (see
# include/amici/model.h for details)
self.model: ODEModel = ode_model
# To only generate a subset of functions, apply subselection here
self.functions: Dict[str, Dict[str, Union[str, List[str]]]] = \
copy.deepcopy(functions)
self.allow_reinit_fixpar_initcond: bool = allow_reinit_fixpar_initcond
self._build_hints = set()
self.generate_sensitivity_code: bool = generate_sensitivity_code
@log_execution_time('generating cpp code', logger)
def generate_model_code(self) -> None:
"""
Generates the native C++ code for the loaded model and a Matlab
script that can be run to compile a mex file from the C++ code
"""
with _monkeypatched(sp.Pow, '_eval_derivative',
_custom_pow_eval_derivative):
self._prepare_model_folder()
self._generate_c_code()
self._generate_m_code()
@log_execution_time('compiling cpp code', logger)
def compile_model(self) -> None:
"""
        Compiles the generated code into a simulatable module
"""
self._compile_c_code(compiler=self.compiler,
verbose=self.verbose)
def _prepare_model_folder(self) -> None:
"""
Remove all files from the model folder.
"""
for file in os.listdir(self.model_path):
file_path = os.path.join(self.model_path, file)
if os.path.isfile(file_path):
os.remove(file_path)
def _generate_c_code(self) -> None:
"""
Create C++ code files for the model based on ODEExporter.model
"""
for function in self.functions.keys():
if function in sensi_functions + sparse_sensi_functions and \
not self.generate_sensitivity_code:
continue
if 'dont_generate_body' not in \
self.functions[function].get('flags', []):
dec = log_execution_time(f'writing {function}.cpp', logger)
dec(self._write_function_file)(function)
if function in sparse_functions \
and 'body' in self.functions[function]:
self._write_function_index(function, 'colptrs')
self._write_function_index(function, 'rowvals')
for name in self.model.sym_names():
# only generate for those that have nontrivial implementation,
# check for both basic variables (not in functions) and function
# computed values
if (name in self.functions and
'body' not in self.functions[name] and
name not in nobody_functions) or \
(name not in self.functions and
len(self.model.sym(name)) == 0):
continue
self._write_index_files(name)
self._write_wrapfunctions_cpp()
self._write_wrapfunctions_header()
self._write_model_header_cpp()
self._write_c_make_file()
self._write_swig_files()
self._write_module_setup()
shutil.copy(CXX_MAIN_TEMPLATE_FILE,
os.path.join(self.model_path, 'main.cpp'))
def _compile_c_code(self,
verbose: Optional[Union[bool, int]] = False,
compiler: Optional[str] = None) -> None:
"""
Compile the generated model code
:param verbose:
Make model compilation verbose
:param compiler:
distutils/setuptools compiler selection to build the python
extension
"""
# setup.py assumes it is run from within the model directory
module_dir = self.model_path
script_args = [sys.executable, os.path.join(module_dir, 'setup.py')]
if verbose:
script_args.append('--verbose')
else:
script_args.append('--quiet')
script_args.extend(['build_ext', f'--build-lib={module_dir}'])
if compiler is not None:
script_args.extend([f'--compiler={compiler}'])
# distutils.core.run_setup looks nicer, but does not let us check the
# result easily
try:
result = subprocess.run(script_args,
cwd=module_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True)
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
print("Failed building the model extension.")
if self._build_hints:
print("Note:")
print('\n'.join(self._build_hints))
raise
if verbose:
print(result.stdout.decode('utf-8'))
def _generate_m_code(self) -> None:
"""
Create a Matlab script for compiling code files to a mex file
"""
# creating the code lines for the Matlab compile script
lines = []
# Events are not yet implemented. Once this is done, the variable nz
# will have to be replaced by "self.model.nz()"
nz = 0
# Second order code is not yet implemented. Once this is done,
# those variables will have to be replaced by
# "self.model.<var>true()", or the corresponding "model.self.o2flag"
nxtrue_rdata = self.model.num_states_rdata()
nytrue = self.model.num_obs()
o2flag = 0
# a preliminary comment
lines.append('% This compile script was automatically created from'
' Python SBML import.')
lines.append('% If mex compiler is set up within MATLAB, it can be run'
' from MATLAB ')
lines.append('% in order to compile a mex-file from the Python'
' generated C++ files.')
lines.append('')
# write the actual compiling code
lines.append(f"modelName = '{self.model_name}';")
lines.append("amimodel.compileAndLinkModel"
"(modelName, '', [], [], [], []);")
lines.append(f"amimodel.generateMatlabWrapper({nxtrue_rdata}, "
f"{nytrue}, {self.model.num_par()}, "
f"{self.model.num_const()}, {nz}, {o2flag}, ...\n [], "
"['simulate_' modelName '.m'], modelName, ...\n"
" 'lin', 1, 1);")
# write compile script (for mex)
compile_script = os.path.join(self.model_path, 'compileMexFile.m')
with open(compile_script, 'w') as fileout:
fileout.write('\n'.join(lines))
def _write_index_files(self, name: str) -> None:
"""
Write index file for a symbolic array.
:param name:
key in self.model._syms for which the respective file should
be written
"""
lines = []
if name in self.model.sym_names():
if name in sparse_functions:
symbols = self.model.sparsesym(name)
else:
symbols = self.model.sym(name).T
# flatten multiobs
if isinstance(next(iter(symbols), None), list):
symbols = [symbol for obs in symbols for symbol in obs]
else:
raise ValueError(f'Unknown symbolic array: {name}')
for index, symbol in enumerate(symbols):
symbol_name = strip_pysb(symbol)
if str(symbol) == '0':
continue
if str(symbol_name) == '':
raise ValueError(f'{name} contains a symbol called ""')
lines.append(
f'#define {symbol_name} {name}[{index}]'
)
filename = os.path.join(self.model_path, f'{self.model_name}_{name}.h')
with open(filename, 'w') as fileout:
fileout.write('\n'.join(lines))
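# Each generated index header aliases a named symbol to its position in the
# corresponding array, e.g. (sketch with hypothetical symbol names):
#
#   #define dxdot0_dw0 dxdotdw[0]
#   #define dxdot1_dw0 dxdotdw[1]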
def _write_function_file(self, function: str) -> None:
"""
Generate equations and write the C++ code for the function
`function`.
:param function:
name of the function to be written (see self.functions)
"""
# first generate the equations to make sure we have everything we
# need in subsequent steps
if function in sparse_functions:
equations = self.model.sparseeq(function)
elif not self.allow_reinit_fixpar_initcond \
and function == 'sx0_fixedParameters':
# Not required. Will create empty function body.
equations = sp.Matrix()
else:
equations = self.model.eq(function)
# function header
lines = [
'#include "amici/symbolic_functions.h"',
'#include "amici/defines.h"',
'#include "sundials/sundials_types.h"',
'',
'#include <gsl/gsl-lite.hpp>',
'#include <array>',
]
# function signature
signature = self.functions[function]['signature']
lines.append('')
# extract symbols that need definitions from signature
# don't add includes for files that won't be generated.
# Unfortunately we cannot check for `self.functions[sym]['body']`
# here since it may not have been generated yet.
for match in re.findall(
fr'const (realtype|double) \*([\w]+)[0]*[,\)]+', signature
):
sym = match[1]
if sym not in self.model.sym_names():
continue
if sym in sparse_functions:
iszero = smart_is_zero_matrix(self.model.sparseeq(sym))
elif sym in self.functions:
iszero = smart_is_zero_matrix(self.model.eq(sym))
else:
iszero = len(self.model.sym(sym)) == 0
if iszero:
continue
lines.append(f'#include "{self.model_name}_{sym}.h"')
# include return symbols
if function in self.model.sym_names() and \
function not in non_unique_id_symbols:
lines.append(f'#include "{self.model_name}_{function}.h"')
lines.extend([
'',
'namespace amici {',
f'namespace model_{self.model_name} {{',
'',
])
lines.append(f'void {function}_{self.model_name}{signature}{{')
# function body
body = self._get_function_body(function, equations)
if self.assume_pow_positivity and 'assume_pow_positivity' \
in self.functions[function].get('flags', []):
body = [re.sub(r'(^|\W)std::pow\(', r'\1amici::pos_pow(', line)
for line in body]
# execute this twice to catch cases where the ending ( would be the
# starting (^|\W) for the following match
body = [re.sub(r'(^|\W)std::pow\(', r'\1amici::pos_pow(', line)
for line in body]
if body:
self.functions[function]['body'] = body
else:
return
lines += body
lines.extend([
'}',
'',
f'}} // namespace model_{self.model_name}',
'} // namespace amici\n',
])
# check custom functions
for fun in CUSTOM_FUNCTIONS:
if 'include' in fun and any(fun['c++'] in line for line in lines):
if 'build_hint' in fun:
self._build_hints.add(fun['build_hint'])
lines.insert(0, fun['include'])
with open(os.path.join(
self.model_path, f'{self.model_name}_{function}.cpp'), 'w'
) as fileout:
fileout.write('\n'.join(lines))
def _write_function_index(self, function: str, indextype: str) -> None:
"""
Generate and write the C++ code for the sparse index vectors
(column pointers or row values) of the function `function`.
:param function:
name of the function to be written (see self.functions)
:param indextype:
type of index {'colptrs', 'rowvals'}
"""
if indextype == 'colptrs':
values = self.model.colptrs(function)
setter = 'indexptrs'
elif indextype == 'rowvals':
values = self.model.rowvals(function)
setter = 'indexvals'
else:
raise ValueError('Invalid value for indextype, must be colptrs or '
f'rowvals: {indextype}')
# function signature
if function in multiobs_functions:
signature = f'(SUNMatrixWrapper &{function}, int index)'
else:
signature = f'(SUNMatrixWrapper &{function})'
lines = [
'#include "amici/sundials_matrix_wrapper.h"',
'#include "sundials/sundials_types.h"',
'',
'#include <array>',
'#include <algorithm>',
'',
'namespace amici {',
f'namespace model_{self.model_name} {{',
'',
]
# Generate static array with indices
if len(values):
static_array_name = f"{function}_{indextype}_{self.model_name}_"
if function in multiobs_functions:
# list of index vectors
lines.append(
"static constexpr std::array<std::array<sunindextype, "
f"{len(values[0])}>, {len(values)}> "
f"{static_array_name} = {{{{"
)
lines.extend([' {'
+ ', '.join(map(str, index_vector)) + '}, '
for index_vector in values])
lines.append("}};")
else:
# single index vector
lines.append("static constexpr std::array<sunindextype, "
f"{len(values)}> {static_array_name} = {{")
lines.append(' ' + ', '.join(map(str, values)))
lines.append("};")
lines.extend([
'',
f'void {function}_{indextype}_{self.model_name}{signature}{{',
])
if len(values):
if function in multiobs_functions:
lines.append(
f" {function}.set_{setter}"
f"(gsl::make_span({static_array_name}[index]));"
)
else:
lines.append(
f" {function}.set_{setter}"
f"(gsl::make_span({static_array_name}));"
)
lines.extend([
'}',
'',
f'}} // namespace model_{self.model_name}',
'} // namespace amici\n',
])
filename = f'{self.model_name}_{function}_{indextype}.cpp'
filename = os.path.join(self.model_path, filename)
with open(filename, 'w') as fileout:
fileout.write('\n'.join(lines))
def _get_function_body(self,
function: str,
equations: sp.Matrix) -> List[str]:
"""
Generate C++ code for body of function `function`.
:param function:
name of the function to be written (see self.functions)
:param equations:
symbolic definition of the function body
:return:
generated C++ code
"""
lines = []
if (
len(equations) == 0
or (
isinstance(equations, (sp.Matrix, sp.ImmutableDenseMatrix))
and min(equations.shape) == 0
)
):
# dJydy is a list
return lines
if not self.allow_reinit_fixpar_initcond \
and function in ['sx0_fixedParameters', 'x0_fixedParameters']:
return lines
if function == 'sx0_fixedParameters':
# here we only want to overwrite values where x0_fixedParameters
# was applied
lines.extend([
# Keep list of indices of fixed parameters occurring in x0
" static const std::array<int, "
+ str(len(self.model._x0_fixedParameters_idx))
+ "> _x0_fixedParameters_idxs = {",
" "
+ ', '.join(str(x)
for x in self.model._x0_fixedParameters_idx),
" };",
"",
# Set all parameters that are to be reset to 0, so that the
# switch statement below only needs to handle non-zero entries
# (which usually reduces file size and speeds up
# compilation significantly).
" for(auto idx: reinitialization_state_idxs) {",
" if(std::find(_x0_fixedParameters_idxs.cbegin(), "
"_x0_fixedParameters_idxs.cend(), idx) != "
"_x0_fixedParameters_idxs.cend())\n"
" sx0_fixedParameters[idx] = 0.0;",
" }"])
cases = dict()
for ipar in range(self.model.num_par()):
expressions = []
for index, formula in zip(
self.model._x0_fixedParameters_idx,
equations[:, ipar]
):
if not formula.is_zero:
expressions.extend([
f'if(std::find('
'reinitialization_state_idxs.cbegin(), '
f'reinitialization_state_idxs.cend(), {index}) != '
'reinitialization_state_idxs.cend())',
f' {function}[{index}] = '
f'{_print_with_exception(formula)};'
])
cases[ipar] = expressions
lines.extend(get_switch_statement('ip', cases, 1))
elif function == 'x0_fixedParameters':
for index, formula in zip(
self.model._x0_fixedParameters_idx,
equations
):
lines.append(
f' if(std::find(reinitialization_state_idxs.cbegin(), '
f'reinitialization_state_idxs.cend(), {index}) != '
'reinitialization_state_idxs.cend())\n '
f'{function}[{index}] = '
f'{_print_with_exception(formula)};')
elif function in event_functions:
cases = {ie: _get_sym_lines_array(equations[ie], function, 0)
for ie in range(self.model.num_events())
if not smart_is_zero_matrix(equations[ie])}
lines.extend(get_switch_statement('ie', cases, 1))
elif function in event_sensi_functions:
outer_cases = {}
for ie, inner_equations in enumerate(equations):
inner_lines = []
inner_cases = {
ipar: _get_sym_lines_array(inner_equations[:, ipar],
function, 0)
for ipar in range(self.model.num_par())
if not smart_is_zero_matrix(inner_equations[:, ipar])}
inner_lines.extend(get_switch_statement(
'ip', inner_cases, 0))
outer_cases[ie] = copy.copy(inner_lines)
lines.extend(get_switch_statement('ie', outer_cases, 1))
elif function in sensi_functions:
cases = {ipar: _get_sym_lines_array(equations[:, ipar], function,
0)
for ipar in range(self.model.num_par())
if not smart_is_zero_matrix(equations[:, ipar])}
lines.extend(get_switch_statement('ip', cases, 1))
elif function in multiobs_functions:
if function == 'dJydy':
cases = {iobs: _get_sym_lines_array(equations[iobs], function,
0)
for iobs in range(self.model.num_obs())
if not smart_is_zero_matrix(equations[iobs])}
else:
cases = {
iobs: _get_sym_lines_array(equations[:, iobs], function, 0)
for iobs in range(self.model.num_obs())
if not smart_is_zero_matrix(equations[:, iobs])
}
lines.extend(get_switch_statement('iy', cases, 1))
elif function in self.model.sym_names() \
and function not in non_unique_id_symbols:
if function in sparse_functions:
symbols = self.model.sparsesym(function)
else:
symbols = self.model.sym(function, stripped=True)
lines += _get_sym_lines_symbols(symbols, equations, function, 4)
else:
lines += _get_sym_lines_array(equations, function, 4)
return [line for line in lines if line]
def _write_wrapfunctions_cpp(self) -> None:
"""
Write model-specific 'wrapper' file (wrapfunctions.cpp).
"""
template_data = {'MODELNAME': self.model_name}
apply_template(
os.path.join(amiciSrcPath, 'wrapfunctions.template.cpp'),
os.path.join(self.model_path, 'wrapfunctions.cpp'),
template_data
)
def _write_wrapfunctions_header(self) -> None:
"""
Write model-specific header file (wrapfunctions.h).
"""
template_data = {'MODELNAME': str(self.model_name)}
apply_template(
os.path.join(amiciSrcPath, 'wrapfunctions.ODE_template.h'),
os.path.join(self.model_path, 'wrapfunctions.h'),
template_data
)
def _write_model_header_cpp(self) -> None:
"""
Write model-specific header and cpp file (MODELNAME.{h,cpp}).
"""
tpl_data = {
'MODELNAME': str(self.model_name),
'NX_RDATA': str(self.model.num_states_rdata()),
'NXTRUE_RDATA': str(self.model.num_states_rdata()),
'NX_SOLVER': str(self.model.num_states_solver()),
'NXTRUE_SOLVER': str(self.model.num_states_solver()),
'NX_SOLVER_REINIT': str(self.model.num_state_reinits()),
'NY': str(self.model.num_obs()),
'NYTRUE': str(self.model.num_obs()),
'NZ': '0',
'NZTRUE': '0',
'NEVENT': str(self.model.num_events()),
'NOBJECTIVE': '1',
'NW': str(len(self.model.sym('w'))),
'NDWDP': str(len(self.model.sparsesym(
'dwdp', force_generate=self.generate_sensitivity_code
))),
'NDWDX': str(len(self.model.sparsesym('dwdx'))),
'NDWDW': str(len(self.model.sparsesym('dwdw'))),
'NDXDOTDW': str(len(self.model.sparsesym('dxdotdw'))),
'NDXDOTDP_EXPLICIT': str(len(self.model.sparsesym(
'dxdotdp_explicit',
force_generate=self.generate_sensitivity_code
))),
'NDXDOTDX_EXPLICIT': str(len(self.model.sparsesym(
'dxdotdx_explicit'))),
'NDJYDY': 'std::vector<int>{%s}'
% ','.join(str(len(x))
for x in self.model.sparsesym('dJydy')),
'UBW': str(self.model.num_states_solver()),
'LBW': str(self.model.num_states_solver()),
'NP': str(self.model.num_par()),
'NK': str(self.model.num_const()),
'O2MODE': 'amici::SecondOrderMode::none',
# using cxxcode ensures proper handling of nan/inf
'PARAMETERS': _print_with_exception(self.model.val('p'))[1:-1],
'FIXED_PARAMETERS': _print_with_exception(self.model.val('k'))[
1:-1],
'PARAMETER_NAMES_INITIALIZER_LIST':
self._get_symbol_name_initializer_list('p'),
'STATE_NAMES_INITIALIZER_LIST':
self._get_symbol_name_initializer_list('x_rdata'),
'FIXED_PARAMETER_NAMES_INITIALIZER_LIST':
self._get_symbol_name_initializer_list('k'),
'OBSERVABLE_NAMES_INITIALIZER_LIST':
self._get_symbol_name_initializer_list('y'),
'EXPRESSION_NAMES_INITIALIZER_LIST':
self._get_symbol_name_initializer_list('w'),
'PARAMETER_IDS_INITIALIZER_LIST':
self._get_symbol_id_initializer_list('p'),
'STATE_IDS_INITIALIZER_LIST':
self._get_symbol_id_initializer_list('x_rdata'),
'FIXED_PARAMETER_IDS_INITIALIZER_LIST':
self._get_symbol_id_initializer_list('k'),
'OBSERVABLE_IDS_INITIALIZER_LIST':
self._get_symbol_id_initializer_list('y'),
'EXPRESSION_IDS_INITIALIZER_LIST':
self._get_symbol_id_initializer_list('w'),
'REINIT_FIXPAR_INITCOND':
'true' if self.allow_reinit_fixpar_initcond else
'false',
'AMICI_VERSION_STRING': __version__,
'AMICI_COMMIT_STRING': __commit__,
'W_RECURSION_DEPTH': self.model._w_recursion_depth,
'QUADRATIC_LLH': 'true'
if self.model._has_quadratic_nllh else 'false',
}
for fun, fundef in self.functions.items():
if fun in nobody_functions:
continue
if 'body' not in fundef:
tpl_data[f'{fun.upper()}_DEF'] = ''
if fun in sensi_functions + sparse_sensi_functions and \
not self.generate_sensitivity_code:
impl = ''
else:
impl = get_model_override_implementation(
fun, self.model_name, nobody=True
)
tpl_data[f'{fun.upper()}_IMPL'] = impl
if fun in sparse_functions:
for indexfield in ['colptrs', 'rowvals']:
if fun in sparse_sensi_functions and \
not self.generate_sensitivity_code:
impl = ''
else:
impl = get_sunindex_override_implementation(
fun, self.model_name, indexfield, nobody=True
)
tpl_data[f'{fun.upper()}_{indexfield.upper()}_DEF'] \
= ''
tpl_data[f'{fun.upper()}_{indexfield.upper()}_IMPL'] \
= impl
continue
tpl_data[f'{fun.upper()}_DEF'] = \
get_function_extern_declaration(fun, self.model_name)
tpl_data[f'{fun.upper()}_IMPL'] = \
get_model_override_implementation(fun, self.model_name)
if fun in sparse_functions:
tpl_data[f'{fun.upper()}_COLPTRS_DEF'] = \
get_sunindex_extern_declaration(fun, self.model_name,
'colptrs')
tpl_data[f'{fun.upper()}_COLPTRS_IMPL'] = \
get_sunindex_override_implementation(fun, self.model_name,
'colptrs')
tpl_data[f'{fun.upper()}_ROWVALS_DEF'] = \
get_sunindex_extern_declaration(fun, self.model_name,
'rowvals')
tpl_data[f'{fun.upper()}_ROWVALS_IMPL'] = \
get_sunindex_override_implementation(fun, self.model_name,
'rowvals')
if self.model.num_states_solver() == self.model.num_states_rdata():
tpl_data['X_RDATA_DEF'] = ''
tpl_data['X_RDATA_IMPL'] = ''
apply_template(
os.path.join(amiciSrcPath, 'model_header.ODE_template.h'),
os.path.join(self.model_path, f'{self.model_name}.h'),
tpl_data
)
apply_template(
os.path.join(amiciSrcPath, 'model.ODE_template.cpp'),
os.path.join(self.model_path, f'{self.model_name}.cpp'),
tpl_data
)
def _get_symbol_name_initializer_list(self, name: str) -> str:
"""
Get C++ initializer list for the vector of names of the given
model entity
:param name:
any key present in self.model._syms
:return:
Template initializer list of names
"""
return '\n'.join(
[
f'"{symbol}", // {name}[{idx}]'
for idx, symbol in enumerate(self.model.name(name))
]
)
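# Example output for name='p' (sketch with hypothetical parameter names):
#
#   "k_on", // p[0]
#   "k_off", // p[1]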
def _get_symbol_id_initializer_list(self, name: str) -> str:
"""
Get C++ initializer list for the vector of ids of the given model
entity
:param name:
any key present in self.model._syms
:return:
Template initializer list of ids
"""
return '\n'.join(
[
f'"{strip_pysb(symbol)}", // {name}[{idx}]'
for idx, symbol in enumerate(self.model.sym(name))
]
)
def _write_c_make_file(self):
"""
Write CMake CMakeLists.txt file for this model.
"""
sources = [
f + ' ' for f in os.listdir(self.model_path)
if f.endswith('.cpp') and f != 'main.cpp'
]
template_data = {'MODELNAME': self.model_name,
'SOURCES': '\n'.join(sources),
'AMICI_VERSION': __version__}
apply_template(
MODEL_CMAKE_TEMPLATE_FILE,
os.path.join(self.model_path, 'CMakeLists.txt'),
template_data
)
def _write_swig_files(self) -> None:
"""
Write SWIG interface files for this model.
"""
if not os.path.exists(self.model_swig_path):
os.makedirs(self.model_swig_path)
template_data = {'MODELNAME': self.model_name}
apply_template(
os.path.join(amiciSwigPath, 'modelname.template.i'),
os.path.join(self.model_swig_path, self.model_name + '.i'),
template_data
)
shutil.copy(SWIG_CMAKE_TEMPLATE_FILE,
os.path.join(self.model_swig_path, 'CMakeLists.txt'))
def _write_module_setup(self) -> None:
"""
Create a distutils setup.py file for compiling the model module.
"""
template_data = {'MODELNAME': self.model_name,
'AMICI_VERSION': __version__,
'PACKAGE_VERSION': '0.1.0'}
apply_template(os.path.join(amiciModulePath, 'setup.template.py'),
os.path.join(self.model_path, 'setup.py'),
template_data)
apply_template(os.path.join(amiciModulePath, 'MANIFEST.template.in'),
os.path.join(self.model_path, 'MANIFEST.in'), {})
# write __init__.py for the model module
if not os.path.exists(os.path.join(self.model_path, self.model_name)):
os.makedirs(os.path.join(self.model_path, self.model_name))
apply_template(
os.path.join(amiciModulePath, '__init__.template.py'),
os.path.join(self.model_path, self.model_name, '__init__.py'),
template_data
)
def set_paths(self, output_dir: str) -> None:
"""
Set output paths for the model and create if necessary
:param output_dir:
relative or absolute path where the generated model
code is to be placed; will be created if it does not exist.
"""
self.model_path = os.path.abspath(output_dir)
self.model_swig_path = os.path.join(self.model_path, 'swig')
for directory in [self.model_path, self.model_swig_path]:
if not os.path.exists(directory):
os.makedirs(directory)
def set_name(self, model_name: str) -> None:
"""
Sets the model name
:param model_name:
name of the model (may only contain upper and lower case letters,
digits and underscores, and must not start with a digit)
"""
if not is_valid_identifier(model_name):
raise ValueError(
f"'{model_name}' is not a valid model name. "
"Model name may only contain upper and lower case letters, "
"digits and underscores, and must not start with a digit.")
self.model_name = model_name
class TemplateAmici(Template):
"""
Template format used in AMICI (see string.Template for more details).
:ivar delimiter:
delimiter that identifies template variables
"""
delimiter = 'TPL_'
def apply_template(source_file: str,
target_file: str,
template_data: Dict[str, str]) -> None:
"""
Load the source file, apply template substitution as provided in
template_data and save as target_file.
:param source_file:
relative or absolute path to template file
:param target_file:
relative or absolute path to output file
:param template_data:
template keywords to substitute (key is template
variable without :attr:`TemplateAmici.delimiter`)
"""
with open(source_file) as filein:
src = TemplateAmici(filein.read())
result = src.safe_substitute(template_data)
with open(target_file, 'w') as fileout:
fileout.write(result)
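# Minimal substitution example (sketch): template variables are written as
# TPL_<NAME> in the template files and resolved via string.Template.
#
# >>> TemplateAmici('#define MODEL TPL_MODELNAME').safe_substitute(
# ...     {'MODELNAME': 'mymodel'})
# '#define MODEL mymodel'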
def strip_pysb(symbol: sp.Basic) -> sp.Basic:
"""
Strips pysb info from a :class:`pysb.Component` object
:param symbol:
symbolic expression
:return:
stripped expression
"""
# strip pysb type and transform into a flat sympy.Symbol;
# this ensures a plain name is used when converting to string, rather than
# the pysb type specific __repr__
if pysb and isinstance(symbol, pysb.Component):
return sp.Symbol(symbol.name, real=True)
else:
# in this case we will use the sympy-specific transform anyway
return symbol
def get_function_extern_declaration(fun: str, name: str) -> str:
"""
Constructs the extern function declaration for a given function
:param fun:
function name
:param name:
model name
:return:
c++ function declaration string
"""
return \
f'extern void {fun}_{name}{functions[fun]["signature"]};'
def get_sunindex_extern_declaration(fun: str, name: str,
indextype: str) -> str:
"""
Constructs the function declaration for an index function of a given
function
:param fun:
function name
:param name:
model name
:param indextype:
index function {'colptrs', 'rowvals'}
:return:
c++ function declaration string
"""
index_arg = ', int index' if fun in multiobs_functions else ''
return \
f'extern void {fun}_{indextype}_{name}' \
f'(SUNMatrixWrapper &{indextype}{index_arg});'
def get_model_override_implementation(fun: str, name: str,
nobody: bool = False) -> str:
"""
Constructs amici::Model::* override implementation for a given function
:param fun:
function name
:param name:
model name
:param nobody:
whether the function is to be generated with an empty body
(i.e. has no nontrivial implementation)
:return:
c++ function implementation string
"""
impl = 'virtual void f{fun}{signature} override {{'
if nobody:
impl += '}}\n'
else:
impl += '\n{ind8}{fun}_{name}{eval_signature};\n{ind4}}}\n'
return impl.format(
ind4=' '*4,
ind8=' '*8,
fun=fun,
name=name,
signature=functions[fun]["signature"],
eval_signature=remove_typedefs(functions[fun]["signature"])
)
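# For a hypothetical function 'xdot' of model 'mymodel' this yields C++ along
# the lines of (sketch; the actual signature is taken from `functions`):
#
#   virtual void fxdot(...) override {
#       xdot_mymodel(...);
#   }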
def get_sunindex_override_implementation(fun: str, name: str,
indextype: str,
nobody: bool = False) -> str:
"""
Constructs the amici::Model:: function implementation for an index
function of a given function
:param fun:
function name
:param name:
model name
:param indextype:
index function {'colptrs', 'rowvals'}
:param nobody:
whether the corresponding index function is to be generated with an
empty body
:return:
c++ function implementation string
"""
index_arg = ', int index' if fun in multiobs_functions else ''
index_arg_eval = ', index' if fun in multiobs_functions else ''
impl = 'virtual void f{fun}_{indextype}{signature} override {{'
if nobody:
impl += '}}\n'
else:
impl += '\n{ind8}{fun}_{indextype}_{name}{eval_signature};\n{ind4}}}\n'
return impl.format(
ind4=' '*4,
ind8=' '*8,
fun=fun,
indextype=indextype,
name=name,
signature=f'(SUNMatrixWrapper &{indextype}{index_arg})',
eval_signature=f'({indextype}{index_arg_eval})',
)
def remove_typedefs(signature: str) -> str:
"""
Strips typedef info from a function signature
:param signature:
function signature
:return:
string that can be used to construct function calls with the same
variable names and ordering as in the function signature
"""
# remove * prefix for pointers (pointer types must always be removed before
# value types, otherwise we would inadvertently strip the value type from a
# pointer; the same applies to const specifications)
#
# always add whitespace after type definition for cosmetic reasons
typedefs = [
'const realtype *',
'const double *',
'const realtype ',
'double *',
'realtype *',
'const int ',
'int ',
'SUNMatrixContent_Sparse ',
'gsl::span<const int>'
]
for typedef in typedefs:
signature = signature.replace(typedef, '')
return signature
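# Example (sketch): turning a declaration signature into a call signature.
#
# >>> remove_typedefs('(realtype *xdot, const realtype *x, const realtype *p)')
# '(xdot, x, p)'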
def get_switch_statement(condition: str, cases: Dict[int, List[str]],
indentation_level: Optional[int] = 0,
indentation_step: Optional[str] = ' ' * 4):
"""
Generate code for switch statement
:param condition:
Condition for switch
:param cases:
Cases as dict with expressions as keys and statements as
lists of strings
:param indentation_level:
indentation level
:param indentation_step:
indentation whitespace per level
:return:
Code for switch expression as list of strings
"""
lines = list()
if not cases:
return lines
for expression, statements in cases.items():
if statements:
lines.append((indentation_level + 1) * indentation_step
+ f'case {expression}:')
for statement in statements:
lines.append((indentation_level + 2) * indentation_step
+ statement)
lines.append((indentation_level + 2) * indentation_step + 'break;')
if lines:
lines.insert(0, indentation_level * indentation_step
+ f'switch({condition}) {{')
lines.append(indentation_level * indentation_step + '}')
return lines
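# Example (sketch): two non-empty cases at the default indentation level.
#
# >>> get_switch_statement('ip', {0: ['x[0] = p[0];'], 2: ['x[1] = p[2];']})
# ['switch(ip) {',
#  '    case 0:',
#  '        x[0] = p[0];',
#  '        break;',
#  '    case 2:',
#  '        x[1] = p[2];',
#  '        break;',
#  '}']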
def csc_matrix(matrix: sp.Matrix,
rownames: List[sp.Symbol],
colnames: List[sp.Symbol],
identifier: Optional[int] = 0,
pattern_only: Optional[bool] = False) -> Tuple[
List[int], List[int], sp.Matrix, List[str], sp.Matrix
]:
"""
Generates the column pointers, row values, sparse expressions,
symbolic identifiers and sparse matrix for a symbolic
variable
:param matrix:
dense matrix to be sparsified
:param rownames:
ids of the variable of which the derivative is computed (assuming
matrix is the jacobian)
:param colnames:
ids of the variable with respect to which the derivative is computed
(assuming matrix is the jacobian)
:param identifier:
additional identifier that gets appended to symbol names to
ensure their uniqueness in outer loops
:param pattern_only:
flag for computing sparsity pattern without whole matrix
:return:
symbol_col_ptrs, symbol_row_vals, sparse_list, symbol_list,
sparse_matrix
"""
idx = 0
nrows, ncols = matrix.shape
if not pattern_only:
sparse_matrix = sp.zeros(nrows, ncols)
symbol_list = []
sparse_list = []
symbol_col_ptrs = []
symbol_row_vals = []
for col in range(0, ncols):
symbol_col_ptrs.append(idx)
for row in range(0, nrows):
if matrix[row, col] == 0:
continue
symbol_row_vals.append(row)
idx += 1
symbol_name = f'd{_print_with_exception(rownames[row])}' \
f'_d{_print_with_exception(colnames[col])}'
if identifier:
symbol_name += f'_{identifier}'
symbol_list.append(symbol_name)
if pattern_only:
continue
sparse_matrix[row, col] = sp.Symbol(symbol_name, real=True)
sparse_list.append(matrix[row, col])
if idx == 0:
symbol_col_ptrs = [] # avoid bad memory access for empty matrices
else:
symbol_col_ptrs.append(idx)
if pattern_only:
sparse_matrix = None
else:
sparse_list = sp.Matrix(sparse_list)
return symbol_col_ptrs, symbol_row_vals, sparse_list, symbol_list, \
sparse_matrix
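# Example (sketch): sparsifying a 2x2 diagonal Jacobian; zero entries are
# skipped, so only two identifiers are created.
#
# >>> x, y = sp.symbols('x y')
# >>> colptrs, rowvals, exprs, names, sparse = csc_matrix(
# ...     sp.Matrix([[x, 0], [0, y]]),
# ...     rownames=sp.symbols('f0 f1'), colnames=sp.symbols('x0 x1'))
# # colptrs == [0, 1, 2], rowvals == [0, 1], names == ['df0_dx0', 'df1_dx1']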
def is_valid_identifier(x: str) -> bool:
"""
Check whether `x` is a valid identifier for conditions, parameters,
observables, etc. Identifiers may only contain upper and lower case letters,
digits and underscores, and must not start with a digit.
:param x:
string to check
:return:
``True`` if valid, ``False`` otherwise
"""
return re.match(r'^[a-zA-Z_]\w*$', x) is not None
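# Quick sanity checks (sketch):
#
# >>> is_valid_identifier('k_on2')
# True
# >>> is_valid_identifier('2fast')  # starts with a digit
# False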
def generate_measurement_symbol(observable_id: Union[str, sp.Symbol]):
"""
Generates the appropriate measurement symbol for the provided observable
:param observable_id:
symbol (or string representation) of the observable
:return:
symbol for the corresponding measurement
"""
if not isinstance(observable_id, str):
observable_id = strip_pysb(observable_id)
return symbol_with_assumptions(f'm{observable_id}')
def generate_flux_symbol(reaction_index: int) -> sp.Symbol:
"""
Generate identifier symbol for a reaction flux.
This function will always return the same unique python object for a
given entity.
:param reaction_index:
index of the reaction to which the flux corresponds
:return:
identifier symbol
"""
return symbol_with_assumptions(f'flux_r{reaction_index}')
def symbol_with_assumptions(name: str):
"""
Central function to create symbols with consistent, canonical assumptions
:param name:
name of the symbol
:return:
symbol with canonical assumptions
"""
return sp.Symbol(name, real=True)
def cast_to_sym(value: Union[SupportsFloat, sp.Expr, BooleanAtom],
input_name: str) -> sp.Expr:
"""
Typecasts the value to sympy.Float if possible, and ensures the
value is a symbolic expression.
:param value:
value to be cast
:param input_name:
name of input variable
:return:
typecast value
"""
if isinstance(value, (sp.RealNumber, numbers.Number)):
value = sp.Float(float(value))
elif isinstance(value, BooleanAtom):
value = sp.Float(float(bool(value)))
if not isinstance(value, sp.Expr):
raise TypeError(f"Couldn't cast {input_name} to sympy.Expr, was "
f"{type(value)}")
return value
@contextlib.contextmanager
def _monkeypatched(obj: object, name: str, patch: Any):
"""
Temporarily monkeypatches an object.
:param obj:
object to be patched
:param name:
name of the attribute to be patched
:param patch:
patched value
"""
pre_patched_value = getattr(obj, name)
setattr(obj, name, patch)
try:
yield obj
finally:
setattr(obj, name, pre_patched_value)
def _custom_pow_eval_derivative(self, s):
"""
Custom Pow derivative that removes a removeable singularity for
self.base == 0 and self.base.diff(s) == 0. This function is intended to
be monkeypatched into sp.Pow._eval_derivative.
:param self:
sp.Pow class
:param s:
variable with respect to which the derivative will be computed
"""
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
part1 = sp.Pow(self.base, self.exp - 1) * self.exp * dbase
part2 = self * dexp * sp.log(self.base)
if self.base.is_nonzero or dbase.is_nonzero or part2.is_zero:
# first piece never applies or is zero anyways
return part1 + part2
return part1 + sp.Piecewise(
(self.base, sp.And(sp.Eq(self.base, 0), sp.Eq(dbase, 0))),
(part2, True)
)
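# Intended usage (sketch): temporarily install the patched derivative while
# differentiating, so that 0**n terms do not yield nan in the derivative.
#
# with _monkeypatched(sp.Pow, '_eval_derivative',
#                     _custom_pow_eval_derivative):
#     dfdx = expr.diff(x)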
def _custom_print_max(self, expr):
"""
Custom Max printing function, see https://github.com/sympy/sympy/pull/20558
"""
from sympy import Max
if len(expr.args) == 1:
return self._print(expr.args[0])
return "%smax(%s, %s)" % (self._ns, self._print(expr.args[0]),
self._print(Max(*expr.args[1:])))
def _custom_print_min(self, expr):
"""
Custom Min printing function, see https://github.com/sympy/sympy/pull/20558
"""
from sympy import Min
if len(expr.args) == 1:
return self._print(expr.args[0])
return "%smin(%s, %s)" % (self._ns, self._print(expr.args[0]),
self._print(Min(*expr.args[1:])))
| AMICI-developer/AMICI | python/amici/ode_export.py | Python | bsd-2-clause | 137,457 | ["DIRAC", "Gaussian"] | 9917cdac720147844cdae9c1b93a48cf99cd6f3fc13e7dd32f978c81d7b41bf0 |
#!/usr/bin/env python
# File: plot_icd_vs_colorgrad.py
# Created on: Tue 08 May 2012 11:03:26 AM CDT
# Last Change: Sun 21 Oct 2012 02:43:33 PM CDT
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import pylab as pyl
from mk_galaxy_struc import mk_galaxy_struc
galaxies = mk_galaxy_struc()
f1 = pyl.figure(1,figsize=(8,8))
f1s1 = f1.add_subplot(221)
f1s2 = f1.add_subplot(222)
f1s3 = f1.add_subplot(223)
f1s4 = f1.add_subplot(224)
for galaxy in galaxies:
    if galaxy.ston_I >= 30. and galaxy.Color_grad is not None and \
            galaxy.sersic is not None:
if galaxy.sersic < 1.:
col1 =f1s1.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
edgecolor='w')
if 1. < galaxy.sersic < 2.:
col2 =f1s2.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50,c='k',
edgecolor='w')
if 2. < galaxy.sersic < 3.:
col3 =f1s3.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
edgecolor='w')
if 3. < galaxy.sersic:
col4 =f1s4.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
edgecolor='w')
#pyl.scatter(galaxy.ICD_IH,galaxy.Color_grad,s=50,edgecolor='w')
#f1s1.vlines(0.04,-3.,1,lw=2,zorder=0)
#f1s1.hlines(0.0,-0.1,0.25,lw=2,zorder=0)
#pyl.text(0.24, 0.7, "Blue Core, Red Edge", size=15, ha="right", va="top",
# bbox = dict(boxstyle="round", ec=(1., 0.5, 0.5),
# fc=(1., 0.8, 0.8)))
#pyl.text(0.24, -2.5, "Red Core, Blue Edge", size=15, ha="right", va="top",
# bbox = dict(boxstyle="round", ec=(1., 0.5, 0.5),
# fc=(1., 0.8, 0.8)))
# Finish Plot
f1s1.set_xlim(-0.05,0.25)
f1s1.set_ylim(-3.,1)
f1s2.set_xlim(-0.05,0.25)
f1s2.set_ylim(-3.,1)
f1s3.set_xlim(-0.05,0.25)
f1s3.set_ylim(-3.,1)
f1s4.set_xlim(-0.05,0.25)
f1s4.set_ylim(-3.,1)
#pyl.subplots_adjust(left=0.15,bottom=0.15)
f1s1.set_xlabel(r'$\xi[I,H]$')
f1s1.set_ylabel('Color Gradient')
pyl.savefig('icd_vs_color_grad_vs_sersic_IH.eps', bbox_inches='tight')
pyl.show()
| boada/ICD | sandbox/legacy_plot_code/plot_icd_vs_colorgrad_vs_sersic.py | Python | mit | 1,979 | ["Galaxy"] | fdf1d3038e1c570d16c378f57982aabcce3a3bae09b4181904922eca6bb9ef45 |
""" Test case for DIRAC.Core.Utilities.Network module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from DIRAC.Core.Utilities.Network import discoverInterfaces, getFQDN, getIPsForHostName, checkHostsMatch
def test_discoverInterfaces():
interfaces = discoverInterfaces()
assert len(interfaces) >= 1
assert "lo" in interfaces
for interfaceInfo in interfaces.values():
assert "ip" in interfaceInfo
assert "mac" in interfaceInfo
def test_getFQDN():
assert isinstance(getFQDN(), str)
@pytest.mark.parametrize(
"host1, host2, isValid, expected",
[
("localhost", "localhost", True, True),
("localhost", "example.com", True, False),
("localhost", "example.invalid", False, False),
("example.com", "localhost", True, False),
("example.invalid", "localhost", False, False),
],
)
def test_checkHostsMatch(host1, host2, isValid, expected):
result = checkHostsMatch(host1, host2)
if isValid:
assert result["OK"]
assert result["Value"] is expected
else:
assert not result["OK"]
@pytest.mark.parametrize("hostname", ["localhost", "example.com"])
def test_getIPsForHostName(hostname):
result = getIPsForHostName(hostname)
assert result["OK"]
assert len(result["Value"]) >= 1
| ic-hep/DIRAC | src/DIRAC/Core/Utilities/test/Test_Network.py | Python | gpl-3.0 | 1,386 | ["DIRAC"] | 1a6d97fdda84ab2b288ff877c758c087e90abec1460a8d24681928ea4984cecc |
#!/usr/bin/env python
from nac.workflows.input_validation import process_input
from nac.workflows import (
workflow_derivative_couplings, workflow_single_points, workflow_stddft)
import argparse
import os
import yaml
msg = "namd.py -i input"
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('-i', required=True,
help="Input file in YAML format")
dict_workflows = {'absorption_spectrum': workflow_stddft,
'derivative_couplings': workflow_derivative_couplings,
'single_points': workflow_single_points}
def main():
input_file = read_cmd_line()
with open(input_file, 'r') as f:
dict_input = yaml.load(f, Loader=yaml.FullLoader)
if 'workflow' not in dict_input:
raise RuntimeError("The name of the workflow is required in the input file")
else:
workflow_name = dict_input['workflow']
# Read and process input
inp = process_input(input_file, workflow_name)
# run workflow
function = dict_workflows[workflow_name]
# if comm is None or comm.Get_rank() == 0:
print("Running worflow: ", os.path.abspath(input_file))
function(inp)
def read_cmd_line():
"""
Read the input file and the workflow name from the command line
"""
args = parser.parse_args()
return args.i
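# A minimal input file looks like this (sketch; only the 'workflow' key is
# checked above, all remaining keys are workflow-specific):
#
#   workflow: absorption_spectrum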
if __name__ == "__main__":
main()
| felipeZ/nonAdiabaticCoupling | scripts/cli/run_workflow.py | Python | mit | 1,377 | ["NAMD"] | d995dbf3c929a15ac660448bcd60b8fd9dfcb5d3aba343c87fd238043bf91930 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
| amjames/psi4 | psi4/driver/util/__init__.py | Python | lgpl-3.0 | 913 | ["Psi4"] | 103b56a78d94d0fd9bfd46dfe3772e3566801ca77263e709151c72c2a7694207 |
#!/usr/bin/env python
###########################################################################
# #
# This Python script may be used to simulate a monatomic LJ fluid in the #
# NVE or NVT ensemble. The starting configuration may be taken from #
# either a LAMMPS data file or by generating coordinates on a lattice. #
# #
###########################################################################
def openmpi_workaround():
# find the flag to be set to open all shared libraries at once
try:
import dl
globalFlag = dl.RTLD_GLOBAL
except:
try:
import ctypes
globalFlag = ctypes.RTLD_GLOBAL
except:
print 'ATTENTION: could not find flag RTLD_GLOBAL for dlopen'
# take a good value for Linux (but is platform-dependent)
globalFlag = 256
# now set this flag so that dlopen will use it
import sys
flags = sys.getdlopenflags()
sys.setdlopenflags(flags | globalFlag)
openmpi_workaround()
import sys
import time
import _espressopp
import espressopp
import mpi4py.MPI as MPI
import logging
from espressopp import Real3D, Int3D
from espressopp.tools import lammps
from espressopp.tools import decomp
from espressopp.tools import lattice
from espressopp.tools import timers
# logging.getLogger("Storage").setLevel(logging.INFO)
# simulation parameters (nvt = False implies NVE)
steps = 10
rc = 2.5
skin = 0.3
nvt = False
timestep = 0.005
######################################################################
### IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ###
######################################################################
sys.stdout.write('Setting up simulation ...\n')
x, y, z, Lx, Ly, Lz, vx, vy, vz = lammps.read('espressopp_lennard_jones.start')
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
# add particles to the system and then decompose
props = ['id', 'type', 'mass', 'pos', 'v']
new_particles = []
for i in range(num_particles):
part = [i + 1, 0, 1.0, Real3D(x[i], y[i], z[i]), Real3D(vx[i], vy[i], vz[i])]
new_particles.append(part)
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
# all particles interact via a LJ interaction (use Verlet lists)
vl = espressopp.VerletList(system, cutoff=rc+system.skin)
#potLJ = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=rc, shift=False)
#interLJ = espressopp.interaction.VerletListLennardJones(vl)
potLJ = espressopp.interaction.LennardJonesGromacs(epsilon=1.0, sigma=1.0, r1=2.0, cutoff=rc, shift=False)
potLJX = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=rc, shift=False)
interLJ = espressopp.interaction.VerletListLennardJonesGromacs(vl)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
system.addInteraction(interLJ)
for i in range(2,101):
r = (i/100.0) * rc
print r, potLJ.computeEnergy(r), potLJX.computeEnergy(r), potLJX.computeForce(Real3D(r, 0, 0))
# setup integrator
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.dt = timestep
if(nvt):
langevin = espressopp.integrator.LangevinThermostat(system)
langevin.gamma = 1.0
langevin.temperature = 1.0
integrator.addExtension(langevin)
print ''
print 'number of particles =', num_particles
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'nvt =', nvt
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis
temperature = espressopp.analysis.Temperature(system)
pressure = espressopp.analysis.Pressure(system)
pressureTensor = espressopp.analysis.PressureTensor(system)
fmt = '%5d %8.4f %10.5f %8.5f %12.3f %12.3f %12.3f\n'
T = temperature.compute()
P = pressure.compute()
Pij = pressureTensor.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interLJ.computeEnergy()
sys.stdout.write(' step T P Pxy etotal epotential ekinetic\n')
sys.stdout.write(fmt % (0, T, P, Pij[3], Ek + Ep, Ep, Ek))
start_time = time.clock()
integrator.run(steps)
end_time = time.clock()
T = temperature.compute()
P = pressure.compute()
Pij = pressureTensor.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interLJ.computeEnergy()
sys.stdout.write(fmt % (steps, T, P, Pij[3], Ek + Ep, Ep, Ek))
sys.stdout.write('\n')
timers.show(integrator.getTimers(), precision=3)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
| acfogarty/espressopp | bench/lennard_jones/espressopp/espressopp_lennard_jones.py | Python | gpl-3.0 | 5,196 | ["LAMMPS"] | 09963eb09c773b6e1f41c5df47d908e13db930f175572d1f1e34e0fd4b325490 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, with_statement
import os
import pyqtgraph.multiprocess as mp
from acq4.devices.AxoPatch200 import CancelException
from acq4.devices.DAQGeneric import DAQGeneric, DAQGenericTask, DAQGenericTaskGui
from acq4.devices.PatchClamp import PatchClamp
from pyqtgraph.WidgetGroup import WidgetGroup
from acq4.util import Qt
from acq4.util.Mutex import Mutex
from acq4.util.debug import printExc
Ui_MockClampDevGui = Qt.importTemplate('.devTemplate')
ivModes = {'I=0': 'IC', 'VC': 'VC', 'IC': 'IC'}
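# Map amplifier modes onto the two holding families: I=0 is treated as
# current clamp (IC) for holding-value purposes.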
modeNames = ['VC', 'I=0', 'IC']
class MockClamp(PatchClamp):
def __init__(self, dm, config, name):
PatchClamp.__init__(self, dm, config, name)
# Generate config to use for DAQ
self.devLock = Mutex(Mutex.Recursive)
self.daqConfig = {
'command': config['Command'],
'primary': config['ScaledSignal'],
}
self.holding = {
'VC': config.get('vcHolding', -0.05),
'IC': config.get('icHolding', 0.0)
}
self.mode = 'I=0'
self.config = config
# create a daq device under the hood
self.daqDev = DAQGeneric(dm, self.daqConfig, '{}Daq'.format(name))
try:
self.setHolding()
except:
printExc("Error while setting holding value:")
# Start a remote process to run the simulation.
self.process = mp.Process()
rsys = self.process._import('sys')
rsys._setProxyOptions(returnType='proxy') # need to access remote path by proxy, not by value
rsys.path.append(os.path.abspath(os.path.dirname(__file__)))
if config['simulator'] == 'builtin':
self.simulator = self.process._import('hhSim')
elif config['simulator'] == 'neuron':
self.simulator = self.process._import('neuronSim')
dm.declareInterface(name, ['clamp'], self)
def createTask(self, cmd, parentTask):
return MockClampTask(self, cmd, parentTask)
def taskInterface(self, taskRunner):
return MockClampTaskGui(self, taskRunner)
def deviceInterface(self, win):
return MockClampDevGui(self)
def setHolding(self, mode=None, value=None, force=False):
global ivModes
with self.devLock:
currentMode = self.getMode()
if mode is None:
mode = currentMode
ivMode = ivModes[mode] ## determine vc/ic
if value is None:
value = self.holding[ivMode]
else:
self.holding[ivMode] = value
if ivMode == ivModes[currentMode] or force:
# gain = self.getCmdGain(mode)
## override the scale since getChanScale won't necessarily give the correct value
## (we may be about to switch modes)
# DAQGeneric.setChanHolding(self, 'command', value, scale=gain)
pass
self.sigHoldingChanged.emit('primary', self.holding.copy())
def setChanHolding(self, chan, value=None):
if chan == 'command':
self.setHolding(value=value)
else:
self.daqDev.setChanHolding(self, chan, value)
def getChanHolding(self, chan):
if chan == 'command':
return self.getHolding()
else:
return self.daqDev.getChanHolding(chan)
def getHolding(self, mode=None):
global ivModes
with self.devLock:
if mode is None:
mode = self.getMode()
ivMode = ivModes[mode] ## determine vc/ic
return self.holding[ivMode]
def getState(self):
return {
'mode': self.getMode(),
}
def listModes(self):
global modeNames
return modeNames
def setMode(self, mode):
"""Set the mode of the AxoPatch (by requesting user intervention). Takes care of switching holding levels in I=0 mode if needed."""
mode = mode.upper()
startMode = self.getMode()
if startMode == mode:
return
startIvMode = ivModes[startMode]
ivMode = ivModes[mode]
if (startIvMode == 'VC' and ivMode == 'IC') or (startIvMode == 'IC' and ivMode == 'VC'):
## switch to I=0 first
# self.requestModeSwitch('I=0')
self.mode = 'I=0'
self.setHolding(ivMode, force=True) ## we're in I=0 mode now, so it's ok to force the holding value.
### TODO:
### If mode switches back the wrong direction, we need to reset the holding value and cancel.
self.mode = ivMode
self.sigStateChanged.emit(self.getState())
def getMode(self):
return self.mode
def getChanUnits(self, chan):
global ivModes
iv = ivModes[self.getMode()]
if iv == 'VC':
units = ['V', 'A']
else:
units = ['A', 'V']
if chan == 'command':
return units[0]
elif chan == 'secondary':
return units[0]
elif chan == 'primary':
return units[1]
def readChannel(self, ch):
pass
def quit(self):
# self.process.send(None)
self.process.close()
self.daqDev.quit()
def getDAQName(self, channel):
"""Return the DAQ name used by this device. (assumes there is only one DAQ for now)"""
return self.daqConfig[channel]['device']
def autoPipetteOffset(self):
"""Automatically set the pipette offset.
"""
pass
def autoBridgeBalance(self):
"""Automatically set the bridge balance.
"""
pass
def autoCapComp(self):
"""Automatically configure capacitance compensation.
"""
pass
class MockClampTask(DAQGenericTask):
def __init__(self, dev, cmd, parentTask):
## make a few changes for compatibility with multiclamp
if 'daqProtocol' not in cmd:
cmd['daqProtocol'] = {}
daqP = cmd['daqProtocol']
if 'command' in cmd:
if 'holding' in cmd:
daqP['command'] = {'command': cmd['command'], 'holding': cmd['holding']}
else:
daqP['command'] = {'command': cmd['command']}
daqP['command']['lowLevelConf'] = {'mockFunc': self.write}
cmd['daqProtocol']['primary'] = {'record': True, 'lowLevelConf': {'mockFunc': self.read}}
DAQGenericTask.__init__(self, dev.daqDev, cmd['daqProtocol'], parentTask)
self.cmd = cmd
self.clampDev = dev
modPath = os.path.abspath(os.path.split(__file__)[0])
def configure(self):
### Record initial state or set initial value
##if 'holding' in self.cmd:
## self.dev.setHolding(self.cmd['mode'], self.cmd['holding'])
if 'mode' in self.cmd:
self.clampDev.setMode(self.cmd['mode'])
mode = self.clampDev.getMode()
self.ampState = {
'mode': mode,
'primaryUnits': 'A' if mode == 'VC' else 'V',
# copying multiclamp format here, but should eventually pick something more universal
'ClampParams': ({
'BridgeBalResist': 0,
'BridgeBalEnable': True,
} if mode == 'IC' else {}),
}
### Do not configure daq until mode is set. Otherwise, holding values may be incorrect.
DAQGenericTask.configure(self)
def read(self):
## Called by DAQGeneric to simulate a read-from-DAQ
res = self.job.result(timeout=30)._getValue()
return res
def write(self, data, dt):
## Called by DAQGeneric to simulate a write-to-DAQ
self.job = self.clampDev.simulator.run({'data': data, 'dt': dt, 'mode': self.cmd['mode']}, _callSync='async')
def isDone(self):
## check on neuron process
# return self.process.poll() is not None
return True
def stop(self, abort=False):
DAQGenericTask.stop(self, abort)
def getResult(self):
result = DAQGenericTask.getResult(self)
result._info[-1]['startTime'] = next(iter(result._info[-1][self.clampDev.getDAQName("primary")].values()))['startTime']
result._info[-1]['ClampState'] = self.ampState
return result
class MockClampTaskGui(DAQGenericTaskGui):
def __init__(self, dev, taskRunner):
DAQGenericTaskGui.__init__(self, dev.daqDev, taskRunner, ownUi=False)
self.clampDev = dev
self.layout = Qt.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.splitter1 = Qt.QSplitter()
self.splitter1.setOrientation(Qt.Qt.Horizontal)
self.layout.addWidget(self.splitter1)
self.splitter2 = Qt.QSplitter()
self.splitter2.setOrientation(Qt.Qt.Vertical)
self.modeCombo = Qt.QComboBox()
self.splitter2.addWidget(self.modeCombo)
self.modeCombo.addItems(self.clampDev.listModes())
self.splitter3 = Qt.QSplitter()
self.splitter3.setOrientation(Qt.Qt.Vertical)
(w1, p1) = self.createChannelWidget('primary')
(w2, p2) = self.createChannelWidget('command')
self.cmdWidget = w2
self.inputWidget = w1
self.cmdPlot = p2
self.inputPlot = p1
self.cmdWidget.setMeta('x', siPrefix=True, suffix='s', dec=True)
self.cmdWidget.setMeta('y', siPrefix=True, dec=True)
self.splitter1.addWidget(self.splitter2)
self.splitter1.addWidget(self.splitter3)
self.splitter2.addWidget(w1)
self.splitter2.addWidget(w2)
self.splitter3.addWidget(p1)
self.splitter3.addWidget(p2)
self.splitter1.setSizes([100, 500])
self.stateGroup = WidgetGroup([
(self.splitter1, 'splitter1'),
(self.splitter2, 'splitter2'),
(self.splitter3, 'splitter3'),
])
self.modeCombo.currentIndexChanged.connect(self.modeChanged)
self.modeChanged()
def saveState(self):
"""Return a dictionary representing the current state of the widget."""
state = {}
state['daqState'] = DAQGenericTaskGui.saveState(self)
state['mode'] = self.getMode()
# state['holdingEnabled'] = self.ctrl.holdingCheck.isChecked()
# state['holding'] = self.ctrl.holdingSpin.value()
return state
def restoreState(self, state):
"""Restore the state of the widget from a dictionary previously generated using saveState"""
# print 'state: ', state
# print 'DaqGeneric : ', dir(DAQGenericTaskGui)
if 'mode' in state:
self.modeCombo.setCurrentIndex(self.modeCombo.findText(state['mode']))
# self.ctrl.holdingCheck.setChecked(state['holdingEnabled'])
# if state['holdingEnabled']:
# self.ctrl.holdingSpin.setValue(state['holding'])
if 'daqState' in state:
return DAQGenericTaskGui.restoreState(self, state['daqState'])
else:
return None
def generateTask(self, params=None):
daqTask = DAQGenericTaskGui.generateTask(self, params)
task = {
'mode': self.getMode(),
'daqProtocol': daqTask
}
return task
def modeChanged(self):
global ivModes
ivm = ivModes[self.getMode()]
w = self.cmdWidget
if ivm == 'VC':
scale = 1e-3
cmdUnits = 'V'
inpUnits = 'A'
else:
scale = 1e-12
cmdUnits = 'A'
inpUnits = 'V'
self.inputWidget.setUnits(inpUnits)
self.cmdWidget.setUnits(cmdUnits)
self.cmdWidget.setMeta('y', minStep=scale, step=scale * 10, value=0.)
self.inputPlot.setLabel('left', units=inpUnits)
self.cmdPlot.setLabel('left', units=cmdUnits)
# w.setScale(scale)
# for s in w.getSpins():
# s.setOpts(minStep=scale)
self.cmdWidget.updateHolding()
def getMode(self):
return str(self.modeCombo.currentText())
def sequenceChanged(self):
self.sigSequenceChanged.emit(self.clampDev.name())
def getChanHolding(self, chan):
if chan == 'command':
return self.clampDev.getHolding(self.getMode())
else:
raise Exception("Can't get holding value for channel %s" % chan)
class MockClampDevGui(Qt.QWidget):
def __init__(self, dev):
Qt.QWidget.__init__(self)
self.dev = dev
self.ui = Ui_MockClampDevGui()
self.ui.setupUi(self)
self.ui.vcHoldingSpin.setOpts(step=1, minStep=1e-3, dec=True, suffix='V', siPrefix=True)
self.ui.icHoldingSpin.setOpts(step=1, minStep=1e-12, dec=True, suffix='A', siPrefix=True)
# self.ui.modeCombo.currentIndexChanged.connect(self.modeComboChanged)
self.modeRadios = {
'VC': self.ui.vcModeRadio,
'IC': self.ui.icModeRadio,
'I=0': self.ui.i0ModeRadio,
}
self.updateStatus()
for v in self.modeRadios.values():
v.toggled.connect(self.modeRadioChanged)
self.ui.vcHoldingSpin.valueChanged.connect(self.vcHoldingChanged)
self.ui.icHoldingSpin.valueChanged.connect(self.icHoldingChanged)
self.dev.sigHoldingChanged.connect(self.devHoldingChanged)
self.dev.sigStateChanged.connect(self.devStateChanged)
def updateStatus(self):
global modeNames
mode = self.dev.getMode()
if mode is None:
return
vcHold = self.dev.getHolding('VC')
icHold = self.dev.getHolding('IC')
self.modeRadios[mode].setChecked(True)
# self.ui.modeCombo.setCurrentIndex(self.ui.modeCombo.findText(mode))
self.ui.vcHoldingSpin.setValue(vcHold)
self.ui.icHoldingSpin.setValue(icHold)
def devHoldingChanged(self, chan, hval):
if isinstance(hval, dict):
self.ui.vcHoldingSpin.blockSignals(True)
self.ui.icHoldingSpin.blockSignals(True)
self.ui.vcHoldingSpin.setValue(hval['VC'])
self.ui.icHoldingSpin.setValue(hval['IC'])
self.ui.vcHoldingSpin.blockSignals(False)
self.ui.icHoldingSpin.blockSignals(False)
def devStateChanged(self):
mode = self.dev.getMode()
for r in self.modeRadios.values():
r.blockSignals(True)
# self.ui.modeCombo.blockSignals(True)
# self.ui.modeCombo.setCurrentIndex(self.ui.modeCombo.findText(mode))
self.modeRadios[mode].setChecked(True)
# self.ui.modeCombo.blockSignals(False)
for r in self.modeRadios.values():
r.blockSignals(False)
def vcHoldingChanged(self):
self.dev.setHolding('VC', self.ui.vcHoldingSpin.value())
def icHoldingChanged(self):
self.dev.setHolding('IC', self.ui.icHoldingSpin.value())
def modeRadioChanged(self, m):
try:
if not m:
return
for mode, r in self.modeRadios.items():
if r.isChecked():
self.dev.setMode(mode)
except CancelException:
self.updateStatus()
| acq4/acq4 | acq4/devices/MockClamp/MockClamp.py | Python | mit | 15,237 | ["NEURON"] | fe9d81b49531830fadcd64c7b660b0673e901579abf7c39f3cce21d34d71ff52 |
"""
Acceptance tests for Home Page (My Courses / My Libraries).
"""
from bok_choy.web_app_test import WebAppTest
from opaque_keys.edx.locator import LibraryLocator
from ...fixtures import PROGRAMS_STUB_URL
from ...fixtures.config import ConfigModelFixture
from ...fixtures.programs import ProgramsFixture
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.index import DashboardPage, DashboardPageWithPrograms
from ...pages.lms.account_settings import AccountSettingsPage
from ..helpers import (
select_option_by_text,
get_selected_option_text
)
class CreateLibraryTest(WebAppTest):
"""
Test that we can create a new content library on the studio home page.
"""
def setUp(self):
"""
Load the helper for the home page (dashboard page)
"""
super(CreateLibraryTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
def test_create_library(self):
"""
From the home page:
Click "New Library"
Fill out the form
Submit the form
We should be redirected to the edit view for the library
Return to the home page
The newly created library should now appear in the list of libraries
"""
name = "New Library Name"
org = "TestOrgX"
number = "TESTLIB"
self.auth_page.visit()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.has_library(name=name, org=org, number=number))
self.assertTrue(self.dashboard_page.has_new_library_button())
self.dashboard_page.click_new_library()
self.assertTrue(self.dashboard_page.is_new_library_form_visible())
self.dashboard_page.fill_new_library_form(name, org, number)
self.assertTrue(self.dashboard_page.is_new_library_form_valid())
self.dashboard_page.submit_new_library_form()
# The next page is the library edit view; make sure it loads:
lib_page = LibraryEditPage(self.browser, LibraryLocator(org, number))
lib_page.wait_for_page()
# Then go back to the home page and make sure the new library is listed there:
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.has_library(name=name, org=org, number=number))
class DashboardProgramsTabTest(WebAppTest):
"""
Test the programs tab on the studio home page.
"""
def setUp(self):
super(DashboardProgramsTabTest, self).setUp()
ProgramsFixture().install_programs([])
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPageWithPrograms(self.browser)
self.auth_page.visit()
def set_programs_api_configuration(self, is_enabled=False, api_version=1, api_url=PROGRAMS_STUB_URL,
js_path='/js', css_path='/css'):
"""
Dynamically adjusts the programs API config model during tests.
"""
ConfigModelFixture('/config/programs', {
'enabled': is_enabled,
'enable_studio_tab': is_enabled,
'enable_student_dashboard': is_enabled,
'api_version_number': api_version,
'internal_service_url': api_url,
'public_service_url': api_url,
'authoring_app_js_path': js_path,
'authoring_app_css_path': css_path,
'cache_ttl': 0
}).install()
def test_tab_is_disabled(self):
"""
The programs tab and "new program" button should not appear at all
unless enabled via the config model.
"""
self.set_programs_api_configuration()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.is_programs_tab_present())
self.assertFalse(self.dashboard_page.is_new_program_button_present())
def test_tab_is_enabled_with_empty_list(self):
"""
The programs tab and "new program" button should appear when enabled
via config. When the programs list is empty, a button should appear
that allows creating a new program.
"""
self.set_programs_api_configuration(True)
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.is_programs_tab_present())
self.assertTrue(self.dashboard_page.is_new_program_button_present())
results = self.dashboard_page.get_program_list()
self.assertEqual(results, [])
self.assertTrue(self.dashboard_page.is_empty_list_create_button_present())
def test_tab_is_enabled_with_nonempty_list(self):
"""
The programs tab and "new program" button should appear when enabled
via config, and the results of the program list should display when
the list is nonempty.
"""
test_program_values = [('first program', 'org1'), ('second program', 'org2')]
ProgramsFixture().install_programs(test_program_values)
self.set_programs_api_configuration(True)
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.is_programs_tab_present())
self.assertTrue(self.dashboard_page.is_new_program_button_present())
results = self.dashboard_page.get_program_list()
self.assertEqual(results, test_program_values)
self.assertFalse(self.dashboard_page.is_empty_list_create_button_present())
def test_tab_requires_staff(self):
"""
The programs tab and "new program" button will not be available, even
when enabled via config, if the user is not global staff.
"""
self.set_programs_api_configuration(True)
AutoAuthPage(self.browser, staff=False).visit()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.is_programs_tab_present())
self.assertFalse(self.dashboard_page.is_new_program_button_present())
class StudioLanguageTest(WebAppTest):
""" Test suite for the Studio Language """
def setUp(self):
super(StudioLanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.account_settings = AccountSettingsPage(self.browser)
AutoAuthPage(self.browser).visit()
def test_studio_language_change(self):
"""
Scenario: Ensure that language selection is working fine.
        First I go to the user dashboard page in Studio and see that 'English' is selected by default.
        Then I choose 'Dummy Language' from the drop-down at the top of the page.
        Then I visit the student account settings page and see that the language has been updated to
        'Dummy Language' in both drop-downs.
"""
dummy_language = u'Dummy Language (Esperanto)'
self.dashboard_page.visit()
language_selector = self.dashboard_page.language_selector
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
select_option_by_text(language_selector, dummy_language)
self.dashboard_page.wait_for_ajax()
self.account_settings.visit()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), dummy_language)
self.assertEqual(
get_selected_option_text(language_selector),
u'Dummy Language (Esperanto)'
)
|
antoviaque/edx-platform
|
common/test/acceptance/tests/studio/test_studio_home.py
|
Python
|
agpl-3.0
| 7,445
|
[
"VisIt"
] |
d3e3febf188cf6ae53acdc99c0b3b452bd9800b7b143a0276c11adc0bd41fca5
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
from espressomd import System, polymer, interactions
class DiamondPolymer(ut.TestCase):
"""
Test the functionality of espressomd.polymer.setup_diamond_polymer()
in terms of
* properties of particles created
* connections via bonds
* the geometry of the polymer network
"""
system = System(box_l=3 * [16])
diamond_params = {'MPC': 15,
'dist_cM': 3,
'val_cM': -1.3,
'val_nodes': 0.6,
'start_id': 3,
'no_bonds': False,
'type_nodes': 2,
'type_nM': 5,
'type_cM': 7}
bond_length = system.box_l[0] * \
(0.25 * np.sqrt(3)) / (diamond_params['MPC'] + 1)
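    # (Derivation note, not in the original test: neighbouring diamond nodes
    # sit at e.g. (0, 0, 0) and box_l / 4 * (1, 1, 1), i.e. box_l * sqrt(3) / 4
    # apart, and a chain of MPC monomers splits that distance into MPC + 1
    # bonds of equal rest length.)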
def setUp(self):
bond = interactions.HarmonicBond(k=1.5, r_0=self.bond_length, r_cut=3)
self.system.bonded_inter.add(bond)
polymer.setup_diamond_polymer(system=self.system,
bond=bond,
**self.diamond_params)
self.system.time_step = 0.1
self.node_parts = self.system.part.select(
type=self.diamond_params['type_nodes'])
self.charged_mono = self.system.part.select(
type=self.diamond_params['type_cM'])
self.noncharged_mono = self.system.part.select(
type=self.diamond_params['type_nM'])
def tearDown(self):
self.system.part.clear()
@utx.skipIfMissingFeatures(["ELECTROSTATICS"])
def test_particle_properties(self):
"""
checks if the particles created have the right type and charge
"""
# number of particles created
number_non_node = 16 * self.diamond_params['MPC']
self.assertEqual(len(self.system.part), number_non_node + 8)
# number of each type
self.assertEqual(len(self.node_parts), 8)
self.assertEqual(len(self.charged_mono),
np.rint(number_non_node / self.diamond_params['dist_cM']))
self.assertEqual(len(self.noncharged_mono),
np.rint(number_non_node * (1 - 1 / self.diamond_params['dist_cM'])))
# charge
np.testing.assert_allclose(self.node_parts.q,
len(self.node_parts) * [self.diamond_params['val_nodes']])
np.testing.assert_allclose(self.noncharged_mono.q,
len(self.noncharged_mono) * [0])
np.testing.assert_allclose(self.charged_mono.q,
len(self.charged_mono) * [self.diamond_params['val_cM']])
# particle id
self.assertGreaterEqual(min(self.system.part[:].id),
self.diamond_params['start_id'])
def test_bonds(self):
"""
test that the right number of bonds is formed on each particle
"""
# 4 bonds on nodes
for part in self.node_parts:
self.assertEqual(len(part.bonds), 4)
# 1 or 0 bonds on chain monomers
n_bonds = [len(bonds) for bonds in
self.noncharged_mono.bonds + self.charged_mono.bonds]
self.assertLessEqual(np.max(n_bonds), 1)
# total number of bonds
number_non_node = 16 * self.diamond_params['MPC']
self.assertEqual(np.sum(n_bonds), number_non_node - 16)
@utx.skipIfMissingFeatures(["EXTERNAL_FORCES"])
def test_connected(self):
"""
test that all particles in the polymer are connected by pushing one particle
"""
self.system.part[self.diamond_params['start_id']].ext_force = 3 * [2]
self.system.integrator.run(200)
vels = np.linalg.norm(self.system.part[:].v, axis=1)
self.assertGreater(np.min(vels), 1e-6)
def test_geometry(self):
"""
        check that the distance between all monomers is correct,
        that only nearest-neighbouring nodes are connected
        and that the nodes are at the right positions
"""
# Energy calculation checks distance indirectly through
# position of minimum of HarmonicBond
# With formula for self.bond_length this also ensures
# that only nearest neighbours can be reached
E = self.system.analysis.energy()['total']
self.assertAlmostEqual(E, 0., delta=1e-13)
node_pos_scaled = np.array(self.node_parts.pos) / self.system.box_l[0]
node_pos_shouldbe = 0.25 * np.array([[0, 0, 0], [1, 1, 1],
[2, 2, 0], [0, 2, 2],
[2, 0, 2], [3, 3, 1],
[1, 3, 3], [3, 1, 3]])
for pos1, pos2 in zip(node_pos_scaled, node_pos_shouldbe):
np.testing.assert_allclose(pos1, pos2)
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/polymer_diamond.py
|
Python
|
gpl-3.0
| 5,649
|
[
"ESPResSo"
] |
f39de00919a7756ff8d4a5860c067e89d013fd20c27197e84c6d0369071b26b9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <miha.purg@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
"""
This module contains the make_fep() function for generating Q FEP-files,
and implements a custom exception class QMakeFepError.
"""
from __future__ import absolute_import, division, unicode_literals
import six
from six.moves import range
from io import open
import sys
import os
import re
import time
import tempfile
import logging
from collections import OrderedDict as ODict
from Qpyl.core.qlibrary import QLib, QLibError
from Qpyl.core.qparameter import QPrm, QPrmError
from Qpyl.core.qstructure import QStruct, QStructError
from Qpyl.core.qtopology import QTopology, QTopologyError
from Qpyl.common import raise_or_log, __version__
logger = logging.getLogger(__name__)
SUPPORTED_FF = ["amber", "oplsaa"]
class QMakeFepError(Exception):
pass
class _FepPrmMorse(object):
"""Class for Morse parameters."""
def __init__(self, harmonic_prm):
self.harmonic_prm = harmonic_prm
def make_fep(qmap_file, pdb_file, forcefield,
parm_files, lib_files, ignore_errors=False):
"""Generate a template FEP file for EVB simulations in Q.
Parses a QMAP file (see below), state 1 structure file (PDB) and
all states libraries and parameters, and determines the changes
in connectivity/charges/parameters that occur between the states.
QMAP is a text file that defines mappings of library ids (for each state)
    to state 1 structure/topology ids, best explained with an example:
q 315.O OHH.O OHH.O
q 315.H1 OHH.H1 HIP.HE2
q 315.H2 OHH.H2 OHH.H2
q 155.NE2 HID.NE2 HIP.NE2
...
n 155.CA HID.CA HIP.CA
The first column defines the atom as being a 'Q' atom or a 'neighbouring'
atom. The latter will not be included in the 'Q-region' but will be
included in the 'change_bonds/change_angles...' sections in case there is a
change in bonding/parameters outside the Q region. Additionally, you can
define a 'q' atom with 'q_qcp', which will create an additional section for
isotopically clean masses used in QCP calculations.
The second column is the PDB ID, comprised of residue index and atom name,
separated by a dot.
The third column is the library ID of this atom in state 1, comprised of
residue name and atom name (should be the same as in the structure).
The fourth column is the library ID of this atom in state 2.
Additional columns can be added for other states.
The returned template string contains several missing parts, denoted with
<FIX>, which have to be manually replaced with appropriate values. These
include the softpair C parameters, Morse parameters, Hij parameters.
Args:
qmap_file (string): QMAP file path
pdb_file (string): state 1 PDB file path (the one built with qprep)
forcefield (string): forcefield type (see SUPPORTED_FF)
        parm_files (list): Q parameter-file paths
        lib_files (list): Q library-file paths
        ignore_errors (boolean, optional): don't fail on certain non-critical\
            errors
Returns:
fepstr (string): fepfile template
Raises:
QMakeFepError
"""
if forcefield not in SUPPORTED_FF:
raise QMakeFepError("Force field '{}' not supported. Use {}"
"".format(forcefield, " or ".join(SUPPORTED_FF)))
fep_types = {"atoms": [], "bonds": [], "angles": [],
"torsions": [], "impropers": []}
fep_changes = {"atoms": [], "charges": [],
"bonds": ODict(), "angles": ODict(),
"torsions": ODict(), "impropers": ODict()}
fep_qcp_atoms = []
fep_morse_prms = {}
fep_reacting_atoms = set()
num_evb_states = None
# parse the MAP file
# pdb_ids_map = [ ('q', [pdbid1_state1,]),
# ('q', [pdbid2_state1,]),
# ...
# ('n', [pdbid11_state1,]),
# ...
# ]
# lib_ids_map = [ [lib_id1_state1, lib_id2_state1...],
# [lib_id1_state2, lib_id2_state2...],
# ...
# ]
#
lib_ids_map = []
pdb_ids_map = []
with open(qmap_file, 'r') as qatom_map:
for i, line in enumerate(qatom_map.readlines()):
            line = re.split(r"#|\*|!", line, 1)[0].strip()  # remove comments
if line == "":
continue
c = line.split()
atom_type = c[0].lower()
pdb_id = c[1]
lib_ids = c[2:]
if atom_type not in ["q", "n", "q_qcp"]:
raise QMakeFepError("Lines in the QMAP file should begin "
"with either 'q' (qatom) or 'n' "
"(neighboring atom) or 'q_qcp' "
"(QPI q atom)")
try:
resid, name = pdb_id.split(".")
if not name or not int(resid): raise ValueError
except ValueError:
raise QMakeFepError("Invalid PDB ID '{}'. Should be "
"RESID.ATOMNAME".format(pdb_id))
tmp = (atom_type, [pdb_id,])
if tmp in pdb_ids_map:
raise QMakeFepError("Duplicate PDB ID: '{}'".format(pdb_id))
pdb_ids_map.append(tmp)
            if num_evb_states is None:
num_evb_states = len(lib_ids)
elif len(lib_ids) != num_evb_states:
raise QMakeFepError("Number of states in line '{}' not equal "
"to number of PDB files".format(line))
for state, lib_id in enumerate(lib_ids):
try:
resname, name = lib_id.split(".")
if not resname or not name: raise ValueError
except ValueError:
raise QMakeFepError("Invalid library ID '{}'. Should be "
"RESNAME.ATOMNAME".format(lib_id))
try:
if lib_id in lib_ids_map[state]:
raise QMakeFepError("The library IDs in one EVB state "
"should be unique (double '{}'), "
"otherwise proper bonding can't "
"be determined.".format(lib_id))
except IndexError:
lib_ids_map.append([])
lib_ids_map[state].append(lib_id)
# load libraries
qlib = QLib(forcefield, ignore_errors=ignore_errors)
for lib in lib_files:
try:
qlib.read_lib(lib)
except QLibError as e:
raise QMakeFepError("Problem parsing lib ({}): {}"
"".format(lib, e))
# make dummy structures for other states
structures = [None for _ in range(num_evb_states)]
structures[0] = pdb_file
libid_pdbid_map = [{} for _ in range(num_evb_states)]
for state in range(1, num_evb_states):
state_structure = []
atom_index = 1
processed_residues = []
for i, (q_or_n, pdb_ids_all_states) in enumerate(pdb_ids_map):
lib_id = lib_ids_map[state][i]
resname, aname = lib_id.split(".")
# add all atoms of current residue to the dummy structure
# at the same time, storing the mapping lib_id:pdb_id
# in libid_pdbid_map for later
if resname not in processed_residues:
try:
residue_lib = qlib.residue_dict[resname]
except KeyError:
raise QMakeFepError("Residue '{}' not found in library."
"".format(resname))
processed_residues.append(resname)
res_index = len(processed_residues)
for atom in residue_lib.atoms:
lib_id2 = "{}.{}".format(resname, atom.name)
pdb_id2 = "{}.{}".format(res_index, atom.name)
state_structure.append("{:<6}{:5} {:4} {:3} {:5} "
"{:8.3f}{:8.3f}{:8.3f}"
"".format("ATOM", atom_index,
atom.name, resname,
res_index, 0, 0, 0))
atom_index += 1
# map the newly created dummy atom's pdb_id to lib_id
libid_pdbid_map[state][lib_id2] = pdb_id2
# add pdb_id of current atom in current (dummy structure)
# state to pdb_ids_map (using its lib_id)
try:
pdb_id_this_state = libid_pdbid_map[state][lib_id]
except KeyError:
raise QMakeFepError("Library ID '{}' not valid.".format(lib_id))
pdb_ids_all_states.append(pdb_id_this_state)
        fd, structures[state] = tempfile.mkstemp()
        os.close(fd)
        with open(structures[state], "w") as state_pdb:
            state_pdb.write("\n".join(state_structure))
# DEBUG
# print "Dummy PDB for st.{}: {}".format(state + 1, structures[state])
# load parameters
qprm = QPrm(forcefield, ignore_errors=ignore_errors)
for parm in parm_files:
try:
qprm.read_prm(parm)
except QPrmError as e:
raise QMakeFepError("Problem with parm ({}): {}"
"".format(parm, e))
# load structures and make topologies
topologies = []
for state in range(num_evb_states):
try:
qstruct = QStruct(structures[state], "pdb",
ignore_errors=ignore_errors)
except QStructError as e:
raise QMakeFepError("Problem parsing PDB file ({}): {} "
"".format(structures[state], e))
try:
topologies.append(QTopology(qlib, qprm, qstruct))
except QTopologyError as e:
raise QMakeFepError("Problem building the topology: {}"
"".format(e))
# Make _TopoAtom (atoms in QTopology) maps out of qmap's lists
# and extract types, type changes and charge changes
#
# atom_map = [ [_TopoAtom1_state1, _TopoAtom1_state2, ... ],
# [_TopoAtom2_state1, _TopoAtom2_state2, ... ],
# [_TopoAtom3_state1, _TopoAtom3_state2, ... ],
# ...
# ]
atom_map = []
for i, (q_or_n, pdb_id_all_states) in enumerate(pdb_ids_map):
atom_all_states = []
for state, pdb_id in enumerate(pdb_id_all_states):
residue, aname = pdb_id.split(".")
try:
residue = topologies[state].residues[int(residue)-1]
atom = [a for a in residue.atoms if a.name == aname][0]
except (KeyError, IndexError) as e:
raise QMakeFepError("Atom '{}' doesn't exist in PDB '{}'"
"".format(pdb_id, structures[state]))
atom_all_states.append(atom)
atom_map.append(atom_all_states)
# check for stupidity - lib_id in QMAP state 1
# not matching the structure/topology
lib_id_qmap = lib_ids_map[0][i]
lib_id = "{}.{}".format(atom_all_states[0].residue.name,
atom_all_states[0].name)
if lib_id != lib_id_qmap:
pdb_id = pdb_ids_map[i][1][0]
raise QMakeFepError("QMAP state 1 library ID ({}) of atom '{}' "
"doesn't match topology library ID ({})."
"".format(lib_id_qmap, pdb_id, lib_id))
# For Q atoms (and not the neighbor atoms):
# get FEP atom types, type changes and charge changes
if q_or_n in ["q", "q_qcp"]:
for atom in atom_all_states:
if atom.prm not in fep_types["atoms"]:
fep_types["atoms"].append(atom.prm)
fep_changes["charges"].append([a.charge for a in atom_all_states])
fep_changes["atoms"].append(atom_all_states)
if q_or_n == "q_qcp":
fep_qcp_atoms.append(atom_all_states)
charge_sums = []
for state in range(num_evb_states):
charge_sum = sum([c[state] for c in fep_changes["charges"]])
if abs(round(charge_sum) - charge_sum) > 1e-6:
raise_or_log("Net charge in state {} not integer: {}"
"".format(state + 1, charge_sum),
QMakeFepError, logger, ignore_errors=ignore_errors)
charge_sums.append(charge_sum)
if any([abs(c-charge_sums[0]) > 1e-6 for c in charge_sums]):
logger.warning("Net charge changes between states: {}"
"".format(" -> ".join([str(c) for c in charge_sums])))
# get all Bonds, Angles, Torsions and Impropers which include
# at least one atom defined in qmap
batis = {"bonds": [], "angles": [], "torsions": [], "impropers": []}
batis["bonds"] = [set() for _ in range(num_evb_states)]
batis["angles"] = [set() for _ in range(num_evb_states)]
batis["torsions"] = [set() for _ in range(num_evb_states)]
batis["impropers"] = [set() for _ in range(num_evb_states)]
for atom_all_states in atom_map:
for state, atom in enumerate(atom_all_states):
_ = [batis["bonds"][state].add(b) for b in atom.bonds]
_ = [batis["angles"][state].add(a) for a in atom.angles]
_ = [batis["torsions"][state].add(t) for t in atom.torsions]
_ = [batis["impropers"][state].add(i) for i in atom.impropers]
# map the bonds,angles,torsions,impropers (bati) in different states
# to same key (ordered list of state1 PDB_IDs)
#
# bati_map =
# { "bonds": {state1_bond1_key: [bond1_state1, bond1_state2,...],
# state1_bond2_key: [bond2_state1, bond2_state2,...], ...},
# "angles": {state1_angle1_key: [angle1_state1, angle1_state2,...],...}
# ... }
#
# also, include only batis which have all atoms defined in qmap
    # also, detect inter-residue batis and raise QMakeFepError
bati_map = {"bonds": {}, "angles": {}, "torsions": {}, "impropers": {}}
for state in range(num_evb_states):
atoms_in_state = [a_all_st[state] for a_all_st in atom_map]
for bati_type in bati_map:
for bati in batis[bati_type][state]:
# find the corresponding atoms in state1
try:
atoms_st1 = [atom_map[atoms_in_state.index(a)][0] for a in
bati.atoms]
except ValueError:
# one of the Atoms is not defined in QMAP
continue
pdbid_index = []
for atom in atoms_st1:
pdbid_index.append((atom.index,
"{}.{}".format(atom.residue.index,
atom.name)))
                # order the pdbids canonically to prevent double entries
                # (taking min() of the list and its reverse picks one
                # direction, so e.g. A-B-C and C-B-A share the same key)
if bati_type == "bonds":
pids = sorted(pdbid_index)
elif bati_type == "angles":
pids = min(pdbid_index, list(reversed(pdbid_index)))
elif bati_type == "torsions":
pids = min(pdbid_index, list(reversed(pdbid_index)))
elif bati_type == "impropers":
# topology order == library order == correct order
pids = pdbid_index
key = " ".join([p[1] for p in pids])
# check for bonds/angles/torsions/impropers that are
# shared between residues
residue_ids = set(atom.residue.index for atom in bati.atoms)
if len(residue_ids) > 1:
raise QMakeFepError("Inter-residue bond/angle/torsion '{}'"
" not supported. Combine the residues "
"into a single library entry if you "
"want to make changes over the "
"'head-tail' bond.".format(key))
# add bati to bati_map
try:
bati_map[bati_type][key][state] = bati
except KeyError:
bati_map[bati_type][key] = [None for _ in
range(num_evb_states)]
bati_map[bati_type][key][state] = bati
# DEBUG
# for k,v in bati_map.iteritems():
# print k
# for k2, v2 in v.iteritems():
# print k2, v2[0], v2[1]
def _bati_sort(key, bati_all_states):
        # sort bonds/angles/... by their key, but give breaking and
        # forming entries (those containing None) priority
        try:
            return (-1 * bati_all_states.index(None), key)
        except ValueError:
            return (1, key)
# find changes between states (add to fep_changes dict)
for bati_type, batis in six.iteritems(bati_map):
for bati_key, bati_all_states in sorted(batis.items(),
key=lambda key_val: \
_bati_sort(key_val[0], key_val[1])):
# bond/angle/.. breaking or forming
if None in bati_all_states:
fep_changes[bati_type][bati_key] = bati_all_states
# add bond atoms to "reactive atoms" set
# and replace the bond parameter with a Morse type
if bati_type == "bonds":
for bati in bati_all_states:
if bati != None:
fep_reacting_atoms |= set(bati.atoms)
# the bond parameter is replaced with a Morse
# parameter (_FepPrmMorse)
prm_id = bati.prm.prm_id
try:
bati.prm = fep_morse_prms[prm_id]
except KeyError:
bati.prm = _FepPrmMorse(bati.prm)
fep_morse_prms[prm_id] = bati.prm
            # no break/form: check whether the actual parameter values
            # differ between the states
else:
tmp = [bati_all_states[0].prm.strval == bati.prm.strval
for bati in bati_all_states]
if not all(tmp):
fep_changes[bati_type][bati_key] = bati_all_states
# DEBUG
# for k,v in fep_changes.iteritems():
# print k
# try:
# for k2,(v1,v2) in v.iteritems():
# print k2,v1,v2
# except:
# for (v1,v2) in v:
# print v1,v2
# add parameters of changing batis to fep_types
for bati_type in bati_map:
for bati_all_states in fep_changes[bati_type].values():
prms = [bati.prm for bati in bati_all_states if bati != None]
for prm in prms:
if prm not in fep_types[bati_type]:
fep_types[bati_type].append(prm)
# DEBUG
# for k,v in fep_types.iteritems():
# print k
# for v2 in v:
# print v2
    # propagate reactivity: if an atom is reacting in any state, flag its
    # counterparts in all other states as reacting too
for atom_all_states in fep_changes["atoms"]:
for atom in atom_all_states:
if atom in fep_reacting_atoms:
fep_reacting_atoms |= set(atom_all_states)
########################
# Prepare the output
########################
fep_l = {"atoms": [],
"atom_types": [],
"qcp_mass": [],
"change_atoms": [],
"change_charges": [],
"soft_pairs": [],
"off_diagonals": [],
"bond_types": [],
"change_bonds": [],
"angle_types": [],
"change_angles": [],
"torsion_types": [],
"change_torsions": [],
"improper_types": [],
"change_impropers": []}
####################
# ATOMS
# CHANGE_ATOMS
# CHANGE_CHARGES
####################
format_atoms = "{:<15} {:<10} # {:<15} {:<15} {:>3}"
format_ch_atoms = "{:<10} " + " {:<12}"*num_evb_states + " # {:<}"
format_ch_crgs = "{:<10} " + " {:12}"*num_evb_states + " # {:<10}"\
+ " {:>12}"*(num_evb_states-1)
format_qcp = "{:<15} {:<10} # {:<10}"
fep_l["atoms"].append(format_atoms.format("#Q index", "PDB index",
"St.1 PDB_ID", "St.1 LIB_ID", ""))
tmp = ["#Q index"]
tmp.extend(["Type st.{}".format(n+1) for n in range(num_evb_states)])
tmp.append("St.1 PDB_ID")
fep_l["change_atoms"].append(format_ch_atoms.format(*tmp))
tmp = ["#Q index"]
tmp.extend(["Charge st.{}".format(n+1) for n in range(num_evb_states)])
tmp.append("St.1 PDB_ID")
tmp.extend(["dq({}->{})".format(n+1, n+2) for n in range(num_evb_states-1)])
fep_l["change_charges"].append(format_ch_crgs.format(*tmp))
if fep_qcp_atoms:
fep_l["qcp_mass"].append("[qcp_mass]")
fep_l["qcp_mass"].append(format_qcp.format("#Q index", "Mass",
"St.1 PDB_ID"))
for i, atom_all_states in enumerate(fep_changes["atoms"]):
q_index = i + 1
a = atom_all_states[0]
pdb_id = "{}.{}".format(a.residue.index, a.name)
lib_id = "{}.{}".format(a.residue.name, a.name)
# atoms
reacting_flag = " !" * bool([atom for atom in atom_all_states
if atom in fep_reacting_atoms])
fep_l["atoms"].append(format_atoms.format(q_index, "$"+pdb_id+"$",
pdb_id, lib_id,
reacting_flag))
# change_atoms
tmp = [q_index] + [a.prm.prm_id for a in atom_all_states] + [pdb_id]
fep_l["change_atoms"].append(format_ch_atoms.format(*tmp))
# charges
crgs = [float(a.charge) for a in atom_all_states]
tmp = [q_index] + crgs + [pdb_id] \
+ [crgs[n+1]-crgs[n] for n in range(num_evb_states-1)]
fep_l["change_charges"].append(format_ch_crgs.format(*tmp))
# qcp_atoms
if atom_all_states in fep_qcp_atoms:
fep_l["qcp_mass"].append(format_qcp.format(q_index,
"<FIX>", pdb_id))
###############
# ATOM_TYPES
###############
format_atypes = "{:<12} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10}"
if forcefield == "amber":
fep_l["atom_types"].append(format_atypes.format("#Atom_type", "LJ_Rm",
"LJ_eps", "SP_Ci",
"SP_ai", "LJ_Rm",
"LJ_eps_14", "mass"))
else:
fep_l["atom_types"].append(format_atypes.format("#Atom_type", "LJ_A",
"LJ_B", "SP_Ci",
"SP_ai", "LJ_A_14",
"LJ_B_14", "mass"))
fep_reacting_atoms_prms = [a.prm for a in fep_reacting_atoms]
for prm in fep_types["atoms"]:
sp_c = 1
sp_a = 2.5
if prm in fep_reacting_atoms_prms:
sp_c = "<FIX>"
if forcefield == "amber":
lj1, lj2 = prm.lj_R, prm.lj_eps
lj3, lj4 = lj1, round(lj2/1.2, 4)
else:
lj1, lj2 = prm.lj_A, prm.lj_B
lj3, lj4 = round(lj1/(2**0.5), 4), round(lj2/(2**0.5), 4)
fep_l["atom_types"].append(format_atypes.format(prm.prm_id, lj1, lj2,
sp_c, sp_a, lj3, lj4,
prm.mass))
###############
# BOND_TYPES
###############
format_hbonds = "{:<8} {:>10} {:>10} # {}"
format_mbonds = "{:<8} {:^10} {:^10} {:>10} # {}"
fep_l["bond_types"].append("## Harmonic format")
fep_l["bond_types"].append(format_hbonds.format("#Index", "Fc",
"r0", "PRM_ID"))
fep_l["bond_types"].append("## Morse format")
fep_l["bond_types"].append(format_mbonds.format("#Index", "D", "alpha",
"r0", "PRM_ID"))
for i, bond_type in enumerate(fep_types["bonds"]):
b_index = i + 1
if isinstance(bond_type, _FepPrmMorse):
prm_id = "-".join(bond_type.harmonic_prm.prm_id.split())
tmp = format_mbonds.format(b_index, "<FIX_D>", "<FIX_a>",
"<FIX_r0>", prm_id)
fep_l["bond_types"].append(tmp)
else:
prm_id = "-".join(bond_type.prm_id.split())
tmp = format_hbonds.format(b_index, bond_type.fc, bond_type.r0,
prm_id)
fep_l["bond_types"].append(tmp)
###############
# CHANGE_BONDS
###############
format_bondch = "{:<10} {:<10} " + "{:^5} "*num_evb_states + " # {}"
tmp = ["#Atom1", "Atom2"]
tmp.extend(["St.{}".format(n+1) for n in range(num_evb_states)])
tmp.append("St.1 PDB_IDs")
fep_l["change_bonds"].append(format_bondch.format(*tmp))
for bond_key, bond_all_states in six.iteritems(fep_changes["bonds"]):
# bond_key == "PDB_ID1 PDB_ID2"
prm_indexes = []
for b in bond_all_states:
if b == None:
prm_indexes.append(0)
else:
btype_index = fep_types["bonds"].index(b.prm) + 1
prm_indexes.append(btype_index)
placeholders = ["${}$".format(a) for a in bond_key.split()]
pdb_id = "-".join(bond_key.split())
tmp = placeholders + prm_indexes + [pdb_id]
fep_l["change_bonds"].append(format_bondch.format(*tmp))
###############
# ANGLE_TYPES
###############
format_angles = "{:<8} {:>10} {:>10} # {}"
fep_l["angle_types"].append(format_angles.format("#Index", "Fc",
"theta0", "PRM_ID"))
for i, angle_type in enumerate(fep_types["angles"]):
an_index = i + 1
prm_id = "-".join(angle_type.prm_id.split())
tmp = format_angles.format(an_index, angle_type.fc,
angle_type.theta0, prm_id)
fep_l["angle_types"].append(tmp)
#################
# CHANGE_ANGLES
#################
format_angch = "{:<10} {:<10} {:<10} " + "{:^5} "*num_evb_states + " # {}"
tmp = ["#Atom1", "Atom2", "Atom3"]
tmp.extend(["St.{}".format(n+1) for n in range(num_evb_states)])
tmp.append("St.1 PDB_IDs")
fep_l["change_angles"].append(format_angch.format(*tmp))
for angle_key, angle_all_states in six.iteritems(fep_changes["angles"]):
# angle_key == "PDB_ID1 PDB_ID2 PDB_ID3"
prm_indexes = []
for ang in angle_all_states:
if ang == None:
prm_indexes.append(0)
else:
atype_index = fep_types["angles"].index(ang.prm) + 1
prm_indexes.append(atype_index)
placeholders = ["${}$".format(a) for a in angle_key.split()]
pdb_id = "-".join(angle_key.split())
tmp = placeholders + prm_indexes + [pdb_id]
fep_l["change_angles"].append(format_angch.format(*tmp))
#################
# TORSION_TYPES
#################
format_torsions = "{:<8} {:>10} {:>10} {:>10} # {}"
fep_l["torsion_types"].append(format_torsions.format("#Index", "Fc",
"mult", "psi0",
"PRM_ID"))
tor_index = 1
tor_indexes = []
for i, torsion_type in enumerate(fep_types["torsions"]):
prm_id = "-".join(torsion_type.prm_id.split())
prm_indexes = []
for fc, per, psi0, npath in torsion_type.get_prms():
fc = fc/npath
tmp = format_torsions.format(tor_index, fc, per, psi0, prm_id)
fep_l["torsion_types"].append(tmp)
prm_indexes.append(tor_index)
tor_index += 1
tor_indexes.append(prm_indexes)
###################
# CHANGE_TORSIONS
###################
format_torch = "{:<10} {:<10} {:<10} {:<10} " \
+ "{:^5} "*num_evb_states + " # {}"
tmp = ["#Atom1", "Atom2", "Atom3", "Atom4"]
tmp.extend(["St.{}".format(n+1) for n in range(num_evb_states)])
tmp.append("St.1 PDB_IDs")
fep_l["change_torsions"].append(format_torch.format(*tmp))
for torsion_key, torsion_all_states in six.iteritems(fep_changes["torsions"]):
# torsion_key == "PDB_ID1 PDB_ID2 PDB_ID3 PDB_ID4"
for state, tor in enumerate(torsion_all_states):
if tor == None:
continue
for i in range(len(tor.prm.fcs)):
tprm_index = fep_types["torsions"].index(tor.prm)
ttype_index = tor_indexes[tprm_index][i]
prm_indexes = [0 for _ in range(len(torsion_all_states))]
prm_indexes[state] = ttype_index
placeholders = ["${}$".format(t) for t in torsion_key.split()]
pdb_id = "-".join(torsion_key.split())
tmp = placeholders + prm_indexes + [pdb_id]
fep_l["change_torsions"].append(format_torch.format(*tmp))
#################
# IMPROPER_TYPES
#################
format_impropers = "{:<8} {:>10} {:>10} # {}"
fep_l["improper_types"].append(format_impropers.format("#Index",
"Fc", "phi0",
"PRM_ID"))
for i, improper_type in enumerate(fep_types["impropers"]):
imp_index = i + 1
prm_id = "-".join(improper_type.prm_id.split())
tmp = format_impropers.format(imp_index, improper_type.fc,
improper_type.phi0, prm_id)
fep_l["improper_types"].append(tmp)
###################
# CHANGE_IMPROPERS
###################
format_impch = "{:<10} {:<10} {:<10} {:<10} " \
+ "{:^5} "*num_evb_states + " # {}"
tmp = ["#Atom1", "Atom2", "Atom3", "Atom4"]
tmp.extend(["St.{}".format(n+1) for n in range(num_evb_states)])
tmp.append("St.1 PDB_IDs")
fep_l["change_impropers"].append(format_impch.format(*tmp))
for improper_key, improper_all_states in six.iteritems(fep_changes["impropers"]):
# improper_key == "PDB_ID1 PDB_ID2 PDB_ID3 PDB_ID4"
prm_indexes = []
for imp in improper_all_states:
if imp == None:
prm_indexes.append(0)
else:
itype_index = fep_types["impropers"].index(imp.prm) + 1
prm_indexes.append(itype_index)
placeholders = ["${}$".format(i) for i in improper_key.split()]
pdb_id = "-".join(improper_key.split())
tmp = placeholders + prm_indexes + [pdb_id]
fep_l["change_impropers"].append(format_impch.format(*tmp))
##############
# SOFT_PAIRS
##############
for bond_key, bond_all_states in six.iteritems(fep_changes["bonds"]):
if None in bond_all_states:
for state, bond in enumerate(bond_all_states):
if bond == None:
continue
atoms_in_state = [atom_all_states[state] for atom_all_states \
in fep_changes["atoms"]]
a1_qindex = atoms_in_state.index(bond.atoms[0]) + 1
a2_qindex = atoms_in_state.index(bond.atoms[1]) + 1
fep_l["soft_pairs"].append("{:10} {:10}".format(a1_qindex,
a2_qindex))
for k in fep_l.keys():
fep_l[k] = "\n".join(fep_l[k])
fepstr = """\
# Generated with Qtools, version {version}
# Date: {date}
# CWD: {cwd}
# CMDline: {cmd}
#
[FEP]
states {states}
[atoms]
{atoms}
[atom_types]
{atom_types}
[change_atoms]
{change_atoms}
[change_charges]
{change_charges}
[soft_pairs]
{soft_pairs}
[off_diagonals]
# State_i State_j Atom1 Atom2 A_ij mu_ij
#
## Example1, Hij=H12=0 (not known in advance)
## 1 2 13 14 0 0
## Example2, Hij=H12=C*exp(-mu * r_13_14) (C=20.0, mu=0.45)
## 1 2 13 14 20.0 0.45
#
<FIX>
[bond_types]
{bond_types}
[change_bonds]
{change_bonds}
[angle_types]
{angle_types}
[change_angles]
{change_angles}
[torsion_types]
{torsion_types}
[change_torsions]
{change_torsions}
[improper_types]
{improper_types}
[change_impropers]
{change_impropers}
{qcp_mass}
""".format(states=num_evb_states, date=time.ctime(), cmd=" ".join(sys.argv),
cwd=os.getcwd(), version=__version__, **fep_l)
return fepstr
|
mpurg/qtools
|
packages/Qpyl/qmakefep.py
|
Python
|
mit
| 34,438
|
[
"Amber"
] |
2206f2f9f4e3cb80ad1382ce3370c0fc89313dbd457a1f7cd22c22c3b04344db
|
import moogli
import moose
# class MooseSpineHead(moogli.core.Frustum):
# pass
# class MooseSpineShaft(moogli.core.Frustum):
# pass
# class MooseSoma(moogli.core.Sphere):
# pass
# class MooseDendrite(moogli.core.Frustum):
# def __init__(self, moose_element):
# self.parent = pass
# class MooseChemicalCompartment(moogli.core.Frustum):
# pass
# class MooseNetwork()
def read(path="", vertices=10, track_parent_radius=False):
network = moogli.Group(path)
neuron = moogli.Group("neuron")
soma = moogli.Group("soma")
axon = moogli.Group("axon")
dendrite = moogli.Group("dendrite")
basal = moogli.Group("basal")
apical = moogli.Group("apical")
spine = moogli.Group("spine")
head = moogli.Group("head")
shaft = moogli.Group("shaft")
network.attach_group(soma)
network.attach_group(axon)
network.attach_group(dendrite)
network.attach_group(spine)
dendrite.attach_group(basal)
dendrite.attach_group(apical)
spine.attach_group(head)
spine.attach_group(shaft)
compartments = moose.wildcardFind(path + "/##[ISA=CompartmentBase]")
for compartment in compartments:
neuron_id = compartment.parent.path
try:
neuron = network.groups[neuron_id]
soma = neuron.groups["soma"]
axon = neuron.groups["axon"]
dendrite = neuron.groups["dendrite"]
spine = neuron.groups["spine"]
head = spine.groups["head"]
shaft = spine.groups["shaft"]
basal = dendrite.groups["basal"]
apical = dendrite.groups["apical"]
        except KeyError:
neuron = moogli.Group(neuron_id)
soma = moogli.Group("soma")
axon = moogli.Group("axon")
dendrite = moogli.Group("dendrite")
basal = moogli.Group("basal")
apical = moogli.Group("apical")
spine = moogli.Group("spine")
head = moogli.Group("head")
shaft = moogli.Group("shaft")
neuron.attach_group(soma)
neuron.attach_group(axon)
neuron.attach_group(dendrite)
dendrite.attach_group(basal)
dendrite.attach_group(apical)
neuron.attach_group(spine)
spine.attach_group(head)
spine.attach_group(shaft)
network.attach_group(neuron)
distal = moogli.geometry.Vec3f(compartment.x * 1.0e6,
compartment.y * 1.0e6,
compartment.z * 1.0e6)
proximal = moogli.geometry.Vec3f(compartment.x0 * 1.0e6,
compartment.y0 * 1.0e6,
compartment.z0 * 1.0e6)
distal_radius = compartment.diameter * 5.0e5
proximal_radius = compartment.diameter * 5.0e5
if not track_parent_radius:
proximal_radius = distal_radius
if proximal == distal:
shape = moogli.shapes.Sphere(compartment.path,
proximal,
distal_radius,
vertices,
moogli.colors.GREEN)
else:
shape = moogli.shapes.Frustum(compartment.path,
proximal,
distal,
proximal_radius,
distal_radius,
vertices,
moogli.colors.ORANGE,
moogli.colors.ORANGE)
if "axon" in compartment.name:
axon.attach_shape(shape)
network.groups["axon"].attach_shape(shape)
elif "soma" in compartment.name:
soma.attach_shape(shape)
network.groups["soma"].attach_shape(shape)
elif "head" in compartment.name:
head.attach_shape(shape)
network.groups["spine"].groups["head"].attach_shape(shape)
spine.attach_shape(shape)
network.groups["spine"].attach_shape(shape)
elif "shaft" in compartment.name:
shaft.attach_shape(shape)
network.groups["spine"].groups["shaft"].attach_shape(shape)
spine.attach_shape(shape)
network.groups["spine"].attach_shape(shape)
elif "basal" in compartment.name:
basal.attach_shape(shape)
network.groups["dendrite"].groups["basal"].attach_shape(shape)
dendrite.attach_shape(shape)
network.groups["dendrite"].attach_shape(shape)
elif "apical" in compartment.name:
apical.attach_shape(shape)
network.groups["dendrite"].groups["apical"].attach_shape(shape)
dendrite.attach_shape(shape)
network.groups["dendrite"].attach_shape(shape)
else:
dendrite.attach_shape(shape)
network.groups["dendrite"].attach_shape(shape)
neuron.attach_shape(shape)
network.attach_shape(shape)
return network
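# Illustrative usage sketch (the MOOSE model path is an assumption, not from
# the original source): after a morphology has been loaded into MOOSE, the
# whole cell hierarchy can be turned into moogli shapes with:
#
#     network = read("/model/cells", vertices=20)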
|
dilawar/moogli
|
moogli/extensions/moose/network.py
|
Python
|
gpl-2.0
| 5,203
|
[
"MOOSE",
"NEURON"
] |
371d325d3f513893f80c6e24a0c1c53f0a6c4e853fdf6d12a9edcdd91c2caf8d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import re
import abc
import six
from abc import ABCMeta, abstractmethod
"""
Error handlers for errors originating from the Submission systems.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
__all_errors__ = ['SubmitError', 'FullQueueError', 'DiskError', 'TimeCancelError', 'MemoryCancelError',
'NodeFailureError']
@six.add_metaclass(ABCMeta)
class CorrectorProtocolScheduler(object):
"""
    Abstract class to define the protocol / interface for correction operators. The client code (qadapters /
    submission script generator methods / ...) should implement these methods.
"""
@property
@abc.abstractmethod
def name(self):
return str()
@abstractmethod
def exclude_nodes(self, nodes):
"""
        Method to exclude certain nodes from being used in the calculation. It is called when a calculation
        seems to have crashed due to a hardware failure on the specified nodes.
        nodes: list of node numbers that were found to cause problems
        returns True if the nodes could be excluded, False otherwise
"""
@abstractmethod
def increase_mem(self):
"""
        Method to increase the memory available to the calculation. It is called when a calculation seems to
        have crashed due to insufficient memory.
        returns True if the memory could be increased, False otherwise
"""
@abstractmethod
def increase_time(self):
"""
        Method to increase the time limit for the calculation. It is called when a calculation seems to
        have crashed due to a time limit.
        returns True if the time could be increased, False otherwise
"""
@abstractmethod
def increase_cpus(self):
"""
        Method to increase the number of cpus used in the calculation. It is called when a calculation seems to
        have crashed due to time or memory limits being broken.
        returns True if the number of cpus could be increased, False otherwise
"""
@six.add_metaclass(ABCMeta)
class CorrectorProtocolApplication(object):
"""
    Abstract class to define the protocol / interface for correction operators. The client code (qadapters /
    submission script generator methods / ...) should implement these methods.
"""
@property
@abc.abstractmethod
def name(self):
return str()
@abstractmethod
def decrease_mem(self):
"""
        Method to decrease the memory used by the calculation. It is called when a calculation seems to
        have crashed due to insufficient memory.
        returns True if the memory could be decreased, False otherwise
"""
@abstractmethod
def speed_up(self):
"""
        Method to speed up the calculation. It is called when a calculation seems to have broken its time limit.
        returns True if the calculation could be sped up, False otherwise
"""
@six.add_metaclass(ABCMeta)
class AbstractError(object):
"""
Error base class
"""
def __init__(self, errmsg, meta_data):
self.errmsg = errmsg
self.meta_data = meta_data if meta_data is not None else {}
def __str__(self):
_message = '%s %s\n' \
' error message : %s \n' \
' meta data : %s' % (self.name, self.__doc__, self.errmsg, str(self.meta_data))
return _message
@property
def name(self):
return self.__class__.__name__
@property
def scheduler_adapter_solutions(self):
"""
        To be implemented by concrete errors; returns a list of tuples defining corrections. The first element
        of the tuple should be one of the methods in CorrectorProtocolScheduler, the second element should
        contain the arguments.
"""
return []
@property
def application_adapter_solutions(self):
"""
        To be implemented by concrete errors; returns a list of tuples defining corrections. The first element
        of the tuple should be one of the methods in CorrectorProtocolApplication, the second element should
        contain the arguments.
"""
return []
def last_resort_solution(self):
"""
        what to do if everything else fails...
"""
        print('none of the defined solutions for %s returned success...' % self.name)
return
class SubmitError(AbstractError):
"""
Errors occurring at submission. The limits on the cluster may have changed.
"""
class FullQueueError(AbstractError):
"""
    Errors occurring at submission. Too many jobs in the queue / total CPUs / ...
"""
class DiskError(AbstractError):
"""
Errors involving problems writing to disk.
"""
class TimeCancelError(AbstractError):
"""
Error due to exceeding the time limit for the job.
.limit will return a list of limits that were broken, None if it could not be determined.
"""
@property
def limit(self):
return self.meta_data.get('broken_limit')
@property
def scheduler_adapter_solutions(self):
return [(CorrectorProtocolScheduler.increase_time,)]
@property
def application_adapter_solutions(self):
return [(CorrectorProtocolApplication.speed_up,)]
class MemoryCancelError(AbstractError):
"""
Error due to exceeding the memory limit for the job.
.limit will return a list of limits that were broken, None if it could not be determined.
"""
@property
def limit(self):
return self.meta_data.get('broken_limit')
@property
def scheduler_adapter_solutions(self):
return [(CorrectorProtocolScheduler.increase_mem,)]
@property
def application_adapter_solutions(self):
return [(CorrectorProtocolApplication.decrease_mem,)]
class MasterProcessMemoryCancelError(AbstractError):
"""
Error due to exceeding the memory limit for the job on the master node.
"""
class SlaveProcessMemoryCancelError(AbstractError):
"""
Error due to exceeding the memory limit for the job on a node different from the master.
"""
class NodeFailureError(AbstractError):
"""
    Error due to the hardware failure of a specific node.
    .nodes will return a list of problematic nodes, None if it could not be determined.
"""
@property
def nodes(self):
return self.meta_data.get('nodes')
@property
def scheduler_adapter_solutions(self):
return [(CorrectorProtocolScheduler.exclude_nodes, [self.nodes])]
@six.add_metaclass(ABCMeta)
class AbstractErrorParser(object):
"""
    Abstract class for parsing errors originating from the scheduler system and errors that are not reported
    by the program itself, e.g. segmentation faults.
    A concrete implementation of this class for a specific scheduler needs a class attribute ERRORS containing
    a dictionary specifying errors:
ERRORS = {ErrorClass: {
'file_specifier' : {
'string': "the string to be looked for",
            'meta_filter': "string specifying the regular expression to obtain the meta data"
}
}
"""
def __init__(self, err_file, out_file=None, run_err_file=None, batch_err_file=None):
self.files = {'err': err_file, 'out': out_file, 'run_err': run_err_file, 'batch_err': batch_err_file}
self.errors = []
@property
@abc.abstractmethod
def error_definitions(self):
return dict()
@staticmethod
def extract_metadata(lines, meta_filter):
meta_dict = {}
for key in meta_filter.keys():
values = []
for line in lines:
match = re.match(meta_filter[key][0], line)
if match is not None:
values.append(re.match(meta_filter[key][0], line).group(meta_filter[key][1]))
values = sorted(set(values))
meta_dict.update({key: values})
return meta_dict
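    # Illustrative sketch (assumed input, not from the original module): a
    # meta_filter entry maps a key to [regex, group_index]. For example,
    # pulling the broken walltime limit out of a PBS kill message:
    #
    #     lines = ["=>> PBS: job killed: walltime 932 exceeded limit 900"]
    #     mf = {'broken_limit':
    #           [r"=>> PBS: job killed: walltime (\d+) exceeded limit (\d+)", 2]}
    #     AbstractErrorParser.extract_metadata(lines, mf)
    #     # -> {'broken_limit': ['900']}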
def parse_single(self, errmsg):
"""
Parse the provided files for the corresponding strings.
"""
found = False
message = None
metadata = None
for k in errmsg.keys():
if self.files[k] is not None:
#print('parsing ', self.files[k], ' for ', errmsg[k]['string'])
try:
with open(self.files[k], mode='r') as f:
lines = f.read().split('\n')
for line in lines:
if errmsg[k]['string'] in line:
message = line
found = True
if found:
metadata = self.extract_metadata(lines, errmsg[k]['meta_filter'])
except (IOError, OSError):
print(self.files[k], 'not found')
pass
except TypeError:
                print('type error', self.files[k], 'has type', type(self.files[k]), 'should be string.')
pass
return found, message, metadata
def parse(self):
"""
        Parse for the occurrence of all errors defined in ERRORS
"""
errors_tested = 0
for error in self.error_definitions:
errors_tested += 1
result = self.parse_single(self.error_definitions[error])
if result[0]:
self.errors.append(error(result[1], result[2]))
if len(self.errors) > 0:
print('QUEUE_ERROR FOUND')
for error in self.errors:
print(error)
return errors_tested
class SlurmErrorParser(AbstractErrorParser):
"""
Implementation of the error definitions for the Slurm scheduler
"""
@property
def error_definitions(self):
return {
SubmitError: {
'batch_err': {
'string': "Batch job submission failed",
'meta_filter': {}
}
},
FullQueueError: {
'batch_err': {
'string': "Job violates accounting/QOS policy",
'meta_filter': {}
}
},
MemoryCancelError: {
'err': {
'string': "Exceeded job memory limit",
'meta_filter': {}
}
},
#slurmstepd: error: *** JOB 1803480 CANCELLED AT 2015-12-16T14:57:32 DUE TO TIME LIMIT on lmWn009 ***
#slurmstepd: error: *** JOB 1803712 CANCELLED AT 2015-12-17T15:21:41 DUE TO TIME LIMIT on lmWn001 ***
TimeCancelError: {
'err': {
'string': "DUE TO TIME LIMIT",
'meta_filter': {
'time_of_cancel': [r"(.*)JOB (\d+) CANCELLED AT (\S*) DUE TO TIME LIMIT(.*)", 3]
}
}
},
NodeFailureError: {
'run_err': {
'string': "can't open /dev/ipath, network down",
'meta_filter': {
'nodes': [r"node(\d+)\.(\d+)can't open (\S*), network down \(err=26\)", 1]
}
}
},
AbstractError: {
'out': {
'string': "a string to be found",
'meta_filter': {}
}
}
}
class PBSErrorParser(AbstractErrorParser):
"""
Implementation for the PBS scheduler
PBS: job killed: walltime 932 exceeded limit 900
PBS: job killed: walltime 46 exceeded limit 30
PBS: job killed: vmem 2085244kb exceeded limit 1945600kb
"""
@property
def error_definitions(self):
return {
TimeCancelError: {
'out': {
'string': "job killed: walltime",
'meta_filter': {
'broken_limit': [r"=>> PBS: job killed: walltime (\d+) exceeded limit (\d+)", 2]
}
}
},
AbstractError: {
'out': {
'string': "a string to be found",
'meta_filter': {}
}
},
MemoryCancelError: {
'out': {
'string': "job killed: vmem",
'meta_filter': {
'broken_limit': [r"(.*)job killed: vmem (\d+)kb exceeded limit (\d+)kb", 3]
}
}
}
}
ALL_PARSERS = {'slurm': SlurmErrorParser, 'pbspro': PBSErrorParser, 'torque': PBSErrorParser}
def get_parser(scheduler, err_file, out_file=None, run_err_file=None, batch_err_file=None):
"""
    Factory function to provide the parser for the specified scheduler. If the scheduler is not implemented,
    None is returned. The file arguments are strings with the names of the out and err files:
err_file stderr of the scheduler
out_file stdout of the scheduler
run_err_file stderr of the application
batch_err_file stderr of the submission
Returns:
None if scheduler is not supported.
"""
cls = ALL_PARSERS.get(scheduler)
return cls if cls is None else cls(err_file, out_file, run_err_file, batch_err_file)
if __name__ == "__main__":
    my_parser = get_parser('pbspro', err_file='queue.err', out_file='queue.out', run_err_file='run.err',
batch_err_file='sbatch.err')
my_parser.parse()
print('parser.errors', my_parser.errors)
for my_error in my_parser.errors:
print(my_error)
|
setten/pymatgen
|
pymatgen/io/abinit/scheduler_error_parsers.py
|
Python
|
mit
| 14,025
|
[
"pymatgen"
] |
688d15bc0f1da69c60d9a857781034dba2bccfe94171a03bfbe2f851cfdaae04
|
""" X509Chain is a class for managing X509 chains with their Pkeys
"""
__RCSID__ = "$Id$"
import os
import stat
import tempfile
import hashlib
import random
import binascii
from GSI import crypto
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Security.X509Certificate import X509Certificate
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
random.seed()
class X509Chain(object):
__validExtensionValueTypes = (basestring, )
def __init__(self, certList=False, keyObj=False):
self.__isProxy = False
self.__firstProxyStep = 0
self.__isLimitedProxy = True
self.__isRFC = False
self.__hash = False
if certList:
self.__loadedChain = True
self.__certList = certList
else:
self.__loadedChain = False
if keyObj:
self.__loadedPKey = True
self.__keyObj = keyObj
else:
self.__loadedPKey = False
if self.__loadedChain:
self.__checkProxyness()
@classmethod
def instanceFromFile(cls, chainLocation):
""" Instance a X509Chain from a file
"""
chain = cls()
result = chain.loadChainFromFile(chainLocation)
if not result['OK']:
return result
return S_OK(chain)
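  # Illustrative usage sketch (the proxy path is an assumption, not from the
  # original source):
  #
  #     res = X509Chain.instanceFromFile("/tmp/x509up_u1000")
  #     if res['OK']:
  #       chain = res['Value']
  #       print chain.isProxy()['Value'], chain.hasExpired()['Value']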
def loadChainFromFile(self, chainLocation):
"""
    Load an X509 chain from a PEM file
Return : S_OK / S_ERROR
"""
try:
with open(chainLocation) as fd:
pemData = fd.read()
except Exception as e:
return S_ERROR(DErrno.EOF, "%s: %s" % (chainLocation, repr(e).replace(',)', ')')))
return self.loadChainFromString(pemData)
def loadChainFromString(self, data, dataFormat=crypto.FILETYPE_PEM):
"""
    Load an X509 certificate chain from a string containing the PEM data
Return : S_OK / S_ERROR
"""
self.__loadedChain = False
try:
self.__certList = crypto.load_certificate_chain(crypto.FILETYPE_PEM, data)
except Exception as e:
return S_ERROR(DErrno.ECERTREAD, "%s" % repr(e).replace(',)', ')'))
if not self.__certList:
return S_ERROR(DErrno.EX509)
self.__loadedChain = True
# Update internals
self.__checkProxyness()
return S_OK()
def setChain(self, certList):
"""
Set the chain
Return : S_OK / S_ERROR
"""
self.__certList = certList
self.__loadedChain = True
return S_OK()
def loadKeyFromFile(self, chainLocation, password=False):
"""
    Load a PKey from a PEM file
Return : S_OK / S_ERROR
"""
try:
with open(chainLocation) as fd:
pemData = fd.read()
except Exception as e:
return S_ERROR(DErrno.EOF, "%s: %s" % (chainLocation, repr(e).replace(',)', ')')))
return self.loadKeyFromString(pemData, password)
def loadKeyFromString(self, pemData, password=False):
"""
    Load a PKey from a string containing the PEM data
Return : S_OK / S_ERROR
"""
self.__loadedPKey = False
try:
self.__keyObj = crypto.load_privatekey(crypto.FILETYPE_PEM, pemData, password)
except Exception as e:
return S_ERROR(DErrno.ECERTREAD, "%s (Probably bad pass phrase?)" % repr(e).replace(',)', ')'))
self.__loadedPKey = True
return S_OK()
def setPKey(self, pkeyObj):
"""
    Set the private key
Return : S_OK / S_ERROR
"""
self.__keyObj = pkeyObj
self.__loadedPKey = True
return S_OK()
def loadProxyFromFile(self, chainLocation):
"""
    Load a Proxy from a PEM file
Return : S_OK / S_ERROR
"""
try:
with open(chainLocation) as fd:
pemData = fd.read()
except Exception as e:
return S_ERROR(DErrno.EOF, "%s: %s" % (chainLocation, repr(e).replace(',)', ')')))
return self.loadProxyFromString(pemData)
def loadProxyFromString(self, pemData):
"""
    Load a Proxy from a PEM buffer
Return : S_OK / S_ERROR
"""
retVal = self.loadChainFromString(pemData)
if not retVal['OK']:
return retVal
return self.loadKeyFromString(pemData)
def __getProxyExtensionList(self, diracGroup=False, rfc=False, rfcLimited=False):
"""
Get the list of extensions for a proxy
"""
extList = []
extList.append(crypto.X509Extension('keyUsage',
'critical, digitalSignature, keyEncipherment, dataEncipherment'))
if diracGroup and isinstance(diracGroup, self.__validExtensionValueTypes):
extList.append(crypto.X509Extension('diracGroup', diracGroup))
if rfc or rfcLimited:
blob = [["1.3.6.1.5.5.7.21.1"]] if not rfcLimited else [["1.3.6.1.4.1.3536.1.1.1.9"]]
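      # (Note: 1.3.6.1.5.5.7.21.1 is the RFC 3820 "inherit all" proxy policy
      # OID, and 1.3.6.1.4.1.3536.1.1.1.9 is the Globus limited-proxy policy
      # OID; this is background information, not from the original source.)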
asn1Obj = crypto.ASN1(blob)
asn1Obj[0][0].convert_to_object()
asn1dump = binascii.hexlify(asn1Obj.dump())
extval = "critical,DER:" + ":".join(asn1dump[i:i + 2] for i in range(0, len(asn1dump), 2))
ext = crypto.X509Extension("proxyCertInfo", extval)
extList.append(ext)
return extList
def getCertInChain(self, certPos=0):
"""
Get a certificate in the chain
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(X509Certificate(self.__certList[certPos]))
def getIssuerCert(self):
"""
    Get the issuer cert in the chain
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
if self.__isProxy:
return S_OK(X509Certificate(self.__certList[self.__firstProxyStep + 1]))
return S_OK(X509Certificate(self.__certList[-1]))
def getPKeyObj(self):
"""
Get the pkey obj
"""
if not self.__loadedPKey:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(self.__keyObj)
def getCertList(self):
"""
Get the cert list
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(self.__certList)
def getNumCertsInChain(self):
"""
Numbers of certificates in chain
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(len(self.__certList))
def generateProxyToString(self, lifeTime,
diracGroup=False, strength=1024, limited=False, rfc=False, proxyKey=False):
"""
Generate a proxy and get it as a string
Args:
lifeTime (int): expected lifetime in seconds of proxy
diracGroup (str): diracGroup to add to the certificate
strength (int): length in bits of the pair
limited (bool): Create a limited proxy
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
if not self.__loadedPKey:
return S_ERROR(DErrno.ENOPKEY)
if self.__isProxy:
rfc = self.isRFC().get('Value', False)
issuerCert = self.__certList[0]
if not proxyKey:
proxyKey = crypto.PKey()
proxyKey.generate_key(crypto.TYPE_RSA, strength)
proxyCert = crypto.X509()
if rfc:
proxyCert.set_serial_number(str(int(random.random() * 10 ** 10)))
cloneSubject = issuerCert.get_subject().clone()
cloneSubject.insert_entry("CN", str(int(random.random() * 10 ** 10)))
proxyCert.set_subject(cloneSubject)
proxyCert.add_extensions(self.__getProxyExtensionList(diracGroup, rfc and not limited, rfc and limited))
else:
proxyCert.set_serial_number(issuerCert.get_serial_number())
cloneSubject = issuerCert.get_subject().clone()
if limited:
cloneSubject.insert_entry("CN", "limited proxy")
else:
cloneSubject.insert_entry("CN", "proxy")
proxyCert.set_subject(cloneSubject)
proxyCert.add_extensions(self.__getProxyExtensionList(diracGroup))
proxyCert.set_issuer(issuerCert.get_subject())
proxyCert.set_version(issuerCert.get_version())
proxyCert.set_pubkey(proxyKey)
proxyCert.gmtime_adj_notBefore(-900)
proxyCert.gmtime_adj_notAfter(int(lifeTime))
proxyCert.sign(self.__keyObj, 'sha256')
proxyString = "%s%s" % (crypto.dump_certificate(crypto.FILETYPE_PEM, proxyCert),
crypto.dump_privatekey(crypto.FILETYPE_PEM, proxyKey))
for i in range(len(self.__certList)):
proxyString += crypto.dump_certificate(crypto.FILETYPE_PEM, self.__certList[i])
return S_OK(proxyString)
def generateProxyToFile(self, filePath, lifeTime,
diracGroup=False, strength=1024, limited=False, rfc=False):
"""
Generate a proxy and put it into a file
Args:
filePath: file to write
lifeTime: expected lifetime in seconds of proxy
diracGroup: diracGroup to add to the certificate
strength: length in bits of the pair
limited: Create a limited proxy
"""
retVal = self.generateProxyToString(lifeTime, diracGroup, strength, limited, rfc)
if not retVal['OK']:
return retVal
try:
with open(filePath, 'w') as fd:
fd.write(retVal['Value'])
except Exception as e:
return S_ERROR(DErrno.EWF, "%s :%s" % (filePath, repr(e).replace(',)', ')')))
try:
os.chmod(filePath, stat.S_IRUSR | stat.S_IWUSR)
except Exception as e:
return S_ERROR(DErrno.ESPF, "%s :%s" % (filePath, repr(e).replace(',)', ')')))
return S_OK()
def isProxy(self):
"""
    Check whether this chain is a proxy
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(self.__isProxy)
def isLimitedProxy(self):
"""
    Check whether this chain is a limited proxy
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(self.__isProxy and self.__isLimitedProxy)
def isValidProxy(self, ignoreDefault=False):
"""
    Check whether this chain is a valid proxy:
      checks that it is a proxy
      checks that it has not expired
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
if not self.__isProxy:
return S_ERROR(DErrno.ENOCHAIN, "Chain is not a proxy")
elif self.hasExpired()['Value']:
return S_ERROR(DErrno.ENOCHAIN)
elif ignoreDefault:
groupRes = self.getDIRACGroup(ignoreDefault=ignoreDefault)
if not groupRes['OK']:
return groupRes
if not groupRes['Value']:
return S_ERROR(DErrno.ENOGROUP)
return S_OK(True)
def isVOMS(self):
"""
    Check whether this chain has VOMS extensions
"""
retVal = self.isProxy()
if not retVal['OK'] or not retVal['Value']:
return retVal
for i in range(len(self.__certList)):
cert = self.getCertInChain(i)['Value']
if cert.hasVOMSExtensions()['Value']:
return S_OK(True)
return S_OK(False)
def getVOMSData(self):
"""
    Get the VOMS data from the chain
"""
retVal = self.isProxy()
if not retVal['OK'] or not retVal['Value']:
return retVal
for i in range(len(self.__certList)):
cert = self.getCertInChain(i)['Value']
res = cert.getVOMSData()
if res['OK']:
return res
return S_ERROR(DErrno.EVOMS)
def __checkProxyness(self):
self.__hash = False
self.__firstProxyStep = len(self.__certList) - 2 # -1 is user cert by default, -2 is first proxy step
self.__isProxy = True
self.__isRFC = None
self.__isLimitedProxy = False
prevDNMatch = 2
    # A chain with fewer than 2 certificates cannot be a proxy
if len(self.__certList) < 2:
self.__isProxy = False
return
# Check proxyness in steps
for step in range(len(self.__certList) - 1):
issuerMatch = self.__checkIssuer(step, step + 1)
if not issuerMatch:
self.__isProxy = False
return
# Do we need to check the proxy DN?
if prevDNMatch:
dnMatch = self.__checkProxyDN(step, step + 1)
# No DN match
if dnMatch == 0:
# If we are not in the first step we've found the entity cert
if step > 0:
self.__firstProxyStep = step - 1
# If we are in the first step this is not a proxy
else:
self.__isProxy = False
return
# Limited proxy DN match
elif dnMatch == 2:
self.__isLimitedProxy = True
if prevDNMatch != 2:
self.__isProxy = False
self.__isLimitedProxy = False
return
prevDNMatch = dnMatch
def __checkProxyDN(self, certStep, issuerStep):
"""
Check the proxy DN in a step in the chain
0 = no match
1 = proxy match
2 = limited proxy match
"""
issuerSubject = self.__certList[issuerStep].get_subject()
proxySubject = self.__certList[certStep].get_subject().clone()
psEntries = proxySubject.num_entries()
lastEntry = proxySubject.get_entry(psEntries - 1)
limited = False
if lastEntry[0] != 'CN':
return 0
if lastEntry[1] not in ('proxy', 'limited proxy'):
extList = self.__certList[certStep].get_extensions()
for ext in extList:
if ext.get_sn() == "proxyCertInfo":
contraint = [line.split(":")[1].strip() for line in ext.get_value().split("\n")
if line.split(":")[0] == "Path Length Constraint"]
if not contraint:
return 0
if self.__isRFC is None:
self.__isRFC = True
if contraint[0] == "1.3.6.1.4.1.3536.1.1.1.9":
limited = True
else:
if self.__isRFC is None:
self.__isRFC = False
if lastEntry[1] == "limited proxy":
limited = True
proxySubject.remove_entry(psEntries - 1)
if not issuerSubject.one_line() == proxySubject.one_line():
return 0
return 1 if not limited else 2
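# Example (hypothetical DNs): with issuer subject "/O=Grid/CN=Jane Doe" and
# proxy subject "/O=Grid/CN=Jane Doe/CN=proxy" this returns 1; if the last
# entry is "CN=limited proxy" it returns 2; any other layout returns 0.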
def __checkIssuer(self, certStep, issuerStep):
"""
Check the issuer is really the issuer
"""
issuerCert = self.__certList[issuerStep]
cert = self.__certList[certStep]
return cert.verify_pkey_is_issuer(issuerCert.get_pubkey())
def getDIRACGroup(self, ignoreDefault=False):
"""
Get the dirac group if present
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
if not self.__isProxy:
return S_ERROR(DErrno.EX509, "Chain does not contain a valid proxy")
if self.isPUSP()['Value']:
return self.getCertInChain(self.__firstProxyStep - 2)['Value'].getDIRACGroup(ignoreDefault=ignoreDefault)
# The code below will find the first match of the DIRAC group
for i in range(len(self.__certList) - 1, -1, -1):
retVal = self.getCertInChain(i)['Value'].getDIRACGroup(ignoreDefault=True)
if retVal['OK'] and 'Value' in retVal and retVal['Value']:
return retVal
# No DIRAC group found, try to get the default one
return self.getCertInChain(self.__firstProxyStep)['Value'].getDIRACGroup(ignoreDefault=ignoreDefault)
def hasExpired(self):
"""
Is any of the elements in the chain expired?
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
for iC in range(len(self.__certList) - 1, -1, -1):
if self.__certList[iC].has_expired():
return S_OK(True)
return S_OK(False)
def getNotAfterDate(self):
"""
Get the earliest notAfter date among the certificates in the chain
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
notAfter = self.__certList[0].get_not_after()
for iC in range(len(self.__certList) - 1, -1, -1):
stepNotAfter = self.__certList[iC].get_not_after()
if self.__certList[iC].has_expired():
return S_OK(stepNotAfter)
if notAfter > stepNotAfter:
notAfter = stepNotAfter
return S_OK(notAfter)
def generateProxyRequest(self, bitStrength=1024, limited=False):
"""
Generate a proxy request
Return S_OK( X509Request ) / S_ERROR
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
if not bitStrength or bitStrength < 1024:
return S_ERROR(DErrno.EX509, "bitStrength has to be at least 1024 (%s)" % bitStrength)
x509 = self.getCertInChain(0)['Value']
return x509.generateProxyRequest(bitStrength, limited)
def generateChainFromRequestString(self, pemData,
lifetime=86400, requireLimited=False, diracGroup=False, rfc=False):
"""
Generate an X509 chain from a request
return S_OK( string ) / S_ERROR
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
if not self.__loadedPKey:
return S_ERROR(DErrno.ENOPKEY)
try:
req = crypto.load_certificate_request(crypto.FILETYPE_PEM, pemData)
except Exception as e:
return S_ERROR(DErrno.ECERTREAD, "Can't load request data: %s" % repr(e).replace(',)', ')'))
limited = requireLimited and self.isLimitedProxy().get('Value', False)
return self.generateProxyToString(lifetime, diracGroup, 1024, limited, rfc, req.get_pubkey())
def getRemainingSecs(self):
"""
Get remaining time
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
remainingSecs = self.getCertInChain(0)['Value'].getRemainingSecs()['Value']
for i in range(1, len(self.__certList)):
stepRS = self.getCertInChain(i)['Value'].getRemainingSecs()['Value']
remainingSecs = min(remainingSecs, stepRS)
return S_OK(remainingSecs)
def dumpAllToString(self):
"""
Dump all to string
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
data = crypto.dump_certificate(crypto.FILETYPE_PEM, self.__certList[0])
if self.__loadedPKey:
data += crypto.dump_privatekey(crypto.FILETYPE_PEM, self.__keyObj)
for i in range(1, len(self.__certList)):
data += crypto.dump_certificate(crypto.FILETYPE_PEM, self.__certList[i])
return S_OK(data)
def dumpAllToFile(self, filename=False):
"""
Dump all to a file. If no filename is specified, a temporary one will be created
"""
retVal = self.dumpAllToString()
if not retVal['OK']:
return retVal
pemData = retVal['Value']
try:
if not filename:
fd, filename = tempfile.mkstemp()
os.write(fd, pemData)
os.close(fd)
else:
with open(filename, "w") as fd:
fd.write(pemData)
except Exception as e:
return S_ERROR(DErrno.EWF, "%s :%s" % (filename, repr(e).replace(',)', ')')))
try:
os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR)
except Exception as e:
return S_ERROR(DErrno.ESPF, "%s :%s" % (filename, repr(e).replace(',)', ')')))
return S_OK(filename)
def isRFC(self):
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(self.__isRFC)
def dumpChainToString(self):
"""
Dump only cert chain to string
"""
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
data = ''
for i in range(len(self.__certList)):
data += crypto.dump_certificate(crypto.FILETYPE_PEM, self.__certList[i])
return S_OK(data)
def dumpPKeyToString(self):
"""
Dump key to string
"""
if not self.__loadedPKey:
return S_ERROR(DErrno.ENOCHAIN)
return S_OK(crypto.dump_privatekey(crypto.FILETYPE_PEM, self.__keyObj))
def __str__(self):
repStr = "<X509Chain"
if self.__loadedChain:
repStr += " %s certs " % len(self.__certList)
for cert in self.__certList:
repStr += "[%s]" % cert.get_subject().one_line()
if self.__loadedPKey:
repStr += " with key"
repStr += ">"
return repStr
def __repr__(self):
return self.__str__()
def isPUSP(self):
if self.__isProxy:
# Check if we have a subproxy
trialSubidentity = self.__certList[self.__firstProxyStep].get_subject()
dn = trialSubidentity.one_line()
subproxyUser = isPUSPdn(dn)
if subproxyUser:
result = S_OK(True)
result['Identity'] = dn
result['SubproxyUser'] = subproxyUser
return result
return S_OK(False)
def getCredentials(self, ignoreDefault=False):
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
credDict = {'subject': self.__certList[0].get_subject().one_line(),
'issuer': self.__certList[0].get_issuer().one_line(),
'secondsLeft': self.getRemainingSecs()['Value'],
'isProxy': self.__isProxy,
'isLimitedProxy': self.__isProxy and self.__isLimitedProxy,
'validDN': False,
'validGroup': False}
if self.__isProxy:
credDict['identity'] = self.__certList[self.__firstProxyStep + 1].get_subject().one_line()
# Check if we have the PUSP case
result = self.isPUSP()
if result['OK'] and result['Value']:
credDict['identity'] = result['Identity']
credDict['subproxyUser'] = result['SubproxyUser']
credDict['rfc'] = self.__isRFC
retVal = Registry.getUsernameForDN(credDict['identity'])
if not retVal['OK']:
return S_OK(credDict)
credDict['username'] = retVal['Value']
credDict['validDN'] = True
retVal = self.getDIRACGroup(ignoreDefault=ignoreDefault)
if retVal['OK']:
diracGroup = retVal['Value']
credDict['group'] = diracGroup
retVal = Registry.getGroupsForUser(credDict['username'])
if retVal['OK'] and diracGroup in retVal['Value']:
credDict['validGroup'] = True
credDict['groupProperties'] = Registry.getPropertiesForGroup(diracGroup)
else:
retVal = Registry.getHostnameForDN(credDict['subject'])
if retVal['OK']:
credDict['group'] = 'hosts'
credDict['hostname'] = retVal['Value']
credDict['validDN'] = True
credDict['validGroup'] = True
credDict['groupProperties'] = Registry.getHostOption(credDict['hostname'], 'Properties')
retVal = Registry.getUsernameForDN(credDict['subject'])
if retVal['OK']:
credDict['username'] = retVal['Value']
credDict['validDN'] = True
return S_OK(credDict)
def hash(self):
if not self.__loadedChain:
return S_ERROR(DErrno.ENOCHAIN)
if self.__hash:
return S_OK(self.__hash)
sha1 = hashlib.sha1()
for cert in self.__certList:
sha1.update(cert.get_subject().one_line())
sha1.update(str(self.getRemainingSecs()['Value'] / 3600))
sha1.update(self.getDIRACGroup()['Value'])
if self.isVOMS().get('Value'):
sha1.update("VOMS")
from DIRAC.Core.Security.VOMS import VOMS
result = VOMS().getVOMSAttributes(self)
if result['OK']:
sha1.update(str(result['Value']))
self.__hash = sha1.hexdigest()
return S_OK(self.__hash)
def isPUSPdn(userDN):
""" Evaluate if the DN is of the PUSP type or not
:param str userDN: user DN string
:return: the subproxy user name or None
"""
lastEntry = userDN.split('/')[-1].split('=')
if lastEntry[0] == "CN" and lastEntry[1].startswith("user:"):
return userDN.split('/')[-1].split(':')[1]
return None
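# Example (hypothetical DN): isPUSPdn('/O=Grid/CN=pilot/CN=user:alice') returns
# 'alice', while a DN whose last entry is not of the form "CN=user:<name>"
# returns None.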
g_X509ChainType = type(X509Chain())
|
arrabito/DIRAC
|
Core/Security/X509Chain.py
|
Python
|
gpl-3.0
| 22,583
|
[
"DIRAC"
] |
59b1fe2375541218d255bc237d02008aa349b10ca3a60078b49e8ba1e73b53f1
|
# 2017 DeepCrystal Technologies - Patrick Hop
#
# Data loading a splitting file
#
# MIT License - have fun!!
# ===========================================================
import os
import random
from collections import OrderedDict
import deepchem as dc
from deepchem.utils import ScaffoldGenerator
from deepchem.utils.save import log
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
random.seed(2)
np.random.seed(2)
torch.manual_seed(2)
def generate_scaffold(smiles, include_chirality=False):
"""Compute the Bemis-Murcko scaffold for a SMILES string."""
mol = Chem.MolFromSmiles(smiles)
engine = ScaffoldGenerator(include_chirality=include_chirality)
scaffold = engine.get_scaffold(mol)
return scaffold
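# For example, aspirin ('CC(=O)Oc1ccccc1C(=O)O') reduces to its single benzene
# ring, so generate_scaffold should return 'c1ccccc1' (standard Bemis-Murcko
# behaviour; the exact SMILES canonicalization depends on the RDKit version).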
def split(dataset,
frac_train=.80,
frac_valid=.10,
frac_test=.10,
log_every_n=1000):
"""
Splits internal compounds into train/validation/test by scaffold.
"""
np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
scaffolds = {}
log("About to generate scaffolds", True)
data_len = len(dataset)
for ind, smiles in enumerate(dataset):
if ind % log_every_n == 0:
log("Generating scaffold %d/%d" % (ind, data_len), True)
scaffold = generate_scaffold(smiles)
if scaffold not in scaffolds:
scaffolds[scaffold] = [ind]
else:
scaffolds[scaffold].append(ind)
scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
scaffold_sets = [
scaffold_set
for (scaffold, scaffold_set) in sorted(
scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
]
train_cutoff = frac_train * len(dataset)
valid_cutoff = (frac_train + frac_valid) * len(dataset)
train_inds, valid_inds, test_inds = [], [], []
log("About to sort in scaffold sets", True)
for scaffold_set in scaffold_sets:
if len(train_inds) + len(scaffold_set) > train_cutoff:
if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:
test_inds += scaffold_set
else:
valid_inds += scaffold_set
else:
train_inds += scaffold_set
return train_inds, valid_inds, test_inds
def load_dataset(filename, whiten=False):
features = []
labels = []
with open(filename, 'r') as f:
next(f)  # skip the header row
for line in f:
splits = line.rstrip('\n').split(',')
features.append(splits[-1])
labels.append(float(splits[-2]))
features = np.array(features)
labels = np.array(labels, dtype='float32').reshape(-1, 1)
train_ind, val_ind, test_ind = split(features)
train_features = np.take(features, train_ind)
train_labels = np.take(labels, train_ind)
val_features = np.take(features, val_ind)
val_labels = np.take(labels, val_ind)
return train_features, train_labels, val_features, val_labels
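# Hypothetical usage, given the column layout assumed by the parser above
# (SMILES in the last column, the regression target in the one before it):
#   train_x, train_y, val_x, val_y = load_dataset('delaney.csv')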
|
deepchem/deepchem
|
contrib/mpnn/donkey.py
|
Python
|
mit
| 3,029
|
[
"RDKit"
] |
59be2ed99af07d0df581ba38c05320eb2b1c47e4ccbc33cb6e7fbefb8ad68cd9
|
"""
Experiment on real text data.
"""
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import freqopttest.data as data
import freqopttest.tst as tst
import freqopttest.glo as glo
import freqopttest.util as util
import freqopttest.kernel as kernel
from . import exglobal
try:
import pickle as pickle
except:
import pickle
# need independent_jobs package
# https://github.com/karlnapf/independent-jobs
# The independent_jobs and freqopttest packages have to be on the Python search path (e.g. set PYTHONPATH in .bashrc)
import independent_jobs as inj
from independent_jobs.jobs.IndependentJob import IndependentJob
from independent_jobs.results.SingleResult import SingleResult
from independent_jobs.aggregators.SingleResultAggregator import SingleResultAggregator
from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.engines.SlurmComputationEngine import SlurmComputationEngine
from independent_jobs.tools.Log import logger
import math
import autograd.numpy as np
import os
import sys
def job_met_opt(sample_source, tr, te, r):
"""MeanEmbeddingTest with test locations optimzied."""
# MeanEmbeddingTest. optimize the test locations
with util.ContextTimer() as t:
met_opt_options = {'n_test_locs': J, 'max_iter': 200,
'locs_step_size': 500.0, 'gwidth_step_size': 0.2, 'seed': r+92856,
'tol_fun': 1e-4}
test_locs, gwidth, info = tst.MeanEmbeddingTest.optimize_locs_width(tr, alpha, **met_opt_options)
met_opt = tst.MeanEmbeddingTest(test_locs, gwidth, alpha)
met_opt_test = met_opt.perform_test(te)
result = {'test_method': met_opt, 'test_result': met_opt_test, 'time_secs': t.secs}
return result
def job_met_gwgrid(sample_source, tr, te, r):
"""MeanEmbeddingTest. Optimize only the Gaussian width with grid search
Fix the test locations."""
with util.ContextTimer() as t:
# optimize on the training set
T_randn = tst.MeanEmbeddingTest.init_locs_2randn(tr, J, seed=r+92856)
med = util.meddistance(tr.stack_xy(), 1000)
list_gwidth = np.hstack( ( (med**2) *(2.0**np.linspace(-5, 5, 40) ) ) )
list_gwidth.sort()
besti, powers = tst.MeanEmbeddingTest.grid_search_gwidth(tr, T_randn,
list_gwidth, alpha)
best_width2 = list_gwidth[besti]
met_grid = tst.MeanEmbeddingTest(T_randn, best_width2, alpha)
test_result = met_grid.perform_test(te)
result = {'test_method': met_grid, 'test_result': test_result, 'time_secs': t.secs}
return result
def job_scf_opt(sample_source, tr, te, r):
"""SmoothCFTest with frequencies optimized."""
with util.ContextTimer() as t:
op = {'n_test_freqs': J, 'max_iter': 500, 'freqs_step_size': 1.0,
'gwidth_step_size': 0.1, 'seed': r+92856, 'tol_fun': 1e-3}
test_freqs, gwidth, info = tst.SmoothCFTest.optimize_freqs_width(tr, alpha, **op)
scf_opt = tst.SmoothCFTest(test_freqs, gwidth, alpha)
scf_opt_test = scf_opt.perform_test(te)
result = {'test_method': scf_opt, 'test_result': scf_opt_test, 'time_secs': t.secs}
return result
def job_scf_gwgrid(sample_source, tr, te, r):
rand_state = np.random.get_state()
np.random.seed(r+92856)
with util.ContextTimer() as t:
d = tr.dim()
T_randn = np.random.randn(J, d)
np.random.set_state(rand_state)
# grid search to determine the initial gwidth
mean_sd = tr.mean_std()
scales = 2.0**np.linspace(-4, 4, 20)
list_gwidth = np.hstack( (mean_sd*scales*(d**0.5), 2**np.linspace(-8, 8, 20) ))
list_gwidth.sort()
besti, powers = tst.SmoothCFTest.grid_search_gwidth(tr, T_randn,
list_gwidth, alpha)
# initialize with the best width from the grid search
best_width = list_gwidth[besti]
scf_gwgrid = tst.SmoothCFTest(T_randn, best_width, alpha)
test_result = scf_gwgrid.perform_test(te)
result = {'test_method': scf_gwgrid, 'test_result': test_result, 'time_secs': t.secs}
return result
def job_quad_mmd(sample_source, tr, te, r):
"""Quadratic mmd with grid search to choose the best Gaussian width.
One-sample U-statistic. This should NOT be used anymore."""
# If n is too large, pairwise median computation can cause a memory error.
with util.ContextTimer() as t:
med = util.meddistance(tr.stack_xy(), 1000)
list_gwidth = np.hstack( ( (med**2) *(2.0**np.linspace(-4, 4, 40) ) ) )
list_gwidth.sort()
list_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]
# grid search to choose the best Gaussian width
besti, powers = tst.QuadMMDTest.grid_search_kernel(tr, list_kernels, alpha)
# perform test
best_ker = list_kernels[besti]
mmd_test = tst.QuadMMDTest(best_ker, n_permute=1000, alpha=alpha,
use_1sample_U=True)
test_result = mmd_test.perform_test(te)
result = {'test_method': mmd_test, 'test_result': test_result, 'time_secs': t.secs}
return result
def job_quad_mmd_2U(sample_source, tr, te, r):
"""Quadratic mmd with grid search to choose the best Gaussian width.
Use two-sample U statistics to compute k(X,Y).
"""
# If n is too large, pairwise median computation can cause a memory error.
with util.ContextTimer() as t:
med = util.meddistance(tr.stack_xy(), 1000)
list_gwidth = np.hstack( ( (med**2) *(2.0**np.linspace(-4, 4, 40) ) ) )
list_gwidth.sort()
list_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]
# grid search to choose the best Gaussian width
besti, powers = tst.QuadMMDTest.grid_search_kernel(tr, list_kernels, alpha)
# perform test
best_ker = list_kernels[besti]
mmd_test = tst.QuadMMDTest(best_ker, n_permute=1000, alpha=alpha,
use_1sample_U=False)
test_result = mmd_test.perform_test(te)
result = {'test_method': mmd_test, 'test_result': test_result, 'time_secs': t.secs}
return result
def job_lin_mmd(sample_source, tr, te, r):
"""Linear mmd with grid search to choose the best Gaussian width."""
# should be completely deterministic
# If n is too large, pairwise median computation can cause a memory error.
with util.ContextTimer() as t:
X, Y = tr.xy()
Xr = X[:min(X.shape[0], 1000), :]
Yr = Y[:min(Y.shape[0], 1000), :]
med = util.meddistance(np.vstack((Xr, Yr)) )
widths = [ (med*f) for f in 2.0**np.linspace(-1, 4, 40)]
list_kernels = [kernel.KGauss( w**2 ) for w in widths]
# grid search to choose the best Gaussian width
besti, powers = tst.LinearMMDTest.grid_search_kernel(tr, list_kernels, alpha)
# perform test
best_ker = list_kernels[besti]
lin_mmd_test = tst.LinearMMDTest(best_ker, alpha)
test_result = lin_mmd_test.perform_test(te)
result = {'test_method': lin_mmd_test, 'test_result': test_result, 'time_secs': t.secs}
return result
def job_hotelling(sample_source, tr, te, r):
"""Hotelling T-squared test"""
# Since text data are high-d, T-test will likely cause a LinAlgError because
# of the singular covariance matrix.
with util.ContextTimer() as t:
htest = tst.HotellingT2Test(alpha=alpha)
try:
test_result = htest.perform_test(te)
except np.linalg.linalg.LinAlgError:
test_result = {'alpha': alpha, 'pvalue': 1.0, 'test_stat': 1e-5,
'h0_rejected': False}
result = {'test_method': htest, 'test_result': test_result, 'time_secs': t.secs}
return result
# Define our custom Job, which inherits from base class IndependentJob
class Ex4Job(IndependentJob):
def __init__(self, aggregator, prob_label, rep, n, job_func):
walltime = 60*59*24
memory = int(tr_proportion*n*1e-1) + 50
IndependentJob.__init__(self, aggregator, walltime=walltime,
memory=memory)
self.prob_label = prob_label
self.rep = rep
self.n = n
self.job_func = job_func
# we need to define the abstract compute method. It has to return an instance
# of JobResult base class
def compute(self):
r = self.rep
sample_source, nmax = get_sample_source(self.prob_label)
d = sample_source.dim()
job_func = self.job_func
logger.info("computing. %s. r=%d "%(job_func.__name__, r ))
tst_data = sample_source.sample(self.n, seed=r)
tr, te = tst_data.split_tr_te(tr_proportion=tr_proportion, seed=r+20 )
prob_label = self.prob_label
job_result = job_func(sample_source, tr, te, r)
# create ScalarResult instance
result = SingleResult(job_result)
# submit the result to my own aggregator
self.aggregator.submit_result(result)
logger.info("done. ex4: %s, r=%d "%(job_func.__name__, r))
# save result
func_name = job_func.__name__
fname = '%s-%s-J%d_r%d_d%d_a%.3f_trp%.2f.p' \
%(prob_label, func_name, J, r, d, alpha, tr_proportion)
glo.ex_save_result(ex, job_result, prob_label, fname)
# This import is needed so that pickle knows about the class Ex4Job.
# pickle is used when collecting the results from the submitted jobs.
from freqopttest.ex.ex4_text import job_met_opt
from freqopttest.ex.ex4_text import job_met_gwgrid
from freqopttest.ex.ex4_text import job_scf_opt
from freqopttest.ex.ex4_text import job_scf_gwgrid
from freqopttest.ex.ex4_text import job_quad_mmd
from freqopttest.ex.ex4_text import job_quad_mmd_2U
from freqopttest.ex.ex4_text import job_lin_mmd
from freqopttest.ex.ex4_text import job_hotelling
from freqopttest.ex.ex4_text import Ex4Job
#--- experimental setting -----
ex = 4
# number of test locations / test frequencies J
J = 1
alpha = 0.01
tr_proportion = 0.5
# repetitions
reps = 500
#method_job_funcs = [ job_met_opt, job_scf_opt, job_lin_mmd, job_hotelling]
#method_job_funcs = [ job_met_opt, job_scf_opt, job_quad_mmd_2U,
# job_lin_mmd]
method_job_funcs = [ job_met_opt, job_met_gwgrid, job_scf_opt, job_scf_gwgrid,
job_quad_mmd_2U, job_lin_mmd]
# If is_rerun==False, do not rerun the experiment if a result file for the current
# setting of (ni, r) already exists.
is_rerun = False
#---------------------------
label2fname = {'bayes_neuro_d2000_rnoun':'bayes_neuro_np794_nq788_d2000_random_noun.p',
'bayes_learning_d2000_rnoun': 'bayes_learning_np821_nq276_d2000.p',
'bayes_bayes_d2000_rnoun': 'bayes_bayes_np430_nq432_d2000.p',
#'bayes_neuro_d800_rverb': 'bayes_neuro_np794_nq788_d800_random_verb.p',
#'bayes_neuro_d300_rnoun': 'bayes_neuro_np794_nq788_d300_random_noun.p',
#'deep_learning_d1000_rnoun': 'deep_learning_np427_nq339_d1000_random_noun.p',
'deep_learning_d2000_rnoun': 'deep_learning_np431_nq299_d2000_random_noun.p',
'bayes_deep_d2000_rnoun': 'bayes_deep_np846_nq433_d2000_random_noun.p',
#'deep_neuro_d2000_rnoun': 'deep_neuro_np105_nq512_d2000.p',
'neuro_learning_d2000_rnoun': 'neuro_learning_np832_nq293_d2000.p',
}
cache_loaded = {}
def load_nips_TSTData(fname):
if fname in cache_loaded:
return cache_loaded[fname]
fpath = glo.data_file(fname)
with open(fpath, 'r') as f:
loaded = pickle.load(f)
X = loaded['P']
Y = loaded['Q']
n_min = min(X.shape[0], Y.shape[0])
X = X[:n_min, :]
Y = Y[:n_min, :]
assert(X.shape[0] == Y.shape[0])
tst_data = data.TSTData(X, Y)
cache_loaded[fname] = (tst_data, n_min)
return tst_data, n_min
def get_sample_source(prob_label):
"""Return a (SampleSource, n) representing the problem"""
if prob_label not in label2fname:
raise ValueError('Unknown problem label. Need to be one of %s'%str(list(label2fname.keys())) )
fname = label2fname[prob_label]
tst_data, n = load_nips_TSTData(fname)
ss = data.SSResample(tst_data)
return ss, n
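# For example, get_sample_source('bayes_bayes_d2000_rnoun') wraps the NIPS-text
# problem where both samples come from the same "Bayesian inference" category
# (P = Q), a null case in which H0 should rarely be rejected.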
def main():
if len(sys.argv) != 2:
print('Usage: %s problem_label'%sys.argv[0])
sys.exit(1)
prob_label = sys.argv[1]
run_dataset(prob_label)
def run_dataset(prob_label):
"""Run the experiment"""
sample_source, n = get_sample_source(prob_label)
# /////// submit jobs //////////
# create folder name string
home = os.path.expanduser("~")
foldername = os.path.join(home, "freqopttest_slurm", 'e%d'%ex)
logger.info("Setting engine folder to %s" % foldername)
# create parameter instance that is needed for any batch computation engine
logger.info("Creating batch parameter instance")
batch_parameters = BatchClusterParameters(
foldername=foldername, job_name_base="e%d_"%ex, parameter_prefix="")
# Use the following line if Slurm queue is not used.
#engine = SerialComputationEngine()
engine = SlurmComputationEngine(batch_parameters)
n_methods = len(method_job_funcs)
# repetitions x #methods
aggregators = np.empty((reps, n_methods ), dtype=object)
d = sample_source.dim()
for r in range(reps):
for mi, f in enumerate(method_job_funcs):
# name used to save the result
func_name = f.__name__
fname = '%s-%s-J%d_r%d_d%d_a%.3f_trp%.2f.p' \
%(prob_label, func_name, J, r, d, alpha, tr_proportion)
if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
logger.info('%s exists. Load and return.'%fname)
test_result = glo.ex_load_result(ex, prob_label, fname)
sra = SingleResultAggregator()
if isinstance(test_result, SingleResult):
sra.submit_result(test_result)
else:
sra.submit_result(SingleResult(test_result))
aggregators[r, mi] = sra
else:
# result not exists or rerun
job = Ex4Job(SingleResultAggregator(), prob_label, r, n, f)
agg = engine.submit_job(job)
aggregators[r, mi] = agg
# let the engine finish its business
logger.info("Wait for all call in engine")
engine.wait_for_all()
# ////// collect the results ///////////
logger.info("Collecting results")
test_results = np.empty((reps, n_methods), dtype=object)
for r in range(reps):
for mi, f in enumerate(method_job_funcs):
logger.info("Collecting result (%s, r=%d)" % (f.__name__, r ))
# let the aggregator finalize things
aggregators[r, mi].finalize()
# aggregators[i].get_final_result() returns a SingleResult instance,
# which we need to extract the actual result
test_result = aggregators[r, mi].get_final_result().result
# results may be wrapped in several layers of SingleResult; unwrap them all
while isinstance(test_result, SingleResult):
test_result = test_result.result
test_results[r, mi] = test_result
func_name = f.__name__
fname = '%s-%s-J%d_r%d_d%d_a%.3f_trp%.2f.p' \
%(prob_label, func_name, J, r, d, alpha, tr_proportion)
glo.ex_save_result(ex, test_result, prob_label, fname)
func_names = [f.__name__ for f in method_job_funcs]
func2labels = exglobal.get_func2label_map()
method_labels = [func2labels[f] for f in func_names if f in func2labels]
# save results
results = {'results': test_results, 'n': n, 'data_fname':label2fname[prob_label],
'alpha': alpha, 'J': J, 'sample_source': sample_source,
'tr_proportion': tr_proportion, 'method_job_funcs': method_job_funcs,
'prob_label': prob_label, 'method_labels': method_labels}
# class name
fname = 'ex%d-%s-me%d_J%d_rs%d_nma%d_d%d_a%.3f_trp%.2f.p' \
%(ex, prob_label, n_methods, J, reps, n, d, alpha, tr_proportion)
glo.ex_save_result(ex, results, fname)
logger.info('Saved aggregated results to %s'%fname)
if __name__ == '__main__':
main()
|
wittawatj/interpretable-test
|
freqopttest/ex/ex4_text.py
|
Python
|
mit
| 16,542
|
[
"Gaussian"
] |
4473f195e7b836a03df0c4e2d1485b13eaeda6285e9ae25f4c8a7c5e53b0816e
|
"""
Solve the Helmholtz equation on a sphere
using spherical coordinates
"""
import os
from shenfun import *
from shenfun.la import SolverGeneric1ND
import sympy as sp
by_parts = False
# Define spherical coordinates
r = 1
theta, phi = psi = sp.symbols('x,y', real=True, positive=True)
rv = (r*sp.sin(theta)*sp.cos(phi), r*sp.sin(theta)*sp.sin(phi), r*sp.cos(theta))
alpha = 2
# Manufactured solution
sph = sp.functions.special.spherical_harmonics.Ynm
ue = sph(6, 3, theta, phi)
#ue = sp.cos(8*(sp.sin(theta)*sp.cos(phi) + sp.sin(theta)*sp.sin(phi) + sp.cos(theta)))
#g = - ue.diff(theta, 2) - (1/sp.tan(theta))*ue.diff(theta, 1) - (1/sp.sin(theta)**2)*ue.diff(phi, 2) + alpha*ue
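# The manufactured-solution setup: we solve the Helmholtz problem
# -div(grad(u)) + alpha*u = g on the unit sphere, with g computed below from
# the exact solution ue = Y_6^3(theta, phi).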
N, M = 60, 40
L0 = FunctionSpace(N, 'C', domain=(0, np.pi))
F1 = FunctionSpace(M, 'F', dtype='D')
T = TensorProductSpace(comm, (L0, F1), coordinates=(psi, rv, sp.Q.positive(sp.sin(theta))))
v = TestFunction(T)
u = TrialFunction(T)
# Compute the right hand side on the quadrature mesh
g = (-div(grad(u))+alpha*u).tosympy(basis=ue, psi=psi)
gj = Array(T, buffer=g)
# Take scalar product
g_hat = Function(T)
g_hat = inner(v, gj, output_array=g_hat)
# Assemble matrices.
if by_parts:
mats = inner(grad(v), grad(u))
mats += [inner(v, alpha*u)]
else:
mats = inner(v, -div(grad(u))+alpha*u)
# Solve
u_hat = Function(T)
Sol1 = SolverGeneric1ND(mats)
u_hat = Sol1(g_hat, u_hat)
# Transform back to real space.
uj = u_hat.backward()
uq = Array(T, buffer=ue)
print('Error =', np.linalg.norm(uj-uq))
if 'pytest' not in os.environ:
# Postprocess
# Refine for a nicer plot. Refine simply pads Functions with zeros, which
# gives more quadrature points. u_hat has NxM quadrature points, refine
# using any higher number.
u_hat2 = u_hat.refine([N*3, M*3])
ur = u_hat2.backward(kind='uniform')
from mayavi import mlab
xx, yy, zz = u_hat2.function_space().local_cartesian_mesh(uniform=True)
# Wrap periodic direction around
if T.bases[1].domain == (0, 2*np.pi):
xx = np.hstack([xx, xx[:, 0][:, None]])
yy = np.hstack([yy, yy[:, 0][:, None]])
zz = np.hstack([zz, zz[:, 0][:, None]])
ur = np.hstack([ur, ur[:, 0][:, None]])
mlab.figure(bgcolor=(1, 1, 1), size=(400, 400))
mlab.mesh(xx, yy, zz, scalars=ur.real, colormap='jet')
mlab.savefig('spherewhite.png')
#mlab.show()
|
spectralDNS/shenfun
|
demo/sphere_helmholtz.py
|
Python
|
bsd-2-clause
| 2,342
|
[
"Mayavi"
] |
e17437cd0b0e64663042f005480001571ca1e3ef8caeaa54ec429654a2042156
|
#
# AUTHORS:
# Hakan Ozadam
# Rachel Brown
#
# Moore Laboratory
# UMASS Medical School / HHMI
# RNA Therapeutics Institute
# Albert Sherman Center, ASC4-1009
# 368 Plantation Street
# Worcester, MA 01605
# USA
#
#################################################################
import os
import subprocess
from collections import OrderedDict
from glob import glob
import argparse
from ..genomic_io.fastq import FastqFile
from ..genomic_io.fasta import FastaFile, FastaEntry
import pysam
#################################################################
def get_commandline_arguments():
parser = argparse.ArgumentParser(description=
'''
A Modified Version of the Lasso Algorithm
The user provides the coordinates, gene name, strand and chromosome of the intron.
The provided fastq file is mapped to all possible branchpoints from that intron.
''')
parser.add_argument("-f" ,
help = "Input fasta file" ,
required = True ,
metavar = "input_fasta_file" ,
type = str)
parser.add_argument("-U" ,
help = "Input fastq file single end" ,
required = False ,
metavar = "input_fastq_file" ,
type = str)
parser.add_argument("-1" ,
help = "Input fastq file, mate 1, paired end" ,
required = False ,
metavar = "input_fastq_file_mate_1" ,
dest = 'mate_1',
type = str)
parser.add_argument("-2" ,
help = "Input fastq file, mate 2, paired end" ,
required = False ,
metavar = "input_fastq_file_mate_2" ,
dest = 'mate_2',
type = str)
parser.add_argument("-o" ,
help = "Output directory" ,
required = True ,
metavar = "output_fastq_file" ,
type = str)
parser.add_argument("--start" ,
help = "Intron start coordinate. 0-based inclusive." ,
required = True ,
metavar = "intron_start" ,
type = int)
parser.add_argument("--end" ,
help = "Intron end coordinate. 0-based inclusive." ,
required = True ,
metavar = "intron_end" ,
type = int)
parser.add_argument("--chr" ,
help = "Chromosome" ,
required = True ,
metavar = "chromosome of the intron" ,
type = str)
parser.add_argument("--gene" ,
help = "Gene Name" ,
required = True ,
metavar = "gene" ,
type = str)
parser.add_argument("--strand" ,
help = "Strand" ,
required = True ,
choices = ['+', '-'],
metavar = "Strand" ,
type = str)
parser.add_argument("--radius" ,
help = "Radius: Number of nucleotides to the left and to the right of the bp" ,
required = True ,
metavar = "radius" ,
type = int)
parser.add_argument("--min-bp-coverage" ,
help = "Minimum number of nucleotids needed to cover either side of the candidate branchpoint" ,
required = False ,
default = 5,
metavar = "min_bp_coverage" ,
dest = 'min_bp_coverage',
type = int)
return parser.parse_args()
###########################################################################
def get_intron_sequence(fasta_file, chromosome, intron_start, intron_end, strand):
found_chr = False
with FastaFile(fasta_file) as opened_fasta_file:
for entry in opened_fasta_file:
if entry.header == chromosome:
found_chr = True
if intron_start >= len(entry.sequence) or\
intron_end >= len(entry.sequence):
print('The intron boundaries are out of range.',
'Intron start =', intron_start, 'intron_end =', intron_end,
'chromosome has', len(entry.sequence), 'nucleotides.')
raise(IndexError('Out of chr range'))
intron_fasta_entry = FastaEntry(header = 'intron_identifier',
sequence = entry.sequence[intron_start:(intron_end + 1)])
if strand == '-':
intron_fasta_entry.reverse_complement()
print(intron_fasta_entry)
if not found_chr:
raise(Exception('Could not find the chromosome ', chromosome))
return intron_fasta_entry.sequence
##############################################################################
def make_reference_sequences(arguments, output_fasta_file):
intron_sequence = get_intron_sequence(arguments.f, arguments.chr, arguments.start, arguments.end, arguments.strand)
this_five_prime_end = arguments.start
this_three_prime_end = arguments.end
radius = arguments.radius
if arguments.strand == '-':
this_five_prime_end = arguments.end
this_three_prime_end = arguments.start
with open(output_fasta_file, 'w') as output_stream:
for i in range(arguments.min_bp_coverage, len(intron_sequence)):
this_distance_to_three_prime = len(intron_sequence) - i - 1
this_bp_location = arguments.start + i
piece_A_start = max( i - radius, 0 )
piece_B_start = 0
piece_A_end = i + 1
piece_B_end = min( radius, len(intron_sequence) )
if arguments.strand == "-":
this_bp_location = len(intron_sequence) - i - 1 + arguments.start
bp_label = "__".join( (
str(i), arguments.chr, arguments.strand, arguments.gene, str(this_five_prime_end),
str(this_bp_location), str(this_distance_to_three_prime), str(this_three_prime_end) )
)
this_sequence = intron_sequence[ piece_A_start : piece_A_end ] +\
intron_sequence[piece_B_start : piece_B_end]
this_fasta_entry = FastaEntry( header = bp_label, sequence = this_sequence)
print(this_fasta_entry, file = output_stream)
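# Each reference entry header encodes one candidate branchpoint, e.g.
# (hypothetical values) "12__chr1__+__MYGENE__100__112__37__149" for i=12
# on a 50 nt '+'-strand intron spanning [100, 149].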
#####################################################################################
def make_bt2_reference(reference_base, fasta_file , executable):
command = " ".join((executable , fasta_file, reference_base))
p = subprocess.Popen([command], stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell = True)
std_out , std_err = p.communicate()
std_out = std_out.decode("utf-8").rstrip()
std_err = std_err.decode("utf-8").rstrip()
returncode = p.returncode
if returncode:
print(std_out, std_err)
return returncode
########################################################################################
def align_reads(input_files, reference_base, output_sam, executable, parameters):
if len(input_files) == 1:
command_arg = " -U " + input_files[0]
else:
command_arg = " -1 % -2 % "%input_files
command_arg += " -x " + reference_base + " -S " + output_sam + " --no-unal " + parameters
command = " ".join( (executable, command_arg) )
print(command)
p = subprocess.Popen([command], stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell = True)
std_out , std_err = p.communicate()
std_out = std_out.decode("utf-8").rstrip()
std_err = std_err.decode("utf-8").rstrip()
returncode = p.returncode
print(std_out, std_err)
return returncode
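# The flags built above (-U / -1 / -2, -x, -S, --no-unal) follow bowtie2's
# command-line conventions, so `executable` is presumably a path to a bowtie2
# binary, e.g. align_reads(['reads.fastq'], 'ref/bp', 'out.sam', 'bowtie2', '--local')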
##########################################################################################
def find_bp_alignments(input_sam, output_sam, radius, coverage_threshold):
input_file = pysam.AlignmentFile(input_sam, 'r')
output_sam_stream = pysam.AlignmentFile(output_sam, "wh", header = input_file.header)
lengths = dict()
for ref in input_file.header['SQ']:
print(ref['SN'], ' --- ', ref['LN'])
lengths[ref['SN']] = int(ref['LN'])
for read in input_file:
if read.is_unmapped:
continue
# only reads that cover the candidate branchpoint (checked below) are written
aligned_ref_label = input_file.getrname(read.reference_id)
ref_contents = aligned_ref_label.split("__")
bp_position = int(ref_contents[0])
if bp_position - read.pos >= coverage_threshold and\
read.aend - bp_position >= coverage_threshold:
output_sam_stream.write(read)
|
hakanozadam/bal
|
bal/lasso/lasso.py
|
Python
|
gpl-2.0
| 9,229
|
[
"pysam"
] |
d027d6c4901c1d82bc013bede48b3b6c7670a827786ca7bcf42f92c7d7d74750
|
import sys
import os
import numpy as np
import h5py
import multiprocessing
import cPickle
import ephem
import matplotlib.pyplot as plt
import types
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import train_test_split
from sklearn import metrics, linear_model, tree, ensemble
# NOTE: endless ephem warnings
# DeprecationWarning: PyOS_ascii_strtod and PyOS_ascii_atof are deprecated. Use PyOS_string_to_double instead.
# https://github.com/brandon-rhodes/pyephem/issues/18
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# NOTE: this one does cross-validation on the last 1000 points
# instead of a random selection. Duh...
fMapper = {
"apcp_sfc" : "Total_precipitation",
"dlwrf_sfc" : "Downward_Long-Wave_Rad_Flux",
"dswrf_sfc" : "Downward_Short-Wave_Rad_Flux",
"pres_msl" : "Pressure",
"pwat_eatm" : "Precipitable_water",
"spfh_2m" : "Specific_humidity_height_above_ground",
"tcdc_eatm" : "Total_cloud_cover",
"tcolc_eatm" : "Total_Column-Integrated_Condensate",
"tmax_2m" : "Maximum_temperature",
"tmin_2m" : "Minimum_temperature",
"tmp_2m" : "Temperature_height_above_ground",
"tmp_sfc" : "Temperature_surface",
"ulwrf_sfc" : "Upward_Long-Wave_Rad_Flux_surface",
"ulwrf_tatm" : "Upward_Long-Wave_Rad_Flux",
"uswrf_sfc" : "Upward_Short-Wave_Rad_Flux"
}
fKeys = ("apcp_sfc", "dlwrf_sfc", "dswrf_sfc", "pres_msl", "pwat_eatm",
"spfh_2m", "tcdc_eatm", "tcolc_eatm", "tmax_2m", "tmin_2m",
"tmp_2m", "tmp_sfc", "ulwrf_sfc", "ulwrf_tatm", "uswrf_sfc")
NPTSt = 5113 # Train
NPTSp = 1796 # Predict
# Minimal script for gaussian process estimation
class Mesonet(object):
dtimet = np.recarray((NPTSt,), dtype={"names": ("time",),
"formats": ("datetime64[D]",)})
dtimep = np.recarray((NPTSp,), dtype={"names": ("time",),
"formats": ("datetime64[D]",)})
def __init__(self, stid, nlat, elon, elev):
self.stid = stid
self.nlat = nlat
self.elon = elon
self.elev = elev
# Measured data
self.datat = np.recarray((NPTSt,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
"formats": (np.int64, np.float64, np.float64)})
self.datap = np.recarray((NPTSp,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
"formats": (np.int64, np.float64, np.float64)})
def setAstro(self, time, data):
sun = ephem.Sun()
moon = ephem.Moon()
obs = ephem.Observer()
obs.lon = (self.elon * np.pi / 180) # need radians
obs.lat = (self.nlat * np.pi / 180) # need radians
obs.elevation = self.elev # meters
for i in range(len(time)):
obs.date = str(time[i])
sun.compute(obs)
moon.compute(obs)
# LOGIT ASTRO TERMS
# Sun Alt goes from 0 to 90
# Moon phase goes from 0 to 1
salt = float(180 / np.pi * sun.transit_alt)
salt /= 90.0
mphase = moon.moon_phase
data["sun_alt"][i] = np.log(salt / (1.0 - salt))
data["moon_phase"][i] = np.log(mphase / (1.0 - mphase))
def regressTest(feattr, featcv, fluxtr, fluxcv):
alphas = np.logspace(-5, 1, 6, base=10)
models = []
for alpha in alphas:
models.append(linear_model.Ridge(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.Lasso(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.LassoLars(normalize=True, fit_intercept=True, alpha=alpha))
models.append(ensemble.RandomForestRegressor())
models.append(ensemble.ExtraTreesRegressor())
models.append(ensemble.AdaBoostRegressor())
models.append(ensemble.GradientBoostingRegressor(loss="lad", n_estimators=100))
models.append(ensemble.GradientBoostingRegressor(loss="lad", n_estimators=1000))
models.append(tree.DecisionTreeRegressor())
models.append(tree.ExtraTreeRegressor())
maes = []
for m in range(len(models)):
model = models[m]
fit = model.fit(feattr, fluxtr)
preds = fit.predict(featcv)
mae = metrics.mean_absolute_error(fluxcv, preds)
print " MAE %d: %.1f" % (m, mae)
maes.append(mae)
idx = np.argsort(maes)
model = models[idx[0]]
print "BEST", maes[idx[0]], model
return model.fit(np.vstack((feattr, featcv)),
np.hstack((fluxtr, fluxcv))
) # fit all data
def sigclip(data, switch):
mean = np.mean(data, axis=1)
std = np.std(data, axis=1)
idx = np.where(std == 0.0)
std[idx] = 1e10
if switch:
nsig = np.abs(data - mean[:,np.newaxis,:]) / std[:,np.newaxis,:]
else:
nsig = np.abs(data - mean[:,np.newaxis]) / std[:,np.newaxis]
idx = np.where(nsig > 3.0)
ma = np.ma.array(data)
ma[idx] = np.ma.masked
return ma.mean(axis=1).data
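# sigclip masks values more than 3 sigma from the mean along axis 1 and returns
# the clipped mean; below it is applied twice to the (NPTS, 11, 5) forecast
# arrays, presumably collapsing the 11 ensemble members and then the 5 hours.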
if __name__ == "__main__":
suffix = sys.argv[1]
trainFile = "gp2_train_%s.pickle" % (suffix)
predFile = "gp2_pred_%s.pickle" % (suffix)
if suffix.find("logit") > -1:
buff = open(trainFile, "rb")
train, fmin, fmax = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred, fmin, fmax = cPickle.load(buff)
buff.close()
else:
buff = open(trainFile, "rb")
train = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred = cPickle.load(buff)
buff.close()
# QUESTION: do we logit the flux? Not sure, might screw up CV interpretation
#pool = multiprocessing.Pool(multiprocessing.cpu_count())
#pool.map(int, range(multiprocessing.cpu_count())) # Trick to "warm up" the Pool
# Need to load the positions and times of training data
sdata = np.loadtxt("../station_info.csv", delimiter=",", skiprows=1,
dtype = [("stid", np.str_, 4),
("nlat", np.float64),
("elon", np.float64),
("elev", np.float64)])
fields = np.loadtxt("../train.csv", skiprows=1, delimiter=",", dtype=np.int64)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[:,0]]
Mesonet.dtimet = dates
mesonets = {}
for sidx in range(len(sdata)):
s = sdata[sidx]
station = Mesonet(s[0], s[1], s[2], s[3])
station.datat["flux"] = fields[:,sidx+1]
mesonets[s[0]] = station
# Dates of prediction data
fields = np.loadtxt("../sampleSubmission.csv", skiprows=1, delimiter=",", unpack=True).astype(np.int)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[0]]
Mesonet.dtimep = dates
sdates = [np.str(x) for x in fields[0]]
# Do we do Astro terms?
useAstro = 1
if useAstro:
for mesonet in mesonets.values():
mesonet.setAstro(mesonet.dtimet, mesonet.datat)
mesonet.setAstro(mesonet.dtimep, mesonet.datap)
nCv = 0
nTr = NPTSt-nCv
# Now regress all sites at once
print "ALL"
feattr = np.empty((nTr * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fluxtr = np.empty((nTr * len(mesonets.keys())))
fIdx = 0
for mKey in mesonets.keys():
for f in range(len(fKeys)):
fKey = fKeys[f]
data1 = sigclip(train[mKey].pdata[fKey].reshape((NPTSt, 11, 5)), True)
data2 = sigclip(data1, False)
feattr[fIdx*nTr:(fIdx*nTr + nTr),f] = data2
if useAstro:
feattr[fIdx*nTr:(fIdx*nTr + nTr),len(fKeys)] = mesonets[mKey].datat["sun_alt"]
feattr[fIdx*nTr:(fIdx*nTr + nTr),len(fKeys)+1] = mesonets[mKey].datat["moon_phase"]
fluxtr[fIdx*nTr:(fIdx*nTr + nTr)] = mesonets[mKey].datat["flux"]
fIdx += 1
# Actual regression
model = ensemble.GradientBoostingRegressor(loss="lad", n_estimators=1000)
fit = model.fit(feattr, fluxtr)
# Output data
dnames = ["Date"]
dtypes = [np.dtype("a8")]
fmats = ["%s"]
for key in sdata["stid"]:
dnames.append(key)
dtypes.append(np.float64)
fmats.append("%.1f")
outdata = np.recarray((len(Mesonet.dtimep,)), dtype={"names": dnames, "formats": dtypes})
outdata["Date"] = sdates
nPr = NPTSp
featpr = np.empty((nPr * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fIdx = 0
for mKey in mesonets.keys():
for f in range(len(fKeys)):
fKey = fKeys[f]
data1 = sigclip(pred[mKey].pdata[fKey].reshape((NPTSp, 11, 5)), True)
data2 = sigclip(data1, False)
featpr[fIdx*nPr:(fIdx*nPr + nPr),f] = data2
if useAstro:
featpr[fIdx*nPr:(fIdx*nPr + nPr),len(fKeys)] = mesonets[mKey].datap["sun_alt"]
featpr[fIdx*nPr:(fIdx*nPr + nPr),len(fKeys)+1] = mesonets[mKey].datap["moon_phase"]
fIdx += 1
fluxpr = fit.predict(featpr)
for m in range(len(mesonets.keys())):
outdata[mesonets.keys()[m]] = fluxpr[m*nPr:(m*nPr + nPr)]
np.savetxt("%s_out6_%d.txt" % (suffix, useAstro), outdata, fmt=fmats, delimiter=",")
print ",".join(outdata.dtype.names)
|
acbecker/solar
|
regress6submit.py
|
Python
|
mit
| 9,650
|
[
"Gaussian"
] |
97ea74b07aca4fb09453b283da2ff9405c0d28a50e535a92cb42d408c455bdad
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Variational Fourier Features in the GPflow framework
#
# In this notebook we demonstrate how new types of inducing variables can easily be incorporated in the GPflow framework. As an example case, we use the variational Fourier features from [Hensman, Durrande, and Solin (JMLR 2018)](http://jmlr.csail.mit.edu/papers/v18/16-579). All equation and table references are to this paper.
#
# **Note:** This implementation is meant as an example, not as a feature-complete implementation. For more features, such as multi-dimensional inputs, use the [GPflow 2 version of the original VFF code](https://github.com/st--/VFF).
#
# We cannot directly use Fourier features within the multi-output framework without losing the computational advantages, as `Kuu` and `Kuf` for SharedIndependent and SeparateIndependent inducing variables assume that the sub-inducing variable's covariances are simply computed as dense Tensors. However, there is nothing preventing a dedicated implementation of multi-output Fourier features that is computationally more efficient - feel free to discuss this within [the GPflow community](https://github.com/GPflow/GPflow/#the-gpflow-community)!
# %%
import tensorflow as tf
import numpy as np
import gpflow
from gpflow.inducing_variables import InducingVariables
from gpflow.base import TensorLike
from gpflow.utilities import to_default_float
from gpflow import covariances as cov
from gpflow import kullback_leiblers as kl
from gpflow.ci_utils import ci_niter
# %%
# VFF give structured covariance matrices that are computationally efficient.
# We take advantage of this using TensorFlow's LinearOperators:
BlockDiag = tf.linalg.LinearOperatorBlockDiag
Diag = tf.linalg.LinearOperatorDiag
LowRank = tf.linalg.LinearOperatorLowRankUpdate
# %%
import matplotlib.pyplot as plt
# %matplotlib inline
# %% [markdown]
# The VFF inducing variables are defined as a projection $u_m = \mathcal{P}_{\phi_m}(f)$ (eq. (59)) of the GP $f(\cdot)$ onto a truncated Fourier basis, $\phi_m = [1, \cos(\omega_1(x-a)),\dots,\cos(\omega_M(x-a)),\sin(\omega_1(x-a)),\dots,\sin(\omega_M(x-a))]$ (eq. (47)). To represent this we define a new inducing variables class that derives from the `InducingVariables` base class.
# %%
class FourierFeatures1D(InducingVariables):
def __init__(self, a, b, M):
"""
`a` and `b` define the interval [a, b] of the Fourier representation.
`M` specifies the number of frequencies to use.
"""
# [a, b] defining the interval of the Fourier representation:
self.a = gpflow.Parameter(a, dtype=gpflow.default_float())
self.b = gpflow.Parameter(b, dtype=gpflow.default_float())
# integer array defining the frequencies, ω_m = 2π m / (b - a):
self.ms = np.arange(M)
@property
def num_inducing(self):
""" number of inducing variables (defines dimensionality of q(u)) """
return 2 * tf.shape(self.ms)[0] - 1 # `M` cosine and `M-1` sine components
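# %% [markdown]
# As a quick sketch (not part of the original notebook): `M` frequencies give $2M - 1$ inducing variables, so `M = 3` yields 5:
# %%
iv_demo = FourierFeatures1D(0.0, 1.0, 3)
print(iv_demo.num_inducing)  # expected: 5 (3 cosine + 2 sine components)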
# %% [markdown]
# Next, we need to define how to compute $\mathrm{K}_\mathbf{uu} = \operatorname{cov}(u_m, u_{m'})$ (eq. (61)) and $\mathrm{K}_\mathbf{uf} = \operatorname{cov}(u_m, f(x_n))$ (eq. (60)).
# %%
@cov.Kuu.register(FourierFeatures1D, gpflow.kernels.Matern12)
def Kuu_matern12_fourierfeatures1d(inducing_variable, kernel, jitter=None):
a, b, ms = (lambda u: (u.a, u.b, u.ms))(inducing_variable)
omegas = 2.0 * np.pi * ms / (b - a)
# Cosine block:
lamb = 1.0 / kernel.lengthscales
two_or_four = to_default_float(tf.where(omegas == 0, 2.0, 4.0))
d_cos = (
(b - a) * (tf.square(lamb) + tf.square(omegas)) / lamb / kernel.variance / two_or_four
) # eq. (111)
v_cos = tf.ones_like(d_cos) / tf.sqrt(kernel.variance) # eq. (110)
cosine_block = LowRank(Diag(d_cos, is_positive_definite=True), v_cos[:, None])
# Sine block:
omegas = omegas[tf.not_equal(omegas, 0)] # the sine block does not include omega=0
d_sin = (
(b - a) * (tf.square(lamb) + tf.square(omegas)) / lamb / kernel.variance / 4.0
) # eq. (113)
sine_block = Diag(d_sin, is_positive_definite=True)
return BlockDiag([cosine_block, sine_block])
@cov.Kuf.register(FourierFeatures1D, gpflow.kernels.Matern12, TensorLike)
def Kuf_matern12_fourierfeatures1d(inducing_variable, kernel, X):
X = tf.squeeze(X, axis=1)
a, b, ms = (lambda u: (u.a, u.b, u.ms))(inducing_variable)
omegas = 2.0 * np.pi * ms / (b - a)
Kuf_cos = tf.cos(omegas[:, None] * (X[None, :] - a))
omegas_sin = omegas[tf.not_equal(omegas, 0)] # don't compute zero frequency
Kuf_sin = tf.sin(omegas_sin[:, None] * (X[None, :] - a))
# correct Kuf outside [a, b] -- see Table 1
Kuf_sin = tf.where((X < a) | (X > b), tf.zeros_like(Kuf_sin), Kuf_sin) # just zero
left_tail = tf.exp(-tf.abs(X - a) / kernel.lengthscales)[None, :]
right_tail = tf.exp(-tf.abs(X - b) / kernel.lengthscales)[None, :]
Kuf_cos = tf.where(X < a, left_tail, Kuf_cos) # replace with left tail
Kuf_cos = tf.where(X > b, right_tail, Kuf_cos) # replace with right tail
return tf.concat([Kuf_cos, Kuf_sin], axis=0)
@cov.Kuu.register(FourierFeatures1D, gpflow.kernels.Matern32)
def Kuu_matern32_fourierfeatures1d(inducing_variable, kernel, jitter=None):
a, b, ms = (lambda u: (u.a, u.b, u.ms))(inducing_variable)
omegas = 2.0 * np.pi * ms / (b - a)
# Cosine block: eq. (114)
lamb = np.sqrt(3.0) / kernel.lengthscales
four_or_eight = to_default_float(tf.where(omegas == 0, 4.0, 8.0))
d_cos = (
(b - a)
* tf.square(tf.square(lamb) + tf.square(omegas))
/ tf.pow(lamb, 3)
/ kernel.variance
/ four_or_eight
)
v_cos = tf.ones_like(d_cos) / tf.sqrt(kernel.variance)
cosine_block = LowRank(Diag(d_cos, is_positive_definite=True), v_cos[:, None])
# Sine block: eq. (115)
omegas = omegas[tf.not_equal(omegas, 0)] # don't compute omega=0
d_sin = (
(b - a)
* tf.square(tf.square(lamb) + tf.square(omegas))
/ tf.pow(lamb, 3)
/ kernel.variance
/ 8.0
)
v_sin = omegas / lamb / tf.sqrt(kernel.variance)
sine_block = LowRank(Diag(d_sin, is_positive_definite=True), v_sin[:, None])
return BlockDiag([cosine_block, sine_block]) # eq. (116)
@cov.Kuf.register(FourierFeatures1D, gpflow.kernels.Matern32, TensorLike)
def Kuf_matern32_fourierfeatures1d(inducing_variable, kernel, X):
X = tf.squeeze(X, axis=1)
a, b, ms = (lambda u: (u.a, u.b, u.ms))(inducing_variable)
omegas = 2.0 * np.pi * ms / (b - a)
Kuf_cos = tf.cos(omegas[:, None] * (X[None, :] - a))
omegas_sin = omegas[tf.not_equal(omegas, 0)] # don't compute zeros freq.
Kuf_sin = tf.sin(omegas_sin[:, None] * (X[None, :] - a))
# correct Kuf outside [a, b] -- see Table 1
def tail_cos(delta_X):
arg = np.sqrt(3) * tf.abs(delta_X) / kernel.lengthscales
return (1 + arg) * tf.exp(-arg)[None, :]
Kuf_cos = tf.where(X < a, tail_cos(X - a), Kuf_cos)
Kuf_cos = tf.where(X > b, tail_cos(X - b), Kuf_cos)
def tail_sin(delta_X):
arg = np.sqrt(3) * tf.abs(delta_X) / kernel.lengthscales
return delta_X[None, :] * tf.exp(-arg) * omegas_sin[:, None]
Kuf_sin = tf.where(X < a, tail_sin(X - a), Kuf_sin)
Kuf_sin = tf.where(X > b, tail_sin(X - b), Kuf_sin)
return tf.concat([Kuf_cos, Kuf_sin], axis=0)
# %% [markdown]
# In principle, this is all we need; however, to be able to take advantage of the structure of `Kuu`, we need to also implement new versions of the KL divergence from the prior to the approximate posterior (`prior_kl`) and the computation of the Gaussian process conditional (posterior) equations:
# %%
@kl.prior_kl.register(FourierFeatures1D, gpflow.kernels.Kernel, TensorLike, TensorLike)
def prior_kl_vff(inducing_variable, kernel, q_mu, q_sqrt, whiten=False):
if whiten:
raise NotImplementedError
K = cov.Kuu(inducing_variable, kernel)
return gauss_kl_vff(q_mu, q_sqrt, K)
def gauss_kl_vff(q_mu, q_sqrt, K):
"""
Compute the KL divergence from
q(x) = N(q_mu, q_sqrt^2)
to
p(x) = N(0, K)
q_mu is a vector [N, 1] that contains the mean.
q_sqrt is a matrix that is the lower triangular square-root matrix of the covariance of q.
K is a positive definite matrix: the covariance of p.
NOTE: K is a LinearOperator that provides efficient methods
for solve(), log_abs_determinant(), and trace()
"""
# KL(N₀ || N₁) = ½ [tr(Σ₁⁻¹ Σ₀) + (μ₁ - μ₀)ᵀ Σ₁⁻¹ (μ₁ - μ₀) - k + ln(det(Σ₁)/det(Σ₀))]
# N₀ = q; μ₀ = q_mu, Σ₀ = q_sqrt q_sqrtᵀ
# N₁ = p; μ₁ = 0, Σ₁ = K
# KL(q || p) =
# ½ [tr(K⁻¹ q_sqrt q_sqrtᵀ) + q_muᵀ K⁻¹ q_mu - k + logdet(K) - logdet(q_sqrt q_sqrtᵀ)]
# k = number of dimensions, if q_sqrt is m x m this is m²
Kinv_q_mu = K.solve(q_mu)
mahalanobis_term = tf.squeeze(tf.matmul(q_mu, Kinv_q_mu, transpose_a=True))
# GPflow: q_sqrt is num_latent_gps x N x N
num_latent_gps = to_default_float(tf.shape(q_mu)[1])
logdet_prior = num_latent_gps * K.log_abs_determinant()
product_of_dimensions__int = tf.reduce_prod(tf.shape(q_sqrt)[:-1]) # dimensions are integers
constant_term = to_default_float(product_of_dimensions__int)
Lq = tf.linalg.band_part(q_sqrt, -1, 0) # force lower triangle
logdet_q = tf.reduce_sum(tf.math.log(tf.square(tf.linalg.diag_part(Lq))))
# S = tf.matmul(q_sqrt, q_sqrt, transpose_b=True)
# trace_term = tf.trace(K.solve(S))
trace_term = tf.squeeze(
tf.reduce_sum(Lq * K.solve(Lq), axis=[-1, -2])
) # O(N²) instead of O(N³)
twoKL = trace_term + mahalanobis_term - constant_term + logdet_prior - logdet_q
return 0.5 * twoKL
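# %% [markdown]
# A small sanity check (a sketch, not in the original notebook): on a dense positive-definite `K` wrapped as a `LinearOperatorFullMatrix`, `gauss_kl_vff` should agree with GPflow's dense `gauss_kl`:
# %%
_rng = np.random.RandomState(0)
_L = np.tril(_rng.randn(5, 5)) + 5.0 * np.eye(5)
_K_dense = to_default_float(_L @ _L.T)
_K_op = tf.linalg.LinearOperatorFullMatrix(_K_dense, is_positive_definite=True)
_q_mu = to_default_float(_rng.randn(5, 1))
_q_sqrt = to_default_float(np.tril(_rng.randn(1, 5, 5)))
np.testing.assert_allclose(gauss_kl_vff(_q_mu, _q_sqrt, _K_op), kl.gauss_kl(_q_mu, _q_sqrt, _K_dense))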
# %%
import gpflow.posteriors
class VFFPosterior(gpflow.posteriors.BasePosterior):
def _conditional_fused(self, Xnew, full_cov, full_output_cov):
"""
Xnew is a tensor with the points of the data or minibatch, shape N x D
"""
if full_output_cov:
raise NotImplementedError
f = self._q_dist.q_mu
q_sqrt = self._q_dist.q_sqrt
# num_data = tf.shape(Xnew)[0] # M
num_func = tf.shape(f)[1] # K
Kuu = cov.Kuu(self.X_data, self.kernel) # this is now a LinearOperator
Kuf = cov.Kuf(self.X_data, self.kernel, Xnew) # still a Tensor
KuuInv_Kuf = Kuu.solve(Kuf)
# compute the covariance due to the conditioning
if full_cov:
fvar = self.kernel(Xnew) - tf.matmul(Kuf, KuuInv_Kuf, transpose_a=True)
shape = (num_func, 1, 1)
else:
KufT_KuuInv_Kuf_diag = tf.reduce_sum(Kuf * KuuInv_Kuf, axis=-2)
fvar = self.kernel(Xnew, full_cov=False) - KufT_KuuInv_Kuf_diag
shape = (num_func, 1)
fvar = tf.expand_dims(fvar, 0) * tf.ones(
shape, dtype=gpflow.default_float()
) # K x N x N or K x N
if self.whiten:
raise NotImplementedError
A = KuuInv_Kuf
# construct the conditional mean
fmean = tf.matmul(A, f, transpose_a=True)
if q_sqrt is not None:
if q_sqrt.get_shape().ndims == 2:
# LTA = A * tf.expand_dims(q_sqrt, 2) # K x M x N
# won't work # make ticket for this?
raise NotImplementedError
elif q_sqrt.get_shape().ndims == 3:
# L = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0) # K x M x M
# K x M x N
# A_tiled = tf.expand_dims(A.get(), 0) * tf.ones((num_func, 1, 1), dtype=float_type)
# LTA = tf.matmul(L, A_tiled, transpose_a=True) # K x M x N
# TODO the following won't work for K > 1
assert q_sqrt.shape[0] == 1
# LTA = (A.T @ DenseMatrix(q_sqrt[:,:,0])).T.get()[None, :, :]
ATL = tf.matmul(A, q_sqrt, transpose_a=True)
else:
raise ValueError("Bad dimension for q_sqrt: %s" % str(q_sqrt.get_shape().ndims))
if full_cov:
# fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # K x N x N
fvar = fvar + tf.matmul(ATL, ATL, transpose_b=True) # K x N x N
else:
# fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # K x N
fvar = fvar + tf.reduce_sum(tf.square(ATL), 2) # K x N
fvar = tf.transpose(fvar) # N x K or N x N x K
return fmean, fvar
# We can also provide a conditional that precomputes as much as possible,
# to speed up predictions:
def _precompute(self):
Kuu = cov.Kuu(self.X_data, self.kernel) # this is now a LinearOperator
q_mu = self._q_dist.q_mu
q_sqrt = self._q_dist.q_sqrt
if self.whiten:
raise NotImplementedError
else:
# alpha = Kuu⁻¹ q_mu
alpha = Kuu.solve(q_mu) # type: tf.Tensor
if self.whiten:
raise NotImplementedError
else:
# Qinv = Kuu⁻¹ - Kuu⁻¹ S Kuu⁻¹
KuuInv_qsqrt = Kuu.solve(q_sqrt)
KuuInv_covu_KuuInv = tf.matmul(KuuInv_qsqrt, KuuInv_qsqrt, transpose_b=True)
Qinv = Kuu.inverse().to_dense() - KuuInv_covu_KuuInv
return gpflow.posteriors.PrecomputedValue.wrap_alpha_Qinv(alpha, Qinv)
def _conditional_with_precompute(self, cache, Xnew, full_cov, full_output_cov):
alpha, Qinv = cache
if full_output_cov:
raise NotImplementedError
Kuf = cov.Kuf(self.X_data, self.kernel, Xnew) # still a Tensor
# construct the conditional mean
fmean = tf.matmul(Kuf, alpha, transpose_a=True)
num_func = tf.shape(alpha)[1] # K
Qinv_Kuf = tf.matmul(Qinv, Kuf)
# compute the covariance due to the conditioning
if full_cov:
fvar = self.kernel(Xnew) - tf.matmul(Kuf, Qinv_Kuf, transpose_a=True)
else:
KufT_Qinv_Kuf_diag = tf.reduce_sum(Kuf * Qinv_Kuf, axis=-2)
fvar = self.kernel(Xnew, full_cov=False) - KufT_Qinv_Kuf_diag
fvar = tf.transpose(fvar)
return fmean, fvar
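# %% [markdown]
# For reference, `_conditional_fused` above implements the standard (non-whitened)
# sparse-GP posterior. Writing $A = K_{uu}^{-1} K_{uf}$ and $S = q_{\mathrm{sqrt}} q_{\mathrm{sqrt}}^\top$,
# $$ \mathrm{mean} = A^\top q_\mu, \qquad \mathrm{cov} = K_{ff} - K_{uf}^\top A + A^\top S A. $$
# The only VFF-specific ingredient is that $K_{uu}$ is a structured `LinearOperator`,
# so $A$ comes from an efficient `solve` rather than a dense Cholesky factorisation.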
# %% [markdown]
# We now have to register our Posterior object:
# %%
@gpflow.posteriors.get_posterior_class.register(gpflow.kernels.Kernel, FourierFeatures1D)
def _get_posterior_vff(kernel, inducing_variable):
return VFFPosterior
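# %% [markdown]
# `get_posterior_class` dispatches on the pair of (kernel type, inducing-variable
# type): the registration above makes any kernel combined with a `FourierFeatures1D`
# inducing variable use our `VFFPosterior`.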
# %% [markdown]
# `gpflow.conditionals.conditional` is a short-hand for calling the fused prediction code path:
# %%
Mf = 2
M = 2 * Mf - 1
kernel = gpflow.kernels.Matern32()
inducing_variable = FourierFeatures1D(-0.5, 1.5, Mf)
Xnew = np.random.rand(7, 1)
f = np.random.randn(M, 1)
q_sqrt = tf.convert_to_tensor(np.tril(np.random.randn(1, M, M)))
conditional_f_mean, conditional_f_var = gpflow.conditionals.conditional(
Xnew, inducing_variable, kernel, f, q_sqrt=q_sqrt, white=False, full_cov=True
)
posterior = VFFPosterior(kernel, inducing_variable, f, q_sqrt, whiten=False, precompute_cache=None)
posterior_f_mean, posterior_f_var = posterior.fused_predict_f(Xnew, full_cov=True)
np.testing.assert_array_equal(conditional_f_mean, posterior_f_mean)
np.testing.assert_array_equal(conditional_f_var, posterior_f_var)
# %% [markdown]
# We can also call the cached path:
#
# %%
posterior.update_cache(gpflow.posteriors.PrecomputeCacheType.TENSOR)
precomputed_posterior_f_mean, precomputed_posterior_f_var = posterior.predict_f(Xnew, full_cov=True)
np.testing.assert_allclose(precomputed_posterior_f_mean, posterior_f_mean)
np.testing.assert_allclose(precomputed_posterior_f_var, posterior_f_var)
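# %% [markdown]
# The difference between the two prediction paths: `fused_predict_f` recomputes
# everything from the current parameter values on every call, whereas `predict_f`
# reuses the `(alpha, Qinv)` pair from `_precompute`, so repeated predictions avoid
# solving against $K_{uu}$ again.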
# %% [markdown]
# We now demonstrate how to use these new types of inducing variables with the `SVGP` model class. First, let's create some toy data:
# %%
X = np.linspace(-2, 2, 510)
Xnew = np.linspace(-4, 4, 501)
def f(x):
return np.cos(2 * np.pi * x / 4 * 2)
F = f(X)
Fnew = f(Xnew)
noise_scale = 0.1
np.random.seed(1)
Y = F + np.random.randn(*F.shape) * noise_scale
data = (X.reshape(-1, 1), Y.reshape(-1, 1))
# %%
plt.figure()
plt.plot(X, F, label="f(x)")
plt.plot(X, Y, ".", label="observations")
plt.legend()
plt.show()
# %% [markdown]
# Setting up an SVGP model with variational Fourier feature inducing variables is as simple as replacing the `inducing_variable` argument:
# %%
Mfreq = 9
m = gpflow.models.SVGP(
kernel=gpflow.kernels.Matern32(),
likelihood=gpflow.likelihoods.Gaussian(variance=noise_scale ** 2),
inducing_variable=FourierFeatures1D(-4.5, 4.5, Mfreq),
num_data=len(X),
whiten=False,
)
gpflow.set_trainable(m.kernel, False)
gpflow.set_trainable(m.likelihood, False)
gpflow.set_trainable(m.inducing_variable, True) # whether to optimize bounds [a, b]
# %%
opt = gpflow.optimizers.Scipy()
opt.minimize(
m.training_loss_closure(data),
m.trainable_variables,
options=dict(maxiter=ci_niter(5000)),
)
gpflow.utilities.print_summary(m, fmt="notebook")
# %% [markdown]
# For comparison we also construct an SVGP model using inducing points and an exact GPR model:
# %%
m_ip = gpflow.models.SVGP(
kernel=gpflow.kernels.Matern32(),
likelihood=gpflow.likelihoods.Gaussian(variance=noise_scale ** 2),
inducing_variable=np.linspace(-2, 2, Mfreq * 2 - 1)[:, None],
num_data=len(X),
whiten=False,
)
gpflow.set_trainable(m_ip.kernel, False)
gpflow.set_trainable(m_ip.likelihood, False)
gpflow.set_trainable(m_ip.inducing_variable, True) # whether to optimize inducing point locations
# %%
opt = gpflow.optimizers.Scipy()
opt.minimize(
m_ip.training_loss_closure(data),
m_ip.trainable_variables,
options=dict(maxiter=ci_niter(5000)),
)
gpflow.utilities.print_summary(m_ip, fmt="notebook")
# %%
m_ref = gpflow.models.GPR((X.reshape(-1, 1), Y.reshape(-1, 1)), kernel=gpflow.kernels.Matern32())
m_ref.likelihood.variance = np.array(noise_scale ** 2).astype(np.float64)
gpflow.set_trainable(m_ref.kernel, False)
gpflow.set_trainable(m_ref.likelihood, False)
# Because we fixed the kernel and likelihood hyperparameters, we don't need to optimize anything.
gpflow.utilities.print_summary(m_ref, fmt="notebook")
# %%
exact_gpr_lml = m_ref.log_marginal_likelihood().numpy().item()
print("LML (exact GPR) =", exact_gpr_lml)
ip_svgp_elbo = m_ip.elbo(data).numpy().item()
print("ELBO (SVGP, inducing points) =", ip_svgp_elbo)
vff_svgp_elbo = m.elbo(data).numpy().item()
print("ELBO (SVGP, Fourier features) =", vff_svgp_elbo)
# %%
def plot_gp(m, Xnew, name=""):
Fmean, Fvar = m.predict_f(Xnew[:, None])
Fmean = Fmean.numpy().squeeze()
Fvar = Fvar.numpy().squeeze()
(p,) = plt.plot(Xnew, Fmean, label=name)
plt.fill_between(
Xnew, Fmean - 2 * np.sqrt(Fvar), Fmean + 2 * np.sqrt(Fvar), alpha=0.3, color=p.get_color()
)
def plot_data():
plt.plot(Xnew, Fnew, label="f(x)")
plt.plot(X, Y, ".", label="observations")
plt.figure(figsize=(15, 10))
plot_data()
plot_gp(m, Xnew, "VFF [ELBO={:.3}]".format(vff_svgp_elbo))
plot_gp(m_ip, Xnew, "inducing points [ELBO={:.3}]".format(ip_svgp_elbo))
plot_gp(m_ref, Xnew, "exact [LML={:.3}]".format(exact_gpr_lml))
plt.legend(loc="best")
plt.show()
|
GPflow/GPflow
|
doc/source/notebooks/advanced/variational_fourier_features.pct.py
|
Python
|
apache-2.0
| 19,554
|
[
"Gaussian"
] |
c5024673046fe30cf72c948dc4fe3ed74d0cd56961e1712f683a7faea64f92b7
|
import logging
from flask import current_app
from flask_sqlalchemy import BaseQuery  # the flask.ext.* import namespace is deprecated
from sqlalchemy import ForeignKey
from sqlalchemy.sql import desc, text
from sqlalchemy.sql.expression import func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import Column
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.types import Boolean, DateTime, Integer, String, Text
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import SearchQueryMixin
from registry.extensions import db
from datetime import datetime
from uuid import uuid4
class PassportQuery(BaseQuery, SearchQueryMixin):
pass
class Passport(db.Model):
"""DB representation of a single passport."""
__tablename__ = 'passport'
query_class = PassportQuery
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
pass_id = Column(Integer, nullable=False, unique=True)
name = Column(String(length=128), nullable=False)
surname = Column(String(length=128), nullable=False)
age = Column(Integer, nullable=False)
address = Column(Text())
phone = Column(String(length=128), nullable=False)
email = Column(String(length=128))
notes = Column(Text())
flags = Column(MutableDict.as_mutable(JSONB))
infos_wanted = Column(Boolean, default=False)
photos_allowed = Column(Boolean, default=False)
lexemes = Column(TSVectorType('surname', 'name', regconfig='german'),
nullable=False)
group_id = Column(UUID(as_uuid=True), ForeignKey('group.id'))
visits = relationship('Visit', backref=backref('passport'),
order_by='desc(Visit.timestamp)')
def __to_dict__(self):
return dict(
id=str(self.id),
pass_id=self.pass_id,
name=self.name,
surname=self.surname,
age=self.age,
address=self.address,
phone=self.phone,
email=self.email,
notes=self.notes,
flags=self.flags,
infos_wanted=self.infos_wanted,
photos_allowed=self.photos_allowed,
group_id=str(self.group_id))
@property
def is_active(self):
"""Property indicating whether the passport has any visits at all"""
return len(self.visits) > 0
@property
def checked_in(self):
return len(self.visits) > 0 and not self.visits[0].check_out
def check_in(self, when=None, commit=True):
"""Check in passport, creating a new visit
:param when: timestamp to use for checkin, defaults to datetime.now()
"""
visit = Visit()
visit.passport = self
visit.check_in = when if when else datetime.now()
db.session.add(visit)
if commit:
db.session.commit()
return visit
def check_out(self, when=None, commit=True):
"""Check out passport.
This either closes the last visit or creates a new visit with only
        a checkout time.
:param when: timestamp to use for checkout, defaults to datetime.now()
"""
if len(self.visits) and not self.visits[0].check_out:
current_visit = self.visits[0]
else:
current_visit = Visit()
current_visit.passport = self
current_visit.check_out = when if when else datetime.now()
if commit:
db.session.commit()
return current_visit
@classmethod
def get(cls, pass_id):
try:
return cls.query.filter(cls.pass_id == pass_id).one()
        except Exception:
            msg = 'No unique result found for pass_id "%d"' % pass_id
logger = logging.getLogger(__name__)
logger.exception(msg)
@classmethod
def active_passes(cls):
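        """Return a query for passports whose most recent visit is still open.
        A window function ranks each passport's visits by recency; only the
        newest visit per passport (row_number == 1) lacking a check_out is kept.
        """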
last_visits = db.session.query(
Visit,
func.row_number().over(
partition_by=Visit.passport_id,
order_by=desc(Visit.timestamp)).label('row_number')) \
.subquery('lv')
open_visits = db.session.query(Visit) \
.select_entity_from(last_visits) \
.filter(last_visits.c.row_number == 1) \
.filter(last_visits.c.check_out == None) \
.subquery()
open_passports = db.session.query(Passport).join(open_visits)
return open_passports
def __repr__(self):
return u'<Passport (%r)>' % self.pass_id
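# Illustrative usage sketch (assumes an application context and a populated
# database; the pass_id value is hypothetical):
#     passport = Passport.get(42)   # look up by human-readable pass_id
#     passport.check_in()           # opens a new Visit
#     passport.checked_in           # True while the latest visit is open
#     passport.check_out()          # closes the open visit, or records a
#                                   # lone checkout if none was open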
class Visit(db.Model):
"""DB representation of a single visit."""
__tablename__ = 'visit'
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
passport_id = Column(UUID(as_uuid=True), ForeignKey('passport.id'),
index=True)
check_in = Column(DateTime(timezone=False), index=True)
check_out = Column(DateTime(timezone=False), index=True)
sweeped = Column(Boolean, nullable=True)
def __to_dict__(self):
return dict(
id=str(self.id),
passport_id=str(self.passport_id),
check_in=self.check_in.isoformat() if self.check_in else None,
check_out=self.check_out.isoformat() if self.check_out else None,
sweeped=self.sweeped)
@hybrid_property
def timestamp(self):
"""Property giving timestamp for last visit check in or check out"""
return self.check_out if self.check_out else self.check_in
@timestamp.expression
def timestamp(cls):
return text('GREATEST(%s, %s)' % (cls.check_in, cls.check_out))
@property
def is_open(self):
"""Shorthand property to check if visit is still open"""
return self.check_out is None
@classmethod
def sweep(cls):
Visit.query \
.filter(Visit.check_out == None) \
.update({Visit.sweeped: True, Visit.check_out: datetime.now()})
db.session.flush()
db.session.commit()
@classmethod
def binned(cls, bin_size=None):
if not bin_size:
bin_size = current_app.config['CHART_BIN_SIZE']
sql = """
WITH checks AS (
SELECT
True AS is_check_in,
ts_round(check_in, %(bin_size)s) AS ts
FROM
visit
WHERE
check_in IS NOT NULL
UNION ALL
SELECT
False AS is_check_in,
ts_round(check_out + interval '%(bin_size)s seconds', %(bin_size)s) AS ts
FROM
visit
WHERE
check_out IS NOT NULL
)
SELECT
ts,
is_check_in,
count(*)
FROM
checks
GROUP BY is_check_in, ts
ORDER BY ts ASC
"""
result = db.engine.execute(sql, bin_size=bin_size)
return result.fetchall()
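    # NOTE: ``ts_round`` above is assumed to be a custom SQL function provided by
    # the database (e.g. created in a migration, not shown here) that rounds a
    # timestamp down to a multiple of ``bin_size`` seconds; an illustrative
    # definition:
    #   CREATE FUNCTION ts_round(timestamp, integer) RETURNS timestamp AS
    #   $$ SELECT to_timestamp(floor(extract(epoch FROM $1) / $2) * $2)::timestamp $$
    #   LANGUAGE SQL IMMUTABLE;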
class Group(db.Model):
__tablename__ = 'group'
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
name = Column(String(length=128), nullable=False, unique=True)
flags = Column(JSONB)
passports = relationship(Passport, backref='group')
def __to_dict__(self):
return dict(
id=str(self.id),
name=self.name,
flags=self.flags)
def commit_model(cls, *args, **kwargs):
model = cls(*args, **kwargs)
db.session.add(model)
db.session.commit()
return model
|
arsgeografica/kinderstadt-registry
|
registry/models.py
|
Python
|
gpl-3.0
| 7,534
|
[
"VisIt"
] |
3c2ebfa6f63e5c13a94b06c8cacd3d93b9d78347485a13859e68ec6107860385
|
#!/usr/bin/python
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
import traceback
from optparse import make_option
from os import makedirs, path, listdir, remove, rename, _exit
import os, sys, errno, shutil, re
from glob import glob
from datetime import date
    #from metapaths_utils import parse_command_line_parameters
from libs.python_modules.utils.sysutil import getstatusoutput, pathDelim
#from libs.python_modules.utils.utils import *, hasInput, createFolderIfNotFound
from libs.python_modules.utils.utils import *
from libs.python_modules.parsers.parse import parse_metapaths_parameters
from libs.python_modules.pipeline.metapathways_pipeline import print_commands, execute_tasks
from libs.python_modules.pipeline.MetaPathways_gather_run_stats import MetaPathways_gather_run_stats
from libs.python_modules.utils.metapathways_utils import fprintf, printf, eprintf, remove_existing_pgdb, exit_process, WorkflowLogger, generate_log_fp
from libs.python_modules.pipeline.sampledata import *
from libs.python_modules.pipeline.jobscreator import *
from libs.python_modules.pipeline.commands import *
import libs.python_scripts
except:
print(""" Could not load some user defined module functions""")
print(""" Make sure your typed \"source MetaPathwaysrc\" """)
sys.exit(3)
PATHDELIM = pathDelim()
def copyFile(src, dst):
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def dry_run_status( commands ):
for command in commands:
printf("%s", command[0])
if command[4] == True:
printf("%s", " Required")
else:
printf("%s", " Not Required")
printf("\n")
def get_refdb_name( dbstring ):
dbstring = dbstring.rstrip()
dbstring = dbstring.lstrip()
dbstring = dbstring.lower()
return dbstring
def format_db(formatdb_executable, seqType, raw_sequence_file, formatted_db, algorithm):
_temp_formatted_db = formatted_db+ "__temp__"
""" format with 4GB file size """
if algorithm=='BLAST':
cmd='%s -dbtype %s --max_file_sz 4294967296 -in %s -out %s' %(formatdb_executable, seqType, raw_sequence_file, _temp_formatted_db)
if algorithm=='LAST':
# dirname = os.path.dirname(raw_sequence_file)
cmd='%s -s 4G -p -c %s %s' %(formatdb_executable, _temp_formatted_db, raw_sequence_file)
result= getstatusoutput(cmd)
temp_fileList = glob(_temp_formatted_db + '*')
try:
for tempFile in temp_fileList:
file = re.sub('__temp__','', tempFile)
rename( tempFile, file);
except:
return False
if result[0]==0:
return True
else:
return False
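# For example (illustrative executable and file names, not from this repo), with
# algorithm='BLAST' the command assembled above looks like:
#   makeblastdb -dbtype prot --max_file_sz 4294967296 -in refdb.faa -out refdb.faa__temp__
# and the '__temp__' suffix is stripped from the output files once formatting succeeds.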
# convert an input gbk file to fna faa and gff file
def convert_gbk_to_fna_faa_gff(input_gbk, output_fna, output_faa, output_gff, config_settings):
cmd = "%s -g %s --output-fna %s --output-faa %s --output-gff %s" %((config_settings['METAPATHWAYS_PATH'] \
+ config_settings['GBK_TO_FNA_FAA_GFF']), input_gbk, output_fna, output_faa, output_gff)
return cmd
# convert an input gff file to fna faa and gff file
def convert_gff_to_fna_faa_gff(inputs, outputs, config_settings):
cmd = "%s " %(config_settings['METAPATHWAYS_PATH']+ config_settings['GFF_TO_FNA_FAA_GFF'])
for source, target in zip(inputs, outputs):
cmd += ' --source ' + source + ' --target ' + target
return cmd
def make_sure_map_file_exists(config_settings, dbname, globallogger = None):
dbmapFile = config_settings['REFDBS'] + PATHDELIM + 'functional' + PATHDELIM + 'formatted' + PATHDELIM + dbname + "-names.txt"
seqFilePath = config_settings['REFDBS'] + PATHDELIM + 'functional' + PATHDELIM + dbname
if not doFilesExist( [dbmapFile ] ):
eprintf("WARNING: Trying to create database map file for %s\n", dbname)
if globallogger!= None:
globallogger.write("WARNING: Trying to create database map file for %s\n" %( dbname) )
if not doFilesExist( [seqFilePath] ):
eprintf("ERROR : You do not even have the raw sequence for Database %s to format!\n", dbname)
eprintf(" : Make sure you have the file %s\n", seqFilePath)
if globallogger!= None:
globallogger.write("ERROR \t You do not even have the raw sequence for Database %s to format!\n" %( dbname))
globallogger.write("Make sure you have the file %s\n" %( seqFilePath))
exit_process()
mapfile = open(dbmapFile,'w')
seqFile = open(seqFilePath,'r')
for line in seqFile:
if re.match(r'>', line):
fprintf(mapfile, "%s\n",line.strip())
seqFile.close()
mapfile.close()
return dbmapFile
# create the command to make the MLTreeMap Images
def create_MLTreeMap_Imagemaker(mltreemap_image_output, mltreemap_final_outputs, config_settings):
executable_path = config_settings['MLTREEMAP_IMAGEMAKER']
if not path.isfile( executable_path):
executable_path = config_settings['METAPATHWAYS_PATH'] + executable_path
cmd= "%s -i %s -o %s -m a" %(executable_path, mltreemap_final_outputs, mltreemap_image_output)
return cmd
# gather mltreemap calculations
def create_MLTreeMap_Hits(mltreemap_output_dir, output_folder, config_settings):
cmd= "%s -i %s -o %s" %(config_settings['METAPATHWAYS_PATH'] + config_settings['MLTREEMAP_HITS'], mltreemap_output_dir, output_folder +PATHDELIM + 'sequence_to_cog_hits.txt')
return cmd
# gets the parameter value from a category as specified in the
# parameter file
def get_parameter(params, category, field, default = None):
if params == None:
return default
if category in params:
if field in params[category]:
return params[category][field]
else:
return default
return default
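# Illustrative use (hypothetical values):
#   get_parameter(params, 'annotation', 'max_evalue', default='0.000001')
# returns the configured cutoff, falling back to the default when the category
# or field is missing from the parameter file.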
# gets the parameter value from a category as specified in the parameter file
def get_make_parameter(params,category, field, default = False):
if category in params:
if field in params[category]:
return params[category][field]
else:
return default
return default
def get_pipeline_steps(steps_log_file):
try:
logfile = open(steps_log_file, 'r')
except IOError:
eprintf("Did not find %s!\n", logfile)
eprintf("Try running in \'complete\' run-type\n")
else:
lines = logfile.readlines()
pipeline_steps = None
return pipeline_steps
def write_run_parameters_file(fileName, parameters):
try:
paramFile = open(fileName, 'w')
except IOError:
eprintf("Cannot write run parameters to file %s!\n", fileName)
exit_process("Cannot write run parameters to file %s" %(fileName) )
# 16s_rRNA {'min_identity': '40', 'max_evalue': '0.000001', 'min_bitscore': '06', 'refdbs': 'silva_104_rep_set,greengenes_db_DW'}
paramFile.write("\nRun Date : " + str(date.today()) + " \n")
paramFile.write("\n\nNucleotide Quality Control parameters[s.n")
paramFile.write( " min length" + "\t" + str(parameters['quality_control']['min_length']) + "\n")
paramFile.write("\n\nORF prediction parameters[s.n")
paramFile.write( " min length" + "\t" + str(parameters['orf_prediction']['min_length']) + "\n")
paramFile.write( " algorithm" + "\t" + str(parameters['orf_prediction']['algorithm']) + "\n")
paramFile.write("\n\nAmino acid quality control and annotation parameters[s.n")
paramFile.write( " min bit score" + "\t" + str(parameters['annotation']['min_score']) + "\n")
paramFile.write( " min seq length" + "\t" + str(parameters['annotation']['min_length']) + "\n")
paramFile.write( " annotation reference dbs" + "\t" + str(parameters['annotation']['dbs']) + "\n")
paramFile.write( " min BSR" + "\t" + str(parameters['annotation']['min_bsr']) + "\n")
paramFile.write( " max evalue" + "\t" + str(parameters['annotation']['max_evalue']) + "\n")
paramFile.write("\n\nPathway Tools parameters[s.n")
paramFile.write( " taxonomic pruning " + "\t" + str(parameters['ptools_settings']['taxonomic_pruning']) + "\n")
paramFile.write("\n\nrRNA search/match parameters[s.n")
paramFile.write( " min identity" + "\t" + str(parameters['rRNA']['min_identity']) + "\n")
paramFile.write( " max evalue" + "\t" + str(parameters['rRNA']['max_evalue']) + "\n")
paramFile.write( " rRNA reference dbs" + "\t" + str(parameters['rRNA']['refdbs']) + "\n")
paramFile.close()
# checks if the necessary files, directories and executables really exist or not
def check_config_settings(config_settings, file, globalerrorlogger = None):
essentialItems= ['METAPATHWAYS_PATH', 'EXECUTABLES_DIR', 'RESOURCES_DIR']
missingItems = []
for key, value in config_settings.items():
# these are not files or executables
if key in ['NUM_CPUS', 'FORMATTED_DB_SIZE' ]:
continue
if key in ['FORMATDB_EXECUTABLE', 'BLASTP_EXECUTABLE', 'BLASTN_EXECUTABLE' ] and value=='':
continue
# make sure MetaPathways directory is present
if key in ['METAPATHWAYS_PATH' ]:
if not path.isdir( config_settings[key]) :
eprintf("ERROR: Path for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n", key, file)
eprintf("ERROR: 1.Currently it is set to \"%s\"\n", config_settings[key] )
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tPath for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n" %(key, file))
globalerrorlogger.write(" Currently it is set to \"%s\". Please correct it and try again.\n" %(config_settings[key] ) )
missingItems.append(key)
continue
# make sure REFDB directories are present
if key in [ 'REFDBS' ]:
if not path.isdir( config_settings[key]) :
eprintf("ERROR: Path for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n", key, file)
eprintf("ERROR: 2.Currently it is set to \"%s\"\n", config_settings[key] )
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tPath for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n" %(key,file))
globalerrorlogger.write("Currently it is set to \"%s\". Please correct it and try again.\n" %( config_settings[key]) )
missingItems.append(key)
continue
# make sure EXECUTABLES_DIR directories are present
if key in [ 'EXECUTABLES_DIR']:
if not path.isdir( config_settings['METAPATHWAYS_PATH'] + PATHDELIM + config_settings[key]) :
eprintf("ERROR: Path for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n", key, file)
eprintf("ERROR: 3.Currently it is set to \"%s\"\n", config_settings[key] )
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tPath for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n" %(key, file))
globalerrorlogger.write("Currently it is set to \"%s\". Please correct the path.\n" %( config_settings[key] ))
missingItems.append(key)
continue
if key in [ 'ACCESSION_TO_TAXONID']:
if not path.isfile( config_settings['REFDBS'] + PATHDELIM + 'ncbi_tree' + PATHDELIM + config_settings[key]) :
eprintf("ERROR: Path for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n", key, file)
eprintf("ERROR: 7.Currently it is set to \"%s\"\n", config_settings['REFDBS'] + PATHDELIM + 'ncbi_tree' + PATHDELIM +config_settings[key] )
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tPath for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n" %(key, file))
globalerrorlogger.write("Currently it is set to \"%s\". Please correct the path to compute LCA with accession id translation.\n" %( config_settings[key] ))
missingItems.append(key)
continue
# make sure RESOURCES_DIR directories are present
if key in [ 'RESOURCES_DIR']:
if not path.isdir( config_settings['METAPATHWAYS_PATH'] + PATHDELIM + config_settings[key]) :
eprintf("ERROR: Path for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n", key, file)
eprintf("ERROR: 4.Currently it is set to \"%s\"\n", config_settings['METAPATHWAYS_PATH'] + PATHDELIM + config_settings[key] )
print(config_settings['METAPATHWAYS_PATH'], config_settings[key])
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tPath for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n" %(key, file))
globalerrorlogger.write("Currently it is set to \"%s\"\n" %( config_settings[key]))
missingItems.append(key)
continue
# make sure MetaPaths directory is present
if key in ['PYTHON_EXECUTABLE' , 'PATHOLOGIC_EXECUTABLE' ]:
if not path.isfile( config_settings[key]) :
eprintf("ERROR: Path for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n", key, file)
eprintf("ERROR: 5.Currently it is set to \"%s\"\n", config_settings[key] )
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tPath for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n" %(key, file))
globalerrorlogger.write("Currently it is set to \"%s\"\n" %( config_settings[key] ) )
missingItems.append(key)
continue
# ignore pgdb folder for now
if key in ['PGDB_FOLDER' ]:
continue
# check if the desired file exists. if not, then print a message
if not path.isfile( config_settings['METAPATHWAYS_PATH'] + PATHDELIM + value)\
and not path.isfile( config_settings['METAPATHWAYS_PATH'] + PATHDELIM + config_settings['EXECUTABLES_DIR'] + PATHDELIM + value ) :
eprintf("ERROR:Path for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n", key, file)
eprintf("6.Currently it is set to \"%s\"\n", config_settings['METAPATHWAYS_PATH']+ PATHDELIM + config_settings['EXECUTABLES_DIR'] + PATHDELIM + value )
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tPath for \"%s\" is NOT set properly (or missing) in configuration file \"%s\"\n" %(key, file) )
globalerrorlogger.write("Currently it is set to \"%s\"\n" %(config_settings['METAPATHWAYS_PATH'] + value))
missingItems.append(key)
continue
stop_execution = False
for item in missingItems:
if item in essentialItems:
eprintf("ERROR\t Essential field in setting %s is missing in configuration file!\n", item)
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tEssential field in setting %s is missing in configuration file!\n" %(item))
stop_execution = True
if stop_execution ==True:
eprintf("ERROR: Terminating execution due to missing essential fields in configuration file!\n")
if globalerrorlogger!=None:
globalerrorlogger.write("ERROR\tTerminating execution due to missing essential fields in configuration file!\n")
exit_process()
# This function reads the pipeline configuration file and sets the
# paths to the different scripts and executables that the pipeline calls
def read_pipeline_configuration( file, globallogger ):
patternKEYVALUE = re.compile(r'^([^\t\s]+)[\t\s]+\'(.*)\'')
try:
configfile = open(file, 'r')
except IOError:
eprintf("ERROR :Did not find pipeline config %s!\n", file)
globalerrorlogger.write("ERROR\tDid not find pipeline config %s!\n" %(file))
else:
lines = configfile.readlines()
config_settings = {}
for line in lines:
if not re.match("#",line) and len(line.strip()) > 0 :
line = line.strip()
result = patternKEYVALUE.search(line)
try:
if len(result.groups()) == 2:
fields = result.groups()
else:
eprintf(" The following line in your config settings files is not set up yet\n")
eprintf(" Please rerun the pipeline after setting up this line\n")
eprintf(" Error in line : %s\n", line)
                    globallogger.write(
"WARNING\t\n"+\
" The following line in your config settings files is not set up yet\n"+\
" Please rerun the pipeline after setting up this line\n"+\
" Error in line : %s\n" %(line))
exit_process()
except:
eprintf(" The following line in your config settings files is not set up yet\n")
eprintf(" Please rerun the pipeline after setting up this line\n")
eprintf(" Error ine line : %s\n", line)
                globallogger.write(
"WARNING\t\n"+\
" The following line in your config settings files is not set up yet\n"+\
" Please rerun the pipeline after setting up this line\n"+\
" Error in line : %s\n" %(line))
exit_process()
if PATHDELIM=='\\':
config_settings[fields[0]] = re.sub(r'/',r'\\',fields[1])
else:
config_settings[fields[0]] = re.sub(r'\\','/',fields[1])
config_settings['METAPATHWAYS_PATH'] = config_settings['METAPATHWAYS_PATH'] + PATHDELIM
config_settings['REFDBS'] = config_settings['REFDBS'] + PATHDELIM
check_config_settings(config_settings, file, globallogger);
config_settings['configuration_file'] = file
return config_settings
#check for empty values in parameter settings
def checkMissingParam_values(params, choices, logger = None):
reqdCategoryParams = {
'annotation': {'dbs': False},
'orf_prediction':{},
'rRNA':{},
'metapaths_steps':{}
}
success = True
for category in choices:
for parameter in choices[category]:
if (not params[category][parameter]) and\
( (category in reqdCategoryParams) and\
(parameter in reqdCategoryParams[category]) and reqdCategoryParams[category][parameter]) :
print(category, parameter)
print(reqdCategoryParams)
print(reqdCategoryParams[category])
eprintf('ERROR: Empty parameter %s of type %s\n' %(parameter, category))
eprintf('Please select at least one database for %s\n' %(category))
if logger!=None:
logger.write('ERROR\tEmpty parameter %s of type %s\n' %(parameter, category))
logger.write('Please select at least one database for %s\n' %(category))
success = False
return success
# check if all of the metapaths_steps have
# settings from the valid list [yes, skip, stop, redo]
def checkParam_values(allcategorychoices, parameters, runlogger = None):
for category in allcategorychoices:
for choice in allcategorychoices[category]:
if choice in parameters:
if not parameters[choice] in allcategorychoices[category][choice]:
                    if runlogger != None:
                        runlogger.write('ERROR\tIncorrect setting in your parameter file')
                        runlogger.write(' for step %s as %s' % (choice, parameters[choice]))
                    eprintf("ERROR: Incorrect setting in your parameter file" +\
                            " for step %s as %s", choice, parameters[choice])
exit_process()
def checkMetapathsteps(params, runlogger = None):
choices = { 'metapaths_steps':{}, 'annotation':{}, 'INPUT':{} }
choices['INPUT']['format'] = ['fasta', 'gbk_unannotated', 'gbk_annotated', 'gff_unannotated', 'gff_annotated']
choices['annotation']['algorithm'] = ['last', 'blast']
choices['metapaths_steps']['PREPROCESS_FASTA'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['ORF_PREDICTION'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['GFF_TO_AMINO'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['FILTERED_FASTA'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['COMPUTE_REFSCORE'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['BLAST_REFDB'] = ['yes', 'skip', 'stop', 'redo', 'grid']
    choices['metapaths_steps']['PARSE_BLAST'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['SCAN_rRNA'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['STATS_rRNA'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['ANNOTATE'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['PATHOLOGIC_INPUT'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['GENBANK_FILE'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['CREATE_SEQUIN_FILE'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['CREATE_REPORT_FILES'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['SCAN_tRNA'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['MLTREEMAP_CALCULATION'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['MLTREEMAP_IMAGEMAKER'] = ['yes', 'skip', 'stop', 'redo']
choices['metapaths_steps']['PATHOLOGIC'] = ['yes', 'skip', 'stop', 'redo']
if params['metapaths_steps']:
checkParam_values(choices, params['metapaths_steps'], runlogger)
checkparams = {}
checkparams['annotation'] = []
checkparams['annotation'].append('dbs')
if not checkMissingParam_values(params, checkparams, runlogger):
exit_process("Missing parameters")
def copy_fna_faa_gff_orf_prediction( source_files, target_files, config_settings) :
for source, target in zip(source_files, target_files):
sourcefile = open(source, 'r')
targetfile = open(target, 'w')
sourcelines = sourcefile.readlines()
for line in sourcelines:
fprintf(targetfile, "%s\n", line.strip())
sourcefile.close()
targetfile.close()
#################################################################################
########################### BEFORE BLAST ########################################
#################################################################################
def run_metapathways(samplesData, output_dir, all_samples_output_dir, globallogger,\
command_line_params, params, metapaths_config, status_update_callback,\
config_file, run_type, config_settings = None, block_mode = False, runid = ""):
jobcreator = JobCreator(params, config_settings)
sorted_samplesData_keys = sorted(samplesData.keys())
for input_file in sorted_samplesData_keys:
s = samplesData[input_file]
jobcreator.addJobs(s, block_mode = block_mode)
_params = Singleton(Params)(params)
if block_mode:
eprintf("============== RUNNING STEPS IN BLOCK 0 ================\n")
for input_file in sorted_samplesData_keys:
s = samplesData[input_file]
s.stepslogger.printf("\n\n============== BEGIN RUN " + s.sample_name + " " + runid + " BLOCK0 ================\n")
sample_name_banner = "PROCESSING INPUT " + input_file
eprintf('\n'+ '#'*len(sample_name_banner) + "\n")
eprintf( '\n' + sample_name_banner + ' [STEPS BLOCK 0] ' + '\n')
s.writeParamsToRunLogs(_params)
try:
execute_tasks(s, verbose = command_line_params['verbose'], block = 0)
except:
print(traceback.print_exc(10))
pass
for input_file in sorted_samplesData_keys:
s = samplesData[input_file]
s.stepslogger.printf("\n\n============== BEGIN RUN " + s.sample_name + " " + runid + " BLOCK1 ================\n")
sample_name_banner = "PROCESSING INPUT " + input_file
eprintf('\n' + '#'*len(sample_name_banner) + "\n")
eprintf( '\n' + sample_name_banner + ' [STEPS BLOCK 1] ' + '\n')
try:
execute_tasks(s, verbose = command_line_params['verbose'], block = 1)
except:
pass
for input_file in sorted_samplesData_keys:
s = samplesData[input_file]
s.stepslogger.printf("\n\n============== BEGIN RUN " + s.sample_name + " " + runid + " BLOCK2 ================\n")
sample_name_banner = "PROCESSING INPUT " + input_file
eprintf('\n' + '#'*len(sample_name_banner) + "\n")
eprintf( '\n' + sample_name_banner + ' [STEPS BLOCK 2] ' + '\n')
try:
execute_tasks(s, verbose = command_line_params['verbose'], block = 2)
except:
pass
else:
for input_file in sorted_samplesData_keys:
s = samplesData[input_file]
s.stepslogger.printf("\n\n============== BEGIN RUN " + s.sample_name + " " + runid + " ==================\n")
sample_name_banner = "PROCESSING INPUT " + input_file
eprintf('#'*len(sample_name_banner) + "\n")
eprintf( '\n' + sample_name_banner + '\n')
try:
execute_tasks(s, verbose = command_line_params['verbose'], block = 0)
except:
pass
try:
execute_tasks(s, verbose = command_line_params['verbose'], block = 1)
except:
pass
try:
execute_tasks(s, verbose = command_line_params['verbose'], block = 2)
except:
pass
return
|
kishori82/MetaPathways_Python.3.0
|
libs/python_modules/pipeline/metapathways.py
|
Python
|
mit
| 26,328
|
[
"BLAST"
] |
f867fc8d96ddb0f77cdda04e1990a9077c7ef8571cd7be5b94c22f6b9c4fe607
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import numpy as np
from functools import reduce  # reduce is not a builtin on Python 3
import prst  # assumed to expose prst.log and prst.warning, as used below
#### Utility functions for conversion to vtkUnstructuredGrid.
def _get_cell_faces(G, cell_idx):
# MRST equivalent: G.cells.faces(facePos(i) : facePos(i+1)-1, :)
return G.cells.faces[G.cells.facePos[cell_idx]:G.cells.facePos[cell_idx+1],:]
def _get_cell_faces_num(G, cell_idx):
"""Get number of faces a certain cell has."""
return G.cells.facePos[cell_idx+1] - G.cells.facePos[cell_idx]
def _get_cell_nodes(G, cell_idx):
"""Get array of nodes for a cell."""
# To get face nodes in MRST:
# (The second column in cells_faces are cell indices)
# >> G.faces.nodes(G.faces.nodePos(cell_idx) : G.faces.nodePos(cell_idx+1)-1, 1)
cell_faces = G.cells.faces[G.cells.facePos[cell_idx,0]:G.cells.facePos[cell_idx+1,0],0]
# The iterator returns nodes for every cell face.
# The union of these nodes contain the unique nodes for the cell.
# = face1_nodes U face2_nodes U ...
return reduce(
np.union1d,
(G.faces.nodes[
G.faces.nodePos[face_idx,0]:G.faces.nodePos[face_idx+1,0]
] for face_idx in cell_faces)
)
def _get_cells_faces_num(G):
"""Get number of faces for all cells."""
return np.diff(G.cells.facePos, axis=0)
def _get_face_nodes(G, face_idx):
"""Get nodes of a certain face."""
return G.faces.nodes[G.faces.nodePos[face_idx,0] : G.faces.nodePos[face_idx+1,0]]
def _get_face_nodes_num(G, face_idx):
"""Get number of nodes of a certain face."""
return G.faces.nodePos[face_idx+1,0] - G.faces.nodePos[face_idx,0]
def createVtkUnstructuredGrid(G):
"""
Creates a tvtk.UnstructuredGrid object from a PRST grid.
Synopsis:
vtkGrid = createVtkUnstructuredGrid(G)
This is currently only used for plotting purposes.
"""
try:
from tvtk.api import tvtk
from mayavi import mlab
from mayavi.sources.vtk_data_source import VTKDataSource
except ImportError as e:
prst.log.error("Couldn't import", e)
if G.gridDim == 2:
raise NotImplementedError("Only 3d for now")
# Initialize grid object with point coordinates
ug = tvtk.UnstructuredGrid(points=G.nodes.coords)
# We are now going to add cells as VTK polyhedrons, one by one
# using the insert_next_cell method of the `ug` object.
# The first argument is the shape type. In this case we will use the most
# general VTK type, the polyhedron, where every cell can have a variable
# amount of faces.
poly_type = tvtk.Polyhedron().cell_type
# The second argument to insert_next_cell is a list in the following format:
    # (num_cell_faces, num_face0_pts, id1, id2, id3, num_face1_pts, id1, id2,
    # id3, ...).
    # First, the number of faces.
    # Then, for each face: the number of nodes for this face, followed by its node ids.
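    # Illustrative example: a single tetrahedron on nodes 0..3 would be inserted as
    #   [4,  3, 0, 1, 2,  3, 0, 1, 3,  3, 0, 2, 3,  3, 1, 2, 3]
    # i.e. 4 faces, each listing its 3 node ids after its node count.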
# Loop through cells
for cell_idx in range(G.cells.num):
pt_ids = []
pt_ids.append(_get_cell_faces_num(G, cell_idx))
# Loop through faces for this cell
for face_idx in np.nditer(_get_cell_faces(G, cell_idx)[:,0]):
pt_ids.append(_get_face_nodes_num(G, face_idx))
pt_ids += list(_get_face_nodes(G, face_idx))
ug.insert_next_cell(poly_type, pt_ids)
return ug
def plotGrid(G, cell_data=None, bgcolor=(0.5,0.5,0.5), size=(400,300),
show_edges=True, mlab_figure=True, mlab_show=True,
colorbar=True, colorbar_kwargs=None):
"""Plot grid in MayaVi.
Synopsis:
plotGrid(G)
plotGrid(G, cell_data)
Arguments:
G:
PRST grid object
cell_data (Optional[ndarray]):
Array of shape (G.cells.num,) containing one scalar value for each
cell.
bgcolor (Optional[3-tuple]):
Background color of figure as a tuple of 3 float numbers. Default
is grey (0.5, 0.5, 0.5). Useful for creating figures with a white
background.
size (Optional[2-tuple]):
Figure size. Default is (400,300) pixels.
show_edges (Optional[bool]):
Show cell edges. Default is True.
mlab_figure (Optional[bool]):
Whether a new figure is created. Default is True: A new figure is
created when this function is called. If False, the grid is plotted
in a previous figure. If a previous figure does not exist, one is
created.
mlab_show (Optional[bool]):
Whether or not to call mlab.show() to display the figure.
If mlab_show=False the figure can be modified after plotting.
colorbar (Optional[bool]):
Whether or not to show a colorbar.
colorbar_kwargs (Optional[dict]):
Keyword arguments passed on to mlab.colorbar(). For example, to
orient the colorbar vertically, let
colorbar_kwargs={'orientation':'vertical'}.
Default is {} (no arguments).
Returns: None
If more advanced data transformation is necessary, it is recommended to
create a custom plotting function utilizing
prst.plotting.createVtkUnstructuredGrid manually. See the source code of
this function.
Technical note: VtkUnstructuredGrid does not support face data. Only cell
data and point data. This function can only display cell data for now, but
point data should be easy to implement. Data scaling (e.g., scaling z-axis)
is not supported in MayaVi either, but is possible to mimic using a custom
plotting function and the extent=(0,1,0,1,0,1) parameter of
mlab.pipeline.surface.
"""
try:
from tvtk.api import tvtk
from mayavi import mlab
from mayavi.sources.vtk_data_source import VTKDataSource
except ImportError as e:
prst.log.error("Couldn't import", e)
ug = createVtkUnstructuredGrid(G)
    if cell_data is not None:
ug.cell_data.scalars = cell_data
ug.cell_data.scalars.name = "Cell values"
vtkSrc = VTKDataSource(data=ug)
if mlab_figure:
mlab.figure(bgcolor=bgcolor, size=size)
if size != (400,300):
prst.warning("Custom size has no effect for mlab_figure=False")
dataset = mlab.pipeline.add_dataset(vtkSrc, name="PRST cell data")
# Yellow surface with translucent black wireframe.
# VTK takes care of surface extraction, unlike in MRST where this is done
# manually. The downside is performance, since the whole grid is converted.
# On the other hand, this makes it possible to use various VTK filters to
# transform the data.
if cell_data is None:
# MRST yellow
mlab.pipeline.surface(dataset, opacity=1., color=(1,1,0))
else:
# Display using default colormap
mlab.pipeline.surface(dataset, opacity=1.)
if show_edges:
mlab.pipeline.surface(mlab.pipeline.extract_edges(vtkSrc), color=(0,0,0), opacity=0.3)
if colorbar:
if colorbar_kwargs is None:
colorbar_kwargs = {}
mlab.colorbar(**colorbar_kwargs)
if mlab_show:
mlab.show()
def plotCellData(G, cell_data, **kwargs):
"""See plotGrid."""
return plotGrid(G, cell_data, **kwargs)
|
roessland/PRST
|
prst/plotting.py
|
Python
|
gpl-3.0
| 7,321
|
[
"Mayavi",
"VTK"
] |
8fed2bba2b91160548253fb1219605d98b0ec62c40760423949a58867e3b7a67
|
from os import path, mkdir,curdir
from ..external import Structure
from ..core import Workflow,IOTask
from numpy import int as np_int
from numpy import array as np_array
from numpy import linalg as np_linalg
from numpy import ones as np_ones
__all__ = ['KKflow']
class KKflow(Workflow,IOTask):
def __init__(self,**kwargs):
"""
keyword arguments:
structure : pymatgen.Structure
Structure object containing information on the unit cell.
IBZ : executable
prefix : str, prefix for calculation
dirname : str, directory name
kgrid_response : int, array(3), k-point grid for response
"""
super(KKflow, self).__init__(**kwargs)
self.structure = kwargs['structure']
self.prefix = kwargs['prefix']
self.dirname = kwargs.pop('dirname','KK')
self.kgrid_response = kwargs['kgrid_response']
self.kgrid="{}x{}x{}".format(self.kgrid_response[0],self.kgrid_response[1],self.kgrid_response[2])
self.ibz=kwargs.pop('IBZ','ibz')
# --- Write run.sh file ---
# Define variables:
self.runscript.variables={
'IBZ' : self.ibz}
#
# Copy files:
#
self.update_link(self.pvectors_fname,'pvectors')
self.update_link(self.symd_fname,'sym.d')
# Load modules in run script:
if ( 'modules' in kwargs):
self.runscript.append(kwargs['modules'])
#
# Extra lines:
#
self.runscript.append("#Executable")
self.runscript.append("$IBZ -abinit -tetrahedra -cartesian -symmetries -reduced -mesh\n")
self.runscript.append("#Rename output files:")
self.runscript.append("mv kpoints.reciprocal {0}".format(self.kreciprocal_fname))
self.runscript.append("mv kpoints.cartesian {0}".format(self.kcartesian_fname))
self.runscript.append("mv tetrahedra {0}".format(self.tetrahedra_fname))
self.runscript.append("mv Symmetries.Cartesian {0}".format(self.symmetries_fname))
# self.runscript.append("cd ..")
# self.runscript.append("rm -rf TMP/")
def write(self):
""" Makes KK directory.
This contains symmetries and lattice parameters"""
        super(KKflow, self).write()
self.get_syms()
self.write_grid()
@property
def tetrahedra_fname(self):
original = path.realpath(curdir)
tetrahedra_fname='symmetries/tetrahedra_{0}'.format(self.kgrid)
return path.join(original, tetrahedra_fname)
@property
def symmetries_fname(self):
original = path.realpath(curdir)
symmetries_fname='symmetries/Symmetries.Cartesian_{0}'.format(self.kgrid)
return path.join(original, symmetries_fname)
@property
def pvectors_fname(self):
original = path.realpath(curdir)
pvectors_fname='symmetries/pvectors'
return path.join(original, pvectors_fname)
@property
def symd_fname(self):
original = path.realpath(curdir)
symd_fname='symmetries/symd'
return path.join(original, symd_fname)
@property
def kreciprocal_fname(self):
original = path.realpath(curdir)
kreciprocal_fname='{0}.klist_{1}'.format(self.prefix,self.kgrid)
return path.join(original, kreciprocal_fname)
@property
def kcartesian_fname(self):
original = path.realpath(curdir)
kcartesian_fname='symmetries/{0}.kcartesian_{1}'.format(self.prefix,self.kgrid)
return path.join(original, kcartesian_fname)
def get_syms(self):
""" Gets symmetries with Pymatgen"""
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# Gets symmetries with pymatgen:
symprec=0.1 # symmetry tolerance for the Spacegroup Analyzer
#used to generate the symmetry operations
sga = SpacegroupAnalyzer(self.structure, symprec)
# ([SymmOp]): List of symmetry operations.
SymmOp=sga.get_symmetry_operations()
nsym=len(SymmOp)
# Symmetries directory:
# dirname=path.dirname(path.abspath(__file__))
dirname=path.realpath(curdir)
newdir="symmetries"
SYMdir=path.join(dirname,newdir)
if not path.exists(SYMdir):
mkdir(SYMdir)
# symmetries/sym.d file:
#self.symd_fname=SYMdir+"/sym.d"
f=open(self.symd_fname,"w")
f.write("%i\n" % (nsym))
for isym in range(0,nsym):
symrel=np_array(SymmOp[isym].rotation_matrix) #rotations
# Transpose all symmetry matrices
symrel = np_linalg.inv(symrel.transpose())
symrel = np_array(symrel,np_int)
#translation=SymmOp[isym].translation_vector
f.write(" ".join(map(str, symrel[0][:]))+" ")
f.write(" ".join(map(str, symrel[1][:]))+" ")
f.write(" ".join(map(str, symrel[2][:]))+"\n")
f.close()
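        # The resulting sym.d file is plain text: nsym on the first line, then one
        # line of nine integers per symmetry operation, e.g. (identity only):
        #   1
        #   1 0 0 0 1 0 0 0 1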
# Get lattice parameters
lattice=self.structure.lattice
rprim=lattice
ang2bohr=1.88972613
acell=np_ones(3)*ang2bohr
# Write pvectors file
# symmetries/pvectors file:
# self.pvectors_fname=SYMdir+"/pvectors"
f=open(self.pvectors_fname,"w")
f.write(str(lattice)+"\n")
f.write(" ".join(map(str, acell[:]))+"\n")
f.close()
def write_grid(self):
""" Writes KK/grid file to define the Tetrahedra k-point grid """
#Write KK/grid file
filename=self.dirname+"/grid"
f=open(filename,"w")
f.write(" ".join(map(str, self.kgrid_response[:]))+"\n")
f.close()
#PENDING: need to get rid of this file:
#Write KK/fort.83
#0 if 'odd_rank'
#1 if system was rendered non-centrosymmetric via odd_rank.sh
#2 normal case
filename=self.dirname+"/fort.83"
f=open(filename,"w")
f.write("2 \n")
f.close()
|
trangel/OPTpy
|
OPTpy/utils/kk.py
|
Python
|
gpl-3.0
| 5,948
|
[
"ABINIT",
"pymatgen"
] |
7a99d1516340fc6df76ef51f7bef82d80c54f9b605e039d6f6cb49bcc7f80568
|
"""Module containing CMSAF CLAAS v2 FileHandler."""
import datetime
import pyresample.geometry
from .netcdf_utils import NetCDF4FileHandler
class CLAAS2(NetCDF4FileHandler):
"""Handle CMSAF CLAAS-2 files."""
def __init__(self, *args, **kwargs):
"""Initialise class."""
super().__init__(*args, **kwargs, cache_handle=False,
auto_maskandscale=True)
@property
def start_time(self):
"""Get start time from file."""
# datetime module can't handle timezone identifier
return datetime.datetime.fromisoformat(
self["/attr/time_coverage_start"].rstrip("Z"))
@property
def end_time(self):
"""Get end time from file."""
return datetime.datetime.fromisoformat(
self["/attr/time_coverage_end"].rstrip("Z"))
def available_datasets(self, configured_datasets=None):
"""Yield a collection of available datasets.
Return a generator that will yield the datasets available in the loaded
files. See docstring in parent class for specification details.
"""
# this method should work for any (CF-conform) NetCDF file, should it
# be somewhere more generically available? Perhaps in the
# `NetCDF4FileHandler`?
yield from super().available_datasets(configured_datasets)
data_vars = [k for k in self.file_content
if k + "/dimensions" in self.file_content]
for k in data_vars:
# if it doesn't have a y-dimension we're not interested
if "y" not in self.file_content[k + "/dimensions"]:
continue
ds_info = self._get_dsinfo(k)
yield (True, ds_info)
def _get_dsinfo(self, var):
"""Get metadata for variable.
Return metadata dictionary for variable ``var``.
"""
ds_info = {"name": var,
"file_type": self.filetype_info["file_type"]}
        # attributes for this data variable; keys look like "<var>/attr/<name>"
        attrs = {k[len(f"{var:s}/attr")+1:]: v
                 for (k, v) in self.file_content.items()
                 if k.startswith(f"{var:s}/attr")}
# we don't need "special" attributes in our metadata here
for unkey in {"_FillValue", "add_offset", "scale_factor"}:
attrs.pop(unkey, None)
        ds_info.update(attrs)
        return ds_info
def get_dataset(self, dataset_id, info):
"""Get the dataset."""
ds = self[dataset_id['name']]
if "time" in ds.dims:
return ds.squeeze(["time"])
return ds
def get_area_def(self, dataset_id):
"""Get the area definition."""
return pyresample.geometry.AreaDefinition(
"some_area_name",
"on-the-fly area",
"geos",
self["/attr/CMSAF_proj4_params"],
self["/dimension/x"],
self["/dimension/y"],
self["/attr/CMSAF_area_extent"])
|
pytroll/satpy
|
satpy/readers/cmsaf_claas2.py
|
Python
|
gpl-3.0
| 2,965
|
[
"NetCDF"
] |
e63fec3ba3d878e5dba22a82b13983a0e11ea3cd121a0d6bdad4b1c96d74fe90
|
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
from collections import OrderedDict
from operator import itemgetter
from os.path import abspath
from os.path import basename
from os.path import dirname
from os.path import exists
from os.path import expanduser
from os.path import isfile
from os.path import join
import simplejson as json
from commoncode import fileutils
"""
Format scans outputs.
"""
def get_template(templates_dir, template_name='template.html'): # @ReservedAssignment
"""
Given a template directory, load and return the template file in the template_name
file found in that directory.
"""
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(templates_dir))
template = env.get_template(template_name)
return template
def get_template_dir(format): # @ReservedAssignment
"""
Given a format string return the corresponding standard template directory.
"""
return join(dirname(__file__), 'templates', format)
def as_html_app(scanned_path, output_file):
"""
Return an HTML string built from a list of results and the html-app template.
"""
template = get_template(get_template_dir('html-app'))
_, assets_dir = get_html_app_files_dirs(output_file)
return template.render(assets_dir=assets_dir, scanned_path=scanned_path)
def get_html_app_help(output_filename):
"""
Return an HTML string containing html-app help page with a reference back
to the main app
"""
template = get_template(get_template_dir('html-app'), template_name='help_template.html')
return template.render(main_app=output_filename)
class HtmlAppAssetCopyWarning(Exception):
pass
class HtmlAppAssetCopyError(Exception):
pass
def is_stdout(output_file):
return output_file.name == '<stdout>'
def get_html_app_files_dirs(output_file):
"""
    Return a tuple of (parent_dir, dir_name) for a directory named after the
    `output_file` file object's file_base_name (stripped of its extension) plus a
    `_files` suffix. Return empty strings if output is to stdout.
"""
if is_stdout(output_file):
return '', ''
file_name = output_file.name
parent_dir = dirname(file_name)
dir_name = fileutils.file_base_name(file_name) + '_files'
return parent_dir, dir_name
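# For example (illustrative path): an output file named '/tmp/scan.html' yields
# ('/tmp', 'scan_files'), so the html-app assets live in a directory next to the
# HTML file itself.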
def create_html_app_assets(results, output_file):
"""
Given an html-app output_file, create the corresponding `_files` directory
and copy the assets to this directory. The target directory is deleted if it
exists.
Raise HtmlAppAssetCopyWarning if the output_file is <stdout> or
HtmlAppAssetCopyError if the copy was not possible.
"""
try:
if is_stdout(output_file):
raise HtmlAppAssetCopyWarning()
assets_dir = join(get_template_dir('html-app'), 'assets')
# delete old assets
tgt_dirs = get_html_app_files_dirs(output_file)
target_dir = join(*tgt_dirs)
if exists(target_dir):
fileutils.delete(target_dir)
# copy assets
fileutils.copytree(assets_dir, target_dir)
# write json data
root_path, assets_dir = get_html_app_files_dirs(output_file)
with codecs.open(join(root_path, assets_dir, 'data.json'), 'wb', encoding='utf-8') as f:
f.write('data=')
json.dump(results, f, iterable_as_array=True)
# create help file
with codecs.open(join(root_path, assets_dir, 'help.html'), 'wb', encoding='utf-8') as f:
f.write(get_html_app_help(basename(output_file.name)))
    except HtmlAppAssetCopyWarning as w:
raise w
    except Exception as e:
raise HtmlAppAssetCopyError(e)
def as_template(scanned_files, template='html'):
"""
    Return a string built from a list of results and the provided template.
The template defaults to the standard HTML template format or can point to
the path of a custom template file.
"""
from licensedcode.models import get_licenses
if template == 'html':
template = get_template(get_template_dir('html'))
else:
# load a custom template
tpath = fileutils.as_posixpath(abspath(expanduser(template)))
assert isfile(tpath)
tdir = fileutils.parent_directory(tpath)
tfile = fileutils.file_name(tpath)
template = get_template(tdir, tfile)
converted = OrderedDict()
converted_infos = OrderedDict()
converted_packages = OrderedDict()
licenses = {}
LICENSES = 'licenses'
COPYRIGHTS = 'copyrights'
PACKAGES = 'packages'
URLS = 'urls'
EMAILS = 'emails'
# Create a flattened data dict keyed by path
for scanned_file in scanned_files:
path = scanned_file['path']
results = []
if COPYRIGHTS in scanned_file:
for entry in scanned_file[COPYRIGHTS]:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'copyright',
# NOTE: we display one statement per line.
'value': '\n'.join(entry['statements']),
})
if LICENSES in scanned_file:
for entry in scanned_file[LICENSES]:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'license',
'value': entry['key'],
})
if entry['key'] not in licenses:
licenses[entry['key']] = entry
entry['object'] = get_licenses().get(entry['key'])
if results:
converted[path] = sorted(results, key=itemgetter('start'))
# this is klunky: we need to drop templates entirely
converted_infos[path] = OrderedDict()
for name, value in scanned_file.items():
if name in (LICENSES, PACKAGES, COPYRIGHTS, EMAILS, URLS):
continue
converted_infos[path][name] = value
if PACKAGES in scanned_file:
converted_packages[path] = scanned_file[PACKAGES]
licenses = OrderedDict(sorted(licenses.items()))
files = {
'license_copyright': converted,
'infos': converted_infos,
'packages': converted_packages
}
return template.generate(files=files, licenses=licenses)
|
yasharmaster/scancode-toolkit
|
src/formattedcode/format.py
|
Python
|
apache-2.0
| 7,852
|
[
"VisIt"
] |
b5bf9265a07087b8963edc7806fb5fc010c4d936d90bab0ebe6c6921fbc69c9a
|
"""
Plot the first two tICs with the ensembler models.
"""
# Note: the ensembler models are still from after the implicit refinement step.
import matplotlib
matplotlib.use('Agg')
from msmbuilder import example_datasets, cluster, msm, featurizer, lumping, utils, dataset, decomposition
from sklearn.pipeline import make_pipeline
import numpy as np
import matplotlib.pyplot as plt
import mdtraj as md
tica_lagtime = 1600
dih = dataset.NumpyDirDataset("./dihedrals/")
X = dataset.dataset("./tica%d.h5" % tica_lagtime)
Xf = np.concatenate(X)
tica_model = utils.load("./tica%d.pkl" % tica_lagtime)
#Load trajectory with ensembler models
t_models = md.load("../ensembler-models/traj-refine_implicit_md.xtc", top = "../ensembler-models/topol-renumbered-implicit.pdb")
#Now make dihedrals of this.
dihedrals_models = featurizer.DihedralFeaturizer(types=["phi", "psi", "chi1", "chi2"]).transform([t_models])
x_models = tica_model.transform(dihedrals_models)
#Now plot on the slow MSM features found before.
plt.plot(x_models[0][:, 0], x_models[0][:, 1], 'o', markersize=5, label="ensembler models", color='white')
plt.title("Dihedral tICA Analysis - Abl")
plt.xlabel("Slowest Coordinate")
plt.ylabel("Second Slowest Coordinate")
plt.legend()
plt.hexbin(Xf[:, 0], Xf[:, 1], bins='log')
plt.savefig('fig_abl_ensembler.png')
|
hainm/MSMs
|
initial_ipynbs/do_gmm_4src_n_abl/make_ensembler_fig.py
|
Python
|
gpl-2.0
| 1,294
|
[
"MDTraj"
] |
c21afdb722113e42eaea62778082d20e6399e9da3efa4c7cb2f8edbe60ff9f71
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from os.path import join as joinp
from nose import tools as nt
from neurom.core.population import Population
from neurom import load_neuron
_path = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = joinp(_path, '../../../test_data')
NRN1 = load_neuron(joinp(DATA_PATH, 'swc/Neuron.swc'))
NRN2 = load_neuron(joinp(DATA_PATH, 'swc/Single_basal.swc'))
NRN3 = load_neuron(joinp(DATA_PATH, 'swc/Neuron_small_radius.swc'))
NEURONS = [NRN1, NRN2, NRN3]
TOT_NEURITES = sum(len(N.neurites) for N in NEURONS)
POP = Population(NEURONS, name='foo')
def test_population():
nt.assert_equal(len(POP.neurons), 3)
    nt.assert_equal(POP.neurons[0].name, 'Neuron')
    nt.assert_equal(POP.neurons[1].name, 'Single_basal')
    nt.assert_equal(POP.neurons[2].name, 'Neuron_small_radius')
nt.assert_equal(len(POP.somata), 3)
nt.assert_equal(len(POP.neurites), TOT_NEURITES)
nt.assert_equal(POP.name, 'foo')
def test_neurons():
for i, n in enumerate(NEURONS):
nt.assert_true(n is POP.neurons[i])
def test_iterate_neurons():
for a, b in zip(NEURONS, POP):
nt.assert_true(a is b)
def test_len():
nt.assert_equal(len(POP), len(NEURONS))
def test_getitem():
for i in range(len(NEURONS)):
nt.assert_true(POP[i] is NEURONS[i])
def test_str():
nt.ok_('Population' in str(POP))
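# Note: the Population aggregates across its neurons, so POP.neurites
# concatenates the neurites of NRN1-NRN3; that is why len(POP.neurites)
# above is compared against TOT_NEURITES computed from the individual
# neurons.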
|
liesbethvanherpe/NeuroM
|
neurom/core/tests/test_population.py
|
Python
|
bsd-3-clause
| 3,010
|
[
"NEURON"
] |
ec62896fa1a8c96eee1f2dc38299cb2f4bee09f5bfbe1a349fe344e51f7a2c9b
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# os and numpy are used below; import them explicitly rather than relying
# on the star import from bigdl.dllib.utils.common
import os
import numpy as np
import pytest
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.orca.tfpark import TFNet, TFDataset
from bigdl.dllib.utils.common import *
np.random.seed(1337) # for reproducibility
class TestTF(ZooTestCase):
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
def test_init_tf_net(self):
tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
net = TFNet.from_export_folder(tfnet_path)
output = net.forward(np.random.rand(2, 4))
assert output.shape == (2, 2)
def test_for_scalar(self):
import tensorflow as tf
with tf.Graph().as_default():
input1 = tf.placeholder(dtype=tf.float32, shape=())
output = input1 + 1
sess = tf.Session()
net = TFNet.from_session(sess, [input1], [output])
sess.close()
out_value = net.forward(np.array(1.0))
assert len(out_value.shape) == 0
# the following test would fail on bigdl 0.6.0 due to a bug in bigdl,
# comment it out for now
# out_value = net.predict(np.array([1.0])).first()
# assert len(out_value.shape) == 0
def test_init_tfnet_from_session(self):
import tensorflow as tf
with tf.Graph().as_default():
input1 = tf.placeholder(dtype=tf.float32, shape=(None, 2))
label1 = tf.placeholder(dtype=tf.float32, shape=(None, 1))
hidden = tf.layers.dense(input1, 4)
output = tf.layers.dense(hidden, 1)
loss = tf.reduce_mean(tf.square(output - label1))
grad_inputs = tf.gradients(loss, input1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
data = np.random.rand(2, 2)
output_value_ref = sess.run(output, feed_dict={input1: data})
label_value = output_value_ref - 1.0
grad_input_value_ref = sess.run(grad_inputs[0],
feed_dict={input1: data,
label1: label_value})
net = TFNet.from_session(sess, [input1], [output], generate_backward=True)
output_value = net.forward(data)
grad_input_value = net.backward(data, np.ones(shape=(2, 1)))
self.assert_allclose(output_value, output_value_ref)
self.assert_allclose(grad_input_value, grad_input_value_ref)
def test_init_tfnet_from_saved_model(self):
model_path = os.path.join(TestTF.resource_path, "saved-model-resource")
tfnet = TFNet.from_saved_model(model_path, inputs=["flatten_input:0"],
outputs=["dense_2/Softmax:0"])
result = tfnet.predict(np.ones(dtype=np.float32, shape=(20, 28, 28, 1)))
result.collect()
def test_tf_net_predict(self):
tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
import tensorflow as tf
tf_session_config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
net = TFNet.from_export_folder(tfnet_path, tf_session_config=tf_session_config)
output = net.predict(np.random.rand(16, 4), batch_per_thread=5, distributed=False)
assert output.shape == (16, 2)
def test_tf_net_predict_dataset(self):
tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
net = TFNet.from_export_folder(tfnet_path)
dataset = TFDataset.from_ndarrays((np.random.rand(16, 4),))
output = net.predict(dataset)
output = np.stack(output.collect())
assert output.shape == (16, 2)
if __name__ == "__main__":
pytest.main([__file__])
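# Usage sketch of the round trip exercised above (same API as in
# test_init_tfnet_from_session): wrap a session's input/output tensors with
# TFNet.from_session, then call net.forward(ndarray) for local inference or
# net.predict(...) for (optionally distributed) batch inference.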
|
intel-analytics/BigDL
|
python/orca/test/bigdl/orca/tfpark/test_tfnet.py
|
Python
|
apache-2.0
| 4,335
|
[
"ORCA"
] |
00a700b304552515873f9289376925a8e6f51aee2b5aa09379a590c0e4778f35
|
from collections import OrderedDict
from datetime import datetime, timedelta
from itertools import product
import warnings
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.compat import range, zip
from pandas.errors import UnsupportedFunctionCall
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame, Index, Series, Timestamp, bdate_range, concat, isna, notna)
from pandas.core.base import SpecificationError
from pandas.core.sorting import safe_sort
import pandas.core.window as rwindow
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
N, K = 100, 10
def assert_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else:
tm.assert_frame_equal(left, right)
@pytest.fixture(params=[True, False])
def raw(request):
return request.param
@pytest.fixture(params=['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann'])
def win_types(request):
return request.param
@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian'])
def win_types_special(request):
return request.param
class Base(object):
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def _create_data(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
class TestApi(Base):
def setup_method(self, method):
self._create_data()
def test_getitem(self):
r = self.frame.rolling(window=5)
tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)
r = self.frame.rolling(window=5)[1]
assert r._selected_obj.name == self.frame.columns[1]
# technically this is allowed
r = self.frame.rolling(window=5)[1, 3]
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[1, 3]])
r = self.frame.rolling(window=5)[[1, 3]]
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[1, 3]])
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
g = df.rolling(window=5)
pytest.raises(KeyError, g.__getitem__, ['C']) # g[['C']]
pytest.raises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]
with pytest.raises(KeyError, match='^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'C']]
def test_attribute_access(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
r = df.rolling(window=5)
tm.assert_series_equal(r.A.sum(), r['A'].sum())
pytest.raises(AttributeError, lambda: r.F)
def tests_skip_nuisance(self):
df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})
r = df.rolling(window=3)
result = r[['A', 'B']].sum()
expected = DataFrame({'A': [np.nan, np.nan, 3, 6, 9],
'B': [np.nan, np.nan, 18, 21, 24]},
columns=list('AB'))
tm.assert_frame_equal(result, expected)
def test_skip_sum_object_raises(self):
df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})
r = df.rolling(window=3)
with pytest.raises(TypeError, match='cannot handle this type'):
r.sum()
def test_agg(self):
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
a_mean = r['A'].mean()
a_std = r['A'].std()
a_sum = r['A'].sum()
b_mean = r['B'].mean()
b_std = r['B'].std()
b_sum = r['B'].sum()
result = r.aggregate([np.mean, np.std])
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_product([['A', 'B'], ['mean',
'std']])
tm.assert_frame_equal(result, expected)
result = r.aggregate({'A': np.mean, 'B': np.std})
expected = concat([a_mean, b_std], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({'A': ['mean', 'std']})
expected = concat([a_mean, a_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), ('A',
'std')])
tm.assert_frame_equal(result, expected)
result = r['A'].aggregate(['mean', 'sum'])
expected = concat([a_mean, a_sum], axis=1)
expected.columns = ['mean', 'sum']
tm.assert_frame_equal(result, expected)
with catch_warnings(record=True):
# using a dict with renaming
warnings.simplefilter("ignore", FutureWarning)
result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
expected = concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r.aggregate({'A': {'mean': 'mean',
'sum': 'sum'},
'B': {'mean2': 'mean',
'sum2': 'sum'}})
expected = concat([a_mean, a_sum, b_mean, b_sum], axis=1)
exp_cols = [('A', 'mean'), ('A', 'sum'), ('B', 'mean2'), ('B', 'sum2')]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({'A': ['mean', 'std'], 'B': ['mean', 'std']})
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
exp_cols = [('A', 'mean'), ('A', 'std'), ('B', 'mean'), ('B', 'std')]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
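    # Column layout asserted above, for reference: list aggregation yields
    # MultiIndex columns
    #   ('A', 'mean'), ('A', 'std'), ('B', 'mean'), ('B', 'std')
    # while dict aggregation with one function per column keeps the flat
    # 'A'/'B' column labels.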
def test_agg_apply(self, raw):
# passed lambda
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
a_sum = r['A'].sum()
result = r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
rcustom = r['B'].apply(lambda x: np.std(x, ddof=1), raw=raw)
expected = concat([a_sum, rcustom], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_consistency(self):
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
result = r.agg([np.sum, np.mean]).columns
expected = pd.MultiIndex.from_product([list('AB'), ['sum', 'mean']])
tm.assert_index_equal(result, expected)
result = r['A'].agg([np.sum, np.mean]).columns
expected = Index(['sum', 'mean'])
tm.assert_index_equal(result, expected)
result = r.agg({'A': [np.sum, np.mean]}).columns
expected = pd.MultiIndex.from_tuples([('A', 'sum'), ('A', 'mean')])
tm.assert_index_equal(result, expected)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
def f():
r.aggregate({'r1': {'A': ['mean', 'sum']},
'r2': {'B': ['mean', 'sum']}})
pytest.raises(SpecificationError, f)
expected = concat([r['A'].mean(), r['A'].std(),
r['B'].mean(), r['B'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r.agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
expected.columns = pd.MultiIndex.from_tuples([('A', 'ra', 'mean'), (
'A', 'ra', 'std'), ('B', 'rb', 'mean'), ('B', 'rb', 'std')])
tm.assert_frame_equal(result, expected, check_like=True)
def test_count_nonnumeric_types(self):
# GH12541
cols = ['int', 'float', 'string', 'datetime', 'timedelta', 'periods',
'fl_inf', 'fl_nan', 'str_nan', 'dt_nat', 'periods_nat']
df = DataFrame(
{'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'datetime': pd.date_range('20170101', periods=3),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s'),
'periods': [pd.Period('2012-01'), pd.Period('2012-02'),
pd.Period('2012-03')],
'fl_inf': [1., 2., np.Inf],
'fl_nan': [1., 2., np.NaN],
'str_nan': ['aa', 'bb', np.NaN],
'dt_nat': [Timestamp('20170101'), Timestamp('20170203'),
Timestamp(None)],
'periods_nat': [pd.Period('2012-01'), pd.Period('2012-02'),
pd.Period(None)]},
columns=cols)
expected = DataFrame(
{'int': [1., 2., 2.],
'float': [1., 2., 2.],
'string': [1., 2., 2.],
'datetime': [1., 2., 2.],
'timedelta': [1., 2., 2.],
'periods': [1., 2., 2.],
'fl_inf': [1., 2., 2.],
'fl_nan': [1., 2., 1.],
'str_nan': [1., 2., 1.],
'dt_nat': [1., 2., 1.],
'periods_nat': [1., 2., 1.]},
columns=cols)
result = df.rolling(window=2).count()
tm.assert_frame_equal(result, expected)
result = df.rolling(1).count()
expected = df.notna().astype(float)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_window_with_args(self):
# make sure that we are aggregating window functions correctly with arg
r = Series(np.random.randn(100)).rolling(window=10, min_periods=1,
win_type='gaussian')
expected = concat([r.mean(std=10), r.mean(std=.01)], axis=1)
expected.columns = ['<lambda>', '<lambda>']
result = r.aggregate([lambda x: x.mean(std=10),
lambda x: x.mean(std=.01)])
tm.assert_frame_equal(result, expected)
def a(x):
return x.mean(std=10)
def b(x):
return x.mean(std=0.01)
expected = concat([r.mean(std=10), r.mean(std=.01)], axis=1)
expected.columns = ['a', 'b']
result = r.aggregate([a, b])
tm.assert_frame_equal(result, expected)
def test_preserve_metadata(self):
# GH 10565
s = Series(np.arange(100), name='foo')
s2 = s.rolling(30).sum()
s3 = s.rolling(20).sum()
assert s2.name == 'foo'
assert s3.name == 'foo'
@pytest.mark.parametrize("func,window_size,expected_vals", [
('rolling', 2, [[np.nan, np.nan, np.nan, np.nan],
[15., 20., 25., 20.],
[25., 30., 35., 30.],
[np.nan, np.nan, np.nan, np.nan],
[20., 30., 35., 30.],
[35., 40., 60., 40.],
[60., 80., 85., 80]]),
('expanding', None, [[10., 10., 20., 20.],
[15., 20., 25., 20.],
[20., 30., 30., 20.],
[10., 10., 30., 30.],
[20., 30., 35., 30.],
[26.666667, 40., 50., 30.],
[40., 80., 60., 30.]])])
def test_multiple_agg_funcs(self, func, window_size, expected_vals):
# GH 15072
df = pd.DataFrame([
['A', 10, 20],
['A', 20, 30],
['A', 30, 40],
['B', 10, 30],
['B', 30, 40],
['B', 40, 80],
['B', 80, 90]], columns=['stock', 'low', 'high'])
f = getattr(df.groupby('stock'), func)
if window_size:
window = f(window_size)
else:
window = f()
index = pd.MultiIndex.from_tuples([
('A', 0), ('A', 1), ('A', 2),
('B', 3), ('B', 4), ('B', 5), ('B', 6)], names=['stock', None])
columns = pd.MultiIndex.from_tuples([
('low', 'mean'), ('low', 'max'), ('high', 'mean'),
('high', 'min')])
expected = pd.DataFrame(expected_vals, index=index, columns=columns)
result = window.agg(OrderedDict((
('low', ['mean', 'max']),
('high', ['mean', 'min']),
)))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestWindow(Base):
def setup_method(self, method):
self._create_data()
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.rolling
# valid
c(win_type='boxcar', window=2, min_periods=1)
c(win_type='boxcar', window=2, min_periods=1, center=True)
c(win_type='boxcar', window=2, min_periods=1, center=False)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(win_type='boxcar', window=2, min_periods=w)
with pytest.raises(ValueError):
c(win_type='boxcar', window=2, min_periods=1, center=w)
for wt in ['foobar', 1]:
with pytest.raises(ValueError):
c(win_type=wt, window=2)
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor_with_win_type(self, which, win_types):
# GH 12669
o = getattr(self, which)
c = o.rolling
c(win_type=win_types, window=2)
@pytest.mark.parametrize(
'method', ['sum', 'mean'])
def test_numpy_compat(self, method):
# see gh-12811
w = rwindow.Window(Series([2, 4, 6]), window=[0, 2])
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(w, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(w, method)(dtype=np.float64)
class TestRolling(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.rolling(2).sum()
df.rolling(2, min_periods=1).sum()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.rolling
# valid
c(window=2)
c(window=2, min_periods=1)
c(window=2, min_periods=1, center=True)
c(window=2, min_periods=1, center=False)
        # GH 13383
        # each invalid window needs its own context manager, otherwise the
        # second call would never be reached once the first one raises
        with pytest.raises(ValueError):
            c(0)
        with pytest.raises(ValueError):
            c(-1)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(window=w)
with pytest.raises(ValueError):
c(window=2, min_periods=w)
with pytest.raises(ValueError):
c(window=2, min_periods=1, center=w)
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor_with_win_type(self, which):
# GH 13383
o = getattr(self, which)
c = o.rolling
with pytest.raises(ValueError):
c(-1, win_type='boxcar')
@pytest.mark.parametrize(
'window', [timedelta(days=3), pd.Timedelta(days=3)])
def test_constructor_with_timedelta_window(self, window):
# GH 15440
n = 10
df = DataFrame({'value': np.arange(n)},
index=pd.date_range('2015-12-24', periods=n, freq="D"))
expected_data = np.append([0., 1.], np.arange(3., 27., 3))
result = df.rolling(window=window).sum()
expected = DataFrame({'value': expected_data},
index=pd.date_range('2015-12-24', periods=n,
freq="D"))
tm.assert_frame_equal(result, expected)
expected = df.rolling('3D').sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'window', [timedelta(days=3), pd.Timedelta(days=3), '3D'])
def test_constructor_timedelta_window_and_minperiods(self, window, raw):
# GH 15305
n = 10
df = DataFrame({'value': np.arange(n)},
index=pd.date_range('2017-08-08', periods=n, freq="D"))
expected = DataFrame(
{'value': np.append([np.NaN, 1.], np.arange(3., 27., 3))},
index=pd.date_range('2017-08-08', periods=n, freq="D"))
result_roll_sum = df.rolling(window=window, min_periods=2).sum()
result_roll_generic = df.rolling(window=window,
min_periods=2).apply(sum, raw=raw)
tm.assert_frame_equal(result_roll_sum, expected)
tm.assert_frame_equal(result_roll_generic, expected)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'sum', 'max', 'min', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
r = rwindow.Rolling(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, method)(dtype=np.float64)
def test_closed(self):
df = DataFrame({'A': [0, 1, 2, 3, 4]})
# closed only allowed for datetimelike
with pytest.raises(ValueError):
df.rolling(window=3, closed='neither')
@pytest.mark.parametrize("func", ['min', 'max'])
def test_closed_one_entry(self, func):
# GH24718
ser = pd.Series(data=[2], index=pd.date_range('2000', periods=1))
result = getattr(ser.rolling('10D', closed='left'), func)()
tm.assert_series_equal(result, pd.Series([np.nan], index=ser.index))
@pytest.mark.parametrize("func", ['min', 'max'])
def test_closed_one_entry_groupby(self, func):
# GH24718
ser = pd.DataFrame(data={'A': [1, 1, 2], 'B': [3, 2, 1]},
index=pd.date_range('2000', periods=3))
result = getattr(
ser.groupby('A', sort=False)['B'].rolling('10D', closed='left'),
func)()
exp_idx = pd.MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index],
names=('A', None))
expected = pd.Series(data=[np.nan, 3, np.nan], index=exp_idx, name='B')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("input_dtype", ['int', 'float'])
@pytest.mark.parametrize("func,closed,expected", [
('min', 'right', [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),
('min', 'both', [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),
('min', 'neither', [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),
('min', 'left', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),
('max', 'right', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
('max', 'both', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
('max', 'neither', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),
('max', 'left', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8])
])
def test_closed_min_max_datetime(self, input_dtype,
func, closed,
expected):
# see gh-21704
ser = pd.Series(data=np.arange(10).astype(input_dtype),
index=pd.date_range('2000', periods=10))
result = getattr(ser.rolling('3D', closed=closed), func)()
expected = pd.Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
def test_closed_uneven(self):
# see gh-21704
ser = pd.Series(data=np.arange(10),
index=pd.date_range('2000', periods=10))
# uneven
ser = ser.drop(index=ser.index[[1, 5]])
result = ser.rolling('3D', closed='left').min()
expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6],
index=ser.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func,closed,expected", [
('min', 'right', [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),
('min', 'both', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),
('min', 'neither', [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),
('min', 'left', [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]),
('max', 'right', [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),
('max', 'both', [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),
('max', 'neither', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),
('max', 'left', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan])
])
def test_closed_min_max_minp(self, func, closed, expected):
# see gh-21704
ser = pd.Series(data=np.arange(10),
index=pd.date_range('2000', periods=10))
ser[ser.index[-3:]] = np.nan
result = getattr(ser.rolling('3D', min_periods=2, closed=closed),
func)()
expected = pd.Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
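    # For offset-based windows, 'closed' picks which interval endpoints are
    # included: 'right' (the default) keeps only the right edge, 'left'
    # only the left edge, 'both' keeps both and 'neither' drops both; the
    # parametrized expectations above follow directly from those interval
    # rules combined with min_periods=2.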
@pytest.mark.parametrize('roller', ['1s', 1])
def tests_empty_df_rolling(self, roller):
# GH 15819 Verifies that datetime and integer rolling windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().rolling(roller).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer rolling windows can be applied to
# empty DataFrames with datetime index
expected = DataFrame(index=pd.DatetimeIndex([]))
result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero(self):
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = pd.Series([np.nan])
result = x.rolling(1, min_periods=0).sum()
expected = pd.Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.rolling(1, min_periods=1).sum()
expected = pd.Series([np.nan])
tm.assert_series_equal(result, expected)
def test_missing_minp_zero_variable(self):
# https://github.com/pandas-dev/pandas/pull/18921
x = pd.Series([np.nan] * 4,
index=pd.DatetimeIndex(['2017-01-01', '2017-01-04',
'2017-01-06', '2017-01-07']))
result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum()
expected = pd.Series(0.0, index=x.index)
tm.assert_series_equal(result, expected)
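    # With min_periods=0 an all-NaN window still produces 0.0 (the empty
    # sum), whereas min_periods=1 requires at least one non-NaN observation
    # and therefore yields NaN.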
def test_multi_index_names(self):
# GH 16789, 16825
cols = pd.MultiIndex.from_product([['A', 'B'], ['C', 'D', 'E']],
names=['1', '2'])
df = DataFrame(np.ones((10, 6)), columns=cols)
result = df.rolling(3).cov()
tm.assert_index_equal(result.columns, df.columns)
assert result.index.names == [None, '1', '2']
@pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame])
def test_iter_raises(self, klass):
# https://github.com/pandas-dev/pandas/issues/11704
# Iteration over a Window
obj = klass([1, 2, 3, 4])
with pytest.raises(NotImplementedError):
iter(obj.rolling(2))
def test_rolling_axis(self, axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame({
i: [np.nan] * 2 + [3.0] * 8
for i in range(20)
})
else:
# axis == 1
expected = DataFrame([
[np.nan] * 2 + [3.0] * 18
] * 10)
result = df.rolling(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
class TestExpanding(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.expanding(2).sum()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.expanding
# valid
c(min_periods=1)
c(min_periods=1, center=True)
c(min_periods=1, center=False)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(min_periods=w)
with pytest.raises(ValueError):
c(min_periods=1, center=w)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'sum', 'max', 'min', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
e = rwindow.Expanding(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
'expander',
        [1, pytest.param('1s', marks=pytest.mark.xfail(
reason='GH#16425 expanding with '
'offset not supported'))])
def test_empty_df_expanding(self, expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
expected = DataFrame(index=pd.DatetimeIndex([]))
result = DataFrame(
index=pd.DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero(self):
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = pd.Series([np.nan])
result = x.expanding(min_periods=0).sum()
expected = pd.Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
expected = pd.Series([np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame])
def test_iter_raises(self, klass):
# https://github.com/pandas-dev/pandas/issues/11704
# Iteration over a Window
obj = klass([1, 2, 3, 4])
with pytest.raises(NotImplementedError):
iter(obj.expanding(2))
def test_expanding_axis(self, axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame({
i: [np.nan] * 2 + [float(j) for j in range(3, 11)]
for i in range(20)
})
else:
# axis == 1
expected = DataFrame([
[np.nan] * 2 + [float(i) for i in range(3, 21)]
] * 10)
result = df.expanding(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
class TestEWM(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
o = getattr(self, which)
c = o.ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
with pytest.raises(ValueError):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError):
c(alpha=0.5, span=1.5)
# not valid: com < 0
with pytest.raises(ValueError):
c(com=-0.5)
# not valid: span < 1
with pytest.raises(ValueError):
c(span=0.5)
# not valid: halflife <= 0
with pytest.raises(ValueError):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError):
c(alpha=alpha)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions work for different dtypes
#
# NOTE: _create_data is called explicitly inside test_dtypes rather than
# via setup_method, because the data depends on each subclass's dtype.
#
# further note that we are only checking rolling for full dtype
# compliance (though both expanding and ewm inherit)
class Dtype(object):
window = 2
funcs = {
'count': lambda v: v.count(),
'max': lambda v: v.max(),
'min': lambda v: v.min(),
'sum': lambda v: v.sum(),
'mean': lambda v: v.mean(),
'std': lambda v: v.std(),
'var': lambda v: v.var(),
'median': lambda v: v.median()
}
def get_expects(self):
expects = {
'sr1': {
'count': Series([1, 2, 2, 2, 2], dtype='float64'),
'max': Series([np.nan, 1, 2, 3, 4], dtype='float64'),
'min': Series([np.nan, 0, 1, 2, 3], dtype='float64'),
'sum': Series([np.nan, 1, 3, 5, 7], dtype='float64'),
'mean': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64'),
'std': Series([np.nan] + [np.sqrt(.5)] * 4, dtype='float64'),
'var': Series([np.nan, .5, .5, .5, .5], dtype='float64'),
'median': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64')
},
'sr2': {
'count': Series([1, 2, 2, 2, 2], dtype='float64'),
'max': Series([np.nan, 10, 8, 6, 4], dtype='float64'),
'min': Series([np.nan, 8, 6, 4, 2], dtype='float64'),
'sum': Series([np.nan, 18, 14, 10, 6], dtype='float64'),
'mean': Series([np.nan, 9, 7, 5, 3], dtype='float64'),
'std': Series([np.nan] + [np.sqrt(2)] * 4, dtype='float64'),
'var': Series([np.nan, 2, 2, 2, 2], dtype='float64'),
'median': Series([np.nan, 9, 7, 5, 3], dtype='float64')
},
'df': {
'count': DataFrame({0: Series([1, 2, 2, 2, 2]),
1: Series([1, 2, 2, 2, 2])},
dtype='float64'),
'max': DataFrame({0: Series([np.nan, 2, 4, 6, 8]),
1: Series([np.nan, 3, 5, 7, 9])},
dtype='float64'),
'min': DataFrame({0: Series([np.nan, 0, 2, 4, 6]),
1: Series([np.nan, 1, 3, 5, 7])},
dtype='float64'),
'sum': DataFrame({0: Series([np.nan, 2, 6, 10, 14]),
1: Series([np.nan, 4, 8, 12, 16])},
dtype='float64'),
'mean': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
1: Series([np.nan, 2, 4, 6, 8])},
dtype='float64'),
'std': DataFrame({0: Series([np.nan] + [np.sqrt(2)] * 4),
1: Series([np.nan] + [np.sqrt(2)] * 4)},
dtype='float64'),
'var': DataFrame({0: Series([np.nan, 2, 2, 2, 2]),
1: Series([np.nan, 2, 2, 2, 2])},
dtype='float64'),
'median': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
1: Series([np.nan, 2, 4, 6, 8])},
dtype='float64'),
}
}
return expects
def _create_dtype_data(self, dtype):
sr1 = Series(np.arange(5), dtype=dtype)
sr2 = Series(np.arange(10, 0, -2), dtype=dtype)
df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype)
data = {
'sr1': sr1,
'sr2': sr2,
'df': df
}
return data
def _create_data(self):
self.data = self._create_dtype_data(self.dtype)
self.expects = self.get_expects()
def test_dtypes(self):
self._create_data()
for f_name, d_name in product(self.funcs.keys(), self.data.keys()):
f = self.funcs[f_name]
d = self.data[d_name]
exp = self.expects[d_name][f_name]
self.check_dtypes(f, f_name, d, d_name, exp)
def check_dtypes(self, f, f_name, d, d_name, exp):
roll = d.rolling(window=self.window)
result = f(roll)
tm.assert_almost_equal(result, exp)
class TestDtype_object(Dtype):
dtype = object
class Dtype_integer(Dtype):
pass
class TestDtype_int8(Dtype_integer):
dtype = np.int8
class TestDtype_int16(Dtype_integer):
dtype = np.int16
class TestDtype_int32(Dtype_integer):
dtype = np.int32
class TestDtype_int64(Dtype_integer):
dtype = np.int64
class Dtype_uinteger(Dtype):
pass
class TestDtype_uint8(Dtype_uinteger):
dtype = np.uint8
class TestDtype_uint16(Dtype_uinteger):
dtype = np.uint16
class TestDtype_uint32(Dtype_uinteger):
dtype = np.uint32
class TestDtype_uint64(Dtype_uinteger):
dtype = np.uint64
class Dtype_float(Dtype):
pass
class TestDtype_float16(Dtype_float):
dtype = np.float16
class TestDtype_float32(Dtype_float):
dtype = np.float32
class TestDtype_float64(Dtype_float):
dtype = np.float64
class TestDtype_category(Dtype):
dtype = 'category'
include_df = False
def _create_dtype_data(self, dtype):
sr1 = Series(range(5), dtype=dtype)
sr2 = Series(range(10, 0, -2), dtype=dtype)
data = {
'sr1': sr1,
'sr2': sr2
}
return data
class DatetimeLike(Dtype):
def check_dtypes(self, f, f_name, d, d_name, exp):
roll = d.rolling(window=self.window)
if f_name == 'count':
result = f(roll)
tm.assert_almost_equal(result, exp)
else:
# other methods not Implemented ATM
with pytest.raises(NotImplementedError):
f(roll)
class TestDtype_timedelta(DatetimeLike):
dtype = np.dtype('m8[ns]')
class TestDtype_datetime(DatetimeLike):
dtype = np.dtype('M8[ns]')
class TestDtype_datetime64UTC(DatetimeLike):
dtype = 'datetime64[ns, UTC]'
def _create_data(self):
pytest.skip("direct creation of extension dtype "
"datetime64[ns, UTC] is not supported ATM")
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10)))
.rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self):
self._check_moment_func(np.nansum, name='sum',
zero_min_periods_equal=False)
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(counter, name='count', has_min_periods=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(np.mean, name='mean')
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
result = Series(vals).rolling(5, center=True).mean()
expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
result = Series(vals).rolling(5, win_type='boxcar', center=True).mean()
expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type='boxcar').mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([])
result = vals.rolling(5, center=True, win_type='boxcar').mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type='boxcar').mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
def test_cmov_window_frame(self):
# Gh 8238
vals = np.array([[12.18, 3.64], [10.18, 9.16], [13.24, 14.61],
[4.51, 8.11], [6.15, 11.44], [9.14, 6.21],
[11.31, 10.67], [2.94, 6.51], [9.42, 8.39], [12.44,
7.34]])
        xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [9.252, 9.392],
                       [8.644, 9.906], [8.87, 10.208], [6.81, 8.588],
                       [7.792, 8.644], [9.05, 7.824], [np.nan, np.nan],
                       [np.nan, np.nan]])
# DataFrame
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).mean()
tm.assert_frame_equal(DataFrame(xp), rs)
# invalid method
with pytest.raises(AttributeError):
(DataFrame(vals).rolling(5, win_type='boxcar', center=True)
.std())
# sum
        xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [46.26, 46.96],
                       [43.22, 49.53], [44.35, 51.04], [34.05, 42.94],
                       [38.96, 43.22], [45.25, 39.12], [np.nan, np.nan],
                       [np.nan, np.nan]])
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).sum()
tm.assert_frame_equal(DataFrame(xp), rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type='boxcar', min_periods=4,
center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009, 14.03687,
13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556, 13.33889,
13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559, 14.17267,
14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671, 14.03559,
15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607, 14.20036,
14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan]
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan,
10.63, 14.48])
xps = {
'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
12.5575, 14.3675, 15.61667, 13.655],
'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345, 9.17869,
12.79607, 14.20036, 15.8706, 13.655],
'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
12.5575, 14.3675, 15.61667, 13.655],
'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599, 9.1764,
12.83559, 14.17267, 15.90976, 13.655],
'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384, 9.56348,
12.38009, 14.20565, 15.24694, 13.69758],
'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618, 9.16786,
13.02671, 14.03673, 16.08759, 13.65553],
'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667, 10.34667,
12.00556, 13.82125, 14.49429, 13.765],
'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
9.16438, 13.05052, 14.02175, 16.1098, 13.65509]
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
'kaiser': {'beta': 1.},
'gaussian': {'std': 1.},
'general_gaussian': {'power': 2., 'width': 2.}}
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xps = {
'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763, 13.89053,
13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161,
13.08516, 12.95111, 12.74577, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129,
12.90702, 12.83757, np.nan, np.nan]
}
xp = Series(xps[win_types_special])
rs = Series(vals).rolling(
5, win_type=win_types_special, center=True).mean(
**kwds[win_types_special])
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
'kaiser': {'beta': 1.},
'gaussian': {'std': 1.},
'general_gaussian': {'power': 2., 'width': 2.},
'slepian': {'width': 0.5}}
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(
5, win_type=win_types_special, center=True).mean(
**kwds[win_types_special])
tm.assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(np.median, name='median')
def test_rolling_min(self):
self._check_moment_func(np.min, name='min')
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self):
self._check_moment_func(np.max, name='max')
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize('q', [0.0, .1, .5, .9, 1.0])
def test_rolling_quantile(self, q):
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1. * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name='quantile',
quantile=q)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
        # is analogous to NumPy's percentile
row = 10
col = 5
idx = pd.date_range('20100101', periods=row, freq='B')
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize('quantile', [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize('interpolation', ['linear', 'lower', 'higher',
'nearest', 'midpoint'])
@pytest.mark.parametrize('data', [[1., 2., 3., 4., 5., 6., 7.],
[8., 1., 3., 4., 5., 2., 6., 7.],
[0., np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5], [np.nan, 0.7, 0.6]])
def test_rolling_quantile_interpolation_options(self, quantile,
interpolation, data):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(
quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
with pytest.raises(ValueError, match="Interpolation 'invalid'"
" is not supported"):
s.rolling(len(data), min_periods=1).quantile(
0.5, interpolation='invalid')
def test_rolling_quantile_param(self):
ser = Series([0.0, .1, .5, .9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile('foo')
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
def f(x):
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name='apply', func=f, raw=raw)
expected = Series([])
result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw)
tm.assert_series_equal(result, expected)
# gh-8080
s = Series([None, None, None])
result = s.rolling(2, min_periods=0).apply(lambda x: len(x), raw=raw)
expected = Series([1., 2., 2.])
tm.assert_series_equal(result, expected)
result = s.rolling(2, min_periods=0).apply(len, raw=raw)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('klass', [Series, DataFrame])
@pytest.mark.parametrize(
'method', [lambda x: x.rolling(window=2), lambda x: x.expanding()])
def test_apply_future_warning(self, klass, method):
# gh-5071
s = klass(np.arange(3))
with tm.assert_produces_warning(FutureWarning):
method(s).apply(lambda x: len(x))
def test_rolling_apply_out_of_bounds(self, raw):
# gh-1850
vals = pd.Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, raw=raw)
expected = pd.Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('window', [2, '2s'])
def test_rolling_apply_with_pandas_objects(self, window):
# 5071
df = pd.DataFrame({'A': np.random.randn(5),
'B': np.random.randint(0, 10, size=5)},
index=pd.date_range('20130101', periods=5, freq='s'))
# we have an equal spaced timeseries index
# so simulate removing the first period
def f(x):
if x.index[0] == df.index[0]:
return np.nan
return x.iloc[-1]
result = df.rolling(window).apply(f, raw=False)
expected = df.iloc[2:].reindex_like(df)
tm.assert_frame_equal(result, expected)
with pytest.raises(AttributeError):
df.rolling(window).apply(f, raw=True)
def test_rolling_std(self):
self._check_moment_func(lambda x: np.std(x, ddof=1),
name='std')
self._check_moment_func(lambda x: np.std(x, ddof=0),
name='std', ddof=0)
def test_rolling_std_1obs(self):
vals = pd.Series([1., 2., 3., 4., 5.])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.] * 5)
tm.assert_series_equal(result, expected)
result = (pd.Series([np.nan, np.nan, 3, 4, 5])
.rolling(3, min_periods=2).std())
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series([0.0011448196318903589, 0.00028718669878572767,
0.00028718669878572767, 0.00028718669878572767,
0.00028718669878572767])
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self):
self._check_moment_func(lambda x: np.var(x, ddof=1),
name='var')
self._check_moment_func(lambda x: np.var(x, ddof=0),
name='var', ddof=0)
@td.skip_if_no_scipy
def test_rolling_skew(self):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name='skew')
@td.skip_if_no_scipy
def test_rolling_kurt(self):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False),
name='kurt')
    def _check_moment_func(self, static_comp, name, has_min_periods=True,
                           has_center=True, has_time_rule=True,
                           fill_value=None, zero_min_periods_equal=True,
                           **kwargs):
        # 'raw' is only meaningful for apply; bind it locally for the
        # DataFrame.apply comparisons below while leaving it in kwargs so
        # that rolling.apply still receives it via get_result
        raw = kwargs.get('raw', True)

        def get_result(obj, window, min_periods=None, center=False):
            r = obj.rolling(window=window, min_periods=min_periods,
                            center=center)
            return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1],
static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample('B').mean()
frame = self.frame[::2].resample('B').mean()
if has_min_periods:
series_result = get_result(series, window=win,
min_periods=minp)
frame_result = get_result(frame, window=win,
min_periods=minp)
else:
series_result = get_result(series, window=win)
frame_result = get_result(frame, window=win)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1],
static_comp(trunc_series))
tm.assert_series_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1,
min_periods=minp)
expected = get_result(self.series, len(self.series),
min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask],
expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1)
expected = get_result(self.series, len(self.series))
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20,
min_periods=15)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]),
20)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = ['x%d' % x for x in range(12)]
if has_min_periods:
minp = 10
series_xp = get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp).shift(-12).reindex(self.series.index)
frame_xp = get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = get_result(self.series, window=25,
min_periods=minp, center=True)
frame_rs = get_result(self.frame, window=25, min_periods=minp,
center=True)
else:
series_xp = get_result(
self.series.reindex(list(self.series.index) + s),
window=25).shift(-12).reindex(self.series.index)
frame_xp = get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25).shift(-12).reindex(self.frame.index)
series_rs = get_result(self.series, window=25, center=True)
frame_rs = get_result(self.frame, window=25, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
self._check_ew(name='mean')
vals = pd.Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize('adjust', [True, False])
@pytest.mark.parametrize('ignore_na', [True, False])
def test_ewma_cases(self, adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.] + [np.nan] * 5 + [1.])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.] * len(s)))
s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.] * 4))
# GH 7603
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
        for (s, adjust, ignore_na, w) in [
                (s0, True, False, [np.nan, (1. - alpha), 1.]),
                (s0, True, True, [np.nan, (1. - alpha), 1.]),
                (s0, False, False, [np.nan, (1. - alpha), alpha]),
                (s0, False, True, [np.nan, (1. - alpha), alpha]),
                (s1, True, False, [(1. - alpha) ** 2, np.nan, 1.]),
                (s1, True, True, [(1. - alpha), np.nan, 1.]),
                (s1, False, False, [(1. - alpha) ** 2, np.nan, alpha]),
                (s1, False, True, [(1. - alpha), np.nan, alpha]),
                (s2, True, False,
                 [np.nan, (1. - alpha) ** 3, np.nan, np.nan, 1., np.nan]),
                (s2, True, True,
                 [np.nan, (1. - alpha), np.nan, np.nan, 1., np.nan]),
                (s2, False, False,
                 [np.nan, (1. - alpha) ** 3, np.nan, np.nan, alpha, np.nan]),
                (s2, False, True,
                 [np.nan, (1. - alpha), np.nan, np.nan, alpha, np.nan]),
                (s3, True, False,
                 [(1. - alpha) ** 3, np.nan, (1. - alpha), 1.]),
                (s3, True, True,
                 [(1. - alpha) ** 2, np.nan, (1. - alpha), 1.]),
                (s3, False, False,
                 [(1. - alpha) ** 3, np.nan, (1. - alpha) * alpha,
                  alpha * ((1. - alpha) ** 2 + alpha)]),
                (s3, False, True,
                 [(1. - alpha) ** 2, np.nan, (1. - alpha) * alpha, alpha])]:
expected = simple_wma(s, Series(w))
result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=com, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
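    # simple_wma above relies on the closed form of the EWM mean,
    #   y_t = sum_i w_i * x_{t-i} / sum_i w_i,
    # so each adjust/ignore_na combination reduces to a cumulative weighted
    # average with the per-observation weights w listed in the cases above.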
def test_ewmvar(self):
self._check_ew(name='var')
def test_ewmvol(self):
self._check_ew(name='vol')
def test_ewma_span_com_args(self):
A = self.series.ewm(com=9.5).mean()
B = self.series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20)
with pytest.raises(ValueError):
self.series.ewm().mean()
def test_ewma_halflife_arg(self):
A = self.series.ewm(com=13.932726172912965).mean()
B = self.series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm()
def test_ewm_alpha(self):
# GH 10789
s = Series(self.arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
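    # The four decay parametrizations are equivalent encodings of a single
    # smoothing factor:
    #   alpha = 1 / (1 + com) = 2 / (span + 1) = 1 - exp(-ln(2) / halflife)
    # e.g. alpha = 0.61722699889169674 gives com = 1/alpha - 1 ~ 0.6201,
    # span = 2/alpha - 1 ~ 2.2403 and halflife ~ 0.7218, matching the
    # constants used above.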
def test_ewm_alpha_arg(self):
# GH 10789
s = self.series
with pytest.raises(ValueError):
s.ewm()
with pytest.raises(ValueError):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(halflife=10.0, alpha=0.5)
def test_ewm_domain_checks(self):
# GH 12492
s = Series(self.arr)
# com must satisfy: com >= 0
pytest.raises(ValueError, s.ewm, com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
# span must satisfy: span >= 1
pytest.raises(ValueError, s.ewm, span=-0.1)
pytest.raises(ValueError, s.ewm, span=0.0)
pytest.raises(ValueError, s.ewm, span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
# halflife must satisfy: halflife > 0
pytest.raises(ValueError, s.ewm, halflife=-0.1)
pytest.raises(ValueError, s.ewm, halflife=0.0)
s.ewm(halflife=0.1)
# alpha must satisfy: 0 < alpha <= 1
pytest.raises(ValueError, s.ewm, alpha=-0.1)
pytest.raises(ValueError, s.ewm, alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
pytest.raises(ValueError, s.ewm, alpha=1.1)
@pytest.mark.parametrize('method', ['mean', 'vol', 'var'])
def test_ew_empty_series(self, method):
vals = pd.Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
def _check_ew(self, name=None, preserve_nan=False):
series_result = getattr(self.series.ewm(com=10), name)()
assert isinstance(series_result, Series)
frame_result = getattr(self.frame.ewm(com=10), name)()
assert type(frame_result) == DataFrame
result = getattr(self.series.ewm(com=10), name)()
if preserve_nan:
assert result[self._nan_locs].isna().all()
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
for min_periods in (0, 1):
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == 'mean':
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.vol, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(Series().ewm(com=50, min_periods=min_periods),
name)()
tm.assert_series_equal(result, Series())
# check series of length 1
result = getattr(Series([1.]).ewm(50, min_periods=min_periods),
name)()
if name == 'mean':
tm.assert_series_equal(result, Series([1.]))
else:
# ewm.std, ewm.vol, ewm.var with bias=False require at least
# two values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()
assert result2.dtype == np.float_
class TestPairwise(object):
# GH 7738
df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
columns=['C', 'C']),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
columns=[1, 0.]),
DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
columns=[0, 1.]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
columns=[1., 'X']), ]
df2 = DataFrame([[None, 1, 1], [None, 1, 2],
[None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
s = Series([1, 1, 3, 8])
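    # df1s deliberately covers duplicated, mixed-type, and unordered column
    # labels (e.g. [1, 0], [1, 1], ['C', 1]) so the pairwise machinery is
    # exercised against label-alignment edge cases; every frame holds the
    # same positional values.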
def compare(self, result, expected):
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, f):
# DataFrame methods (which do not call _flex_binary_moment())
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.columns)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x: x.expanding().cov(pairwise=True),
lambda x: x.expanding().corr(pairwise=True),
lambda x: x.rolling(window=3).cov(pairwise=True),
lambda x: x.rolling(window=3).corr(pairwise=True),
lambda x: x.ewm(com=3).cov(pairwise=True),
lambda x: x.ewm(com=3).corr(pairwise=True)])
def test_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=True
# note that we may construct the 1st level of the MI
        # in a non-monotonic way, so compare accordingly
results = []
for i, df in enumerate(self.df1s):
result = f(df)
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
safe_sort(df.columns.unique()))
tm.assert_index_equal(result.columns, df.columns)
            results.append(df)  # note: appends the input frame, not the pairwise result
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x: x.expanding().cov(pairwise=False),
lambda x: x.expanding().corr(pairwise=False),
lambda x: x.rolling(window=3).cov(pairwise=False),
lambda x: x.rolling(window=3).corr(pairwise=False),
lambda x: x.ewm(com=3).cov(pairwise=False),
lambda x: x.ewm(com=3).corr(pairwise=False), ])
def test_no_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=False
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.index)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y, pairwise=True),
lambda x, y: x.expanding().corr(y, pairwise=True),
lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ])
def test_pairwise_with_other(self, f):
# DataFrame with another DataFrame, pairwise=True
results = [f(df, self.df2) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
safe_sort(self.df2.columns.unique()))
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y, pairwise=False),
lambda x, y: x.expanding().corr(y, pairwise=False),
lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ])
def test_no_pairwise_with_other(self, f):
# DataFrame with another DataFrame, pairwise=False
results = [f(df, self.df2) if df.columns.is_unique else None
for df in self.df1s]
for (df, result) in zip(self.df1s, results):
if result is not None:
with catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
# we can have int and str columns
expected_index = df.index.union(self.df2.index)
expected_columns = df.columns.union(self.df2.columns)
tm.assert_index_equal(result.index, expected_index)
tm.assert_index_equal(result.columns, expected_columns)
else:
with pytest.raises(ValueError,
match="'arg1' columns are not unique"):
f(df, self.df2)
with pytest.raises(ValueError,
match="'arg2' columns are not unique"):
f(self.df2, df)
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y),
lambda x, y: x.expanding().corr(y),
lambda x, y: x.rolling(window=3).cov(y),
lambda x, y: x.rolling(window=3).corr(y),
lambda x, y: x.ewm(com=3).cov(y),
lambda x, y: x.ewm(com=3).corr(y), ])
def test_pairwise_with_series(self, f):
# DataFrame with a Series
results = ([f(df, self.s) for df in self.df1s] +
[f(self.s, df) for df in self.df1s])
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.index)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
# create the data only once, since the tests below do not mutate it
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan,
np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5.,
np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5.,
np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)), ]
def create_dataframes():
return ([DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)),
columns=['a', 'b', 99, 'd', 'd'])] +
[DataFrame(s) for s in create_series()])
def is_constant(x):
values = x.values.ravel()
return len(set(values[notna(values)])) == 1
def no_nans(x):
return x.notna().all().all()
    # each entry is a tuple of (object, is_constant, no_nans)
data = create_series() + create_dataframes()
return [(x, is_constant(x), no_nans(x)) for x in data]
_consistency_data = _create_consistency_data()
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
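# e.g. yields (1, 0, False), (1, 0, True), (1, 1, False), (1, 1, True),
# (2, 0, False), ...; combinations with min_periods > window are skipped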
class TestMomentsConsistency(Base):
base_functions = [
(lambda v: Series(v).count(), None, 'count'),
(lambda v: Series(v).max(), None, 'max'),
(lambda v: Series(v).min(), None, 'min'),
(lambda v: Series(v).sum(), None, 'sum'),
(lambda v: Series(v).mean(), None, 'mean'),
(lambda v: Series(v).std(), 1, 'std'),
(lambda v: Series(v).cov(Series(v)), None, 'cov'),
(lambda v: Series(v).corr(Series(v)), None, 'corr'),
(lambda v: Series(v).var(), 1, 'var'),
# restore once GH 8086 is fixed
# lambda v: Series(v).skew(), 3, 'skew'),
# (lambda v: Series(v).kurt(), 4, 'kurt'),
# restore once GH 8084 is fixed
# lambda v: Series(v).quantile(0.3), None, 'quantile'),
(lambda v: Series(v).median(), None, 'median'),
(np.nanmax, 1, 'max'),
(np.nanmin, 1, 'min'),
(np.nansum, 1, 'sum'),
(np.nanmean, 1, 'mean'),
(lambda v: np.nanstd(v, ddof=1), 1, 'std'),
(lambda v: np.nanvar(v, ddof=1), 1, 'var'),
(np.nanmedian, 1, 'median'),
]
no_nan_functions = [
(np.max, None, 'max'),
(np.min, None, 'min'),
(np.sum, None, 'sum'),
(np.mean, None, 'mean'),
(lambda v: np.std(v, ddof=1), 1, 'std'),
(lambda v: np.var(v, ddof=1), 1, 'var'),
(np.median, None, 'median'),
]
def _create_data(self):
super(TestMomentsConsistency, self)._create_data()
self.data = _consistency_data
def setup_method(self, method):
self._create_data()
def _test_moments_consistency(self, min_periods, count, mean, mock_mean,
corr, var_unbiased=None, std_unbiased=None,
cov_unbiased=None, var_biased=None,
std_biased=None, cov_biased=None,
var_debiasing_factors=None):
def _non_null_values(x):
values = x.values.ravel()
return set(values[notna(values)].tolist())
for (x, is_constant, no_nans) in self.data:
count_x = count(x)
mean_x = mean(x)
if mock_mean:
# check that mean equals mock_mean
expected = mock_mean(x)
assert_equal(mean_x, expected.astype('float64'))
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
# assert _non_null_values(corr_x_x).issubset(set([1.]))
# restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
assert_equal(corr_x_x, expected)
if var_unbiased and var_biased and var_debiasing_factors:
# check variance debiasing factors
var_unbiased_x = var_unbiased(x)
var_biased_x = var_biased(x)
var_debiasing_factors_x = var_debiasing_factors(x)
assert_equal(var_unbiased_x, var_biased_x *
var_debiasing_factors_x)
for (std, var, cov) in [(std_biased, var_biased, cov_biased),
(std_unbiased, var_unbiased, cov_unbiased)
]:
# check that var(x), std(x), and cov(x) are all >= 0
var_x = var(x)
std_x = std(x)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
if cov:
cov_x_x = cov(x, x)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
assert_equal(var_x, cov_x_x)
# check that var(x) == std(x)^2
assert_equal(var_x, std_x * std_x)
if var is var_biased:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = mean(x * x)
assert_equal(var_x, mean_x2 - (mean_x * mean_x))
if is_constant:
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.
if var is var_unbiased:
expected[count_x < 2] = np.nan
assert_equal(var_x, expected)
if isinstance(x, Series):
for (y, is_constant, no_nans) in self.data:
if not x.isna().equals(y.isna()):
# can only easily test two Series with similar
# structure
continue
# check that cor(x, y) is symmetric
corr_x_y = corr(x, y)
corr_y_x = corr(y, x)
assert_equal(corr_x_y, corr_y_x)
if cov:
# check that cov(x, y) is symmetric
cov_x_y = cov(x, y)
cov_y_x = cov(y, x)
assert_equal(cov_x_y, cov_y_x)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
var_x_plus_y = var(x + y)
var_y = var(y)
assert_equal(cov_x_y, 0.5 *
(var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
std_y = std(y)
assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if cov is cov_biased:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_y = mean(y)
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y -
(mean_x * mean_y))
@pytest.mark.slow
@pytest.mark.parametrize('min_periods', [0, 1, 2, 3, 4])
@pytest.mark.parametrize('adjust', [True, False])
@pytest.mark.parametrize('ignore_na', [True, False])
def test_ewm_consistency(self, min_periods, adjust, ignore_na):
def _weights(s, com, adjust, ignore_na):
if isinstance(s, DataFrame):
if not len(s.columns):
return DataFrame(index=s.index, columns=s.columns)
w = concat([
_weights(s.iloc[:, i], com=com, adjust=adjust,
ignore_na=ignore_na)
for i, _ in enumerate(s.columns)], axis=1)
w.index = s.index
w.columns = s.columns
return w
w = Series(np.nan, index=s.index)
alpha = 1. / (1. + com)
if ignore_na:
w[s.notna()] = _weights(s[s.notna()], com=com,
adjust=adjust, ignore_na=False)
elif adjust:
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1. / (1. - alpha), i)
else:
sum_wts = 0.
prev_i = -1
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.
else:
w.iat[i] = alpha * sum_wts / pow(1. - alpha,
i - prev_i)
sum_wts += w.iat[i]
prev_i = i
return w
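        # The weights built above mirror the adjust=False recursion
        # y_t = (1 - alpha) * y_{t-1} + alpha * x_t: each non-initial
        # observation gets weight alpha relative to the accumulated weight
        # of earlier observations, rescaled by (1 - alpha)**gap to account
        # for intervening NaNs when ignore_na=False.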
def _variance_debiasing_factors(s, com, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method='ffill')
cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.] = np.nan
return numerator / denominator
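        # The factor above is the standard effective-sample-size correction
        # for weighted variance, (sum w)**2 / ((sum w)**2 - sum(w**2)),
        # which reduces to n / (n - 1) when all weights are equal.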
def _ewma(s, com, min_periods, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
result = s.multiply(weights).cumsum().divide(weights.cumsum(
)).fillna(method='ffill')
result[s.expanding().count() < (max(min_periods, 1) if min_periods
else 1)] = np.nan
return result
com = 3.
# test consistency between different ewm* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: x.expanding().count(),
mean=lambda x: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).mean(),
mock_mean=lambda x: _ewma(x, com=com,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na),
corr=lambda x, y: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).corr(y),
var_unbiased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).var(bias=False)),
std_unbiased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.std(bias=False)),
cov_unbiased=lambda x, y: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.cov(y, bias=False)),
var_biased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.var(bias=True)),
std_biased=lambda x: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).std(bias=True),
cov_biased=lambda x, y: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.cov(y, bias=True)),
var_debiasing_factors=lambda x: (
_variance_debiasing_factors(x, com=com, adjust=adjust,
ignore_na=ignore_na)))
@pytest.mark.slow
@pytest.mark.parametrize(
'min_periods', [0, 1, 2, 3, 4])
def test_expanding_consistency(self, min_periods):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
# test consistency between different expanding_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: x.expanding().count(),
mean=lambda x: x.expanding(
min_periods=min_periods).mean(),
mock_mean=lambda x: x.expanding(
min_periods=min_periods).sum() / x.expanding().count(),
corr=lambda x, y: x.expanding(
min_periods=min_periods).corr(y),
var_unbiased=lambda x: x.expanding(
min_periods=min_periods).var(),
std_unbiased=lambda x: x.expanding(
min_periods=min_periods).std(),
cov_unbiased=lambda x, y: x.expanding(
min_periods=min_periods).cov(y),
var_biased=lambda x: x.expanding(
min_periods=min_periods).var(ddof=0),
std_biased=lambda x: x.expanding(
min_periods=min_periods).std(ddof=0),
cov_biased=lambda x, y: x.expanding(
min_periods=min_periods).cov(y, ddof=0),
var_debiasing_factors=lambda x: (
x.expanding().count() /
(x.expanding().count() - 1.)
.replace(0., np.nan)))
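            # with equal weights the debiasing factor is count / (count - 1),
            # i.e. the usual ddof=1 Bessel correction used just above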
# test consistency between expanding_xyz() and either (a)
# expanding_apply of Series.xyz(), or (b) expanding_apply of
# np.nanxyz()
for (x, is_constant, no_nans) in self.data:
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
expanding_f = getattr(
x.expanding(min_periods=min_periods), name)
if (require_min_periods and
(min_periods is not None) and
(min_periods < require_min_periods)):
continue
if name == 'count':
expanding_f_result = expanding_f()
expanding_apply_f_result = x.expanding(
min_periods=0).apply(func=f, raw=True)
else:
if name in ['cov', 'corr']:
expanding_f_result = expanding_f(
pairwise=False)
else:
expanding_f_result = expanding_f()
expanding_apply_f_result = x.expanding(
min_periods=min_periods).apply(func=f, raw=True)
# GH 9422
if name in ['sum', 'prod']:
assert_equal(expanding_f_result,
expanding_apply_f_result)
@pytest.mark.slow
@pytest.mark.parametrize(
'window,min_periods,center', list(_rolling_consistency_cases()))
def test_rolling_consistency(self, window, min_periods, center):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
# test consistency between different rolling_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: (
x.rolling(window=window, center=center)
.count()),
mean=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).mean()),
mock_mean=lambda x: (
x.rolling(window=window,
min_periods=min_periods,
center=center).sum()
.divide(x.rolling(window=window,
min_periods=min_periods,
center=center).count())),
corr=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).corr(y)),
var_unbiased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).var()),
std_unbiased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).std()),
cov_unbiased=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).cov(y)),
var_biased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).var(ddof=0)),
std_biased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).std(ddof=0)),
cov_biased=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).cov(y, ddof=0)),
var_debiasing_factors=lambda x: (
x.rolling(window=window, center=center).count()
.divide((x.rolling(window=window, center=center)
.count() - 1.)
.replace(0., np.nan))))
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
for (x, is_constant, no_nans) in self.data:
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center,
min_periods=min_periods), name)
if require_min_periods and (
min_periods is not None) and (
min_periods < require_min_periods):
continue
if name == 'count':
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=0,
center=center).apply(func=f, raw=True)
else:
if name in ['cov', 'corr']:
rolling_f_result = rolling_f(
pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods,
center=center).apply(func=f, raw=True)
# GH 9422
if name in ['sum', 'prod']:
assert_equal(rolling_f_result,
rolling_apply_f_result)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_cov_pairwise(self):
self._check_pairwise_moment('rolling', 'cov', window=10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
self._check_pairwise_moment('rolling', 'corr', window=10,
min_periods=5)
@pytest.mark.parametrize('window', range(7))
def test_rolling_corr_with_zero_variance(self, window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def _check_pairwise_moment(self, dispatch, name, **kwargs):
def get_result(obj, obj2=None):
return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
result = get_result(self.frame)
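        # the pairwise result is indexed by a MultiIndex of (date, column);
        # select column-pair (1, 5) to compare against the flat binary call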
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = get_result(self.frame[1], self.frame[5])
tm.assert_series_equal(result, expected, check_names=False)
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
pytest.raises(TypeError, rwindow._flex_binary_moment, 5, 6, None)
def test_corr_sanity(self):
# GH 3155
df = DataFrame(np.array(
[[0.87024726, 0.18505595], [0.64355431, 0.3091617],
[0.92372966, 0.50552513], [0.00203756, 0.04520709],
[0.84780328, 0.33394331], [0.78369152, 0.63919667]]))
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
            try:
                assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
            except AssertionError:
                print(res)
                raise
@pytest.mark.parametrize('method', ['corr', 'cov'])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(
series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame({k: getattr(self.frame[k].rolling(
window=10), method)(frame2[k]) for k in self.frame})
tm.assert_frame_equal(res3, exp)
def test_ewmcov(self):
self._check_binary_ew('cov')
def test_ewmcov_pairwise(self):
self._check_pairwise_moment('ewm', 'cov', span=10, min_periods=5)
def test_ewmcorr(self):
self._check_binary_ew('corr')
def test_ewmcorr_pairwise(self):
self._check_pairwise_moment('ewm', 'corr', span=10, min_periods=5)
def _check_binary_ew(self, name):
def func(A, B, com, **kwargs):
return getattr(A.ewm(com, **kwargs), name)(B)
A = Series(randn(50), index=np.arange(50))
B = A[2:] + randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([]))
# check series of length 1
result = func(
Series([1.]), Series([1.]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([np.NaN]))
pytest.raises(Exception, func, A, randn(50), 20, min_periods=5)
def test_expanding_apply_args_kwargs(self, raw):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = df.expanding().apply(np.mean, raw=raw) + 20.
result = df.expanding().apply(mean_w_arg,
raw=raw,
args=(20, ))
tm.assert_frame_equal(result, expected)
result = df.expanding().apply(mean_w_arg,
raw=raw,
kwargs={'const': 20})
tm.assert_frame_equal(result, expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = self.series.expanding().count()
tm.assert_almost_equal(result, self.series.rolling(
window=len(self.series)).count())
def test_expanding_quantile(self):
result = self.series.expanding().quantile(0.5)
rolling_result = self.series.rolling(window=len(self.series),
min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(self):
        result = self.frame.expanding().cov()
        rolling_result = self.frame.rolling(window=len(self.frame),
                                            min_periods=1).cov()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().corr(s2)
expected = Series([None, None, None, 1.])
tm.assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'f',
[
lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=False)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=False)),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(
window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(win_type='boxcar',
window=10, min_periods=5).mean()])
def test_rolling_functions_window_non_shrinkage(self, f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
        except ImportError:
# scipy needed for rolling_window
pytest.skip("scipy not available")
def test_rolling_functions_window_non_shrinkage_binary(self):
# corr/cov return a MI DataFrame
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(['A', 'B'], name='foo'),
index=Index(range(4), name='bar'))
df_expected = DataFrame(
columns=Index(['A', 'B'], name='foo'),
index=pd.MultiIndex.from_product([df.index, df.columns],
names=['bar', 'foo']),
dtype='float64')
functions = [lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True))]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_moment_functions_zero_length(self):
# GH 8056
s = Series()
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=['a'])
df2['a'] = df2['a'].astype('float64')
df2_expected = df2
functions = [lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(
x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(
x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(
sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(window=10).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(
x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(
x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(
window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(win_type='boxcar',
window=10, min_periods=5).mean(),
]
for f in functions:
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
            except ImportError:
# scipy needed for rolling_window
continue
def test_moment_functions_zero_length_pairwise(self):
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=Index(['a'], name='foo'),
index=Index([], name='bar'))
df2['a'] = df2['a'].astype('float64')
df1_expected = DataFrame(
index=pd.MultiIndex.from_product([df1.index, df1.columns]),
columns=Index([]))
df2_expected = DataFrame(
index=pd.MultiIndex.from_product([df2.index, df2.columns],
names=['bar', 'foo']),
columns=Index(['a'], name='foo'),
dtype='float64')
functions = [lambda x: (x.expanding(min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5)
.corr(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True)),
]
for f in functions:
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1, 5], [3, 2], [3, 9]],
columns=Index(['A', 'B'], name='foo'))
df1a = DataFrame([[1, 5], [3, 9]],
index=[0, 2],
columns=Index(['A', 'B'], name='foo'))
df2 = DataFrame([[5, 6], [None, None], [2, 1]],
columns=Index(['X', 'Y'], name='foo'))
df2a = DataFrame([[5, 6], [2, 1]],
index=[0, 2],
columns=Index(['X', 'Y'], name='foo'))
# TODO: xref gh-15826
# .loc is not preserving the names
        result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
expected = DataFrame([[-3.0, -6.0], [-5.0, -10.0]],
columns=Index(['A', 'B'], name='foo'),
index=Index(['X', 'Y'], name='foo'))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1, 2], [3, 2], [3, 4]],
columns=['A', 'B'],
index=Index(range(3), name='bar'))
df1a = DataFrame([[1, 2], [3, 4]],
index=Index([0, 2], name='bar'),
columns=['A', 'B'])
df2 = DataFrame([[5, 6], [None, None], [2, 1]],
columns=['X', 'Y'],
index=Index(range(3), name='bar'))
df2a = DataFrame([[5, 6], [2, 1]],
index=Index([0, 2], name='bar'),
columns=['X', 'Y'])
result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]],
columns=['A', 'B'],
index=Index(['X', 'Y']))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
        d = Series([-1.50837035, -0.1297039, 0.19501095,
                    1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
        d = Series([-1.50837035, -0.1297039, 0.19501095,
                    1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr(self):
        # GH 18804: rolling skew over all-equal values should return NaN
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr(self):
        # GH 18804: rolling kurt over all-equal values should return NaN
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
@pytest.mark.parametrize('func,static_comp', [('sum', np.sum),
('mean', np.mean),
('max', np.max),
('min', np.min)],
ids=['sum', 'mean', 'max', 'min'])
def test_expanding_func(self, func, static_comp):
def expanding_func(x, min_periods=1, center=False, axis=0):
exp = x.expanding(min_periods=min_periods,
center=center, axis=axis)
return getattr(exp, func)()
self._check_expanding(expanding_func, static_comp, preserve_nan=False)
def test_expanding_apply(self, raw):
def expanding_mean(x, min_periods=1):
exp = x.expanding(min_periods=min_periods)
result = exp.apply(lambda x: x.mean(), raw=raw)
return result
# TODO(jreback), needed to add preserve_nan=False
# here to make this pass
self._check_expanding(expanding_mean, np.mean, preserve_nan=False)
ser = Series([])
tm.assert_series_equal(ser, ser.expanding().apply(
lambda x: x.mean(), raw=raw))
# GH 8080
s = Series([None, None, None])
result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw)
expected = Series([1., 2., 3.])
tm.assert_series_equal(result, expected)
def _check_expanding(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
series_result = func(self.series)
assert isinstance(series_result, Series)
frame_result = func(self.frame)
assert isinstance(frame_result, DataFrame)
result = func(self.series)
tm.assert_almost_equal(result[10], static_comp(self.series[:11]))
if preserve_nan:
assert result.iloc[self._nan_locs].isna().all()
ser = Series(randn(50))
if has_min_periods:
result = func(ser, min_periods=30)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = func(ser, min_periods=15)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(randn(20))
result = func(ser2, min_periods=5)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = func(ser, min_periods=0)
result1 = func(ser, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = func(ser)
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
r = series.resample('D').min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error(self):
        # GH 11722: repeated rolling-median calls on a large Series
        # should not raise a MemoryError
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
def test_rolling_min_max_numeric_types(self):
# GH12373
types_test = [np.dtype("f{}".format(width)) for width in [4, 8]]
types_test.extend([np.dtype("{}{}".format(sign, width))
for width in [1, 2, 4, 8] for sign in "ui"])
for data_type in types_test:
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = (DataFrame(np.arange(20, dtype=data_type))
.rolling(window=5).max())
assert result.dtypes[0] == np.dtype("f8")
result = (DataFrame(np.arange(20, dtype=data_type))
.rolling(window=5).min())
assert result.dtypes[0] == np.dtype("f8")
class TestGrouperGrouping(object):
def setup_method(self, method):
self.series = Series(np.arange(10))
self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.arange(40)})
def test_mutated(self):
def f():
self.frame.groupby('A', foo=1)
pytest.raises(TypeError, f)
g = self.frame.groupby('A')
assert not g.mutated
g = self.frame.groupby('A', mutated=True)
assert g.mutated
def test_getitem(self):
g = self.frame.groupby('A')
g_mutated = self.frame.groupby('A', mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())
result = g.rolling(2).mean().B
tm.assert_series_equal(result, expected)
result = g.rolling(2).B.mean()
tm.assert_series_equal(result, expected)
result = g.B.rolling(2).mean()
tm.assert_series_equal(result, expected)
result = self.frame.B.groupby(self.frame.A).rolling(2).mean()
tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
g = self.frame.groupby('A')
r = g.rolling(2)
g_mutated = self.frame.groupby('A', mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).count())
result = r.B.count()
tm.assert_series_equal(result, expected)
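        # repeating the call on the cached selection must give the same result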
result = r.B.count()
tm.assert_series_equal(result, expected)
def test_rolling(self):
g = self.frame.groupby('A')
r = g.rolling(window=4)
for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
tm.assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.rolling(4).quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_rolling_corr_cov(self):
g = self.frame.groupby('A')
r = g.rolling(window=4)
for f in ['corr', 'cov']:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.rolling(4), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_rolling_apply(self, raw):
g = self.frame.groupby('A')
r = g.rolling(window=4)
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(
lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
# GH 14013
df = pd.DataFrame({'A': ['foo'] * 3 + ['bar'] * 3, 'B': [1] * 6})
g = df.groupby('A')
mi = pd.MultiIndex.from_tuples([('bar', 3), ('bar', 4), ('bar', 5),
('foo', 0), ('foo', 1), ('foo', 2)])
mi.names = ['A', None]
# Grouped column should not be a part of the output
expected = pd.DataFrame([np.nan, 2., 2.] * 2, columns=['B'], index=mi)
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
# Call an arbitrary function on the groupby
g.sum()
# Make sure nothing has been mutated
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
def test_expanding(self):
g = self.frame.groupby('A')
r = g.expanding()
for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.expanding(), f)())
tm.assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=0)
expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.expanding().quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_expanding_corr_cov(self):
g = self.frame.groupby('A')
r = g.expanding()
for f in ['corr', 'cov']:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.expanding(), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.expanding(), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_expanding_apply(self, raw):
g = self.frame.groupby('A')
r = g.expanding()
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(
lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
class TestRollingTS(object):
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': range(5)}).set_index('A')
self.ragged = DataFrame({'B': range(5)})
self.ragged.index = [Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')]
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]},
index=[Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')])
df
df.rolling('2s').sum()
def test_valid(self):
df = self.regular
# not a valid freq
with pytest.raises(ValueError):
df.rolling(window='foobar')
# not a datetimelike index
with pytest.raises(ValueError):
df.reset_index().rolling(window='foobar')
# non-fixed freqs
for freq in ['2MS', pd.offsets.MonthBegin(2)]:
with pytest.raises(ValueError):
df.rolling(window=freq)
for freq in ['1D', pd.offsets.Day(2), '2ms']:
df.rolling(window=freq)
# non-integer min_periods
for minp in [1.0, 'foo', np.array([1, 2, 3])]:
with pytest.raises(ValueError):
df.rolling(window='1D', min_periods=minp)
# center is not implemented
with pytest.raises(NotImplementedError):
df.rolling(window='1D', center=True)
def test_on(self):
df = self.regular
# not a valid column
with pytest.raises(ValueError):
df.rolling(window='2s', on='foobar')
# column is valid
df = df.copy()
df['C'] = pd.date_range('20130101', periods=len(df))
df.rolling(window='2d', on='C').sum()
# invalid columns
with pytest.raises(ValueError):
df.rolling(window='2d', on='B')
# ok even though on non-selected
df.rolling(window='2d', on='C').B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': range(5)})
assert df.A.is_monotonic
df.rolling('2s', on='A').sum()
df = df.set_index('A')
assert df.index.is_monotonic
df.rolling('2s').sum()
# non-monotonic
df.index = reversed(df.index.tolist())
assert not df.index.is_monotonic
with pytest.raises(ValueError):
df.rolling('2s').sum()
df = df.reset_index()
with pytest.raises(ValueError):
df.rolling('2s', on='A').sum()
def test_frame_on(self):
df = DataFrame({'B': range(5),
'C': pd.date_range('20130101 09:00:00',
periods=5,
freq='3s')})
df['A'] = [Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')]
        # simulate the time-based window via set_index and compare to 'on'
expected = (df.set_index('A')
.rolling('2s')
.B
.sum()
.reset_index(drop=True)
)
result = (df.rolling('2s', on='A')
.B
.sum()
)
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
        # note that expected is built by setting the index, computing, and
        # resetting it, so its columns come back in a different order than
        # the actual result, which keeps the original column ordering
expected = (df.set_index('A')
.rolling('2s')[['B']]
.sum()
.reset_index()[['B', 'A']]
)
result = (df.rolling('2s', on='A')[['B']]
.sum()
)
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame({'A': [0, 1, 2, 3, 4],
'B': [0, 1, 2, np.nan, 4],
'C': Index([Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')])},
columns=['A', 'C', 'B'])
expected1 = DataFrame({'A': [0., 1, 3, 3, 7],
'B': [0, 1, 3, np.nan, 4],
'C': df['C']},
columns=['A', 'C', 'B'])
result = df.rolling('2s', on='C').sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name='B')
result = df.rolling('2s', on='C').B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[['A', 'B', 'C']]
result = df.rolling('2s', on='C')[['A', 'B', 'C']].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = pd.date_range('20130101', periods=5, freq='D')
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='1D').sum()
tm.assert_frame_equal(result, expected)
df.index = pd.date_range('20130101', periods=5, freq='2D')
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='2D', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window='2D').sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
        # a fixed window needs min_periods=1 to match the offset window's
        # default
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling('2s').sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling('2s', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame({'A': [1] * 5},
index=[Timestamp('20130101 09:00:01'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:04'),
Timestamp('20130101 09:00:06')])
# closed must be 'right', 'left', 'both', 'neither'
with pytest.raises(ValueError):
self.regular.rolling(window='2s', closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling('2s', closed='right').sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling('2s').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling('2s', closed='both').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling('2s', closed='left').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling('2s', closed='neither').sum()
tm.assert_frame_equal(result, expected)
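    # Worked example for the '2s' window ending at 09:00:03:
    #   closed='right'   -> (09:00:01, 09:00:03]  -> {02, 03}      -> sum 2
    #   closed='both'    -> [09:00:01, 09:00:03]  -> {01, 02, 03}  -> sum 3
    #   closed='left'    -> [09:00:01, 09:00:03)  -> {01, 02}      -> sum 2
    #   closed='neither' -> (09:00:01, 09:00:03)  -> {02}          -> sum 1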
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=2).sum()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s').sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='4s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='4s', min_periods=3).sum()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).mean()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).mean()
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).median()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).median()
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).quantile(0.5)
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).quantile(0.5)
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).std(ddof=0)
expected = df.copy()
expected['B'] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='1s', min_periods=1).std(ddof=1)
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).std(ddof=0)
expected = df.copy()
expected['B'] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).std(ddof=1)
expected = df.copy()
expected['B'] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).var(ddof=0)
expected = df.copy()
expected['B'] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='1s', min_periods=1).var(ddof=1)
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).var(ddof=0)
expected = df.copy()
expected['B'] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).var(ddof=1)
expected = df.copy()
expected['B'] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
df = self.ragged
result = df.rolling(window='3s', min_periods=1).skew()
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).skew()
expected = df.copy()
expected['B'] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
df = self.ragged
result = df.rolling(window='3s', min_periods=1).kurt()
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).kurt()
expected = df.copy()
expected['B'] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).count()
expected = df.copy()
expected['B'] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='1s').count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).count()
expected = df.copy()
expected['B'] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=2).count()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
def test_regular_min(self):
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': [0.0, 1, 2, 3, 4]}).set_index('A')
result = df.rolling('1s').min()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': [5, 4, 3, 4, 5]}).set_index('A')
result = df.rolling('2s').min()
expected = df.copy()
expected['B'] = [5.0, 4, 3, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling('5s').min()
expected = df.copy()
expected['B'] = [5.0, 4, 3, 3, 3]
tm.assert_frame_equal(result, expected)
def test_ragged_min(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 0, 0, 1, 1]
tm.assert_frame_equal(result, expected)
def test_perf_min(self):
N = 10000
dfp = DataFrame({'B': np.random.randn(N)},
index=pd.date_range('20130101',
periods=N,
freq='s'))
expected = dfp.rolling(2, min_periods=1).min()
result = dfp.rolling('2s').min()
assert ((result - expected) < 0.01).all().bool()
expected = dfp.rolling(200, min_periods=1).min()
result = dfp.rolling('200s').min()
assert ((result - expected) < 0.01).all().bool()
def test_ragged_max(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
def test_ragged_apply(self, raw):
df = self.ragged
f = lambda x: 1
result = df.rolling(window='1s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
def test_all(self):
# simple comparison of integer vs time-based windowing
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window='1s')
for f in ['sum', 'mean', 'count', 'median', 'std',
'var', 'kurt', 'skew', 'min', 'max']:
result = getattr(r, f)()
expected = getattr(er, f)()
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = er.quantile(0.5)
tm.assert_frame_equal(result, expected)
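    # (With a regular 1-second index, the right-closed window (t - 1s, t]
    # contains exactly the most recent observation, so the '1s' time-based
    # window above is equivalent to rolling(window=1).)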
def test_all_apply(self, raw):
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window='1s')
result = r.apply(lambda x: 1, raw=raw)
expected = er.apply(lambda x: 1, raw=raw)
tm.assert_frame_equal(result, expected)
def test_all2(self):
# more sophisticated comparison of integer vs.
# time-based windowing
df = DataFrame({'B': np.arange(50)},
index=pd.date_range('20130101',
periods=50, freq='H')
)
# in-range data
dft = df.between_time("09:00", "16:00")
r = dft.rolling(window='5H')
for f in ['sum', 'mean', 'count', 'median', 'std',
'var', 'kurt', 'skew', 'min', 'max']:
result = getattr(r, f)()
# we need to roll the days separately
# to compare with a time-based roll
# finally groupby-apply will return a multi-index
# so we need to drop the day
def agg_by_day(x):
x = x.between_time("09:00", "16:00")
return getattr(x.rolling(5, min_periods=1), f)()
expected = df.groupby(df.index.day).apply(
agg_by_day).reset_index(level=0, drop=True)
tm.assert_frame_equal(result, expected)
def test_groupby_monotonic(self):
# GH 15130
# we don't need to validate monotonicity when grouping
data = [
['David', '1/1/2015', 100], ['David', '1/5/2015', 500],
['David', '5/30/2015', 50], ['David', '7/25/2015', 50],
['Ryan', '1/4/2014', 100], ['Ryan', '1/19/2015', 500],
['Ryan', '3/31/2016', 50], ['Joe', '7/1/2015', 100],
['Joe', '9/9/2015', 500], ['Joe', '10/15/2015', 50]]
df = DataFrame(data=data, columns=['name', 'date', 'amount'])
df['date'] = pd.to_datetime(df['date'])
expected = df.set_index('date').groupby('name').apply(
lambda x: x.rolling('180D')['amount'].sum())
result = df.groupby('name').rolling('180D', on='date')['amount'].sum()
tm.assert_series_equal(result, expected)
def test_non_monotonic(self):
# GH 13966 (similar to #15130, closed by #15175)
dates = pd.date_range(start='2016-01-01 09:30:00',
periods=20, freq='s')
df = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.concatenate((dates, dates)),
'C': np.arange(40)})
result = df.groupby('A').rolling('4s', on='B').C.mean()
expected = df.set_index('B').groupby('A').apply(
lambda x: x.rolling('4s')['C'].mean())
tm.assert_series_equal(result, expected)
df2 = df.sort_values('B')
result = df2.groupby('A').rolling('4s', on='B').C.mean()
tm.assert_series_equal(result, expected)
def test_rolling_cov_offset(self):
# GH16058
idx = pd.date_range('2017-01-01', periods=24, freq='1h')
ss = Series(np.arange(len(idx)), index=idx)
result = ss.rolling('2h').cov()
expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(2, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
result = ss.rolling('3h').cov()
expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(3, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
|
GuessWhoSamFoo/pandas
|
pandas/tests/test_window.py
|
Python
|
bsd-3-clause
| 156,476
|
[
"Gaussian"
] |
aa4276ef037855c8259de625956981a4973923bac5f1577571514dfcf2433e89
|
#
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: Jul-14-2017
#
from popupeditor import PopupEditor
from kivy.properties import *
from materialbutton import MaterialButton
from util.constants import *
from kivy.animation import Animation
from models.powersource import *
from kivy.metrics import *
import util.signal as signal
import matplotlib.pyplot as plt
import numpy as np
from kivy.uix.boxlayout import BoxLayout
class SourceEditor(PopupEditor):
"""
    Supports editing a power source (impedance, amplitude, width, wave shape).
"""
impedanceTextField = ObjectProperty(None)
widthTextField = ObjectProperty(None)
voltageTextField = ObjectProperty(None)
prevButton = ObjectProperty(None)
nextButton = ObjectProperty(None)
gaussButton = ObjectProperty(None)
squareButton = ObjectProperty(None)
    triangleButton = ObjectProperty(None)
selection = ObjectProperty(None)
def __init__(self, source, **kwargs):
super(SourceEditor, self).__init__(**kwargs)
self._source = source
self.gaussButton.changeStyle('flat')
self.squareButton.changeStyle('flat')
self.triangleButton.changeStyle('flat')
self.prevButton.changeStyle('flat')
self.nextButton.changeStyle('flat')
self.prevButton.iconLabel.color = PRIMARY
self.nextButton.iconLabel.color = PRIMARY
self.prevButton.on_press = self.showPrev
self.nextButton.on_press = self.showNext
self.gaussButton.on_press = lambda: self.onWaveShapeClicked(WaveShape.Gaussian)
self.squareButton.on_press = lambda: self.onWaveShapeClicked(WaveShape.Square)
self.triangleButton.on_press = lambda: self.onWaveShapeClicked(WaveShape.Triangle)
self.animateSwitch(source.shape, False)
self._anim = None
self._setupIcons()
def _setupIcons(self):
"""
Add icons to buttons.
"""
x = np.linspace(0, 10, 50)
y = signal.gaussian(50, 7)
self.gaussButton.container.add_widget(self._generateIcon(x, y))
y0 = [0] * 10
y = [1] * 30
self.squareButton.container.add_widget(self._generateIcon(x, y0 + y + y0))
y = []
for i in range(15):
y.append(i / 15.0)
for i in range(15):
y.append(1 - i / 15.0)
self.triangleButton.container.add_widget(self._generateIcon(x, y0 + y + y0))
def _generateIcon(self, x, y):
fig, ax = plt.subplots()
fig.set_tight_layout({"pad": 0})
ax.set_xticklabels([])
ax.set_yticklabels([])
        ax.tick_params(axis='both', length=0)
        ax.set_frame_on(False)
        ax.set_ylim([-0.1, 1.1])
        ax.plot(x, y, linewidth=dp(2), color=TEXT_BLACK)
return fig.canvas
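    # Note: returning fig.canvas as a widget assumes a Kivy-aware matplotlib
    # backend (e.g. kivy.garden.matplotlib's FigureCanvasKivyAgg); with a
    # stock backend the canvas could not be added to a Kivy layout.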
def on_focus(self, instance, focus):
if instance == self.impedanceTextField.inputText and not focus:
# Update impedance.
if len(self.impedanceTextField.text) == 0:
self._source.impedance = 0
else:
self._source.impedance = float(self.impedanceTextField.text)
if instance == self.voltageTextField.inputText and not focus:
# Update voltage.
if len(self.voltageTextField.text) == 0:
self._source.amplitude = 0
else:
self._source.amplitude = float(self.voltageTextField.text)
if instance == self.widthTextField.inputText and not focus:
            # Update width.
if len(self.widthTextField.text) == 0:
self._source.width = 0
else:
self._source.width = float(self.widthTextField.text)
def onWaveShapeClicked(self, shape):
self._source.shape = shape
self.animateSwitch(shape, True)
def updateValues(self):
        self.prevButton.disabled = self._source.prev is None
        self.nextButton.disabled = self._source.next is None
self.impedanceTextField.text = '{:g}'.format(self._source.impedance)
self.impedanceTextField.inputText.input_filter = 'float'
self.impedanceTextField.inputText.bind(focus = self.on_focus)
self.voltageTextField.text = '{:g}'.format(self._source.amplitude)
self.voltageTextField.inputText.input_filter = 'float'
self.voltageTextField.inputText.bind(focus = self.on_focus)
self.widthTextField.text = '{:g}'.format(self._source.width)
self.widthTextField.inputText.input_filter = 'float'
self.widthTextField.inputText.bind(focus = self.on_focus)
self.impedanceTextField.animateLabel(False)
self.voltageTextField.animateLabel(False)
self.widthTextField.animateLabel(False)
# Animation is true here to make sure the selection ends up in the correct position on top
# of the popup show animation.
self.animateSwitch(self._source.shape, True)
def animateSwitch(self, mode, animated):
        if self._anim is not None:
self._anim.cancel(self.selection)
t = 0.3 if animated else 0
if mode == WaveShape.Gaussian:
self._anim = Animation(center = self.gaussButton.center, d = t, t = 'in_out_quad')
elif mode == WaveShape.Square:
self._anim = Animation(center = self.squareButton.center, d = t, t = 'in_out_quad')
else:
self._anim = Animation(center = self.triangleButton.center, d = t, t = 'in_out_quad')
self._anim.start(self.selection)
def showPrev(self):
self.onPrev()
def showNext(self):
self.onNext()
|
flyingbanana1024102/transmission-line-simulator
|
src/views/sourceeditor.py
|
Python
|
mit
| 5,620
|
[
"Gaussian"
] |
0776d0689f2bffce3f53d25aed21cf5c7a21feca44c1183c9cc48d4c96363535
|
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, assert_array_less
from numpy.testing import assert_raises
from skimage.measure import LineModelND, CircleModel, EllipseModel, ransac
from skimage.transform import AffineTransform
from skimage.measure.fit import _dynamic_max_trials
from skimage._shared._warnings import expected_warnings
def test_line_model_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((1, 3)))
def test_line_model_predict():
model = LineModelND()
model.params = ((0, 0), (1, 1))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = ((0, 0), (1, 1))
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data = np.column_stack([x0, y0])
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
    # test whether the estimated model gives (almost) the same predictions as
    # the original model
random_state = np.random.RandomState(1234)
x = random_state.rand(100, 2)
assert_almost_equal(model0.predict(x), model_est.predict(x), 1)
def test_line_model_residuals():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0, 1]))
assert_equal(model.residuals(np.array([[0, 0]])), 0)
assert_equal(model.residuals(np.array([[0, 10]])), 0)
assert_equal(model.residuals(np.array([[10, 0]])), 10)
model.params = (np.array([-2, 0]), np.array([1, 1]) / np.sqrt(2))
assert_equal(model.residuals(np.array([[0, 0]])), np.sqrt(2))
assert_almost_equal(model.residuals(np.array([[-4, 0]])), np.sqrt(2))
def test_line_model_under_determined():
data = np.empty((1, 2))
assert_raises(ValueError, LineModelND().estimate, data)
def test_line_modelND_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((5, 1)))
def test_line_modelND_predict():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0.2, 0.98]))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_modelND_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = (np.array([0,0,0], dtype='float'),
np.array([1,1,1], dtype='float')/np.sqrt(3))
# we scale the unit vector with a factor 10 when generating points on the
# line in order to compensate for the scale of the random noise
data0 = (model0.params[0] +
10 * np.arange(-100,100)[...,np.newaxis] * model0.params[1])
# add gaussian noise to data
random_state = np.random.RandomState(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# test whether estimated parameters are correct
# we use the following geometric property: two aligned vectors have
# a cross-product equal to zero
# test if direction vectors are aligned
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1],
model_est.params[1])), 0, 1)
# test if origins are aligned with the direction
a = model_est.params[0] - model0.params[0]
if np.linalg.norm(a) > 0:
a /= np.linalg.norm(a)
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1)
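# The cross-product checks above rely on |u x v| = |u||v| sin(theta), which is
# zero exactly when the two directions are parallel.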
def test_line_modelND_residuals():
model = LineModelND()
model.params = (np.array([0, 0, 0]), np.array([0, 0, 1]))
assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0)
assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0)
assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10)
def test_line_modelND_under_determined():
data = np.empty((1, 3))
assert_raises(ValueError, LineModelND().estimate, data)
def test_circle_model_invalid_input():
assert_raises(ValueError, CircleModel().estimate, np.empty((5, 3)))
def test_circle_model_predict():
model = CircleModel()
r = 5
model.params = (0, 0, r)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
assert_almost_equal(xy, model.predict_xy(t))
def test_circle_model_estimate():
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add gaussian noise to data
random_state = np.random.RandomState(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = CircleModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 1)
def test_circle_model_residuals():
model = CircleModel()
model.params = (0, 0, 5)
assert_almost_equal(abs(model.residuals(np.array([[5, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[6, 6]]))),
np.sqrt(2 * 6**2) - 5)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 5)
def test_ellipse_model_invalid_input():
assert_raises(ValueError, EllipseModel().estimate, np.empty((5, 3)))
def test_ellipse_model_predict():
model = EllipseModel()
model.params = (0, 0, 5, 10, 0)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
assert_almost_equal(xy, model.predict_xy(t))
def test_ellipse_model_estimate():
# generate original data without noise
model0 = EllipseModel()
model0.params = (10, 20, 15, 25, 0)
t = np.linspace(0, 2 * np.pi, 100)
data0 = model0.predict_xy(t)
# add gaussian noise to data
random_state = np.random.RandomState(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = EllipseModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params[:4], model_est.params[:4], 0)
def test_ellipse_model_estimate_from_data():
data = np.array([
[264, 854], [265, 875], [268, 863], [270, 857], [275, 905], [285, 915],
[305, 925], [324, 934], [335, 764], [336, 915], [345, 925], [345, 945],
[354, 933], [355, 745], [364, 936], [365, 754], [375, 745], [375, 735],
[385, 736], [395, 735], [394, 935], [405, 727], [415, 736], [415, 727],
[425, 727], [426, 929], [435, 735], [444, 933], [445, 735], [455, 724],
[465, 934], [465, 735], [475, 908], [475, 726], [485, 753], [485, 728],
[492, 762], [495, 745], [491, 910], [493, 909], [499, 904], [505, 905],
[504, 747], [515, 743], [516, 752], [524, 855], [525, 844], [525, 885],
[533, 845], [533, 873], [535, 883], [545, 874], [543, 864], [553, 865],
[553, 845], [554, 825], [554, 835], [563, 845], [565, 826], [563, 855],
[563, 795], [565, 735], [573, 778], [572, 815], [574, 804], [575, 665],
[575, 685], [574, 705], [574, 745], [575, 875], [572, 732], [582, 795],
[579, 709], [583, 805], [583, 854], [586, 755], [584, 824], [585, 655],
[581, 718], [586, 844], [585, 915], [587, 905], [594, 824], [593, 855],
[590, 891], [594, 776], [596, 767], [593, 763], [603, 785], [604, 775],
[603, 885], [605, 753], [605, 655], [606, 935], [603, 761], [613, 802],
[613, 945], [613, 965], [615, 693], [617, 665], [623, 962], [624, 972],
[625, 995], [633, 673], [633, 965], [633, 683], [633, 692], [633, 954],
[634, 1016], [635, 664], [641, 804], [637, 999], [641, 956], [643, 946],
[643, 926], [644, 975], [643, 655], [646, 705], [651, 664], [651, 984],
[647, 665], [651, 715], [651, 725], [651, 734], [647, 809], [651, 825],
[651, 873], [647, 900], [652, 917], [651, 944], [652, 742], [648, 811],
[651, 994], [652, 783], [650, 911], [654, 879]])
# estimate parameters of real data
model = EllipseModel()
model.estimate(data)
    # test whether the estimated parameters stay below 2e3, i.e. the fit is
    # stable rather than degenerate
assert_array_less(np.abs(model.params[:4]), np.array([2e3] * 4))
def test_ellipse_model_residuals():
model = EllipseModel()
# vertical line through origin
model.params = (0, 0, 10, 5, 0)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 5]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 10]]))), 5)
def test_ransac_shape():
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add some faulty data
outliers = (10, 30, 200)
data0[outliers[0], :] = (1000, 1000)
data0[outliers[1], :] = (-50, 50)
data0[outliers[2], :] = (-100, -10)
# estimate parameters of corrupted data
model_est, inliers = ransac(data0, CircleModel, 3, 5,
random_state=1)
# test whether estimated parameters equal original parameters
assert_equal(model0.params, model_est.params)
for outlier in outliers:
assert outlier not in inliers
def test_ransac_geometric():
random_state = np.random.RandomState(1)
# generate original data without noise
src = 100 * random_state.random_sample((50, 2))
model0 = AffineTransform(scale=(0.5, 0.3), rotation=1,
translation=(10, 20))
dst = model0(src)
# add some faulty data
outliers = (0, 5, 20)
dst[outliers[0]] = (10000, 10000)
dst[outliers[1]] = (-100, 100)
dst[outliers[2]] = (50, 50)
# estimate parameters of corrupted data
model_est, inliers = ransac((src, dst), AffineTransform, 2, 20,
random_state=random_state)
# test whether estimated parameters equal original parameters
assert_almost_equal(model0.params, model_est.params)
    assert np.all(np.nonzero(~inliers)[0] == outliers)
def test_ransac_is_data_valid():
is_data_valid = lambda data: data.shape[0] > 2
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_data_valid=is_data_valid, random_state=1)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_is_model_valid():
def is_model_valid(model, data):
return False
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_model_valid=is_model_valid, random_state=1)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 5
assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 5, 1), np.inf)
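# For reference, these values follow the standard RANSAC trial-count formula
# N = ceil(log(1 - p) / log(1 - w**s)) with inlier ratio w and sample size s,
# e.g. for w = 0.5, s = 8, p = 0.99:
#   np.ceil(np.log(1 - 0.99) / np.log(1 - 0.5 ** 8))  # -> 1177.0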
def test_ransac_invalid_input():
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, max_trials=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=1.01)
if __name__ == "__main__":
np.testing.run_module_suite()
|
paalge/scikit-image
|
skimage/measure/tests/test_fit.py
|
Python
|
bsd-3-clause
| 12,580
|
[
"Gaussian"
] |
2e68e20f662ccc17522aa04b513b21905626f5473a1b9fcbbebabf4872d218e5
|
# lint as python3
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
# pylint: disable=unused-argument
# pylint: disable=g-import-not-at-top
import warnings
import autograd
import autograd.core
import autograd.extend
import autograd.numpy as anp
from neural_structural_optimization import caching
import numpy as np
import scipy.ndimage
import scipy.sparse
import scipy.sparse.linalg
try:
import sksparse.cholmod
HAS_CHOLMOD = True
except ImportError:
warnings.warn(
'sksparse.cholmod not installed. Falling back to SciPy/SuperLU, but '
'simulations will be about twice as slow.')
HAS_CHOLMOD = False
# internal utilities
def _grad_undefined(_, *args):
raise TypeError('gradient undefined for this input argument')
def _zero_grad(_, *args, **kwargs):
def jvp(grad_ans):
return 0.0 * grad_ans
return jvp
# Gaussian filter
@autograd.extend.primitive
def gaussian_filter(x, width):
"""Apply gaussian blur of a given radius."""
return scipy.ndimage.gaussian_filter(x, width, mode='reflect')
def _gaussian_filter_vjp(ans, x, width):
del ans, x # unused
return lambda g: gaussian_filter(g, width)
autograd.extend.defvjp(gaussian_filter, _gaussian_filter_vjp)
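# The VJP above uses the fact that the Gaussian filter is (numerically)
# self-adjoint: the kernel is symmetric and 'reflect' boundaries preserve the
# symmetry. A quick inner-product check, <A x, y> == <x, A y>, illustrates it:
#   x, y = np.random.randn(8, 8), np.random.randn(8, 8)
#   lhs = (gaussian_filter(x, 2.0) * y).sum()
#   rhs = (x * gaussian_filter(y, 2.0)).sum()
#   np.testing.assert_allclose(lhs, rhs)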
# Cone filter
def _cone_filter_matrix(nelx, nely, radius, mask):
x, y = np.meshgrid(np.arange(nelx), np.arange(nely), indexing='ij')
rows = []
cols = []
values = []
r_bound = int(np.ceil(radius))
for dx in range(-r_bound, r_bound+1):
for dy in range(-r_bound, r_bound+1):
weight = np.maximum(0, radius - np.sqrt(dx**2 + dy**2))
row = x + nelx * y
column = x + dx + nelx * (y + dy)
value = np.broadcast_to(weight, x.shape)
# exclude cells beyond the boundary
valid = (
(mask > 0) &
((x+dx) >= 0) &
((x+dx) < nelx) &
((y+dy) >= 0) &
((y+dy) < nely)
)
rows.append(row[valid])
cols.append(column[valid])
values.append(value[valid])
data = np.concatenate(values)
i = np.concatenate(rows)
j = np.concatenate(cols)
return scipy.sparse.coo_matrix((data, (i, j)), (nelx * nely,) * 2)
@caching.ndarray_safe_lru_cache()
def normalized_cone_filter_matrix(nx, ny, radius, mask):
"""Calculate a sparse matrix appropriate for applying a cone filter."""
raw_filters = _cone_filter_matrix(nx, ny, radius, mask).tocsr()
weights = 1 / raw_filters.sum(axis=0).squeeze()
diag_weights = scipy.sparse.spdiags(weights, 0, nx*ny, nx*ny)
return (diag_weights @ raw_filters).tocsr()
@autograd.extend.primitive
def cone_filter(inputs, radius, mask=1, transpose=False):
"""Apply a cone filter of the given radius."""
inputs = np.asarray(inputs)
filters = normalized_cone_filter_matrix(
*inputs.shape, radius=radius, mask=mask)
if transpose:
filters = filters.T
outputs = filters @ inputs.ravel(order='F')
return outputs.reshape(inputs.shape, order='F')
def _cone_filter_vjp(ans, inputs, radius, mask=1, transpose=False):
del ans, inputs # unused
return lambda g: cone_filter(g, radius, mask, transpose=not transpose)
autograd.extend.defvjp(cone_filter, _cone_filter_vjp)
## a useful utility for 1D scatter operations
def inverse_permutation(indices):
inverse_perm = np.zeros(len(indices), dtype=anp.int64)
inverse_perm[indices] = np.arange(len(indices), dtype=anp.int64)
return inverse_perm
# the 1D scatter operation
def scatter1d(nonzero_values, nonzero_indices, array_len):
all_indices = np.arange(array_len, dtype=anp.int64)
zero_indices = anp.setdiff1d(all_indices, nonzero_indices, assume_unique=True)
index_map = inverse_permutation(
anp.concatenate([nonzero_indices, zero_indices]))
u_values = anp.concatenate([nonzero_values, anp.zeros(len(zero_indices))])
return u_values[index_map]
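# For illustration: scatter1d(anp.array([5., 7.]), np.array([1, 3]), 5)
# returns array([0., 5., 0., 7., 0.]) -- the nonzero values land at their
# indices and every other slot is zero, built without in-place assignment so
# autograd can differentiate through it.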
@caching.ndarray_safe_lru_cache(1)
def _get_solver(a_entries, a_indices, size, sym_pos):
"""Get a solver for applying the desired matrix factorization."""
# A cache size of one is sufficient to avoid re-computing the factorization in
  # the backwards pass.
a = scipy.sparse.coo_matrix((a_entries, a_indices), shape=(size,)*2).tocsc()
if sym_pos and HAS_CHOLMOD:
return sksparse.cholmod.cholesky(a).solve_A
else:
# could also use scikits.umfpack.splu
# should be about twice as slow as the cholesky
return scipy.sparse.linalg.splu(a).solve
## Sparse solver
@autograd.primitive
def solve_coo(a_entries, a_indices, b, sym_pos=False):
"""Solve a sparse system of linear equations.
Args:
a_entries: numpy array with shape (num_zeros,) giving values for non-zero
matrix entries.
a_indices: numpy array with shape (2, num_zeros) giving x and y indices for
non-zero matrix entries.
b: 1d numpy array specifying the right hand side of the equation.
sym_pos: is the matrix guaranteed to be positive-definite?
Returns:
1d numpy array corresponding to the solution of a*x=b.
"""
solver = _get_solver(a_entries, a_indices, b.size, sym_pos)
return solver(b)
# see autograd's np.linalg.solve:
# https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/numpy/linalg.py#L40
def solve_coo_adjoint(a_entries, a_indices, b, sym_pos=False):
# NOTE: not tested on complex valued inputs.
if sym_pos:
return solve_coo(a_entries, a_indices, b, sym_pos)
else:
return solve_coo(a_entries, a_indices[::-1], b, sym_pos)
def grad_solve_coo_entries(ans, a_entries, a_indices, b, sym_pos=False):
def jvp(grad_ans):
lambda_ = solve_coo_adjoint(a_entries, a_indices, grad_ans, sym_pos)
i, j = a_indices
return -lambda_[i] * ans[j]
return jvp
def grad_solve_coo_b(ans, a_entries, a_indices, b, sym_pos=False):
def jvp(grad_ans):
return solve_coo_adjoint(a_entries, a_indices, grad_ans, sym_pos)
return jvp
autograd.extend.defvjp(
solve_coo, grad_solve_coo_entries, _grad_undefined, grad_solve_coo_b)
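# These are the usual adjoint rules for x = A^{-1} b: with the adjoint solve
# A^T lambda = dL/dx, the VJPs are dL/db = lambda and dL/dA_ij = -lambda_i x_j
# (needed only at the stored nonzero (i, j) pairs).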
@autograd.primitive
def find_root(
f, x, lower_bound, upper_bound, tolerance=1e-12, max_iterations=64):
# Implicitly solve f(x,y)=0 for y(x) using binary search.
# Assumes that y is a scalar and f(x,y) is monotonic in y.
for _ in range(max_iterations):
y = 0.5 * (lower_bound + upper_bound)
if upper_bound - lower_bound < tolerance:
break
if f(x, y) > 0:
upper_bound = y
else:
lower_bound = y
return y
def grad_find_root(y, f, x, lower_bound, upper_bound, tolerance=None):
# This uses a special case of the adjoint gradient rule:
# http://www.dolfin-adjoint.org/en/latest/documentation/maths/3-gradients.html#the-adjoint-approach
def jvp(grad_y):
g = lambda x: f(x, y)
h = lambda y: f(x, y)
return -autograd.grad(g)(x) / autograd.grad(h)(y) * grad_y
return jvp
autograd.extend.defvjp(
find_root, _grad_undefined, grad_find_root,
_zero_grad, _zero_grad, _zero_grad)
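# A minimal usage sketch (illustrative only, with made-up numbers): solve
# y**3 = x for y by bisection, then differentiate the root w.r.t. x through
# the registered VJP; analytically dy/dx = 1 / (3 * y**2).
if __name__ == '__main__':
  f = lambda x, y: y ** 3 - x
  root = find_root(f, 8.0, 0.0, 3.0)  # ~2.0
  dy_dx = autograd.grad(lambda x: find_root(f, x, 0.0, 3.0))(8.0)
  np.testing.assert_allclose(root, 2.0, atol=1e-6)
  np.testing.assert_allclose(dy_dx, 1.0 / 12.0, rtol=1e-4)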
|
google-research/neural-structural-optimization
|
neural_structural_optimization/autograd_lib.py
|
Python
|
apache-2.0
| 7,407
|
[
"Gaussian"
] |
91d5e199e51de53204762ddecbe4c544bf433a882ef31930e093d6c4c31ca5b8
|
import datetime as dt
import os
import sys
import re
import pytest
from click.testing import CliRunner
from freezegun import freeze_time
from khal.cli import main_ikhal, main_khal
from .utils import _get_ics_filepath, _get_text
class CustomCliRunner(CliRunner):
def __init__(self, config_file, db=None, calendars=None,
xdg_data_home=None, xdg_config_home=None, tmpdir=None, **kwargs):
self.config_file = config_file
self.db = db
self.calendars = calendars
self.xdg_data_home = xdg_data_home
self.xdg_config_home = xdg_config_home
self.tmpdir = tmpdir
super().__init__(**kwargs)
def invoke(self, cli, args=None, *a, **kw):
args = ['-c', str(self.config_file)] + (args or [])
return super().invoke(cli, args, *a, **kw)
@pytest.fixture
def runner(tmpdir, monkeypatch):
db = tmpdir.join('khal.db')
calendar = tmpdir.mkdir('calendar')
calendar2 = tmpdir.mkdir('calendar2')
calendar3 = tmpdir.mkdir('calendar3')
xdg_data_home = tmpdir.join('vdirs')
xdg_config_home = tmpdir.join('.config')
config_file = xdg_config_home.join('khal').join('config')
# TODO create a vdir config on disk and let vdirsyncer actually read it
monkeypatch.setattr('vdirsyncer.cli.config.load_config', lambda: Config())
monkeypatch.setattr('xdg.BaseDirectory.xdg_data_home', str(xdg_data_home))
monkeypatch.setattr('xdg.BaseDirectory.xdg_config_home', str(xdg_config_home))
monkeypatch.setattr('xdg.BaseDirectory.xdg_config_dirs', [str(xdg_config_home)])
def inner(print_new=False, default_calendar=True, days=2, **kwargs):
if default_calendar:
default_calendar = 'default_calendar = one'
else:
default_calendar = ''
if not os.path.exists(str(xdg_config_home.join('khal'))):
os.makedirs(str(xdg_config_home.join('khal')))
config_file.write(config_template.format(
delta=str(days) + 'd',
calpath=str(calendar), calpath2=str(calendar2), calpath3=str(calendar3),
default_calendar=default_calendar,
print_new=print_new,
dbpath=str(db), **kwargs))
runner = CustomCliRunner(
config_file=config_file, db=db, calendars={"one": calendar},
xdg_data_home=xdg_data_home, xdg_config_home=xdg_config_home,
tmpdir=tmpdir,
)
return runner
return inner
config_template = '''
[calendars]
[[one]]
path = {calpath}
color = dark blue
[[two]]
path = {calpath2}
color = dark green
[[three]]
path = {calpath3}
[locale]
local_timezone = Europe/Berlin
default_timezone = Europe/Berlin
timeformat = %H:%M
dateformat = %d.%m.
longdateformat = %d.%m.%Y
datetimeformat = %d.%m. %H:%M
longdatetimeformat = %d.%m.%Y %H:%M
firstweekday = 0
[default]
{default_calendar}
timedelta = {delta}
print_new = {print_new}
[sqlite]
path = {dbpath}
'''
def test_direct_modification(runner):
runner = runner()
result = runner.invoke(main_khal, ['list'])
assert result.output == 'No events\n'
assert not result.exception
cal_dt = _get_text('event_dt_simple')
event = runner.calendars['one'].join('test.ics')
event.write(cal_dt)
format = '{start-end-time-style}: {title}'
args = ['list', '--format', format, '--day-format', '', '09.04.2014']
result = runner.invoke(main_khal, args)
assert not result.exception
assert result.output == '09:30-10:30: An Event\n'
os.remove(str(event))
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert result.output == 'No events\n'
def test_simple(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert result.output == 'No events\n'
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(
main_khal, f'new {now} 18:00 myevent'.split())
assert result.output == ''
assert not result.exception
result = runner.invoke(main_khal, ['list'])
print(result.output)
assert 'myevent' in result.output
assert '18:00' in result.output
# test show_all_days default value
assert 'Tomorrow:' not in result.output
assert not result.exception
def test_simple_color(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {now} 18:00 myevent'.split())
assert result.output == ''
assert not result.exception
result = runner.invoke(main_khal, ['list'], color=True)
assert not result.exception
assert '\x1b[34m' in result.output
def test_days(runner):
runner = runner(days=9)
when = (dt.datetime.now() + dt.timedelta(days=7)).strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {when} 18:00 nextweek'.split())
assert result.output == ''
assert not result.exception
when = (dt.datetime.now() + dt.timedelta(days=30)).strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {when} 18:00 nextmonth'.split())
assert result.output == ''
assert not result.exception
result = runner.invoke(main_khal, ['list'])
assert 'nextweek' in result.output
assert 'nextmonth' not in result.output
assert '18:00' in result.output
assert not result.exception
def test_notstarted(runner):
with freeze_time('2015-6-1 15:00'):
runner = runner(days=2)
for command in [
'new 30.5.2015 5.6.2015 long event',
'new 2.6.2015 4.6.2015 two day event',
'new 1.6.2015 14:00 18:00 four hour event',
'new 1.6.2015 16:00 17:00 one hour event',
'new 2.6.2015 10:00 13:00 three hour event',
]:
result = runner.invoke(main_khal, command.split())
assert not result.exception
result = runner.invoke(main_khal, 'list now'.split())
assert result.output == \
"""Today, 01.06.2015
↔ long event
14:00-18:00 four hour event
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↔ long event
↦ two day event
10:00-13:00 three hour event
Wednesday, 03.06.2015
↔ long event
↔ two day event
"""
assert not result.exception
result = runner.invoke(main_khal, 'list now --notstarted'.split())
assert result.output == \
"""Today, 01.06.2015
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↦ two day event
10:00-13:00 three hour event
Wednesday, 03.06.2015
↔ two day event
"""
assert not result.exception
result = runner.invoke(main_khal, 'list now --once'.split())
assert result.output == \
"""Today, 01.06.2015
↔ long event
14:00-18:00 four hour event
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↦ two day event
10:00-13:00 three hour event
"""
assert not result.exception
result = runner.invoke(main_khal, 'list now --once --notstarted'.split())
assert result.output == \
"""Today, 01.06.2015
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↦ two day event
10:00-13:00 three hour event
"""
assert not result.exception
def test_calendar(runner):
with freeze_time('2015-6-1'):
runner = runner(days=0)
result = runner.invoke(main_khal, ['calendar'])
assert not result.exception
assert result.exit_code == 0
output = '\n'.join([
" Mo Tu We Th Fr Sa Su No events",
"Jun 1 2 3 4 5 6 7 ",
" 8 9 10 11 12 13 14 ",
" 15 16 17 18 19 20 21 ",
" 22 23 24 25 26 27 28 ",
"Jul 29 30 1 2 3 4 5 ",
" 6 7 8 9 10 11 12 ",
" 13 14 15 16 17 18 19 ",
" 20 21 22 23 24 25 26 ",
"Aug 27 28 29 30 31 1 2 ",
" 3 4 5 6 7 8 9 ",
" 10 11 12 13 14 15 16 ",
" 17 18 19 20 21 22 23 ",
" 24 25 26 27 28 29 30 ",
"Sep 31 1 2 3 4 5 6 ",
"",
])
assert result.output == output
def test_long_calendar(runner):
with freeze_time('2015-6-1'):
runner = runner(days=100)
result = runner.invoke(main_khal, ['calendar'])
assert not result.exception
assert result.exit_code == 0
output = '\n'.join([
" Mo Tu We Th Fr Sa Su No events",
"Jun 1 2 3 4 5 6 7 ",
" 8 9 10 11 12 13 14 ",
" 15 16 17 18 19 20 21 ",
" 22 23 24 25 26 27 28 ",
"Jul 29 30 1 2 3 4 5 ",
" 6 7 8 9 10 11 12 ",
" 13 14 15 16 17 18 19 ",
" 20 21 22 23 24 25 26 ",
"Aug 27 28 29 30 31 1 2 ",
" 3 4 5 6 7 8 9 ",
" 10 11 12 13 14 15 16 ",
" 17 18 19 20 21 22 23 ",
" 24 25 26 27 28 29 30 ",
"Sep 31 1 2 3 4 5 6 ",
" 7 8 9 10 11 12 13 ",
" 14 15 16 17 18 19 20 ",
" 21 22 23 24 25 26 27 ",
"Oct 28 29 30 1 2 3 4 ",
"",
])
assert result.output == output
def test_default_command_empty(runner):
runner = runner(days=2)
result = runner.invoke(main_khal)
assert result.exception
assert result.exit_code == 2
assert result.output.startswith('Usage: ')
def test_invalid_calendar(runner):
runner = runner(days=2)
result = runner.invoke(
main_khal, ['new'] + '-a one 18:00 myevent'.split())
assert not result.exception
result = runner.invoke(
main_khal, ['new'] + '-a inexistent 18:00 myevent'.split())
assert result.exception
assert result.exit_code == 2
assert 'Unknown calendar ' in result.output
def test_attach_calendar(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['printcalendars'])
assert set(result.output.split('\n')[:3]) == {'one', 'two', 'three'}
assert not result.exception
result = runner.invoke(main_khal, ['printcalendars', '-a', 'one'])
assert result.output == 'one\n'
assert not result.exception
result = runner.invoke(main_khal, ['printcalendars', '-d', 'one'])
assert set(result.output.split('\n')[:2]) == {'two', 'three'}
assert not result.exception
@pytest.mark.parametrize('contents', [
'',
'BEGIN:VCALENDAR\nBEGIN:VTODO\nEND:VTODO\nEND:VCALENDAR\n'
])
def test_no_vevent(runner, tmpdir, contents):
runner = runner(days=2)
broken_item = runner.calendars['one'].join('broken_item.ics')
broken_item.write(contents.encode('utf-8'), mode='wb')
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert 'No events' in result.output
def test_printformats(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['printformats'])
assert '\n'.join(['longdatetimeformat: 21.12.2013 21:45',
'datetimeformat: 21.12. 21:45',
'longdateformat: 21.12.2013',
'dateformat: 21.12.',
'timeformat: 21:45',
'']) == result.output
assert not result.exception
# "see #810"
@pytest.mark.xfail
def test_repeating(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
end_date = dt.datetime.now() + dt.timedelta(days=10)
result = runner.invoke(
main_khal, (f"new {now} 18:00 myevent -r weekly -u "
f"{end_date.strftime('%d.%m.%Y')}").split())
assert not result.exception
assert result.output == ''
def test_at(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
end_date = dt.datetime.now() + dt.timedelta(days=10)
result = runner.invoke(
main_khal,
f"new {now} {end_date.strftime('%d.%m.%Y')} 18:00 myevent".split())
args = ['--color', 'at', '--format', '{start-time}{title}', '--day-format', '', '18:30']
result = runner.invoke(main_khal, args)
assert not result.exception
assert result.output.startswith('myevent')
def test_at_day_format(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
end_date = dt.datetime.now() + dt.timedelta(days=10)
result = runner.invoke(
main_khal,
f"new {now} {end_date.strftime('%d.%m.%Y')} 18:00 myevent".split())
args = ['--color', 'at', '--format', '{start-time}{title}', '--day-format', '{name}', '18:30']
result = runner.invoke(main_khal, args)
assert not result.exception
assert result.output.startswith('Today\x1b[0m\nmyevent')
def test_list(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(
main_khal,
f'new {now} 18:00 myevent'.split())
format = '{red}{start-end-time-style}{reset} {title} :: {description}'
args = ['--color', 'list', '--format', format, '--day-format', 'header', '18:30']
result = runner.invoke(main_khal, args)
expected = 'header\x1b[0m\n\x1b[31m18:00-19:00\x1b[0m myevent :: \x1b[0m\n'
assert not result.exception
assert result.output.startswith(expected)
def test_search(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {now} 18:00 myevent'.split())
format = '{red}{start-end-time-style}{reset} {title} :: {description}'
result = runner.invoke(main_khal, ['--color', 'search', '--format', format, 'myevent'])
assert not result.exception
assert result.output.startswith('\x1b[34m\x1b[31m18:00')
def test_no_default_new(runner):
runner = runner(default_calendar=False)
result = runner.invoke(main_khal, 'new 18:00 beer'.split())
assert ("Error: Invalid value: No default calendar is configured, "
"please provide one explicitly.") in result.output
assert result.exit_code == 2
def test_import(runner, monkeypatch):
runner = runner()
result = runner.invoke(main_khal, 'import -a one -a two import file.ics'.split())
assert result.exception
assert result.exit_code == 2
assert 'Can\'t use "--include-calendar" / "-a" more than once' in result.output
class FakeImport():
args, kwargs = None, None
def clean(self):
self.args, self.kwargs = None, None
def import_ics(self, *args, **kwargs):
print('saving args')
print(args)
self.args = args
self.kwargs = kwargs
fake = FakeImport()
monkeypatch.setattr('khal.controllers.import_ics', fake.import_ics)
# as we are not actually parsing the file we want to import, we can use
# any readable file at all, therefore re-using the configuration file
result = runner.invoke(main_khal, f'import -a one {runner.config_file}'.split())
assert not result.exception
assert {cal['name'] for cal in fake.args[0].calendars} == {'one'}
fake.clean()
result = runner.invoke(main_khal, f'import {runner.config_file}'.split())
assert not result.exception
assert {cal['name'] for cal in fake.args[0].calendars} == {'one', 'two', 'three'}
def test_import_proper(runner):
runner = runner()
result = runner.invoke(main_khal, ['import', _get_ics_filepath('cal_d')], input='0\ny\n')
assert result.output.startswith('09.04.-09.04. An Event')
assert not result.exception
result = runner.invoke(main_khal, ['search', 'Event'])
assert result.output == '09.04.-09.04. An Event\n'
def test_import_proper_invalid_timezone(runner):
runner = runner()
result = runner.invoke(
main_khal, ['import', _get_ics_filepath('invalid_tzoffset')], input='0\ny\n')
assert result.output.startswith(
'warning: Invalid timezone offset encountered, timezone information may be wrong')
assert not result.exception
result = runner.invoke(main_khal, ['search', 'Event'])
assert result.output.startswith(
'warning: Invalid timezone offset encountered, timezone information may be wrong')
assert '02.12. 08:00-02.12. 09:30 Some event' in result.output
def test_import_invalid_choice_and_prefix(runner):
runner = runner()
result = runner.invoke(main_khal, ['import', _get_ics_filepath('cal_d')], input='9\nth\ny\n')
assert result.output.startswith('09.04.-09.04. An Event')
assert result.output.find('invalid choice') == 125
assert not result.exception
result = runner.invoke(main_khal, ['search', 'Event'])
assert result.output == '09.04.-09.04. An Event\n'
def test_import_from_stdin(runner, monkeypatch):
ics_data = 'This is some really fake icalendar data'
class FakeImport():
args, kwargs = None, None
call_count = 0
def clean(self):
self.args, self.kwargs = None, None
def import_ics(self, *args, **kwargs):
print('saving args')
print(args)
self.call_count += 1
self.args = args
self.kwargs = kwargs
importer = FakeImport()
monkeypatch.setattr('khal.controllers.import_ics', importer.import_ics)
runner = runner()
result = runner.invoke(main_khal, ['import'], input=ics_data)
assert not result.exception
assert importer.call_count == 1
assert importer.kwargs['ics'] == ics_data
def test_interactive_command(runner, monkeypatch):
runner = runner(days=2)
token = "hooray"
def fake_ui(*a, **kw):
print(token)
sys.exit(0)
monkeypatch.setattr('khal.ui.start_pane', fake_ui)
result = runner.invoke(main_ikhal, ['-a', 'one'])
assert not result.exception
assert result.output.strip() == token
result = runner.invoke(main_khal, ['interactive', '-a', 'one'])
assert not result.exception
assert result.output.strip() == token
def test_color_option(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['--no-color', 'list'])
assert result.output == 'No events\n'
result = runner.invoke(main_khal, ['--color', 'list'])
assert 'No events' in result.output
assert result.output != 'No events\n'
def choices(dateformat=0, timeformat=0,
parse_vdirsyncer_conf=True,
create_vdir=False,
default_calendar='',
write_config=True):
"""helper function to generate input for testing `configure`"""
confirm = {True: 'y', False: 'n'}
out = [
str(dateformat), str(timeformat),
confirm[parse_vdirsyncer_conf],
]
if not parse_vdirsyncer_conf:
out.append(confirm[create_vdir])
out.append(default_calendar)
out.append(confirm[write_config])
out.append('')
return '\n'.join(out)
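# e.g. the default choices() yields '0\n0\ny\n\ny\n': dateformat 0,
# timeformat 0, parse the vdirsyncer config, hit enter at the (empty) default
# calendar prompt, and confirm writing the config file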
class Config():
"""helper class for mocking vdirsyncer's config objects"""
    # TODO create a vdir config on disk and let vdirsyncer actually read it
storages = {
'home_calendar_local': {
'type': 'filesystem',
'instance_name': 'home_calendar_local',
'path': '~/.local/share/calendars/home/',
'fileext': '.ics',
},
'events_local': {
'type': 'filesystem',
'instance_name': 'events_local',
'path': '~/.local/share/calendars/events/',
'fileext': '.ics',
},
'home_calendar_remote': {
'type': 'caldav',
'url': 'https://some.url/caldav',
'username': 'foo',
'password.fetch': ['command', 'get_secret'],
'instance_name': 'home_calendar_remote',
},
'home_contacts_remote': {
'type': 'carddav',
'url': 'https://another.url/caldav',
'username': 'bar',
'password.fetch': ['command', 'get_secret'],
'instance_name': 'home_contacts_remote',
},
'home_contacts_local': {
'type': 'filesystem',
'instance_name': 'home_contacts_local',
'path': '~/.local/share/contacts/',
'fileext': '.vcf',
},
'events_remote': {
'type': 'http',
'instance_name': 'events_remote',
'url': 'http://list.of/events/',
},
}
def test_configure_command(runner):
runner_factory = runner
runner = runner()
runner.config_file.remove()
result = runner.invoke(main_khal, ['configure'], input=choices())
assert f'Successfully wrote configuration to {runner.config_file}' in result.output
assert result.exit_code == 0
with open(str(runner.config_file)) as f:
actual_config = ''.join(f.readlines())
assert actual_config == '''[calendars]
[[events_local]]
path = ~/.local/share/calendars/events/*
type = discover
[[home_calendar_local]]
path = ~/.local/share/calendars/home/*
type = discover
[[home_contacts_local]]
path = ~/.local/share/contacts/*
type = discover
[locale]
timeformat = %H:%M
dateformat = %Y-%m-%d
longdateformat = %Y-%m-%d
datetimeformat = %Y-%m-%d %H:%M
longdatetimeformat = %Y-%m-%d %H:%M
[default]
default_calendar = events_local
'''
# if aborting, no config file should be written
runner = runner_factory()
assert os.path.exists(str(runner.config_file))
runner.config_file.remove()
assert not os.path.exists(str(runner.config_file))
result = runner.invoke(main_khal, ['configure'], input=choices(write_config=False))
assert 'aborted' in result.output
assert result.exit_code == 1
def test_print_ics_command(runner):
runner = runner()
# Input is empty and loading from stdin
result = runner.invoke(main_khal, ['printics', '-'])
assert result.exception
# Non existing file
result = runner.invoke(main_khal, ['printics', 'nonexisting_file'])
assert result.exception
assert re.search(r'''Error: Invalid value for "?'?\[?(ICS|ics)\]?'?"?: '''
r'''('nonexisting_file': No such file or directory\n|'''
r'Could not open file:)', result.output)
# Run on test files
result = runner.invoke(main_khal, ['printics', _get_ics_filepath('cal_d')])
assert not result.exception
result = runner.invoke(main_khal, ['printics', _get_ics_filepath('cal_dt_two_tz')])
assert not result.exception
# Test with some nice format strings
form = '{uid}\t{title}\t{description}\t{start}\t{start-long}\t{start-date}' \
'\t{start-date-long}\t{start-time}\t{end}\t{end-long}\t{end-date}' \
'\t{end-date-long}\t{end-time}\t{repeat-symbol}\t{description}' \
'\t{description-separator}\t{location}\t{calendar}' \
'\t{calendar-color}\t{start-style}\t{to-style}\t{end-style}' \
'\t{start-end-time-style}\t{end-necessary}\t{end-necessary-long}'
result = runner.invoke(main_khal, [
'printics', '-f', form, _get_ics_filepath('cal_dt_two_tz')])
assert not result.exception
assert 25 == len(result.output.split('\t'))
result = runner.invoke(main_khal, [
'printics', '-f', form, _get_ics_filepath('cal_dt_two_tz')])
assert not result.exception
assert 25 == len(result.output.split('\t'))
def test_printics_read_from_stdin(runner):
runner = runner(command='printics')
result = runner.invoke(main_khal, ['printics'], input=_get_text('cal_d'))
assert not result.exception
assert '1 events found in stdin input\n09.04.-09.04. An Event\n' in result.output
def test_configure_command_config_exists(runner):
runner = runner()
result = runner.invoke(main_khal, ['configure'], input=choices())
assert 'Found an existing' in result.output
assert result.exit_code == 1
def test_configure_command_create_vdir(runner):
runner = runner()
runner.config_file.remove()
runner.xdg_config_home.remove()
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=True),
)
assert f'Successfully wrote configuration to {str(runner.config_file)}' in result.output
assert result.exit_code == 0
with open(str(runner.config_file)) as f:
actual_config = ''.join(f.readlines())
assert actual_config == f'''[calendars]
[[private]]
path = {str(runner.xdg_data_home)}/khal/calendars/private
type = calendar
[locale]
timeformat = %H:%M
dateformat = %Y-%m-%d
longdateformat = %Y-%m-%d
datetimeformat = %Y-%m-%d %H:%M
longdatetimeformat = %Y-%m-%d %H:%M
[default]
default_calendar = private
'''
# running configure again, should yield another vdir path, as the old
# one still exists
runner.config_file.remove()
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=True),
)
assert f'Successfully wrote configuration to {str(runner.config_file)}' in result.output
assert result.exit_code == 0
with open(str(runner.config_file)) as f:
actual_config = ''.join(f.readlines())
assert f'{runner.xdg_data_home}/khal/calendars/private1' in actual_config
def cleanup(paths):
"""reset permissions of all files and folders in `paths` to 644 resp. 755"""
for path in paths:
if os.path.exists(path):
os.chmod(str(path), 0o755)
for dirpath, _dirnames, filenames in os.walk(path):
os.chmod(str(dirpath), 0o755)
for filename in filenames:
os.chmod(str(os.path.join(dirpath, filename)), 0o644)
def test_configure_command_cannot_write_config_file(runner):
runner = runner()
runner.config_file.remove()
    os.chmod(str(runner.xdg_config_home), 0o555)  # octal: strip write permission
result = runner.invoke(main_khal, ['configure'], input=choices())
assert result.exit_code == 1
# make sure pytest can clean up behind us
cleanup([runner.xdg_config_home])
def test_configure_command_cannot_create_vdir(runner):
runner = runner()
runner.config_file.remove()
    os.mkdir(str(runner.xdg_data_home), mode=0o555)  # octal: read-only dir
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=True),
)
assert 'Exiting' in result.output
assert result.exit_code == 1
# make sure pytest can clean up behind us
cleanup([runner.xdg_data_home])
def test_configure_no_vdir(runner):
runner = runner()
runner.config_file.remove()
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=False),
)
assert 'khal will not be usable like this' in result.output
assert result.exit_code == 0
assert not result.exception
def test_edit(runner):
runner = runner()
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert result.output == 'No events\n'
for name in ['event_dt_simple', 'event_d_15']:
cal_dt = _get_text(name)
event = runner.calendars['one'].join(f'{name}.ics')
event.write(cal_dt)
format = '{start-end-time-style}: {title}'
result = runner.invoke(
main_khal, ['edit', '--show-past', 'Event'], input='s\nGreat Event\nn\nn\n')
assert not result.exception
args = ['list', '--format', format, '--day-format', '', '09.04.2014']
result = runner.invoke(main_khal, args)
assert '09:30-10:30: Great Event' in result.output
assert not result.exception
args = ['list', '--format', format, '--day-format', '', '09.04.2015']
result = runner.invoke(main_khal, args)
assert ': An Event' in result.output
assert not result.exception
def test_new(runner):
runner = runner(print_new='path')
result = runner.invoke(main_khal, 'new 13.03.2016 3d Visit'.split())
assert not result.exception
assert result.output.endswith('.ics\n')
assert result.output.startswith(str(runner.tmpdir))
@freeze_time('2015-6-1 8:00')
def test_new_interactive(runner):
runner = runner(print_new='path')
result = runner.invoke(
main_khal, 'new -i'.split(),
'Another event\n13:00 17:00\n\nNone\nn\n'
)
assert not result.exception
assert result.exit_code == 0
def test_debug(runner):
runner = runner()
result = runner.invoke(main_khal, ['-v', 'debug', 'printformats'])
assert result.output.startswith('debug: khal 0.')
assert 'using the config file at' in result.output
assert 'debug: Using config:\ndebug: [calendars]' in result.output
assert not result.exception
@freeze_time('2015-6-1 8:00')
def test_new_interactive_extensive(runner):
runner = runner(print_new='path', default_calendar=False)
result = runner.invoke(
main_khal, 'new -i 15:00 15:30'.split(),
'?\ninvalid\ntwo\n'
'Unicce Name\n'
'\n'
'Europe/London\n'
'bar\n'
'l\non a boat\n'
'p\nweekly\n'
'1.1.2018\n'
'a\n30m\n'
'c\nwork\n'
'n\n'
)
assert not result.exception
assert result.exit_code == 0
@freeze_time('2015-6-1 8:00')
def test_issue_1056(runner):
"""if an ansi escape sequence is contained in the output, we can't parse it
properly"""
runner = runner(print_new='path', default_calendar=False)
result = runner.invoke(
main_khal, 'new -i'.split(),
'two\n'
'new event\n'
'now\n'
'Europe/London\n'
'None\n'
't\n' # edit datetime range
'\n'
'n\n'
)
assert 'error parsing range' not in result.output
assert not result.exception
assert result.exit_code == 0
| pdav/khal | tests/cli_test.py | Python | mit | 29,696 | ["VisIt"] | 2f12048791c4902b42cdab8e01c9102c8b280d34bcb2c2a87222bcbb5ad00a2c |
#coding=utf8
# I don't know who the original author is. -- LEE
# @status Thank God, AC (accepted)
# write-only
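# Judging by the path (thisAndThat/300b.py) this is Codeforces 300B "Coach":
# given n students (n divisible by 3) and m pairs that must share a team,
# split everyone into teams of exactly three, or print -1 if impossible.
# group[i] accumulates the known teammates of student i.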
def start():
def has(lst,ele):
return lst.count(ele)>0
n,m=map(int,raw_input().split(' '))
mlist=[
map(int,raw_input().split(' '))
for i in xrange(m)
]
group=[[] for i in xrange(n+10)]
# desire-satisfy
# write-only
# @GET i should use *set* at very first
# FML
for mpair in mlist:
s=set(group[mpair[0]])
s.add(mpair[1])
group[mpair[0]]=list(s)
if len(group[mpair[0]])==2:
# group[group[mpair[0]][0]].append(mpair[1])
s=set(group[group[mpair[0]][0]])
s.add(mpair[1])
group[group[mpair[0]][0]]= list(s)
# group[mpair[1]].append(group[mpair[0]][0])
s=set(group[mpair[1]])
s.add(group[mpair[0]][0])
group[mpair[1]]=list(s)
s=set(group[mpair[1]])
s.add(mpair[0])
group[mpair[1]]=list(s)
if len(group[mpair[1]])==2:
# group[group[mpair[1]][0]].append(mpair[0])
s=set(group[group[mpair[1]][0]])
s.add(mpair[0])
group[group[mpair[1]][0]]=list(s)
# group[mpair[0]].append(group[mpair[1]][0])
s=set(group[mpair[0]])
s.add(group[mpair[1]][0])
group[mpair[0]]=list(s)
####################################
# print group
# desire-correctness
for i in range(1,n+1):
#FFFFFFFFFFFFFFFFFFFFFFFuck my life
if group[i].count(i)>0:
group[i].remove(i) # dedup -- coined a word because I forgot the real one
g=group[i]
if len(g)>2:
####################################
# print 'len3 break', g
print -1
return
# 3p-satisfy
for i in range(1,n+1):
if len(group[i])==1:
foundPartner=False
for j in range(i+1,n+1):
if len(group[j])==0:
group[j].append(i)
group[j].append(group[i][0])
group[i].append(j)
group[group[i][0]].append(j)
foundPartner=True
break
if not foundPartner:
####################################
# print i,group[i],'has no partner'
print -1
return
# one-person-group-satisfy will be done next step
elif len(group[i])==0:
# foundPartner=False
for j in range(i+1,n+1):
if len(group[j])==1:
group[i].append(j)
group[i].append(group[j][0])
group[j].append(i)
group[group[j][0]].append(i)
# foundPartner=True
break
# if not foundPartner:
# ####################################
# print i,group[i],'has no partner'
# print -1
# return
# print group
def printGroupLine(g):
for mem in g:
print mem,
print ''
# print '='*30
visit=[False for x in xrange(n+10)]
temp3=[]
for i in range(1,n+1):
if visit[i]:
continue
visit[i]=True
if len(group[i])==0:
temp3.append(i)
if len(temp3)==3:
printGroupLine(temp3)
temp3=[]
else: #len==2
group[i].append(i)
printGroupLine(group[i])
visit[group[i][0]]=True
visit[group[i][1]]=True
start()
| SnowOnion/CodeForcesLee | thisAndThat/300b.py | Python | mit | 2,765 | ["VisIt"] | 0eaa093f55e05e595e6681bff742e33f919ee46c0e877119ba314288a50e9a8a |
# ##WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING #
# Under development #
# ##WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING #
""" Module to upload specified job output files according to the parameters
defined in the production workflow.
"""
from DIRAC import gLogger
from DIRAC.Workflow.Modules.ModuleBase import ModuleBase, GracefulTermination
class UploadOutputs( ModuleBase ):
#############################################################################
def __init__( self ):
""" c'tor
"""
self.log = gLogger.getSubLogger( "UploadOutputs" )
super( UploadOutputs, self ).__init__( self.log )
self.outputDataStep = ''
self.outputData = []
self.outputList = []
#############################################################################
def _resolveInputVariables( self ):
""" The module parameters are resolved here.
"""
super( UploadOutputs, self )._resolveInputVariables()
# this comes from Job().setOutputData(). Typical for user jobs
if self.workflow_commons.has_key( 'OutputData' ):
self.outputData = self.workflow_commons['OutputData']
if not isinstance( self.outputData, list ): # type( userOutputData ) == type( [] ):
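# e.g. 'a.root; b.root' -> ['a.root', 'b.root'] after the strip/split below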
self.outputData = [ i.strip() for i in self.outputData.split( ';' ) ]
# if not present, we use the outputList, which is instead built up incrementally from the single step outputs.
# This is more typical for production jobs, which can have many steps linked one after the other.
elif self.workflow_commons.has_key( 'outputList' ):
self.outputList = self.workflow_commons['outputList']
else:
raise GracefulTermination( 'Nothing to upload' )
# in case you want to put a mask on the steps
# TODO: add it to the DIRAC API
if self.workflow_commons.has_key( 'outputDataStep' ):
self.outputDataStep = self.workflow_commons['outputDataStep']
# this comes from Job().setOutputData(). Typical for user jobs
if self.workflow_commons.has_key( 'OutputSE' ):
specifiedSE = self.workflow_commons['OutputSE']
if not type( specifiedSE ) == type( [] ):
self.outputSE = [i.strip() for i in specifiedSE.split( ';' )]
else:
self.log.verbose( 'No OutputSE specified, using default value: %s' % ( ', '.join( self.defaultOutputSE ) ) )
self.outputSE = []
# this comes from Job().setOutputData(). Typical for user jobs
if self.workflow_commons.has_key( 'OutputPath' ):
self.outputPath = self.workflow_commons['OutputPath']
def _initialize( self ):
""" gets the files to upload, check if to upload
"""
# lfnsList = self.__getOutputLFNs( self.outputData ) or outputList?
if not self._checkWFAndStepStatus():
raise GracefulTermination( 'No output data upload attempted' )
def __getOutputLFNs( self, outputList, *args ):
""" This is really VO-specific.
It should be replaced by each VO. The LFN constructed here is just an idea, and for testing purposes.
"""
lfnList = []
for outputFile in outputList:
lfnList.append( '/'.join( [str( x ) for x in args] ) + outputFile )
return lfnList
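# Hypothetical illustration of the sketch above:
# __getOutputLFNs( ['out.txt'], 'lhcb', 'user' ) -> ['lhcb/userout.txt'];
# note that the join adds no separator before the file name, something a
# real VO-specific override would have to handle.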
def _execute( self ):
""" uploads the files
"""
pass
| avedaee/DIRAC | Workflow/Modules/UploadOutputs.py | Python | gpl-3.0 | 3,453 | ["DIRAC"] | 799893ba316d24e183357ad63b73aad7b21472d54fd828b014aa9da497e8c6a0 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build phase 3 cmap requirements data.
This starts with default assignments based on unicode property
script and script_extensions data, then applies a sequence of
operations to generate an allocation of cmaps to 'scripts' i.e.
font families. The operations include assigning/removing common
characters in blocks, or entire blocks, to/from scripts,
assigning additional punctuation (based on reading the Unicode
8 standard and various L2 docs), and so on.
This uses pseudo script codes to represent the font families,
but this needs to be changed to some better representation.
For now, these are:
CJK: for all CJK scripts
EXCL: for excluded blocks (PUA, surrogates)
MONO: for blocks going into a monospace font
MUSIC: for blocks going into a music font
SYM2: for blocks going into a 'symbols 2' font with fewer masters
Zmth: for blocks going into a 'math' font
ZSym: for blocks going into the main symbols font (6 masters)
ZSye: for blocks going into the color emoji font
"""
import argparse
import collections
import sys
from nototools.py23 import unichr
from nototools import cldr_data
from nototools import cmap_data
from nototools import compare_cmap_data
from nototools import collect_cldr_punct
from nototools import noto_data
from nototools import opentype_data
from nototools import tool_utils
from nototools import unicode_data
_MERGED_SCRIPTS_BY_TARGET = {
"CJK": "Bopo Hang Hani Hans Hant Hira Jpan Kana Kore".split(),
"LGC": "Latn Grek Cyrl".split(),
}
def _invert_script_to_chars(script_to_chars):
"""Convert script_to_chars to char_to_scripts and return."""
char_to_scripts = collections.defaultdict(set)
for script, cps in script_to_chars.items():
for cp in cps:
char_to_scripts[cp].add(script)
return char_to_scripts
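# e.g. _invert_script_to_chars({'Latn': {0x41}, 'Grek': {0x41}})
# -> {0x41: {'Latn', 'Grek'}} (a defaultdict of sets keyed by codepoint)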
class CmapOps(object):
def __init__(
self,
script_to_chars=None,
log_events=False,
log_details=False,
undefined_exceptions=None,
):
if script_to_chars is None:
self._script_to_chars = {}
else:
self._script_to_chars = {
script: set(script_to_chars[script]) for script in script_to_chars
}
self._log_events = log_events
self._log_details = log_details
self._suppressed_blocks = {
"Hangul Jamo",
"Kangxi Radicals",
"Kanbun",
"CJK Symbols and Punctuation",
"Hangul Compatibility Jamo",
"CJK Strokes",
"Enclosed CJK Letters and Months",
"CJK Compatibility",
"CJK Compatibility Ideographs",
"CJK Compatibility Ideographs Supplement",
"CJK Unified Ideographs Extension A",
"CJK Unified Ideographs Extension B",
"CJK Unified Ideographs Extension C",
"CJK Unified Ideographs Extension D",
"CJK Unified Ideographs Extension E",
"CJK Unified Ideographs",
"CJK Radicals Supplement",
"Hangul Jamo Extended-A",
"Hangul Jamo Extended-B",
"Hangul Syllables",
}
self._suppressed_scripts = {
"EXCL",
}
self._block = None
self._undefined_exceptions = undefined_exceptions or set()
def _report(self, text):
if self._log_events:
print(text)
def _finish_block(self):
if self._block and self._log_events and not self._log_details:
for text in sorted(self._block_count):
print(
"%s: %s"
% (text, tool_utils.write_int_ranges(self._block_count[text]))
)
def _report_cp(self, cp, text, script):
if not self._log_events:
return
cp_block = unicode_data.block(cp)
if cp_block != self._block:
self._finish_block()
self._block = cp_block
print("# block: " + self._block)
self._block_count = collections.defaultdict(set)
if self._log_details:
if not (
self._block in self._suppressed_blocks
or script in self._suppressed_scripts
):
print(self._cp_info(cp), text)
else:
self._block_count[text].add(cp)
def _error(self, text):
sys.stderr.write(text + "\n")
raise ValueError("failed")
def _verify_script_exists(self, script):
if script not in self._script_to_chars:
self._error("script %s does not exist" % script)
def _verify_script_does_not_exist(self, script):
if script in self._script_to_chars:
self._error("script %s already exists" % script)
def _verify_scripts_exist(self, scripts):
for script in scripts:
self._verify_script_exists(script)
return sorted(scripts)
def _verify_script_empty(self, script):
if len(self._script_to_chars[script]):
self._error("script %s is not empty, cannot delete" % script)
def _cp_info(self, cp):
return "%04X (%s)" % (cp, unicode_data.name(cp, "<unnamed>"))
def _script_ok_add(self, cp, script):
if unicode_data.is_defined(cp) or cp in self._undefined_exceptions:
self._script_cp_ok_add(cp, script)
def _script_cp_ok_add(self, cp, script):
if cp not in self._script_to_chars[script]:
self._script_to_chars[script].add(cp)
self._report_cp(cp, "added to " + script, script)
def _script_ok_remove(self, cp, script):
if unicode_data.is_defined(cp):
self._script_cp_ok_remove(cp, script)
def _script_cp_ok_remove(self, cp, script):
if cp in self._script_to_chars[script]:
self._report_cp(cp, "removed from " + script, script)
self._script_to_chars[script].remove(cp)
def _finish_phase(self):
self._finish_block()
self._block = None
def phase(self, phase_name):
self._finish_phase()
self._report("\n# phase: " + phase_name)
def log(self, log_msg):
self._report("# log: " + log_msg)
def ensure_script(self, script):
if script in self._script_to_chars:
return
self.create_script(script)
def create_script(self, script):
self._verify_script_does_not_exist(script)
self._script_to_chars[script] = set()
self._report("# create script: " + script)
def delete_script(self, script):
self._verify_script_exists(script)
self._verify_script_empty(script)
del self._script_to_chars[script]
self._report("# delete script: " + script)
def add(self, cp, script):
self._verify_script_exists(script)
self._script_ok_add(cp, script)
def add_all(self, cps, script):
self._verify_script_exists(script)
for cp in sorted(cps):
self._script_ok_add(cp, script)
def add_all_to_all(self, cps, scripts):
scripts = self._verify_scripts_exist(scripts)
for cp in sorted(cps):
if unicode_data.is_defined(cp):
for script in scripts:
self._script_cp_ok_add(cp, script)
def remove(self, cp, script):
self._verify_script_exists(script)
self._script_ok_remove(cp, script)
def remove_all(self, cps, script):
self._verify_script_exists(script)
for cp in sorted(cps):
self._script_ok_remove(cp, script)
def remove_all_from_all(self, cps, scripts):
scripts = self._verify_scripts_exist(scripts)
for cp in sorted(cps):
if unicode_data.is_defined(cp):
for script in scripts:
self._script_cp_ok_remove(cp, script)
def remove_script_from(self, src_script, from_script):
self._verify_script_exists(from_script)
cps = self.script_chars(src_script)
for cp in cps:
self._script_ok_remove(cp, from_script)
def move_to_from(self, cp, to_script, from_script):
self._verify_script_exists(from_script)
self._verify_script_exists(to_script)
self._script_ok_add(cp, to_script)
self._script_ok_remove(cp, from_script)
def move_all_to_from(self, cps, to_script, from_script):
"""Combines add and remove."""
self._verify_script_exists(from_script)
self._verify_script_exists(to_script)
sorted_cps = sorted(cps)
for cp in sorted_cps:
self._script_ok_add(cp, to_script)
for cp in sorted_cps:
self._script_ok_remove(cp, from_script)
def all_scripts(self):
return self._script_to_chars.keys()
def create_char_to_scripts(self):
return _invert_script_to_chars(self._script_to_chars)
def script_chars(self, script):
self._verify_script_exists(script)
return sorted(self._script_to_chars[script])
def create_script_to_chars(self):
return {
script: set(self._script_to_chars[script])
for script in self._script_to_chars
}
def finish(self):
self._finish_phase()
def _build_block_to_primary_script():
"""Create a map from block to the primary script in a block.
If there are no characters defined in the block, it gets the script 'EXCL',
for 'exclude'; we don't assign characters from such blocks to any font.
If the most common script accounts for less than 80% of the defined characters
in the block, we use the primary from assigned_primaries, which might be None.
It's an error if there's no default primary and it's not listed in
assigned_primaries."""
assigned_primaries = {
"Basic Latin": "Latn",
"Latin-1 Supplement": "Latn",
"Vedic Extensions": "Deva",
"Superscripts and Subscripts": "Latn",
"Number Forms": "Zyyy",
"CJK Symbols and Punctuation": "CJK",
"Enclosed CJK Letters and Months": "CJK",
"CJK Compatibility": "CJK",
"Alphabetic Presentation Forms": None,
"Halfwidth and Fullwidth Forms": "CJK",
"Kana Supplement": "CJK",
}
inherited_primaries = {
"Combining Diacritical Marks": "Latn",
"Combining Diacritical Marks Extended": "Latn",
"Combining Diacritical Marks Supplement": "Latn",
"Combining Diacritical Marks for Symbols": "Zyyy",
"Variation Selectors": "EXCL",
"Combining Half Marks": "Latn",
"Variation Selectors Supplement": "EXCL",
}
block_to_script = {}
for block in unicode_data.block_names():
start, finish = unicode_data.block_range(block)
script_counts = collections.defaultdict(int)
num = 0
for cp in range(start, finish + 1):
script = unicode_data.script(cp)
if script != "Zzzz":
script_counts[script] += 1
num += 1
max_script = None
max_script_count = 0
for script, count in script_counts.items():
if count > max_script_count:
max_script = script
max_script_count = count
if num == 0:
max_script = "EXCL" # exclude
elif float(max_script_count) / num < 0.8:
info = sorted(script_counts.items(), key=lambda t: (-t[1], t[0]))
block_info = "%s %s" % (block, ", ".join("%s/%d" % t for t in info))
if block in assigned_primaries:
max_script = assigned_primaries[block]
# print('assigning primary', block_info, '->', max_script)
else:
sys.stderr.write("ERROR: no primary\n", block, block_info)
max_script = None
elif max_script == "Zinh":
if block in inherited_primaries:
max_script = inherited_primaries[block]
else:
sys.stderr.write("ERROR: no inherited primary\n", block, block_info)
max_script = None
block_to_script[block] = max_script
return block_to_script
_block_to_primary_script = None
def _primary_script_for_block(block):
"""Return the primary script for the block, or None if no primary script."""
global _block_to_primary_script
if not _block_to_primary_script:
_block_to_primary_script = _build_block_to_primary_script()
return _block_to_primary_script[block]
def _remove_unicode_assignments(cmap_ops):
"""The starting point is based on the script and script extensions data from
Unicode. Sometimes the assignments seem premature."""
cmap_ops.phase("remove unicode assignments")
# Jelle says A8F1 makes no sense for Bengali since other characters needed
# for cantillation are not defined. Unicode script extensions assign it to
# Deva and Beng, leave it for Deva.
cmap_ops.remove(0xA8F1, "Beng")
def _unassign_inherited_and_common_with_extensions(cmap_ops):
"""Inherited and common characters with an extension that is neither of
these get removed from inherited/common scripts."""
def remove_cps_with_extensions(script):
for cp in cmap_ops.script_chars(script):
for s in unicode_data.script_extensions(cp):
if s != "Zinh" and s != "Zyyy":
cmap_ops.remove(cp, script)
break
cmap_ops.phase("unassign inherited with extensions")
remove_cps_with_extensions("Zinh")
cmap_ops.phase("unassign common with extensions")
remove_cps_with_extensions("Zyyy")
def _reassign_inherited(cmap_ops):
"""Assign all 'Zinh' chars to the primary script in their block.
Fail if there's no primary script. 'Zinh' is removed from script_to_chars."""
cmap_ops.phase("reassign inherited")
for cp in cmap_ops.script_chars("Zinh"):
primary_script = _primary_script_for_block(unicode_data.block(cp))
if not primary_script:
sys.stderr.write("Error: no primary script for %04X\n" % cp)
elif primary_script == "Zinh":
sys.stderr.write("Error: primary script for %04X is Zinh\n" % cp)
else:
cmap_ops.ensure_script(primary_script)
cmap_ops.add(cp, primary_script)
cmap_ops.remove(cp, "Zinh")
cmap_ops.delete_script("Zinh")
def _reassign_common(cmap_ops):
"""Move 'Zyyy' chars in blocks where 'Zyyy' is not primary to the primary
script."""
cmap_ops.phase("reassign common")
for cp in cmap_ops.script_chars("Zyyy"):
primary_script = _primary_script_for_block(unicode_data.block(cp))
if primary_script is not None and primary_script != "Zyyy":
cmap_ops.ensure_script(primary_script)
cmap_ops.add(cp, primary_script)
cmap_ops.remove(cp, "Zyyy")
def _unassign_latin(cmap_ops):
"""Remove some characters that extensions assigns to Latin but which we don't
need there."""
unwanted_latn = tool_utils.parse_int_ranges(
"""
0951 0952 # devanagari marks
10FB # Georgian paragraph separator
"""
)
cmap_ops.phase("unassign latin")
cmap_ops.remove_all(unwanted_latn, "Latn")
def _assign_cldr_punct(cmap_ops):
"""Assigns cldr punctuation to scripts."""
for script, punct in collect_cldr_punct.script_to_punct().items():
if script != "CURRENCY":
cmap_ops.phase("assign cldr punct for " + script)
cmap_ops.ensure_script(script)
for cp in punct:
cmap_ops.add(ord(cp), script)
def _reassign_scripts(cmap_ops, scripts, new_script):
"""Reassign all chars in scripts to new_script."""
assert new_script not in scripts
cmap_ops.phase("reassign scripts")
cmap_ops.ensure_script(new_script)
for script in sorted(scripts):
cmap_ops.phase("reassign %s to %s" % (script, new_script))
for cp in cmap_ops.script_chars(script):
cmap_ops.remove(cp, script)
cmap_ops.add(cp, new_script)
cmap_ops.delete_script(script)
def _reassign_merged_scripts(cmap_ops):
"""Reassign merged scripts."""
for target, scripts in sorted(_MERGED_SCRIPTS_BY_TARGET.items()):
cmap_ops.phase("reassign to " + target)
_reassign_scripts(cmap_ops, scripts, target)
def _reassign_common_by_block(cmap_ops):
"""Reassign common chars to new scripts based on block."""
block_assignments = {
"Spacing Modifier Letters": "LGC",
"General Punctuation": "LGC",
"Currency Symbols": "LGC",
"Combining Diacritical Marks for Symbols": "Zsym",
"Letterlike Symbols": "LGC",
"Number Forms": "Zsym",
"Arrows": "Zmth",
"Mathematical Operators": "Zmth",
"Miscellaneous Technical": "Zsym",
"Control Pictures": "SYM2",
"Optical Character Recognition": "SYM2",
"Enclosed Alphanumerics": "Zsym",
"Box Drawing": "MONO",
"Block Elements": "MONO",
"Geometric Shapes": "SYM2", # change
"Miscellaneous Symbols": "Zsym",
"Dingbats": "SYM2",
"Miscellaneous Mathematical Symbols-A": "Zmth",
"Supplemental Arrows-A": "Zmth",
"Supplemental Arrows-B": "Zmth",
"Miscellaneous Mathematical Symbols-B": "Zmth",
"Supplemental Mathematical Operators": "Zmth",
"Miscellaneous Symbols and Arrows": "SYM2",
"Supplemental Punctuation": "LGC",
"Ideographic Description Characters": "CJK",
"Yijing Hexagram Symbols": "SYM2",
"Modifier Tone Letters": "LGC",
"Vertical Forms": "CJK",
"CJK Compatibility Forms": "CJK",
"Small Form Variants": "CJK",
"Specials": "SYM2",
"Ancient Symbols": "SYM2",
"Phaistos Disc": "SYM2",
"Byzantine Musical Symbols": "MUSIC",
"Musical Symbols": "MUSIC",
"Tai Xuan Jing Symbols": "SYM2",
"Mathematical Alphanumeric Symbols": "Zmth",
"Mahjong Tiles": "SYM2",
"Domino Tiles": "SYM2",
"Playing Cards": "SYM2",
"Enclosed Alphanumeric Supplement": "Zsym",
"Enclosed Ideographic Supplement": "CJK",
"Miscellaneous Symbols and Pictographs": "SYM2",
"Emoticons": "SYM2",
"Ornamental Dingbats": "SYM2",
"Transport and Map Symbols": "SYM2",
"Alchemical Symbols": "Zsym",
"Geometric Shapes Extended": "SYM2",
"Supplemental Arrows-C": "SYM2",
"Supplemental Symbols and Pictographs": "SYM2",
"Tags": "EXCL",
}
cmap_ops.phase("reassign common by block")
used_assignments = set()
last_block = None
for cp in cmap_ops.script_chars("Zyyy"):
block = unicode_data.block(cp)
if block != last_block:
last_block = block
if block not in block_assignments:
sys.stderr.write("ERROR: no assignment for block %s\n" % block)
new_script = None
else:
new_script = block_assignments[block]
cmap_ops.ensure_script(new_script)
used_assignments.add(block)
if new_script:
cmap_ops.remove(cp, "Zyyy")
cmap_ops.add(cp, new_script)
else:
sys.stderr.write(
" could not assign %04x %s\n" % (cp, unicode_data.name(cp))
)
if len(used_assignments) != len(block_assignments):
sys.stderr.write("ERROR: some block assignments unused\n")
unused = set(
[block for block in block_assignments if block not in used_assignments]
)
for block in unicode_data.block_names():
if block in unused:
sys.stderr.write(" %s\n" % block)
unused.remove(block)
if unused:
sys.stderr.write("ERROR: unknown block names\n")
for block in sorted(unused):
sys.stderr.write(" %s\n" % block)
cmap_ops.delete_script("Zyyy")
def _block_cps(block):
start, end = unicode_data.block_range(block)
return frozenset(
[cp for cp in range(start, end + 1) if unicode_data.is_defined(cp)]
)
def _reassign_by_block(cmap_ops):
"""Reassign all chars in select blocks to designated scripts."""
# block, from, to. from '*' means from all scripts.
block_assignments = [
("Number Forms", "LGC", "Zsym"),
("Halfwidth and Fullwidth Forms", "LGC", "CJK"),
("Aegean Numbers", "*", "Linb"),
("Ancient Greek Numbers", "*", "SYM2"),
("Ancient Symbols", "LGC", "SYM2"),
("Braille Patterns", "Brai", "SYM2"),
("Coptic Epact Numbers", "*", "SYM2"),
("Rumi Numeral Symbols", "*", "SYM2"),
("Ancient Greek Musical Notation", "*", "MUSIC"),
("Counting Rod Numerals", "CJK", "SYM2"),
("Arabic Mathematical Alphabetic Symbols", "*", "Zmth"),
("High Surrogates", "*", "EXCL"),
("High Private Use Surrogates", "*", "EXCL"),
("Low Surrogates", "*", "EXCL"),
("Private Use Area", "*", "EXCL"),
("Variation Selectors", "*", "EXCL"),
("Tags", "*", "EXCL"),
("Variation Selectors Supplement", "*", "EXCL"),
("Supplementary Private Use Area-A", "*", "EXCL"),
("Supplementary Private Use Area-B", "*", "EXCL"),
]
block_assignments = sorted(
block_assignments, key=lambda k: unicode_data.block_range(k[0])[0]
)
cmap_ops.phase("reassign by block")
char_to_scripts = cmap_ops.create_char_to_scripts()
for block, from_scripts, to_script in block_assignments:
start, finish = unicode_data.block_range(block)
if from_scripts == "*":
all_scripts = True
else:
all_scripts = False
from_scripts = from_scripts.split()
for cp in range(start, finish + 1):
if not unicode_data.is_defined(cp):
continue
if cp not in char_to_scripts and to_script != "EXCL":
sys.stderr.write(
"reassign missing %04X %s\n"
% (cp, unicode_data.name(cp, "<unnamed>"))
)
continue
if all_scripts:
from_list = char_to_scripts[cp]
else:
from_list = from_scripts
for from_script in from_list:
if from_script == to_script:
continue
if not all_scripts and (from_script not in from_scripts):
continue
cmap_ops.remove(cp, from_script)
cmap_ops.add(cp, to_script)
def _remove_empty(cmap_ops):
"""Remove any empty scripts (Braille should be one)."""
cmap_ops.phase("remove empty")
script_to_chars = cmap_ops.create_script_to_chars()
for script, chars in script_to_chars.items():
if not chars:
cmap_ops.delete_script(script)
def _reassign_symbols(cmap_ops):
"""Some symbols belong together but get split up when we assign by block."""
cmap_ops.phase("reassign symbols")
white_arrow_parts = tool_utils.parse_int_ranges("2b00-2b04 1f8ac-1f8ad")
cmap_ops.move_all_to_from(white_arrow_parts, "Zsym", "SYM2")
tv_symbols = tool_utils.parse_int_ranges("23fb-23fe 2b58")
cmap_ops.move_all_to_from(tv_symbols, "SYM2", "Zsym")
# we want a copy in SYM2 for sizes, assume MATH will do its own thing
# in context.
math_circles = tool_utils.parse_int_ranges("2219 2299 22c5")
cmap_ops.add_all(math_circles, "SYM2")
# keyboard symbols, user interface symbols, media play symbols
misc_tech = tool_utils.parse_int_ranges(
"2318 231a-231b 2324-2328 232b 237d 23ce-23cf 23e9-23fa 23fb-23fe"
)
cmap_ops.move_all_to_from(misc_tech, "SYM2", "Zsym")
# Split Miscellaneous Symbols into SYM2 and Zsym by related symbols.
# mostly this is based on whether the group of symbols seems to have a use
# in running text or is based on some alphabetic character.
to_sym2 = tool_utils.parse_int_ranges(
"""2600-2609 # weather
260e-2612 # ballot box
2614 # umbrella with rain
2615 # hot beverage
2616-2617 # shogi pieces
261a-261f # pointing hands
2620-2623 # caution signs
2626-262f 2638 # religious/political
2630-2637 # chinese trigrams
2668 # hot springs
267f # wheelchair symbol
2686-2689 # go markers
268a-268f # yijing monograms/digrams
269e-269f # closed captioning
26a1 # high voltage
26aa-26ac # circles
26bd-26be # sports
26bf # squared key
26c0-26c3 # checkers/draughts
26c4-26c8 # weather
26c9-26ca # more shogi
26cb # game symbol
"""
)
to_zsym = tool_utils.parse_int_ranges(
"""260a-260d # alchemical symbols
2613 # saltire
2618-2619 # shamrock, floral bullet
2624-2625 # medical, ankh
2639-263b # smiley faces
263c-2647 # astrological
2648-2653 # western zodiac
2654-265f # western chess
2660-2667 # card suits
2669-266f # music symbols
2670-2671 # syriac cross
2672-267d # recycling
267e # paper
2680-2685 # die faces
2690-269b # dictionary and map symbols, go with Zsym since dictionary use
269c # fleur-de-lis
269d # outlined white star, a symbol of Morocco
26a0 # warning sign (exclamation point inside rounded triangle)
26a2-26a9 # gender
26ad-26b1 # genealogical
26b2 # gender
26b3-26bc # astrological
26cc-26cd # traffic signs
26ce # zodiac
26cf-26e1 # traffic signs again
26e2 # astronomical
26e3 # map symbol
26e4-26e7 # pentagrams
26e8-26ff # more map symbols
"""
)
# sanity check
duplicate_cps = to_sym2 & to_zsym
if duplicate_cps:
raise Exception(
"%d cps in both from and to symbols: %s"
% (len(duplicate_cps), tool_utils.write_int_ranges(duplicate_cps))
)
missing_cps = set(range(0x2600, 0x2700))
missing_cps -= to_zsym
missing_cps -= to_sym2
if missing_cps:
raise Exception(
"%d cps from Misc. Symbols in neither from nor to symbols: %s"
% (len(missing_cps), tool_utils.write_int_ranges(missing_cps))
)
cmap_ops.move_all_to_from(to_sym2, "SYM2", "Zsym")
cmap_ops.move_all_to_from(to_zsym, "Zsym", "SYM2")
# neutral face should go with smiley faces, which are in Zsym
cmap_ops.move_to_from(0x1F610, "Zsym", "SYM2")
# more math symbols that are geometric and might want dual treatment
more_math = tool_utils.parse_int_ranges("2981 29bf 29eb")
cmap_ops.add_all(more_math, "SYM2")
# let's put white arrows into Sym2
white_arrows = tool_utils.parse_int_ranges(
"""21e6 21e8 21e7 21e9 21f3 2b04 2b00-2b03 1f8ac 1f8ad 21ea-21f0
"""
)
cmap_ops.move_all_to_from(white_arrows, "SYM2", "Zsym")
# circled digits should all go into Symbols
circled_digits = tool_utils.parse_int_ranges(
"""24ea # circled digit 0
2460-2473 # circled digit 1-9, number 10-20
24ff # negative circled digit 0
1f10c # dingbat negative circled sans-serif digit 0
2776-277f # dingbat negative circled digits 1-9, number 10
2780-2789 # dingbat circled sans-serif digits 1-9, number 10
278a-2793 # dingbat negative circled sans-serif digits 1-9, number 10
24eb-24f4 # negative circled number 11-20
1f10b # dingbat circled sans-serif digit 0
"""
)
cmap_ops.move_all_to_from(circled_digits, "Zsym", "SYM2")
# hourglass with flowing sand is in a block that got assigned to Zsym by
# default. Looking at it and its neighbors, it seems really odd that these
# are with 'technical symbols'
emoji_symbols = tool_utils.parse_int_ranges("23f0-23f3")
cmap_ops.add_all(emoji_symbols, "SYM2")
cmap_ops.remove_all(emoji_symbols, "Zsym")
# neutral face should go with white smiling/frowning face, which are in Zsym
cmap_ops.add(0x1F610, "Zsym")
cmap_ops.remove(0x1F610, "SYM2")
# Zsym has combining marks, so add dotted circle.
# Combining enclosing marks in Symbols need latin to combine with, so add
# letters and digits, also dotted circle if not there already.
alphanum = tool_utils.parse_int_ranges("30-39 41-5a 61-7a 25cc")
cmap_ops.add_all(alphanum, "Zsym")
def _reassign_emoji(cmap_ops):
"""Reassign all emoji to emoji-color. Then assign all emoji with default
text presentation, plus those with variation selectors, plus select
others, to SYM2."""
cmap_ops.phase("reassign emoji")
color_only_emoji = set(unicode_data.get_presentation_default_emoji())
color_only_emoji.remove(0x1F004) # mahjong tile red dragon
color_only_emoji.remove(0x1F0CF) # playing card black joker
# remove emoji with a variation selector that allows a text presentation
# include proposed variants from 2016/08/23
color_only_emoji -= unicode_data.get_unicode_emoji_variants("proposed_extra")
all_emoji = unicode_data.get_emoji()
cmap_ops.create_script("Zsye")
cmap_ops.add_all(all_emoji, "Zsye")
cmap_ops.remove_all_from_all(color_only_emoji, ["Zsym", "SYM2"])
def _assign_nastaliq(cmap_ops):
"""Create Aran script based on requirements doc."""
# Range spec matches "Noto Nastaliq requirements" doc, Tier 1.
urdu_chars = tool_utils.parse_int_ranges(
"""
0600-0604 060b-0614 061b 061c 061e-061f 0620 0621-063a
0640-0659 065e-066d 0670-0673 0679 067a-067b 067c 067d
067e 067f-0680 0681 0683-0684 0685-0686 0687 0688-0689
068a 068b 068c-068d 068e 068f 0691 0693 0696 0698 0699
069a 069e 06a6 06a9 06ab 06af-06b0 06b1 06b3 06b7 06ba
06bb 06bc 06be 06c0-06c4 06cc-06cd 06d0 06d2-06d5
06dd-06de 06e9 06ee-06ef 06f0-06f9 06ff 0759 075c 0763
0767-0769 076b-077d 08ff fbb2-fbc1 fd3e-fd3f fdf2
fdfa-fdfd"""
)
cmap_ops.phase("assign nastaliq")
cmap_ops.create_script("Aran")
cmap_ops.add_all(urdu_chars, "Aran")
# These additional arabic were in phase 2 scripts.
additional_arabic = tool_utils.parse_int_ranges(
"""
0609 # ARABIC-INDIC PER MILLE SIGN
060a # ARABIC-INDIC PER TEN THOUSAND SIGN
063b # ARABIC LETTER KEHEH WITH TWO DOTS ABOVE
063c # ARABIC LETTER KEHEH WITH THREE DOTS BELOW
063d # ARABIC LETTER FARSI YEH WITH INVERTED V
063e # ARABIC LETTER FARSI YEH WITH TWO DOTS ABOVE
063f # ARABIC LETTER FARSI YEH WITH THREE DOTS ABOVE
065d # ARABIC REVERSED DAMMA
066e # ARABIC LETTER DOTLESS BEH
066f # ARABIC LETTER DOTLESS QAF
06a1 # ARABIC LETTER DOTLESS FEH
06a4 # ARABIC LETTER VEH
06e0 # ARABIC SMALL HIGH UPRIGHT RECTANGULAR ZERO
06e1 # ARABIC SMALL HIGH DOTLESS HEAD OF KHAH
076a # ARABIC LETTER LAM WITH BAR
"""
)
cmap_ops.add_all(additional_arabic, "Aran")
# noto-fonts#597 requests exclamation point
# noto-fonts#449 requests european digits
european_digits = tool_utils.parse_int_ranges("0021 0030-0039")
cmap_ops.add_all(european_digits, "Aran")
# noto-fonts#368 requests these characters
extra_arabic_1 = tool_utils.parse_int_ranges("067b 0684 068a 06b3 0759 0768")
cmap_ops.add_all(extra_arabic_1, "Aran")
# noto-fonts#606 requests a few additional characters
extra_arabic_2 = tool_utils.parse_int_ranges("06c6 06c7 06ca 06d5")
cmap_ops.add_all(extra_arabic_2, "Aran")
def _assign_complex_script_extra(cmap_ops):
"""Assigns Harfbuzz and USE characters to the corresponding scripts."""
# Based on harfbuzz hb-ot-shape-complex-private
# Removes Hang, Jungshik reports Behdad says it's not needed for Hang.
hb_complex_scripts = """
Arab Aran Bali Batk Beng Brah Bugi Buhd Cakm Cham Deva Dupl Egyp Gran
Gujr Guru Hano Hebr Hmng Java Kali Khar Khmr Khoj Knda Kthi Lana Laoo
Lepc Limb Mahj Mand Mani Mlym Modi Mong Mtei Mymr Nkoo Orya Phag Phlp
Rjng Saur Shrd Sidd Sind Sinh Sund Sylo Syrc Tagb Takr Tale Talu Taml
Tavt Telu Tfng Tglg Thai Tibt Tirh
""".split()
hb_extra = tool_utils.parse_int_ranges(
"""
200c # ZWNJ
200d # ZWJ
25cc # dotted circle"""
)
# these scripts are based on github noto-fonts#576
use_complex_scripts = """
Bali Batk Brah Bugi Buhd Hano Kthi Khar Lepc Limb Mtei Rjng Saur Sund
Sylo Tglg Tagb Tale Tavt
""".split()
# these characters are based on
# https://www.microsoft.com/typography/OpenTypeDev/USE/intro.htm
use_extra = tool_utils.parse_int_ranges(
"""
200b # ZWS
200c # ZWNJ
200d # ZWJ
25cc # dotted circle
00a0 # NBS
00d7 # multiplication sign
2012 # figure dash
2013 # en dash
2014 # em dash
2015 # horizontal bar
2022 # bullet
25fb # white medium square
25fc # black medium square
25fd # white medium small square
25fe # black medium small square"""
)
cmap_ops.phase("assign hb complex")
cmap_ops.add_all_to_all(hb_extra, hb_complex_scripts)
cmap_ops.phase("assign use complex")
cmap_ops.add_all_to_all(use_extra, use_complex_scripts)
def _assign_hyphens_for_autohyphenation(cmap_ops):
"""Assign hyphens per Roozbeh's request."""
hyphens = [0x002D, 0x2010] # hyphen-minus # hyphen
# see github noto-fonts#524
# Cyrl, Grek, Latn rolled into LGC
# CJK not listed, these don't hyphenate, data is in CLDR for other reasons
hyphen_scripts = """
Arab Aran Armn Beng Copt Deva Ethi Geor Gujr Guru Hebr
Khmr Knda LGC Mlym Orya Taml Telu Thai Tibt
""".split()
cmap_ops.phase("assign hyphens")
cmap_ops.add_all_to_all(hyphens, hyphen_scripts)
def _generate_script_extra(script_to_chars):
"""Generate script extra table."""
for script in sorted(noto_data.P3_EXTRA_CHARACTERS_NEEDED):
block = None
cps = noto_data.P3_EXTRA_CHARACTERS_NEEDED[script]
chars = script_to_chars[script]
if script == "Zsym":
chars.update(script_to_chars["Zmth"])
chars.update(script_to_chars["SYM2"])
chars.update(script_to_chars["MUSIC"])
chars.update(script_to_chars["MONO"])
for cp in sorted(cps):
if not unicode_data.is_defined(cp):
continue
name = unicode_data.name(cp, "<unnamed>")
if cp not in chars:
if block is None:
print('\'%s\': tool_utils.parse_int_ranges("""' % script)
cp_block = unicode_data.block(cp)
if cp_block != block:
block = cp_block
print(" # %s" % block)
print(" %04X # %s" % (cp, name))
chars.add(cp)
if block is not None:
print(' """),')
# maintained using 'regen_script_required' fn
_SCRIPT_REQUIRED = [
# Adlm - Adlm (Adlam)
(
"Adlm",
# Comment
"""
Additional characters recommended by Monotype.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
# Arabic
061F # ARABIC QUESTION MARK
# General Punctuation
204F # REVERSED SEMICOLON
# Supplemental Punctuation
2E41 # REVERSED COMMA
""",
),
# Aghb - Caucasian Albanian
(
"Aghb",
# Comment
"""
From core specification.
""",
# Data
"""
# Combining Diacritical Marks
0304 # COMBINING MACRON
0331 # COMBINING MACRON BELOW
# Combining Half Marks
FE20 # COMBINING LIGATURE LEFT HALF
FE21 # COMBINING LIGATURE RIGHT HALF
FE22 # COMBINING DOUBLE TILDE LEFT HALF
FE23 # COMBINING DOUBLE TILDE RIGHT HALF
FE24 # COMBINING MACRON LEFT HALF
FE25 # COMBINING MACRON RIGHT HALF
FE26 # COMBINING CONJOINING MACRON
FE27 # COMBINING LIGATURE LEFT HALF BELOW
FE28 # COMBINING LIGATURE RIGHT HALF BELOW
FE29 # COMBINING TILDE LEFT HALF BELOW
FE2A # COMBINING TILDE RIGHT HALF BELOW
FE2B # COMBINING MACRON LEFT HALF BELOW
FE2C # COMBINING MACRON RIGHT HALF BELOW
FE2D # COMBINING CONJOINING MACRON BELOW
FE2E # COMBINING CYRILLIC TITLO LEFT HALF
FE2F # COMBINING CYRILLIC TITLO RIGHT HALF
""",
),
# Ahom - Ahom
# Arab - Arabic
(
"Arab",
# Comment
"""
According to Roozbeh (and existing fonts) the following punctuation and
digits are used with and interact with Arabic characters. Hyphen and
comma are to align with Aran.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
0030 # DIGIT ZERO
0031 # DIGIT ONE
0032 # DIGIT TWO
0033 # DIGIT THREE
0034 # DIGIT FOUR
0035 # DIGIT FIVE
0036 # DIGIT SIX
0037 # DIGIT SEVEN
0038 # DIGIT EIGHT
0039 # DIGIT NINE
003A # COLON
# Latin-1 Supplement
00A0 # NO-BREAK SPACE
# Combining Diacritical Marks
034F # COMBINING GRAPHEME JOINER
# General Punctuation
200E # LEFT-TO-RIGHT MARK
200F # RIGHT-TO-LEFT MARK
2010 # HYPHEN
2011 # NON-BREAKING HYPHEN
204F # REVERSED SEMICOLON
# Supplemental Punctuation
2E41 # REVERSED COMMA
""",
),
# Aran - Aran (Nastaliq)
(
"Aran",
# Comment
"""
Hyphens are required for Urdu from the Arabic.
Guillemets are used for Persian, according to Behdad.
Other punctuation was in phase2 fonts, so presumably from Kamal.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
002C # COMMA
002E # FULL STOP
003A # COLON
# Latin-1 Supplement
00AB # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
00BB # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
# Arabic
061C # ARABIC LETTER MARK
# General Punctuation
2010 # HYPHEN
2011 # NON-BREAKING HYPHEN
# Arabic Presentation Forms-A
FDF4 # ARABIC LIGATURE MOHAMMAD ISOLATED FORM
""",
),
# Armi - Imperial Aramaic
# Armn - Armenian
(
"Armn",
# Comment
"""
Characters referenced in Armenian encoding cross ref page
see http://www.unicode.org/L2/L2010/10354-n3924-armeternity.pdf
also see http://man7.org/linux/man-pages/man7/armscii-8.7.html
also see core specification.
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002D # HYPHEN-MINUS
002E # FULL STOP
# Latin-1 Supplement
00A0 # NO-BREAK SPACE
00A7 # SECTION SIGN
# Spacing Modifier Letters
02BB # MODIFIER LETTER TURNED COMMA
# General Punctuation
2010 # HYPHEN
2014 # EM DASH
2019 # RIGHT SINGLE QUOTATION MARK
2024 # ONE DOT LEADER
# Alphabetic Presentation Forms
FB13 # ARMENIAN SMALL LIGATURE MEN NOW
FB14 # ARMENIAN SMALL LIGATURE MEN ECH
FB15 # ARMENIAN SMALL LIGATURE MEN INI
FB16 # ARMENIAN SMALL LIGATURE VEW NOW
FB17 # ARMENIAN SMALL LIGATURE MEN XEH
""",
),
# Avst - Avestan
(
"Avst",
# Comment
"""
From Core Specification and NamesList.txt
www.unicode.org/L2/L2007/07006r-n3197r-avestan.pdf
""",
# Data
"""
# Basic Latin
002E # FULL STOP
# Latin-1 Supplement
00B7 # MIDDLE DOT
# General Punctuation
200C # ZERO WIDTH NON-JOINER
# Supplemental Punctuation
2E30 # RING POINT
2E31 # WORD SEPARATOR MIDDLE DOT
""",
),
# Bali - Balinese
# Bamu - Bamum
# Bass - Bassa Vah
(
"Bass",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
002C # COMMA
002E # FULL STOP
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
""",
),
# Batk - Batak
# Beng - Bengali
(
"Beng",
# Comment
"""
Added by Monotype.
""",
# Data
"""
# Spacing Modifier Letters
02BC # MODIFIER LETTER APOSTROPHE
""",
),
# Bhks - Bhks (Bhaiksuki)
(
"Bhks",
# Comment
"""
Reported by user on nototools#429
""",
# Data
"""
# General Punctuation
200B # ZERO WIDTH SPACE
# Geometric Shapes
25CC # DOTTED CIRCLE
""",
),
# Brah - Brahmi
# Brai - Braille
# Bugi - Buginese
# Buhd - Buhid
# CJK - (Bopo,Hang,Hani,Hans,Hant,Hira,Jpan,Kana,Kore)
# Cakm - Chakma
# Cans - Canadian Aboriginal
(
"Cans",
# Comment
"""
From core specification and web sites.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
""",
),
# Cari - Carian
(
"Cari",
# Comment
"""
From core specification.
""",
# Data
"""
# Latin-1 Supplement
00B7 # MIDDLE DOT
# General Punctuation
205A # TWO DOT PUNCTUATION
205D # TRICOLON
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
""",
),
# Cham - Cham
(
"Cham",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002D # HYPHEN-MINUS
003A # COLON
003F # QUESTION MARK
# General Punctuation
2010 # HYPHEN
""",
),
# Cher - Cherokee
(
"Cher",
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2014/14064r-n4537r-cherokee.pdf section 8.
Core spec says it 'uses latin punctuation'; these are a subset of the latin-1
punct because the intent of listing them is to ensure that use in running
text works with the script.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0027 # APOSTROPHE
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
002F # SOLIDUS
003A # COLON
003B # SEMICOLON
003F # QUESTION MARK
005B # LEFT SQUARE BRACKET
005D # RIGHT SQUARE BRACKET
007E # TILDE
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0301 # COMBINING ACUTE ACCENT
0302 # COMBINING CIRCUMFLEX ACCENT
0304 # COMBINING MACRON
030B # COMBINING DOUBLE ACUTE ACCENT
030C # COMBINING CARON
0323 # COMBINING DOT BELOW
0324 # COMBINING DIAERESIS BELOW
0330 # COMBINING TILDE BELOW
0331 # COMBINING MACRON BELOW
# General Punctuation
2010 # HYPHEN
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
""",
),
# Copt - Coptic
(
"Copt",
# Comment
"""
From Core specification and
http://std.dkuug.dk/JTC1/SC2/WG2/docs/n2636.pdf
0323 referenced in the following according to Kamal:
http://ucbclassics.dreamhosters.com/djm/coptic.html
0361 and 1DCD resolve bug #748 according to MTI. We
originally intended to remove them since we didn't have
a reference for their use. We still don't, but they
were re-added because of the bug.
""",
# Data
"""
# Basic Latin
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0301 # COMBINING ACUTE ACCENT
0302 # COMBINING CIRCUMFLEX ACCENT
0304 # COMBINING MACRON
0305 # COMBINING OVERLINE
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
0323 # COMBINING DOT BELOW
033F # COMBINING DOUBLE OVERLINE
0361 # COMBINING DOUBLE INVERTED BREVE
# Greek and Coptic
0374 # GREEK NUMERAL SIGN
0375 # GREEK LOWER NUMERAL SIGN
# General Punctuation
2019 # RIGHT SINGLE QUOTATION MARK
# Supplemental Punctuation
2E17 # DOUBLE OBLIQUE HYPHEN
# Combining Half Marks
FE24 # COMBINING MACRON LEFT HALF
FE25 # COMBINING MACRON RIGHT HALF
FE26 # COMBINING CONJOINING MACRON
""",
),
# Cprt - Cypriot
# Deva - Devanagari
(
"Deva",
# Comment
"""
Email from Jelle, SHY was encoded as Macron by accident.
""",
# Data
"""
# Latin-1 Supplement
00AD # SOFT HYPHEN
""",
),
# Dsrt - Deseret
# Dupl - Duployan shorthand (Duployan)
# Egyp - Egyptian hieroglyphs
# Elba - Elbasan
(
"Elba",
# Comment
"""
see http://www.unicode.org/L2/L2011/11050-n3985-elbasan.pdf
adds combining overbar and greek numerals for ones and tens, and
both stigma/digamma for 6.
""",
# Data
"""
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0305 # COMBINING OVERLINE
# Greek and Coptic
0391 # GREEK CAPITAL LETTER ALPHA
0392 # GREEK CAPITAL LETTER BETA
0393 # GREEK CAPITAL LETTER GAMMA
0394 # GREEK CAPITAL LETTER DELTA
0395 # GREEK CAPITAL LETTER EPSILON
0396 # GREEK CAPITAL LETTER ZETA
0397 # GREEK CAPITAL LETTER ETA
0398 # GREEK CAPITAL LETTER THETA
0399 # GREEK CAPITAL LETTER IOTA
039A # GREEK CAPITAL LETTER KAPPA
039B # GREEK CAPITAL LETTER LAMDA
039C # GREEK CAPITAL LETTER MU
039D # GREEK CAPITAL LETTER NU
039E # GREEK CAPITAL LETTER XI
039F # GREEK CAPITAL LETTER OMICRON
03A0 # GREEK CAPITAL LETTER PI
03DA # GREEK LETTER STIGMA
03DD # GREEK SMALL LETTER DIGAMMA
03DE # GREEK LETTER KOPPA
""",
),
# Ethi - Ethiopic
(
"Ethi",
# Comment
"""
From core specification, also see
http://abyssiniagateway.net/fidel/l10n/
Recommends combining diaeresis 'for scholarly use'; it should look Ethiopian.
Also claims hyphen is not used, but a wikipedia page in Amharic does use
it, see
https://am.wikipedia.org/wiki/1_%E1%8A%A5%E1%88%BD%E1%88%98-%E1%8B%B3%E1%8C%8B%E1%8A%95
Western numerals and punctuation should look heavier to match the Ethiopic.
A keyboard standard is here:
http://www.mcit.gov.et/documents/1268465/1282796/Keyboard+Layout+Standard/a8aa75ca-e125-4e25-872e-380e2a9b2313
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002B # PLUS SIGN
002E # FULL STOP
002F # SOLIDUS
003D # EQUALS SIGN
# Combining Diacritical Marks
0308 # COMBINING DIAERESIS
030E # COMBINING DOUBLE VERTICAL LINE ABOVE
# Mathematical Operators
22EE # VERTICAL ELLIPSIS
# Geometric Shapes
25CC # DOTTED CIRCLE
""",
),
# Geor - Georgian
(
"Geor",
# Comment
"""
From core specification (references unspecified additional latin punct), also
see example news article: http://www.civil.ge/geo/article.php?id=29970
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0025 # PERCENT SIGN
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Latin-1 Supplement
00A0 # NO-BREAK SPACE
00B7 # MIDDLE DOT
# General Punctuation
2014 # EM DASH
2056 # THREE DOT PUNCTUATION
2057 # QUADRUPLE PRIME
2058 # FOUR DOT PUNCTUATION
2059 # FIVE DOT PUNCTUATION
205A # TWO DOT PUNCTUATION
205B # FOUR DOT MARK
205C # DOTTED CROSS
205D # TRICOLON
205E # VERTICAL FOUR DOTS
20BE # LARI SIGN
# Supplemental Punctuation
2E2A # TWO DOTS OVER ONE DOT PUNCTUATION
2E2B # ONE DOT OVER TWO DOTS PUNCTUATION
2E2C # SQUARED FOUR DOT PUNCTUATION
2E2D # FIVE DOT MARK
2E31 # WORD SEPARATOR MIDDLE DOT
""",
),
# Glag - Glagolitic
(
"Glag",
# Comment
"""
See core specification. It refers to 'numerous diacritical marks', these
are not listed.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
002C # COMMA
002E # FULL STOP
003B # SEMICOLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0303 # COMBINING TILDE
0305 # COMBINING OVERLINE
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
2056 # THREE DOT PUNCTUATION
2058 # FOUR DOT PUNCTUATION
2059 # FIVE DOT PUNCTUATION
""",
),
# Goth - Gothic
(
"Goth",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0304 # COMBINING MACRON
0305 # COMBINING OVERLINE
0308 # COMBINING DIAERESIS
0331 # COMBINING MACRON BELOW
""",
),
# Gran - Grantha
(
"Gran",
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0951 # DEVANAGARI STRESS SIGN UDATTA
0952 # DEVANAGARI STRESS SIGN ANUDATTA
# Vedic Extensions
1CD0 # VEDIC TONE KARSHANA
1CD2 # VEDIC TONE PRENKHA
1CD3 # VEDIC SIGN NIHSHVASA
1CF2 # VEDIC SIGN ARDHAVISARGA
1CF3 # VEDIC SIGN ROTATED ARDHAVISARGA
1CF4 # VEDIC TONE CANDRA ABOVE
1CF8 # VEDIC TONE RING ABOVE
1CF9 # VEDIC TONE DOUBLE RING ABOVE
# Combining Diacritical Marks for Symbols
20F0 # COMBINING ASTERISK ABOVE
""",
),
# Gujr - Gujarati
# Guru - Gurmukhi
(
"Guru",
# Comment
"""
From core specification.
""",
# Data
"""
# Miscellaneous Symbols
262C # ADI SHAKTI
""",
),
# Hano - Hanunoo
# Hatr - Hatr (Hatran)
(
"Hatr",
# Comment
"""
See http://www.unicode.org/L2/L2012/12312-n4324-hatran.pdf (most info, but
not the latest assignment, which doesn't have all digits shown here).
Single and double vertical line, also ZWNJ in case ligatures need breaking.
Might want to ligate hatran digit 1 forms 11 (2), 111 (3), 1111 (4) to
look the way the suggested (dropped) digits were represented in the doc.
""",
# Data
"""
# Basic Latin
007C # VERTICAL LINE
# General Punctuation
200C # ZERO WIDTH NON-JOINER
2016 # DOUBLE VERTICAL LINE
""",
),
# Hebr - Hebrew
(
"Hebr",
# Comment
"""
From core specification, adds currency.
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
# Combining Diacritical Marks
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
034F # COMBINING GRAPHEME JOINER
# General Punctuation
200C # ZERO WIDTH NON-JOINER
200D # ZERO WIDTH JOINER
200E # LEFT-TO-RIGHT MARK
200F # RIGHT-TO-LEFT MARK
# Currency Symbols
20AA # NEW SHEQEL SIGN
# Letterlike Symbols
2135 # ALEF SYMBOL
2136 # BET SYMBOL
2137 # GIMEL SYMBOL
2138 # DALET SYMBOL
""",
),
# Hluw - Anatolian Hieroglyphs
(
"Hluw",
# Comment
"""
see http://www.unicode.org/L2/L2012/12213-n4282-anatolian.pdf
""",
# Data
"""
# General Punctuation
200B # ZERO WIDTH SPACE
""",
),
# Hmng - Pahawh Hmong
# Hrkt - Japanese syllabaries (Katakana Or Hiragana)
# Hung - Old Hungarian
(
"Hung",
# Comment
"""
see http://www.unicode.org/L2/L2012/12168r-n4268r-oldhungarian.pdf
letters with LTR override mirror reverse (!) "which has to be handled by
the rendering engine"
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
003A # COLON
# General Punctuation
200D # ZERO WIDTH JOINER
2010 # HYPHEN
201F # DOUBLE HIGH-REVERSED-9 QUOTATION MARK
204F # REVERSED SEMICOLON
205A # TWO DOT PUNCTUATION
205D # TRICOLON
205E # VERTICAL FOUR DOTS
# Supplemental Punctuation
2E2E # REVERSED QUESTION MARK
2E31 # WORD SEPARATOR MIDDLE DOT
2E41 # REVERSED COMMA
2E42 # DOUBLE LOW-REVERSED-9 QUOTATION MARK
""",
),
# Ital - Old Italic
# Java - Javanese
# Kali - Kayah Li
(
"Kali",
# Comment
"""
From core specification, also see
http://www.unicode.org/L2/L2006/06073-n3038r-kayahli.pdf
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
003F # QUESTION MARK
# General Punctuation
2010 # HYPHEN
""",
),
# Khar - Kharoshthi
(
"Khar",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002D # HYPHEN-MINUS
# General Punctuation
2010 # HYPHEN
""",
),
# Khmr - Khmer
(
"Khmr",
# Comment
"""
Latin punct see web sites
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
""",
),
# Khoj - Khojki
(
"Khoj",
# Comment
"""
From core specification, also see
http://www.unicode.org/L2/L2011/11021-khojki.pdf
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
003B # SEMICOLON
# General Punctuation
2013 # EN DASH
2026 # HORIZONTAL ELLIPSIS
""",
),
# Knda - Kannada
# Kthi - Kaithi
(
"Kthi",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002B # PLUS SIGN
002D # HYPHEN-MINUS
# General Punctuation
2010 # HYPHEN
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
""",
),
# LGC - (Latn,Grek,Cyrl)
(
"LGC",
# Comment
"""
FE00 is for variant zero.
""",
# Data
"""
# Spacing Modifier Letters
02EA # MODIFIER LETTER YIN DEPARTING TONE MARK
02EB # MODIFIER LETTER YANG DEPARTING TONE MARK
# Letterlike Symbols
2100 # ACCOUNT OF
2101 # ADDRESSED TO THE SUBJECT
2103 # DEGREE CELSIUS
2105 # CARE OF
2106 # CADA UNA
2109 # DEGREE FAHRENHEIT
2113 # SCRIPT SMALL L
2116 # NUMERO SIGN
2117 # SOUND RECORDING COPYRIGHT
211E # PRESCRIPTION TAKE
2120 # SERVICE MARK
2121 # TELEPHONE SIGN
2122 # TRADE MARK SIGN
2127 # INVERTED OHM SIGN
2129 # TURNED GREEK SMALL LETTER IOTA
212E # ESTIMATED SYMBOL
213B # FACSIMILE SIGN
214B # TURNED AMPERSAND
214D # AKTIESELSKAB
# Number Forms
2150 # VULGAR FRACTION ONE SEVENTH
2151 # VULGAR FRACTION ONE NINTH
2152 # VULGAR FRACTION ONE TENTH
2153 # VULGAR FRACTION ONE THIRD
2154 # VULGAR FRACTION TWO THIRDS
2155 # VULGAR FRACTION ONE FIFTH
2156 # VULGAR FRACTION TWO FIFTHS
2157 # VULGAR FRACTION THREE FIFTHS
2158 # VULGAR FRACTION FOUR FIFTHS
2159 # VULGAR FRACTION ONE SIXTH
215A # VULGAR FRACTION FIVE SIXTHS
215B # VULGAR FRACTION ONE EIGHTH
215C # VULGAR FRACTION THREE EIGHTHS
215D # VULGAR FRACTION FIVE EIGHTHS
215E # VULGAR FRACTION SEVEN EIGHTHS
215F # FRACTION NUMERATOR ONE
2184 # LATIN SMALL LETTER REVERSED C
2189 # VULGAR FRACTION ZERO THIRDS
# Variation Selectors
FE00 # VARIATION SELECTOR-1
# Specials
FFFC # OBJECT REPLACEMENT CHARACTER
FFFD # REPLACEMENT CHARACTER
""",
),
# Lana - Lanna (Tai Tham)
# Laoo - Lao
(
"Laoo",
# Comment
"""
For latin punct use see web sites, e.g. nuol.edu.la
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
003A # COLON
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
# Currency Symbols
20AD # KIP SIGN
""",
),
# Lepc - Lepcha
(
"Lepc",
# Comment
"""
From core specification, only the specifically mentioned punct.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003F # QUESTION MARK
""",
),
# Limb - Limbu
(
"Limb",
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0965 # DEVANAGARI DOUBLE DANDA
""",
),
# Lina - Linear A
# Linb - Linear B
# Lisu - Fraser (Lisu)
(
"Lisu",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002D # HYPHEN-MINUS
003A # COLON
003B # SEMICOLON
003F # QUESTION MARK
# Spacing Modifier Letters
02BC # MODIFIER LETTER APOSTROPHE
02CD # MODIFIER LETTER LOW MACRON
# General Punctuation
2010 # HYPHEN
2026 # HORIZONTAL ELLIPSIS
# CJK Symbols and Punctuation
300A # LEFT DOUBLE ANGLE BRACKET
300B # RIGHT DOUBLE ANGLE BRACKET
""",
),
# Lyci - Lycian
(
"Lyci",
# Comment
"""
From core specification.
""",
# Data
"""
# General Punctuation
205A # TWO DOT PUNCTUATION
""",
),
# Lydi - Lydian
(
"Lydi",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
""",
),
# MUSIC - MUSIC
(
"MUSIC",
# Comment
"""
Characters not in standard music blocks.
""",
# Data
"""
# Miscellaneous Symbols
2669 # QUARTER NOTE
266A # EIGHTH NOTE
266B # BEAMED EIGHTH NOTES
266C # BEAMED SIXTEENTH NOTES
266D # MUSIC FLAT SIGN
266E # MUSIC NATURAL SIGN
266F # MUSIC SHARP SIGN
""",
),
# Mahj - Mahajani
(
"Mahj",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002D # HYPHEN-MINUS
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
# General Punctuation
2013 # EN DASH
""",
),
# Mand - Mandaean (Mandaic)
(
"Mand",
# Comment
"""
From core specification.
""",
# Data
"""
# Arabic
0640 # ARABIC TATWEEL
""",
),
# Mani - Manichaean
# Marc - Marc (Marchen)
# Mend - Mende (Mende Kikakui)
# Merc - Meroitic Cursive
(
"Merc",
# Comment
"""
From core specification.
also see http://www.unicode.org/L2/L2009/09188r-n3646-meroitic.pdf
""",
# Data
"""
# Basic Latin
003A # COLON
# General Punctuation
2026 # HORIZONTAL ELLIPSIS
205D # TRICOLON
""",
),
# Mero - Meroitic (Meroitic Hieroglyphs)
# Mlym - Malayalam
# Modi - Modi
(
"Modi",
# Comment
"""
From core specification, also see
http://www.unicode.org/L2/L2011/11212r2-n4034-modi.pdf
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003B # SEMICOLON
""",
),
# Mong - Mongolian
(
"Mong",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
003F # QUESTION MARK
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
2048 # QUESTION EXCLAMATION MARK
2049 # EXCLAMATION QUESTION MARK
""",
),
# Mroo - Mro
# Mtei - Meitei Mayek (Meetei Mayek)
# Mult - Mult (Multani)
# Mymr - Myanmar
(
"Mymr",
# Comment
"""
From core specification; FE00 is for dotted forms.
""",
# Data
"""
# General Punctuation
200B # ZERO WIDTH SPACE
# Variation Selectors
FE00 # VARIATION SELECTOR-1
""",
),
# Narb - Old North Arabian
# Nbat - Nabataean
# Newa - Newa
# Nkoo - N'Ko (N'Ko)
(
"Nkoo",
# Comment
"""
From core specification.
""",
# Data
"""
# Arabic
060C # ARABIC COMMA
061B # ARABIC SEMICOLON
061F # ARABIC QUESTION MARK
# Supplemental Punctuation
2E1C # LEFT LOW PARAPHRASE BRACKET
2E1D # RIGHT LOW PARAPHRASE BRACKET
# Arabic Presentation Forms-A
FD3E # ORNATE LEFT PARENTHESIS
FD3F # ORNATE RIGHT PARENTHESIS
""",
),
# Ogam - Ogham
# Olck - Ol Chiki
(
"Olck",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
002C # COMMA
003F # QUESTION MARK
# General Punctuation
2014 # EM DASH
2018 # LEFT SINGLE QUOTATION MARK
2019 # RIGHT SINGLE QUOTATION MARK
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
""",
),
# Orkh - Orkhon (Old Turkic)
(
"Orkh",
# Comment
"""
From core specification.
""",
# Data
"""
# General Punctuation
205A # TWO DOT PUNCTUATION
# Supplemental Punctuation
2E30 # RING POINT
""",
),
# Orya - Oriya
# Osge - Osge (Osage)
(
"Osge",
# Comment
"""
Added by Monotype.
""",
# Data
"""
# Combining Diacritical Marks
0301 # COMBINING ACUTE ACCENT
0304 # COMBINING MACRON
030B # COMBINING DOUBLE ACUTE ACCENT
0358 # COMBINING DOT ABOVE RIGHT
# Geometric Shapes
25CC # DOTTED CIRCLE
""",
),
# Osma - Osmanya
# Palm - Palmyrene
# Pauc - Pau Cin Hau
(
"Pauc",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002E # FULL STOP
""",
),
# Perm - Old Permic
(
"Perm",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0027 # APOSTROPHE
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0306 # COMBINING BREVE
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
0313 # COMBINING COMMA ABOVE
# Cyrillic
0483 # COMBINING CYRILLIC TITLO
# Combining Diacritical Marks for Symbols
20DB # COMBINING THREE DOTS ABOVE
""",
),
# Phag - Phags-pa
# Phli - Inscriptional Pahlavi
# Phlp - Psalter Pahlavi
(
"Phlp",
# Comment
"""
From core specification.
""",
# Data
"""
# Arabic
0640 # ARABIC TATWEEL
""",
),
# Phnx - Phoenician
# Plrd - Pollard Phonetic (Miao)
# Prti - Inscriptional Parthian
# Rjng - Rejang
(
"Rjng",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003A # COLON
""",
),
# Runr - Runic
# Samr - Samaritan
(
"Samr",
# Comment
"""
From core specification.
""",
# Data
"""
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
""",
),
# Sarb - Old South Arabian
# Saur - Saurashtra
(
"Saur",
# Comment
"""
From core specification, only the specifically mentioned punct.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003F # QUESTION MARK
""",
),
# Sgnw - SignWriting
# Shaw - Shavian
(
"Shaw",
# Comment
"""
From core specification.
""",
# Data
"""
# Latin-1 Supplement
00B7 # MIDDLE DOT
""",
),
# Shrd - Sharada
# Sidd - Siddham
# Sind - Khudawadi
(
"Sind",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
# General Punctuation
2013 # EN DASH
2014 # EM DASH
""",
),
# Sinh - Sinhala
(
"Sinh",
# Comment
"""
From core specification, plus unspecified latin punctuation seen on web
sites.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
# Devanagari
0964 # DEVANAGARI DANDA
""",
),
# Sora - Sora Sompeng
(
"Sora",
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2009/09189r-n3647r-sora-sompeng.pdf
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
003B # SEMICOLON
# General Punctuation
2010 # HYPHEN
""",
),
# Sund - Sundanese
(
"Sund",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
002D # HYPHEN-MINUS
003C # LESS-THAN SIGN
003E # GREATER-THAN SIGN
003F # QUESTION MARK
# General Punctuation
2010 # HYPHEN
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
""",
),
# Sylo - Syloti Nagri
(
"Sylo",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
# General Punctuation
2055 # FLOWER PUNCTUATION MARK
""",
),
# Syrc - Syriac
(
"Syrc",
# Comment
"""
From core specification. In it, the reference to 'arabic harakat' used with
Garshuni is based on the Harakat section of the wikipedia page on Arabic
diacritics.
""",
# Data
"""
# Combining Diacritical Marks
0303 # COMBINING TILDE
0304 # COMBINING MACRON
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
030A # COMBINING RING ABOVE
0320 # COMBINING MINUS SIGN BELOW
0323 # COMBINING DOT BELOW
0324 # COMBINING DIAERESIS BELOW
0325 # COMBINING RING BELOW
032D # COMBINING CIRCUMFLEX ACCENT BELOW
032E # COMBINING BREVE BELOW
0330 # COMBINING TILDE BELOW
# Arabic
060C # ARABIC COMMA
061B # ARABIC SEMICOLON
061F # ARABIC QUESTION MARK
0640 # ARABIC TATWEEL
064E # ARABIC FATHA
064F # ARABIC DAMMA
0650 # ARABIC KASRA
0651 # ARABIC SHADDA
0652 # ARABIC SUKUN
0653 # ARABIC MADDAH ABOVE
0670 # ARABIC LETTER SUPERSCRIPT ALEF
0671 # ARABIC LETTER ALEF WASLA
# General Punctuation
200C # ZERO WIDTH NON-JOINER
""",
),
# Tagb - Tagbanwa
# Takr - Takri
(
"Takr",
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
""",
),
# Tale - Tai Le
(
"Tale",
# Comment
"""
From core specification & http://www.unicode.org/L2/L2001/01369-n2372.pdf
Myanmar digits have glyphic variants according to the spec.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003A # COLON
003F # QUESTION MARK
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0301 # COMBINING ACUTE ACCENT
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
030C # COMBINING CARON
# Myanmar
1040 # MYANMAR DIGIT ZERO
1041 # MYANMAR DIGIT ONE
1042 # MYANMAR DIGIT TWO
1043 # MYANMAR DIGIT THREE
1044 # MYANMAR DIGIT FOUR
1045 # MYANMAR DIGIT FIVE
1046 # MYANMAR DIGIT SIX
1047 # MYANMAR DIGIT SEVEN
1048 # MYANMAR DIGIT EIGHT
1049 # MYANMAR DIGIT NINE
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
# CJK Symbols and Punctuation
3002 # IDEOGRAPHIC FULL STOP
""",
),
# Talu - New Tai Lue
# Taml - Tamil
(
"Taml",
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2010/10407-ext-tamil-follow2.pdf
""",
# Data
"""
# Latin-1 Supplement
00B2 # SUPERSCRIPT TWO
00B3 # SUPERSCRIPT THREE
# Superscripts and Subscripts
2074 # SUPERSCRIPT FOUR
2082 # SUBSCRIPT TWO
2083 # SUBSCRIPT THREE
2084 # SUBSCRIPT FOUR
""",
),
# Tang - Tangut
# Tavt - Tai Viet
(
"Tavt",
# Comment
"""
Used in SIL fonts.
""",
# Data
"""
# Latin Extended-D
A78B # LATIN CAPITAL LETTER SALTILLO
A78C # LATIN SMALL LETTER SALTILLO
""",
),
# Telu - Telugu
# Tfng - Tifinagh
(
"Tfng",
# Comment
"""
From core specification.
""",
# Data
"""
# Combining Diacritical Marks
0302 # COMBINING CIRCUMFLEX ACCENT
0304 # COMBINING MACRON
0307 # COMBINING DOT ABOVE
0309 # COMBINING HOOK ABOVE
# General Punctuation
200D # ZERO WIDTH JOINER
""",
),
# Tglg - Tagalog
# Thaa - Thaana
(
"Thaa",
# Comment
"""
From core specification, parens from text sample. Probably other punct
as well, but the spec does not list it.
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002E # FULL STOP
# Arabic
060C # ARABIC COMMA
061B # ARABIC SEMICOLON
061F # ARABIC QUESTION MARK
""",
),
# Thai - Thai
(
"Thai",
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2010/10451-patani-proposal.pdf
for latin punct see web sites e.g. pandip.com, sanook.com
Baht is already here, or should be.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
003A # COLON
003F # QUESTION MARK
# Spacing Modifier Letters
02BC # MODIFIER LETTER APOSTROPHE
02D7 # MODIFIER LETTER MINUS SIGN
# Combining Diacritical Marks
0303 # COMBINING TILDE
0331 # COMBINING MACRON BELOW
# General Punctuation
200B # ZERO WIDTH SPACE
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
2026 # HORIZONTAL ELLIPSIS
""",
),
# Tibt - Tibetan
(
"Tibt",
# Comment
"""
Wheel of Dharma from core specification, not sure of source for vertical
line.
""",
# Data
"""
# Basic Latin
007C # VERTICAL LINE
# Miscellaneous Symbols
2638 # WHEEL OF DHARMA
""",
),
# Tirh - Tirhuta
(
"Tirh",
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
""",
),
# Ugar - Ugaritic
# Vaii - Vai
(
"Vaii",
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002C # COMMA
002D # HYPHEN-MINUS
""",
),
# Wara - Varang Kshiti (Warang Citi)
(
"Wara",
# Comment
"""
"Uses latin punctuation," so guess based on sample text from
proposal doc, see
http://www.unicode.org/L2/L2012/12118-n4259-warang-citi.pdf
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
003A # COLON
003B # SEMICOLON
003F # QUESTION MARK
# General Punctuation
2013 # EN DASH
2014 # EM DASH
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
""",
),
# Xpeo - Old Persian
# Xsux - Sumero-Akkadian Cuneiform (Cuneiform)
# Yiii - Yi
(
"Yiii",
# Comment
"""
From core specification.
Fullwidth Comma is used in UDHR text for Yi. Add standard comma
so we have a 'halfwidth' form to match.
""",
# Data
"""
# Basic Latin
002C # COMMA
# CJK Symbols and Punctuation
3001 # IDEOGRAPHIC COMMA
3002 # IDEOGRAPHIC FULL STOP
# Halfwidth and Fullwidth Forms
FF0C # FULLWIDTH COMMA
""",
),
]
# This is a utility function that parses the _SCRIPT_REQUIRED data
# and spits it out again in the above format. When editing the
# above data, just type in the hex values, then run this to regenerate
# the source in sorted order with block labels and codepoint names.
def _regen_script_required():
"""Rerun after editing script required to check/reformat."""
script_to_comment_and_data = {
script: (comment, data) for script, comment, data in _SCRIPT_REQUIRED
}
scripts = set(unicode_data.all_scripts())
for to_script, from_scripts in _MERGED_SCRIPTS_BY_TARGET.items():
scripts.add(to_script)
scripts -= set(from_scripts)
# keep extra script data, e.g. 'Aran'
scripts.update(set(script_to_comment_and_data.keys()))
scripts -= {"Zinh", "Zyyy", "Zzzz"}
for script in sorted(scripts):
if script in _MERGED_SCRIPTS_BY_TARGET:
script_name = "(%s)" % ",".join(_MERGED_SCRIPTS_BY_TARGET[script])
else:
script_name = cldr_data.get_english_script_name(script)
try:
unicode_script_name = unicode_data.human_readable_script_name(script)
if script_name.lower() != unicode_script_name.lower():
script_name += " (%s)" % unicode_script_name
except KeyError:
pass
script_name = script_name.replace(chr(0x2019), "'")  # chr: Python 3 (was unichr)
print(" # %s - %s" % (script, script_name))
if script in script_to_comment_and_data:
print(" ('%s'," % script)
lines = []
comment, data = script_to_comment_and_data[script]
lines.append(" # Comment")
lines.append('"""')
for line in comment.strip().splitlines():
lines.append(line.strip())
lines.append('""",')
lines.append("# Data")
lines.append('"""')
cps = tool_utils.parse_int_ranges(data)
block = None
for cp in sorted(cps):
cp_block = unicode_data.block(cp)
if cp_block != block:
block = cp_block
lines.append("# " + block)
cp_name = unicode_data.name(cp, "<unnamed>")
lines.append("%04X # %s" % (cp, cp_name))
lines.append('"""),')
print("\n ".join(lines))
print()
def _assign_script_required(cmap_ops):
"""Assign extra characters for various scripts."""
for script, _, data in _SCRIPT_REQUIRED:
extra = tool_utils.parse_int_ranges(data)
cmap_ops.phase("assign script required for " + script)
cmap_ops.add_all(extra, script)
# Because of a miscommunication the Coptic EPACT numbers, which had
# been assigned to SYM2, were also added to the Coptic font because
# they were listed in a bug from two years ago. So we'll now put them
# in the Coptic requirements just so we know they're supposed to be there.
epact = tool_utils.parse_int_ranges("102e0-102fb")
cmap_ops.add_all(epact, "Copt")
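# A hedged note on the range syntax used throughout this file: assuming
# tool_utils.parse_int_ranges expands hex ranges into a set of ints,
# parse_int_ranges("102e0-102fb") yields the 28 codepoints 0x102E0
# through 0x102FB inclusive (the Coptic Epact numbers added above).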
def _assign_script_special_chars(cmap_ops):
"""Assign special characters listed in opentype_data."""
cmap_ops.phase("assign special chars")
for script, chars in opentype_data.SPECIAL_CHARACTERS_NEEDED.items():
cmap_ops.add_all(frozenset(chars), script)
def _assign_legacy_phase2(cmap_ops):
"""Assign legacy chars in some scripts, excluding some blocks."""
legacy_data = cmap_data.read_cmap_data_file("data/noto_cmap_phase2.xml")
legacy_map = cmap_data.create_map_from_table(legacy_data.table)
legacy_script_to_chars = {
script: tool_utils.parse_int_ranges(row.ranges)
for script, row in legacy_map.items()
}
# The default is to include all legacy characters, except for the chars
# listed for these scripts, for some default chars, and for some scripts.
# TODO: find out why these were included in the phase two fonts.
# This excludes lots of punctuation and digits from Cham, Khmer, and Lao
# but leaves some common latin characters like quotes, parens, comma/period,
# and so on.
exclude_script_ranges = {
"Cham": "23-26 2A-2B 30-39 3C-3E 40 5B-60 7B-7E 037E",
"Deva": "00AF", # Jelle says this was encoded by accident, should be 00AD
"Kthi": "0030-0039",
"Khmr": "23-26 2A-2B 30-39 3C-3E 40 5B-60 7B-7E 037E",
"LGC": "03E2",
"Lana": "2219",
"Laoo": "23-26 2A-2B 30-39 3C-3E 40 5B-60 7B-7E 037E",
"Limb": "0964", # I think double-danda was intended
"Mlym": "0307 0323",
"Syrc": "250C 2510", # box drawing?
"Tavt": "A78C",
}
# mono temporarily
ignore_legacy = frozenset("LGC Zsye Zsym MONO".split())
ignore_cps = frozenset([0x0, 0xD, 0x20, 0xA0, 0xFEFF])
cmap_ops.phase("assign legacy phase 2")
script_to_chars = cmap_ops.create_script_to_chars()
for script in sorted(legacy_script_to_chars):
if script not in script_to_chars:
cmap_ops.log("skipping script %s" % script)
continue
if script in ignore_legacy:
cmap_ops.log("ignoring %s" % script)
continue
script_chars = script_to_chars[script]
legacy_chars = legacy_script_to_chars[script]
missing_legacy = set(legacy_chars) - set(script_chars) - ignore_cps
if script in exclude_script_ranges:
ranges = exclude_script_ranges[script]
missing_legacy -= set(tool_utils.parse_int_ranges(ranges))
if missing_legacy:
cmap_ops.phase("assign legacy %s" % script)
cmap_ops.add_all(missing_legacy, script)
def _check_CJK():
# not used; kept for reference (relies on locals of _assign_legacy_phase2)
# check CJK
cmap_ops.log("check cjk legacy")
legacy_cjk_chars = set()
for script in _MERGED_SCRIPTS_BY_TARGET["CJK"]:
if script in legacy_script_to_chars:
legacy_cjk_chars |= legacy_script_to_chars[script]
cjk_chars = script_to_chars["CJK"]
not_in_legacy = cjk_chars - legacy_cjk_chars
# ignore plane 2 and above
not_in_legacy -= set(range(0x20000, 0x120000))
if not_in_legacy:
print("not in legacy (%d):" % len(not_in_legacy))
compare_cmap_data._print_detailed(not_in_legacy)
not_in_new = legacy_cjk_chars - cjk_chars
if not_in_new:
print("not in new (%d):" % len(not_in_new))
compare_cmap_data._print_detailed(not_in_new)
def _assign_bidi_mirroring(cmap_ops):
"""Ensure that if a bidi mirroring char is in a font, its mirrored char
is too."""
cmap_ops.phase("bidi mirroring")
script_to_chars = cmap_ops.create_script_to_chars()
mirrored = unicode_data.mirrored_chars()
for script, cps in sorted(script_to_chars.items()):
mirrored_in_script = cps & mirrored
if not mirrored_in_script:
continue
sibs = set(unicode_data.bidi_mirroring_glyph(cp) for cp in mirrored_in_script)
missing_sibs = sibs - mirrored_in_script
if missing_sibs:
cmap_ops.log("adding %d missing bidi chars" % len(missing_sibs))
cmap_ops.add_all(missing_sibs, script)
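# Illustrative example (hedged): U+0028 LEFT PARENTHESIS is a bidi-mirrored
# character whose mirroring glyph is U+0029 RIGHT PARENTHESIS, so any script
# cmap containing 0028 gets 0029 added by the pass above.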
def _unassign_lgc_from_symbols(cmap_ops):
"""Characters in LGC don't need to be in Symbols or Sym2."""
cmap_ops.phase("unassign lgc from symbols")
lgc_set = frozenset(cmap_ops.script_chars("LGC"))
sym_set = frozenset(cmap_ops.script_chars("Zsym"))
sym2_set = frozenset(cmap_ops.script_chars("SYM2"))
sym_set_to_remove = sym_set & lgc_set
sym2_set_to_remove = sym2_set & lgc_set
# Combining enclosing marks in Symbols need latin to combine with, so add
# letters and digits, also dotted circle if not there already.
alphanum = tool_utils.parse_int_ranges("30-39 41-5a 61-7a 25cc")
sym_set_to_remove -= alphanum
cmap_ops.remove_all(sym_set_to_remove, "Zsym")
cmap_ops.remove_all(sym2_set_to_remove, "SYM2")
def _assign_programming_lang_symbols(cmap_ops):
"""Assign characters used in programming languages, which generally
should be in MONO and in some cases need to be compatible with math
in general."""
def add_mirrored(cps):
mirrored_cps = set()
for cp in cps:
if unicode_data.mirrored(cp):
mirrored_glyph = unicode_data.bidi_mirroring_glyph(cp)
if mirrored_glyph is not None:
mirrored_cps.add(mirrored_glyph)
cps |= mirrored_cps
# some characters we want to preserve in symbols despite adding them
# to math.
preserve_symbols_cps = tool_utils.parse_int_ranges(
"""
2190 # LEFTWARDS ARROW
2191 # UPWARDS ARROW
2192 # RIGHTWARDS ARROW
2193 # DOWNWARDS ARROW
2194 # LEFT RIGHT ARROW
2195 # UP DOWN ARROW
2474 # PARENTHESIZED DIGIT ONE
2475 # PARENTHESIZED DIGIT TWO
266d # MUSIC FLAT SIGN
266e # MUSIC NATURAL SIGN
266f # MUSIC SHARP SIGN
27f6 # LONG RIGHTWARDS ARROW
"""
)
# similarly, preserve some in symbols2
preserve_symbols2_cps = tool_utils.parse_int_ranges(
"""
21e8 # RIGHTWARDS WHITE ARROW
2219 # BULLET OPERATOR
2299 # CIRCLED DOT OPERATOR
25a1 # WHITE SQUARE
25b7 # WHITE RIGHT-POINTING TRIANGLE
25bb # WHITE RIGHT-POINTING POINTER
25c2 # BLACK LEFT-POINTING SMALL TRIANGLE
25c3 # WHITE LEFT-POINTING SMALL TRIANGLE
25c5 # WHITE LEFT-POINTING POINTER
25c7 # WHITE DIAMOND
25c8 # WHITE DIAMOND CONTAINING BLACK SMALL DIAMOND
25cb # WHITE CIRCLE
2736 # SIX POINTED BLACK STAR
"""
)
cmap_ops.phase("programming - haskell")
# see noto-fonts#669 agda non-ascii character list
haskell_cps = tool_utils.parse_int_ranges(
"""
00a0 00ac 00b2 00b7 00b9 00bd 00d7 00e0 00e9 00f3 00f6-00f7 019b
02b0 02b3 02e1-02e2 0307 0393 0398 03a0 03a3 03b5 03b7 03bb-03be
03c1 03c3-03c4 03c6 03c8-03c9 2022 2026 2032-2033 203c 203f
2045-2046 2070 207a-207b 207f-2089 2113 2115 211a 2124 2190-2194
219d-219e 21a0 21a2-21a3 21a6 21d0-21d4 21db 21e8 2200-2201
2203-2205 2208-2209 220b 220e 2218-2219 221e 2223 2227-222a
2236-2238 223c 2241 2243 2245 2247-224b 2254 2257 225f 2261-2262
2264-2265 226c 226e-2273 2275 227a-227b 2286-2288 228e 2291-229c
22a4-22a5 22b4 22b8 22c2-22c3 22c6 22c9-22ca 22ce 22d0 22e2
2308-230b 236e 2474-2475 25a1 25b7 25bb 25c2-25c3 25c5 25c7-25c8
266d 266f 2736 27e6-27eb 27f6 2987-2988 2a00 2a05-2a06 ff5b ff5d
"""
)
# add extra not in the set above:
# (from github.com/adobe-fonts/source-code-pro/issues/114)
haskell_cps |= tool_utils.parse_int_ranges(
"""2202 2210 2220 2234 2235 2284 2285 2289"""
)
# see comment from joeyaiello on noto-fonts/issues/669
# others mentioned in that comment are already in haskell
haskell_cps.add(0x2195)
# add mirrored cps to this set
add_mirrored(haskell_cps)
# add 'leftwards' variants (not mirrored) and a few other variants
# because it seems odd to split these groups even if there's no use for
# them in haskell.
leftwards_variants = tool_utils.parse_int_ranges(
"""
# Arrows
219c # LEFTWARDS WAVE ARROW (ref 219d)
21a4 # LEFTWARDS ARROW FROM BAR (ref 21a6)
21da # LEFTWARDS TRIPLE ARROW (ref 21db)
21e6 # LEFTWARDS WHITE ARROW (ref 21e8)
# Miscellaneous Technical
2310 # REVERSED NOT SIGN (ref 00ac)
2319 # TURNED NOT SIGN (ref 00ac)
# Miscellaneous Symbols
266e # MUSIC NATURAL SIGN (ref 266d)
# Supplemental Arrows-A
27f5 # LONG LEFTWARDS ARROW (ref 27f6)
"""
)
haskell_cps |= leftwards_variants
cmap_ops.add_all_to_all(haskell_cps, ["Zmth", "MONO"])
cmap_ops.remove_all(haskell_cps - preserve_symbols_cps, "Zsym")
cmap_ops.remove_all(haskell_cps - preserve_symbols2_cps, "SYM2")
cmap_ops.phase("programming - APL")
# For the below APL sets, see noto-fonts#751
apl_cps = tool_utils.parse_int_ranges(
"""
0021 0024 0027-0029 002b-002c 002e-002f 003a-003f 005b-005d 005f
007b 007d 00a8 00af 00d7 00f7 2190-2193 2205-2207 220a 2212 2218
2223 2227-222a 2235 223c 2260-2262 2264-2265 2282-2283 2286-2287
2296 22a2-22a5 22c4 22c6 2308 230a 2336-237a 2395 25cb
"""
)
# Do not use circled uppercase letters as a substitute for APL underscored
# letters. Dyalog APL does this and hacks a font to make them render as
# underscored. Also apl385 does this and renders these as underscored. This
# is contrary to Unicode (which should just have gone ahead and encoded these,
# but I guess balked since they were already kind of deprecated by that time).
# apl_cps |= tool_utils.parse_int_ranges('24B6-24CF')
# additionally requested relational algebra symbols
apl_cps |= tool_utils.parse_int_ranges("22c8-22ca 25b7 27d5-27d7")
# additionally requested NARS symbols
apl_cps |= tool_utils.parse_int_ranges("00a7 03c0 221a 221e 2299")
add_mirrored(apl_cps)
# Android doesn't want MONO as a fallback, so no codepoint should be added
# only to MONO and not to any other Noto font.
cmap_ops.add_all_to_all(apl_cps, ["MONO", "Zmth"])
def _assign_symbols_from_groups(cmap_ops):
"""Use 'group data' to assign various symbols to Zmth, Zsym, SYM2,
MONO, MUSIC' based on character groups. This fine-tunes the block
assignments (some related symbols are scattered across blocks,
and symbols blocks are themselves mixed)."""
cmap_ops.phase("assign symbols from groups")
with open("codepoint_groups.txt", "r") as f:
for lineix, line in enumerate(f):
ix = line.find("#")
if ix >= 0:
line = line[:ix]
line = line.strip()
if not line:
continue
cols = [s.strip() for s in line.split(";")]
if not len(cols) == 3:
print('incorrect cols on line %d "%s"' % (lineix, line))
continue  # malformed line; cols[1]/cols[2] below would raise IndexError
if cols[0] == "":
# no assignments for this line
continue
add, remove = [], []
for s in cols[0].split():
if s.startswith("-"):
remove.append(s[1:])
else:
add.append(s)
name = cols[1]
# We use parens to delimit parts of the ranges that are 'for
# reference' but should not impact codepoint assignment.
# since parse_int_ranges doesn't understand these, strip
# out the parenthesized sections. These don't nest, but we
# don't check for this, only that open parens are closed.
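# A hypothetical codepoint_groups.txt line in this format might be:
#   Zsym -SYM2 ; simple arrows ; 2190-2193 (21a4 21a6 for reference)
# which would add U+2190..U+2193 to Zsym, remove them from SYM2, and
# ignore the parenthesized codepoints.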
ranges = cols[2]
parts = None
ix = 0
while ix < len(ranges):
open_p = ranges.find("(", ix)
if open_p < 0:
if parts is not None:
parts.append(ranges[ix:].strip())
break
close_p = ranges.find(")", open_p + 1)
if close_p < 0:
raise Exception(
'unclosed paren in ranges on line %d "%s"' % (lineix, line)
)
if parts is None:
parts = []
parts.append(ranges[ix:open_p])
ix = close_p + 1
if parts:
ranges = " ".join(parts)
try:
cps = tool_utils.parse_int_ranges(ranges)
except Exception as err:
sys.stderr.write(str(err) + "\n")  # err may be an Exception object
sys.stderr.write(cols[2] + "\n")
sys.stderr.write('problem on %d "%s"\n' % (lineix, line))
raise
if len(cps) > 50:
sys.stderr.write(
'large range (%d) on %d "%s"\n' % (len(cps), lineix, line)
)
cmap_ops.log("group: %s (%d)" % (name, len(cps)))
if add:
cmap_ops.add_all_to_all(cps, add)
if remove:
cmap_ops.remove_all_from_all(cps, remove)
def _assign_mono(cmap_ops):
"""Monospace should be similar to LGC, with the addition of box drawing
and block elements. It should also include all CP437 codepoints."""
cmap_ops.phase("assign mono")
lgc_chars = cmap_ops.script_chars("LGC")
cmap_ops.add_all(lgc_chars, "MONO")
cp437_cps = unicode_data.codeset("cp437")
cmap_ops.phase("assign cp437 to mono")
assert cp437_cps is not None
cmap_ops.add_all(cp437_cps, "MONO")
# for variant zero
cmap_ops.add(0xFE00, "MONO")
# geometric shapes should be in MONO too, many are but they're scattered
cmap_ops.add_all(_block_cps("Geometric Shapes"), "MONO")
def _assign_sym2(cmap_ops):
"""SYM2 should support enclosing keycaps, used to be in B/W Emoji."""
cmap_ops.phase("assign sym2")
keycap_chars = tool_utils.parse_int_ranges(
"""
0023 # Number Sign
002A # Asterisk
0030-0039 # Digits
20E3 # Combining Enclosing Keycap"""
)
cmap_ops.add_all(keycap_chars, "SYM2")
def _assign_math(cmap_ops):
"""No longer use STIX character set, we will just fallback for characters
not in math. However, we want much of math to work without fallback, for
instance we need character ranges for the combining marks, and want a serif
form of the ASCII, so we duplicate more than usual."""
cmap_ops.phase("assign math")
# We keep this here for awhile for reference, but no longer use it.
STIX_CPS = tool_utils.parse_int_ranges(
"""
0020-007e 00a0-0180 0188 0190 0192 0195 0199-019b 019e 01a0-01a1 01a5
01aa-01ab 01ad 01af-01b0 01b5 01ba-01bb 01be 01c0-01c3 01f0 01fa-01ff
0221 0234-0237 02b0-02e9 02ec-02ed 0300-033f 0346 034c 0359 035c
0360-0362 037e 0384-038a 038c 038e-03a1 03a3-03ce 03d0-03d2 03d5-03d6
03d8-03e1 03f0-03f1 03f4-03f6 0401-040c 040e-044f 0451-045c 045e-045f
0462-0463 046a-046b 0472-0475 0490-0491 1d00 1d07 1d1c 1d84-1d85 1d8a
1d8d-1d8e 1e80-1e85 1ef2-1ef3 2010-2022 2025-2026 2030-203c 203e 2040
2043-2044 2047 204e-2052 2057 205f 207f 20a3-20a4 20a7 20ac 20d0-20d2
20d6-20d7 20db-20df 20e1 20e4-20f0 2102 2105 2107 210a-2113 2115-211e
2122 2124-2129 212b-2138 213c-214b 2153-215e 2190-21ea 21f4-22ff 2302
2305-2306 2308-2313 2315-231a 231c-2323 2329-232a 232c-232e 2332 2336
233d 233f-2340 2353 2370 237c 2393-2394 239b-23b9 23ce 23d0 23dc-23e7
2423 2460-2468 24b6-24ea 2500 2502 2506 2508 250a 250c 2510 2514 2518
251c 2524 252c 2534 253c 2550-256c 2571-2572 2584 2588 258c 2590-2593
25a1-25ff 2606 2609 260c 260e 2612 2621 2639-2644 2646-2649 2660-2667
2669-266b 266d-266f 267e 2680-2689 26a0 26a5 26aa-26ac 26b2 2709 2713
2720 272a 2736 273d 2772-2773 2780-2793 279b 27c1-27c9 27cc 27d0-27ef
27f1-27ff 2901-2aff 2b13-2b41 2b43-2b4c 2b50-2b54 3030 fb00-fb04
1d401-1d454 1d456-1d49c 1d49e-1d49f 1d4a2 1d4a5-1d4a6 1d4a9-1d4ac
1d4ae-1d4b9 1d4bb 1d4bd-1d4c3 1d4c5-1d505 1d507-1d50a 1d50d-1d514
1d516-1d51c 1d51e-1d539 1d53b-1d53e 1d540-1d544 1d546 1d54a-1d550
1d552-1d6a5 1d6a8-1d7c9 1d7ce-1d7ff
"""
)
# Assume fallback will work for these in general, but...
cmap_ops.remove_all(cmap_ops.script_chars("LGC"), "Zmth")
cmap_ops.remove_all(cmap_ops.script_chars("SYM2"), "Zmth")
# Add all printable ASCII. We're not going to rely on fallback for these
# after all.
printable_ascii = tool_utils.parse_int_ranges("0020-007e")
cmap_ops.add_all(printable_ascii, "Zmth")
# Add back blocks that get split up too arbitrarily
cmap_ops.add_all(_block_cps("Mathematical Operators"), "Zmth")
cmap_ops.add_all(_block_cps("Miscellaneous Mathematical Symbols-B"), "Zmth")
# Add back some symbols for math/logic
math_geom = tool_utils.parse_int_ranges(
"25af/b3/b7/bd/c1/ca/fb", allow_compressed=True
)
cmap_ops.add_all(math_geom, "Zmth")
# Add dotted circle, we have combining marks
cmap_ops.add(0x25CC, "Zmth")
# Add misc latin ops
# plus/minus, multiply, divide, logical not
# a7 is used in a variant of APL
latin_misc = tool_utils.parse_int_ranges("b1 d7 f7 a7 ac")
cmap_ops.add_all(latin_misc, "Zmth")
# Fill holes in math alpha blocks, again we don't fallback here after all.
math_holes = tool_utils.parse_int_ranges(
"""
2102/0a-0e/10-12/15/19-1d/24/28/2c-2d/2f-31/33-38/3c-40/45-49
""",
allow_compressed=True,
)
cmap_ops.add_all(math_holes, "Zmth")
# Add hebrew alef, bet, gimel, dalet
cmap_ops.add_all(tool_utils.parse_int_ranges("2135-2138"), "Zmth")
# Add greek regular, we can have combining marks on them too
# These correspond to the math greek alpha ranges
greek_math_regular = tool_utils.parse_int_ranges(
"391-3a1 3f4 3a3-3a9 2207 3b1-3c9 2202 3f5 3d1 3f0 3d5 3f1 3d6"
)
cmap_ops.add_all(greek_math_regular, "Zmth")
# Add primes
cmap_ops.add_all(tool_utils.parse_int_ranges("2032-2037 2057"), "Zmth")
# Duplicate some combining marks from LGC so they can apply to math chars
more_combining_marks = tool_utils.parse_int_ranges("302-303 305 307-308 330")
cmap_ops.add_all(more_combining_marks, "Zmth")
def _assign_dotted_circle(cmap_ops):
"""All scripts with combining marks should provide dotted circle (and provide
an appropriate rendering of the mark in combination with it)."""
cmap_ops.phase("assign dotted circle")
def is_combining(cp):
return unicode_data.category(cp) == "Mn"
# Note wikipedia shows Arabic marks placed w.r.t. tatweel, not the dotted
# circle, but as using dotted circle is the convention used by Unicode in
# their code charts we'll require it for Arabic too.
script_to_chars = cmap_ops.create_script_to_chars()
for script, charset in sorted(script_to_chars.items()):
if script == "EXCL":
continue
nsm = frozenset(cp for cp in charset if is_combining(cp))
if nsm:
count = len(nsm)
range_str = tool_utils.write_int_ranges(sorted(nsm)[:8])
msg = "%s has %d marks: %s" % (
script,
count,
range_str if count < 8 else range_str + "...",
)
cmap_ops.log(msg)
cmap_ops.add(0x25CC, script)
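# Illustrative example (hedged): Deva includes combining marks such as
# U+0901 DEVANAGARI SIGN CANDRABINDU (category Mn), so U+25CC is added to
# Deva and the font is expected to render each mark positioned on the
# dotted circle.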
def _remove_unwanted(cmap_ops):
"""Remove characters we know we don't want in any font."""
# Chars we never want.
unwanted_chars = tool_utils.parse_int_ranges(
"""
0000-001f # C0 controls
007F # DEL
0080-009f # C1 controls
FEFF # BOM"""
)
# Chars we don't want, but perhaps a bit more provisionally than the
# above.
excluded_chars = tool_utils.parse_int_ranges(
"""
332c # Jungshik says excluded on purpose
fa70-fad9 # Jungshik says Ken regards DPRK compatibility chars as
# outside of scope, like most of plane 2.
1b000-1b001 # Ken says these are controversial."""
)
cmap_ops.phase("remove unwanted")
cmap_ops.remove_all_from_all(unwanted_chars, cmap_ops.all_scripts())
cmap_ops.add_all(unwanted_chars, "EXCL")
cmap_ops.phase("remove excluded")
cmap_ops.remove_all_from_all(excluded_chars, cmap_ops.all_scripts())
cmap_ops.add_all(excluded_chars, "EXCL")
def _assign_wanted(cmap_ops):
"""After we remove the characters we 'never want', add exceptions back in
to particular fonts."""
wanted_chars = {
"LGC": "20bf feff", # Bitcoin (not in Unicode 9 data yet), BOM
"MONO": "feff", # BOM
"SYM2": "0000-001f 007f 0080-009f", # show as question mark char
"Zsye": "fe4e5-fe4ee fe82c fe82e-fe837", # legacy PUA for android
}
cmap_ops.phase("assign wanted")
for script in sorted(wanted_chars.keys()):
chars = tool_utils.parse_int_ranges(wanted_chars[script])
cmap_ops.add_all(chars, script)
def _assign_basic(cmap_ops):
"""Add NUL, CR, Space, NBS to all scripts."""
basic_chars = frozenset([0x0, 0x0D, 0x20, 0xA0])
cmap_ops.phase("assign basic")
scripts_to_add = set(cmap_ops.all_scripts()) - {"EXCL"}
cmap_ops.add_all_to_all(basic_chars, scripts_to_add)
def build_script_to_chars(log_level):
if log_level == 0:
log_events = False
log_details = False
else:
log_events = True
log_details = log_level > 1
script_to_chars = unicode_data.create_script_to_chars()
# Bitcoin is not in our unicode 9 data yet, allow it to be set anyway.
temp_defined = {0x20BF}
cmap_ops = CmapOps(
script_to_chars,
log_events=log_events,
log_details=log_details,
undefined_exceptions=temp_defined,
)
_remove_unicode_assignments(cmap_ops)
_unassign_inherited_and_common_with_extensions(cmap_ops)
_reassign_inherited(cmap_ops)
_reassign_common(cmap_ops)
_unassign_latin(cmap_ops)
_assign_cldr_punct(cmap_ops)
_reassign_merged_scripts(cmap_ops)
_reassign_common_by_block(cmap_ops)
_reassign_by_block(cmap_ops)
_remove_empty(cmap_ops)
_reassign_symbols(cmap_ops)
_reassign_emoji(cmap_ops)
_assign_nastaliq(cmap_ops)
_assign_complex_script_extra(cmap_ops)
_assign_hyphens_for_autohyphenation(cmap_ops)
_assign_script_required(cmap_ops)
_assign_script_special_chars(cmap_ops)
_assign_legacy_phase2(cmap_ops)
_assign_bidi_mirroring(cmap_ops)
_unassign_lgc_from_symbols(cmap_ops)
_assign_programming_lang_symbols(cmap_ops)
_assign_symbols_from_groups(cmap_ops)
_assign_mono(cmap_ops) # after LGC is defined except for basics
_assign_sym2(cmap_ops) # after LGC removed, add back for enclosing keycaps
_assign_math(cmap_ops)
_assign_dotted_circle(cmap_ops) # for all fonts with combining marks
_remove_unwanted(cmap_ops) # comes before assign_basic, assign_wanted
_assign_wanted(cmap_ops)
_assign_basic(cmap_ops)
cmap_ops.finish() # so we can clean up log
return cmap_ops.create_script_to_chars()
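# Hedged usage sketch: build_script_to_chars(1) runs the full assignment
# pipeline above with event logging and returns a mapping from script code
# (e.g. "LGC", "MONO", "Zmth") to its set of assigned codepoints.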
def _merge_fallback_chars(script_to_chars, srcfile):
xtra_cmap_data = cmap_data.read_cmap_data_file(srcfile)
xtra_rowdata = cmap_data.create_map_from_table(xtra_cmap_data.table)
merged_cmap = {}
for script in sorted(script_to_chars):
cmap = script_to_chars[script]
xcmap = None
if script in xtra_rowdata:
rowdata = xtra_rowdata[script]
xcount = int(getattr(rowdata, "xcount", -1))
if xcount != -1:
xcmap = tool_utils.parse_int_ranges(rowdata.xranges)
cmap -= xcmap
else:
xcmap = None # not a tuple, so probably no fallback data
else:
sys.stderr.write("no script %s found in %s\n" % (script, srcfile))
merged_cmap[script] = (cmap, xcmap)
return merged_cmap
def _get_cmap_data(script_to_chars, metadata):
tabledata = cmap_data.create_table_from_map(script_to_chars)
return cmap_data.CmapData(metadata, tabledata)
### debug
def _dump_primaries():
for block in unicode_data.block_names():
block_range = unicode_data.block_range(block)
primary_script = _primary_script_for_block(block)
print(
"%13s %6s %s"
% (
"%04X-%04X" % block_range,
"'%s'" % primary_script if primary_script else "------",
block,
)
)
def main():
DEFAULT_OUTFILE = "noto_cmap_phase3_temp.xml"
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--outfile",
help='name of cmap file to output ("%s" if name ' "omitted)" % DEFAULT_OUTFILE,
metavar="file",
nargs="?",
default=None,
const=DEFAULT_OUTFILE,
)
parser.add_argument(
"-m", "--merge", help="merge excluded/fallback data from file", metavar="file"
)
parser.add_argument(
"-l",
"--loglevel",
help="log detail 0-2",
metavar="level",
nargs="?",
type=int,
const=1,
default=0,
)
parser.add_argument(
"--regen",
help="reformat script required data, no cmap generation",
action="store_true",
)
args = parser.parse_args()
if args.regen:
_regen_script_required()
return
script_to_chars = build_script_to_chars(args.loglevel)
meta_params = []
if args.merge:
script_to_chars = _merge_fallback_chars(script_to_chars, args.merge)
meta_params.append(("mergefile", args.merge))
metadata = cmap_data.create_metadata("noto_cmap_reqs", meta_params)
cmapdata = _get_cmap_data(script_to_chars, metadata)
if args.outfile:
cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
print("wrote %s" % args.outfile)
else:
print(cmap_data.write_cmap_data(cmapdata, pretty=True))
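# Hedged CLI sketch (flags as defined in main() above; file names illustrative):
#   python noto_cmap_reqs.py -o            # write noto_cmap_phase3_temp.xml
#   python noto_cmap_reqs.py -m prev.xml   # merge fallback data, print to stdout
#   python noto_cmap_reqs.py --regen       # reformat _SCRIPT_REQUIRED, no cmap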
if __name__ == "__main__":
main()
|
googlei18n/nototools
|
nototools/noto_cmap_reqs.py
|
Python
|
apache-2.0
| 103,028
|
[
"FEFF",
"FLEUR"
] |
e36613ecde965b86e978a2602e37218d31e7e284af4bcb49b6d9b0bb060b8b4c
|
#!/usr/bin/env python
"""
File I/O class
A wrapper around various NetCDF libraries, used by
BOUT++ routines. Creates a consistent interface
across machines
NOTE: NetCDF includes unlimited dimensions,
but this library is just for very simple
I/O operations. Educated guesses are made
for the dimensions.
Supported libraries:
-------------------
netCDF4
Scientific.IO.NetCDF
scipy.io.netcdf
old version (create_dimension, create_variable)
new version (createDimension, createVariable)
"""
from __future__ import print_function
try:
from builtins import map
from builtins import zip
from builtins import str
from builtins import object
except:
pass
import numpy as np
import time, getpass
# Record which library to use
library = None
try:
from netCDF4 import Dataset
library = "netCDF4"
has_netCDF = True
except ImportError:
try:
from Scientific.IO.NetCDF import NetCDFFile as Dataset
from Scientific.N import Int, Float, Float32
library = "Scientific"
has_netCDF = True
except ImportError:
try:
from scipy.io.netcdf import netcdf_file as Dataset
library = "scipy"
has_netCDF = True
except:
raise ImportError("DataFile: No supported NetCDF modules available")
try:
import h5py
has_h5py = True
except ImportError:
has_h5py = False
class DataFile:
impl = None
def __init__(self, filename=None, write=False, create=False, format='NETCDF3_CLASSIC'):
if filename != None:
if filename.split('.')[-1] in ('hdf5','hdf','h5'):
self.impl = DataFile_HDF5(filename=filename, write=write, create=create, format=format)
else:
self.impl = DataFile_netCDF(filename=filename, write=write, create=create, format=format)
elif format == 'HDF5':
self.impl = DataFile_HDF5(filename=filename, write=write, create=create, format=format)
else:
self.impl = DataFile_netCDF(filename=filename, write=write, create=create, format=format)
def open(self, filename, write=False, create=False,
format='NETCDF3_CLASSIC'):
self.impl.open(filename, write=write, create=create,
format=format)
def close(self):
self.impl.close()
def __del__(self):
self.impl.__del__()
def __enter__(self):
return self.impl.__enter__()
def __exit__(self, type, value, traceback):
self.impl.__exit__(type, value, traceback)
def read(self, name, ranges=None):
"""Read a variable from the file."""
return self.impl.read(name, ranges=ranges)
def list(self):
"""List all variables in the file."""
return self.impl.list()
def dimensions(self, varname):
"""Array of dimension names"""
return self.impl.dimensions(varname)
def ndims(self, varname):
"""Number of dimensions for a variable."""
return self.impl.ndims(varname)
def size(self, varname):
"""List of dimension sizes for a variable."""
return self.impl.size(varname)
def write(self, name, data):
"""Writes a variable to file, making guesses for the dimensions"""
return self.impl.write(name, data)
def __getitem__(self, name):
return self.impl.__getitem__(name)
def __setitem__(self, key, value):
self.impl.__setitem__(key, value)
class DataFile_netCDF(DataFile):
handle = None
# Print warning if netcdf is used without the netcdf library
if library != "netCDF4":
print("WARNING: netcdf4-python module not found")
print(" expect poor performance")
if library == "Scientific":
print(" => Using Scientific.IO.NetCDF instead")
elif library == "scipy":
print(" => Using scipy.io.netcdf instead")
def open(self, filename, write=False, create=False,
format='NETCDF3_CLASSIC'):
if (not write) and (not create):
if library == "scipy":
self.handle = Dataset(filename, "r", mmap=False)
else:
self.handle = Dataset(filename, "r")
elif create:
if library == "Scientific":
self.handle = Dataset(filename, "w",
'Created ' + time.ctime(time.time())
+ ' by ' + getpass.getuser())
elif library == "scipy":
self.handle = Dataset(filename, "w")
else:
self.handle = Dataset(filename, "w", format=format)
else:
if library == "scipy":
raise Exception("scipy.io.netcdf doesn't support appending");
else:
self.handle = Dataset(filename, "a")
# Record if writing
self.writeable = write or create
def close(self):
if self.handle != None:
self.handle.close()
self.handle = None
def __init__(self, filename=None, write=False, create=False,
format='NETCDF3_CLASSIC'):
if not has_netCDF:
message = "DataFile: No supported NetCDF python-modules available"
raise ImportError(message)
if filename != None:
self.open(filename, write=write, create=create, format=format)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def read(self, name, ranges=None):
"""Read a variable from the file."""
if self.handle == None: return None
try:
var = self.handle.variables[name]
except KeyError:
# Not found. Try to find using case-insensitive search
var = None
for n in list(self.handle.variables.keys()):
if n.lower() == name.lower():
print("WARNING: Reading '"+n+"' instead of '"+name+"'")
var = self.handle.variables[n]
if var == None:
return None
ndims = len(var.dimensions)
if ndims == 0:
data = var.getValue()
return data #[0]
else:
if ranges != None:
if len(ranges) != 2*ndims:
print("Incorrect number of elements in ranges argument")
return None
if library == "Scientific":
# Passing ranges to var[] doesn't seem to work
data = var[:]
if ndims == 1:
data = data[ranges[0]:ranges[1]]
elif ndims == 2:
data = data[ranges[0]:ranges[1],
ranges[2]:ranges[3]]
elif ndims == 3:
data = data[ranges[0]:ranges[1],
ranges[2]:ranges[3],
ranges[4]:ranges[5]]
elif ndims == 4:
data = data[(ranges[0]):(ranges[1]),
(ranges[2]):(ranges[3]),
(ranges[4]):(ranges[5]),
(ranges[6]):(ranges[7])]
else:
if ndims == 1:
data = var[ranges[0]:ranges[1]]
elif ndims == 2:
data = var[ranges[0]:ranges[1],
ranges[2]:ranges[3]]
elif ndims == 3:
data = var[ranges[0]:ranges[1],
ranges[2]:ranges[3],
ranges[4]:ranges[5]]
elif ndims == 4:
data = var[(ranges[0]):(ranges[1]),
(ranges[2]):(ranges[3]),
(ranges[4]):(ranges[5]),
(ranges[6]):(ranges[7])]
return data
else:
return var[:]
def __getitem__(self, name):
var = self.read(name)
if var is None:
raise KeyError("No variable found: "+name)
return var
def list(self):
"""List all variables in the file."""
if self.handle == None: return []
return list(self.handle.variables.keys())
def keys(self):
"""List all variables in the file."""
return self.list()
def dimensions(self, varname):
"""Array of dimension names"""
if self.handle == None: return None
try:
var = self.handle.variables[varname]
except KeyError:
raise ValueError("No such variable")
return var.dimensions
def ndims(self, varname):
"""Number of dimensions for a variable."""
if self.handle is None:
raise ValueError("File not open")
try:
var = self.handle.variables[varname]
except KeyError:
raise ValueError("No such variable")
return len(var.dimensions)
def size(self, varname):
"""List of dimension sizes for a variable."""
if self.handle == None: return []
try:
var = self.handle.variables[varname]
except KeyError:
return []
def dimlen(d):
dim = self.handle.dimensions[d]
if dim != None:
t = type(dim).__name__
if t == 'int':
return dim
return len(dim)
return 0
return [dimlen(d) for d in var.dimensions]
def write(self, name, data, info = False):
"""Writes a variable to file, making guesses for the dimensions"""
if not self.writeable:
raise Exception("File not writeable. Open with write=True keyword")
s = np.shape(data)
# Get the variable type
t = type(data).__name__
if t == 'NoneType':
print("DataFile: None passed as data to write. Ignoring")
return
if t == 'ndarray':
# Numpy type. Get the data type
t = data.dtype.str
if t == 'list':
# List -> convert to numpy array
data = np.array(data)
t = data.dtype.str
if (t == 'int') or (t == '<i8') or (t == 'int64') :
# NetCDF 3 does not support type int64
data = np.int32(data)
t = data.dtype.str
try:
# See if the variable already exists
var = self.handle.variables[name]
# Check the shape of the variable
if var.shape != s:
print("DataFile: Variable already exists with different size: "+ name)
# Fallthrough to the exception
raise
except:
# Not found, so add.
# Get dimensions
defdims = [(),
('x',),
('x','y'),
('x','y','z'),
('t','x','y','z')]
def find_dim(dim):
# Find a dimension with given name and size
size, name = dim
# See if it exists already
try:
d = self.handle.dimensions[name]
# Check if it's the correct size
if type(d).__name__ == 'int':
if d == size:
return name
else:
if len(d) == size:
return name
# Find another with the correct size
for dn, d in list(self.handle.dimensions.items()):
# Some implementations need len(d) here, some just d
if type(d).__name__ == 'int':
if d == size:
return dn
else:
if len(d) == size:
return dn
# None found, so create a new one
i = 2
while True:
dn = name + str(i)
try:
d = self.handle.dimensions[dn]
# Already exists, so keep going
except KeyError:
# Not found. Create
if info:
print("Defining dimension "+ dn + " of size %d" % size)
try:
self.handle.createDimension(dn, size)
except AttributeError:
# Try the old-style function
self.handle.create_dimension(dn, size)
return dn
i = i + 1
except KeyError:
# Doesn't exist, so add
if info:
print("Defining dimension "+ name + " of size %d" % size)
try:
self.handle.createDimension(name, size)
except AttributeError:
self.handle.create_dimension(name, size)
return name
# List of (size, 'name') tuples
dlist = list(zip(s, defdims[len(s)]))
# Get new list of variables, and turn into a tuple
dims = tuple( map(find_dim, dlist) )
# Create the variable
if library == "Scientific":
if t == 'int' or t == '<i4' or t == 'int32':
tc = Int
elif t=='<f4':
tc = Float32
else:
tc = Float
var = self.handle.createVariable(name, tc, dims)
elif library == "scipy":
try:
# New style functions
var = self.handle.createVariable(name, t, dims)
except AttributeError:
# Old style functions
var = self.handle.create_variable(name, t, dims)
else:
var = self.handle.createVariable(name, t, dims)
if var == None:
raise Exception("Couldn't create variable")
# Write the data
try:
# Some libraries allow this for arrays
var.assignValue(data)
except:
# And some others only this
var[:] = data
class DataFile_HDF5(DataFile):
handle = None
def open(self, filename, write=False, create=False, format=None):
if (not write) and (not create):
self.handle = h5py.File(filename,mode="r")
elif create:
self.handle = h5py.File(filename,mode="w")
else:
self.handle = h5py.File(filename,mode="a")
# Record if writing
self.writeable = write or create
def close(self):
if self.handle != None:
self.handle.close()
self.handle = None
def __init__(self, filename=None, write=False, create=False,
format=None):
if not has_h5py:
message = "DataFile: No supported HDF5 python-modules available"
raise ImportError(message)
if filename != None:
self.open(filename, write=write, create=create, format=format)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def read(self, name, ranges=None):
"""Read a variable from the file."""
if self.handle == None: return None
try:
var = self.handle[name]
except KeyError:
# Not found. Try to find using case-insensitive search
var = None
for n in self.handle.keys():  # h5py files have no .variables attribute
if n.lower() == name.lower():
print("WARNING: Reading '"+n+"' instead of '"+name+"'")
var = self.handle[n]
if var == None:
return None
ndims = len(var.shape)
if ndims == 1 and var.shape[0] == 1:
data = var
return data[0]
else:
if ranges != None:
if len(ranges) != 2*ndims:
print("Incorrect number of elements in ranges argument")
return None
if ndims == 1:
data = var[ranges[0]:ranges[1]]
elif ndims == 2:
data = var[ranges[0]:ranges[1],
ranges[2]:ranges[3]]
elif ndims == 3:
data = var[ranges[0]:ranges[1],
ranges[2]:ranges[3],
ranges[4]:ranges[5]]
elif ndims == 4:
data = var[(ranges[0]):(ranges[1]),
(ranges[2]):(ranges[3]),
(ranges[4]):(ranges[5]),
(ranges[6]):(ranges[7])]
return data
else:
return var[...]
def __getitem__(self, name):
var = self.read(name)
if var is None:
raise KeyError("No variable found: "+name)
return var
def list(self):
"""List all variables in the file."""
if self.handle == None: return []
names = []
self.handle.visit(names.append)
return names
def keys(self):
"""List all variables in the file."""
return self.list()
def dimensions(self, varname):
"""Array of dimension names"""
var = self.handle[varname]
vartype = var.attrs['type']
if vartype == 'Field3D_t':
return ('t','x','y','z')
elif vartype == 'Field2D_t':
return ('t','x','y')
elif vartype == 'scalar_t':
return ('t',)  # one-element tuple; ('t') would be a plain string
elif vartype == 'Field3D':
return ('x','y','z')
elif vartype == 'Field2D':
return ('x','y')
elif vartype == 'scalar':
return ()
else:
raise ValueError("Variable type not recognized")
def ndims(self, varname):
"""Number of dimensions for a variable."""
if self.handle == None: return None
try:
var = self.handle[varname]
except KeyError:
raise ValueError("Variable not found")
return len(var.shape)
def size(self, varname):
"""List of dimension sizes for a variable."""
if self.handle == None: return None
try:
var = self.handle[varname]
except KeyError:
return None
return var.shape
def write(self, name, data):
"""Writes a variable to file"""
if not self.writeable:
raise Exception("File not writeable. Open with write=True keyword")
self.handle.create_dataset(name, data=data)
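# Hedged write sketch (creates "out.nc" and writes a 2D array; per the
# netCDF implementation above, a (16, 32) array gets dimensions ('x', 'y')):
#
#   import numpy as np
#   with DataFile("out.nc", create=True) as f:
#       f.write("psi", np.zeros((16, 32)))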
|
erikgrinaker/BOUT-dev
|
tools/pylib/boututils/datafile.py
|
Python
|
gpl-3.0
| 19,217
|
[
"NetCDF",
"VisIt"
] |
35de7213d4a5ce5a03be38bbe9073d35d3ce4f0955ec737bbdccc3be1fb04256
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is intended to be used to compute Pourbaix diagrams
of arbitrary compositions and formation energies. If you use
this module in your work, please consider citing the following:
General formalism for solid-aqueous equilibria from DFT:
Persson et al., DOI: 10.1103/PhysRevB.85.235438
Decomposition maps, or Pourbaix hull diagrams
Singh et al., DOI: 10.1021/acs.chemmater.7b03980
Fast computation of many-element Pourbaix diagrams:
Patel et al., https://arxiv.org/abs/1909.00035 (submitted)
"""
import itertools
import logging
import re
import warnings
from copy import deepcopy
from functools import cmp_to_key, lru_cache, partial
from multiprocessing import Pool
import numpy as np
from monty.json import MontyDecoder, MSONable
from scipy.spatial import ConvexHull, HalfspaceIntersection
try:
from scipy.special import comb
except ImportError:
from scipy.misc import comb
from tqdm import tqdm
from pymatgen.analysis.phase_diagram import PDEntry, PhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.core.periodic_table import Element
from pymatgen.entries.compatibility import MU_H2O
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.util.coord import Simplex
from pymatgen.util.plotting import pretty_plot
from pymatgen.util.string import latexify
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.4"
__maintainer__ = "Joseph Montoya"
__credits__ = "Arunima Singh, Joseph Montoya, Anjli Patel"
__email__ = "joseph.montoya@tri.global"
__status__ = "Production"
__date__ = "Nov 1, 2012"
logger = logging.getLogger(__name__)
PREFAC = 0.0591
# TODO: Revise to more closely reflect PDEntry, invoke from energy/composition
# TODO: PourbaixEntries depend implicitly on having entry energies be
# formation energies, should be a better way to get from raw energies
# TODO: uncorrected_energy is a bit of a misnomer, but not sure what to rename
class PourbaixEntry(MSONable):
"""
An object encompassing all data relevant to a solid or ion
in a pourbaix diagram. Each bulk solid/ion has an energy
g of the form: e = e0 + 0.0591 log10(conc) - nO mu_H2O
+ (nH - 2nO) pH + phi (-nH + 2nO + q)
Note that the energies corresponding to the input entries
should be formation energies with respect to hydrogen and
oxygen gas in order for the pourbaix diagram formalism to
work. This may be changed to be more flexible in the future.
"""
def __init__(self, entry, entry_id=None, concentration=1e-6):
"""
Args:
entry (ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry): An
entry object
entry_id (str): Optional identifier for the entry
concentration (float): Ion concentration; only used for IonEntry
objects, defaults to 1e-6
"""
self.entry = entry
if isinstance(entry, IonEntry):
self.concentration = concentration
self.phase_type = "Ion"
self.charge = entry.ion.charge
else:
self.concentration = 1.0
self.phase_type = "Solid"
self.charge = 0.0
self.uncorrected_energy = entry.energy
if entry_id is not None:
self.entry_id = entry_id
elif hasattr(entry, "entry_id") and entry.entry_id:
self.entry_id = entry.entry_id
else:
self.entry_id = None
@property
def npH(self):
"""
Returns: Number of H minus twice the number of O in the
composition (the coefficient of the pH term).
"""
return self.entry.composition.get("H", 0.0) - 2 * self.entry.composition.get("O", 0.0)
@property
def nH2O(self):
"""
Returns: Number of H2O.
"""
return self.entry.composition.get("O", 0.0)
@property
def nPhi(self):
"""
Returns: Coefficient of the potential term, i.e. npH minus the charge.
"""
return self.npH - self.charge
@property
def name(self):
"""
Returns: Name for entry
"""
if self.phase_type == "Solid":
return self.entry.composition.reduced_formula + "(s)"
return self.entry.name
@property
def energy(self):
"""
Returns (float): total energy of the pourbaix
entry (at pH = 0 and V = 0 vs. SHE)
"""
# Note: this implicitly depends on formation energies as input
return self.uncorrected_energy + self.conc_term - (MU_H2O * self.nH2O)
@property
def energy_per_atom(self):
"""
energy per atom of the pourbaix entry
Returns (float): energy per atom
"""
return self.energy / self.composition.num_atoms
def energy_at_conditions(self, pH, V):
"""
Get free energy for a given pH and V
Args:
pH (float): pH at which to evaluate free energy
V (float): voltage at which to evaluate free energy
Returns:
free energy at conditions
"""
return self.energy + self.npH * PREFAC * pH + self.nPhi * V
def get_element_fraction(self, element):
"""
Gets the elemental fraction of a given non-OH element
Args:
element (Element or str): string or element corresponding
to element to get from composition
Returns:
fraction of element / sum(all non-OH elements)
"""
return self.composition.get(element) * self.normalization_factor
@property
def normalized_energy(self):
"""
Returns:
energy normalized by number of non H or O atoms, e. g.
for Zn2O6, energy / 2 or for AgTe3(OH)3, energy / 4
"""
return self.energy * self.normalization_factor
def normalized_energy_at_conditions(self, pH, V):
"""
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
"""
return self.energy_at_conditions(pH, V) * self.normalization_factor
@property
def conc_term(self):
"""
Returns the concentration contribution to the free energy,
and should only be present when there are ions in the entry
"""
return PREFAC * np.log10(self.concentration)
# TODO: not sure if these are strictly necessary with refactor
def as_dict(self):
"""
Returns dict which contains Pourbaix Entry data.
Note that the pH, voltage, H2O factors are always calculated when
constructing a PourbaixEntry object.
"""
d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__}
if isinstance(self.entry, IonEntry):
d["entry_type"] = "Ion"
else:
d["entry_type"] = "Solid"
d["entry"] = self.entry.as_dict()
d["concentration"] = self.concentration
d["entry_id"] = self.entry_id
return d
@classmethod
def from_dict(cls, d):
"""
Invokes
"""
entry_type = d["entry_type"]
if entry_type == "Ion":
entry = IonEntry.from_dict(d["entry"])
else:
entry = PDEntry.from_dict(d["entry"])
entry_id = d["entry_id"]
concentration = d["concentration"]
return PourbaixEntry(entry, entry_id, concentration)
@property
def normalization_factor(self):
"""
Sum of number of atoms minus the number of H and O in composition
"""
return 1.0 / (self.num_atoms - self.composition.get("H", 0) - self.composition.get("O", 0))
@property
def composition(self):
"""
Returns composition
"""
return self.entry.composition
@property
def num_atoms(self):
"""
Return number of atoms in current formula. Useful for normalization
"""
return self.composition.num_atoms
def __repr__(self):
return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {} ".format(
self.entry.composition,
self.energy,
self.npH,
self.nPhi,
self.nH2O,
self.entry_id,
)
def __str__(self):
return self.__repr__()
class MultiEntry(PourbaixEntry):
"""
PourbaixEntry-like object for constructing multi-elemental Pourbaix
diagrams.
"""
def __init__(self, entry_list, weights=None):
"""
Initializes a MultiEntry.
Args:
entry_list ([PourbaixEntry]): List of component PourbaixEntries
weights ([float]): Weights associated with each entry. Default is None
"""
if weights is None:
self.weights = [1.0] * len(entry_list)
else:
self.weights = weights
self.entry_list = entry_list
@lru_cache()
def __getattr__(self, item):
"""
Because most of the attributes here are just weighted
averages of the entry_list, we save some space by
having a set of conditionals to define the attributes
"""
# Attributes that are weighted averages of entry attributes
if item in [
"energy",
"npH",
"nH2O",
"nPhi",
"conc_term",
"composition",
"uncorrected_energy",
]:
# TODO: Composition could be changed for compat with sum
if item == "composition":
start = Composition({})
else:
start = 0
return sum(
[getattr(e, item) * w for e, w in zip(self.entry_list, self.weights)],
start,
)
# Attributes that are just lists of entry attributes
if item in ["entry_id", "phase_type"]:
return [getattr(e, item) for e in self.entry_list]
# normalization_factor, num_atoms should work from superclass
return self.__getattribute__(item)
@property
def name(self):
"""
MultiEntry name, i. e. the name of each entry joined by ' + '
"""
return " + ".join([e.name for e in self.entry_list])
def __repr__(self):
return (
"Multiple Pourbaix Entry: energy = {:.4f}, npH = {}, nPhi = {}, "
"nH2O = {}, entry_id = {}, species: {}".format(
self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id, self.name
)
)
def __str__(self):
return self.__repr__()
def as_dict(self):
"""
Returns: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry_list": [e.as_dict() for e in self.entry_list],
"weights": self.weights,
}
@classmethod
def from_dict(cls, d):
"""
Args:
d (): Dict representation
Returns:
MultiEntry
"""
entry_list = [PourbaixEntry.from_dict(e) for e in d.get("entry_list")]
return cls(entry_list, d.get("weights"))
# TODO: this class isn't particularly useful in its current form, could be
# refactored to include information about the reference solid
class IonEntry(PDEntry):
"""
Object similar to PDEntry, but contains an Ion object instead of a
Composition object.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
"""
def __init__(self, ion, energy, name=None, attribute=None):
"""
Args:
ion: Ion object
energy: Energy for composition.
            name: Optional parameter to name the entry. Defaults to the
                chemical formula.
            attribute: Optional arbitrary attribute attached to the entry.
        """
self.ion = ion
# Auto-assign name
name = name if name else self.ion.reduced_formula
super().__init__(composition=ion.composition, energy=energy, name=name, attribute=attribute)
@classmethod
def from_dict(cls, d):
"""
Returns an IonEntry object from a dict.
"""
return IonEntry(Ion.from_dict(d["ion"]), d["energy"], d.get("name"), d.get("attribute"))
def as_dict(self):
"""
Creates a dict of composition, energy, and ion name
"""
d = {"ion": self.ion.as_dict(), "energy": self.energy, "name": self.name}
return d
def __repr__(self):
return "IonEntry : {} with energy = {:.4f}".format(self.composition, self.energy)
def __str__(self):
return self.__repr__()
def ion_or_solid_comp_object(formula):
"""
Returns either an ion object or composition object given
a formula.
Args:
        formula: String formula. E.g. of an ion: NaOH(aq), Na[+];
            e.g. of a solid: Fe2O3(s), Fe(s), Na2O
Returns:
Composition/Ion object
"""
m = re.search(r"\[([^\[\]]+)\]|\(aq\)", formula)
if m:
comp_obj = Ion.from_formula(formula)
elif re.search(r"\(s\)", formula):
comp_obj = Composition(formula[:-3])
else:
comp_obj = Composition(formula)
return comp_obj
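# Illustrative examples (assumptions based on the parsing logic above, not
# part of the original module):
#
#   ion_or_solid_comp_object("Na[+]")     # -> Ion (matches the bracket/(aq) pattern)
#   ion_or_solid_comp_object("NaOH(aq)")  # -> Ion
#   ion_or_solid_comp_object("Fe2O3(s)")  # -> Composition("Fe2O3"); "(s)" stripped
#   ion_or_solid_comp_object("Na2O")      # -> Composition("Na2O")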
ELEMENTS_HO = {Element("H"), Element("O")}
# TODO: the solids filter breaks some of the functionality of the
# heatmap plotter, because the reference states for decomposition
# don't include oxygen/hydrogen in the OER/HER regions
# TODO: create a from_phase_diagram class method for non-formation energy
# invocation
# TODO: invocation from a MultiEntry entry list could be a bit more robust
# TODO: serialization is still a bit rough around the edges
class PourbaixDiagram(MSONable):
"""
Class to create a Pourbaix diagram from entries
"""
def __init__(self, entries, comp_dict=None, conc_dict=None, filter_solids=False, nproc=None):
"""
Args:
entries ([PourbaixEntry] or [MultiEntry]): Entries list
containing Solids and Ions or a list of MultiEntries
comp_dict ({str: float}): Dictionary of compositions,
                defaults to equal parts of each element
conc_dict ({str: float}): Dictionary of ion concentrations,
defaults to 1e-6 for each element
filter_solids (bool): applying this filter to a pourbaix
diagram ensures all included phases are filtered by
stability on the compositional phase diagram. This
breaks some of the functionality of the analysis,
though, so use with caution.
nproc (int): number of processes to generate multientries with
in parallel. Defaults to None (serial processing)
"""
entries = deepcopy(entries)
# Get non-OH elements
self.pbx_elts = set(itertools.chain.from_iterable([entry.composition.elements for entry in entries]))
self.pbx_elts = list(self.pbx_elts - ELEMENTS_HO)
self.dim = len(self.pbx_elts) - 1
# Process multientry inputs
if isinstance(entries[0], MultiEntry):
self._processed_entries = entries
# Extract individual entries
single_entries = list(set(itertools.chain.from_iterable([e.entry_list for e in entries])))
self._unprocessed_entries = single_entries
self._filtered_entries = single_entries
self._conc_dict = None
self._elt_comp = {k: v for k, v in entries[0].composition.items() if k not in ELEMENTS_HO}
self._multielement = True
# Process single entry inputs
else:
# Set default conc/comp dicts
if not comp_dict:
comp_dict = {elt.symbol: 1.0 / len(self.pbx_elts) for elt in self.pbx_elts}
if not conc_dict:
conc_dict = {elt.symbol: 1e-6 for elt in self.pbx_elts}
self._conc_dict = conc_dict
self._elt_comp = comp_dict
self.pourbaix_elements = self.pbx_elts
solid_entries = [entry for entry in entries if entry.phase_type == "Solid"]
ion_entries = [entry for entry in entries if entry.phase_type == "Ion"]
# If a conc_dict is specified, override individual entry concentrations
for entry in ion_entries:
ion_elts = list(set(entry.composition.elements) - ELEMENTS_HO)
# TODO: the logic here for ion concentration setting is in two
# places, in PourbaixEntry and here, should be consolidated
if len(ion_elts) == 1:
entry.concentration = conc_dict[ion_elts[0].symbol] * entry.normalization_factor
elif len(ion_elts) > 1 and not entry.concentration:
raise ValueError("Elemental concentration not compatible " "with multi-element ions")
self._unprocessed_entries = solid_entries + ion_entries
if not len(solid_entries + ion_entries) == len(entries):
raise ValueError("All supplied entries must have a phase type of " 'either "Solid" or "Ion"')
if filter_solids:
# O is 2.46 b/c pbx entry finds energies referenced to H2O
entries_HO = [ComputedEntry("H", 0), ComputedEntry("O", 2.46)]
solid_pd = PhaseDiagram(solid_entries + entries_HO)
solid_entries = list(set(solid_pd.stable_entries) - set(entries_HO))
self._filtered_entries = solid_entries + ion_entries
if len(comp_dict) > 1:
self._multielement = True
self._processed_entries = self._preprocess_pourbaix_entries(self._filtered_entries, nproc=nproc)
else:
self._processed_entries = self._filtered_entries
self._multielement = False
self._stable_domains, self._stable_domain_vertices = self.get_pourbaix_domains(self._processed_entries)
def _convert_entries_to_points(self, pourbaix_entries):
"""
Args:
pourbaix_entries ([PourbaixEntry]): list of pourbaix entries
to process into vectors in nph-nphi-composition space
Returns:
list of vectors, [[nph, nphi, e0, x1, x2, ..., xn-1]]
corresponding to each entry in nph-nphi-composition space
"""
vecs = [
[entry.npH, entry.nPhi, entry.energy] + [entry.composition.get(elt) for elt in self.pbx_elts[:-1]]
for entry in pourbaix_entries
]
vecs = np.array(vecs)
norms = np.transpose([[entry.normalization_factor for entry in pourbaix_entries]])
vecs *= norms
return vecs
def _get_hull_in_nph_nphi_space(self, entries):
"""
Generates convex hull of pourbaix diagram entries in composition,
npH, and nphi space. This enables filtering of multi-entries
such that only compositionally stable combinations of entries
are included.
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to construct
the convex hull
Returns: list of entries and stable facets corresponding to that
list of entries
"""
ion_entries = [entry for entry in entries if entry.phase_type == "Ion"]
solid_entries = [entry for entry in entries if entry.phase_type == "Solid"]
# Pre-filter solids based on min at each composition
logger.debug("Pre-filtering solids by min energy at each composition")
sorted_entries = sorted(
solid_entries,
key=lambda x: (x.composition.reduced_composition, x.entry.energy_per_atom),
)
grouped_by_composition = itertools.groupby(sorted_entries, key=lambda x: x.composition.reduced_composition)
min_entries = [list(grouped_entries)[0] for comp, grouped_entries in grouped_by_composition]
min_entries += ion_entries
logger.debug("Constructing nph-nphi-composition points for qhull")
vecs = self._convert_entries_to_points(min_entries)
maxes = np.max(vecs[:, :3], axis=0)
extra_point = np.concatenate([maxes, np.ones(self.dim) / self.dim], axis=0)
# Add padding for extra point
pad = 1000
extra_point[2] += pad
points = np.concatenate([vecs, np.array([extra_point])], axis=0)
logger.debug("Constructing convex hull in nph-nphi-composition space")
hull = ConvexHull(points, qhull_options="QJ i")
# Create facets and remove top
        facets = [facet for facet in hull.simplices if len(points) - 1 not in facet]
if self.dim > 1:
logger.debug("Filtering facets by pourbaix composition")
valid_facets = []
for facet in facets:
comps = vecs[facet][:, 3:]
full_comps = np.concatenate([comps, 1 - np.sum(comps, axis=1).reshape(len(comps), 1)], axis=1)
                # Ensure a compositional interior point exists in the simplex
if np.linalg.matrix_rank(full_comps) > self.dim:
valid_facets.append(facet)
else:
valid_facets = facets
return min_entries, valid_facets
def _preprocess_pourbaix_entries(self, entries, nproc=None):
"""
Generates multi-entries for pourbaix diagram
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to preprocess
into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
Returns:
([MultiEntry]) list of stable MultiEntry candidates
"""
# Get composition
tot_comp = Composition(self._elt_comp)
min_entries, valid_facets = self._get_hull_in_nph_nphi_space(entries)
combos = []
for facet in valid_facets:
            for i in range(1, self.dim + 2):
                these_combos = []
                for combo in itertools.combinations(facet, i):
                    these_entries = [min_entries[idx] for idx in combo]
                    these_combos.append(frozenset(these_entries))
                combos.append(these_combos)
        all_combos = [list(combo) for combo in set(itertools.chain.from_iterable(combos))]
multi_entries = []
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=tot_comp)
with Pool(nproc) as p:
multi_entries = list(tqdm(p.imap(f, all_combos), total=len(all_combos)))
multi_entries = list(filter(bool, multi_entries))
else:
# Serial processing of multi-entry generation
for combo in tqdm(all_combos):
multi_entry = self.process_multientry(combo, prod_comp=tot_comp)
if multi_entry:
multi_entries.append(multi_entry)
return multi_entries
def _generate_multielement_entries(self, entries, nproc=None):
"""
Create entries for multi-element Pourbaix construction.
This works by finding all possible linear combinations
of entries that can result in the specified composition
from the initialized comp_dict.
Args:
entries ([PourbaixEntries]): list of pourbaix entries
to process into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
"""
N = len(self._elt_comp) # No. of elements
total_comp = Composition(self._elt_comp)
# generate all combinations of compounds that have all elements
entry_combos = [itertools.combinations(entries, j + 1) for j in range(N)]
entry_combos = itertools.chain.from_iterable(entry_combos)
entry_combos = filter(lambda x: total_comp < MultiEntry(x).composition, entry_combos)
# Generate and filter entries
processed_entries = []
total = sum([comb(len(entries), j + 1) for j in range(N)])
if total > 1e6:
            warnings.warn(
                "Your pourbaix diagram includes {} entries and may take a long time to generate.".format(total)
            )
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=total_comp)
with Pool(nproc) as p:
processed_entries = list(tqdm(p.imap(f, entry_combos), total=total))
processed_entries = list(filter(bool, processed_entries))
# Serial processing of multi-entry generation
else:
for entry_combo in entry_combos:
processed_entry = self.process_multientry(entry_combo, total_comp)
if processed_entry is not None:
processed_entries.append(processed_entry)
return processed_entries
@staticmethod
def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4):
"""
Static method for finding a multientry based on
a list of entries and a product composition.
Essentially checks to see if a valid aqueous
reaction exists between the entries and the
product composition and returns a MultiEntry
with weights according to the coefficients if so.
Args:
entry_list ([Entry]): list of entries from which to
create a MultiEntry
prod_comp (Composition): composition constraint for setting
weights of MultiEntry
coeff_threshold (float): threshold of stoichiometric
coefficients to filter, if weights are lower than
this value, the entry is not returned
"""
dummy_oh = [Composition("H"), Composition("O")]
try:
# Get balanced reaction coeffs, ensuring all < 0 or conc thresh
# Note that we get reduced compositions for solids and non-reduced
# compositions for ions because ions aren't normalized due to
# their charge state.
entry_comps = [e.composition for e in entry_list]
rxn = Reaction(entry_comps + dummy_oh, [prod_comp])
react_coeffs = [-rxn.get_coeff(comp) for comp in entry_comps]
all_coeffs = react_coeffs + [rxn.get_coeff(prod_comp)]
# Check if reaction coeff threshold met for pourbaix compounds
# All reactant/product coefficients must be positive nonzero
if all([coeff > coeff_threshold for coeff in all_coeffs]):
return MultiEntry(entry_list, weights=react_coeffs)
return None
except ReactionError:
return None
@staticmethod
def get_pourbaix_domains(pourbaix_entries, limits=None):
"""
        Returns a set of pourbaix stable domains (i.e. polygons) in
pH-V space from a list of pourbaix_entries
This function works by using scipy's HalfspaceIntersection
function to construct all of the 2-D polygons that form the
boundaries of the planes corresponding to individual entry
gibbs free energies as a function of pH and V. Hyperplanes
of the form a*pH + b*V + 1 - g(0, 0) are constructed and
supplied to HalfspaceIntersection, which then finds the
boundaries of each pourbaix region using the intersection
points.
Args:
pourbaix_entries ([PourbaixEntry]): Pourbaix entries
with which to construct stable pourbaix domains
limits ([[float]]): limits in which to do the pourbaix
analysis
Returns:
            Returns a dict of the form {entry: [boundary_points]}.
            The list of boundary points forms the sides of the N-1
            dim polytope bounding the allowable pH-V range of each entry.
"""
if limits is None:
limits = [[-2, 16], [-4, 4]]
# Get hyperplanes
hyperplanes = [
np.array([-PREFAC * entry.npH, -entry.nPhi, 0, -entry.energy]) * entry.normalization_factor
for entry in pourbaix_entries
]
hyperplanes = np.array(hyperplanes)
hyperplanes[:, 2] = 1
max_contribs = np.max(np.abs(hyperplanes), axis=0)
g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])
# Add border hyperplanes and generate HalfspaceIntersection
border_hyperplanes = [
[-1, 0, 0, limits[0][0]],
[1, 0, 0, -limits[0][1]],
[0, -1, 0, limits[1][0]],
[0, 1, 0, -limits[1][1]],
[0, 0, -1, 2 * g_max],
]
hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
interior_point = np.average(limits, axis=1).tolist() + [g_max]
hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
# organize the boundary points by entry
pourbaix_domains = {entry: [] for entry in pourbaix_entries}
for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets):
for v in facet:
if v < len(pourbaix_entries):
this_entry = pourbaix_entries[v]
pourbaix_domains[this_entry].append(intersection)
# Remove entries with no pourbaix region
pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
pourbaix_domain_vertices = {}
for entry, points in pourbaix_domains.items():
points = np.array(points)[:, :2]
# Initial sort to ensure consistency
points = points[np.lexsort(np.transpose(points))]
center = np.average(points, axis=0)
points_centered = points - center
# Sort points by cross product of centered points,
# isn't strictly necessary but useful for plotting tools
points_centered = sorted(points_centered, key=cmp_to_key(lambda x, y: x[0] * y[1] - x[1] * y[0]))
points = points_centered + center
# Create simplices corresponding to pourbaix boundary
simplices = [Simplex(points[indices]) for indices in ConvexHull(points).simplices]
pourbaix_domains[entry] = simplices
pourbaix_domain_vertices[entry] = points
return pourbaix_domains, pourbaix_domain_vertices
def find_stable_entry(self, pH, V):
"""
Finds stable entry at a pH,V condition
Args:
pH (float): pH to find stable entry
V (float): V to find stable entry
        Returns:
            (PourbaixEntry or MultiEntry): entry with the minimum normalized
                energy at the given pH, V condition
        """
energies_at_conditions = [e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries]
return self.stable_entries[np.argmin(energies_at_conditions)]
def get_decomposition_energy(self, entry, pH, V):
"""
Finds decomposition to most stable entries in eV/atom,
supports vectorized inputs for pH and V
Args:
entry (PourbaixEntry): PourbaixEntry corresponding to
compound to find the decomposition for
pH (float, [float]): pH at which to find the decomposition
V (float, [float]): voltage at which to find the decomposition
Returns:
            Decomposition energy for the entry, i.e. the energy above
the "pourbaix hull" in eV/atom at the given conditions
"""
# Check composition consistency between entry and Pourbaix diagram:
pbx_comp = Composition(self._elt_comp).fractional_composition
entry_pbx_comp = Composition(
{elt: coeff for elt, coeff in entry.composition.items() if elt not in ELEMENTS_HO}
).fractional_composition
if entry_pbx_comp != pbx_comp:
raise ValueError("Composition of stability entry does not match " "Pourbaix Diagram")
entry_normalized_energy = entry.normalized_energy_at_conditions(pH, V)
hull_energy = self.get_hull_energy(pH, V)
decomposition_energy = entry_normalized_energy - hull_energy
# Convert to eV/atom instead of eV/normalized formula unit
decomposition_energy /= entry.normalization_factor
decomposition_energy /= entry.composition.num_atoms
return decomposition_energy
def get_hull_energy(self, pH, V):
"""
Gets the minimum energy of the pourbaix "basin" that is formed
from the stable pourbaix planes. Vectorized.
Args:
pH (float or [float]): pH at which to find the hull energy
V (float or [float]): V at which to find the hull energy
Returns:
(float or [float]) minimum pourbaix energy at conditions
"""
all_gs = np.array([e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries])
base = np.min(all_gs, axis=0)
return base
def get_stable_entry(self, pH, V):
"""
Gets the stable entry at a given pH, V condition
Args:
pH (float): pH at a given condition
V (float): V at a given condition
Returns:
(PourbaixEntry or MultiEntry): pourbaix or multi-entry
                corresponding to the minimum energy entry at a given
pH, V condition
"""
all_gs = np.array([e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries])
return self.stable_entries[np.argmin(all_gs)]
@property
def stable_entries(self):
"""
Returns the stable entries in the Pourbaix diagram.
"""
return list(self._stable_domains.keys())
@property
def unstable_entries(self):
"""
Returns all unstable entries in the Pourbaix diagram
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def all_entries(self):
"""
Return all entries used to generate the pourbaix diagram
"""
return self._processed_entries
@property
def unprocessed_entries(self):
"""
Return unprocessed entries
"""
return self._unprocessed_entries
def as_dict(self, include_unprocessed_entries=False):
"""
Args:
            include_unprocessed_entries (bool): Whether to include unprocessed entries.
Returns:
MSONable dict.
"""
if include_unprocessed_entries:
entries = [e.as_dict() for e in self._unprocessed_entries]
else:
entries = [e.as_dict() for e in self._processed_entries]
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": entries,
"comp_dict": self._elt_comp,
"conc_dict": self._conc_dict,
}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
            d (dict): Dict representation.
Returns:
PourbaixDiagram
"""
decoded_entries = MontyDecoder().process_decoded(d["entries"])
return cls(decoded_entries, d.get("comp_dict"), d.get("conc_dict"))
class PourbaixPlotter:
"""
    A plotter class for Pourbaix diagrams.
"""
def __init__(self, pourbaix_diagram):
"""
Args:
pourbaix_diagram (PourbaixDiagram): A PourbaixDiagram object.
"""
self._pbx = pourbaix_diagram
def show(self, *args, **kwargs):
"""
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
"""
plt = self.get_pourbaix_plot(*args, **kwargs)
plt.show()
def get_pourbaix_plot(self, limits=None, title="", label_domains=True, plt=None):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram
"""
if limits is None:
limits = [[-2, 16], [-3, 3]]
plt = plt or pretty_plot(16)
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC], [xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23], [xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, vertices in self._pbx._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
x, y = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, "k-", linewidth=lw)
if label_domains:
plt.annotate(
generate_entry_label(entry),
center,
ha="center",
va="center",
fontsize=20,
color="b",
).draggable()
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight="bold")
return plt
def plot_entry_stability(
self,
entry,
pH_range=None,
pH_resolution=100,
V_range=None,
V_resolution=100,
e_hull_max=1,
cmap="RdYlBu_r",
**kwargs,
):
"""
Args:
            entry (PourbaixEntry): entry for which to plot the stability map
            pH_range ([float]): pH range for the plot, defaults to [-2, 16]
            pH_resolution (int): number of pH grid points, defaults to 100
            V_range ([float]): voltage range for the plot, defaults to [-3, 3]
            V_resolution (int): number of voltage grid points, defaults to 100
            e_hull_max (float): maximum energy above the hull shown on the colorbar, in eV/atom
            cmap (str): matplotlib colormap name, defaults to "RdYlBu_r"
            **kwargs: additional keyword arguments passed to get_pourbaix_plot
        Returns:
            plt (pyplot): matplotlib plot object with the stability map
"""
if pH_range is None:
pH_range = [-2, 16]
if V_range is None:
V_range = [-3, 3]
# plot the Pourbaix diagram
plt = self.get_pourbaix_plot(**kwargs)
pH, V = np.mgrid[
pH_range[0] : pH_range[1] : pH_resolution * 1j,
V_range[0] : V_range[1] : V_resolution * 1j,
]
stability = self._pbx.get_decomposition_energy(entry, pH, V)
# Plot stability map
plt.pcolor(pH, V, stability, cmap=cmap, vmin=0, vmax=e_hull_max)
cbar = plt.colorbar()
cbar.set_label("Stability of {} (eV/atom)".format(generate_entry_label(entry)))
# Set ticklabels
# ticklabels = [t.get_text() for t in cbar.ax.get_yticklabels()]
# ticklabels[-1] = '>={}'.format(ticklabels[-1])
# cbar.ax.set_yticklabels(ticklabels)
return plt
def domain_vertices(self, entry):
"""
Returns the vertices of the Pourbaix domain.
Args:
entry: Entry for which domain vertices are desired
Returns:
list of vertices
"""
return self._pbx._stable_domain_vertices[entry]
def generate_entry_label(entry):
"""
Generates a label for the pourbaix plotter
Args:
entry (PourbaixEntry or MultiEntry): entry to get a label for
"""
if isinstance(entry, MultiEntry):
return " + ".join([latexify_ion(latexify(e.name)) for e in entry.entry_list])
return latexify_ion(latexify(entry.name))
def latexify_ion(formula):
"""
Convert a formula to latex format.
Args:
formula (str): Formula
Returns:
Latex string.
"""
return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", formula)
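# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module). The
# energies below are placeholders rather than real data, and PourbaixEntry's
# optional arguments (entry_id, concentration) are assumed to have defaults.
if __name__ == "__main__":
    solid = PourbaixEntry(PDEntry(Composition("ZnO"), -3.0))
    ion = PourbaixEntry(IonEntry(Ion.from_formula("Zn[2+]"), -1.5))
    pbx = PourbaixDiagram([solid, ion])
    print(pbx.get_decomposition_energy(solid, pH=7, V=0))
    PourbaixPlotter(pbx).get_pourbaix_plot().show()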
|
davidwaroquiers/pymatgen
|
pymatgen/analysis/pourbaix_diagram.py
|
Python
|
mit
| 40,558
|
[
"pymatgen"
] |
06c480645de6a84ba5883ea9e02d408fa1654dfc267849f863ab82f737fc4f2a
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
=================================
MetPy Declarative Syntax Tutorial
=================================
The declarative syntax that is a part of the MetPy package is designed to aid in simple
data exploration and analysis needs by simplifying the plotting context from typical verbose
Python code. The complexity of data wrangling and plotting is hidden behind the simplified
syntax to allow a lower barrier to investigating your data.
"""
#########################################################################
# Imports
# -------
#
# You'll note that the number of imports is smaller due to using the declarative syntax.
# There is no need to import Matplotlib or Cartopy to your code as all of that is done
# behind the scenes.
from datetime import datetime, timedelta
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.io import metar
from metpy.plots.declarative import (BarbPlot, ContourPlot, FilledContourPlot, MapPanel,
PanelContainer, PlotObs)
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# Depending on what kind of data you want to plot, you'll use either Xarray (for gridded
# data), Pandas (for CSV data), or the MetPy METAR parser (for METAR data).
#
# We'll start this tutorial by reading in a gridded dataset using Xarray.
# Open the netCDF file as a xarray Dataset and parse the full dataset
data = xr.open_dataset(get_test_data('GFS_test.nc', False)).metpy.parse_cf()
# View a summary of the Dataset
print(data)
#########################################################################
# Set Datetime
# ------------
#
# Set the date/time that you desire to plot
plot_time = datetime(2010, 10, 26, 12)
#########################################################################
# Subsetting Data
# ---------------
#
# MetPy provides wrappers for the usual xarray indexing and selection routines that can handle
# quantities with units. For DataArrays, MetPy also allows using the coordinate axis types
# mentioned above as aliases for the coordinates. So, if we want data to be just over
# the U.S. for plotting purposes:
ds = data.metpy.sel(lat=slice(70, 10), lon=slice(360 - 150, 360 - 55))
#########################################################################
# For full details on xarray indexing/selection, see
# `xarray's documentation <https://xarray.pydata.org/en/stable/indexing.html>`_.
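#
# As a quick, illustrative aside (an assumption based on MetPy's units-aware
# selection, not a step used later in this tutorial), a single DataArray can
# also be subset by a unit-bearing vertical level:
#
# ``ds['Geopotential_height_isobaric'].metpy.sel(vertical=300 * units.hPa)``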
#########################################################################
# Calculations
# ------------
#
# In MetPy 1.0 and later, calculation functions accept Xarray DataArrays as input and
# output a DataArray that can be easily added to an existing Dataset.
#
# As an example, we calculate wind speed from the wind components and add it as a new variable
# to our Dataset.
ds['wind_speed'] = mpcalc.wind_speed(ds['u-component_of_wind_isobaric'],
ds['v-component_of_wind_isobaric'])
#########################################################################
# Plotting
# --------
#
# With that minimal preparation, we are now ready to use the simplified plotting syntax to
# plot our data and analyze the meteorological situation.
#
# General Structure
#
# 1. Set contour attributes
#
# 2. Set map characteristics and collect contours
#
# 3. Collect panels and plot
#
# 4. Show (or save) the results
#
# Valid Plotting Types for Gridded Data:
#
# - ``ContourPlot()``
#
# - ``FilledContourPlot()``
#
# - ``ImagePlot()``
#
# - ``BarbPlot()``
#
# More complete descriptions of these and other plotting types, as well as the map panel and
# panel container classes are at the end of this tutorial.
#
# Let's plot a 300-hPa map with color-filled wind speed, which we calculated and added to
# our Dataset above, and geopotential heights over the CONUS.
#########################################################################
# We'll start by setting attributes for contours of Geopotential Heights at 300 hPa.
# We need to set at least the data, field, level, and time attributes. We'll set a few others
# to have greater control over how the data is plotted.
# Set attributes for contours of Geopotential Heights at 300 hPa
cntr2 = ContourPlot()
cntr2.data = ds
cntr2.field = 'Geopotential_height_isobaric'
cntr2.level = 300 * units.hPa
cntr2.time = plot_time
cntr2.contours = list(range(0, 10000, 120))
cntr2.linecolor = 'black'
cntr2.linestyle = 'solid'
cntr2.clabels = True
#########################################################################
# Now we'll set the attributes for plotting color-filled contours of wind speed at 300 hPa.
# Again, the attributes that must be set include data, field, level, and time. We'll also set
# a colormap and colorbar to be purposeful for wind speed. Additionally, we'll set the
# attribute to change the units from m/s to knots, which is the common plotting units for
# wind speed.
# Set attributes for plotting color-filled contours of wind speed at 300 hPa
cfill = FilledContourPlot()
cfill.data = ds
cfill.field = 'wind_speed'
cfill.level = 300 * units.hPa
cfill.time = plot_time
cfill.contours = list(range(10, 201, 20))
cfill.colormap = 'BuPu'
cfill.colorbar = 'horizontal'
cfill.plot_units = 'knot'
#########################################################################
# Once we have our contours (and any colorfill plots) set up, we will want to define the map
# panel that we'll plot the data on. This is the place where we can set the view extent,
# projection of our plot, add map lines like coastlines and states, and set a plot title.
# One of the key elements is to add the data to the map panel as a list with the plots
# attribute.
# Set the attributes for the map and add our data to the map
panel = MapPanel()
panel.area = [-125, -74, 20, 55]
panel.projection = 'lcc'
panel.layers = ['states', 'coastline', 'borders']
panel.title = f'{cfill.level.m}-hPa Heights and Wind Speed at {plot_time}'
panel.plots = [cfill, cntr2]
#########################################################################
# Finally we'll collect all the panels to plot on the figure, set the size of the figure,
# and ultimately show or save the figure.
# Set the attributes for the panel and put the panel in the figure
pc = PanelContainer()
pc.size = (15, 15)
pc.panels = [panel]
#########################################################################
# All of our settings now produce the following map!
# Show the image
pc.show()
#########################################################################
# That's it! What a nice looking map, with a relatively simple set of code.
#########################################################################
# Adding Wind Barbs
# -----------------
#
# We can easily add wind barbs to the plot we generated above by adding another plot type
# and adding it to the panel. The plot type for wind barbs is ``BarbPlot()`` and has its own
# set of attributes to control plotting a vector quantity.
#########################################################################
# We start by setting the same attributes that we had before for our 300 hPa plot,
# including Geopotential Height contours and color-filled wind speed.
# Set attributes for contours of Geopotential Heights at 300 hPa
cntr2 = ContourPlot()
cntr2.data = ds
cntr2.field = 'Geopotential_height_isobaric'
cntr2.level = 300 * units.hPa
cntr2.time = plot_time
cntr2.contours = list(range(0, 10000, 120))
cntr2.linecolor = 'black'
cntr2.linestyle = 'solid'
cntr2.clabels = True
# Set attributes for plotting color-filled contours of wind speed at 300 hPa
cfill = FilledContourPlot()
cfill.data = ds
cfill.field = 'wind_speed'
cfill.level = 300 * units.hPa
cfill.time = plot_time
cfill.contours = list(range(10, 201, 20))
cfill.colormap = 'BuPu'
cfill.colorbar = 'horizontal'
cfill.plot_units = 'knot'
#########################################################################
# Now we'll set the attributes for plotting wind barbs, with the required attributes of data,
# time, field, and level. The skip attribute is particularly useful for thinning the number of
# wind barbs that are plotted on the map. Again we convert to units of knots.
# Set attributes for plotting wind barbs
barbs = BarbPlot()
barbs.data = ds
barbs.time = plot_time
barbs.field = ['u-component_of_wind_isobaric', 'v-component_of_wind_isobaric']
barbs.level = 300 * units.hPa
barbs.skip = (3, 3)
barbs.plot_units = 'knot'
#########################################################################
# Add all of our plot types to the panel; don't forget to add the new wind barbs to our plot
# list!
# Set the attributes for the map and add our data to the map
panel = MapPanel()
panel.area = [-125, -74, 20, 55]
panel.projection = 'lcc'
panel.layers = ['states', 'coastline', 'borders']
panel.title = f'{cfill.level.m}-hPa Heights and Wind Speed at {plot_time}'
panel.plots = [cfill, cntr2, barbs]
# Set the attributes for the panel and put the panel in the figure
pc = PanelContainer()
pc.size = (15, 15)
pc.panels = [panel]
# Show the figure
pc.show()
#########################################################################
# Plot Surface Obs
# ----------------
#
# We can also plot surface (or upper-air) observations at point locations using the simplified
# syntax. Whether it is surface or upper-air data, the ``PlotObs()`` class is what you would
# want to use. Then you would add those observations to a map panel and collect the panels
# to plot the figure, similar to what you would do for a gridded plot.
df = metar.parse_metar_file(get_test_data('metar_20190701_1200.txt', False), year=2019,
month=7)
# Let's take a look at the variables that we could plot coming from our METAR observations.
print(df.keys())
# Set the observation time
obs_time = datetime(2019, 7, 1, 12)
#########################################################################
# Setting our attributes for plotting observations is pretty straightforward: the variables
# just need to be given as lists, with a comparable number of items for plot characteristics that
# are specific to the individual fields. For example, the locations around a station plot, the
# plot units, and any plotting formats would all need to have the same number of items as the
# fields attribute.
#
# Plotting wind barbs is done through the vector_field attribute. You can reduce the number
# of points plotted (especially important for surface observations) with the ``reduce_points``
# attribute.
#
# For a very basic plot of one field, the minimum required attributes are the data, time,
# fields, and location attributes.
# Plot desired data
obs = PlotObs()
obs.data = df
obs.time = obs_time
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['cloud_coverage', 'air_temperature', 'dew_point_temperature',
'air_pressure_at_sea_level', 'current_wx1_symbol']
obs.plot_units = [None, 'degF', 'degF', None, None]
obs.locations = ['C', 'NW', 'SW', 'NE', 'W']
obs.formats = ['sky_cover', None, None, lambda v: format(v * 10, '.0f')[-3:],
'current_weather']
obs.reduce_points = 0.75
obs.vector_field = ['eastward_wind', 'northward_wind']
#########################################################################
# We use the same classes for plotting our data on a map panel and collecting all the
# panels on the figure. In this case we'll focus in on the state of Indiana for plotting.
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.projection = 'lcc'
panel.area = 'in'
panel.layers = ['states']
panel.title = f'Surface plot for {obs_time}'
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.show()
#########################################################################
# Detailed Attribute Descriptions
# -------------------------------
#
# This final section contains verbose descriptions of the attributes that can be set by the
# plot types used in this tutorial.
#########################################################################
# ContourPlot()
# -------------
#
# This class is designed to plot contours of gridded data, most commonly model output from the
# GFS, NAM, RAP, or other gridded dataset (e.g., NARR).
#
# Attributes:
#
# ``data``
#
# This attribute must be set with the variable name that contains the xarray dataset.
# (Typically this is the variable ds)
#
# ``field``
#
# This attribute must be set with the name of the variable that you want to contour.
# For example, to plot the heights of pressure surfaces from the GFS you would use the name
# ``‘Geopotential_height_isobaric’``
#
# ``level``
#
# This attribute sets the level of the data you wish to plot. If it is a pressure level,
# then it must be set to a unit bearing value (e.g., 500*units.hPa). If the variable does
# not have any vertical levels (e.g., mean sea-level pressure), then the level attribute must
# be set to None.
#
# ``time``
#
# This attribute must be set with a datetime object, just as with the ``PlotObs()`` class.
# To get a forecast hour, you can use the timedelta function from datetime to add the number of
# hours into the future you wish to plot. For example, if you wanted the six hour forecast from
# the 00 UTC 2 February 2020 model run, then you would set the attribute with:
#
# ``datetime(2020, 2, 2, 0) + timedelta(hours=6)``
#
# ``contours``
#
# This attribute sets the contour values to be plotted with a list. This can be set manually
# with a list of integers in square brackets (e.g., ``[5400, 5460, 5520, 5580, 5640, 5700]``)
# or programmatically (e.g., ``list(range(0, 10000, 60))``). The second method is a way to
# easily set a contour interval (in this case 60).
#
# ``clabels``
#
# This attribute can be set to ``True`` if you desire to have your contours labeled.
#
# ``linestyle``
#
# This attribute can be set to make the contours ``‘solid’``, ``‘dashed’``, ``‘dotted’``,
# or ``‘dashdot’``. Other linestyles can be used and are found at:
# https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
#
# Default is ``‘solid’``.
#
# ``linewidth``
#
# This attribute alters the width of the contours (defaults to 1). Setting the value greater
# than 1 will yield a thicker contour line.
#
# ``linecolor``
#
# This attribute sets the color of the contour lines. Default is ``‘black’``. All colors from
# matplotlib are valid: https://matplotlib.org/3.1.0/_images/sphx_glr_named_colors_003.png
#
# ``plot_units``
#
# If you want to change the units for plotting purposes, add the string value of the units
# desired. For example, if you want to plot temperature in Celsius, then set this attribute
# to ``‘degC’``.
#
# ``scale``
#
# This attribute will scale the field by multiplying by the scale. For example, to
# scale vorticity to be whole values for contouring you could set the scale to 1e5, such that
# the data values will be multiplied by 10^5.
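#
# For example (illustrative), setting ``cntr.scale = 1e5`` with
# ``cntr.contours = list(range(-40, 41, 2))`` on a vorticity field would
# contour values in units of 10^-5 s^-1.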
#########################################################################
# FilledContourPlot()
# -------------------
#
# Works very similarly to ``ContourPlot()``, except that contours are filled using a colormap
# between contour values. All attributes for ``ContourPlot()`` work for color-filled plots,
# except for linestyle, linecolor, and linewidth. Additionally, there are the following
# attributes that work for color-filling:
#
# Attributes:
#
# ``colormap``
#
# This attribute is used to set a valid colormap from either Matplotlib or MetPy:
# Matplotlib Colormaps: https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html
# MetPy Colormaps: https://unidata.github.io/MetPy/v1.0/api/generated/metpy.plots.ctables.html
#
# ``colorbar``
#
# This attribute can be set to ``‘vertical’`` or ``‘horizontal’``, which is the location the
# colorbar will be plotted on the panel.
#
# ``image_range``
#
# A set of values indicating the minimum and maximum for the data being plotted. This
# attribute should be set as ``(min_value, max_value)``, where min_value and max_value are
# numeric values.
#########################################################################
# PanelContainer()
# ----------------
#
# Attributes:
#
# ``size``
#
# The size of the figure in inches (e.g., (10, 8))
#
# ``panels``
#
# A list collecting the panels to be plotted in the figure.
#
# ``show``
#
# Show the plot
#
# ``save``
#
# Save the figure using the Matplotlib arguments/keyword arguments
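#
# For example (illustrative, with a hypothetical filename):
# ``pc.save('300hPa_map.png', dpi=150)`` writes the figure to a PNG file,
# passing the keyword arguments through to Matplotlib.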
#########################################################################
# MapPanel()
# ----------
#
# Attributes:
#
# ``layout``
#
# The Matplotlib layout of the figure. For a single panel figure the setting should be
# ``(1, 1, 1)``
#
# ``projection``
#
# The projection can be set with the name of a default projection (``‘lcc’``, ``‘mer’``, or
# ``‘ps’``) or it can be set to a Cartopy projection.
#
# ``layers``
#
# This attribute will add map layers to identify boundaries or features to plot on the map.
# Valid layers are ``'borders'``, ``'coastline'``, ``'states'``, ``'lakes'``, ``'land'``,
# ``'ocean'``, ``'rivers'``, ``'counties'``.
#
# ``area``
#
# This attribute sets the geographical area of the panel. This can be set with a predefined
# name of an area including all US state postal abbreviations (e.g., ``‘us’``, ``‘natl’``,
# ``‘in’``, ``‘il’``, ``‘wi’``, ``‘mi’``, etc.) or a tuple value that corresponds to
# longitude/latitude box based on the projection of the map with the format
# ``(west-most longitude, east-most longitude, south-most latitude, north-most latitude)``.
# This tuple defines a box from the lower-left to the upper-right corner.
#
# ``title``
#
# This attribute sets a title for the panel.
#
# ``plots``
#
# A list collecting the observations to be plotted in the panel.
#########################################################################
# BarbPlot()
# ----------
#
# This plot class is used to add wind barbs to the plot.
#
# Attributes:
#
# ``data``
#
# This attribute must be set to the variable that contains the vector components to be plotted.
#
# ``field``
#
# This attribute is a list of the vector components to be plotted. For the typical
# meteorological case it would be the ``[‘u-component’, ‘v-component’]``.
#
# ``time``
#
# This attribute should be set to a datetime object, the same as for all other declarative
# classes.
#
# ``barblength``
#
# This attribute sets the length of the wind barbs. The default value is based on the
# font size.
#
# ``color``
#
# This attribute sets the color of the wind barbs, which can be any Matplotlib color.
# Default color is ``‘black’``.
#
# ``earth_relative``
#
# This attribute can be set to False if the vector components are grid relative (e.g., for NAM
# or NARR output)
#
# ``pivot``
#
# This attribute can be set to a string value about where the wind barb will pivot relative to
# the grid point. Possible values include ``‘tip’`` or ``‘middle’``. Default is ``‘middle’``.
########################################################################
# PlotObs()
# ---------
#
# This class is used to plot point observations from the surface or upper-air.
#
# Attributes:
#
# ``data``
#
# This attribute needs to be set to the DataFrame variable containing the fields that you
# desire to plot.
#
# ``fields``
#
# This attribute is a list of variable names from your DataFrame that you desire to plot at the
# given locations around the station model.
#
# ``level``
#
# For a surface plot this needs to be set to None.
#
# ``time``
#
# This attribute needs to be set to subset your data attribute for the time of the observations
# to be plotted. This needs to be a datetime object.
#
# ``locations``
#
# This attribute sets the location of the fields to be plotted around the surface station
# model. The default location is center ``(‘C’)``
#
# ``time_window``
#
# This attribute allows you to define a window for valid observations (e.g., 15 minutes on
# either side of the datetime object setting). This is important for surface data since actual
# observed times are not all exactly on the hour. If multiple observations exist in the defined
# window, the most recent observation is retained for plotting purposes.
#
# ``formats``
#
# This attribute sets a formatter for text or plotting symbols around the station model. For
# example, plotting mean sea-level pressure is done in a three-digit code and a formatter can
# be used to achieve that on the station plot.
#
# MSLP Formatter: ``lambda v: format(10 * v, '.0f')[-3:]``
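#
# For example (illustrative): an observation of 1013.2 hPa gives
# ``format(10 * 1013.2, '.0f')`` -> ``'10132'``, and the last three characters
# yield the coded value ``'132'``.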
#
# For plotting symbols use the available MetPy options through their name. Valid symbol formats
# are ``'current_weather'``, ``'sky_cover'``, ``'low_clouds'``, ``'mid_clouds'``,
# ``'high_clouds'``, and ``'pressure_tendency'``.
#
# ``colors``
#
# This attribute can change the color of the plotted observation. Default is ``‘black’``.
# Acceptable colors are those available through Matplotlib:
# https://matplotlib.org/3.1.1/_images/sphx_glr_named_colors_003.png
#
# ``vector_field``
#
# This attribute can be set to a list of wind component values for plotting
# (e.g., ``[‘uwind’, ‘vwind’]``)
#
# ``vector_field_color``
#
# Same as colors except only controls the color of the wind barbs. Default is ``‘black’``.
#
# ``reduce_points``
#
# This attribute can be set to a real number to reduce the number of stations that are plotted.
# Default value is zero (i.e., no points are removed from the plot).
|
Unidata/MetPy
|
tutorials/declarative_tutorial.py
|
Python
|
bsd-3-clause
| 21,805
|
[
"NetCDF"
] |
3e911a1e1a4e152d7e09f4390aafd48e3849e0367e2d37401daced7f869b916d
|
"""
LLVM pass that converts intrinsic into other math calls
"""
from __future__ import print_function, absolute_import
import llvmlite.llvmpy.core as lc
from llvmlite import ir
class _DivmodFixer(ir.Visitor):
def visit_Instruction(self, instr):
if instr.type == ir.IntType(64):
if instr.opname in ['srem', 'urem', 'sdiv', 'udiv']:
name = 'numba.math.{op}'.format(op=instr.opname)
fn = self.module.globals.get(name)
# Declare the function if it doesn't already exist
if fn is None:
opty = instr.type
sdivfnty = ir.FunctionType(opty, [opty, opty])
fn = ir.Function(self.module, sdivfnty, name=name)
# Replace the operation with a call to the builtin
repl = ir.CallInstr(parent=instr.parent, func=fn,
args=instr.operands, name=instr.name)
instr.parent.replace(instr, repl)
def fix_divmod(mod):
"""Replace division and reminder instructions to builtins calls
"""
_DivmodFixer().visit(mod)
class IntrinsicMapping(object):
def __init__(self, context, mapping=None, availintr=None):
"""
Args
----
mapping:
Optional. Intrinsic name to alternative implementation.
Default to global MAPPING
availintr:
Optional. Available intrinsic set.
Default to global AVAILINTR
"""
self.context = context
self.mapping = mapping or MAPPING
self.availintr = availintr or AVAILINTR
def run(self, module):
self.apply_mapping(module)
self.translate_intrinsic_to_cmath(module)
def apply_mapping(self, module):
modified = []
for fn in module.functions:
if fn.is_declaration and fn.name in self.mapping:
imp = self.mapping[fn.name]
imp(self.context, fn)
modified.append(fn)
# Rename all modified functions
for fn in modified:
fn.name = "numba." + fn.name
if __debug__:
module.verify()
def translate_intrinsic_to_cmath(self, module):
for fn in self._iter_unavail(module):
# Rename unavailable intrinsic to libc calls
# Ignore unrecognized llvm intrinsic
fn.name = INTR_TO_CMATH.get(fn.name, fn.name)
if __debug__:
module.verify()
def _iter_unavail(self, module):
for fn in module.functions:
if fn.is_declaration and fn.name.startswith('llvm.'):
if fn.name not in self.availintr:
yield fn
AVAILINTR = ()
INTR_TO_CMATH = {
"llvm.pow.f32": "powf",
"llvm.pow.f64": "pow",
"llvm.sin.f32": "sinf",
"llvm.sin.f64": "sin",
"llvm.cos.f32": "cosf",
"llvm.cos.f64": "cos",
"llvm.sqrt.f32": "sqrtf",
"llvm.sqrt.f64": "sqrt",
"llvm.exp.f32": "expf",
"llvm.exp.f64": "exp",
"llvm.log.f32": "logf",
"llvm.log.f64": "log",
"llvm.log10.f32": "log10f",
"llvm.log10.f64": "log10",
"llvm.fabs.f32": "fabsf",
"llvm.fabs.f64": "fabs",
"llvm.floor.f32": "floorf",
"llvm.floor.f64": "floor",
"llvm.ceil.f32": "ceilf",
"llvm.ceil.f64": "ceil",
"llvm.trunc.f32": "truncf",
"llvm.trunc.f64": "trunc",
}
OTHER_CMATHS = '''
tan
tanf
sinh
sinhf
cosh
coshf
tanh
tanhf
asin
asinf
acos
acosf
atan
atanf
atan2
atan2f
atan2_fixed
asinh
asinhf
acosh
acoshf
atanh
atanhf
expm1
expm1f
log1p
log1pf
log10
log10f
fmod
fmodf
round
roundf
'''.split()
INTR_MATH = frozenset(INTR_TO_CMATH.values()) | frozenset(OTHER_CMATHS)
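# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module). It
# builds a trivial llvmlite IR function containing a 64-bit sdiv and runs
# fix_divmod over it; module and function names here are arbitrary.
if __name__ == '__main__':
    demo_mod = ir.Module(name="divmod_demo")
    i64 = ir.IntType(64)
    demo_fn = ir.Function(demo_mod, ir.FunctionType(i64, [i64, i64]), name="div_demo")
    builder = ir.IRBuilder(demo_fn.append_basic_block("entry"))
    a, b = demo_fn.args
    builder.ret(builder.sdiv(a, b))
    fix_divmod(demo_mod)  # the sdiv becomes a call to "numba.math.sdiv"
    print(demo_mod)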
|
ssarangi/numba
|
numba/targets/intrinsics.py
|
Python
|
bsd-2-clause
| 3,712
|
[
"VisIt"
] |
57c314620b009da828a27e68b563d755d6d6f6f50b7b91ee2e25201d15eaf86b
|
import numpy as np
import shutil
import os
import mdtraj as md
from mdtraj.utils import enter_temp_directory
import tempfile
from distutils.spawn import find_executable
PACKMOL_PATH = find_executable("packmol")
HEADER_TEMPLATE = """
# Mixture
tolerance %f
filetype pdb
output %s
add_amber_ter
"""
BOX_TEMPLATE = """
structure %s
number %d
inside box 0. 0. 0. %f %f %f
end structure
"""
def pack_box(pdb_filenames_or_trajectories, n_molecules_list, tolerance=2.0, box_size=None):
"""Run packmol to generate a box containing a mixture of molecules.
Parameters
----------
pdb_filenames_or_trajectories : list({str, Trajectory})
List of pdb filenames or trajectories for each component of mixture. If this is
a list of trajectories, the trajectories will be saved to as
temporary files to be run in packmol.
n_molecules_list : list(int)
The number of molecules of each mixture component.
tolerance : float, optional, default=2.0
        The minimum spacing between molecules during packing. In ANGSTROMS!
box_size : float, optional
The size of the box to generate. In ANGSTROMS.
Default generates boxes that are very large for increased stability.
May require extra time for energy minimization and equilibration.
Returns
-------
trj : MDTraj.Trajectory
Single frame trajectory with mixture box.
Notes
-----
Be aware that MDTraj uses nanometers internally, but packmol uses angstrom
units. The present function takes `tolerance` and `box_size` in
angstrom units, but the output trajectory will have data in nm.
Also note that OpenMM is pretty picky about the format of unit cell input,
so use the example in tests/test_packmol.py to ensure that you do the right thing.
"""
assert len(pdb_filenames_or_trajectories) == len(n_molecules_list), "Must input same number of pdb filenames as num molecules"
pdb_filenames = []
for obj in pdb_filenames_or_trajectories:
try: # See if MDTraj Trajectory
tmp_filename = tempfile.mktemp(suffix=".pdb")
obj.save_pdb(tmp_filename)
pdb_filenames.append(tmp_filename)
except AttributeError: # Not an MDTraj Trajectory, assume filename
pdb_filenames.append(obj)
if PACKMOL_PATH is None:
raise(IOError("Packmol not found, cannot run pack_box()"))
output_filename = tempfile.mktemp(suffix=".pdb")
# approximating volume to initialize box
if box_size is None:
box_size = approximate_volume(pdb_filenames, n_molecules_list)
header = HEADER_TEMPLATE % (tolerance, output_filename)
for k in range(len(pdb_filenames)):
filename = pdb_filenames[k]
n_molecules = n_molecules_list[k]
header = header + BOX_TEMPLATE % (filename, n_molecules, box_size, box_size, box_size)
    print(header)
packmol_filename = "packmol_input.txt"
packmol_filename = tempfile.mktemp(suffix=".txt")
file_handle = open(packmol_filename, 'w')
file_handle.write(header)
file_handle.close()
os.system("%s < %s" % (PACKMOL_PATH, packmol_filename))
trj = md.load(output_filename)
assert trj.topology.n_chains == sum(n_molecules_list), "Packmol error: molecules missing from output"
#Begin hack to introduce bonds for the MISSING CONECT ENTRIES THAT PACKMOL FAILS TO WRITE
top, bonds = trj.top.to_dataframe()
trj_i = [md.load(filename) for filename in pdb_filenames]
bonds_i = [t.top.to_dataframe()[1] for t in trj_i]
offset = 0
bonds = []
for i in range(len(pdb_filenames)):
n_atoms = trj_i[i].n_atoms
for j in range(n_molecules_list[i]):
bonds.extend(bonds_i[i] + offset)
offset += n_atoms
bonds = np.array(bonds)
trj.top = md.Topology.from_dataframe(top, bonds)
trj.unitcell_vectors = np.array([np.eye(3)]) * box_size / 10.
return trj
def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):
"""Approximate the appropriate box size based on the number and types of atoms present.
Parameters
----------
pdb_filenames : list(str)
List of pdb filenames for each component of mixture.
n_molecules_list : list(int)
The number of molecules of each mixture component.
box_scaleup_factor : float, optional, default = 2.0
Factor by which the estimated box size is increased
Returns
-------
box_size : float
The size of the box to generate. In ANGSTROMS.
Notes
-----
By default, boxes are very large for increased stability, and therefore may
require extra time for energy minimization and equilibration.
"""
volume = 0.0 # in cubic angstroms
    for k, pdb_file in enumerate(pdb_filenames):
        molecule_volume = 0.0
        molecule_trj = md.load(pdb_file)
for atom in molecule_trj.topology.atoms:
if atom.element.symbol == 'H':
molecule_volume += 5.0 # approximated from bondi radius = 1.06 angstroms
else:
molecule_volume += 15.0 # approximated from bondi radius of carbon = 1.53 angstroms
volume += molecule_volume * n_molecules_list[k]
box_size = volume**(1.0/3.0) * box_scaleup_factor
return box_size
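# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module). The
# PDB filenames are hypothetical, and the packmol executable must be on the
# PATH for pack_box() to succeed.
if __name__ == "__main__":
    # 100 "solvent" molecules and 10 "solute" molecules in one cubic box
    trj = pack_box(["water.pdb", "ethanol.pdb"], [100, 10], tolerance=2.0)
    print(trj)  # single-frame mixture trajectory; unit cell lengths in nm
    trj.save("mixture.pdb")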
|
kyleabeauchamp/openmoltools
|
openmoltools/packmol.py
|
Python
|
gpl-2.0
| 5,446
|
[
"MDTraj",
"OpenMM"
] |
bb451c7a01968c4a8033d62f349df6fce128a017d1ddc0f6b47cc0c876f8d9c1
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtWidgets, QtCore
class PreferenceWidget(QtWidgets.QWidget):
"""
Holds a collection of preferences.
Signals:
saved: Emitted when preferences have been saved.
"""
saved = QtCore.pyqtSignal()
def __init__(self, plugins):
super(PreferenceWidget, self).__init__()
self._widgets = []
for plugin in plugins:
self._widgets += plugin.preferenceWidgets()
self.saved.connect(plugin.onPreferencesSaved)
if self._widgets:
layout = QtWidgets.QGridLayout()
for i, w in enumerate(self._widgets):
layout.addWidget(w.label(), i, 0)
layout.addWidget(w.widget(), i, 1)
self.setLayout(layout)
def save(self, settings):
"""
        Go through all the preferences and save them to disk
        Input:
            settings[QSettings]: settings to save to
"""
for w in self._widgets:
w.save(settings)
self.saved.emit()
def load(self, settings):
"""
        Initialize each widget from previously saved settings.
Input:
settings[QSettings]: settings to load from
"""
for w in self._widgets:
w.load(settings)
def widget(self, key):
"""
Get a preference widget corresponding to a given key.
Input:
key[str]: The key value that is stored in a QSettings
"""
for w in self._widgets:
if key == w.key():
return w
return None
def count(self):
"""
Returns the number of preferences widgets
"""
return len(self._widgets)
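# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module; assumes
# ``plugins`` is a list of peacock plugins providing preferenceWidgets()):
#
#     settings = QtCore.QSettings()
#     prefs = PreferenceWidget(plugins)
#     prefs.load(settings)   # initialize widgets from stored values
#     prefs.save(settings)   # persist edits and emit the ``saved`` signal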
|
nuclear-wizard/moose
|
python/peacock/base/PreferenceWidget.py
|
Python
|
lgpl-2.1
| 2,015
|
[
"MOOSE"
] |
4da46760217085074dca530eb05caa55cb41250d81e1f3210f43c0e025f1ff48
|
# -*- coding: utf-8 -*-
#
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright © 2014, 2019 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
#
# Device all lights off
#
import commands.ServerCommand as ServerCommand
import drivers.X10ControllerAdapter
#######################################################################
# Command handler for the all lights off command
class DeviceAllLightsOff(ServerCommand.ServerCommand):
#######################################################################
# Execute the "of" command.
def Execute(self, request):
result = drivers.X10ControllerAdapter.X10ControllerAdapter.DeviceAllLightsOff(request["args"]["house-code"])
# Generate a successful response
r = DeviceAllLightsOff.CreateResponse(request["request"])
r['result-code'] = drivers.X10ControllerAdapter.X10ControllerAdapter.GetLastErrorCode()
if result:
# r['error'] = "Command not fully implemented"
r['message'] = "Success"
else:
r['error'] = drivers.X10ControllerAdapter.X10ControllerAdapter.GetLastError()
r['message'] = "Failure"
return r
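# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative; the request shape is inferred from
# Execute() above, and a no-argument constructor is an assumption):
#
#     request = {"request": "DeviceAllLightsOff", "args": {"house-code": "A"}}
#     response = DeviceAllLightsOff().Execute(request)
#     print(response["message"])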
|
dhocker/athomepowerlineserver
|
commands/DeviceAllLightsOff.py
|
Python
|
gpl-3.0
| 1,451
|
[
"xTB"
] |
2aafcd297a76c38cc6d74eafb0b233377165cab24d917dd4a1ca632a812a3883
|
"""
@created_at 2014-06-09
@author Exequiel Fuentes <efulet@gmail.com>
@author Brian Keith <briankeithn@gmail.com>
"""
# The following standards are recommended:
# 1. For coding: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/)
# 2. For documentation: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/)
import traceback
import sys
import datetime
from lib import *
def check_version():
"""Python v2.7 es requerida por el curso, entonces verificamos la version"""
if sys.version_info[:2] != (2, 7):
raise Exception("Parece que python v2.7 no esta instalado en el sistema")
if __name__ == '__main__':
try:
        # Verify python version
check_version()
opciones = Opciones()
opts = opciones.parse(sys.argv[1:])
laberinto = Laberinto(opts)
        # todo: How about adding a measure of how many squares the algorithm had to traverse before finishing?
        # Plus a counter of the free squares in the maze; with that we could compute the
        # % of squares visited relative to the total traversable squares, which together with the time
        # would make quite an interesting metric, I think.
busqueda = None
        if opts.bea:
            print "Running breadth-first search..."
            busqueda = BusquedaEnAnchura(laberinto, opts)
        elif opts.bep:
            print "Running depth-first search..."
            busqueda = BusquedaEnProfundidad(laberinto, opts)
        elif opts.bcu:
            print "Running uniform-cost search..."
            busqueda = BusquedaCostoUniforme(laberinto, opts)
        elif opts.bae:
            print "Running A* search..."
            busqueda = BusquedaAEstrella(laberinto, opts)
        else:
            print "Running breadth-first search..."
            busqueda = BusquedaEnAnchura(laberinto, opts)
if not opts.tiempo:
despliegue = Despliegue(laberinto, busqueda, opts)
despliegue.comenzar()
else:
start = datetime.datetime.now()
busqueda.encontrar()
#print busqueda.reconstruir_camino()
print "La ejecucion tomo:", str(datetime.datetime.now() - start)
except Exception, err:
print traceback.format_exc()
finally:
sys.exit()
|
efulet/laberinto
|
laberinto/main.py
|
Python
|
mit
| 2,429
|
[
"Brian"
] |
9b1387a880c479497d2203cc2423a08f41621bde0a9a1fb2e3674ca367b7c4f5
|
#!/bin/python
"""
Test how sexual reproduction evolves toward 50% male and 50% female children.
Whatever the starting configuration, if there is no extinction,
the equilibrium looks like a Gaussian curve:
[0, 0, 0, 0, 0, 8, 55, 148, 298, 433, 448, 338, 197, 59, 16, 0, 0, 0, 0, 0, ]
"""
from generation_manager import GenerationManager
from animal import Animal
from genes.rate_of_male_gene import RateOfMaleGene
from genes.sd_gene import SDGene
from genes.anti_sd_gene import Anti_SDGene
Animal.init_genes_class([RateOfMaleGene])#, SDGene, Anti_SDGene])
generation_manager = GenerationManager(2000)
generation_manager.run()
|
dionisos2/evolve
|
sex_equilibrium.py
|
Python
|
gpl-2.0
| 629
|
[
"Gaussian"
] |
777b0b69a1325d31b4a6ed2e2801ebc1201940dc1ee89a5ace60d5abfe924858
|
"""
Basic tool parameters.
"""
import logging, string, sys, os, os.path
from elementtree.ElementTree import XML, Element
from galaxy import config, datatypes, util
from galaxy.web import form_builder
from galaxy.util.bunch import Bunch
from galaxy.util import string_as_bool, sanitize_param
from sanitize import ToolParameterSanitizer
import validation, dynamic_options
# For BaseURLToolParameter
from galaxy.web import url_for
import galaxy.model
log = logging.getLogger(__name__)
class ToolParameter( object ):
"""
Describes a parameter accepted by a tool. This is just a simple stub at the
moment but in the future should encapsulate more complex parameters (lists
of valid choices, validation logic, ...)
"""
def __init__( self, tool, param, context=None ):
self.tool = tool
self.refresh_on_change = False
self.refresh_on_change_values = []
self.name = param.get("name")
self.type = param.get("type")
self.label = util.xml_text(param, "label")
self.help = util.xml_text(param, "help")
self.sanitizer = param.find( "sanitizer" )
if self.sanitizer is not None:
self.sanitizer = ToolParameterSanitizer.from_element( self.sanitizer )
self.html = "no html set"
self.repeat = param.get("repeat", None)
self.condition = param.get( "condition", None )
self.validators = []
for elem in param.findall("validator"):
self.validators.append( validation.Validator.from_element( self, elem ) )
def get_label( self ):
"""Return user friendly name for the parameter"""
if self.label: return self.label
else: return self.name
def get_html_field( self, trans=None, value=None, other_values={} ):
raise TypeError( "Abstract Method" )
def get_html( self, trans=None, value=None, other_values={}):
"""
        Returns the html widget corresponding to the parameter.
        Optionally attempt to retain the current value specified by 'value'.
"""
return self.get_html_field( trans, value, other_values ).get_html()
def from_html( self, value, trans=None, other_values={} ):
"""
        Convert a value from an HTML POST into the parameter's preferred value
format.
"""
return value
def get_initial_value( self, trans, context ):
"""
Return the starting value of the parameter
"""
return None
def get_required_enctype( self ):
"""
If this parameter needs the form to have a specific encoding
return it, otherwise return None (indicating compatibility with
any encoding)
"""
return None
def get_dependencies( self ):
"""
Return the names of any other parameters this parameter depends on
"""
return []
def filter_value( self, value, trans=None, other_values={} ):
"""
Parse the value returned by the view into a form usable by the tool OR
raise a ValueError.
"""
return value
def to_string( self, value, app ):
"""Convert a value to a string representation suitable for persisting"""
return str( value )
def to_python( self, value, app ):
"""Convert a value created with to_string back to an object representation"""
return value
def value_to_basic( self, value, app ):
if isinstance( value, RuntimeValue ):
return { "__class__": "RuntimeValue" }
return self.to_string( value, app )
def value_from_basic( self, value, app, ignore_errors=False ):
# HACK: Some things don't deal with unicode well, psycopg problem?
if type( value ) == unicode:
value = str( value )
# Handle Runtime values (valid for any parameter?)
if isinstance( value, dict ) and '__class__' in value and value['__class__'] == "RuntimeValue":
return RuntimeValue()
# Delegate to the 'to_python' method
if ignore_errors:
try:
return self.to_python( value, app )
except:
return value
else:
return self.to_python( value, app )
def value_to_display_text( self, value, app ):
"""
Convert a value to a text representation suitable for displaying to
the user
"""
return value
def to_param_dict_string( self, value, other_values={} ):
value = str( value )
if self.tool is None or self.tool.options.sanitize:
if self.sanitizer:
value = self.sanitizer.sanitize_param( value )
else:
value = sanitize_param( value )
return value
def validate( self, value, history=None ):
for validator in self.validators:
validator.validate( value, history )
@classmethod
def build( cls, tool, param ):
"""Factory method to create parameter of correct type"""
param_type = param.get("type")
if not param_type or param_type not in parameter_types:
raise ValueError( "Unknown tool parameter type '%s'" % param_type )
else:
return parameter_types[param_type]( tool, param )
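# A minimal factory sketch (hedged; mirrors the doctests below, where passing
# None for the tool is acceptable when building standalone parameters):
#   elem = XML( '<param name="cutoff" type="integer" size="4" value="10" />' )
#   param = ToolParameter.build( None, elem )
#   assert isinstance( param, IntegerToolParameter )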
class TextToolParameter( ToolParameter ):
"""
Parameter that can take on any text value.
>>> p = TextToolParameter( None, XML( '<param name="blah" type="text" size="4" value="default" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="text" name="blah" size="4" value="default">
>>> print p.get_html( value="meh" )
<input type="text" name="blah" size="4" value="meh">
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.name = elem.get( 'name' )
self.size = elem.get( 'size' )
self.value = elem.get( 'value' )
self.area = string_as_bool( elem.get( 'area', False ) )
def get_html_field( self, trans=None, value=None, other_values={} ):
if self.area:
return form_builder.TextArea( self.name, self.size, value or self.value )
else:
return form_builder.TextField( self.name, self.size, value or self.value )
def get_initial_value( self, trans, context ):
return self.value
class IntegerToolParameter( TextToolParameter ):
"""
Parameter that takes an integer value.
>>> p = IntegerToolParameter( None, XML( '<param name="blah" type="integer" size="4" value="10" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="text" name="blah" size="4" value="10">
>>> type( p.from_html( "10" ) )
<type 'int'>
>>> type( p.from_html( "bleh" ) )
Traceback (most recent call last):
...
ValueError: An integer is required
"""
def __init__( self, tool, elem ):
TextToolParameter.__init__( self, tool, elem )
if self.value:
try:
int( self.value )
except:
raise ValueError( "An integer is required" )
elif self.value is None:
raise ValueError( "The settings for this field require a 'value' setting and optionally a default value which must be an integer" )
def from_html( self, value, trans=None, other_values={} ):
try:
return int( value )
except:
raise ValueError( "An integer is required" )
def to_python( self, value, app ):
return int( value )
def get_initial_value( self, trans, context ):
if self.value:
return int( self.value )
else:
return 0
class FloatToolParameter( TextToolParameter ):
"""
Parameter that takes a real number value.
    >>> p = FloatToolParameter( None, XML( '<param name="blah" type="float" size="4" value="3.141592" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="text" name="blah" size="4" value="3.141592">
>>> type( p.from_html( "36.1" ) )
<type 'float'>
>>> type( p.from_html( "bleh" ) )
Traceback (most recent call last):
...
ValueError: A real number is required
"""
def __init__( self, tool, elem ):
TextToolParameter.__init__( self, tool, elem )
if self.value:
try:
float( self.value )
except:
raise ValueError( "A real number is required" )
elif self.value is None:
raise ValueError( "The settings for this field require a 'value' setting and optionally a default value which must be a real number" )
def from_html( self, value, trans=None, other_values={} ):
try:
return float( value )
except:
raise ValueError( "A real number is required" )
def to_python( self, value, app ):
return float( value )
def get_initial_value( self, trans, context ):
try:
return float( self.value )
except:
return float( 0 )
class BooleanToolParameter( ToolParameter ):
"""
Parameter that takes one of two values.
>>> p = BooleanToolParameter( None, XML( '<param name="blah" type="boolean" checked="yes" truevalue="bulletproof vests" falsevalue="cellophane chests" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="checkbox" name="blah" value="true" checked><input type="hidden" name="blah" value="true">
>>> print p.from_html( ["true","true"] )
True
>>> print p.to_param_dict_string( True )
bulletproof vests
>>> print p.from_html( ["true"] )
False
>>> print p.to_param_dict_string( False )
cellophane chests
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.truevalue = elem.get( 'truevalue', 'true' )
self.falsevalue = elem.get( 'falsevalue', 'false' )
self.name = elem.get( 'name' )
self.checked = string_as_bool( elem.get( 'checked' ) )
def get_html_field( self, trans=None, value=None, other_values={} ):
checked = self.checked
if value:
checked = form_builder.CheckboxField.is_checked( value )
return form_builder.CheckboxField( self.name, checked )
def from_html( self, value, trans=None, other_values={} ):
return form_builder.CheckboxField.is_checked( value )
def to_python( self, value, app ):
return ( value == 'True' )
def get_initial_value( self, trans, context ):
return self.checked
def to_param_dict_string( self, value, other_values={} ):
if value:
return self.truevalue
else:
return self.falsevalue
class FileToolParameter( ToolParameter ):
"""
Parameter that takes an uploaded file as a value.
>>> p = FileToolParameter( None, XML( '<param name="blah" type="file"/>' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="file" name="blah">
>>> p = FileToolParameter( None, XML( '<param name="blah" type="file" ajax-upload="true"/>' ) )
>>> print p.get_html()
<input type="file" name="blah" galaxy-ajax-upload="true">
"""
def __init__( self, tool, elem ):
"""
Example: C{<param name="bins" type="file" />}
"""
ToolParameter.__init__( self, tool, elem )
self.name = elem.get( 'name' )
self.ajax = string_as_bool( elem.get( 'ajax-upload' ) )
def get_html_field( self, trans=None, value=None, other_values={} ):
return form_builder.FileField( self.name, ajax = self.ajax, value = value )
def from_html( self, value, trans=None, other_values={} ):
# Middleware or proxies may encode files in special ways (TODO: this
# should be pluggable)
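        # e.g. nginx_upload_module would post a dict like
        # { "name": "reads.fastq", "path": "/tmp/0000001" } (values are
        # illustrative; the keys are taken from the accesses below)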
if type( value ) == dict:
upload_store = self.tool.app.config.nginx_upload_store
assert upload_store, \
"Request appears to have been processed by nginx_upload_module \
but Galaxy is not configured to recognize it"
# Check that the file is in the right location
local_filename = os.path.abspath( value['path'] )
assert local_filename.startswith( upload_store ), \
"Filename provided by nginx is not in correct directory"
value = dict(
filename = value["name"],
local_filename = local_filename
)
return value
def get_required_enctype( self ):
"""
File upload elements require the multipart/form-data encoding
"""
return "multipart/form-data"
def to_string( self, value, app ):
if value in [ None, '' ]:
return None
elif isinstance( value, unicode ) or isinstance( value, str ):
return value
elif isinstance( value, dict ):
# or should we jsonify?
try:
return value['local_filename']
except:
return None
raise Exception( "FileToolParameter cannot be persisted" )
def to_python( self, value, app ):
if value is None:
return None
elif isinstance( value, unicode ) or isinstance( value, str ):
return value
else:
raise Exception( "FileToolParameter cannot be persisted" )
def get_initial_value( self, trans, context ):
return None
class HiddenToolParameter( ToolParameter ):
"""
    Parameter that is not displayed to the user; it carries a fixed value as a hidden form field.
FIXME: This seems hacky, parameters should only describe things the user
might change. It is used for 'initializing' the UCSC proxy tool
>>> p = HiddenToolParameter( None, XML( '<param name="blah" type="hidden" value="wax so rockin"/>' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="hidden" name="blah" value="wax so rockin">
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.name = elem.get( 'name' )
self.value = elem.get( 'value' )
def get_html_field( self, trans=None, value=None, other_values={} ):
return form_builder.HiddenField( self.name, self.value )
def get_initial_value( self, trans, context ):
return self.value
def get_label( self ):
return None
## This is clearly a HACK, parameters should only be used for things the user
## can change, there needs to be a different way to specify this. I'm leaving
## it for now to avoid breaking any tools.
class BaseURLToolParameter( ToolParameter ):
"""
    Returns a parameter that contains its value prefixed with the
    current server base URL. Used in all redirects.
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.name = elem.get( 'name' )
self.value = elem.get( 'value', '' )
def get_value( self, trans ):
# url = trans.request.base + self.value
url = url_for( self.value, qualified=True )
return url
def get_html_field( self, trans=None, value=None, other_values={} ):
return form_builder.HiddenField( self.name, self.get_value( trans ) )
def get_initial_value( self, trans, context ):
return self.value
def get_label( self ):
# BaseURLToolParameters are ultimately "hidden" parameters
return None
class SelectToolParameter( ToolParameter ):
"""
    Parameter that takes on one (or many) of a specific set of values.
>>> p = SelectToolParameter( None, XML(
... '''
... <param name="blah" type="select">
... <option value="x">I am X</option>
... <option value="y" selected="true">I am Y</option>
... <option value="z">I am Z</option>
... </param>
... ''' ) )
>>> print p.name
blah
>>> print p.get_html()
<select name="blah" last_selected_value="y">
<option value="x">I am X</option>
<option value="y" selected>I am Y</option>
<option value="z">I am Z</option>
</select>
>>> print p.get_html( value="z" )
<select name="blah" last_selected_value="z">
<option value="x">I am X</option>
<option value="y">I am Y</option>
<option value="z" selected>I am Z</option>
</select>
>>> print p.filter_value( "y" )
y
>>> p = SelectToolParameter( None, XML(
... '''
... <param name="blah" type="select" multiple="true">
... <option value="x">I am X</option>
... <option value="y" selected="true">I am Y</option>
... <option value="z" selected="true">I am Z</option>
... </param>
... ''' ) )
>>> print p.name
blah
>>> print p.get_html()
<select name="blah" multiple last_selected_value="z">
<option value="x">I am X</option>
<option value="y" selected>I am Y</option>
<option value="z" selected>I am Z</option>
</select>
>>> print p.get_html( value=["x","y"])
<select name="blah" multiple last_selected_value="y">
<option value="x" selected>I am X</option>
<option value="y" selected>I am Y</option>
<option value="z">I am Z</option>
</select>
>>> print p.to_param_dict_string( ["y", "z"] )
y,z
>>> p = SelectToolParameter( None, XML(
... '''
... <param name="blah" type="select" multiple="true" display="checkboxes">
... <option value="x">I am X</option>
... <option value="y" selected="true">I am Y</option>
... <option value="z" selected="true">I am Z</option>
... </param>
... ''' ) )
>>> print p.name
blah
>>> print p.get_html()
<div class="checkUncheckAllPlaceholder" checkbox_name="blah"></div>
<div><input type="checkbox" name="blah" value="x">I am X</div>
<div class="odd_row"><input type="checkbox" name="blah" value="y" checked>I am Y</div>
<div><input type="checkbox" name="blah" value="z" checked>I am Z</div>
>>> print p.get_html( value=["x","y"])
<div class="checkUncheckAllPlaceholder" checkbox_name="blah"></div>
<div><input type="checkbox" name="blah" value="x" checked>I am X</div>
<div class="odd_row"><input type="checkbox" name="blah" value="y" checked>I am Y</div>
<div><input type="checkbox" name="blah" value="z">I am Z</div>
>>> print p.to_param_dict_string( ["y", "z"] )
y,z
"""
def __init__( self, tool, elem, context=None ):
ToolParameter.__init__( self, tool, elem )
self.multiple = string_as_bool( elem.get( 'multiple', False ) )
self.display = elem.get( 'display', None )
self.separator = elem.get( 'separator', ',' )
self.legal_values = set()
# TODO: the <dynamic_options> tag is deprecated and should be replaced with the <options> tag.
self.dynamic_options = elem.get( "dynamic_options", None )
options = elem.find( 'options' )
if options is None:
self.options = None
else:
self.options = dynamic_options.DynamicOptions( options, self )
for validator in self.options.validators:
self.validators.append( validator )
if self.dynamic_options is None and self.options is None:
self.static_options = list()
for index, option in enumerate( elem.findall( "option" ) ):
value = option.get( "value" )
self.legal_values.add( value )
selected = string_as_bool( option.get( "selected", False ) )
self.static_options.append( ( option.text, value, selected ) )
self.is_dynamic = ( ( self.dynamic_options is not None ) or ( self.options is not None ) )
def get_options( self, trans, other_values ):
if self.options:
return self.options.get_options( trans, other_values )
elif self.dynamic_options:
return eval( self.dynamic_options, self.tool.code_namespace, other_values )
else:
return self.static_options
def get_legal_values( self, trans, other_values ):
def _get_UnvalidatedValue_value( value ):
if isinstance( value, UnvalidatedValue ):
return value.value
return value
if self.options:
return map( _get_UnvalidatedValue_value, set( v for _, v, _ in self.options.get_options( trans, other_values ) ) )
elif self.dynamic_options:
return set( v for _, v, _ in eval( self.dynamic_options, self.tool.code_namespace, other_values ) )
else:
return self.legal_values
def get_html_field( self, trans=None, value=None, context={} ):
# Dynamic options are not yet supported in workflow, allow
# specifying the value as text for now.
if self.need_late_validation( trans, context ):
assert isinstance( value, UnvalidatedValue )
value = value.value
if self.multiple:
if value is None:
value = ""
else:
value = "\n".join( value )
return form_builder.TextArea( self.name, value=value )
else:
return form_builder.TextField( self.name, value=(value or "") )
if value is not None:
if not isinstance( value, list ): value = [ value ]
field = form_builder.SelectField( self.name, self.multiple, self.display, self.refresh_on_change, refresh_on_change_values = self.refresh_on_change_values )
options = self.get_options( trans, context )
for text, optval, selected in options:
if isinstance( optval, UnvalidatedValue ):
optval = optval.value
text = "%s (unvalidated)" % text
if value:
selected = ( optval in value )
field.add_option( text, optval, selected )
return field
def from_html( self, value, trans=None, context={} ):
if self.need_late_validation( trans, context ):
if self.multiple:
                # While it is generally allowed that a select value can be '',
                # we do not allow this to be the case in a dynamically
                # generated multiple select list being set in workflow building
                # mode; instead we treat '' as 'no option selected' (None).
if value == '':
value = None
else:
value = value.split( "\n" )
return UnvalidatedValue( value )
legal_values = self.get_legal_values( trans, context )
if isinstance( value, list ):
if not(self.repeat):
assert self.multiple, "Multiple values provided but parameter is not expecting multiple values"
rval = []
for v in value:
v = util.restore_text( v )
if v not in legal_values:
raise ValueError( "An invalid option was selected, please verify" )
rval.append( v )
return rval
else:
value = util.restore_text( value )
if value not in legal_values:
raise ValueError( "An invalid option was selected, please verify" )
return value
def to_param_dict_string( self, value, other_values={} ):
if value is None:
return "None"
if isinstance( value, list ):
if not( self.repeat ):
assert self.multiple, "Multiple values provided but parameter is not expecting multiple values"
value = map( str, value )
else:
value = str( value )
if self.tool is None or self.tool.options.sanitize:
if self.sanitizer:
value = self.sanitizer.sanitize_param( value )
else:
value = sanitize_param( value )
if isinstance( value, list ):
value = self.separator.join( value )
return value
def value_to_basic( self, value, app ):
if isinstance( value, UnvalidatedValue ):
return { "__class__": "UnvalidatedValue", "value": value.value }
elif isinstance( value, RuntimeValue ):
# Need to handle runtime value's ourself since delegating to the
# parent method causes the value to be turned into a string, which
# breaks multiple selection
return { "__class__": "RuntimeValue" }
return value
def value_from_basic( self, value, app, ignore_errors=False ):
if isinstance( value, dict ) and value["__class__"] == "UnvalidatedValue":
return UnvalidatedValue( value["value"] )
        return super( SelectToolParameter, self ).value_from_basic( value, app, ignore_errors=ignore_errors )
def need_late_validation( self, trans, context ):
"""
Determine whether we need to wait to validate this parameters value
given the current state. For parameters with static options this is
always false (can always validate immediately). For parameters with
dynamic options, we need to check whether the other parameters which
determine what options are valid have been set. For the old style
dynamic options which do not specify dependencies, this is always true
        (must validate at runtime).
"""
# Option list is statically defined, never need late validation
if not self.is_dynamic:
return False
# Old style dynamic options, no dependency information so there isn't
# a lot we can do: if we're dealing with workflows, have to assume
# late validation no matter what.
if self.dynamic_options is not None:
return ( trans is None or trans.workflow_building_mode )
# If we got this far, we can actually look at the dependencies
# to see if their values will not be available until runtime.
for dep_name in self.get_dependencies():
dep_value = context[ dep_name ]
# Dependency on a dataset that does not yet exist
if isinstance( dep_value, DummyDataset ):
return True
# Dependency on a value that has not been checked
if isinstance( dep_value, UnvalidatedValue ):
return True
# Dependency on a value that does not yet exist
if isinstance( dep_value, RuntimeValue ):
return True
        # Dynamic, but all dependencies are known and have values
return False
def get_initial_value( self, trans, context ):
# More working around dynamic options for workflow
if self.need_late_validation( trans, context ):
# Really the best we can do?
return UnvalidatedValue( None )
options = list( self.get_options( trans, context ) )
value = [ optval for _, optval, selected in options if selected ]
if len( value ) == 0:
if not self.multiple and options:
# Nothing selected, but not a multiple select, with some values,
# so we have to default to something (the HTML form will anyway)
# TODO: deal with optional parameters in a better way
value = options[0][1]
else:
value = None
elif len( value ) == 1:
value = value[0]
return value
def value_to_display_text( self, value, app ):
if isinstance( value, UnvalidatedValue ):
suffix = "\n(value not yet validated)"
value = value.value
else:
suffix = ""
if not isinstance( value, list ):
value = [ value ]
# FIXME: Currently only translating values back to labels if they
# are not dynamic
if self.is_dynamic:
rval = map( str, value )
else:
options = list( self.static_options )
rval = []
for t, v, s in options:
if v in value:
rval.append( t )
return "\n".join( rval ) + suffix
def get_dependencies( self ):
"""
Get the *names* of the other params this param depends on.
"""
if self.options:
return self.options.get_dependency_names()
else:
return []
class GenomeBuildParameter( SelectToolParameter ):
"""
Select list that sets the last used genome build for the current history
as "selected".
    >>> # Create a mock transaction with 'hg17' as the current build
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch( history=Bunch( genome_build='hg17' ), db_builds=util.dbnames )
>>> p = GenomeBuildParameter( None, XML(
... '''
... <param name="blah" type="genomebuild" />
... ''' ) )
>>> print p.name
blah
>>> # hg17 should be selected by default
>>> print p.get_html( trans ) # doctest: +ELLIPSIS
<select name="blah" last_selected_value="hg17">
<option value="?">unspecified (?)</option>
...
<option value="hg18">Human Mar. 2006 (hg18)</option>
<option value="hg17" selected>Human May 2004 (hg17)</option>
...
</select>
>>> # If the user selected something else already, that should be used
>>> # instead
>>> print p.get_html( trans, value='hg18' ) # doctest: +ELLIPSIS
<select name="blah" last_selected_value="hg18">
<option value="?">unspecified (?)</option>
...
<option value="hg18" selected>Human Mar. 2006 (hg18)</option>
<option value="hg17">Human May 2004 (hg17)</option>
...
</select>
>>> print p.filter_value( "hg17" )
hg17
"""
def get_options( self, trans, other_values ):
last_used_build = trans.history.genome_build
for dbkey, build_name in trans.db_builds:
yield build_name, dbkey, ( dbkey == last_used_build )
def get_legal_values( self, trans, other_values ):
return set( dbkey for dbkey, _ in trans.db_builds )
class ColumnListParameter( SelectToolParameter ):
"""
Select list that consists of either the total number of columns or only
those columns that contain numerical values in the associated DataToolParameter.
    # TODO: we need better testing here, but not sure how to associate a DataToolParameter with a ColumnListParameter
# from a twill perspective...
>>> # Mock up a history (not connected to database)
>>> from galaxy.model import History, HistoryDatasetAssociation
>>> from galaxy.util.bunch import Bunch
>>> from galaxy.model.mapping import context as sa_session
>>> hist = History()
>>> sa_session.add( hist )
>>> sa_session.flush()
>>> hist.add_dataset( HistoryDatasetAssociation( id=1, extension='interval', create_dataset=True, sa_session=sa_session ) )
>>> dtp = DataToolParameter( None, XML( '<param name="blah" type="data" format="interval"/>' ) )
>>> print dtp.name
blah
>>> clp = ColumnListParameter ( None, XML( '<param name="numerical_column" type="data_column" data_ref="blah" numerical="true"/>' ) )
>>> print clp.name
numerical_column
"""
def __init__( self, tool, elem ):
SelectToolParameter.__init__( self, tool, elem )
self.tool = tool
self.numerical = string_as_bool( elem.get( "numerical", False ))
self.force_select = string_as_bool( elem.get( "force_select", True ))
self.accept_default = string_as_bool( elem.get( "accept_default", False ))
self.data_ref = elem.get( "data_ref", None )
self.is_dynamic = True
def from_html( self, value, trans=None, context={} ):
"""
Label convention prepends column number with a 'c', but tool uses the integer. This
removes the 'c' when entered into a workflow.
"""
if type( value ) == list:
# We have a multi-select list
new_value = []
for item in value:
if item.startswith( "c" ):
item = item[1:]
new_value.append( item )
value = new_value
elif value and value.startswith( "c" ):
value = value[1:]
return super( ColumnListParameter, self ).from_html( value, trans, context )
def get_column_list( self, trans, other_values ):
"""
Generate a select list containing the columns of the associated
dataset (if found).
"""
column_list = []
# No value indicates a configuration error, the named DataToolParameter
        # must precede this parameter in the config
assert self.data_ref in other_values, "Value for associated DataToolParameter not found"
# Get the value of the associated DataToolParameter (a dataset)
dataset = other_values[ self.data_ref ]
# Check if a dataset is selected
if dataset is None or dataset == '':
# NOTE: Both of these values indicate that no dataset is selected.
# However, 'None' indicates that the dataset is optional
# while '' indicates that it is not. Currently column
# parameters do not work well with optional datasets
return column_list
# Generate options
if not dataset.metadata.columns:
if self.accept_default:
column_list.append( '1' )
return column_list
if not self.force_select:
column_list.append( 'None' )
if self.numerical:
            # If numerical was requested, filter columns based on metadata
for i, col in enumerate( dataset.metadata.column_types ):
if col == 'int' or col == 'float':
column_list.append( str( i + 1 ) )
else:
for i in range(0, dataset.metadata.columns):
column_list.append( str( i + 1 ) )
return column_list
def get_options( self, trans, other_values ):
options = []
column_list = self.get_column_list( trans, other_values )
if len( column_list ) > 0 and not self.force_select:
options.append( ('?', 'None', False) )
for col in column_list:
if col != 'None':
options.append( ( 'c' + col, col, False ) )
return options
def get_legal_values( self, trans, other_values ):
return set( self.get_column_list( trans, other_values ) )
def get_dependencies( self ):
return [ self.data_ref ]
def need_late_validation( self, trans, context ):
if super( ColumnListParameter, self ).need_late_validation( trans, context ):
return True
# Get the selected dataset if selected
dataset = context[ self.data_ref ]
if dataset:
# Check if the dataset does not have the expected metadata for columns
if not dataset.metadata.columns:
# Only allow late validation if the dataset is not yet ready
# (since we have reason to expect the metadata to be ready eventually)
if dataset.is_pending:
return True
# No late validation
return False
class DrillDownSelectToolParameter( SelectToolParameter ):
"""
Parameter that takes on one (or many) of a specific set of values.
    Creates a hierarchical select menu, which allows users to 'drill down' a tree-like set of options.
>>> p = DrillDownSelectToolParameter( None, XML(
... '''
... <param name="some_name" type="drill_down" display="checkbox" hierarchy="recurse" multiple="true">
... <options>
... <option name="Heading 1" value="heading1">
... <option name="Option 1" value="option1"/>
... <option name="Option 2" value="option2"/>
... <option name="Heading 1" value="heading1">
... <option name="Option 3" value="option3"/>
... <option name="Option 4" value="option4"/>
... </option>
... </option>
... <option name="Option 5" value="option5"/>
... </options>
... </param>
... ''' ) )
>>> print p.get_html()
<div><ul class="toolParameterExpandableCollapsable">
<li><span class="toolParameterExpandableCollapsable">[+]</span><input type="checkbox" name="some_name" value="heading1"">Heading 1
<ul class="toolParameterExpandableCollapsable" default_state="collapsed">
<li><input type="checkbox" name="some_name" value="option1"">Option 1
</li>
<li><input type="checkbox" name="some_name" value="option2"">Option 2
</li>
<li><span class="toolParameterExpandableCollapsable">[+]</span><input type="checkbox" name="some_name" value="heading1"">Heading 1
<ul class="toolParameterExpandableCollapsable" default_state="collapsed">
<li><input type="checkbox" name="some_name" value="option3"">Option 3
</li>
<li><input type="checkbox" name="some_name" value="option4"">Option 4
</li>
</ul>
</li>
</ul>
</li>
<li><input type="checkbox" name="some_name" value="option5"">Option 5
</li>
</ul></div>
>>> p = DrillDownSelectToolParameter( None, XML(
... '''
... <param name="some_name" type="drill_down" display="radio" hierarchy="recurse" multiple="false">
... <options>
... <option name="Heading 1" value="heading1">
... <option name="Option 1" value="option1"/>
... <option name="Option 2" value="option2"/>
... <option name="Heading 1" value="heading1">
... <option name="Option 3" value="option3"/>
... <option name="Option 4" value="option4"/>
... </option>
... </option>
... <option name="Option 5" value="option5"/>
... </options>
... </param>
... ''' ) )
>>> print p.get_html()
<div><ul class="toolParameterExpandableCollapsable">
<li><span class="toolParameterExpandableCollapsable">[+]</span><input type="radio" name="some_name" value="heading1"">Heading 1
<ul class="toolParameterExpandableCollapsable" default_state="collapsed">
<li><input type="radio" name="some_name" value="option1"">Option 1
</li>
<li><input type="radio" name="some_name" value="option2"">Option 2
</li>
<li><span class="toolParameterExpandableCollapsable">[+]</span><input type="radio" name="some_name" value="heading1"">Heading 1
<ul class="toolParameterExpandableCollapsable" default_state="collapsed">
<li><input type="radio" name="some_name" value="option3"">Option 3
</li>
<li><input type="radio" name="some_name" value="option4"">Option 4
</li>
</ul>
</li>
</ul>
</li>
<li><input type="radio" name="some_name" value="option5"">Option 5
</li>
</ul></div>
>>> print p.options
[{'selected': False, 'name': 'Heading 1', 'value': 'heading1', 'options': [{'selected': False, 'name': 'Option 1', 'value': 'option1', 'options': []}, {'selected': False, 'name': 'Option 2', 'value': 'option2', 'options': []}, {'selected': False, 'name': 'Heading 1', 'value': 'heading1', 'options': [{'selected': False, 'name': 'Option 3', 'value': 'option3', 'options': []}, {'selected': False, 'name': 'Option 4', 'value': 'option4', 'options': []}]}]}, {'selected': False, 'name': 'Option 5', 'value': 'option5', 'options': []}]
"""
def __init__( self, tool, elem, context=None ):
def recurse_option_elems( cur_options, option_elems ):
for option_elem in option_elems:
selected = string_as_bool( option_elem.get( 'selected', False ) )
cur_options.append( { 'name':option_elem.get( 'name' ), 'value': option_elem.get( 'value'), 'options':[], 'selected':selected } )
recurse_option_elems( cur_options[-1]['options'], option_elem.findall( 'option' ) )
ToolParameter.__init__( self, tool, elem )
self.multiple = string_as_bool( elem.get( 'multiple', False ) )
self.display = elem.get( 'display', None )
self.hierarchy = elem.get( 'hierarchy', 'exact' ) #exact or recurse
self.separator = elem.get( 'separator', ',' )
from_file = elem.get( 'from_file', None )
if from_file:
if not os.path.isabs( from_file ):
from_file = os.path.join( tool.app.config.tool_data_path, from_file )
elem = XML( "<root>%s</root>" % open( from_file ).read() )
self.is_dynamic = False
self.dynamic_options = None #backwards compatibility with SelectToolParameter's old dynamic options and late validation
self.options = []
self.filtered = {}
        if elem.find( 'filter' ) is not None:
self.is_dynamic = True
for filter in elem.findall( 'filter' ):
#currently only filtering by metadata key matching input file is allowed
if filter.get( 'type' ) == 'data_meta':
if filter.get( 'data_ref' ) not in self.filtered:
self.filtered[filter.get( 'data_ref' )] = {}
self.filtered[filter.get( 'data_ref' )][filter.get( 'meta_key' )] = { 'value': filter.get( 'value' ), 'options':[] }
recurse_option_elems( self.filtered[filter.get( 'data_ref' )][filter.get( 'meta_key' )]['options'], filter.find( 'options' ).findall( 'option' ) )
else:
recurse_option_elems( self.options, elem.find( 'options' ).findall( 'option' ) )
def get_options( self, trans=None, value=None, other_values={} ):
if self.is_dynamic:
options = []
for filter_key, filter_value in self.filtered.iteritems():
dataset = other_values[filter_key]
if dataset.__class__.__name__.endswith( "DatasetFilenameWrapper" ): #this is a bad way to check for this, but problems importing class ( due to circular imports? )
dataset = dataset.dataset
if dataset:
for meta_key, meta_dict in filter_value.iteritems():
if dataset.metadata.spec[meta_key].param.to_string( dataset.metadata.get( meta_key ) ) == meta_dict['value']:
options.extend( meta_dict['options'] )
return options
return self.options
def get_legal_values( self, trans, other_values ):
def recurse_options( legal_values, options ):
for option in options:
legal_values.append( option['value'] )
recurse_options( legal_values, option['options'] )
legal_values = []
recurse_options( legal_values, self.get_options( trans=trans, other_values=other_values ) )
return legal_values
def get_html( self, trans=None, value=None, other_values={} ):
"""
        Returns the html widget corresponding to the parameter.
        Optionally attempt to retain the current value specified by 'value'.
"""
return self.get_html_field( trans, value, other_values ).get_html()
def get_html_field( self, trans=None, value=None, other_values={} ):
# Dynamic options are not yet supported in workflow, allow
# specifying the value as text for now.
if self.need_late_validation( trans, other_values ):
if value is not None:
assert isinstance( value, UnvalidatedValue )
value = value.value
if self.multiple:
if value is None:
value = ""
else:
value = "\n".join( value )
return form_builder.TextArea( self.name, value=value )
else:
return form_builder.TextField( self.name, value=(value or "") )
return form_builder.DrillDownField( self.name, self.multiple, self.display, self.refresh_on_change, self.get_options( trans, value, other_values ), value, refresh_on_change_values = self.refresh_on_change_values )
def from_html( self, value, trans=None, other_values={} ):
if self.need_late_validation( trans, other_values ):
if self.multiple:
if value == '': #No option selected
value = None
else:
value = value.split( "\n" )
return UnvalidatedValue( value )
if not value: return None
if not isinstance( value, list ):
value = [value]
if not( self.repeat ) and len( value ) > 1:
assert self.multiple, "Multiple values provided but parameter is not expecting multiple values"
rval = []
for val in value:
if val not in self.get_legal_values( trans, other_values ): raise ValueError( "An invalid option was selected, please verify" )
rval.append( util.restore_text( val ) )
return rval
def to_param_dict_string( self, value, other_values={} ):
def get_options_list( value ):
def get_base_option( value, options ):
for option in options:
if value == option['value']:
return option
rval = get_base_option( value, option['options'] )
if rval: return rval
return None #not found
def recurse_option( option_list, option ):
if not option['options']:
option_list.append( option['value'] )
else:
for opt in option['options']:
recurse_option( option_list, opt )
rval = []
recurse_option( rval, get_base_option( value, self.get_options( other_values = other_values ) ) )
return rval or [value]
if value is None: return "None"
rval = []
if self.hierarchy == "exact":
rval = value
else:
for val in value:
options = get_options_list( val )
rval.extend( options )
if len( rval ) > 1:
if not( self.repeat ):
assert self.multiple, "Multiple values provided but parameter is not expecting multiple values"
rval = self.separator.join( rval )
if self.tool is None or self.tool.options.sanitize:
if self.sanitizer:
rval = self.sanitizer.sanitize_param( rval )
else:
rval = sanitize_param( rval )
return rval
def get_initial_value( self, trans, context ):
def recurse_options( initial_values, options ):
for option in options:
if option['selected']:
initial_values.append( option['value'] )
recurse_options( initial_values, option['options'] )
# More working around dynamic options for workflow
if self.need_late_validation( trans, context ):
# Really the best we can do?
return UnvalidatedValue( None )
initial_values = []
recurse_options( initial_values, self.get_options( trans=trans, other_values=context ) )
return initial_values
def value_to_display_text( self, value, app ):
def get_option_display( value, options ):
for option in options:
if value == option['value']:
return option['name']
rval = get_option_display( value, option['options'] )
if rval: return rval
return None #not found
if isinstance( value, UnvalidatedValue ):
suffix = "\n(value not yet validated)"
value = value.value
else:
suffix = ""
if not value:
value = []
elif not isinstance( value, list ):
value = [ value ]
# FIXME: Currently only translating values back to labels if they
# are not dynamic
if self.is_dynamic:
if value:
if isinstance( value, list ):
rval = value
else:
rval = [ value ]
else:
rval = []
else:
rval = []
for val in value:
rval.append( get_option_display( val, self.options ) or val )
return "\n".join( rval ) + suffix
def get_dependencies( self ):
"""
Get the *names* of the other params this param depends on.
"""
return self.filtered.keys()
class DummyDataset( object ):
pass
class DataToolParameter( ToolParameter ):
# TODO, Nate: Make sure the following unit tests appropriately test the dataset security
# components. Add as many additional tests as necessary.
"""
    Parameter that takes on one (or many) of a specific set of values.
TODO: There should be an alternate display that allows single selects to be
displayed as radio buttons and multiple selects as a set of checkboxes
TODO: The following must be fixed to test correctly for the new security_check tag in the DataToolParameter ( the last test below is broken )
    Nate's next pass at the dataset security stuff will dramatically alter this anyway.
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
# Add metadata validator
if not string_as_bool( elem.get( 'no_validation', False ) ):
self.validators.append( validation.MetadataValidator() )
# Build tuple of classes for supported data formats
formats = []
self.extensions = elem.get( 'format', 'data' ).split( "," )
for extension in self.extensions:
extension = extension.strip()
if tool is None:
#This occurs for things such as unit tests
import galaxy.datatypes.registry
formats.append( galaxy.datatypes.registry.Registry().get_datatype_by_extension( extension.lower() ).__class__ )
else:
formats.append( tool.app.datatypes_registry.get_datatype_by_extension( extension.lower() ).__class__ )
self.formats = tuple( formats )
self.multiple = string_as_bool( elem.get( 'multiple', False ) )
# Optional DataToolParameters are used in tools like GMAJ and LAJ
self.optional = string_as_bool( elem.get( 'optional', False ) )
# TODO: Enhance dynamic options for DataToolParameters. Currently,
# only the special case key='build' of type='data_meta' is
# a valid filter
options = elem.find( 'options' )
if options is None:
self.options = None
else:
self.options = dynamic_options.DynamicOptions( options, self )
self.is_dynamic = self.options is not None
def get_html_field( self, trans=None, value=None, other_values={} ):
filter_value = None
if self.options:
try:
filter_value = self.options.get_options( trans, other_values )[0][0]
except IndexError:
pass #no valid options
assert trans is not None, "DataToolParameter requires a trans"
history = trans.get_history()
assert history is not None, "DataToolParameter requires a history"
if value is not None:
if type( value ) != list:
value = [ value ]
field = form_builder.SelectField( self.name, self.multiple, None, self.refresh_on_change, refresh_on_change_values = self.refresh_on_change_values )
# CRUCIAL: the dataset_collector function needs to be local to DataToolParameter.get_html_field()
def dataset_collector( hdas, parent_hid ):
user, roles = trans.get_user_and_roles()
for i, hda in enumerate( hdas ):
if len( hda.name ) > 30:
hda_name = '%s..%s' % ( hda.name[:17], hda.name[-11:] )
else:
hda_name = hda.name
if parent_hid is not None:
hid = "%s.%d" % ( parent_hid, i + 1 )
else:
hid = str( hda.hid )
if not hda.dataset.state in [galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED] and \
hda.visible and \
trans.app.security_agent.can_access_dataset( roles, hda.dataset ):
# If we are sending data to an external application, then we need to make sure there are no roles
                    # associated with the dataset that restrict its access from "public".
if self.tool and self.tool.tool_type == 'data_destination' and not trans.app.security_agent.dataset_is_public( hda.dataset ):
continue
if self.options and hda.get_dbkey() != filter_value:
continue
if isinstance( hda.datatype, self.formats):
selected = ( value and ( hda in value ) )
field.add_option( "%s: %s" % ( hid, hda_name ), hda.id, selected )
else:
target_ext, converted_dataset = hda.find_conversion_destination( self.formats, converter_safe = self.converter_safe( other_values, trans ) )
if target_ext:
if converted_dataset:
hda = converted_dataset
if not trans.app.security_agent.can_access_dataset( roles, hda.dataset ):
continue
selected = ( value and ( hda in value ) )
field.add_option( "%s: (as %s) %s" % ( hid, target_ext, hda_name ), hda.id, selected )
# Also collect children via association object
dataset_collector( hda.children, hid )
dataset_collector( history.active_datasets, None )
some_data = bool( field.options )
if some_data:
if value is None or len( field.options ) == 1:
# Ensure that the last item is always selected
a, b, c = field.options[-1]
if self.optional:
field.options[-1] = a, b, False
else:
field.options[-1] = a, b, True
if self.optional:
if not value:
field.add_option( "Selection is Optional", 'None', True )
else:
field.add_option( "Selection is Optional", 'None', False )
return field
def get_initial_value( self, trans, context ):
"""
NOTE: This is wasteful since dynamic options and dataset collection
        happen twice (here and when generating HTML).
"""
# Can't look at history in workflow mode
if trans.workflow_building_mode:
return DummyDataset()
assert trans is not None, "DataToolParameter requires a trans"
history = trans.get_history()
assert history is not None, "DataToolParameter requires a history"
if self.optional:
return None
most_recent_dataset = [None]
filter_value = None
if self.options:
try:
filter_value = self.options.get_options( trans, context )[0][0]
except IndexError:
pass #no valid options
def dataset_collector( datasets ):
def is_convertable( dataset ):
target_ext, converted_dataset = dataset.find_conversion_destination( self.formats, converter_safe = self.converter_safe( None, trans ) )
if target_ext is not None:
return True
return False
for i, data in enumerate( datasets ):
if data.visible and not data.deleted and data.state not in [data.states.ERROR, data.states.DISCARDED] and ( isinstance( data.datatype, self.formats) or is_convertable( data ) ):
if self.options and data.get_dbkey() != filter_value:
continue
most_recent_dataset[0] = data
# Also collect children via association object
dataset_collector( data.children )
dataset_collector( history.datasets )
most_recent_dataset = most_recent_dataset.pop()
if most_recent_dataset is not None:
return most_recent_dataset
else:
return ''
def from_html( self, value, trans, other_values={} ):
# Can't look at history in workflow mode, skip validation and such,
# although, this should never be called in workflow mode right?
if trans.workflow_building_mode:
return None
if not value:
raise ValueError( "History does not include a dataset of the required format / build" )
if value in [None, "None"]:
return None
if isinstance( value, list ):
return [ trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( v ) for v in value ]
elif isinstance( value, trans.app.model.HistoryDatasetAssociation ):
return value
else:
return trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( value )
def to_string( self, value, app ):
if value is None or isinstance( value, str ):
return value
elif isinstance( value, DummyDataset ):
return None
return value.id
def to_python( self, value, app ):
# Both of these values indicate that no dataset is selected. However, 'None'
# indicates that the dataset is optional, while '' indicates that it is not.
if value is None or value == '' or value == 'None':
return value
return app.model.context.query( app.model.HistoryDatasetAssociation ).get( int( value ) )
def to_param_dict_string( self, value, other_values={} ):
if value is None: return "None"
return value.file_name
def value_to_display_text( self, value, app ):
if value:
return "%s: %s" % ( value.hid, value.name )
else:
return "No dataset"
def get_dependencies( self ):
"""
Get the *names* of the other params this param depends on.
"""
if self.options:
return self.options.get_dependency_names()
else:
return []
def converter_safe( self, other_values, trans ):
if self.tool is None or self.tool.has_multiple_pages or not hasattr( trans, 'workflow_building_mode' ) or trans.workflow_building_mode:
return False
if other_values is None:
return True # we don't know other values, so we can't check, assume ok
converter_safe = [True]
def visitor( prefix, input, value, parent = None ):
if isinstance( input, SelectToolParameter ) and self.name in input.get_dependencies():
if input.is_dynamic and ( input.dynamic_options or ( not input.dynamic_options and not input.options ) or not input.options.converter_safe ):
converter_safe[0] = False #This option does not allow for conversion, i.e. uses contents of dataset file to generate options
self.tool.visit_inputs( other_values, visitor )
return False not in converter_safe
# class RawToolParameter( ToolParameter ):
# """
# Completely nondescript parameter, HTML representation is provided as text
# contents.
#
# >>> p = RawToolParameter( None, XML(
# ... '''
# ... <param name="blah" type="raw">
# ... <![CDATA[<span id="$name">Some random stuff</span>]]>
# ... </param>
# ... ''' ) )
# >>> print p.name
# blah
# >>> print p.get_html().strip()
# <span id="blah">Some random stuff</span>
# """
# def __init__( self, tool, elem ):
# ToolParameter.__init__( self, tool, elem )
# self.template = string.Template( elem.text )
# def get_html( self, prefix="" ):
# context = dict( self.__dict__ )
# context.update( dict( prefix=prefix ) )
# return self.template.substitute( context )
# class HistoryIDParameter( ToolParameter ):
# """
# Parameter that takes a name value, makes history.id available.
#
# FIXME: This is a hack (esp. if hidden params are a hack) but in order to
# have the history accessible at the job level, it is necessary
# I also probably wrote this docstring test thing wrong.
#
# >>> from galaxy.model import History
# >>> from galaxy.util.bunch import Bunch
# >>> hist = History( id=1 )
# >>> p = HistoryIDParameter( None, XML( '<param name="blah" type="history"/>' ) )
# >>> print p.name
# blah
# >>> html_string = '<input type="hidden" name="blah" value="%d">' % hist.id
# >>> assert p.get_html( trans=Bunch( history=hist ) ) == html_string
# """
# def __init__( self, tool, elem ):
# ToolParameter.__init__( self, tool, elem )
# self.name = elem.get('name')
# def get_html( self, trans, value=None, other_values={} ):
# assert trans.history is not None, "HistoryIDParameter requires a history"
# self.html = form_builder.HiddenField( self.name, trans.history.id ).get_html()
# return self.html
parameter_types = dict( text = TextToolParameter,
integer = IntegerToolParameter,
float = FloatToolParameter,
boolean = BooleanToolParameter,
genomebuild = GenomeBuildParameter,
select = SelectToolParameter,
data_column = ColumnListParameter,
hidden = HiddenToolParameter,
baseurl = BaseURLToolParameter,
file = FileToolParameter,
data = DataToolParameter,
drill_down = DrillDownSelectToolParameter )
class UnvalidatedValue( object ):
"""
Wrapper to mark a value that has not been validated
"""
def __init__( self, value ):
self.value = value
def __str__( self ):
return str( self.value )
class RuntimeValue( object ):
"""
Wrapper to note a value that is not yet set, but will be required at
runtime.
"""
pass
|
volpino/Yeps-EURAC
|
lib/galaxy/tools/parameters/basic.py
|
Python
|
mit
| 62,561
|
[
"Galaxy"
] |
7b30e2ac493c2c8ae72e9c784d790c66828f0d831cd366b877dcaf040eebbc9a
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from unittest import TestCase, main
from skbio.io import PhylipFormatError
from skbio.io.phylip import _alignment_to_phylip
from skbio import Alignment, DNA, RNA
from skbio.util import get_data_path
class AlignmentWriterTests(TestCase):
def setUp(self):
# ids all same length, seqs longer than 10 chars
dna_3_seqs = Alignment([
DNA('..ACC-GTTGG..', metadata={'id': "d1"}),
DNA('TTACCGGT-GGCC', metadata={'id': "d2"}),
DNA('.-ACC-GTTGC--', metadata={'id': "d3"})])
# id lengths from 0 to 10, with mixes of numbers, characters, and
# spaces. sequence characters are a mix of cases and gap characters.
# sequences are shorter than 10 chars
variable_length_ids = Alignment([
RNA('.-ACGU', metadata={'id': ''}),
RNA('UGCA-.', metadata={'id': 'a'}),
RNA('.ACGU-', metadata={'id': 'bb'}),
RNA('ugca-.', metadata={'id': '1'}, validate=False),
RNA('AaAaAa', metadata={'id': 'abcdefghij'}, validate=False),
RNA('GGGGGG', metadata={'id': 'ab def42ij'})])
# sequences with 20 chars = exactly two chunks of size 10
two_chunks = Alignment([
DNA('..ACC-GTTGG..AATGC.C', metadata={'id': 'foo'}),
DNA('TTACCGGT-GGCCTA-GCAT', metadata={'id': 'bar'})])
# single sequence with more than two chunks
single_seq_long = Alignment([
DNA('..ACC-GTTGG..AATGC.C----', metadata={'id': 'foo'})])
# single sequence with only a single character (minimal writeable
# alignment)
single_seq_short = Alignment([DNA('-', metadata={'id': ''})])
# alignments that can be written in phylip format
self.objs = [dna_3_seqs, variable_length_ids, two_chunks,
single_seq_long, single_seq_short]
self.fps = map(get_data_path,
['phylip_dna_3_seqs', 'phylip_variable_length_ids',
'phylip_two_chunks', 'phylip_single_seq_long',
'phylip_single_seq_short'])
# alignments that cannot be written in phylip format, paired with their
# expected error message regexps
self.invalid_objs = [
# no seqs
(Alignment([]), 'one sequence'),
# no positions
(Alignment([DNA('', metadata={'id': "d1"}),
DNA('', metadata={'id': "d2"})]), 'one position'),
# ids too long
(Alignment([RNA('ACGU', metadata={'id': "foo"}),
RNA('UGCA', metadata={'id': "alongsequenceid"})]),
'10.*alongsequenceid')
]
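        # For reference (hedged summary, not asserted directly here): phylip
        # output pads each sequence id to 10 characters, which is why the
        # over-long id above is expected to raise PhylipFormatError.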
def test_write(self):
for fp, obj in zip(self.fps, self.objs):
fh = StringIO()
_alignment_to_phylip(obj, fh)
obs = fh.getvalue()
fh.close()
with open(fp, 'U') as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_write_invalid_alignment(self):
for invalid_obj, error_msg_regexp in self.invalid_objs:
fh = StringIO()
with self.assertRaisesRegexp(PhylipFormatError, error_msg_regexp):
_alignment_to_phylip(invalid_obj, fh)
# ensure nothing was written to the file before the error was
# thrown. TODO remove this check when #674 is resolved
obs = fh.getvalue()
fh.close()
self.assertEqual(obs, '')
if __name__ == '__main__':
main()
|
Achuth17/scikit-bio
|
skbio/io/tests/test_phylip.py
|
Python
|
bsd-3-clause
| 3,973
|
[
"scikit-bio"
] |
b076609f49c324cb4ab4164664f8242a163f5aebc3d5ff4162793289c408f5d7
|
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that walk through Course Builder pages."""
__author__ = 'Sean Lip'
import __builtin__
import copy
import cStringIO
import csv
import datetime
import logging
import os
import re
import shutil
import sys
import time
import urllib
import zipfile
import actions
from actions import assert_contains
from actions import assert_contains_all_of
from actions import assert_does_not_contain
from actions import assert_equals
from controllers_review import PeerReviewControllerTest
from controllers_review import PeerReviewDashboardAdminTest
from review_stats import PeerReviewAnalyticsTest
from webtest.app import AppError
import appengine_config
from common import crypto
from common.utils import Namespace
from controllers import lessons
from controllers import sites
from controllers import utils
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import entities
from models import jobs
from models import models
from models import transforms
from models import vfs
from models.courses import Course
import modules.admin.admin
from modules.announcements.announcements import AnnouncementEntity
import modules.oeditor.oeditor
from tools.etl import etl
from tools.etl import etl_lib
from tools.etl import examples
from tools.etl import remote
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# A number of data files in a test course.
COURSE_FILE_COUNT = 70
# Datastore entities that hold parts of course content. Delay-loaded.
COURSE_CONTENT_ENTITY_FILES = [
'QuestionEntity.json', 'QuestionGroupEntity.json']
# There is an expectation in our tests of automatic import of data/*.csv files,
# which is achieved below by selecting an alternative factory method.
courses.Course.create_new_default_course = (
courses.Course.custom_new_default_course_for_test)
def _add_data_entity(app_context, entity_type, data):
"""Insert new entity into a given namespace."""
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(app_context.get_namespace_name())
new_object = entity_type()
new_object.data = data
new_object.put()
return new_object
finally:
namespace_manager.set_namespace(old_namespace)
def _assert_identical_data_entity_exists(app_context, test_object):
"""Checks a specific entity exists in a given namespace."""
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(app_context.get_namespace_name())
entity_class = test_object.__class__
existing_object = entity_class().get(test_object.key())
assert existing_object
assert existing_object.data == test_object.data
assert existing_object.key().id() == test_object.key().id()
finally:
namespace_manager.set_namespace(old_namespace)
class InfrastructureTest(actions.TestBase):
"""Test core infrastructure classes agnostic to specific user roles."""
def test_value_cached_in_one_namespace_invisible_in_another(self):
"""Value cached in one namespace is not visible in another."""
# set value and check it's visible in one namespace
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace('test_memcache_manager_a')
models.MemcacheManager.set('foo', 'bar')
assert 'bar' == models.MemcacheManager.get('foo')
finally:
namespace_manager.set_namespace(old_namespace)
# check same value is not visible in another namespace
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace('test_memcache_manager_b')
assert not models.MemcacheManager.get('foo')
finally:
namespace_manager.set_namespace(old_namespace)
# check same value is not visible in default namespace
assert not models.MemcacheManager.get('foo')
# check same value is not visible in None namespace
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(None)
assert not models.MemcacheManager.get('foo')
finally:
namespace_manager.set_namespace(old_namespace)
# set value and check it's visible in default namespace
models.MemcacheManager.set('foo', 'bar')
assert 'bar' == models.MemcacheManager.get('foo')
# check value is not visible in another namespace
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace('test_memcache_manager_c')
assert not models.MemcacheManager.get('foo')
finally:
namespace_manager.set_namespace(old_namespace)
def test_response_content_type_is_application_json_in_utf_8(self):
response = self.testapp.get(
'/rest/config/item?key=gcb_config_update_interval_sec')
self.assertEqual(
'application/javascript; charset=utf-8',
response.headers['Content-Type'])
def test_xsrf_token_manager(self):
"""Test XSRF token operations."""
# os.environ['AUTH_DOMAIN'] = 'test_domain'
# os.environ['APPLICATION_ID'] = 'test app'
        # Issue and verify an anonymous user token.
action = 'test-action'
token = utils.XsrfTokenManager.create_xsrf_token(action)
assert '/' in token
assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
# Impersonate real user.
os.environ['USER_EMAIL'] = 'test_email'
os.environ['USER_ID'] = 'test_id'
        # Issue and verify a real user token.
action = 'test-action'
token = utils.XsrfTokenManager.create_xsrf_token(action)
assert '/' in token
assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
# Check forged time stamp invalidates token.
parts = token.split('/')
assert len(parts) == 2
forgery = '%s/%s' % (long(parts[0]) + 1000, parts[1])
assert forgery != token
assert not utils.XsrfTokenManager.is_xsrf_token_valid(forgery, action)
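        # (The token evidently has the form '<issue-time>/<digest>': bumping
        # the timestamp without recomputing the digest must fail validation.)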
# Check token properly expires.
action = 'test-action'
time_in_the_past = long(
time.time() - utils.XsrfTokenManager.XSRF_TOKEN_AGE_SECS)
# pylint: disable=protected-access
old_token = utils.XsrfTokenManager._create_token(
action, time_in_the_past)
assert not utils.XsrfTokenManager.is_xsrf_token_valid(old_token, action)
# Clean up.
# del os.environ['APPLICATION_ID']
# del os.environ['AUTH_DOMAIN']
del os.environ['USER_EMAIL']
del os.environ['USER_ID']
def test_import_course(self):
"""Tests importing one course into another."""
# Setup courses.
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
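        # (By inference from usage here and in the tests below, each rubric
        # entry reads course:<url-prefix>:<location>:<namespace>, with empty
        # pieces taking defaults; this is the observed pattern, not a spec.)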
# Validate the courses before import.
all_courses = sites.get_all_courses()
dst_app_context_a = all_courses[0]
dst_app_context_b = all_courses[1]
src_app_context = all_courses[2]
dst_course_a = courses.Course(None, app_context=dst_app_context_a)
dst_course_b = courses.Course(None, app_context=dst_app_context_b)
src_course = courses.Course(None, app_context=src_app_context)
assert not dst_course_a.get_units()
assert not dst_course_b.get_units()
assert 12 == len(src_course.get_units())
# Import 1.2 course into 1.3.
errors = []
src_course_out, dst_course_out_a = dst_course_a.import_from(
src_app_context, errors)
if errors:
raise Exception(errors)
assert len(
src_course.get_units()) == len(src_course_out.get_units())
assert len(
src_course_out.get_units()) == len(dst_course_out_a.get_units())
# add dependent entities so we can check they make it through the import
dependents = []
for dependent_entity_class in courses.COURSE_CONTENT_ENTITIES:
dependents.append(_add_data_entity(
dst_course_out_a.app_context,
dependent_entity_class, 'Test "%s"' % str(
dependent_entity_class)))
assert dependents
# Import 1.3 course into 1.3.
errors = []
src_course_out_a, dst_course_out_b = dst_course_b.import_from(
dst_app_context_a, errors)
if errors:
raise Exception(errors)
assert src_course_out_a.get_units() == dst_course_out_b.get_units()
for dependent in dependents:
_assert_identical_data_entity_exists(
dst_course_out_b.app_context, dependent)
# Test delete.
units_to_delete = dst_course_a.get_units()
deleted_count = 0
for unit in units_to_delete:
assert dst_course_a.delete_unit(unit)
deleted_count += 1
dst_course_a.save()
assert deleted_count == len(units_to_delete)
assert not dst_course_a.get_units()
assert not dst_course_a.app_context.fs.list(os.path.join(
dst_course_a.app_context.get_home(), 'assets/js/'))
# Clean up.
sites.reset_courses()
def test_import_13_assessment(self):
# Setup courses.
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
all_courses = sites.get_all_courses()
src_app_context = all_courses[0]
dst_app_context = all_courses[1]
src_course = courses.Course(None, app_context=src_app_context)
dst_course = courses.Course(None, app_context=dst_app_context)
# Add an assessment
src_assessment = src_course.add_assessment()
self.assertEqual('A', src_assessment.type)
src_assessment.title = 'Test Assessment'
src_assessment.release_date = '2015-01-01 12:15'
src_assessment.now_available = True
src_assessment.properties = {'key': 'value'}
src_assessment.weight = 3.14
src_assessment.html_content = 'content'
src_assessment.html_check_answers = 'check'
src_assessment.html_review_form = 'review'
src_assessment.workflow_yaml = 'a: 3'
src_course.save()
errors = []
dst_course.import_from(src_app_context, errors)
self.assertEqual(0, len(errors))
dst_assessment = dst_course.find_unit_by_id(src_assessment.unit_id)
self.assertEqual(src_assessment.__dict__, dst_assessment.__dict__)
def test_import_13_lesson(self):
# Setup courses.
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
all_courses = sites.get_all_courses()
src_app_context = all_courses[0]
dst_app_context = all_courses[1]
src_course = courses.Course(None, app_context=src_app_context)
dst_course = courses.Course(None, app_context=dst_app_context)
# Add a unit
src_unit = src_course.add_unit()
src_lesson = src_course.add_lesson(src_unit)
src_lesson.title = 'Test Lesson'
src_lesson.scored = True
src_lesson.objectives = 'objectives'
src_lesson.video = 'video'
src_lesson.notes = 'notes'
src_lesson.duration = 'duration'
src_lesson.now_available = True
src_lesson.has_activity = True
src_lesson.activity_title = 'activity title'
src_lesson.activity_listed = False
src_lesson.properties = {'key': 'value'}
src_course.save()
errors = []
dst_course.import_from(src_app_context, errors)
self.assertEqual(0, len(errors))
dst_unit = dst_course.find_unit_by_id(src_unit.unit_id)
dst_lesson = dst_course.find_lesson_by_id(
dst_unit, src_lesson.lesson_id)
self.assertEqual(src_lesson.__dict__, dst_lesson.__dict__)
def test_create_new_course(self):
"""Tests creating a new course."""
# Setup courses.
sites.setup_courses('course:/test::ns_test, course:/:/')
# Add several units.
course = courses.Course(None, app_context=sites.get_all_courses()[0])
link = course.add_link()
unit = course.add_unit()
assessment = course.add_assessment()
course.save()
assert course.find_unit_by_id(link.unit_id)
assert course.find_unit_by_id(unit.unit_id)
assert course.find_unit_by_id(assessment.unit_id)
assert 3 == len(course.get_units())
assert assessment.unit_id == 3
# Check unit can be found.
assert unit == course.find_unit_by_id(unit.unit_id)
assert not course.find_unit_by_id(999)
# Update unit.
unit.title = 'Unit Title'
unit.labels = 'foo, bar'
course.update_unit(unit)
course.save()
assert 'Unit Title' == course.find_unit_by_id(unit.unit_id).title
assert 'foo, bar' == course.find_unit_by_id(unit.unit_id).labels
# Update link.
link.title = 'Link Title'
link.href = 'http://google.com'
link.labels = 'bar, baz'
course.update_unit(link)
course.save()
assert 'Link Title' == course.find_unit_by_id(link.unit_id).title
assert 'http://google.com' == course.find_unit_by_id(link.unit_id).href
assert 'bar, baz' == course.find_unit_by_id(link.unit_id).labels
# Update assessment.
assessment.title = 'Asmt. Title'
assessment.labels = 'a, b, c'
course.update_unit(assessment)
course.save()
assert 'Asmt. Title' == course.find_unit_by_id(assessment.unit_id).title
assert 'a, b, c' == course.find_unit_by_id(assessment.unit_id).labels
# Update assessment from file.
assessment_content = open(os.path.join(
appengine_config.BUNDLE_ROOT,
'assets/js/assessment-Pre.js'), 'rb').readlines()
assessment_content = u''.join(assessment_content)
errors = []
course.set_assessment_content(assessment, assessment_content, errors)
course.save()
assert not errors
assessment_content_stored = course.app_context.fs.get(os.path.join(
course.app_context.get_home(),
course.get_assessment_filename(assessment.unit_id)))
assert assessment_content == assessment_content_stored
# Test adding lessons.
lesson_a = course.add_lesson(unit)
lesson_b = course.add_lesson(unit)
lesson_c = course.add_lesson(unit)
course.save()
assert [lesson_a, lesson_b, lesson_c] == course.get_lessons(
unit.unit_id)
assert lesson_c.lesson_id == 6
# Reorder lessons.
new_order = [
{'id': link.unit_id},
{
'id': unit.unit_id,
'lessons': [
{'id': lesson_b.lesson_id},
{'id': lesson_a.lesson_id},
{'id': lesson_c.lesson_id}]},
{'id': assessment.unit_id}]
course.reorder_units(new_order)
course.save()
assert [lesson_b, lesson_a, lesson_c] == course.get_lessons(
unit.unit_id)
# Move lesson to another unit.
another_unit = course.add_unit()
course.move_lesson_to(lesson_b, another_unit)
course.save()
assert [lesson_a, lesson_c] == course.get_lessons(unit.unit_id)
assert [lesson_b] == course.get_lessons(another_unit.unit_id)
course.delete_unit(another_unit)
course.save()
# Make the course available.
with actions.OverriddenEnvironment({'course': {'now_available': True}}):
# Test public/private assessment.
assessment_url = (
'/test/' + course.get_assessment_filename(assessment.unit_id))
assert not assessment.now_available
response = self.get(assessment_url, expect_errors=True)
assert_equals(response.status_int, 403)
assessment = course.find_unit_by_id(assessment.unit_id)
assessment.now_available = True
course.update_unit(assessment)
course.save()
response = self.get(assessment_url)
assert_equals(response.status_int, 200)
# Check delayed assessment deletion.
course.delete_unit(assessment)
response = self.get(assessment_url) # note: file is still available
assert_equals(response.status_int, 200)
course.save()
response = self.get(assessment_url, expect_errors=True)
assert_equals(response.status_int, 404)
# Test public/private activity.
lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
lesson_a.now_available = False
lesson_a.has_activity = True
course.update_lesson(lesson_a)
errors = []
course.set_activity_content(lesson_a, u'var activity = []', errors)
assert not errors
activity_url = (
'/test/' + course.get_activity_filename(
None, lesson_a.lesson_id))
response = self.get(activity_url, expect_errors=True)
assert_equals(response.status_int, 403)
lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
lesson_a.now_available = True
course.update_lesson(lesson_a)
course.save()
response = self.get(activity_url)
assert_equals(response.status_int, 200)
# Check delayed activity.
course.delete_lesson(lesson_a)
response = self.get(activity_url) # note: file is still available
assert_equals(response.status_int, 200)
course.save()
response = self.get(activity_url, expect_errors=True)
assert_equals(response.status_int, 404)
# Test deletes removes all child objects.
course.delete_unit(link)
course.delete_unit(unit)
assert not course.delete_unit(assessment)
course.save()
assert not course.get_units()
assert not course.app_context.fs.list(os.path.join(
course.app_context.get_home(), 'assets/js/'))
# Clean up.
sites.reset_courses()
    # pylint: disable=too-many-statements
def test_unit_lesson_not_available(self):
"""Tests that unavailable units and lessons behave correctly."""
# Setup a new course.
sites.setup_courses('course:/test::ns_test, course:/:/')
self.base = '/test'
config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
app_context = sites.get_all_courses()[0]
course = courses.Course(None, app_context=app_context)
# Add a unit that is not available.
unit_1 = course.add_unit()
unit_1.now_available = False
lesson_1_1 = course.add_lesson(unit_1)
lesson_1_1.title = 'Lesson 1.1'
course.update_unit(unit_1)
# Add a unit with some lessons available and some lessons not available.
unit_2 = course.add_unit()
unit_2.now_available = True
lesson_2_1 = course.add_lesson(unit_2)
lesson_2_1.title = 'Lesson 2.1'
lesson_2_1.now_available = False
lesson_2_2 = course.add_lesson(unit_2)
lesson_2_2.title = 'Lesson 2.2'
lesson_2_2.now_available = True
course.update_unit(unit_2)
# Add a unit with all lessons not available.
unit_3 = course.add_unit()
unit_3.now_available = True
lesson_3_1 = course.add_lesson(unit_3)
lesson_3_1.title = 'Lesson 3.1'
lesson_3_1.now_available = False
course.update_unit(unit_3)
# Add a unit that is available.
unit_4 = course.add_unit()
unit_4.now_available = True
lesson_4_1 = course.add_lesson(unit_4)
lesson_4_1.title = 'Lesson 4.1'
lesson_4_1.now_available = True
course.update_unit(unit_4)
# Add an available unit with no lessons.
unit_5 = course.add_unit()
unit_5.now_available = True
course.update_unit(unit_5)
course.save()
assert [lesson_1_1] == course.get_lessons(unit_1.unit_id)
assert [lesson_2_1, lesson_2_2] == course.get_lessons(unit_2.unit_id)
assert [lesson_3_1] == course.get_lessons(unit_3.unit_id)
# Make the course available.
with actions.OverriddenEnvironment({
'course': {
'now_available': True,
'browsable': False}}):
private_tag = 'id="lesson-title-private"'
# Confirm private units are suppressed for user out of session
response = self.get('preview')
assert_equals(response.status_int, 200)
assert_does_not_contain('Unit 1 - New Unit', response.body)
assert_contains('Unit 2 - New Unit', response.body)
assert_contains('Unit 3 - New Unit', response.body)
assert_contains('Unit 4 - New Unit', response.body)
assert_contains('Unit 5 - New Unit', response.body)
# Simulate a student traversing the course.
email = 'test_unit_lesson_not_available@example.com'
name = 'Test Unit Lesson Not Available'
actions.login(email, is_admin=False)
actions.register(self, name)
# Accessing a unit that is not available redirects to the main page.
response = self.get('unit?unit=%s' % unit_1.unit_id)
assert_equals(response.status_int, 302)
response = self.get('unit?unit=%s' % unit_2.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.1', response.body)
assert_contains('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('unit?unit=%s&lesson=%s' % (
unit_2.unit_id, lesson_2_2.lesson_id))
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.2', response.body)
assert_does_not_contain(
'This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('unit?unit=%s' % unit_3.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 3.1', response.body)
assert_contains('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('unit?unit=%s' % unit_4.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 4.1', response.body)
assert_does_not_contain(
'This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('unit?unit=%s' % unit_5.unit_id)
assert_equals(response.status_int, 200)
assert_does_not_contain('Lesson', response.body)
assert_contains('This unit has no content.', response.body)
assert_does_not_contain(private_tag, response.body)
actions.logout()
# Simulate an admin traversing the course.
email = 'test_unit_lesson_not_available@example.com_admin'
name = 'Test Unit Lesson Not Available Admin'
actions.login(email, is_admin=True)
actions.register(self, name)
# The course admin can access a unit that is not available.
response = self.get('unit?unit=%s' % unit_1.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 1.1', response.body)
response = self.get('unit?unit=%s' % unit_2.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.1', response.body)
assert_does_not_contain(
'This lesson is not available.', response.body)
assert_contains(private_tag, response.body)
response = self.get('unit?unit=%s&lesson=%s' % (
unit_2.unit_id, lesson_2_2.lesson_id))
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.2', response.body)
assert_does_not_contain(
'This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('unit?unit=%s' % unit_3.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 3.1', response.body)
assert_does_not_contain(
'This lesson is not available.', response.body)
assert_contains(private_tag, response.body)
response = self.get('unit?unit=%s' % unit_4.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 4.1', response.body)
assert_does_not_contain(
'This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('unit?unit=%s' % unit_5.unit_id)
assert_equals(response.status_int, 200)
assert_does_not_contain('Lesson', response.body)
assert_contains('This unit has no content.', response.body)
assert_does_not_contain(private_tag, response.body)
actions.logout()
    # pylint: disable=too-many-statements
def test_custom_assessments(self):
"""Tests that custom assessments are evaluated correctly."""
# Setup a new course.
sites.setup_courses('course:/test::ns_test, course:/:/')
self.base = '/test'
self.namespace = 'ns_test'
config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
app_context = sites.get_all_courses()[0]
course = courses.Course(None, app_context=app_context)
email = 'test_assessments@google.com'
name = 'Test Assessments'
assessment_1 = course.add_assessment()
assessment_1.title = 'first'
assessment_1.now_available = True
assessment_1.weight = 0
assessment_2 = course.add_assessment()
assessment_2.title = 'second'
assessment_2.now_available = True
assessment_2.weight = 0
course.save()
assert course.find_unit_by_id(assessment_1.unit_id)
assert course.find_unit_by_id(assessment_2.unit_id)
assert 2 == len(course.get_units())
# Make the course available.
with actions.OverriddenEnvironment({'course': {'now_available': True}}):
first = {'score': '1.00', 'assessment_type': assessment_1.unit_id}
second = {'score': '3.00', 'assessment_type': assessment_2.unit_id}
# Update assessment 1.
assessment_1_content = open(os.path.join(
appengine_config.BUNDLE_ROOT,
'assets/js/assessment-Pre.js'), 'rb').readlines()
assessment_1_content = u''.join(assessment_1_content)
errors = []
course.set_assessment_content(
assessment_1, assessment_1_content, errors)
course.save()
assert not errors
# Update assessment 2.
assessment_2_content = open(os.path.join(
appengine_config.BUNDLE_ROOT,
'assets/js/assessment-Mid.js'), 'rb').readlines()
assessment_2_content = u''.join(assessment_2_content)
errors = []
course.set_assessment_content(
assessment_2, assessment_2_content, errors)
course.save()
assert not errors
# Register.
actions.login(email)
actions.register(self, name)
# Submit assessment 1.
actions.submit_assessment(self, assessment_1.unit_id, first)
student = (
models.StudentProfileDAO.get_enrolled_student_by_email_for(
email, app_context))
student_scores = course.get_all_scores(student)
assert len(student_scores) == 2
assert student_scores[0]['id'] == str(assessment_1.unit_id)
assert student_scores[0]['score'] == 1
assert student_scores[0]['title'] == 'first'
assert student_scores[0]['weight'] == 0
assert student_scores[1]['id'] == str(assessment_2.unit_id)
assert student_scores[1]['score'] == 0
assert student_scores[1]['title'] == 'second'
assert student_scores[1]['weight'] == 0
# The overall score is None if there are no weights assigned to any
# of the assessments.
overall_score = course.get_overall_score(student)
assert overall_score is None
# View the student profile page.
response = self.get('student/home')
assert_does_not_contain('Overall course score', response.body)
# Add a weight to the first assessment.
assessment_1.weight = 10
overall_score = course.get_overall_score(student)
assert overall_score == 1
# Submit assessment 2.
actions.submit_assessment(self, assessment_2.unit_id, second)
# We need to reload the student instance, because its properties
# have changed.
student = (
models.StudentProfileDAO.get_enrolled_student_by_email_for(
email, app_context))
student_scores = course.get_all_scores(student)
assert len(student_scores) == 2
assert student_scores[1]['score'] == 3
overall_score = course.get_overall_score(student)
assert overall_score == 1
# Change the weight of assessment 2.
assessment_2.weight = 30
overall_score = course.get_overall_score(student)
assert overall_score == int((1 * 10 + 3 * 30) / 40)
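            # Weighted average, truncated to int:
            # (1 * 10 + 3 * 30) / (10 + 30) = 100 / 40 = 2.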
# Save all changes.
course.save()
# View the student profile page.
response = self.get('student/home')
assert_contains('assessment-score-first">1</span>', response.body)
assert_contains('assessment-score-second">3</span>', response.body)
assert_contains('Overall course score', response.body)
assert_contains('assessment-score-overall">2</span>', response.body)
# Submitting a lower score for any assessment does not change any of
# the scores, since the system records the maximum score that has
# ever been achieved on any assessment.
first_retry = {
'score': '0', 'assessment_type': assessment_1.unit_id}
actions.submit_assessment(self, assessment_1.unit_id, first_retry)
student = (
models.StudentProfileDAO.get_enrolled_student_by_email_for(
email, app_context))
student_scores = course.get_all_scores(student)
assert len(student_scores) == 2
assert student_scores[0]['id'] == str(assessment_1.unit_id)
assert student_scores[0]['score'] == 1
overall_score = course.get_overall_score(student)
assert overall_score == int((1 * 10 + 3 * 30) / 40)
actions.logout()
def test_datastore_backed_file_system(self):
"""Tests datastore-backed file system operations."""
fs = vfs.AbstractFileSystem(vfs.DatastoreBackedFileSystem('', '/'))
# Check binary file.
src = os.path.join(appengine_config.BUNDLE_ROOT, 'course.yaml')
dst = os.path.join('/', 'course.yaml')
fs.put(dst, open(src, 'rb'))
stored = fs.open(dst)
assert stored.metadata.size == len(open(src, 'rb').read())
assert not stored.metadata.is_draft
assert stored.read() == open(src, 'rb').read()
# Check draft.
fs.put(dst, open(src, 'rb'), is_draft=True)
stored = fs.open(dst)
assert stored.metadata.is_draft
# Check text files with non-ASCII characters and encoding.
foo_js = os.path.join('/', 'assets/js/foo.js')
foo_text = u'This is a test text (тест данные).'
fs.put(foo_js, vfs.string_to_stream(foo_text))
stored = fs.open(foo_js)
assert vfs.stream_to_string(stored) == foo_text
# Check delete.
del_file = os.path.join('/', 'memcache.test')
fs.put(del_file, vfs.string_to_stream(u'test'))
assert fs.isfile(del_file)
fs.delete(del_file)
assert not fs.isfile(del_file)
        # Check open or delete of a non-existent file does not fail.
assert not fs.open('/foo/bar/baz')
assert not fs.delete('/foo/bar/baz')
# Check new content fully overrides old (with and without memcache).
test_file = os.path.join('/', 'memcache.test')
fs.put(test_file, vfs.string_to_stream(u'test text'))
stored = fs.open(test_file)
assert u'test text' == vfs.stream_to_string(stored)
fs.delete(test_file)
# Check file existence.
assert not fs.isfile('/foo/bar')
assert fs.isfile('/course.yaml')
assert fs.isfile('/assets/js/foo.js')
# Check file listing.
bar_js = os.path.join('/', 'assets/js/bar.js')
fs.put(bar_js, vfs.string_to_stream(foo_text))
baz_js = os.path.join('/', 'assets/js/baz.js')
fs.put(baz_js, vfs.string_to_stream(foo_text))
assert fs.list('/') == sorted([
u'/course.yaml',
u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
assert fs.list('/assets') == sorted([
u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
assert not fs.list('/foo/bar')
def test_utf8_datastore(self):
"""Test writing to and reading from datastore using UTF-8 content."""
event = models.EventEntity()
event.source = 'test-source'
event.user_id = 'test-user-id'
event.data = u'Test Data (тест данные)'
event.put()
stored_event = models.EventEntity().get_by_id([event.key().id()])
assert 1 == len(stored_event)
assert event.data == stored_event[0].data
def assert_queriable(self, entity, name, date_type=datetime.datetime):
"""Create some entities and check that single-property queries work."""
for i in range(1, 32):
item = entity(
key_name='%s_%s' % (date_type.__class__.__name__, i))
setattr(item, name, date_type(2012, 1, i))
item.put()
# Descending order.
items = entity.all().order('-%s' % name).fetch(1000)
assert len(items) == 31
assert getattr(items[0], name) == date_type(2012, 1, 31)
# Ascending order.
items = entity.all().order('%s' % name).fetch(1000)
assert len(items) == 31
assert getattr(items[0], name) == date_type(2012, 1, 1)
def test_indexed_properties(self):
"""Test whether entities support specific query types."""
# A 'DateProperty' or 'DateTimeProperty' of each persistent entity must
# be indexed. This is true even if the application doesn't execute any
# queries relying on the index. The index is still critically important
# for managing data, for example, for bulk data download or for
# incremental computations. Using index, the entire table can be
# processed in daily, weekly, etc. chunks and it is easy to query for
# new data. If we did not have an index, chunking would have to be done
# by the primary index, where it is impossible to separate recently
# added/modified rows from the rest of the data. Having this index adds
# to the cost of datastore writes, but we believe it is important to
# have it. Below we check that all persistent date/datetime properties
# are indexed.
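        # A hypothetical entity with such an indexed property (illustrative
        # only; not defined in this code base):
        #
        #     class SampleEntity(db.Model):
        #         updated_on = db.DateTimeProperty(indexed=True)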
self.assert_queriable(
AnnouncementEntity, 'date', date_type=datetime.date)
self.assert_queriable(models.EventEntity, 'recorded_on')
self.assert_queriable(models.Student, 'enrolled_on')
self.assert_queriable(models.StudentAnswersEntity, 'updated_on')
self.assert_queriable(jobs.DurableJobEntity, 'updated_on')
def test_config_visible_from_any_namespace(self):
"""Test that ConfigProperty is visible from any namespace."""
assert (
config.UPDATE_INTERVAL_SEC.value ==
config.UPDATE_INTERVAL_SEC.default_value)
new_value = config.UPDATE_INTERVAL_SEC.default_value + 5
# Add datastore override for known property.
prop = config.ConfigPropertyEntity(
key_name=config.UPDATE_INTERVAL_SEC.name)
prop.value = str(new_value)
prop.is_draft = False
prop.put()
# Check visible from default namespace.
config.Registry.last_update_time = 0
assert config.UPDATE_INTERVAL_SEC.value == new_value
# Check visible from another namespace.
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(
'ns-test_config_visible_from_any_namespace')
config.Registry.last_update_time = 0
assert config.UPDATE_INTERVAL_SEC.value == new_value
finally:
namespace_manager.set_namespace(old_namespace)
class AdminAspectTest(actions.TestBase):
"""Test site from the Admin perspective."""
def test_appstats(self):
"""Checks that appstats is available when enabled."""
email = 'test_appstats@google.com'
# check appstats is disabled by default
actions.login(email, is_admin=True)
response = self.testapp.get('/admin')
assert_equals(response.status_int, 200)
assert_does_not_contain('>Appstats</a>', response.body)
assert_does_not_contain('/admin/stats/', response.body)
# enable and check appstats is now enabled
os.environ['GCB_APPSTATS_ENABLED'] = 'True'
response = self.testapp.get('/admin')
assert_equals(response.status_int, 200)
assert_contains('>Appstats</a>', response.body)
assert_contains('/admin/stats/', response.body)
del os.environ['GCB_APPSTATS_ENABLED']
def test_courses_page_for_multiple_courses(self):
"""Tests /admin page showing multiple courses."""
# Setup courses.
sites.setup_courses('course:/aaa::ns_a, course:/bbb::ns_b, course:/:/')
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
# Validate the courses before import.
all_courses = sites.get_all_courses()
dst_app_context_a = all_courses[0]
dst_app_context_b = all_courses[1]
src_app_context = all_courses[2]
# This test requires a read-write file system. If test runs on read-
# only one, we can't run this test :(
        if (not dst_app_context_a.fs.is_read_write() or
                not dst_app_context_b.fs.is_read_write()):
return
course_a = courses.Course(None, app_context=dst_app_context_a)
course_b = courses.Course(None, app_context=dst_app_context_b)
unused_course, course_a = course_a.import_from(src_app_context)
unused_course, course_b = course_b.import_from(src_app_context)
# Rename courses.
dst_app_context_a.fs.put(
dst_app_context_a.get_config_filename(),
vfs.string_to_stream(u'course:\n title: \'Course AAA\''))
dst_app_context_b.fs.put(
dst_app_context_b.get_config_filename(),
vfs.string_to_stream(u'course:\n title: \'Course BBB\''))
# Login.
email = 'test_courses_page_for_multiple_courses@google.com'
actions.login(email, is_admin=True)
# Check the course listing page.
response = self.testapp.get('/admin')
assert_contains_all_of([
'Course AAA',
'/aaa/dashboard',
'Course BBB',
'/bbb/dashboard'], response.body)
# Clean up.
sites.reset_courses()
def test_python_console(self):
"""Test access rights to the Python console."""
email = 'test_python_console@google.com'
# The default is that the console should be turned off
self.assertFalse(modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED)
# Test the console when it is enabled
modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = True
# Check normal user has no access.
actions.login(email)
response = self.testapp.get('/admin?action=console')
assert_equals(response.status_int, 302)
response = self.testapp.post('/admin?action=console')
assert_equals(response.status_int, 302)
# Check delegated admin has no access.
os.environ['gcb_admin_user_emails'] = '[%s]' % email
actions.login(email)
response = self.testapp.get('/admin?action=console')
assert_equals(response.status_int, 200)
assert_contains(
'You must be an actual admin user to continue.', response.body)
response = self.testapp.get('/admin?action=console')
assert_equals(response.status_int, 200)
assert_contains(
'You must be an actual admin user to continue.', response.body)
del os.environ['gcb_admin_user_emails']
# Check actual admin has access.
actions.login(email, is_admin=True)
response = self.testapp.get('/admin?action=console')
assert_equals(response.status_int, 200)
response.form.set('code', 'print "foo" + "bar"')
response = self.submit(response.form, response)
assert_contains('foobar', response.body)
# Finally, test that the console is not found when it is disabled
modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = False
actions.login(email, is_admin=True)
self.testapp.get('/admin?action=console', status=404)
self.testapp.post('/admin?action=console_run', status=404)
def test_non_admin_has_no_access(self):
"""Test non admin has no access to pages or REST endpoints."""
email = 'test_non_admin_has_no_access@google.com'
actions.login(email)
# Add datastore override.
prop = config.ConfigPropertyEntity(
key_name='gcb_config_update_interval_sec')
prop.value = '5'
prop.is_draft = False
prop.put()
# Check user has no access to specific pages and actions.
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
response = self.testapp.get(
'/admin?action=config_edit&name=gcb_admin_user_emails')
assert_equals(response.status_int, 302)
response = self.testapp.post(
'/admin?action=config_reset&name=gcb_admin_user_emails')
assert_equals(response.status_int, 302)
# Check user has no rights to GET verb.
response = self.testapp.get(
'/rest/config/item?key=gcb_config_update_interval_sec')
assert_equals(response.status_int, 200)
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 401
assert json_dict['message'] == 'Access denied.'
# Here are the endpoints we want to test: (uri, xsrf_action_name).
endpoints = [
('/rest/config/item', 'config-property-put'),
('/rest/courses/item', 'add-course-put')]
# Check user has no rights to PUT verb.
payload_dict = {}
payload_dict['value'] = '666'
payload_dict['is_draft'] = False
request = {}
request['key'] = 'gcb_config_update_interval_sec'
request['payload'] = transforms.dumps(payload_dict)
for uri, unused_action in endpoints:
response = self.testapp.put(uri + '?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 403', response.body)
# Check user still has no rights to PUT verb even if he somehow
# obtained a valid XSRF token.
for uri, action in endpoints:
request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(action)
response = self.testapp.put(uri + '?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 401
assert json_dict['message'] == 'Access denied.'
def test_admin_list(self):
"""Test delegation of admin access to another user."""
email = 'test_admin_list@google.com'
actions.login(email)
# Add environment variable override.
os.environ['gcb_admin_user_emails'] = '[%s]' % email
# Add datastore override.
prop = config.ConfigPropertyEntity(
key_name='gcb_config_update_interval_sec')
prop.value = '5'
prop.is_draft = False
prop.put()
# Check user has access now.
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 200)
# Check overrides are active and have proper management actions.
assert_contains('gcb_admin_user_emails', response.body)
assert_contains('[test_admin_list@google.com]', response.body)
assert_contains(
'/admin?action=config_override&name=gcb_admin_user_emails',
response.body)
assert_contains(
'/admin?action=config_edit&name=gcb_config_update_interval_sec',
response.body)
# Check editor page has proper actions.
response = self.testapp.get(
'/admin?action=config_edit&name=gcb_config_update_interval_sec')
assert_equals(response.status_int, 200)
assert_contains('/admin?action=config_reset', response.body)
assert_contains('name=gcb_config_update_interval_sec', response.body)
# Remove override.
del os.environ['gcb_admin_user_emails']
# Check user has no access.
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
def test_access_to_admin_pages(self):
"""Test access to admin pages."""
# assert anonymous user has no access
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
# assert admin user has access
email = 'test_access_to_admin_pages@google.com'
name = 'Test Access to Admin Pages'
actions.login(email, is_admin=True)
actions.register(self, name)
response = self.testapp.get('/admin')
assert_contains('Power Searching with Google', response.body)
assert_contains('All Courses', response.body)
response = self.testapp.get('/admin?action=settings')
assert_contains('gcb_admin_user_emails', response.body)
assert_contains('gcb_config_update_interval_sec', response.body)
assert_contains('All Settings', response.body)
response = self.testapp.get('/admin?action=perf')
assert_contains('gcb-admin-uptime-sec:', response.body)
assert_contains('In-process Performance Counters', response.body)
response = self.testapp.get('/admin?action=deployment')
assert_contains('application_id: testbed-test', response.body)
assert_contains('About the Application', response.body)
actions.unregister(self)
actions.logout()
# assert not-admin user has no access
actions.login(email)
actions.register(self, name)
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
def test_multiple_courses(self):
"""Test courses admin page with two courses configured."""
sites.setup_courses(
'course:/foo:/foo-data, course:/bar:/bar-data:nsbar')
email = 'test_multiple_courses@google.com'
actions.login(email, is_admin=True)
response = self.testapp.get('/admin')
assert_contains('Course Builder > Admin > Courses', response.body)
assert_contains('Total: 2 item(s)', response.body)
        # Check course URLs.
assert_contains('<a href="/foo/dashboard">', response.body)
assert_contains('<a href="/bar/dashboard">', response.body)
# Check content locations.
assert_contains('/foo-data', response.body)
assert_contains('/bar-data', response.body)
# Check namespaces.
assert_contains('gcb-course-foo-data', response.body)
assert_contains('nsbar', response.body)
# Clean up.
sites.reset_courses()
def test_add_course(self):
"""Tests adding a new course entry."""
if not self.supports_editing:
return
email = 'test_add_course@google.com'
actions.login(email, is_admin=True)
# Prepare request data.
payload_dict = {
'name': 'add_new',
'title': u'new course (тест данные)', 'admin_email': 'foo@bar.com'}
request = {}
request['payload'] = transforms.dumps(payload_dict)
request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
'add-course-put')
# Execute action.
response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
# Check response.
json_dict = transforms.loads(transforms.loads(response.body)['payload'])
assert 'course:/add_new::ns_add_new' == json_dict.get('entry')
# Re-execute action; should fail as this would create a duplicate.
response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_equals(412, transforms.loads(response.body)['status'])
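        # (412 is used as 'precondition failed' here: the course slug and
        # namespace created by the first request already exist.)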
# Load the course and check its title.
new_app_context = sites.get_all_courses(
'course:/add_new::ns_add_new')[0]
assert_equals(u'new course (тест данные)', new_app_context.get_title())
new_course = courses.Course(None, app_context=new_app_context)
assert not new_course.get_units()
class CourseAuthorAspectTest(actions.TestBase):
"""Tests the site from the Course Author perspective."""
    # pylint: disable=too-many-statements
def test_dashboard(self):
"""Test course dashboard."""
email = 'test_dashboard@google.com'
name = 'Test Dashboard'
        # Non-admin doesn't have access.
actions.login(email)
response = self.get('dashboard')
assert_equals(response.status_int, 302)
        actions.register(self, name)
        response = self.get('dashboard')
        assert_equals(response.status_int, 302)
actions.logout()
# Admin has access.
actions.login(email, is_admin=True)
response = self.get('dashboard')
# Verify title does not have link text
assert_contains(
'<title>Course Builder > Power Searching with Google > Dash',
response.body)
# Verify body does have linked breadcrumb trail.
assert_contains(
'Google ><a href="%s"> Dashboard </a>> Outline' %
self.canonicalize('dashboard'),
response.body)
# Tests outline view.
response = self.get('dashboard')
assert_contains('Unit 3 - Advanced techniques', response.body)
assert_contains('data/lesson.csv', response.body)
# Check editability.
if self.supports_editing:
assert_contains('Add Assessment', response.body)
else:
assert_does_not_contain('Add Assessment', response.body)
# Test assets view.
response = self.get('dashboard?action=assets&tab=css')
# Verify title does not have link text
assert_contains(
'<title>Course Builder > Power Searching with Google > Dash',
response.body)
# Verify body does have linked breadcrumb trail.
assert_contains(
'Google ><a href="%s">' % self.canonicalize('dashboard') +
' Dashboard </a>> Assets > CSS',
response.body)
assert_contains('assets/css/main.css', response.body)
response = self.get('dashboard?action=assets&tab=images')
assert_contains('assets/img/Image1.5.png', response.body)
response = self.get('dashboard?action=assets&tab=js')
assert_contains('assets/lib/activity-generic-1.3.js', response.body)
# Test settings view.
response = self.get('dashboard?action=settings')
# Verify title does not have link text
assert_contains(
'<title>Course Builder > Power Searching with Google > Dash',
response.body)
# Verify body does have linked breadcrumb trail.
assert_contains(
'Google ><a href="%s"> Dashboard </a>> Settings' %
self.canonicalize('dashboard'),
response.body)
assert_contains('course.yaml', response.body)
        assert_contains(
            'title: \'Power Searching with Google\'', response.body)
        assert_contains('locale: \'en_US\'', response.body)
# Check editability.
if self.supports_editing:
assert_contains('create_or_edit_settings', response.body)
else:
assert_does_not_contain('create_or_edit_settings', response.body)
# Tests student statistics view.
response = self.get('dashboard?action=analytics&tab=students')
# Verify title does not have link text
assert_contains(
'<title>Course Builder > Power Searching with Google > Dash',
response.body)
# Verify body does have linked breadcrumb trail.
assert_contains(
'Google ><a href="%s"> ' % self.canonicalize('dashboard') +
'Dashboard </a>> Analytics > Students', response.body)
assert_contains('have not been calculated yet', response.body)
response = response.forms[
'gcb-generate-analytics-data'].submit().follow()
assert len(self.taskq.GetTasks('default')) == 3
response = self.get(response.request.url)
assert_contains('is running', response.body)
self.execute_all_deferred_tasks()
response = self.get(response.request.url)
assert_contains('were last updated at', response.body)
assert_contains('currently enrolled: 1', response.body)
assert_contains('total: 1', response.body)
# Tests assessment statistics.
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
for i in range(5):
student = models.Student(key_name='key-%s' % i)
student.is_enrolled = True
student.scores = transforms.dumps({'test-assessment': i})
student.put()
finally:
namespace_manager.set_namespace(old_namespace)
response = self.get(response.request.url)
response = response.forms[
'gcb-generate-analytics-data'].submit().follow()
self.execute_all_deferred_tasks()
response = self.get(response.request.url)
assert_contains('currently enrolled: 6', response.body)
assert_contains(
'test-assessment: completed 5, average score 2.0', response.body)
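        # (The five synthetic students above scored 0..4 on 'test-assessment',
        # so the completed count is 5 and the average is (0+1+2+3+4)/5 = 2.0.)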
def test_trigger_sample_announcements(self):
"""Test course author can trigger adding sample announcements."""
email = 'test_announcements@google.com'
name = 'Test Announcements'
actions.login(email, is_admin=True)
actions.register(self, name)
response = actions.view_announcements(self)
assert_contains('Example Announcement', response.body)
assert_contains('Welcome to the final class!', response.body)
assert_does_not_contain('No announcements yet.', response.body)
def test_manage_announcements(self):
"""Test course author can manage announcements."""
email = 'test_announcements@google.com'
name = 'Test Announcements'
actions.login(email, is_admin=True)
actions.register(self, name)
# add new
response = actions.view_announcements(self)
add_form = response.forms['gcb-add-announcement']
response = self.submit(add_form)
assert_equals(response.status_int, 302)
# check edit form rendering
response = self.testapp.get(response.location)
assert_equals(response.status_int, 200)
assert_contains('/rest/announcements/item?key=', response.body)
# check added
response = actions.view_announcements(self)
assert_contains('Sample Announcement (Draft)', response.body)
# delete draft
response = actions.view_announcements(self)
delete_form = response.forms['gcb-delete-announcement-1']
response = self.submit(delete_form)
assert_equals(response.status_int, 302)
# check deleted
assert_does_not_contain('Welcome to the final class!', response.body)
def test_announcements_rest(self):
"""Test REST access to announcements."""
email = 'test_announcements_rest@google.com'
name = 'Test Announcements Rest'
actions.login(email, is_admin=True)
actions.register(self, name)
response = actions.view_announcements(self)
assert_does_not_contain('My Test Title', response.body)
# REST GET existing item
items = AnnouncementEntity.all().fetch(1)
for item in items:
response = self.get('rest/announcements/item?key=%s' % item.key())
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 200
assert 'message' in json_dict
assert 'payload' in json_dict
payload_dict = transforms.loads(json_dict['payload'])
assert 'title' in payload_dict
assert 'date' in payload_dict
# REST PUT item
payload_dict['title'] = u'My Test Title Мой заголовок теста'
payload_dict['date'] = '2012/12/31'
payload_dict['is_draft'] = True
payload_dict['send_email'] = False
request = {}
request['key'] = str(item.key())
request['payload'] = transforms.dumps(payload_dict)
# Check XSRF is required.
response = self.put('rest/announcements/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 403', response.body)
# Check PUT works.
request['xsrf_token'] = json_dict['xsrf_token']
response = self.put('rest/announcements/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 200', response.body)
# Confirm change is visible on the page.
response = self.get('announcements')
assert_contains(
u'My Test Title Мой заголовок теста (Draft)', response.body)
# REST GET not-existing item
response = self.get('rest/announcements/item?key=not_existent_key')
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 404
class CourseAuthorCourseCreationTest(actions.TestBase):
def test_course_admin_can_create_another_course(self):
admin_email = 'admin@foo.com'
author_email = 'author@foo.com'
actions.login(admin_email, is_admin=True)
actions.simple_add_course('course_one', admin_email, 'Course One')
actions.update_course_config('course_one', {
'course': {'admin_user_emails': author_email}})
# Login without super-admin authority; visit dashboard of course we
# may edit.
actions.login(author_email)
response = self.get('/course_one/dashboard')
response = self.click(response, 'Add Course')
# Ensure that clicking on add-course link does not result in a 302
# to '/', which would happen if we did not have access.
self.assertEquals(200, response.status_int)
self.assertEquals('http://localhost/admin?action=add_course',
response.request.url)
def test_course_admin_does_not_see_courses_he_does_not_administer(self):
admin_email = 'admin@foo.com'
author_email = 'author@foo.com'
actions.login(admin_email, is_admin=True)
actions.simple_add_course('course_one', admin_email, 'Course One')
actions.simple_add_course('course_two', admin_email, 'Course Two')
actions.simple_add_course('course_three', admin_email, 'Course Three')
actions.update_course_config('course_one', {
'course': {'admin_user_emails': author_email}})
actions.update_course_config('course_two', {
'course': {'admin_user_emails': author_email}})
actions.login(author_email)
# Visit course_one's dashboard
response = self.get('/course_one/dashboard')
# Expect to be able to see peer course for which author has admin rights
self.assertIn('Course Two', response.body)
self.assertIn('/course_two', response.body)
# But not peer course for which he does not.
self.assertNotIn('Course Three', response.body)
self.assertNotIn('/course_three', response.body)
class StudentAspectTest(actions.TestBase):
"""Test the site from the Student perspective."""
def test_view_announcements(self):
"""Test student aspect of announcements."""
email = 'test_announcements@google.com'
name = 'Test Announcements'
actions.login(email)
actions.register(self, name)
# Check no announcements yet.
response = actions.view_announcements(self)
assert_does_not_contain('Example Announcement', response.body)
assert_does_not_contain('Welcome to the final class!', response.body)
assert_contains('No announcements yet.', response.body)
actions.logout()
# Login as admin and add announcements.
actions.login('admin@sample.com', is_admin=True)
actions.register(self, 'admin')
response = actions.view_announcements(self)
actions.logout()
# Check we can see non-draft announcements.
actions.login(email)
response = actions.view_announcements(self)
assert_contains('Example Announcement', response.body)
assert_does_not_contain('Welcome to the final class!', response.body)
assert_does_not_contain('No announcements yet.', response.body)
        # Check no access to draft announcements via REST handler.
items = AnnouncementEntity.all().fetch(1000)
for item in items:
response = self.get('rest/announcements/item?key=%s' % item.key())
if item.is_draft:
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 401
else:
assert_equals(response.status_int, 200)
def test_registration(self):
"""Test student registration."""
email = 'test_registration@example.com'
name1 = 'Test Student'
name2 = 'John Smith'
name3 = u'Pavel Simakov (тест данные)'
actions.login(email)
# Verify registration is present on /course to unregistered student.
response = self.get('course')
self.assertIn('<a href="register">Registration</a>', response.body)
actions.register(self, name1)
actions.check_profile(self, name1)
# Verify registration link is gone once registered.
response = self.get('course')
self.assertNotIn('<a href="register">Registration</a>', response.body)
actions.change_name(self, name2)
actions.unregister(self)
actions.register(self, name3)
actions.check_profile(self, name3)
def test_course_not_available(self):
"""Tests course is only accessible to author when incomplete."""
email = 'test_course_not_available@example.com'
name = 'Test Course Not Available'
actions.login(email)
actions.register(self, name)
# Check preview and static resources are available.
response = self.get('course')
assert_equals(response.status_int, 200)
response = self.get('assets/js/activity-1.3.js')
assert_equals(response.status_int, 200)
# Override course.yaml settings by patching app_context.
with actions.OverriddenEnvironment(
{'course': {'now_available': False}}):
# Check preview and static resources are not available to Student.
response = self.get('course', expect_errors=True)
assert_equals(response.status_int, 404)
response = self.get('assets/js/activity-1.3.js', expect_errors=True)
assert_equals(response.status_int, 404)
# Check preview and static resources are still available to author.
actions.login(email, is_admin=True)
response = self.get('course')
assert_equals(response.status_int, 200)
response = self.get('assets/js/activity-1.3.js')
assert_equals(response.status_int, 200)
def test_registration_closed(self):
"""Test student registration when course is full."""
email = 'test_registration_closed@example.com'
name = 'Test Registration Closed'
with actions.OverriddenEnvironment(
{'reg_form': {'can_register': False}}):
# Try to login and register.
actions.login(email)
try:
actions.register(self, name)
raise actions.ShouldHaveFailedByNow(
'Expected to fail: new registrations should not be allowed '
'when registration is closed.')
except actions.ShouldHaveFailedByNow as e:
raise e
except:
pass
# Verify registration link not present on /course
response = self.get('course')
self.assertNotIn(
'<a href="register">Registration</a>', response.body)
def test_registration_with_additional_fields(self):
"""Registers a new student with customized registration form."""
email = 'test_registration_with_additional_fields@example.com'
name = 'Test Registration with Additional Fields'
zipcode = '94043'
score = '99'
environ = {
'course': {'browsable': False},
'reg_form': {
'additional_registration_fields': (
'\'<!-- reg_form.additional_registration_fields -->'
'<li>'
'<label class="form-label" for="form02"> '
'What is your zipcode?'
'</label><input name="form02" type="text"></li>'
'<li>'
'<label class="form-label" for="form03"> '
'What is your score?'
'</label> <input name="form03" type="text"></li>\'')
}
}
with actions.OverriddenEnvironment(environ):
# Login and register.
actions.login(email)
actions.register_with_additional_fields(self, name, zipcode, score)
# Verify that registration results in capturing additional
# registration questions.
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
student = models.Student.get_enrolled_student_by_email(email)
# Check that two registration additional fields are populated
# with correct values.
if student.additional_fields:
json_dict = transforms.loads(student.additional_fields)
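                # (additional_fields is evidently serialized as a JSON list of
                # [name, value] pairs; indices 2 and 3 correspond to the
                # form02/form03 inputs injected above.)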
assert zipcode == json_dict[2][1]
assert score == json_dict[3][1]
# Clean up app_context.
namespace_manager.set_namespace(old_namespace)
def test_permissions(self):
"""Test student permissions, and which pages they can view."""
email = 'test_permissions@example.com'
name = 'Test Permissions'
with actions.OverriddenEnvironment({'course': {'browsable': False}}):
actions.login(email)
actions.register(self, name)
actions.Permissions.assert_enrolled(self)
actions.unregister(self)
actions.Permissions.assert_unenrolled(self)
actions.register(self, name)
actions.Permissions.assert_enrolled(self)
def test_login_and_logout(self):
"""Test if login and logout behave as expected."""
with actions.OverriddenEnvironment({'course': {'browsable': False}}):
email = 'test_login_logout@example.com'
actions.Permissions.assert_logged_out(self)
actions.login(email)
actions.Permissions.assert_unenrolled(self)
actions.logout()
actions.Permissions.assert_logged_out(self)
def test_locale_settings(self):
extra_environ = {
'course': {'locale': 'en_US'},
'extra_locales': [
{'locale': 'el', 'availability': 'available'},
{'locale': 'fr', 'availability': 'unavailable'}]}
with actions.OverriddenEnvironment(extra_environ):
# Visit course home page with no locale settings and see the default
# locale
course_page = self.parse_html_string(self.get('course').body)
self.assertEquals(
'Registration', course_page.find('.//a[@href="register"]').text)
# Visit course home page with accept-language set to an available
# locale
course_page = self.parse_html_string(
self.get('course', headers={'Accept-Language': 'el'}).body)
self.assertEquals(
u'Εγγραφή', course_page.find('.//a[@href="register"]').text)
# Visit course home page with accept-language set to an unavailable
# locale
course_page = self.parse_html_string(
self.get('course', headers={'Accept-Language': 'fr'}).body)
self.assertEquals(
u'Registration',
course_page.find('.//a[@href="register"]').text)
# Locale picker is not shown for a user who is not signed in.
locale_select = course_page.find('.//select[@id="locale-select"]')
self.assertIsNone(locale_select)
actions.login('user@place.com')
# Locale picker shown for user in session. Chooser shows only
# available locales.
course_page = self.parse_html_string(self.get('course').body)
locale_options = course_page.findall(
'.//select[@id="locale-select"]/option')
self.assertEqual(2, len(locale_options))
self.assertEquals('en_US', locale_options[0].attrib['value'])
self.assertEquals('el', locale_options[1].attrib['value'])
# Set language prefs using the REST endpoint.
# A bad XSRF token is rejected
request = {'xsrf_token': '1234'}
response = transforms.loads(self.post(
'rest/locale', {'request': transforms.dumps(request)}).body)
self.assertEquals(403, response['status'])
self.assertIn('Bad XSRF token', response['message'])
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token('locales')
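# The token must be minted for the same action name ('locales') that the
# REST handler presumably validates against; a token minted for any other
# action would be rejected like the bad token above.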
# An unavailable locale is rejected
request = {'xsrf_token': xsrf_token, 'payload': {'selected': 'fr'}}
response = transforms.loads(self.post(
'rest/locale', {'request': transforms.dumps(request)}).body)
self.assertEquals(401, response['status'])
self.assertIn('Bad locale', response['message'])
# An available locale is accepted
request = {'xsrf_token': xsrf_token, 'payload': {'selected': 'el'}}
response = transforms.loads(self.post(
'rest/locale', {'request': transforms.dumps(request)}).body)
self.assertEquals(200, response['status'])
self.assertIn('OK', response['message'])
# After setting locale, visit course homepage and see new locale
course_page = self.parse_html_string(self.get('course').body)
self.assertEquals(
u'Εγγραφή', course_page.find('.//a[@href="register"]').text)
def test_lesson_activity_navigation(self):
"""Test navigation between lesson/activity pages."""
email = 'test_lesson_activity_navigation@example.com'
name = 'Test Lesson Activity Navigation'
actions.login(email)
actions.register(self, name)
response = self.get('unit?unit=1&lesson=1')
assert_does_not_contain('Previous Page', response.body)
assert_contains('Next Page', response.body)
response = self.get('unit?unit=2&lesson=3')
assert_contains('Previous Page', response.body)
assert_contains('Next Page', response.body)
response = self.get('unit?unit=3&lesson=5')
assert_contains('Previous Page', response.body)
assert_does_not_contain('Next Page', response.body)
assert_contains('End', response.body)
def test_show_hide_unit_links_on_sidebar(self):
"""Test display of unit links in side bar."""
email = 'test_show_hide_unit_links_on_sidebar@example.com'
name = 'Test Show/Hide of Unit Links on Side Bar'
actions.login(email)
actions.register(self, name)
text_to_check = [
'unit?unit=1', 'Unit 1 - ',
'unit?unit=3', 'Unit 3 - ',
'assessment?name=Mid', 'Mid-course assessment',
'unit?unit=1&lesson=5', 'Word order matters',
'unit?unit=3&lesson=4', 'OR and quotes'
]
# The default behavior is to show links to other units and lessons.
response = self.get('unit?unit=2')
for item in text_to_check:
assert_contains(item, response.body)
with actions.OverriddenEnvironment(
{'unit': {'show_unit_links_in_leftnav': False}}):
# Check that now we don't have links to other units and lessons.
response = self.get('unit?unit=2')
for item in text_to_check:
assert_does_not_contain(item, response.body)
def test_show_hide_lesson_navigation(self):
"""Test display of lesson navigation buttons."""
email = 'test_show_hide_of_lesson_navigation@example.com'
name = 'Test Show/Hide of Lesson Navigation'
actions.login(email)
actions.register(self, name)
# The default behavior is to show the lesson navigation buttons.
response = self.get('unit?unit=2&lesson=3')
assert_contains('<div class="gcb-prev-button">', response.body)
assert_contains('<div class="gcb-next-button">', response.body)
with actions.OverriddenEnvironment(
{'unit': {'hide_lesson_navigation_buttons': True}}):
# The lesson navigation buttons should now be hidden.
response = self.get('unit?unit=2&lesson=3')
assert_does_not_contain(
'<div class="gcb-prev-button">', response.body)
assert_does_not_contain(
'<div class="gcb-next-button">', response.body)
def test_attempt_activity_event(self):
"""Test activity attempt generates event."""
email = 'test_attempt_activity_event@example.com'
name = 'Test Attempt Activity Event'
actions.login(email)
actions.register(self, name)
# Enable event recording.
config.Registry.test_overrides[
lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True
# Prepare event.
request = {}
request['source'] = 'test-source'
request['payload'] = transforms.dumps({'Alice': u'Bob (тест данные)'})
# Check XSRF token is required.
response = self.post('rest/events?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 403', response.body)
# Check that a post with a valid XSRF token works.
request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
'event-post')
response = self.post('rest/events?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert not response.body
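# Note the error-reporting convention: the XSRF failure above came back
# as HTTP 200 with '"status": 403' in the JSON body, while a successful
# event post returns an empty body.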
# Check event is properly recorded.
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
events = models.EventEntity.all().fetch(1000)
assert 1 == len(events)
assert_contains(
u'Bob (тест данные)',
transforms.loads(events[0].data)['Alice'])
finally:
namespace_manager.set_namespace(old_namespace)
# Clean up.
config.Registry.test_overrides = {}
def test_two_students_dont_see_each_other_pages(self):
"""Test a user can't see another user pages."""
email1 = 'user1@foo.com'
name1 = 'User 1'
email2 = 'user2@foo.com'
name2 = 'User 2'
# Login as one user and view 'unit' and other pages, which are not
# cached.
actions.login(email1)
actions.register(self, name1)
actions.Permissions.assert_enrolled(self)
response = actions.view_unit(self)
assert_contains(email1, response.body)
actions.logout()
# Login as another user and check that 'unit' and other pages show
# the correct new email.
actions.login(email2)
actions.register(self, name2)
actions.Permissions.assert_enrolled(self)
response = actions.view_unit(self)
assert_contains(email2, response.body)
actions.logout()
def test_xsrf_defence(self):
"""Test defense against XSRF attack."""
email = 'test_xsrf_defence@example.com'
name = 'Test Xsrf Defence'
actions.login(email)
actions.register(self, name)
response = self.get('student/home')
edit_form = actions.get_form_by_action(response, 'student/editstudent')
edit_form.set('name', 'My New Name')
edit_form.set('xsrf_token', 'bad token')
response = edit_form.submit(expect_errors=True)
assert_equals(response.status_int, 403)
def test_autoescaping(self):
"""Test Jinja autoescaping."""
email = 'test_autoescaping@example.com'
name1 = '<script>alert(1);</script>'
name2 = '<script>alert(2);</script>'
actions.login(email)
actions.register(self, name1)
actions.check_profile(self, name1)
actions.change_name(self, name2)
actions.unregister(self)
def test_response_headers(self):
"""Test dynamically-generated responses use proper headers."""
email = 'test_response_headers@example.com'
name = 'Test Response Headers'
actions.login(email)
actions.register(self, name)
response = self.get('student/home')
assert_equals(response.status_int, 200)
assert_contains('must-revalidate', response.headers['Cache-Control'])
assert_contains('no-cache', response.headers['Cache-Control'])
assert_contains('no-cache', response.headers['Pragma'])
assert_contains('Mon, 01 Jan 1990', response.headers['Expires'])
def test_browsability_permissions(self):
"""Tests that the course browsability flag works correctly."""
# By default, courses are browsable.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains('<a href="assessment?name=Pre"', response.body)
assert_does_not_contain('progress-notstarted-Pre', response.body)
actions.Permissions.assert_can_browse(self)
with actions.OverriddenEnvironment({'course': {'browsable': False}}):
actions.Permissions.assert_logged_out(self)
# Check course page redirects.
response = self.get('course', expect_errors=True)
assert_equals(response.status_int, 302)
class StudentUnifiedProfileTest(StudentAspectTest):
"""Tests student actions having unified profile enabled."""
def setUp(self): # pylint: disable=g-bad-name
super(StudentUnifiedProfileTest, self).setUp()
config.Registry.test_overrides[
models.CAN_SHARE_STUDENT_PROFILE] = True
def tearDown(self): # pylint: disable=g-bad-name
config.Registry.test_overrides = {}
super(StudentUnifiedProfileTest, self).tearDown()
class StaticHandlerTest(actions.TestBase):
"""Check serving of static resources."""
def test_disabled_modules_has_no_routes(self):
"""Test that disabled modules has no routes."""
assert modules.oeditor.oeditor.custom_module.enabled
assert modules.oeditor.oeditor.custom_module.global_routes
assert modules.oeditor.oeditor.custom_module.namespaced_routes
modules.oeditor.oeditor.custom_module.disable()
try:
assert not modules.oeditor.oeditor.custom_module.enabled
assert not modules.oeditor.oeditor.custom_module.global_routes
assert not modules.oeditor.oeditor.custom_module.namespaced_routes
finally:
modules.oeditor.oeditor.custom_module.enable()
def test_static_files_cache_control(self):
"""Test static/zip handlers use proper Cache-Control headers."""
# Check static handler.
response = self.get('/assets/css/main.css')
assert_equals(response.status_int, 200)
assert_contains('max-age=600', response.headers['Cache-Control'])
assert_contains('public', response.headers['Cache-Control'])
assert_does_not_contain('no-cache', response.headers['Cache-Control'])
# Check zip file handler.
response = self.testapp.get(
'/static/inputex-3.1.0/src/inputex/assets/skins/sam/inputex.css')
assert_equals(response.status_int, 200)
assert_contains('max-age=600', response.headers['Cache-Control'])
assert_contains('public', response.headers['Cache-Control'])
assert_does_not_contain('no-cache', response.headers['Cache-Control'])
class ActivityTest(actions.TestBase):
"""Test for activities."""
def get_activity(self, unit_id, lesson_id, args):
"""Retrieve the activity page for a given unit and lesson id."""
response = self.get('activity?unit=%s&lesson=%s' % (unit_id, lesson_id))
assert_equals(response.status_int, 200)
assert_contains(
'<script src="assets/js/activity-%s.%s.js"></script>' %
(unit_id, lesson_id), response.body)
assert_contains('assets/lib/activity-generic-1.3.js', response.body)
js_response = self.get('assets/lib/activity-generic-1.3.js')
assert_equals(js_response.status_int, 200)
# Extract XSRF token from the page.
match = re.search(r'eventXsrfToken = [\']([^\']+)', response.body)
assert match
xsrf_token = match.group(1)
args['xsrf_token'] = xsrf_token
return response, args
def test_activities(self):
"""Test that activity submissions are handled and recorded correctly."""
email = 'test_activities@google.com'
name = 'Test Activities'
unit_id = 1
lesson_id = 2
activity_submissions = {
'1.2': {
'index': 3,
'type': 'activity-choice',
'value': 3,
'correct': True,
},
}
# Register.
actions.login(email)
actions.register(self, name)
# Enable event recording.
config.Registry.test_overrides[
lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True
# Navigate to the course overview page, and check that the unit shows
# no progress yet.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(
u'id="progress-notstarted-%s"' % unit_id, response.body)
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
response, args = self.get_activity(unit_id, lesson_id, {})
# Check that the current activity shows no progress yet.
assert_contains(
u'id="progress-notstarted-%s-activity"' %
lesson_id, response.body)
# Prepare activity submission event.
args['source'] = 'attempt-activity'
lesson_key = '%s.%s' % (unit_id, lesson_id)
assert lesson_key in activity_submissions
args['payload'] = activity_submissions[lesson_key]
args['payload']['location'] = (
'http://localhost:8080/activity?unit=%s&lesson=%s' %
(unit_id, lesson_id))
args['payload'] = transforms.dumps(args['payload'])
# Submit the request to the backend.
response = self.post('rest/events?%s' % urllib.urlencode(
{'request': transforms.dumps(args)}), {})
assert_equals(response.status_int, 200)
assert not response.body
# Check that the current activity shows partial progress.
response, args = self.get_activity(unit_id, lesson_id, {})
assert_contains(
u'id="progress-inprogress-%s-activity"' %
lesson_id, response.body)
# Navigate to the course overview page and check that the unit shows
# partial progress.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(
u'id="progress-inprogress-%s"' % unit_id, response.body)
finally:
namespace_manager.set_namespace(old_namespace)
# pylint: disable-msg=too-many-statements
def test_progress(self):
"""Test student activity progress in detail, using the sample course."""
class FakeHandler(object):
def __init__(self, app_context):
self.app_context = app_context
course = Course(FakeHandler(sites.get_all_courses()[0]))
tracker = course.get_progress_tracker()
student = models.Student(key_name='key-test-student')
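# Progress values used throughout this test: 0 == not started,
# 1 == in progress, 2 == completed (they correspond to the
# id="progress-notstarted/inprogress/completed-..." markers asserted
# elsewhere).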
# Initially, all progress entries should be set to zero.
unit_progress = tracker.get_unit_progress(student)
for key in unit_progress:
assert unit_progress[key] == 0
lesson_progress = tracker.get_lesson_progress(student, 1)
for key in lesson_progress:
assert lesson_progress[key] == {
'html': 0, 'activity': 0, 'has_activity': True
}
# The blocks in Lesson 1.2 with activities are blocks 3 and 6.
# Submitting block 3 should trigger an in-progress update.
tracker.put_block_completed(student, 1, 2, 3)
assert tracker.get_unit_progress(student)['1'] == 1
assert tracker.get_lesson_progress(student, 1)[2] == {
'html': 0, 'activity': 1, 'has_activity': True
}
# Submitting block 6 should trigger a completion update for the
# activity, but Lesson 1.2 is still incomplete.
tracker.put_block_completed(student, 1, 2, 6)
assert tracker.get_unit_progress(student)['1'] == 1
assert tracker.get_lesson_progress(student, 1)[2] == {
'html': 0, 'activity': 2, 'has_activity': True
}
# Visiting the HTML page for Lesson 1.2 completes the lesson.
tracker.put_html_accessed(student, 1, 2)
assert tracker.get_unit_progress(student)['1'] == 1
assert tracker.get_lesson_progress(student, 1)[2] == {
'html': 2, 'activity': 2, 'has_activity': True
}
# Test a lesson with no interactive blocks in its activity. It should
# change its status to 'completed' once it is accessed.
tracker.put_activity_accessed(student, 2, 1)
assert tracker.get_unit_progress(student)['2'] == 1
assert tracker.get_lesson_progress(student, 2)[1] == {
'html': 0, 'activity': 2, 'has_activity': True
}
# Test that a lesson without an activity (Lesson 1.1) needs no activity
# event. Visit the HTML for all lessons and complete the activities for
# lessons 1.3-1.6; unit 1 is then marked 'completed' even though no
# activity event was ever recorded for Lesson 1.1.
tracker.put_html_accessed(student, 1, 1)
tracker.put_html_accessed(student, 1, 3)
tracker.put_html_accessed(student, 1, 4)
tracker.put_html_accessed(student, 1, 5)
tracker.put_html_accessed(student, 1, 6)
tracker.put_activity_completed(student, 1, 3)
tracker.put_activity_completed(student, 1, 4)
tracker.put_activity_completed(student, 1, 5)
assert tracker.get_unit_progress(student)['1'] == 1
tracker.put_activity_completed(student, 1, 6)
assert tracker.get_unit_progress(student)['1'] == 2
# Test that a unit is not completed until all HTML and activity pages
# have at least been visited. Unit 6 has 3 lessons; the last one has
# no activity block.
tracker.put_html_accessed(student, 6, 1)
tracker.put_html_accessed(student, 6, 2)
tracker.put_activity_completed(student, 6, 1)
tracker.put_activity_completed(student, 6, 2)
assert tracker.get_unit_progress(student)['6'] == 1
tracker.put_activity_accessed(student, 6, 3)
assert tracker.get_unit_progress(student)['6'] == 1
tracker.put_html_accessed(student, 6, 3)
assert tracker.get_unit_progress(student)['6'] == 2
# Test assessment counters.
pre_id = 'Pre'
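# For assessments, the status doubles as a submission counter: each
# put_assessment_completed() call below increments it by one.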
tracker.put_assessment_completed(student, pre_id)
progress = tracker.get_or_create_progress(student)
assert tracker.is_assessment_completed(progress, pre_id)
assert tracker.get_assessment_status(progress, pre_id) == 1
tracker.put_assessment_completed(student, pre_id)
progress = tracker.get_or_create_progress(student)
assert tracker.is_assessment_completed(progress, pre_id)
assert tracker.get_assessment_status(progress, pre_id) == 2
tracker.put_assessment_completed(student, pre_id)
progress = tracker.get_or_create_progress(student)
assert tracker.is_assessment_completed(progress, pre_id)
assert tracker.get_assessment_status(progress, pre_id) == 3
# Test that invalid keys do not lead to any updates.
# Invalid assessment id.
fake_id = 'asdf'
tracker.put_assessment_completed(student, fake_id)
progress = tracker.get_or_create_progress(student)
assert not tracker.is_assessment_completed(progress, fake_id)
assert tracker.get_assessment_status(progress, fake_id) is None
# Invalid unit id.
tracker.put_activity_completed(student, fake_id, 1)
progress = tracker.get_or_create_progress(student)
assert tracker.get_activity_status(progress, fake_id, 1) is None
# Invalid lesson id.
fake_numeric_id = 22
tracker.put_activity_completed(student, 1, fake_numeric_id)
progress = tracker.get_or_create_progress(student)
assert tracker.get_activity_status(progress, 1, fake_numeric_id) is None
# Invalid block id.
tracker.put_block_completed(student, 5, 2, fake_numeric_id)
progress = tracker.get_or_create_progress(student)
assert not tracker.is_block_completed(
progress, 5, 2, fake_numeric_id)
class AssessmentTest(actions.TestBase):
"""Test for assessments."""
def test_course_pass(self):
"""Test student passing final exam."""
email = 'test_pass@google.com'
name = 'Test Pass'
post = {'assessment_type': 'Fin', 'score': '100.00'}
# Register.
actions.login(email)
actions.register(self, name)
# Submit answer.
response = actions.submit_assessment(self, 'Fin', post)
assert_equals(response.status_int, 200)
assert_contains('your overall course score of 70%', response.body)
assert_contains('you have passed the course', response.body)
# Check that the result shows up on the profile page.
response = actions.check_profile(self, name)
assert_contains('70', response.body)
assert_contains('100', response.body)
# pylint: disable-msg=too-many-statements
def test_assessments(self):
"""Test assessment scores are properly submitted and summarized."""
course = courses.Course(None, app_context=sites.get_all_courses()[0])
email = 'test_assessments@google.com'
name = 'Test Assessments'
pre_answers = [{'foo': 'bar'}, {'Alice': u'Bob (тест данные)'}]
pre = {
'assessment_type': 'Pre', 'score': '1.00',
'answers': transforms.dumps(pre_answers)}
mid = {'assessment_type': 'Mid', 'score': '2.00'}
fin = {'assessment_type': 'Fin', 'score': '3.00'}
peer = {'assessment_type': 'ReviewAssessmentExample'}
second_mid = {'assessment_type': 'Mid', 'score': '1.00'}
second_fin = {'assessment_type': 'Fin', 'score': '100000'}
# Register.
actions.login(email)
actions.register(self, name)
# Navigate to the course overview page.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_does_not_contain(u'id="progress-completed-Mid', response.body)
assert_contains(u'id="progress-notstarted-Mid', response.body)
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
student = models.Student.get_enrolled_student_by_email(email)
# Check that four score objects (corresponding to the four sample
# assessments) exist right now, and that they all have zero
# score.
student_scores = course.get_all_scores(student)
assert len(student_scores) == 4
for assessment in student_scores:
assert assessment['score'] == 0
# Submit assessments and check that the score is updated.
actions.submit_assessment(self, 'Pre', pre)
student = models.Student.get_enrolled_student_by_email(email)
student_scores = course.get_all_scores(student)
assert len(student_scores) == 4
for assessment in student_scores:
if assessment['id'] == 'Pre':
assert assessment['score'] > 0
else:
assert assessment['score'] == 0
actions.submit_assessment(self, 'Mid', mid)
student = models.Student.get_enrolled_student_by_email(email)
# Navigate to the course overview page.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(u'id="progress-completed-Pre', response.body)
assert_contains(u'id="progress-completed-Mid', response.body)
assert_contains(u'id="progress-notstarted-Fin', response.body)
# Submit the final assessment.
actions.submit_assessment(self, 'Fin', fin)
student = models.Student.get_enrolled_student_by_email(email)
# Submit the sample peer review assessment.
actions.submit_assessment(self, 'ReviewAssessmentExample', peer)
student_scores = course.get_all_scores(student)
# This assessment is not considered to be completed until enough
# peer reviews have been submitted.
for assessment in student_scores:
if assessment['id'] == 'ReviewAssessmentExample':
assert assessment['human_graded']
assert not assessment['completed']
# Navigate to the course overview page.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(u'id="progress-completed-Fin', response.body)
# Check that the overall-score is non-zero.
assert course.get_overall_score(student)
# Check assessment answers.
answers = transforms.loads(
models.StudentAnswersEntity.get_by_key_name(
student.user_id).data)
assert pre_answers == answers['Pre']
# pylint: disable=g-explicit-bool-comparison
assert [] == answers['Mid']
assert [] == answers['Fin']
# pylint: enable=g-explicit-bool-comparison
# Check that scores are recorded properly.
student = models.Student.get_enrolled_student_by_email(email)
assert int(course.get_score(student, 'Pre')) == 1
assert int(course.get_score(student, 'Mid')) == 2
assert int(course.get_score(student, 'Fin')) == 3
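# The sample course apparently weights the midcourse assessment at 30%
# and the final at 70%, hence the 0.30/0.70 factors below.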
assert (int(course.get_overall_score(student)) ==
int((0.30 * 2) + (0.70 * 3)))
# Try posting a new midcourse exam with a lower score;
# nothing should change.
actions.submit_assessment(self, 'Mid', second_mid)
student = models.Student.get_enrolled_student_by_email(email)
assert int(course.get_score(student, 'Pre')) == 1
assert int(course.get_score(student, 'Mid')) == 2
assert int(course.get_score(student, 'Fin')) == 3
assert (int(course.get_overall_score(student)) ==
int((0.30 * 2) + (0.70 * 3)))
# Now try posting a postcourse exam with a higher score and note
# the changes.
actions.submit_assessment(self, 'Fin', second_fin)
student = models.Student.get_enrolled_student_by_email(email)
assert int(course.get_score(student, 'Pre')) == 1
assert int(course.get_score(student, 'Mid')) == 2
assert int(course.get_score(student, 'Fin')) == 100000
assert (int(course.get_overall_score(student)) ==
int((0.30 * 2) + (0.70 * 100000)))
finally:
namespace_manager.set_namespace(old_namespace)
def remove_dir(dir_name):
"""Delete a directory."""
logging.info('removing folder: %s', dir_name)
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
if os.path.exists(dir_name):
raise Exception('Failed to delete directory: %s' % dir_name)
def clean_dir(dir_name):
"""Clean a directory."""
remove_dir(dir_name)
logging.info('creating folder: %s', dir_name)
os.makedirs(dir_name)
if not os.path.exists(dir_name):
raise Exception('Failed to create directory: %s' % dir_name)
def clone_canonical_course_data(src, dst):
"""Makes a copy of canonical course content."""
clean_dir(dst)
def copytree(name):
shutil.copytree(
os.path.join(src, name),
os.path.join(dst, name))
copytree('assets')
copytree('data')
copytree('views')
shutil.copy(
os.path.join(src, 'course.yaml'),
os.path.join(dst, 'course.yaml'))
# Make all files writable.
for root, unused_dirs, files in os.walk(dst):
for afile in files:
fname = os.path.join(root, afile)
os.chmod(fname, 0o777)
class GeneratedCourse(object):
"""A helper class for a dynamically generated course content."""
@classmethod
def set_data_home(cls, test):
"""All data for this test will be placed here."""
cls.data_home = test.test_tempdir
def __init__(self, ns):
self.path = ns
@property
def namespace(self):
return 'ns%s' % self.path
@property
def title(self):
return u'Power Searching with Google title-%s (тест данные)' % self.path
@property
def unit_title(self):
return u'Interpreting results unit-title-%s (тест данные)' % self.path
@property
def lesson_title(self):
return u'Word order matters lesson-title-%s (тест данные)' % self.path
@property
def head(self):
return '<!-- head-%s -->' % self.path
@property
def css(self):
return '<!-- css-%s -->' % self.path
@property
def home(self):
return os.path.join(self.data_home, 'data-%s' % self.path)
@property
def email(self):
return 'walk_the_course_named_%s@google.com' % self.path
@property
def name(self):
return 'Walk The Course Named %s' % self.path
class MultipleCoursesTestBase(actions.TestBase):
"""Configures several courses for running concurrently."""
def modify_file(self, filename, find, replace):
"""Read, modify and write back the file."""
text = open(filename, 'r').read().decode('utf-8')
# Make sure target text is not in the file.
assert replace not in text
text = text.replace(find, replace)
assert replace in text
open(filename, 'w').write(text.encode('utf-8'))
def modify_canonical_course_data(self, course):
"""Modify canonical content by adding unique bits to it."""
self.modify_file(
os.path.join(course.home, 'course.yaml'),
'title: \'Power Searching with Google\'',
'title: \'%s\'' % course.title)
self.modify_file(
os.path.join(course.home, 'data/unit.csv'),
',Interpreting results,',
',%s,' % course.unit_title)
self.modify_file(
os.path.join(course.home, 'data/lesson.csv'),
',Word order matters,',
',%s,' % course.lesson_title)
self.modify_file(
os.path.join(course.home, 'data/lesson.csv'),
',Interpreting results,',
',%s,' % course.unit_title)
self.modify_file(
os.path.join(course.home, 'views/base.html'),
'<head>',
'<head>\n%s' % course.head)
self.modify_file(
os.path.join(course.home, 'assets/css/main.css'),
'html {',
'%s\nhtml {' % course.css)
def prepare_course_data(self, course):
"""Create unique course content for a course."""
clone_canonical_course_data(self.bundle_root, course.home)
self.modify_canonical_course_data(course)
def setUp(self): # pylint: disable=g-bad-name
"""Configure the test."""
super(MultipleCoursesTestBase, self).setUp()
GeneratedCourse.set_data_home(self)
self.course_a = GeneratedCourse('a')
self.course_b = GeneratedCourse('b')
self.course_ru = GeneratedCourse('ru')
# Override BUNDLE_ROOT.
self.bundle_root = appengine_config.BUNDLE_ROOT
appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
# Prepare course content.
clean_dir(GeneratedCourse.data_home)
self.prepare_course_data(self.course_a)
self.prepare_course_data(self.course_b)
self.prepare_course_data(self.course_ru)
# Set up one course for I18N.
self.modify_file(
os.path.join(self.course_ru.home, 'course.yaml'),
'locale: \'en_US\'',
'locale: \'ru\'')
# Configure courses.
sites.setup_courses('%s, %s, %s' % (
'course:/courses/a:/data-a:nsa',
'course:/courses/b:/data-b:nsb',
'course:/courses/ru:/data-ru:nsru'))
def tearDown(self): # pylint: disable=g-bad-name
"""Clean up."""
sites.reset_courses()
appengine_config.BUNDLE_ROOT = self.bundle_root
super(MultipleCoursesTestBase, self).tearDown()
def walk_the_course(
self, course, first_time=True, is_admin=False, logout=True):
"""Visit a course as a Student would."""
with actions.OverriddenEnvironment({'course': {'browsable': False}}):
# Check normal user has no access.
actions.login(course.email, is_admin=is_admin)
# Test schedule.
if first_time:
response = self.testapp.get('/courses/%s/preview' % course.path)
else:
response = self.testapp.get('/courses/%s/course' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.unit_title, response.body)
assert_contains(course.head, response.body)
# Test static resources.
response = self.testapp.get(
'/courses/%s/assets/css/main.css' % course.path)
assert_contains(course.css, response.body)
if first_time:
# Test registration.
response = self.get('/courses/%s/register' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.head, response.body)
register_form = actions.get_form_by_action(response, 'register')
register_form.set('form01', course.name)
register_form.action = '/courses/%s/register' % course.path
response = self.submit(register_form)
assert_equals(response.status_int, 302)
assert_contains(
'course#registration_confirmation', response.headers[
'location'])
# Check lesson page.
response = self.testapp.get(
'/courses/%s/unit?unit=1&lesson=5' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.lesson_title, response.body)
assert_contains(course.head, response.body)
# Check activity page.
response = self.testapp.get(
'/courses/%s/activity?unit=1&lesson=5' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.lesson_title, response.body)
assert_contains(course.head, response.body)
if logout:
actions.logout()
class MultipleCoursesTest(MultipleCoursesTestBase):
"""Test several courses running concurrently."""
def test_courses_are_isolated(self):
"""Test each course serves its own assets, views and data."""
# Pretend students visit courses.
self.walk_the_course(self.course_a)
self.walk_the_course(self.course_b)
self.walk_the_course(self.course_a, first_time=False)
self.walk_the_course(self.course_b, first_time=False)
# Check course namespaced data.
self.validate_course_data(self.course_a)
self.validate_course_data(self.course_b)
# Check default namespace.
assert (
namespace_manager.get_namespace() ==
appengine_config.DEFAULT_NAMESPACE_NAME)
assert not models.Student.all().fetch(1000)
def validate_course_data(self, course):
"""Check course data is valid."""
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(course.namespace)
try:
students = models.Student.all().fetch(1000)
assert len(students) == 1
for student in students:
assert_equals(course.email, student.key().name())
assert_equals(course.name, student.name)
finally:
namespace_manager.set_namespace(old_namespace)
class I18NTest(MultipleCoursesTestBase):
"""Test courses running in different locales and containing I18N content."""
def test_csv_supports_utf8(self):
"""Test UTF-8 content in CSV file is handled correctly."""
title_ru = u'Найди факты быстрее'
csv_file = os.path.join(self.course_ru.home, 'data/unit.csv')
self.modify_file(
csv_file, ',Find facts faster,', ',%s,' % title_ru)
self.modify_file(
os.path.join(self.course_ru.home, 'data/lesson.csv'),
',Find facts faster,', ',%s,' % title_ru)
rows = []
for row in csv.reader(open(csv_file)):
rows.append(row)
assert title_ru == rows[6][3].decode('utf-8')
response = self.get('/courses/%s/course' % self.course_ru.path)
assert_contains(title_ru, response.body)
# Test the student perspective.
self.walk_the_course(self.course_ru, first_time=True)
self.walk_the_course(self.course_ru, first_time=False)
# Test course author dashboard.
self.walk_the_course(
self.course_ru, first_time=False, is_admin=True, logout=False)
def assert_page_contains(page_name, text_array):
dashboard_url = '/courses/%s/dashboard' % self.course_ru.path
response = self.get('%s?action=%s' % (dashboard_url, page_name))
for text in text_array:
assert_contains(text, response.body)
assert_page_contains('', [
title_ru, self.course_ru.unit_title, self.course_ru.lesson_title])
assert_page_contains(
'assets', [self.course_ru.title])
assert_page_contains(
'settings', [
self.course_ru.title,
vfs.AbstractFileSystem.normpath(self.course_ru.home)])
# Clean up.
actions.logout()
def test_i18n(self):
"""Test course is properly internationalized."""
response = self.get('/courses/%s/course' % self.course_ru.path)
assert_contains_all_of(
[u'Войти', u'Расписание', u'Курс'], response.body)
class CourseUrlRewritingTestBase(actions.TestBase):
"""Prepare course for using rewrite rules and '/courses/pswg' base URL."""
def setUp(self): # pylint: disable=g-bad-name
super(CourseUrlRewritingTestBase, self).setUp()
self.base = '/courses/pswg'
self.namespace = 'gcb-courses-pswg-tests-ns'
sites.setup_courses('course:%s:/:%s' % (self.base, self.namespace))
def tearDown(self): # pylint: disable=g-bad-name
sites.reset_courses()
super(CourseUrlRewritingTestBase, self).tearDown()
def canonicalize(self, href, response=None):
"""Canonicalize URL's using either <base> or self.base."""
# Check if already canonicalized.
if href.startswith(
self.base) or utils.ApplicationHandler.is_absolute(href):
pass
else:
# Look for <base> tag in the response to compute the canonical URL.
if response:
return super(CourseUrlRewritingTestBase, self).canonicalize(
href, response)
# Prepend self.base to compute the canonical URL.
if not href.startswith('/'):
href = '/%s' % href
href = '%s%s' % (self.base, href)
self.audit_url(href)
return href
class VirtualFileSystemTestBase(actions.TestBase):
"""Prepares a course running on a virtual local file system."""
def setUp(self): # pylint: disable=g-bad-name
"""Configure the test."""
super(VirtualFileSystemTestBase, self).setUp()
GeneratedCourse.set_data_home(self)
# Override BUNDLE_ROOT.
self.bundle_root = appengine_config.BUNDLE_ROOT
appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
# Prepare course content.
home_folder = os.path.join(GeneratedCourse.data_home, 'data-v')
clone_canonical_course_data(self.bundle_root, home_folder)
# Configure course.
self.namespace = 'nsv'
sites.setup_courses('course:/:/data-vfs:%s' % self.namespace)
# Modify app_context filesystem to map /data-v to /data-vfs.
def after_create(unused_cls, instance):
# pylint: disable=protected-access
instance._fs = vfs.AbstractFileSystem(
vfs.LocalReadOnlyFileSystem(
os.path.join(GeneratedCourse.data_home, 'data-vfs'),
home_folder))
sites.ApplicationContext.after_create = after_create
def tearDown(self): # pylint: disable=g-bad-name
"""Clean up."""
sites.reset_courses()
appengine_config.BUNDLE_ROOT = self.bundle_root
super(VirtualFileSystemTestBase, self).tearDown()
class DatastoreBackedCourseTest(actions.TestBase):
"""Prepares an empty course running on datastore-backed file system."""
def setUp(self): # pylint: disable=g-bad-name
"""Configure the test."""
super(DatastoreBackedCourseTest, self).setUp()
self.supports_editing = True
self.namespace = 'dsbfs'
sites.setup_courses('course:/::%s' % self.namespace)
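# Course rules follow the 'course:<url_prefix>:<folder>:<namespace>'
# format used elsewhere in these tests; leaving the folder component
# empty backs the course with the datastore instead of a disk folder.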
all_courses = sites.get_all_courses()
assert len(all_courses) == 1
self.app_context = all_courses[0]
def tearDown(self): # pylint: disable=g-bad-name
"""Clean up."""
sites.reset_courses()
super(DatastoreBackedCourseTest, self).tearDown()
def upload_all_in_dir(self, dir_name, files_added):
"""Uploads all files in a folder to vfs."""
root_dir = os.path.join(appengine_config.BUNDLE_ROOT, dir_name)
for root, unused_dirs, files in os.walk(root_dir):
for afile in files:
filename = os.path.join(root, afile)
self.app_context.fs.put(filename, open(filename, 'rb'))
files_added.append(filename)
def init_course_data(self, upload_files):
"""Uploads required course data files into vfs."""
files_added = []
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(self.namespace)
upload_files(files_added)
# Normalize paths to be identical for Windows and Linux.
files_added_normpath = []
for file_added in files_added:
files_added_normpath.append(
vfs.AbstractFileSystem.normpath(file_added))
assert self.app_context.fs.list(
appengine_config.BUNDLE_ROOT) == sorted(files_added_normpath)
finally:
namespace_manager.set_namespace(old_namespace)
def upload_all_sample_course_files(self, files_added):
"""Uploads all sample course data files into vfs."""
self.upload_all_in_dir('assets', files_added)
self.upload_all_in_dir('views', files_added)
self.upload_all_in_dir('data', files_added)
course_yaml = os.path.join(
appengine_config.BUNDLE_ROOT, 'course.yaml')
self.app_context.fs.put(course_yaml, open(course_yaml, 'rb'))
files_added.append(course_yaml)
class DatastoreBackedCustomCourseTest(DatastoreBackedCourseTest):
"""Prepares a sample course running on datastore-backed file system."""
# pylint: disable-msg=too-many-statements
def test_course_import(self):
"""Test importing of the course."""
# Set up courses.
sites.setup_courses('course:/test::ns_test, course:/:/')
self.namespace = 'ns_test'
self.base = '/test'
config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
# Format import payload and URL.
payload_dict = {}
payload_dict['course'] = 'course:/:/'
request = {}
request['payload'] = transforms.dumps(payload_dict)
import_put_url = (
'rest/course/import?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}))
# Check that a non-logged-in user has no rights.
response = self.put(import_put_url, {}, expect_errors=True)
assert_equals(404, response.status_int)
# Login as admin.
email = 'test_course_import@google.com'
name = 'Test Course Import'
actions.login(email, is_admin=True)
# Check course is empty.
response = self.get('dashboard')
assert_equals(200, response.status_int)
assert_does_not_contain('Filter image results by color', response.body)
# Import sample course.
request[
'xsrf_token'] = XsrfTokenManager.create_xsrf_token('import-course')
import_put_url = (
'rest/course/import?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}))
response = self.put(import_put_url, {})
assert_equals(200, response.status_int)
assert_contains('Imported.', response.body)
# Check course is not empty.
response = self.get('dashboard')
assert_contains('Filter image results by color', response.body)
# Check assessment is copied.
response = self.get('assets/js/assessment-21.js')
assert_equals(200, response.status_int)
assert_contains('Humane Society website', response.body)
# Check activity is copied.
response = self.get('assets/js/activity-37.js')
assert_equals(200, response.status_int)
assert_contains('explore ways to keep yourself updated', response.body)
unit_2_title = 'Unit 2 - Interpreting results'
lesson_2_1_title = '2.1 When search results suggest something new'
lesson_2_2_title = '2.2 Thinking more deeply about your search'
# Check units and lessons are indexed correctly.
response = actions.register(self, name)
assert (
'http://localhost'
'/test/course'
'#registration_confirmation' == response.location)
response = self.get('course')
assert_contains(unit_2_title, response.body)
# Unit page.
response = self.get('unit?unit=9')
# A unit title.
assert_contains(
unit_2_title, response.body)
# First child lesson without link.
assert_contains(
lesson_2_1_title, response.body)
# Second child lesson with link.
assert_contains(
lesson_2_2_title, response.body)
# Breadcrumbs.
assert_contains_all_of(
['Unit 2</a></li>', 'Lesson 1</li>'], response.body)
# Unit page.
response = self.get('activity?unit=9&lesson=10')
# A unit title.
assert_contains(
unit_2_title, response.body)
# An activity title.
assert_contains(
'Lesson 2.1 Activity', response.body)
# First child lesson without link.
assert_contains(
lesson_2_1_title, response.body)
# Second child lesson with link.
assert_contains(
lesson_2_2_title, response.body)
# Breadcrumbs.
assert_contains_all_of(
['Unit 2</a></li>', 'Lesson 1</a></li>'], response.body)
# Clean up.
sites.reset_courses()
config.Registry.test_overrides = {}
def test_get_put_file(self):
"""Test that one can put/get file via REST interface."""
self.init_course_data(self.upload_all_sample_course_files)
email = 'test_get_put_file@google.com'
actions.login(email, is_admin=True)
response = self.get('dashboard?action=settings')
# Check course.yaml edit form.
compute_form = response.forms['edit_course_yaml']
response = self.submit(compute_form)
assert_equals(response.status_int, 302)
assert_contains(
'dashboard?action=edit_settings&key=%2Fcourse.yaml',
response.location)
response = self.get(response.location)
assert_contains('rest/files/item?key=%2Fcourse.yaml', response.body)
# Get text file.
response = self.get('rest/files/item?key=%2Fcourse.yaml')
assert_equals(response.status_int, 200)
json_dict = transforms.loads(
transforms.loads(response.body)['payload'])
assert '/course.yaml' == json_dict['key']
assert 'text/utf-8' == json_dict['encoding']
assert (open(os.path.join(
appengine_config.BUNDLE_ROOT, 'course.yaml')).read(
) == json_dict['content'])
def test_empty_course(self):
"""Test course with no assets and the simplest possible course.yaml."""
email = 'test_empty_course@google.com'
actions.login(email, is_admin=True)
# Check minimal course page comes up.
response = self.get('course')
assert_contains('UNTITLED COURSE', response.body)
assert_contains('Registration', response.body)
# Check inheritable files are accessible.
response = self.get('/assets/css/main.css')
assert (open(os.path.join(
appengine_config.BUNDLE_ROOT, 'assets/css/main.css')).read(
) == response.body)
# Check non-inheritable files are not inherited.
response = self.testapp.get(
'/assets/js/activity-1.3.js', expect_errors=True)
assert_equals(response.status_int, 404)
# Login as admin.
email = 'test_empty_course@google.com'
actions.login(email, is_admin=True)
response = self.get('dashboard')
# Add unit.
compute_form = response.forms['add_unit']
response = self.submit(compute_form)
response = self.get('/rest/course/unit?key=1')
assert_equals(response.status_int, 200)
# Add lessons.
response = self.get('dashboard')
compute_form = response.forms['add_lesson']
response = self.submit(compute_form)
response = self.get('/rest/course/lesson?key=2')
assert_equals(response.status_int, 200)
# Add assessment.
response = self.get('dashboard')
compute_form = response.forms['add_assessment']
response = self.submit(compute_form)
response = self.get('/rest/course/assessment?key=3')
assert_equals(response.status_int, 200)
# Add link.
response = self.get('dashboard')
compute_form = response.forms['add_link']
response = self.submit(compute_form)
response = self.get('/rest/course/link?key=4')
assert_equals(response.status_int, 200)
def import_sample_course(self):
"""Imports a sample course."""
# Set up courses.
sites.setup_courses('course:/test::ns_test, course:/:/')
# Import sample course.
dst_app_context = sites.get_all_courses()[0]
src_app_context = sites.get_all_courses()[1]
dst_course = courses.Course(None, app_context=dst_app_context)
errors = []
src_course_out, dst_course_out = dst_course.import_from(
src_app_context, errors)
if errors:
raise Exception(errors)
assert len(
src_course_out.get_units()) == len(dst_course_out.get_units())
dst_course_out.save()
# Clean up.
sites.reset_courses()
def test_imported_course_performance(self):
"""Tests various pages of the imported course."""
self.import_sample_course()
# Install a clone on '/' so all the tests will treat it as a normal
# sample course.
sites.setup_courses('course:/::ns_test')
self.namespace = 'ns_test'
# Enable memcache.
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
with actions.OverriddenEnvironment({
'course': {
'now_available': True,
'browsable': False}}):
def custom_inc(unused_increment=1, context=None):
"""A custom inc() function for cache miss counter."""
self.keys.append(context)
self.count += 1
def assert_cached(url, assert_text, cache_miss_allowed=0):
"""Checks that specific URL supports caching."""
memcache.flush_all()
self.keys = []
self.count = 0
# Expect cache misses the first time we load the page.
cache_miss_before = self.count
response = self.get(url)
assert_contains(assert_text, response.body)
assert cache_miss_before != self.count
# Expect no cache misses (beyond the allowance) the second time we
# load the page.
self.keys = []
cache_miss_before = self.count
response = self.get(url)
assert_contains(assert_text, response.body)
cache_miss_actual = self.count - cache_miss_before
if cache_miss_actual != cache_miss_allowed:
raise Exception(
'Expected %s cache misses, got %s. Keys are:\n%s' % (
cache_miss_allowed, cache_miss_actual,
'\n'.join(self.keys)))
old_inc = models.CACHE_MISS.inc
models.CACHE_MISS.inc = custom_inc
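# From this point on, every cache miss bumps self.count and records its
# key, which is what lets assert_cached() measure misses per request.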
# Walk the site.
email = 'test_units_lessons@google.com'
name = 'Test Units Lessons'
assert_cached('preview', 'Putting it all together')
actions.login(email, is_admin=True)
assert_cached('preview', 'Putting it all together')
actions.register(self, name)
assert_cached(
'unit?unit=9', 'When search results suggest something new')
assert_cached(
'unit?unit=9&lesson=12',
'Understand options for different media')
# Clean up.
models.CACHE_MISS.inc = old_inc
config.Registry.test_overrides = {}
sites.reset_courses()
def test_imported_course(self):
"""Tests various pages of the imported course."""
# TODO(psimakov): Ideally, this test class should run all aspect tests
# and they all should pass. However, the ids in the cloned course
# do not match the ids of the source sample course, and we fetch pages
# and assert page content using ids. For now, we will check the minimal
# set of pages manually. Later, we have to make it run all known tests.
self.import_sample_course()
# Install a clone on '/' so all the tests will treat it as a normal
# sample course.
sites.setup_courses('course:/::ns_test')
self.namespace = 'ns_test'
email = 'test_units_lessons@google.com'
name = 'Test Units Lessons'
actions.login(email, is_admin=True)
response = self.get('course')
assert_contains('Putting it all together', response.body)
actions.register(self, name)
actions.check_profile(self, name)
actions.view_announcements(self)
# Check unit page without lesson specified.
response = self.get('unit?unit=9')
assert_contains('Interpreting results', response.body)
assert_contains(
'When search results suggest something new', response.body)
# Check unit page with a lesson specified.
response = self.get('unit?unit=9&lesson=12')
assert_contains('Interpreting results', response.body)
assert_contains(
'Understand options for different media', response.body)
# Check assessment page.
response = self.get('assessment?name=21')
assert_contains(
'<script src="assets/js/assessment-21.js"></script>', response.body)
# Check activity page.
response = self.get('activity?unit=9&lesson=13')
assert_contains(
'<script src="assets/js/activity-13.js"></script>',
response.body)
# Clean up.
sites.reset_courses()
class DatastoreBackedSampleCourseTest(DatastoreBackedCourseTest):
"""Run all existing tests using datastore-backed file system."""
def setUp(self): # pylint: disable=g-bad-name
super(DatastoreBackedSampleCourseTest, self).setUp()
self.init_course_data(self.upload_all_sample_course_files)
class LessonComponentsTest(DatastoreBackedCourseTest):
"""Test operations that make use of components in a lesson body."""
def setUp(self):
"""Set up the dummy course for each test case in this class."""
super(LessonComponentsTest, self).setUp()
self.course = courses.Course(None, app_context=self.app_context)
self.unit = self.course.add_unit()
self.lesson = self.course.add_lesson(self.unit)
self.lesson.objectives = """
<question quid="123" weight="1" instanceid="QN"></question>
random_text
<gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
more_random_text
<question-group qgid="456" instanceid="QG"></question-group>
yet_more_random_text
"""
self.lesson.has_activity = False
self.course.update_lesson(self.lesson)
self.course.save()
self.tracker = self.course.get_progress_tracker()
def test_component_discovery(self):
"""Test extraction of components from a lesson body."""
cpt_list = self.course.get_components(
self.unit.unit_id, self.lesson.lesson_id)
assert cpt_list == [
{'instanceid': 'QN', 'quid': '123', 'weight': '1',
'cpt_name': 'question'},
{'instanceid': 'VD', 'cpt_name': 'gcb-youtube',
'videoid': 'Kdg2drcUjYI'},
{'instanceid': 'QG', 'qgid': '456', 'cpt_name': 'question-group'}
]
valid_cpt_ids = self.tracker.get_valid_component_ids(
self.unit.unit_id, self.lesson.lesson_id)
self.assertEqual(set(['QN', 'QG']), set(valid_cpt_ids))
def test_component_progress(self):
"""Test that progress tracking for components is done correctly."""
unit_id = self.unit.unit_id
lesson_id = self.lesson.lesson_id
student = models.Student(key_name='lesson-body-test-student')
assert self.tracker.get_unit_progress(student)[unit_id] == 0
assert self.tracker.get_lesson_progress(
student, unit_id)[lesson_id] == {
'html': 0, 'activity': 0, 'has_activity': False}
# Visiting the lesson page has no effect on progress, since it contains
# trackable components.
self.tracker.put_html_accessed(student, unit_id, lesson_id)
assert self.tracker.get_unit_progress(student)[unit_id] == 0
assert self.tracker.get_lesson_progress(
student, unit_id)[lesson_id] == {
'html': 0, 'activity': 0, 'has_activity': False}
# Marking progress for a non-existent component id has no effect.
self.tracker.put_component_completed(student, unit_id, lesson_id, 'a')
assert self.tracker.get_unit_progress(student)[unit_id] == 0
assert self.tracker.get_lesson_progress(
student, unit_id)[lesson_id] == {
'html': 0, 'activity': 0, 'has_activity': False}
# Marking progress for a non-trackable component id has no effect.
self.tracker.put_component_completed(student, unit_id, lesson_id, 'VD')
assert self.tracker.get_unit_progress(student)[unit_id] == 0
assert self.tracker.get_lesson_progress(
student, unit_id)[lesson_id] == {
'html': 0, 'activity': 0, 'has_activity': False}
# Completing a trackable component marks the lesson as in progress.
self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN')
assert self.tracker.get_unit_progress(student)[unit_id] == 1
assert self.tracker.get_lesson_progress(
student, unit_id)[lesson_id] == {
'html': 1, 'activity': 0, 'has_activity': False}
# Completing the same component again has no further effect.
self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN')
assert self.tracker.get_unit_progress(student)[unit_id] == 1
assert self.tracker.get_lesson_progress(
student, unit_id)[lesson_id] == {
'html': 1, 'activity': 0, 'has_activity': False}
# Completing the other trackable component marks the lesson (and unit)
# as completed.
self.tracker.put_component_completed(student, unit_id, lesson_id, 'QG')
assert self.tracker.get_unit_progress(student)[unit_id] == 2
assert self.tracker.get_lesson_progress(
student, unit_id)[lesson_id] == {
'html': 2, 'activity': 0, 'has_activity': False}
class FakeEnvironment(object):
"""Temporary fake tools.etl.remote.Evironment.
Bypasses making a remote_api connection because webtest can't handle it and
we don't want to bring up a local server for our functional tests. When this
fake is used, the in-process datastore stub will handle RPCs.
TODO(johncox): find a way to make webtest successfully emulate the
remote_api endpoint and get rid of this fake.
"""
def __init__(self, application_id, server, path=None):
self._application_id = application_id
self._path = path
self._server = server
def establish(self):
pass
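# Passing environment_class=FakeEnvironment to etl.main() in the tests
# below routes ETL datastore calls to the in-process stub instead of a
# live remote_api endpoint.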
class EtlMainTestCase(DatastoreBackedCourseTest):
"""Tests tools/etl/etl.py's main()."""
# Allow access to protected members under test.
# pylint: disable=protected-access
def setUp(self):
"""Configures EtlMainTestCase."""
super(EtlMainTestCase, self).setUp()
self.test_environ = copy.deepcopy(os.environ)
# In etl.main, use test auth scheme to avoid interactive login.
self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
self.archive_path = os.path.join(self.test_tempdir, 'archive.zip')
self.new_course_title = 'New Course Title'
self.url_prefix = '/test'
self.raw = 'course:%s::ns_test' % self.url_prefix
self.swap(os, 'environ', self.test_environ)
self.common_args = [
self.url_prefix, 'myapp', 'localhost:8080']
self.common_command_args = self.common_args + [
'--archive_path', self.archive_path]
self.common_course_args = [etl._TYPE_COURSE] + self.common_command_args
self.common_datastore_args = [
etl._TYPE_DATASTORE] + self.common_command_args
self.delete_datastore_args = etl.PARSER.parse_args(
[etl._MODE_DELETE, etl._TYPE_DATASTORE] + self.common_args)
self.download_course_args = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_course_args)
self.upload_course_args = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_course_args)
# Set up courses: version 1.3, version 1.2.
sites.setup_courses(self.raw + ', course:/:/')
def tearDown(self):
sites.reset_courses()
super(EtlMainTestCase, self).tearDown()
def create_app_yaml(self, context, title=None):
yaml = copy.deepcopy(courses.DEFAULT_COURSE_YAML_DICT)
if title:
yaml['course']['title'] = title
context.fs.impl.put(
os.path.join(
appengine_config.BUNDLE_ROOT, etl._COURSE_YAML_PATH_SUFFIX),
etl._ReadWrapper(str(yaml)), is_draft=False)
def create_archive(self):
self.upload_all_sample_course_files([])
self.import_sample_course()
args = etl.PARSER.parse_args(['download'] + self.common_course_args)
etl.main(args, environment_class=FakeEnvironment)
sites.reset_courses()
def create_archive_with_question(self, data):
self.upload_all_sample_course_files([])
self.import_sample_course()
question = _add_data_entity(
sites.get_all_courses()[1], models.QuestionEntity, data)
args = etl.PARSER.parse_args(['download'] + self.common_course_args)
etl.main(args, environment_class=FakeEnvironment)
sites.reset_courses()
return question
def create_empty_course(self, raw):
sites.setup_courses(raw)
context = etl_lib.get_context(self.url_prefix)
course = etl._get_course_from(etl_lib.get_context(self.url_prefix))
course.delete_all()
self.create_app_yaml(context)
def import_sample_course(self):
"""Imports a sample course."""
# Import sample course.
dst_app_context = sites.get_all_courses()[0]
src_app_context = sites.get_all_courses()[1]
# Patch in a course.yaml.
self.create_app_yaml(dst_app_context, title=self.new_course_title)
dst_course = courses.Course(None, app_context=dst_app_context)
errors = []
src_course_out, dst_course_out = dst_course.import_from(
src_app_context, errors)
if errors:
raise Exception(errors)
assert len(
src_course_out.get_units()) == len(dst_course_out.get_units())
dst_course_out.save()
def test_archive_size_can_exceed_2_gb(self):
# Each archive member below is 1 GB; two members plus one extra byte
# push the total archive size past 2 GB.
byte = '.'
gig = byte * (2 ** 30)
archive = etl._Archive(self.archive_path)
archive.open('w')
archive.add(os.path.join(self.test_tempdir, 'first'), gig)
archive.add(os.path.join(self.test_tempdir, 'second'), gig)
archive.add(os.path.join(self.test_tempdir, 'overflow'), byte)
archive.close()
def test_delete_course_fails(self):
args = etl.PARSER.parse_args(
[etl._MODE_DELETE, etl._TYPE_COURSE] + self.common_args)
self.assertRaises(
NotImplementedError,
etl.main, args, environment_class=FakeEnvironment)
def test_delete_datastore_fails_if_user_does_not_confirm(self):
self.swap(
etl, '_raw_input',
lambda x: 'not' + etl._DELETE_DATASTORE_CONFIRMATION_INPUT)
self.assertRaises(
SystemExit, etl.main, self.delete_datastore_args,
environment_class=FakeEnvironment)
def test_delete_datastore_succeeds(self):
"""Tests delete datastore success for populated and empty datastores."""
self.import_sample_course()
context = etl_lib.get_context(
self.delete_datastore_args.course_url_prefix)
self.swap(
etl, '_raw_input',
lambda x: etl._DELETE_DATASTORE_CONFIRMATION_INPUT)
# Spot check that some kinds are populated.
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(context.get_namespace_name())
self.assertTrue(vfs.FileDataEntity.all().get())
self.assertTrue(vfs.FileMetadataEntity.all().get())
finally:
namespace_manager.set_namespace(old_namespace)
# Delete against a datastore with contents runs successfully.
etl.main(self.delete_datastore_args, environment_class=FakeEnvironment)
# Spot check that those kinds are now empty.
try:
namespace_manager.set_namespace(context.get_namespace_name())
self.assertFalse(vfs.FileDataEntity.all().get())
self.assertFalse(vfs.FileMetadataEntity.all().get())
finally:
namespace_manager.set_namespace(old_namespace)
# Delete against a datastore without contents runs successfully.
etl.main(self.delete_datastore_args, environment_class=FakeEnvironment)
def test_disable_remote_cannot_be_passed_for_mode_other_than_run(self):
bad_args = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_course_args +
['--disable_remote'])
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_download_course_creates_valid_archive(self):
"""Tests download of course data and archive creation."""
self.upload_all_sample_course_files([])
self.import_sample_course()
question = _add_data_entity(
sites.get_all_courses()[0], models.QuestionEntity, 'test question')
etl.main(self.download_course_args, environment_class=FakeEnvironment)
# Don't use Archive and Manifest here because we want to test the raw
# structure of the emitted zipfile.
zip_archive = zipfile.ZipFile(self.archive_path)
# check manifest
manifest = transforms.loads(
zip_archive.open(etl._MANIFEST_FILENAME).read())
self.assertGreaterEqual(
courses.COURSE_MODEL_VERSION_1_3, manifest['version'])
self.assertEqual(
'course:%s::ns_test' % self.url_prefix, manifest['raw'])
# check content
for entity in manifest['entities']:
self.assertIn('is_draft', entity)
self.assertTrue(zip_archive.open(entity['path']))
# check question
question_json = transforms.loads(
zip_archive.open('models/QuestionEntity.json').read())
self.assertEqual(
question.key().id(), question_json['rows'][0]['key.id'])
self.assertEqual(
'test question', question_json['rows'][0]['data'])
def test_download_course_errors_if_archive_path_exists_on_disk(self):
self.upload_all_sample_course_files([])
self.import_sample_course()
etl.main(self.download_course_args, environment_class=FakeEnvironment)
self.assertRaises(
SystemExit, etl.main, self.download_course_args,
environment_class=FakeEnvironment)
def test_download_errors_if_course_url_prefix_does_not_exist(self):
sites.reset_courses()
self.assertRaises(
SystemExit, etl.main, self.download_course_args,
environment_class=FakeEnvironment)
def test_download_course_errors_if_course_version_is_pre_1_3(self):
args = etl.PARSER.parse_args(
['download', 'course', '/'] + self.common_course_args[2:])
self.upload_all_sample_course_files([])
self.import_sample_course()
self.assertRaises(
SystemExit, etl.main, args, environment_class=FakeEnvironment)
def test_download_datastore_fails_if_datastore_types_not_in_datastore(self):
download_datastore_args = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_datastore_args +
['--datastore_types', 'missing'])
self.assertRaises(
SystemExit, etl.main, download_datastore_args,
environment_class=FakeEnvironment)
def test_download_datastore_succeeds(self):
"""Test download of datastore data and archive creation."""
download_datastore_args = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_datastore_args +
['--datastore_types', 'Student,StudentPropertyEntity'])
context = etl_lib.get_context(download_datastore_args.course_url_prefix)
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(context.get_namespace_name())
first_student = models.Student(key_name='first_student')
second_student = models.Student(key_name='second_student')
first_entity = models.StudentPropertyEntity(
key_name='first_student-property_entity')
second_entity = models.StudentPropertyEntity(
key_name='second_student-property_entity')
db.put([first_student, second_student, first_entity, second_entity])
finally:
namespace_manager.set_namespace(old_namespace)
etl.main(
download_datastore_args, environment_class=FakeEnvironment)
archive = etl._Archive(self.archive_path)
archive.open('r')
self.assertEqual(
['Student.json', 'StudentPropertyEntity.json'],
sorted(
[os.path.basename(e.path) for e in archive.manifest.entities]))
student_entity = [
e for e in archive.manifest.entities
if e.path.endswith('Student.json')][0]
entity_entity = [
e for e in archive.manifest.entities
if e.path.endswith('StudentPropertyEntity.json')][0]
# Ensure .json files are deserializable into Python objects.
students = sorted(
transforms.loads(archive.get(student_entity.path))['rows'],
key=lambda d: d['key.name'])
property_entities = sorted(
transforms.loads(archive.get(entity_entity.path))['rows'],
key=lambda d: d['key.name'])
# Spot check their contents.
self.assertEqual(
[model.key().name() for model in [first_student, second_student]],
[student['key.name'] for student in students])
self.assertEqual(
[model.key().name() for model in [first_entity, second_entity]],
[entity['key.name'] for entity in property_entities])
def test_download_datastore_with_privacy_maintains_references(self):
"""Test download of datastore data and archive creation."""
unsafe_user_id = '1'
download_datastore_args = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_datastore_args +
['--datastore_types', 'EventEntity,Student', '--privacy',
'--privacy_secret', 'super_seekrit'])
context = etl_lib.get_context(download_datastore_args.course_url_prefix)
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(context.get_namespace_name())
event = models.EventEntity(user_id=unsafe_user_id)
student = models.Student(
key_name='first_student', user_id=unsafe_user_id)
db.put([event, student])
finally:
namespace_manager.set_namespace(old_namespace)
etl.main(
download_datastore_args, environment_class=FakeEnvironment)
archive = etl._Archive(self.archive_path)
archive.open('r')
self.assertEqual(
['EventEntity.json', 'Student.json'],
sorted(
[os.path.basename(e.path) for e in archive.manifest.entities]))
event_entity_entity = [
e for e in archive.manifest.entities
if e.path.endswith('EventEntity.json')][0]
student_entity = [
e for e in archive.manifest.entities
if e.path.endswith('Student.json')][0]
# Ensure .json files are deserializable into Python objects...
event_entities = transforms.loads(
archive.get(event_entity_entity.path))['rows']
students = transforms.loads(archive.get(student_entity.path))['rows']
# Reference maintained.
self.assertEqual(event_entities[0]['user_id'], students[0]['user_id'])
# But user_id transformed.
self.assertNotEqual(unsafe_user_id, event_entities[0]['user_id'])
self.assertNotEqual(unsafe_user_id, students[0]['user_id'])
def test_privacy_fails_if_not_downloading_datastore(self):
wrong_mode = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args + ['--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_mode, environment_class=FakeEnvironment)
wrong_type = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_course_args + ['--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_type, environment_class=FakeEnvironment)
def test_privacy_secret_fails_if_not_download_datastore_with_privacy(self):
"""Tests invalid flag combinations related to --privacy."""
missing_privacy = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_datastore_args +
['--privacy_secret', 'foo'])
self.assertRaises(
SystemExit, etl.main, missing_privacy,
environment_class=FakeEnvironment)
wrong_mode = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args +
['--privacy_secret', 'foo', '--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_mode, environment_class=FakeEnvironment)
wrong_type = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_course_args +
['--privacy_secret', 'foo', '--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_type, environment_class=FakeEnvironment)
def test_run_fails_when_delegated_argument_parsing_fails(self):
bad_args = etl.PARSER.parse_args(
['run', 'tools.etl_lib.Job'] + self.common_args +
['--job_args', "'unexpected_argument'"])
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_run_fails_when_requested_class_missing_or_invalid(self):
bad_args = etl.PARSER.parse_args(
['run', 'a.missing.class.or.Module'] + self.common_args)
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
bad_args = etl.PARSER.parse_args(
['run', 'tools.etl.etl._Archive'] + self.common_args)
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_run_print_memcache_stats_succeeds(self):
"""Tests examples.WriteStudentEmailsToFile prints stats to stdout."""
args = etl.PARSER.parse_args(
['run', 'tools.etl.examples.PrintMemcacheStats'] + self.common_args)
memcache.get('key')
memcache.set('key', 1)
memcache.get('key')
old_stdout = sys.stdout
stdout = cStringIO.StringIO()
try:
sys.stdout = stdout
etl.main(args, environment_class=FakeEnvironment)
finally:
sys.stdout = old_stdout
expected = examples.PrintMemcacheStats._STATS_TEMPLATE % {
'byte_hits': 1,
'bytes': 1,
'hits': 1,
'items': 1,
'misses': 1,
'oldest_item_age': 0,
}
self.assertTrue(expected in stdout.getvalue())
def test_run_skips_remote_env_setup_when_disable_remote_passed(self):
args = etl.PARSER.parse_args(
['run', 'tools.etl.etl_lib.Job'] + self.common_args +
['--disable_remote'])
etl.main(args)
def test_run_upload_file_to_course_succeeds(self):
"""Tests upload of a single local file to a course."""
path = os.path.join(self.test_tempdir, 'file')
target = 'assets/file'
remote_path = os.path.join(appengine_config.BUNDLE_ROOT, target)
contents = 'contents'
with open(path, 'w') as f:
f.write(contents)
args = etl.PARSER.parse_args(
['run', 'tools.etl.examples.UploadFileToCourse'] +
self.common_args + ['--job_args=%s %s' % (path, target)])
sites.setup_courses(self.raw)
context = etl_lib.get_context(args.course_url_prefix)
self.assertFalse(context.fs.impl.get(remote_path))
etl.main(args, environment_class=FakeEnvironment)
self.assertEqual(contents, context.fs.impl.get(remote_path).read())
def test_run_write_student_emails_to_file_succeeds(self):
"""Tests args passed to and run of examples.WriteStudentEmailsToFile."""
email1 = 'email1@example.com'
email2 = 'email2@example.com'
path = os.path.join(self.test_tempdir, 'emails')
args = etl.PARSER.parse_args(
['run', 'tools.etl.examples.WriteStudentEmailsToFile'] +
self.common_args + ['--job_args=%s --batch_size 1' % path])
context = etl_lib.get_context(args.course_url_prefix)
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(context.get_namespace_name())
first_student = models.Student(key_name=email1)
second_student = models.Student(key_name=email2)
db.put([first_student, second_student])
finally:
namespace_manager.set_namespace(old_namespace)
etl.main(args, environment_class=FakeEnvironment)
self.assertEqual('%s\n%s\n' % (email1, email2), open(path).read())
def test_upload_course_fails_if_archive_cannot_be_opened(self):
sites.setup_courses(self.raw)
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_archive_course_json_malformed(self):
self.create_archive()
self.create_empty_course(self.raw)
zip_archive = zipfile.ZipFile(self.archive_path, 'a')
zip_archive.writestr(
etl._Archive.get_internal_path(etl._COURSE_JSON_PATH_SUFFIX),
'garbage')
zip_archive.close()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_archive_course_yaml_malformed(self):
self.create_archive()
self.create_empty_course(self.raw)
zip_archive = zipfile.ZipFile(self.archive_path, 'a')
zip_archive.writestr(
etl._Archive.get_internal_path(etl._COURSE_YAML_PATH_SUFFIX),
'{')
zip_archive.close()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_course_has_non_course_yaml_contents(self):
self.upload_all_sample_course_files([])
self.import_sample_course()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_force_overwrite_passed_with_bad_args(self):
self.create_archive()
bad_args = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args + [
'--force_overwrite'])
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_upload_course_fails_if_no_course_with_url_prefix_found(self):
self.create_archive()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_succeeds(self):
"""Tests upload of archive contents."""
question = self.create_archive_with_question('test question')
self.create_empty_course(self.raw)
context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
self.assertNotEqual(self.new_course_title, context.get_title())
etl.main(self.upload_course_args, environment_class=FakeEnvironment)
# check archive content
archive = etl._Archive(self.archive_path)
archive.open('r')
context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT)
self.assertEqual(
len(archive.manifest.entities),
len(filesystem_contents) + len(COURSE_CONTENT_ENTITY_FILES))
# check course structure
self.assertEqual(self.new_course_title, context.get_title())
units = etl._get_course_from(context).get_units()
spot_check_single_unit = [u for u in units if u.unit_id == 9][0]
self.assertEqual('Interpreting results', spot_check_single_unit.title)
for unit in units:
self.assertTrue(unit.title)
# check entities
for entity in archive.manifest.entities:
_, tail = os.path.split(entity.path)
if tail in COURSE_CONTENT_ENTITY_FILES:
continue
full_path = os.path.join(
appengine_config.BUNDLE_ROOT,
etl._Archive.get_external_path(entity.path))
stream = context.fs.impl.get(full_path)
self.assertEqual(entity.is_draft, stream.metadata.is_draft)
# check uploaded question matches original
_assert_identical_data_entity_exists(
sites.get_all_courses()[0], question)
def test_upload_course_with_force_overwrite_succeeds(self):
"""Tests upload into non-empty course with --force_overwrite."""
self.upload_all_sample_course_files([])
self.import_sample_course()
etl.main(self.download_course_args, environment_class=FakeEnvironment)
force_overwrite_args = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_course_args + [
'--force_overwrite'])
etl.main(force_overwrite_args, environment_class=FakeEnvironment)
archive = etl._Archive(self.archive_path)
archive.open('r')
context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT)
self.assertEqual(
len(archive.manifest.entities),
len(filesystem_contents) + len(COURSE_CONTENT_ENTITY_FILES))
self.assertEqual(self.new_course_title, context.get_title())
units = etl._get_course_from(context).get_units()
spot_check_single_unit = [u for u in units if u.unit_id == 9][0]
self.assertEqual('Interpreting results', spot_check_single_unit.title)
for unit in units:
self.assertTrue(unit.title)
for entity in archive.manifest.entities:
_, tail = os.path.split(entity.path)
if tail in COURSE_CONTENT_ENTITY_FILES:
continue
full_path = os.path.join(
appengine_config.BUNDLE_ROOT,
etl._Archive.get_external_path(entity.path))
stream = context.fs.impl.get(full_path)
self.assertEqual(entity.is_draft, stream.metadata.is_draft)
def test_upload_datastore_fails(self):
upload_datastore_args = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args +
['--datastore_types', 'doesnt_matter'])
self.assertRaises(
NotImplementedError, etl.main, upload_datastore_args,
environment_class=FakeEnvironment)
def test_is_identity_transform_when_privacy_false(self):
self.assertEqual(
1, etl._get_privacy_transform_fn(False, 'no_effect')(1))
self.assertEqual(
1, etl._get_privacy_transform_fn(False, 'other_value')(1))
def test_is_hmac_sha_2_256_when_privacy_true(self):
# Must run etl.main() to get crypto module loaded.
args = etl.PARSER.parse_args(['download'] + self.common_course_args)
etl.main(args, environment_class=FakeEnvironment)
self.assertEqual(
crypto.hmac_sha_2_256_transform('secret', 'value'),
# Testing protected functions. pylint: disable=protected-access
etl._get_privacy_transform_fn(True, 'secret')('value'))
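# A minimal sketch of the keyed transform the assertion above relies on.
# Illustrative only: the real implementation lives in the crypto module, and
# _hmac_sha_2_256_sketch is a hypothetical name.
import hashlib
import hmac

def _hmac_sha_2_256_sketch(secret, value):
    # Deterministic keyed hash: equal (secret, value) pairs always map to the
    # same digest, which is why cross-entity references survive --privacy.
    return hmac.new(str(secret), str(value), hashlib.sha256).hexdigest()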
# TODO(johncox): re-enable these tests once we figure out how to make webtest
# play nice with remote_api.
class EtlRemoteEnvironmentTestCase(actions.TestBase):
"""Tests tools/etl/remote.py."""
# Method name determined by superclass. pylint: disable=g-bad-name
def setUp(self):
super(EtlRemoteEnvironmentTestCase, self).setUp()
self.test_environ = copy.deepcopy(os.environ)
# Allow access to protected members under test.
# pylint: disable=protected-access
def disabled_test_can_establish_environment_in_dev_mode(self):
# Stub the call that requires user input so the test runs unattended.
self.swap(__builtin__, 'raw_input', lambda _: 'username')
self.assertEqual(os.environ['SERVER_SOFTWARE'], remote.SERVER_SOFTWARE)
# establish() performs RPC. If it doesn't throw, we're good.
remote.Environment('mycourse', 'localhost:8080').establish()
def disabled_test_can_establish_environment_in_test_mode(self):
self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
self.swap(os, 'environ', self.test_environ)
# establish() performs RPC. If it doesn't throw, we're good.
remote.Environment('mycourse', 'localhost:8080').establish()
class CourseUrlRewritingTest(CourseUrlRewritingTestBase):
"""Run all existing tests using '/courses/pswg' base URL rewrite rules."""
class VirtualFileSystemTest(VirtualFileSystemTestBase):
"""Run all existing tests using virtual local file system."""
class MemcacheTestBase(actions.TestBase):
"""Executes all tests with memcache enabled."""
def setUp(self): # pylint: disable=g-bad-name
super(MemcacheTestBase, self).setUp()
config.Registry.test_overrides = {models.CAN_USE_MEMCACHE.name: True}
def tearDown(self): # pylint: disable=g-bad-name
config.Registry.test_overrides = {}
super(MemcacheTestBase, self).tearDown()
class MemcacheTest(MemcacheTestBase):
"""Executes all tests with memcache enabled."""
class PiiHolder(entities.BaseEntity):
user_id = db.StringProperty(indexed=True)
age = db.IntegerProperty(indexed=False)
class_rank = db.IntegerProperty(indexed=False)
registration_date = db.DateTimeProperty(indexed=True, required=True)
class_goal = db.StringProperty(indexed=False, required=True)
albedo = db.FloatProperty(indexed=False)
_PROPERTY_EXPORT_BLACKLIST = [user_id, age]
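# Hedged sketch of how a blacklist-aware schema walk might behave; the real
# logic lives in models/transforms.py and this helper name is illustrative:
def _exportable_properties_sketch(entity_class):
    # Collect the names of properties the entity opts out of exporting.
    blacklist = set(
        prop.name for prop in getattr(
            entity_class, '_PROPERTY_EXPORT_BLACKLIST', []))
    # Keep every declared property whose name is not blacklisted.
    return dict(
        (name, prop) for name, prop in entity_class.properties().items()
        if name not in blacklist)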
class TransformsEntitySchema(actions.TestBase):
def test_schema(self):
schema = transforms.get_schema_for_entity(PiiHolder)
schema = schema.get_json_schema_dict()['properties']
self.assertNotIn('user_id', schema)
self.assertNotIn('age', schema)
self.assertIn('class_rank', schema)
self.assertEqual('integer', schema['class_rank']['type'])
self.assertIn('optional', schema['class_rank'])
self.assertEqual(True, schema['class_rank']['optional'])
self.assertIn('registration_date', schema)
self.assertEqual('datetime', schema['registration_date']['type'])
self.assertNotIn('optional', schema['registration_date'])
self.assertIn('class_goal', schema)
self.assertEqual('string', schema['class_goal']['type'])
self.assertNotIn('optional', schema['class_goal'])
self.assertIn('albedo', schema)
self.assertEqual('number', schema['albedo']['type'])
self.assertIn('optional', schema['albedo'])
self.assertEqual(True, schema['albedo']['optional'])
class TransformsJsonFileTestCase(actions.TestBase):
"""Tests for models/transforms.py's JsonFile."""
# Method name determined by superclass. pylint: disable=g-bad-name
def setUp(self):
super(TransformsJsonFileTestCase, self).setUp()
# Treat as module-protected. pylint: disable=protected-access
self.path = os.path.join(self.test_tempdir, 'file.json')
self.reader = transforms.JsonFile(self.path)
self.writer = transforms.JsonFile(self.path)
self.first = 1
self.second = {'c': 'c_value', 'd': {'nested': 'e'}}
def tearDown(self):
self.reader.close()
self.writer.close()
super(TransformsJsonFileTestCase, self).tearDown()
def test_round_trip_of_file_with_zero_records(self):
self.writer.open('w')
self.writer.close()
self.reader.open('r')
self.assertEqual([], [entity for entity in self.reader])
self.reader.reset()
self.assertEqual({'rows': []}, self.reader.read())
def test_round_trip_of_file_with_one_record(self):
self.writer.open('w')
self.writer.write(self.first)
self.writer.close()
self.reader.open('r')
self.assertEqual([self.first], [entity for entity in self.reader])
self.reader.reset()
self.assertEqual({'rows': [self.first]}, self.reader.read())
def test_round_trip_of_file_with_multiple_records(self):
self.writer.open('w')
self.writer.write(self.first)
self.writer.write(self.second)
self.writer.close()
self.reader.open('r')
self.assertEqual(
[self.first, self.second], [entity for entity in self.reader])
self.reader.reset()
self.assertEqual(
{'rows': [self.first, self.second]}, self.reader.read())
class ImportActivityTests(DatastoreBackedCourseTest):
"""Functional tests for importing legacy activities into lessons."""
URI = '/rest/course/lesson/activity'
FREETEXT_QUESTION = """
var activity = [
{ questionType: 'freetext',
correctAnswerRegex: /abc/i,
correctAnswerOutput: "Correct.",
incorrectAnswerOutput: "Try again.",
showAnswerOutput: "A hint."
}
];
"""
MULTIPLE_CHOICE_QUESTION = """
var activity = [
{questionType: 'multiple choice',
choices: [
['a', false, 'A'],
['b', true, 'B'],
['c', false, 'C'],
['d', false, 'D']
]
}
];
"""
MULTIPLE_CHOICE_GROUP_QUESTION = """
var activity = [
{questionType: 'multiple choice group',
questionsList: [
{
questionHTML: 'choose a',
choices: ['aa', 'bb'],
correctIndex: 0
},
{
questionHTML: 'choose b or c',
choices: ['aa', 'bb', 'cc'],
correctIndex: [1, 2]
}
],
allCorrectOutput: 'unused',
someIncorrectOutput: 'also unused'
}
];
"""
def setUp(self):
super(ImportActivityTests, self).setUp()
course = courses.Course(None, app_context=self.app_context)
self.unit = course.add_unit()
self.lesson = course.add_lesson(self.unit)
course.update_lesson(self.lesson)
course.save()
email = 'test_admin@google.com'
actions.login(email, is_admin=True)
def load_dto(self, dao, entity_id):
old_namespace = namespace_manager.get_namespace()
new_namespace = self.app_context.get_namespace_name()
try:
namespace_manager.set_namespace(new_namespace)
return dao.load(entity_id)
finally:
namespace_manager.set_namespace(old_namespace)
def get_response_dict(self, activity_text):
request = {
'xsrf_token': XsrfTokenManager.create_xsrf_token('lesson-edit'),
'key': self.lesson.lesson_id,
'text': activity_text
}
response = self.testapp.put(
self.URI, params={'request': transforms.dumps(request)})
return transforms.loads(response.body)
def get_content_from_service(self, activity_text):
response_dict = self.get_response_dict(activity_text)
self.assertEqual(response_dict['status'], 200)
return transforms.loads(response_dict['payload'])['content']
def test_import_multiple_choice(self):
"""Should be able to import a single multiple choice question."""
content = self.get_content_from_service(self.MULTIPLE_CHOICE_QUESTION)
m = re.match((
r'^<question quid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
r'</question>$'), content)
assert m
quid = m.group(1)
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
self.assertEqual(question.dict['question'], '')
self.assertEqual(question.dict['multiple_selections'], False)
self.assertEqual(len(question.dict['choices']), 4)
choices = question.dict['choices']
choices_data = [
['a', 0.0, 'A'], ['b', 1.0, 'B'], ['c', 0.0, 'C'],
['d', 0.0, 'D']]
for i, choice in enumerate(choices):
self.assertEqual(choice['text'], choices_data[i][0])
self.assertEqual(choice['score'], choices_data[i][1])
self.assertEqual(choice['feedback'], choices_data[i][2])
def test_import_multiple_choice_group(self):
"""Should be able to import a single 'multiple choice group'."""
content = self.get_content_from_service(
self.MULTIPLE_CHOICE_GROUP_QUESTION)
# The tag links to a question group which embeds two questions
m = re.match((
r'^<question-group qgid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
r'</question-group>$'), content)
assert m
quid = m.group(1)
question_group = self.load_dto(models.QuestionGroupDAO, quid)
self.assertEqual(question_group.dict['version'], '1.5')
self.assertEqual(
question_group.dict['description'],
'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
self.assertEqual(len(question_group.dict['items']), 2)
items = question_group.dict['items']
self.assertEqual(items[0]['weight'], 1.0)
self.assertEqual(items[1]['weight'], 1.0)
# The first question is multiple choice with single selection
quid = items[0]['question']
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
(
'Imported from unit "New Unit", lesson "New Lesson" '
'(question #1, part #1)'))
self.assertEqual(question.dict['question'], 'choose a')
self.assertEqual(question.dict['multiple_selections'], False)
self.assertEqual(len(question.dict['choices']), 2)
choices = question.dict['choices']
self.assertEqual(choices[0]['text'], 'aa')
self.assertEqual(choices[0]['score'], 1.0)
self.assertEqual(choices[1]['text'], 'bb')
self.assertEqual(choices[1]['score'], 0.0)
# The second question is multiple choice with multiple selection
quid = items[1]['question']
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
(
'Imported from unit "New Unit", lesson "New Lesson" '
'(question #1, part #2)'))
self.assertEqual(question.dict['question'], 'choose b or c')
self.assertEqual(question.dict['multiple_selections'], True)
self.assertEqual(len(question.dict['choices']), 3)
choices = question.dict['choices']
self.assertEqual(choices[0]['text'], 'aa')
self.assertEqual(choices[0]['score'], -1.0)
self.assertEqual(choices[1]['text'], 'bb')
self.assertEqual(choices[1]['score'], 0.5)
self.assertEqual(choices[2]['text'], 'cc')
self.assertEqual(choices[2]['score'], 0.5)
def test_import_freetext(self):
"""Should be able to import a single feettext question."""
content = self.get_content_from_service(self.FREETEXT_QUESTION)
m = re.match((
r'^<question quid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
r'</question>$'), content)
assert m
quid = m.group(1)
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.SHORT_ANSWER)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
self.assertEqual(question.dict['question'], '')
self.assertEqual(question.dict['hint'], 'A hint.')
self.assertEqual(question.dict['defaultFeedback'], 'Try again.')
self.assertEqual(len(question.dict['graders']), 1)
grader = question.dict['graders'][0]
self.assertEqual(grader['score'], 1.0)
self.assertEqual(grader['matcher'], 'regex')
self.assertEqual(grader['response'], '/abc/i')
self.assertEqual(grader['feedback'], 'Correct.')
def test_repeated_imports_are_rejected(self):
response_dict = self.get_response_dict(self.FREETEXT_QUESTION)
self.assertEqual(response_dict['status'], 200)
response_dict = self.get_response_dict(self.FREETEXT_QUESTION)
self.assertEqual(response_dict['status'], 412)
self.assertTrue(response_dict['message'].startswith(
'This activity has already been imported.'))
def test_user_must_be_logged_in(self):
actions.logout()
try:
self.get_response_dict(self.FREETEXT_QUESTION)
self.fail('Expected 404')
except AppError:
pass
def test_user_must_have_valid_xsrf_token(self):
request = {
'key': self.lesson.lesson_id,
'text': self.FREETEXT_QUESTION
}
response = self.testapp.put(
self.URI, params={'request': transforms.dumps(request)})
response_dict = transforms.loads(response.body)
self.assertEqual(response_dict['status'], 403)
class NamespaceTest(actions.TestBase):
def test_namespace_context_manager(self):
pre_test_namespace = namespace_manager.get_namespace()
with Namespace('xyzzy'):
self.assertEqual(namespace_manager.get_namespace(), 'xyzzy')
with Namespace('plugh'):
self.assertEqual(namespace_manager.get_namespace(), 'plugh')
self.assertEqual(namespace_manager.get_namespace(), 'xyzzy')
self.assertEqual(namespace_manager.get_namespace(), pre_test_namespace)
def test_namespace_context_manager_handles_exception(self):
pre_test_namespace = namespace_manager.get_namespace()
try:
with Namespace('xyzzy'):
self.assertEqual(namespace_manager.get_namespace(), 'xyzzy')
raise RuntimeError('No way, Jose')
except RuntimeError:
pass
self.assertEqual(namespace_manager.get_namespace(), pre_test_namespace)
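# For reference, a minimal sketch of a context manager matching the behavior
# the tests above verify (assumed implementation; _NamespaceSketch is a
# hypothetical name, the real Namespace lives in the shared utils):
class _NamespaceSketch(object):

    def __init__(self, namespace):
        self._namespace = namespace

    def __enter__(self):
        # Remember the caller's namespace so nested blocks restore correctly.
        self._old = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self._namespace)
        return self

    def __exit__(self, exc_type, exc_value, unused_tb):
        # Always restore, even when the body raised; do not swallow errors.
        namespace_manager.set_namespace(self._old)
        return False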
ALL_COURSE_TESTS = (
StudentAspectTest, AssessmentTest, CourseAuthorAspectTest,
StaticHandlerTest, AdminAspectTest, PeerReviewControllerTest,
PeerReviewDashboardAdminTest, PeerReviewAnalyticsTest)
MemcacheTest.__bases__ += (InfrastructureTest,) + ALL_COURSE_TESTS
CourseUrlRewritingTest.__bases__ += ALL_COURSE_TESTS
VirtualFileSystemTest.__bases__ += ALL_COURSE_TESTS
DatastoreBackedSampleCourseTest.__bases__ += ALL_COURSE_TESTS
| wavemind/mlgcb | tests/functional/test_classes.py | Python | apache-2.0 | 177,291 | ["VisIt"] | 85f68f29f0708c7e4de0b6dc19d829e8bbe37e08182a95d134c86ad7d5e00e03 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
# pytype: skip-file
import base64
import logging
import os
import threading
import time
import traceback
from collections import defaultdict
from subprocess import DEVNULL
from typing import TYPE_CHECKING
from typing import List
from urllib.parse import quote
from urllib.parse import quote_from_bytes
from urllib.parse import unquote_to_bytes
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.common import group_by_key_input_visitor
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms import window
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX
from apache_beam.typehints import typehints
from apache_beam.utils import processes
from apache_beam.utils import proto_utils
from apache_beam.utils.interactive_utils import is_in_notebook
from apache_beam.utils.plugin import BeamPlugin
if TYPE_CHECKING:
from apache_beam.pipeline import PTransformOverride
__all__ = ['DataflowRunner']
_LOGGER = logging.getLogger(__name__)
BQ_SOURCE_UW_ERROR = (
'The Read(BigQuerySource(...)) transform is not supported with newer stack '
'features (Fn API, Dataflow Runner V2, etc). Please use the transform '
'apache_beam.io.gcp.bigquery.ReadFromBigQuery instead.')
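# Illustrative only: the replacement the message above points to, with
# placeholder project/dataset/table names:
#
#   with beam.Pipeline(options=options) as p:
#       rows = p | beam.io.gcp.bigquery.ReadFromBigQuery(
#           table='my_project:my_dataset.my_table')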
class DataflowRunner(PipelineRunner):
"""A runner that creates job graphs and submits them for remote execution.
Every execution of the run() method submits an independent job for
remote execution, consisting of the nodes reachable from the passed-in
node argument, or the entire graph if the node is None. The run() method
returns once the service has created the job; it does not wait for the
job to finish if blocking is set to False.
"""
# A list of PTransformOverride objects to be applied before running a pipeline
# using DataflowRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal SDK use only. This should not be updated by Beam pipeline
# authors.
# Imported here to avoid circular dependencies.
# TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import CombineValuesPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import JrhReadPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import NativeReadPTransformOverride
# These overrides should be applied before the proto representation of the
# graph is created.
_PTRANSFORM_OVERRIDES = [
CombineValuesPTransformOverride(),
NativeReadPTransformOverride(),
] # type: List[PTransformOverride]
_JRH_PTRANSFORM_OVERRIDES = [
JrhReadPTransformOverride(),
] # type: List[PTransformOverride]
# These overrides should be applied after the proto representation of the
# graph is created.
_NON_PORTABLE_PTRANSFORM_OVERRIDES = [
CreatePTransformOverride(),
ReadPTransformOverride(),
] # type: List[PTransformOverride]
def __init__(self, cache=None):
# Cache of CloudWorkflowStep protos generated while the runner
# "executes" a pipeline.
self._cache = cache if cache is not None else PValueCache()
self._unique_step_id = 0
self._default_environment = None
def is_fnapi_compatible(self):
return False
def apply(self, transform, input, options):
self._maybe_add_unified_worker_missing_options(options)
return super(DataflowRunner, self).apply(transform, input, options)
def _get_unique_step_name(self):
self._unique_step_id += 1
return 's%s' % self._unique_step_id
@staticmethod
def poll_for_job_completion(runner, result, duration):
"""Polls for the specified job to finish running (successfully or not).
Updates the result with the new job information before returning.
Args:
runner: DataflowRunner instance to use for polling job state.
result: DataflowPipelineResult instance used for job information.
duration (int): The time to wait (in milliseconds) for the job to finish.
If it is set to :data:`None`, it will wait indefinitely until the job
is finished.
"""
last_message_time = None
current_seen_messages = set()
last_error_rank = float('-inf')
last_error_msg = None
last_job_state = None
# How long to wait after pipeline failure for the error
# message to show up giving the reason for the failure.
# It typically takes about 30 seconds.
final_countdown_timer_secs = 50.0
sleep_secs = 5.0
# Try to prioritize the user-level traceback, if any.
def rank_error(msg):
if 'work item was attempted' in msg:
return -1
elif 'Traceback' in msg:
return 1
return 0
if duration:
start_secs = time.time()
duration_secs = duration // 1000
job_id = result.job_id()
while True:
response = runner.dataflow_client.get_job(job_id)
# If get() is called very soon after Create() the response may not contain
# an initialized 'currentState' field.
if response.currentState is not None:
if response.currentState != last_job_state:
_LOGGER.info('Job %s is in state %s', job_id, response.currentState)
last_job_state = response.currentState
if str(response.currentState) != 'JOB_STATE_RUNNING':
# Stop checking for new messages on timeout, explanatory
# message received, success, or a terminal job state caused
# by the user that therefore doesn't require explanation.
if (final_countdown_timer_secs <= 0.0 or last_error_msg is not None or
str(response.currentState) in (
'JOB_STATE_DONE', 'JOB_STATE_CANCELLED', 'JOB_STATE_UPDATED',
'JOB_STATE_DRAINED')):
break
# Check that job is in a post-preparation state before starting the
# final countdown.
if (str(response.currentState) not in ('JOB_STATE_PENDING',
'JOB_STATE_QUEUED')):
# The job has failed; ensure we see any final error messages.
sleep_secs = 1.0 # poll faster during the final countdown
final_countdown_timer_secs -= sleep_secs
time.sleep(sleep_secs)
# Get all messages since beginning of the job run or since last message.
page_token = None
while True:
messages, page_token = runner.dataflow_client.list_messages(
job_id, page_token=page_token, start_time=last_message_time)
for m in messages:
message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)
if not last_message_time or m.time > last_message_time:
last_message_time = m.time
current_seen_messages = set()
if message in current_seen_messages:
# Skip the message if it has already been seen at the current
# time. This could be the case since the list_messages API is
# queried starting at last_message_time.
continue
else:
current_seen_messages.add(message)
# Skip empty messages.
if m.messageImportance is None:
continue
_LOGGER.info(message)
if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
if rank_error(m.messageText) >= last_error_rank:
last_error_rank = rank_error(m.messageText)
last_error_msg = m.messageText
if not page_token:
break
if duration:
passed_secs = time.time() - start_secs
if passed_secs > duration_secs:
_LOGGER.warning(
'Timing out on waiting for job %s after %d seconds',
job_id,
passed_secs)
break
result._job = response
runner.last_error_msg = last_error_msg
@staticmethod
def _only_element(iterable):
# type: (Iterable[T]) -> T
element, = iterable
return element
@staticmethod
def side_input_visitor(
use_unified_worker=False,
use_fn_api=False,
deterministic_key_coders=True):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.pipeline import PipelineVisitor
from apache_beam.transforms.core import ParDo
class SideInputVisitor(PipelineVisitor):
"""Ensures input `PCollection` used as a side inputs has a `KV` type.
TODO(BEAM-115): Once Python SDK is compatible with the new Runner API,
we could directly replace the coder instead of mutating the element type.
"""
def visit_transform(self, transform_node):
if isinstance(transform_node.transform, ParDo):
new_side_inputs = []
for ix, side_input in enumerate(transform_node.side_inputs):
access_pattern = side_input._side_input_data().access_pattern
if access_pattern == common_urns.side_inputs.ITERABLE.urn:
if use_unified_worker or not use_fn_api:
# TODO(BEAM-9173): Stop patching up the access pattern to
# appease Dataflow when using the UW and hardcode the output
# type to be Any since the Dataflow JSON and pipeline proto
# can differ in coders which leads to encoding/decoding issues
# within the runner.
side_input.pvalue.element_type = typehints.Any
new_side_input = _DataflowIterableSideInput(side_input)
else:
# Add a map to ('', value) as Dataflow currently only handles
# keyed side inputs when using the JRH.
pipeline = side_input.pvalue.pipeline
new_side_input = _DataflowIterableAsMultimapSideInput(
side_input)
new_side_input.pvalue = beam.pvalue.PCollection(
pipeline,
element_type=typehints.KV[bytes,
side_input.pvalue.element_type],
is_bounded=side_input.pvalue.is_bounded)
parent = transform_node.parent or pipeline._root_transform()
map_to_void_key = beam.pipeline.AppliedPTransform(
parent,
beam.Map(lambda x: (b'', x)),
transform_node.full_label + '/MapToVoidKey%s' % ix,
(side_input.pvalue, ))
new_side_input.pvalue.producer = map_to_void_key
map_to_void_key.add_output(new_side_input.pvalue, None)
parent.add_part(map_to_void_key)
elif access_pattern == common_urns.side_inputs.MULTIMAP.urn:
# Ensure the input coder is a KV coder and patch up the
# access pattern to appease Dataflow.
side_input.pvalue.element_type = typehints.coerce_to_kv_type(
side_input.pvalue.element_type, transform_node.full_label)
side_input.pvalue.requires_deterministic_key_coder = (
deterministic_key_coders and transform_node.full_label)
new_side_input = _DataflowMultimapSideInput(side_input)
else:
raise ValueError(
'Unsupported access pattern for %r: %r' %
(transform_node.full_label, access_pattern))
new_side_inputs.append(new_side_input)
if use_fn_api:
transform_node.side_inputs = new_side_inputs
transform_node.transform.side_inputs = new_side_inputs
return SideInputVisitor()
@staticmethod
def flatten_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class FlattenInputVisitor(PipelineVisitor):
"""A visitor that replaces the element type for input ``PCollections``s of
a ``Flatten`` transform with that of the output ``PCollection``.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import Flatten
if isinstance(transform_node.transform, Flatten):
output_pcoll = DataflowRunner._only_element(
transform_node.outputs.values())
for input_pcoll in transform_node.inputs:
input_pcoll.element_type = output_pcoll.element_type
return FlattenInputVisitor()
@staticmethod
def combinefn_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
from apache_beam import core
class CombineFnVisitor(PipelineVisitor):
"""Checks if `CombineFn` has non-default setup or teardown methods.
If yes, raises `ValueError`.
"""
def visit_transform(self, applied_transform):
transform = applied_transform.transform
if isinstance(transform, core.ParDo) and isinstance(
transform.fn, core.CombineValuesDoFn):
if self._overrides_setup_or_teardown(transform.fn.combinefn):
raise ValueError(
'CombineFn.setup and CombineFn.teardown are '
'not supported with non-portable Dataflow '
'runner. Please use Dataflow Runner V2 instead.')
@staticmethod
def _overrides_setup_or_teardown(combinefn):
# TODO(BEAM-3736): provide an implementation for this method
return False
return CombineFnVisitor()
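# Hedged sketch of the detection the TODO above leaves unimplemented:
# compare the bound methods against the CombineFn base class (illustrative
# only; _overrides is a hypothetical helper name):
#
#   def _overrides(combinefn, method_name):
#       from apache_beam.transforms.core import CombineFn
#       return getattr(type(combinefn), method_name) is not getattr(
#           CombineFn, method_name)
#
# _overrides(fn, 'setup') or _overrides(fn, 'teardown') would then flag
# combiners that require Dataflow Runner V2.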
def _adjust_pipeline_for_dataflow_v2(self, pipeline):
# Dataflow runner requires a KV type for GBK inputs, hence we enforce that
# here.
pipeline.visit(
group_by_key_input_visitor(
not pipeline._options.view_as(
TypeOptions).allow_non_deterministic_key_coders))
def _check_for_unsupported_features_on_non_portable_worker(self, pipeline):
pipeline.visit(self.combinefn_visitor())
def run_pipeline(self, pipeline, options):
"""Remotely executes entire pipeline or parts reachable from node."""
# Label goog-dataflow-notebook if job is started from notebook.
if is_in_notebook():
notebook_version = (
'goog-dataflow-notebook=' +
beam.version.__version__.replace('.', '_'))
if options.view_as(GoogleCloudOptions).labels:
options.view_as(GoogleCloudOptions).labels.append(notebook_version)
else:
options.view_as(GoogleCloudOptions).labels = [notebook_version]
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
self._maybe_add_unified_worker_missing_options(options)
use_fnapi = apiclient._use_fnapi(options)
if not use_fnapi:
self._check_for_unsupported_features_on_non_portable_worker(pipeline)
# Convert all side inputs into a form acceptable to Dataflow.
pipeline.visit(
self.side_input_visitor(
apiclient._use_unified_worker(options),
apiclient._use_fnapi(options),
deterministic_key_coders=not options.view_as(
TypeOptions).allow_non_deterministic_key_coders))
# Performing configured PTransform overrides. Note that this is currently
# done before Runner API serialization, since the new proto needs to contain
# any added PTransforms.
pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)
from apache_beam.runners.dataflow.ptransform_overrides import WriteToBigQueryPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import GroupIntoBatchesWithShardedKeyPTransformOverride
pipeline.replace_all([
WriteToBigQueryPTransformOverride(pipeline, options),
GroupIntoBatchesWithShardedKeyPTransformOverride(self, options)
])
if use_fnapi and not apiclient._use_unified_worker(options):
pipeline.replace_all(DataflowRunner._JRH_PTRANSFORM_OVERRIDES)
from apache_beam.transforms import environments
if options.view_as(SetupOptions).prebuild_sdk_container_engine:
# if prebuild_sdk_container_engine is specified we will build a new sdk
# container image with dependencies pre-installed and use that image,
# instead of using the inferred default container image.
self._default_environment = (
environments.DockerEnvironment.from_options(options))
options.view_as(WorkerOptions).sdk_container_image = (
self._default_environment.container_image)
else:
self._default_environment = (
environments.DockerEnvironment.from_container_image(
apiclient.get_container_image_from_options(options),
artifacts=environments.python_sdk_dependencies(options),
resource_hints=environments.resource_hints_from_options(options)))
# This has to be performed before pipeline proto is constructed to make sure
# that the changes are reflected in the portable job submission path.
self._adjust_pipeline_for_dataflow_v2(pipeline)
# Snapshot the pipeline in a portable proto.
self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
return_context=True, default_environment=self._default_environment)
# Optimize the pipeline if it is not streaming and the pre_optimize
# experiment is set.
if not options.view_as(StandardOptions).streaming:
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'default').lower()
from apache_beam.runners.portability.fn_api_runner import translations
if pre_optimize == 'none':
phases = []
elif pre_optimize == 'default' or pre_optimize == 'all':
phases = [translations.pack_combiners, translations.sort_stages]
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in ('pack_combiners', ):
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
phases.append(translations.sort_stages)
if phases:
self.proto_pipeline = translations.optimize_pipeline(
self.proto_pipeline,
phases=phases,
known_runner_urns=frozenset(),
partial=True)
if not use_fnapi:
# Performing configured PTransform overrides which should not be reflected
# in the proto representation of the graph.
pipeline.replace_all(DataflowRunner._NON_PORTABLE_PTRANSFORM_OVERRIDES)
# Add setup_options for all the BeamPlugin imports
setup_options = options.view_as(SetupOptions)
plugins = BeamPlugin.get_all_plugin_paths()
if setup_options.beam_plugins is not None:
plugins = list(set(plugins + setup_options.beam_plugins))
setup_options.beam_plugins = plugins
# Elevate "min_cpu_platform" to pipeline option, but using the existing
# experiment.
debug_options = options.view_as(DebugOptions)
worker_options = options.view_as(WorkerOptions)
if worker_options.min_cpu_platform:
debug_options.add_experiment(
'min_cpu_platform=' + worker_options.min_cpu_platform)
if (apiclient._use_unified_worker(options) and
pipeline.contains_external_transforms):
# All Dataflow multi-language pipelines (supported by Runner v2 only) use
# portable job submission by default.
debug_options.add_experiment("use_portable_job_submission")
# Elevate "enable_streaming_engine" to pipeline option, but using the
# existing experiment.
google_cloud_options = options.view_as(GoogleCloudOptions)
if google_cloud_options.enable_streaming_engine:
debug_options.add_experiment("enable_windmill_service")
debug_options.add_experiment("enable_streaming_engine")
elif (apiclient._use_fnapi(options) and
apiclient._use_unified_worker(options) and
options.view_as(StandardOptions).streaming):
debug_options.add_experiment("enable_windmill_service")
debug_options.add_experiment("enable_streaming_engine")
else:
if (debug_options.lookup_experiment("enable_windmill_service") or
debug_options.lookup_experiment("enable_streaming_engine")):
raise ValueError(
"""Streaming engine both disabled and enabled:
--enable_streaming_engine flag is not set, but
enable_windmill_service
and/or enable_streaming_engine experiments are present.
It is recommended you only set the --enable_streaming_engine flag.""")
dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None)
if dataflow_worker_jar is not None:
if not apiclient._use_fnapi(options):
_LOGGER.warning(
'Typical end users should not use this worker jar feature. '
'It can only be used when FnAPI is enabled.')
else:
debug_options.add_experiment('use_staged_dataflow_worker_jar')
# Make Dataflow workers use FastAvro on Python 3 unless use_avro experiment
# is set. Note that use_avro is only interpreted by the Dataflow runner
# at job submission and is not interpreted by Dataflow service or workers,
# which by default use avro library unless use_fastavro experiment is set.
if not debug_options.lookup_experiment('use_avro'):
debug_options.add_experiment('use_fastavro')
self.job = apiclient.Job(options, self.proto_pipeline)
# Dataflow Runner v1 requires output type of the Flatten to be the same as
# the inputs, hence we enforce that here. Dataflow Runner v2 does not
# require this.
pipeline.visit(self.flatten_input_visitor())
# Trigger a traversal of all reachable nodes.
self.visit_transforms(pipeline, options)
test_options = options.view_as(TestOptions)
# If it is a dry run, return without submitting the job.
if test_options.dry_run:
result = PipelineResult(PipelineState.DONE)
result.wait_until_finish = lambda duration=None: None
return result
# Get a Dataflow API client and set its options
self.dataflow_client = apiclient.DataflowApplicationClient(options)
# Create the job description and send a request to the service. The result
# can be None if there is no need to send a request to the service (e.g.
# template creation). If a request was sent and failed then the call will
# raise an exception.
result = DataflowPipelineResult(
self.dataflow_client.create_job(self.job), self)
# TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring.
from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
result.metric_results = self._metrics
return result
def _maybe_add_unified_worker_missing_options(self, options):
debug_options = options.view_as(DebugOptions)
# Streaming is always portable, default to runner v2.
if options.view_as(StandardOptions).streaming:
if not debug_options.lookup_experiment('disable_runner_v2'):
debug_options.add_experiment('beam_fn_api')
debug_options.add_experiment('use_runner_v2')
debug_options.add_experiment('use_portable_job_submission')
# Set the default beam_fn_api experiment if the unified worker
# experiment flag exists; no-op otherwise.
from apache_beam.runners.dataflow.internal import apiclient
if apiclient._use_unified_worker(options):
if not debug_options.lookup_experiment('beam_fn_api'):
debug_options.add_experiment('beam_fn_api')
def _get_typehint_based_encoding(self, typehint, window_coder):
"""Returns an encoding based on a typehint object."""
return self._get_cloud_encoding(
self._get_coder(typehint, window_coder=window_coder))
@staticmethod
def _get_coder(typehint, window_coder):
"""Returns a coder based on a typehint object."""
if window_coder:
return coders.WindowedValueCoder(
coders.registry.get_coder(typehint), window_coder=window_coder)
return coders.registry.get_coder(typehint)
def _get_cloud_encoding(self, coder, unused=None):
"""Returns an encoding based on a coder object."""
if not isinstance(coder, coders.Coder):
raise TypeError(
'Coder object must inherit from coders.Coder: %s.' % str(coder))
return coder.as_cloud_object(self.proto_context.coders)
def _get_side_input_encoding(self, input_encoding):
"""Returns an encoding for the output of a view transform.
Args:
input_encoding: encoding of current transform's input. Side inputs need
this because the service will check that input and output types match.
Returns:
An encoding that matches the output and input encoding. This is essential
for the View transforms introduced to produce side inputs to a ParDo.
"""
return {
'@type': 'kind:stream',
'component_encodings': [input_encoding],
'is_stream_like': {
'value': True
},
}
def _get_encoded_output_coder(
self, transform_node, window_value=True, output_tag=None):
"""Returns the cloud encoding of the coder for the output of a transform."""
if output_tag in transform_node.outputs:
element_type = transform_node.outputs[output_tag].element_type
elif len(transform_node.outputs) == 1:
output_tag = DataflowRunner._only_element(transform_node.outputs.keys())
# TODO(robertwb): Handle type hints for multi-output transforms.
element_type = transform_node.outputs[output_tag].element_type
else:
# TODO(silviuc): Remove this branch (and assert) when typehints are
# propagated everywhere. Returning an 'Any' as type hint will trigger
# usage of the fallback coder (i.e., cPickler).
element_type = typehints.Any
if window_value:
# All outputs have the same windowing. So getting the coder from an
# arbitrary window is fine.
output_tag = next(iter(transform_node.outputs.keys()))
window_coder = (
transform_node.outputs[output_tag].windowing.windowfn.
get_window_coder())
else:
window_coder = None
return self._get_typehint_based_encoding(element_type, window_coder)
def get_pcoll_with_auto_sharding(self):
if not hasattr(self, '_pcoll_with_auto_sharding'):
return set()
return self._pcoll_with_auto_sharding
def add_pcoll_with_auto_sharding(self, applied_ptransform):
if not hasattr(self, '_pcoll_with_auto_sharding'):
self.__setattr__('_pcoll_with_auto_sharding', set())
output = DataflowRunner._only_element(applied_ptransform.outputs.keys())
self._pcoll_with_auto_sharding.add(
applied_ptransform.outputs[output]._unique_name())
def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
"""Creates a Step object and adds it to the cache."""
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(step_kind, self._get_unique_step_name())
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, step_label)
# Cache the node/step association for the main output of the transform node.
# External transforms may not use 'None' as an output tag.
output_tags = (
[None] + list(side_tags)
if None in transform_node.outputs.keys()
else list(transform_node.outputs.keys()))
# We have to cache output for all tags since some transforms may produce
# multiple outputs.
for output_tag in output_tags:
self._cache.cache_output(transform_node, output_tag, step)
# Finally, we add the display data items to the pipeline step.
# If the transform contains no display data then an empty list is added.
step.add_property(
PropertyNames.DISPLAY_DATA,
[
item.get_dict()
for item in DisplayData.create_from(transform_node.transform).items
])
if transform_node.resource_hints:
step.add_property(
PropertyNames.RESOURCE_HINTS,
{
hint: quote_from_bytes(value)
for (hint, value) in transform_node.resource_hints.items()
})
return step
def _add_singleton_step(
self,
label,
full_label,
tag,
input_step,
windowing_strategy,
access_pattern):
"""Creates a CollectionToSingleton step used to handle ParDo side inputs."""
# Import here to avoid adding the dependency for local running scenarios.
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, full_label)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(tag)
})
step.encoding = self._get_side_input_encoding(input_step.encoding)
output_info = {
PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}
if common_urns.side_inputs.MULTIMAP.urn == access_pattern:
output_info[PropertyNames.USE_INDEXED_FORMAT] = True
step.add_property(PropertyNames.OUTPUT_INFO, [output_info])
step.add_property(
PropertyNames.WINDOWING_STRATEGY,
self.serialize_windowing_strategy(
windowing_strategy, self._default_environment))
return step
def run_Impulse(self, transform_node, options):
standard_options = options.view_as(StandardOptions)
debug_options = options.view_as(DebugOptions)
use_fn_api = (
debug_options.experiments and
'beam_fn_api' in debug_options.experiments)
use_streaming_engine = (
debug_options.experiments and
'enable_streaming_engine' in debug_options.experiments and
'enable_windmill_service' in debug_options.experiments)
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
if (standard_options.streaming and
(not use_fn_api or not use_streaming_engine)):
step.add_property(PropertyNames.FORMAT, 'pubsub')
step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')
else:
step.add_property(PropertyNames.FORMAT, 'impulse')
encoded_impulse_element = coders.WindowedValueCoder(
coders.BytesCoder(),
coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
window.GlobalWindows.windowed_value(b''))
if use_fn_api:
encoded_impulse_as_str = self.byte_array_to_json_string(
encoded_impulse_element)
else:
encoded_impulse_as_str = base64.b64encode(
encoded_impulse_element).decode('ascii')
step.add_property(PropertyNames.IMPULSE_ELEMENT, encoded_impulse_as_str)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}])
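# Illustrative sketch (uses the same Beam coder APIs this module imports):
# the impulse payload built above is a single empty-bytes element in the
# global window, nested-encoded so the byte string is self-delimiting.
from apache_beam import coders
from apache_beam.transforms import window

impulse_coder = coders.WindowedValueCoder(
    coders.BytesCoder(), coders.coders.GlobalWindowCoder())
payload = impulse_coder.get_impl().encode_nested(
    window.GlobalWindows.windowed_value(b''))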
def run_Flatten(self, transform_node, options):
step = self._add_step(
TransformNames.FLATTEN, transform_node.full_label, transform_node)
inputs = []
for one_input in transform_node.inputs:
input_step = self._cache.get_pvalue(one_input)
inputs.append({
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)
})
step.add_property(PropertyNames.INPUTS, inputs)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}])
# TODO(srohde): Remove this after internal usages have been removed.
def apply_GroupByKey(self, transform, pcoll, options):
return transform.expand(pcoll)
def _verify_gbk_coders(self, transform, pcoll):
# Infer coder of parent.
#
# TODO(ccy): make Coder inference and checking less specialized and more
# comprehensive.
parent = pcoll.producer
if parent:
coder = parent.transform._infer_output_coder() # pylint: disable=protected-access
if not coder:
coder = self._get_coder(pcoll.element_type or typehints.Any, None)
if not coder.is_kv_coder():
raise ValueError((
'Coder for the GroupByKey operation "%s" is not a '
'key-value coder: %s.') % (transform.label, coder))
# TODO(robertwb): Update the coder itself if it changed.
coders.registry.verify_deterministic(
coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
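# Quick sketch of the key-value check above: a KV type hint resolves to a
# coder whose is_kv_coder() is True (same registry API as used here).
from apache_beam import coders
from apache_beam.typehints import typehints

kv_coder = coders.registry.get_coder(typehints.KV[str, int])
assert kv_coder.is_kv_coder()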
def run_GroupByKey(self, transform_node, options):
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
# Verify that the GBK's parent has a KV coder.
self._verify_gbk_coders(transform_node.transform, transform_node.inputs[0])
step = self._add_step(
TransformNames.GROUP, transform_node.full_label, transform_node)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
})
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}])
windowing = transform_node.transform.get_windowing(transform_node.inputs)
step.add_property(
PropertyNames.SERIALIZED_FN,
self.serialize_windowing_strategy(windowing, self._default_environment))
def run_ExternalTransform(self, transform_node, options):
# Adds a dummy step to the Dataflow job description so that inputs and
# outputs are mapped correctly in the presence of external transforms.
#
# Note that Dataflow Python multi-language pipelines use Portable Job
# Submission by default, hence this step and the rest of the Dataflow step
# definitions defined here are not used by the Dataflow service, but we
# have to maintain the mapping correctly until we can fully drop the
# Dataflow step definitions from the SDK.
# AppliedTransform node outputs have to be updated to correctly map the
# outputs for external transforms.
transform_node.outputs = ({
output.tag: output
for output in transform_node.outputs.values()
})
self.run_Impulse(transform_node, options)
def run_ParDo(self, transform_node, options):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
# Attach side inputs.
si_dict = {}
si_labels = {}
full_label_counts = defaultdict(int)
lookup_label = lambda side_pval: si_labels[side_pval]
named_inputs = transform_node.named_inputs()
label_renames = {}
for ix, side_pval in enumerate(transform_node.side_inputs):
assert isinstance(side_pval, AsSideInput)
step_name = 'SideInput-' + self._get_unique_step_name()
si_label = ((SIDE_INPUT_PREFIX + '%d-%s') %
(ix, transform_node.full_label))
old_label = (SIDE_INPUT_PREFIX + '%d') % ix
label_renames[old_label] = si_label
assert old_label in named_inputs
pcollection_label = '%s.%s' % (
side_pval.pvalue.producer.full_label.split('/')[-1],
side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
si_full_label = '%s/%s(%s.%s)' % (
transform_node.full_label,
side_pval.__class__.__name__,
pcollection_label,
full_label_counts[pcollection_label])
# Count the number of times the same PCollection is a side input
# to the same ParDo.
full_label_counts[pcollection_label] += 1
self._add_singleton_step(
step_name,
si_full_label,
side_pval.pvalue.tag,
self._cache.get_pvalue(side_pval.pvalue),
side_pval.pvalue.windowing,
side_pval._side_input_data().access_pattern)
si_dict[si_label] = {
'@type': 'OutputReference',
PropertyNames.STEP_NAME: step_name,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}
si_labels[side_pval] = si_label
# Now create the step for the ParDo transform being handled.
transform_name = transform_node.full_label.rsplit('/', 1)[-1]
step = self._add_step(
TransformNames.DO,
transform_node.full_label +
('/{}'.format(transform_name) if transform_node.side_inputs else ''),
transform_node,
transform_node.transform.output_tags)
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
transform_proto = self.proto_context.transforms.get_proto(transform_node)
transform_id = self.proto_context.transforms.get_id(transform_node)
use_fnapi = apiclient._use_fnapi(options)
use_unified_worker = apiclient._use_unified_worker(options)
# Patch side input ids to be unique across a given pipeline.
if (label_renames and
transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn):
# Patch PTransform proto.
for old, new in label_renames.items():
transform_proto.inputs[new] = transform_proto.inputs[old]
del transform_proto.inputs[old]
# Patch ParDo proto.
proto_type, _ = beam.PTransform._known_urns[transform_proto.spec.urn]
proto = proto_utils.parse_Bytes(transform_proto.spec.payload, proto_type)
for old, new in label_renames.items():
proto.side_inputs[new].CopyFrom(proto.side_inputs[old])
del proto.side_inputs[old]
transform_proto.spec.payload = proto.SerializeToString()
# We need to update the pipeline proto.
del self.proto_pipeline.components.transforms[transform_id]
(
self.proto_pipeline.components.transforms[transform_id].CopyFrom(
transform_proto))
# The data transmitted in SERIALIZED_FN is different depending on whether
# this is a fnapi pipeline or not.
if (use_fnapi and
(transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn or
use_unified_worker)):
serialized_data = transform_id
else:
serialized_data = pickler.dumps(
self._pardo_fn_data(transform_node, lookup_label))
step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
# TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
# step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
})
# Add side inputs if any.
step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)
# Generate description for the outputs. The output names
# will be 'None' for main output and '<tag>' for a tagged output.
outputs = []
all_output_tags = list(transform_proto.outputs.keys())
# Some external transforms require output tags to not be modified.
# So we randomly select one of the output tags as the main output and
# leave others as side outputs. Transform execution should not change
# depending on which output tag we choose as the main output here.
# Also, some SDKs do not work correctly if output tags are modified. So for
# external transforms, we leave tags unmodified.
#
# Python SDK uses 'None' as the tag of the main output.
main_output_tag = 'None'
step.encoding = self._get_encoded_output_coder(
transform_node, output_tag=main_output_tag)
side_output_tags = set(all_output_tags).difference({main_output_tag})
# Add the main output to the description.
outputs.append({
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: main_output_tag
})
for side_tag in side_output_tags:
# The assumption here is that all outputs will have the same typehint
# and coder as the main output. This is certainly the case right now
# but conceivably it could change in the future.
encoding = self._get_encoded_output_coder(
transform_node, output_tag=side_tag)
outputs.append({
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, side_tag)),
PropertyNames.ENCODING: encoding,
PropertyNames.OUTPUT_NAME: side_tag
})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
# Add the restriction encoding if we are a splittable DoFn
# and are using the Fn API on the unified worker.
restriction_coder = transform.get_restriction_coder()
if restriction_coder:
step.add_property(
PropertyNames.RESTRICTION_ENCODING,
self._get_cloud_encoding(restriction_coder))
if options.view_as(StandardOptions).streaming:
is_stateful_dofn = (DoFnSignature(transform.dofn).is_stateful_dofn())
if is_stateful_dofn:
step.add_property(PropertyNames.USES_KEYED_STATE, 'true')
# Also checks whether the step allows shardable keyed states.
# TODO(BEAM-11360): remove this when migrated to portable job
# submission since we only consider supporting the property in runner
# v2.
for pcoll in transform_node.outputs.values():
if pcoll._unique_name() in self.get_pcoll_with_auto_sharding():
step.add_property(PropertyNames.ALLOWS_SHARDABLE_STATE, 'true')
# Currently we only allow auto-sharding to be enabled through the
# GroupIntoBatches transform. So we also add the following property
# which GroupIntoBatchesDoFn has, to allow the backend to perform
# graph optimization.
step.add_property(PropertyNames.PRESERVES_KEYS, 'true')
break
@staticmethod
def _pardo_fn_data(transform_node, get_label):
transform = transform_node.transform
si_tags_and_types = [ # pylint: disable=protected-access
(get_label(side_pval), side_pval.__class__, side_pval._view_options())
for side_pval in transform_node.side_inputs]
return (
transform.fn,
transform.args,
transform.kwargs,
si_tags_and_types,
transform_node.inputs[0].windowing)
def run_CombineValuesReplacement(self, transform_node, options):
transform = transform_node.transform.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.COMBINE, transform_node.full_label, transform_node)
transform_id = self.proto_context.transforms.get_id(transform_node.parent)
# The data transmitted in SERIALIZED_FN is different depending on whether
# this is a fnapi pipeline or not.
from apache_beam.runners.dataflow.internal import apiclient
use_fnapi = apiclient._use_fnapi(options)
if use_fnapi:
# Fnapi pipelines send the transform ID of the CombineValues transform's
# parent composite because Dataflow expects the ID of a CombinePerKey
# transform.
serialized_data = transform_id
else:
# Combiner functions do not take deferred side-inputs (i.e. PValues) and
# therefore the code to handle extra args/kwargs is simpler than for the
# DoFn's of the ParDo transform. The last, empty argument is where
# side-input information would go.
serialized_data = pickler.dumps(
(transform.fn, transform.args, transform.kwargs, ()))
step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
# TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
# step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
})
# Note that the accumulator must not have a WindowedValue encoding, while
# the output of this step does in fact have a WindowedValue encoding.
accumulator_encoding = self._get_cloud_encoding(
transform.fn.get_accumulator_coder())
output_encoding = self._get_encoded_output_coder(transform_node)
step.encoding = output_encoding
step.add_property(PropertyNames.ENCODING, accumulator_encoding)
# Generate description for main output 'out.'
outputs = []
# Add the main output to the description.
outputs.append({
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def run_Read(self, transform_node, options):
transform = transform_node.transform
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the source specific properties.
standard_options = options.view_as(StandardOptions)
if not hasattr(transform.source, 'format'):
# If a format is not set, we assume the source to be a custom source.
source_dict = {}
source_dict['spec'] = {
'@type': names.SOURCE_TYPE,
names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
}
try:
source_dict['metadata'] = {
'estimated_size_bytes': json_value.get_typed_value_descriptor(
transform.source.estimate_size())
}
except error.RuntimeValueProviderError:
# Size estimation is best effort; this error means the size depends on
# a value that is only available at runtime.
_LOGGER.info(
'Could not estimate size of source %r due to '
'RuntimeValueProviderError', transform.source)
except Exception: # pylint: disable=broad-except
# Size estimation is best effort. So we log the error and continue.
_LOGGER.info(
'Could not estimate size of source %r due to an exception: %s',
transform.source,
traceback.format_exc())
step.add_property(PropertyNames.SOURCE_STEP_INPUT, source_dict)
elif transform.source.format == 'text':
step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
elif transform.source.format == 'bigquery':
if standard_options.streaming:
raise ValueError(
'BigQuery source is not currently available for use '
'in streaming pipelines.')
debug_options = options.view_as(DebugOptions)
use_fn_api = (
debug_options.experiments and
'beam_fn_api' in debug_options.experiments)
if use_fn_api:
raise ValueError(BQ_SOURCE_UW_ERROR)
step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
# TODO(silviuc): Add table validation if transform.source.validate.
if transform.source.table_reference is not None:
step.add_property(
PropertyNames.BIGQUERY_DATASET,
transform.source.table_reference.datasetId)
step.add_property(
PropertyNames.BIGQUERY_TABLE,
transform.source.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.source.table_reference.projectId is not None:
step.add_property(
PropertyNames.BIGQUERY_PROJECT,
transform.source.table_reference.projectId)
elif transform.source.query is not None:
step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
step.add_property(
PropertyNames.BIGQUERY_USE_LEGACY_SQL,
transform.source.use_legacy_sql)
step.add_property(
PropertyNames.BIGQUERY_FLATTEN_RESULTS,
transform.source.flatten_results)
else:
raise ValueError(
'BigQuery source %r must specify either a table or'
' a query' % transform.source)
if transform.source.kms_key is not None:
step.add_property(
PropertyNames.BIGQUERY_KMS_KEY, transform.source.kms_key)
elif transform.source.format == 'pubsub':
if not standard_options.streaming:
raise ValueError(
'Cloud Pub/Sub is currently available for use '
'only in streaming pipelines.')
# Only one of topic or subscription should be set.
if transform.source.full_subscription:
step.add_property(
PropertyNames.PUBSUB_SUBSCRIPTION,
transform.source.full_subscription)
elif transform.source.full_topic:
step.add_property(
PropertyNames.PUBSUB_TOPIC, transform.source.full_topic)
if transform.source.id_label:
step.add_property(
PropertyNames.PUBSUB_ID_LABEL, transform.source.id_label)
if transform.source.with_attributes:
# Setting this property signals Dataflow runner to return full
# PubsubMessages instead of just the data part of the payload.
step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
if transform.source.timestamp_attribute is not None:
step.add_property(
PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
transform.source.timestamp_attribute)
else:
raise ValueError(
'Source %r has unexpected format %s.' %
(transform.source, transform.source.format))
if not hasattr(transform.source, 'format'):
step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
else:
step.add_property(PropertyNames.FORMAT, transform.source.format)
# Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
# step should be the type of value outputted by each step. Read steps
# automatically wrap output values in a WindowedValue wrapper, if necessary.
# This is also necessary for proper encoding for size estimation.
# Using a GlobalWindowCoder as a placeholder instead of the default
# PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(
coders.registry.get_coder(transform_node.outputs[None].element_type),
coders.coders.GlobalWindowCoder())
step.encoding = self._get_cloud_encoding(coder)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}])
def run__NativeWrite(self, transform_node, options):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.WRITE, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the sink specific properties.
if transform.sink.format == 'text':
# Note that it is important to use typed properties (@type/value dicts)
# for non-string properties and also for empty strings. For example, in
# the code below num_shards needs a type, and file_name_suffix and
# shard_name_template may be empty strings.
step.add_property(
PropertyNames.FILE_NAME_PREFIX,
transform.sink.file_name_prefix,
with_type=True)
step.add_property(
PropertyNames.FILE_NAME_SUFFIX,
transform.sink.file_name_suffix,
with_type=True)
step.add_property(
PropertyNames.SHARD_NAME_TEMPLATE,
transform.sink.shard_name_template,
with_type=True)
if transform.sink.num_shards > 0:
step.add_property(
PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
# TODO(silviuc): Implement sink validation.
step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
elif transform.sink.format == 'bigquery':
# TODO(silviuc): Add table validation if transform.sink.validate.
step.add_property(
PropertyNames.BIGQUERY_DATASET,
transform.sink.table_reference.datasetId)
step.add_property(
PropertyNames.BIGQUERY_TABLE, transform.sink.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.sink.table_reference.projectId is not None:
step.add_property(
PropertyNames.BIGQUERY_PROJECT,
transform.sink.table_reference.projectId)
step.add_property(
PropertyNames.BIGQUERY_CREATE_DISPOSITION,
transform.sink.create_disposition)
step.add_property(
PropertyNames.BIGQUERY_WRITE_DISPOSITION,
transform.sink.write_disposition)
if transform.sink.table_schema is not None:
step.add_property(
PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
if transform.sink.kms_key is not None:
step.add_property(
PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key)
elif transform.sink.format == 'pubsub':
standard_options = options.view_as(StandardOptions)
if not standard_options.streaming:
raise ValueError(
'Cloud Pub/Sub is currently available for use '
'only in streaming pipelines.')
step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
if transform.sink.id_label:
step.add_property(
PropertyNames.PUBSUB_ID_LABEL, transform.sink.id_label)
# Setting this property signals Dataflow runner that the PCollection
# contains PubsubMessage objects instead of just raw data.
step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
if transform.sink.timestamp_attribute is not None:
step.add_property(
PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
transform.sink.timestamp_attribute)
else:
raise ValueError(
'Sink %r has unexpected format %s.' %
(transform.sink, transform.sink.format))
step.add_property(PropertyNames.FORMAT, transform.sink.format)
# Wrap coder in WindowedValueCoder: this is necessary for proper encoding
# for size estimation. Using a GlobalWindowCoder as a placeholder instead
# of the default PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(
transform.sink.coder, coders.coders.GlobalWindowCoder())
step.encoding = self._get_cloud_encoding(coder)
step.add_property(PropertyNames.ENCODING, step.encoding)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
})
def run_TestStream(self, transform_node, options):
from apache_beam.testing.test_stream import ElementEvent
from apache_beam.testing.test_stream import ProcessingTimeEvent
from apache_beam.testing.test_stream import WatermarkEvent
standard_options = options.view_as(StandardOptions)
if not standard_options.streaming:
raise ValueError(
'TestStream is currently available for use '
'only in streaming pipelines.')
transform = transform_node.transform
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
step.add_property(
PropertyNames.SERIALIZED_FN,
self.proto_context.transforms.get_id(transform_node))
step.add_property(PropertyNames.FORMAT, 'test_stream')
test_stream_payload = beam_runner_api_pb2.TestStreamPayload()
# TestStream source doesn't do any decoding of elements,
# so we won't set test_stream_payload.coder_id.
output_coder = transform._infer_output_coder() # pylint: disable=protected-access
for event in transform._events:
new_event = test_stream_payload.events.add()
if isinstance(event, ElementEvent):
for tv in event.timestamped_values:
element = new_event.element_event.elements.add()
element.encoded_element = output_coder.encode(tv.value)
element.timestamp = tv.timestamp.micros
elif isinstance(event, ProcessingTimeEvent):
new_event.processing_time_event.advance_duration = (
event.advance_by.micros)
elif isinstance(event, WatermarkEvent):
new_event.watermark_event.new_watermark = event.new_watermark.micros
serialized_payload = self.byte_array_to_json_string(
test_stream_payload.SerializeToString())
step.add_property(PropertyNames.SERIALIZED_TEST_STREAM, serialized_payload)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}])
# We must mark this method as not a test, or else its name matches the
# pattern nosetest uses to discover tests.
run_TestStream.__test__ = False # type: ignore[attr-defined]
@classmethod
def serialize_windowing_strategy(cls, windowing, default_environment):
from apache_beam.runners import pipeline_context
context = pipeline_context.PipelineContext(
default_environment=default_environment)
windowing_proto = windowing.to_runner_api(context)
return cls.byte_array_to_json_string(
beam_runner_api_pb2.MessageWithComponents(
components=context.to_runner_api(),
windowing_strategy=windowing_proto).SerializeToString())
@classmethod
def deserialize_windowing_strategy(cls, serialized_data):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners import pipeline_context
from apache_beam.transforms.core import Windowing
proto = beam_runner_api_pb2.MessageWithComponents()
proto.ParseFromString(cls.json_string_to_byte_array(serialized_data))
return Windowing.from_runner_api(
proto.windowing_strategy,
pipeline_context.PipelineContext(proto.components))
@staticmethod
def byte_array_to_json_string(raw_bytes):
"""Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString."""
return quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
"""Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray."""
return unquote_to_bytes(encoded_string)
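# Stdlib sketch of the two helpers above: the "JSON string" form is plain
# percent-encoding, so arbitrary bytes round-trip losslessly.
from urllib.parse import quote, unquote_to_bytes

raw = b'\x00\x01 beam'
assert unquote_to_bytes(quote(raw)) == raw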
def get_default_gcp_region(self):
"""Get a default value for Google Cloud region according to
https://cloud.google.com/compute/docs/gcloud-compute/#default-properties.
If no default can be found, returns None.
"""
environment_region = os.environ.get('CLOUDSDK_COMPUTE_REGION')
if environment_region:
_LOGGER.info(
'Using default GCP region %s from $CLOUDSDK_COMPUTE_REGION',
environment_region)
return environment_region
try:
cmd = ['gcloud', 'config', 'get-value', 'compute/region']
raw_output = processes.check_output(cmd, stderr=DEVNULL)
formatted_output = raw_output.decode('utf-8').strip()
if formatted_output:
_LOGGER.info(
'Using default GCP region %s from `%s`',
formatted_output,
' '.join(cmd))
return formatted_output
except RuntimeError:
pass
return None
class _DataflowSideInput(beam.pvalue.AsSideInput):
"""Wraps a side input as a dataflow-compatible side input."""
def _view_options(self):
return {
'data': self._data,
}
def _side_input_data(self):
return self._data
class _DataflowIterableAsMultimapSideInput(_DataflowSideInput):
"""Wraps an iterable side input as dataflow-compatible side input."""
def __init__(self, side_input):
# pylint: disable=protected-access
side_input_data = side_input._side_input_data()
assert (
side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn)
iterable_view_fn = side_input_data.view_fn
self._data = beam.pvalue.SideInputData(
common_urns.side_inputs.MULTIMAP.urn,
side_input_data.window_mapping_fn,
lambda multimap: iterable_view_fn(multimap[b'']))
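# Minimal sketch of the adaptation above: an ITERABLE side input is served
# through the MULTIMAP access pattern under the single key b''. The values
# here are hypothetical; only builtins are used.
multimap = {b'': [1, 2, 3]}
iterable_view_fn = list  # stands in for side_input_data.view_fn
view_fn = lambda mm: iterable_view_fn(mm[b''])
assert view_fn(multimap) == [1, 2, 3]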
class _DataflowIterableSideInput(_DataflowSideInput):
"""Wraps an iterable side input as dataflow-compatible side input."""
def __init__(self, side_input):
# pylint: disable=protected-access
self.pvalue = side_input.pvalue
side_input_data = side_input._side_input_data()
assert (
side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn)
self._data = beam.pvalue.SideInputData(
common_urns.side_inputs.ITERABLE.urn,
side_input_data.window_mapping_fn,
side_input_data.view_fn)
class _DataflowMultimapSideInput(_DataflowSideInput):
"""Wraps a multimap side input as dataflow-compatible side input."""
def __init__(self, side_input):
# pylint: disable=protected-access
self.pvalue = side_input.pvalue
side_input_data = side_input._side_input_data()
assert (
side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn)
self._data = beam.pvalue.SideInputData(
common_urns.side_inputs.MULTIMAP.urn,
side_input_data.window_mapping_fn,
side_input_data.view_fn)
class DataflowPipelineResult(PipelineResult):
"""Represents the state of a pipeline run on the Dataflow service."""
def __init__(self, job, runner):
"""Initialize a new DataflowPipelineResult instance.
Args:
job: Job message from the Dataflow API. Could be :data:`None` if a job
request was not sent to Dataflow service (e.g. template jobs).
runner: DataflowRunner instance.
"""
self._job = job
self._runner = runner
self.metric_results = None
def _update_job(self):
# We need the job id to be able to update job information. There is no need
# to update the job if we are in a known terminal state.
if self.has_job and not self.is_in_terminal_state():
self._job = self._runner.dataflow_client.get_job(self.job_id())
def job_id(self):
return self._job.id
def metrics(self):
return self.metric_results
@property
def has_job(self):
return self._job is not None
def _get_job_state(self):
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
# Ordered by the enum values. Values that may be introduced in
# future versions of Dataflow API are considered UNRECOGNIZED by the SDK.
api_jobstate_map = defaultdict(
lambda: PipelineState.UNRECOGNIZED,
{
values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
values_enum.JOB_STATE_DONE: PipelineState.DONE,
values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
})
return (
api_jobstate_map[self._job.currentState]
if self._job.currentState else PipelineState.UNKNOWN)
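# Sketch of the defaultdict fallback above: enum values introduced by
# future Dataflow API versions map to UNRECOGNIZED instead of raising.
from collections import defaultdict

state_map = defaultdict(lambda: 'UNRECOGNIZED', {'JOB_STATE_DONE': 'DONE'})
assert state_map['JOB_STATE_BRAND_NEW'] == 'UNRECOGNIZED'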
@property
def state(self):
"""Return the current state of the remote job.
Returns:
A PipelineState object.
"""
if not self.has_job:
return PipelineState.UNKNOWN
self._update_job()
return self._get_job_state()
def is_in_terminal_state(self):
if not self.has_job:
return True
return PipelineState.is_terminal(self._get_job_state())
def wait_until_finish(self, duration=None):
if not self.is_in_terminal_state():
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
thread = threading.Thread(
target=DataflowRunner.poll_for_job_completion,
args=(self._runner, self, duration))
# Mark the thread as a daemon thread so a keyboard interrupt on the main
# thread will terminate everything. This is also the reason we will not
# use thread.join() to wait for the polling thread.
thread.daemon = True
thread.start()
while thread.is_alive():
time.sleep(5.0)
# TODO: Merge the termination code in poll_for_job_completion and
# is_in_terminal_state.
terminated = self.is_in_terminal_state()
assert duration or terminated, (
'Job did not reach a terminal state after waiting indefinitely.')
if terminated and self.state != PipelineState.DONE:
# TODO(BEAM-1290): Consider converting this to an error log based on
# the resolution of the issue.
raise DataflowRuntimeException(
'Dataflow pipeline failed. State: %s, Error:\n%s' %
(self.state, getattr(self._runner, 'last_error_msg', None)),
self)
return self.state
def cancel(self):
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
self._update_job()
if self.is_in_terminal_state():
_LOGGER.warning(
'Cancel failed because job %s is already terminated in state %s.',
self.job_id(),
self.state)
else:
if not self._runner.dataflow_client.modify_job_state(
self.job_id(), 'JOB_STATE_CANCELLED'):
cancel_failed_message = (
'Failed to cancel job %s, please go to the Developers Console to '
'cancel it manually.') % self.job_id()
_LOGGER.error(cancel_failed_message)
raise DataflowRuntimeException(cancel_failed_message, self)
return self.state
def __str__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.job_id(), self.state)
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
"""Indicates an error has occurred in running this pipeline."""
def __init__(self, msg, result):
super(DataflowRuntimeException, self).__init__(msg)
self.result = result
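# Hedged sketch of the daemon-polling pattern wait_until_finish relies on:
# the poller is a daemon thread, so a KeyboardInterrupt on the main thread
# tears the process down without join(). All names here are illustrative.
import threading
import time

done = threading.Event()
poller = threading.Thread(target=done.wait)
poller.daemon = True
poller.start()
done.set()
while poller.is_alive():
    time.sleep(0.01)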
|
robertwb/incubator-beam
|
sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
|
Python
|
apache-2.0
| 71,425
|
[
"VisIt"
] |
f67394a8127bb9e337df6b74263f51c3a960961406f41488406d0274ae01193d
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The Ray Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is adapted from
# https://github.com/ray-project/ray/blob/master/python/ray/util/sgd/torch/torch_runner.py
from filelock import FileLock
import logging
import io
import itertools
import os
import tempfile
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from bigdl.orca import OrcaContext
from bigdl.orca.learn.pytorch.constants import SCHEDULER_STEP, NUM_STEPS
from bigdl.orca.learn.pytorch.training_operator import TrainingOperator
from bigdl.orca.learn.pytorch import utils
from bigdl.orca.learn.pytorch.utils import get_filesystem
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
class DistBackend:
def get_world_size(self):
pass
def all_reduce(self, *args, **kwargs):
pass
class HorovodDistBackend(DistBackend):
def get_world_size(self):
import horovod.torch as hvd
return hvd.size()
def all_reduce(self, *args, **kwargs):
import horovod.torch as hvd
return hvd.allreduce(*args, **kwargs)
class TorchDistBackend(DistBackend):
def get_world_size(self):
import torch.distributed as dist
return dist.get_world_size()
def all_reduce(self, *args, **kwargs):
import torch.distributed as dist
return dist.all_reduce(*args, **kwargs)
def is_initialized(self):
import torch.distributed as dist
return dist.is_initialized()
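# Hedged sketch (not part of this module): averaging a scalar metric across
# workers through the DistBackend interface above. Assumes the backend's
# process group is already initialized; note torch.distributed sums in
# place and returns None, while Horovod's allreduce returns an averaged
# tensor.
import torch

def average_scalar(backend, value):
    tensor = torch.tensor([float(value)])
    result = backend.all_reduce(tensor)
    if result is not None:  # Horovod: already averaged
        return result.item()
    return tensor.item() / backend.get_world_size()  # torch.distributed: summed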
class TorchRunner:
"""Manages a PyTorch model for training."""
def __init__(self,
model_creator,
optimizer_creator,
loss_creator=None,
metrics=None,
scheduler_creator=None,
training_operator_cls=None,
config=None,
use_tqdm=False,
scheduler_step_freq=None,
sync_stats=True,
log_level=logging.INFO):
logging.basicConfig(level=log_level,
format='[%(asctime)s] %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
self.logger = logging.getLogger(__name__)
self.model_creator = model_creator
self.optimizer_creator = optimizer_creator
self.loss_creator = loss_creator
self.scheduler_creator = scheduler_creator
self.training_operator_cls = training_operator_cls or TrainingOperator
self.config = {} if config is None else config
self.timers = utils.TimerCollection()
self.epochs = 0
self.models = None
self.optimizers = None
self.metrics = metrics
self.criterion = None
self.schedulers = None
self.train_loader = None
self.validation_loader = None
self.training_operator = None
self.use_tqdm = use_tqdm
self.scheduler_step_freq = scheduler_step_freq
self.sync_stats = sync_stats
self.epochs_stats = None # Stats recorded for the most recent epoch
def _create_loss(self):
if not self.loss_creator:
return
self.logger.debug("Creating loss.")
if isinstance(self.loss_creator, torch.nn.modules.loss._Loss):
self.criterion = self.loss_creator
else: # Torch loss is also callable.
import types
assert isinstance(self.loss_creator, types.FunctionType), \
"Must provide a torch loss instance or a loss_creator function"
self.criterion = self.loss_creator(self.config)
def _create_schedulers_if_available(self):
# Learning rate schedules are optional.
if not self.scheduler_creator:
return
self.schedulers = self.scheduler_creator(self.given_optimizers,
self.config)
if not isinstance(self.schedulers, Iterable):
self.schedulers = [self.schedulers]
def setup(self, cores_per_node):
import torch
torch.set_num_threads(cores_per_node)
def setup_torch_distribute(self, url, world_rank, world_size):
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
dist.init_process_group(
backend="gloo",
init_method=url,
rank=world_rank,
world_size=world_size)
self.backend = "torch-distributed"
self.rank = world_rank
self.size = world_size
self.setup_components()
training_models = [
DistributedDataParallel(model)
for model in self.models
]
self.setup_operator(training_models)
def setup_components(self):
"""Runs the creator functions without any distributed coordination."""
self.logger.debug("Creating model")
self.models = self.model_creator(self.config)
if isinstance(self.models, nn.Sequential) or not isinstance(self.models, Iterable):
self.models = [self.models]
assert all(isinstance(model, nn.Module) for model in self.models), (
"All models must be PyTorch models: {}.".format(self.models))
self.logger.debug("Creating optimizer.")
self.optimizers = self.optimizer_creator(self.given_models,
self.config)
if not isinstance(self.optimizers, Iterable):
self.optimizers = [self.optimizers]
self._create_schedulers_if_available()
self._create_loss()
def setup_operator(self, training_models):
"""Create the training operator."""
if self.backend == "horovod":
dist_backend = HorovodDistBackend()
else:
dist_backend = TorchDistBackend()
self.training_operator = \
self.training_operator_cls(
self.config,
models=training_models,
optimizers=self.optimizers,
criterion=self.criterion,
world_rank=self.rank,
schedulers=self.schedulers,
use_tqdm=self.use_tqdm,
sync_stats=self.sync_stats,
dist_backend=dist_backend)
def with_sampler(self, loader):
self.logger.debug("Wrapping DistributedSampler on DataLoader")
data_loader_args = {
"dataset": loader.dataset,
"batch_size": loader.batch_size,
"shuffle": False,
"num_workers": loader.num_workers,
"collate_fn": loader.collate_fn,
"pin_memory": loader.pin_memory,
"drop_last": loader.drop_last,
"timeout": loader.timeout,
"worker_init_fn": loader.worker_init_fn,
"sampler": DistributedSampler(loader.dataset,
num_replicas=self.size,
rank=self.rank)
}
return DataLoader(**data_loader_args)
@staticmethod
def should_wrap_dataloader(loader):
from torch.utils.data import DataLoader
try:
from torch.utils.data import IterableDataset
not_iterable = not isinstance(loader.dataset, IterableDataset)
except Exception:
# IterableDataset does not exist in older torch versions; in that case
# no dataset can be an IterableDataset, so wrapping is safe.
not_iterable = True
return (isinstance(loader, DataLoader)
and not_iterable)
def train_epochs(self, data_creator, epochs=1, batch_size=32, profile=False,
info=None, wrap_dataloader=None, callbacks=None):
config = self.config.copy()
if OrcaContext.serialize_data_creator:
with FileLock(
os.path.join(tempfile.gettempdir(), ".orcadata.lock")):
loader = data_creator(config, batch_size)
else:
loader = data_creator(config, batch_size)
if wrap_dataloader is None:
if TorchRunner.should_wrap_dataloader(loader):
loader = self.with_sampler(loader)
elif wrap_dataloader is True:
loader = self.with_sampler(loader)
if callbacks is not None:
for callback in callbacks:
callback.set_trainer(self)
callback.on_train_begin()
stats_list = list()
for i in range(epochs):
if callbacks is not None:
for callback in callbacks:
callback.on_epoch_begin(epoch=self.epochs)
stats = self.train_epoch(loader, profile=profile, info=info, callbacks=callbacks)
if self.rank == 0:
if self.sync_stats:
self.logger.info(f"Finished training epoch {i + 1}, " +
f"stats averaged over workers: {stats}")
else:
self.logger.info(f"Finished training epoch {i + 1}, " +
f"stats on rank 0: {stats}")
stats_list.append(stats)
self.epochs_stats = stats
if callbacks is not None:
for callback in callbacks:
callback.on_epoch_end(epoch=self.epochs)
if callbacks is not None:
for callback in callbacks:
callback.on_train_end()
return stats_list
def train_epoch(self,
data_loader,
profile=False,
info=None,
callbacks=None):
"""Runs a training epoch and updates the model parameters."""
if hasattr(self.train_loader, "sampler") and hasattr(
self.train_loader.sampler, "set_epoch"):
self.train_loader.sampler.set_epoch(self.epochs)
self.logger.debug("Begin Training Step {}".format(self.epochs + 1))
info = info or {}
self._toggle_profiling(profile=profile)
info.update({
SCHEDULER_STEP: self.scheduler_step_freq
})
with self.timers.record("train_epoch"):
data_loader = iter(data_loader)
train_stats = self.training_operator.train_epoch(data_loader, info, callbacks)
self.epochs += 1
# This is so that `epochs` is first in ordering.
stats = dict(epoch=self.epochs, **train_stats)
if profile:
stats.update(profile=self.timers.stats())
return stats
def validate(self, data_creator, batch_size=32, num_steps=None, profile=False,
info=None, wrap_dataloader=None):
"""Evaluates the model on the validation data set."""
config = self.config.copy()
info = info or {}
self._toggle_profiling(profile=profile)
if OrcaContext.serialize_data_creator:
with FileLock(
os.path.join(tempfile.gettempdir(), ".orcadata.lock")):
loader = data_creator(config, batch_size)
else:
loader = data_creator(config, batch_size)
if wrap_dataloader is None:
if TorchRunner.should_wrap_dataloader(loader):
loader = self.with_sampler(loader)
elif wrap_dataloader is True:
loader = self.with_sampler(loader)
loader = iter(loader)
if num_steps:
loader = itertools.islice(loader, num_steps)
with self.timers.record("validation"):
validation_stats = self.training_operator.validate(loader,
info=info,
metrics=self.metrics)
if profile:
validation_stats.update(profile=self.timers.stats())
return validation_stats
def predict(self, partition, batch_size=32, profile=False):
"""Evaluates the model on the validation data set."""
config = self.config.copy()
self._toggle_profiling(profile=profile)
params = {"batch_size": batch_size, "shuffle": False}
for arg in ["shuffle", "sampler", "batch_sampler", "num_workers", "collate_fn",
"pin_memory", "drop_last", "timeout", "worker_init_fn",
"multiprocessing_context"]:
if arg in config:
params[arg] = config[arg]
def predict_fn(shard):
if isinstance(shard["x"], tuple) or isinstance(shard["x"], list):
tensors = [torch.from_numpy(arr) for arr in shard["x"]]
else:
tensors = [torch.from_numpy(shard["x"])]
dataset = torch.utils.data.TensorDataset(*tensors)
data_loader = DataLoader(dataset, **params)
y = self.training_operator.predict(iter(data_loader))
return {"prediction": y}
with self.timers.record("predict"):
new_part = [predict_fn(shard) for shard in partition]
return new_part
def _toggle_profiling(self, profile=False):
"""Enables/Disables and resets timing profiles."""
if profile:
self.timers.enable()
self.timers.reset()
else:
self.timers.disable()
self.training_operator._set_timers(self.timers)
def get_state_dict(self):
"""Returns the state of the runner."""
state = {
"epoch": self.epochs,
"operator": self.training_operator.state_dict(),
"models": [model.state_dict() for model in self.models],
"optimizers": [opt.state_dict() for opt in self.optimizers]
}
if self.schedulers:
state.update({
"schedulers": [
scheduler.state_dict() for scheduler in self.schedulers
]
})
return state
def load_state_dict(self, state):
"""Sets the state of the model."""
for model, state_dict in zip(self.models, state["models"]):
model.load_state_dict(state_dict)
if "optimizers" in state:
for optimizer, state_dict in zip(self.optimizers, state["optimizers"]):
optimizer.load_state_dict(state_dict)
if self.schedulers and "schedulers" in state:
for scheduler, state_dict in zip(self.schedulers,
state["schedulers"]):
scheduler.load_state_dict(state_dict)
self.epochs = state["epoch"]
if "operator" in state:
self.training_operator.load_state_dict(state["operator"])
@staticmethod
def _state_dict2stream(state_dict):
_buffer = io.BytesIO()
torch.save(state_dict, _buffer)
return _buffer.getvalue()
@staticmethod
def _state_stream2dict(byte_obj):
_buffer = io.BytesIO(byte_obj)
state_dict = torch.load(_buffer)
return state_dict
def get_state_stream(self):
"""Returns a bytes object for the state dict."""
state_dict = self.get_state_dict()
state_stream = TorchRunner._state_dict2stream(state_dict)
return state_stream
def load_state_stream(self, byte_obj):
"""Loads a bytes object the training state dict."""
state_dict = TorchRunner._state_stream2dict(byte_obj)
return self.load_state_dict(state_dict)
def save_checkpoint(self, filepath, save_weights_only=False):
if self.rank == 0:
self._save_checkpoint(filepath, save_weights_only)
self.logger.debug(f"Saved checkpoint: {filepath}")
return filepath
def _save_checkpoint(self, filepath, save_weights_only=False):
import fsspec
if save_weights_only:
checkpoint = {
"epoch": self.epochs,
"models": [model.state_dict() for model in self.models],
}
else:
checkpoint = self.get_state_dict()
byte_obj = TorchRunner._state_dict2stream(checkpoint)
with fsspec.open(filepath, "wb") as f:
f.write(byte_obj)
def load_checkpoint(self, filepath):
fs = get_filesystem(filepath)
if not fs.exists(filepath):
raise FileNotFoundError(f"Checkpoint at {filepath} not found. Aborting training.")
with fs.open(filepath, "rb") as f:
state_dict = torch.load(f)
self.load_state_dict(state_dict)
def remove_checkpoint(self, filepath):
if self.rank == 0:
self._remove_checkpoint(filepath)
def _remove_checkpoint(self, filepath):
fs = get_filesystem(filepath)
if fs.exists(filepath):
fs.rm(filepath, recursive=True)
self.logger.debug(f"Removed checkpoint: {filepath}")
def apply(self, fn):
return fn()
def apply_operator(self, fn):
return fn(self.training_operator)
def shutdown(self):
"""Attempts to shut down the worker."""
del self.training_operator
del self.validation_loader
del self.train_loader
del self.criterion
del self.optimizers
del self.models
@property
def given_models(self):
if len(self.models) > 1:
return self.models
else:
return self.models[0]
@property
def given_optimizers(self):
if len(self.optimizers) > 1:
return self.optimizers
else:
return self.optimizers[0]
@property
def given_schedulers(self):
if not self.schedulers:
return self.schedulers
if len(self.schedulers) > 1:
return self.schedulers
else:
return self.schedulers[0]
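# Hedged usage sketch (hypothetical names): the creator-function contract
# TorchRunner expects. Each creator receives the config dict; the optimizer
# creator also receives the created model(s).
import torch
import torch.nn as nn

def model_creator(config):
    return nn.Linear(config.get("in_features", 10), 1)

def optimizer_creator(model, config):
    return torch.optim.SGD(model.parameters(), lr=config.get("lr", 1e-2))

def loss_creator(config):
    return nn.MSELoss()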
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/learn/pytorch/torch_runner.py
|
Python
|
apache-2.0
| 18,769
|
[
"ORCA"
] |
14453a58039eb110d91f08f1e364489f7f12f112851d62588654f48508e6b175
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects describing the basic parameters of the
pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects.
"""
from __future__ import unicode_literals, division, print_function
import abc
import collections
import json
import logging
import os
import sys
import numpy as np
import six
from collections import OrderedDict, defaultdict, namedtuple
from monty.collections import AttrDict, Namespace
from tabulate import tabulate
from monty.dev import deprecated
from monty.functools import lazy_property
from monty.itertools import iterator_from_slice
from monty.json import MSONable, MontyDecoder
from monty.os.path import find_exts
from monty.string import list_strings, is_string
from pymatgen.core.periodic_table import Element
from pymatgen.core.xcfunc import XcFunc
from pymatgen.util.serialization import pmg_serialize
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
logger = logging.getLogger(__name__)
__all__ = [
"Pseudo",
"PseudoTable",
]
__author__ = "Matteo Giantomassi"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return "\n".join((traceback.format_exc(), str(sys.exc_info()[0])))
def _read_nlines(filename, nlines):
"""
Read at most nlines lines from file filename.
If nlines is < 0, the entire file is read.
"""
if nlines < 0:
with open(filename, 'r') as fh:
return fh.readlines()
lines = []
with open(filename, 'r') as fh:
for lineno, line in enumerate(fh):
if lineno == nlines: break
lines.append(line)
return lines
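# Usage sketch for _read_nlines with a throwaway file.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
    fh.write("line1\nline2\nline3\n")
assert _read_nlines(fh.name, 2) == ["line1\n", "line2\n"]
assert len(_read_nlines(fh.name, -1)) == 3
os.remove(fh.name)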
_l2str = {
0: "s",
1: "p",
2: "d",
3: "f",
4: "g",
5: "h",
6: "i",
}
_str2l = {v: k for k, v in _l2str.items()}
def l2str(l):
"""Convert the angular momentum l (int) to string."""
try:
return _l2str[l]
except KeyError:
return "Unknown angular momentum, received l = %s" % l
def str2l(s):
"""Convert a string to the angular momentum l (int)"""
return _str2l[s]
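# Round-trip sketch for the helpers above.
assert l2str(2) == "d"
assert str2l("d") == 2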
class Pseudo(six.with_metaclass(abc.ABCMeta, MSONable, object)):
"""
Abstract base class defining the methods that must be
implemented by the concrete pseudopotential sub-classes.
"""
@classmethod
def as_pseudo(cls, obj):
"""
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
"""
return obj if isinstance(obj, cls) else cls.from_file(obj)
@staticmethod
def from_file(filename):
"""
Build an instance of a concrete Pseudo subclass from filename.
Note: the parser knows the concrete class that should be instantiated.
Client code should rely on the abstract interface provided by Pseudo.
"""
return PseudoParser().parse(filename)
def __eq__(self, other):
if other is None: return False
return (self.md5 == other.md5 and
self.__class__ == other.__class__ and
self.Z == other.Z and
self.Z_val == other.Z_val and
self.l_max == other.l_max )
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
try:
return "<%s at %s>" % (self.__class__.__name__, os.path.relpath(self.filepath))
except:
# relpath can fail if the code is executed in daemon mode.
return "<%s at %s>" % (self.__class__.__name__, self.filepath)
def __str__(self):
return self.to_string()
def to_string(self, verbose=0):
"""String representation."""
lines = []
app = lines.append
app("<%s: %s>" % (self.__class__.__name__, self.basename))
app(" summary: " + self.summary.strip())
app(" number of valence electrons: %s" % self.Z_val)
app(" maximum angular momentum: %s" % l2str(self.l_max))
app(" angular momentum for local part: %s" % l2str(self.l_local))
app(" XC correlation: %s" % self.xc)
app(" supports spin-orbit: %s" % self.supports_soc)
if self.isnc:
app(" radius for non-linear core correction: %s" % self.nlcc_radius)
if self.has_hints:
for accuracy in ("low", "normal", "high"):
hint = self.hint_for_accuracy(accuracy=accuracy)
app(" hint for %s accuracy: %s" % (accuracy, str(hint)))
return "\n".join(lines)
@property
@abc.abstractmethod
def summary(self):
"""String summarizing the most important properties."""
@property
def filepath(self):
return os.path.abspath(self.path)
@property
def basename(self):
"""File basename."""
return os.path.basename(self.filepath)
@property
@abc.abstractmethod
def Z(self):
"""The atomic number of the atom."""
@property
@abc.abstractmethod
def Z_val(self):
"""Valence charge."""
@property
def type(self):
return self.__class__.__name__
@property
def element(self):
"""Pymatgen :class:`Element`."""
try:
return Element.from_Z(self.Z)
except (KeyError, IndexError):
return Element.from_Z(int(self.Z))
@property
def symbol(self):
"""Element symbol."""
return self.element.symbol
@property
@abc.abstractmethod
def l_max(self):
"""Maximum angular momentum."""
@property
@abc.abstractmethod
def l_local(self):
"""Angular momentum used for the local part."""
@property
def isnc(self):
"""True if norm-conserving pseudopotential."""
return isinstance(self, NcPseudo)
@property
def ispaw(self):
"""True if PAW pseudopotential."""
return isinstance(self, PawPseudo)
@lazy_property
def md5(self):
"""MD5 hash value."""
#if self.has_dojo_report and "md5" in self.dojo_report: return self.dojo_report["md5"]
return self.compute_md5()
def compute_md5(self):
"""Compute and erturn MD5 hash value."""
import hashlib
with open(self.path, "rt") as fh:
text = fh.read()
m = hashlib.md5(text.encode("utf-8"))
return m.hexdigest()
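# Stdlib sketch of compute_md5 on an in-memory string; "abc" has a
# well-known MD5 digest.
import hashlib

assert hashlib.md5("abc".encode("utf-8")).hexdigest() == \
    "900150983cd24fb0d6963f7d28e17f72"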
@property
@abc.abstractmethod
def supports_soc(self):
"""
True if the pseudo can be used in a calculation with spin-orbit coupling.
Concrete subclasses should provide an implementation that computes this value.
"""
@pmg_serialize
def as_dict(self, **kwargs):
return dict(
basename=self.basename,
type=self.type,
symbol=self.symbol,
Z=self.Z,
Z_val=self.Z_val,
l_max=self.l_max,
md5=self.md5,
filepath=self.filepath,
#xc=self.xc.as_dict(),
)
@classmethod
def from_dict(cls, d):
new = cls.from_file(d['filepath'])
# Consistency test based on md5
if "md5" in d and d["md5"] != new.md5:
raise ValueError("The md5 found in file does not agree with the one in dict\n"
"Received %s\nComputed %s" % (d["md5"], new.md5))
return new
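# Hedged round-trip sketch (needs a real pseudopotential file, so it is
# left commented; "Si.psp8" is a hypothetical path):
# p = Pseudo.from_file("Si.psp8")
# d = p.as_dict()
# assert Pseudo.from_dict(d) == p  # ValueError if the stored md5 mismatches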
def as_tmpfile(self, tmpdir=None):
"""
Copy the pseudopotential to a temporary file and return a new pseudopotential object.
Useful for unit tests in which we have to change the content of the file.
Args:
tmpdir: If None, a new temporary directory is created and files are copied here
else tmpdir is used.
"""
import tempfile, shutil
tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir
new_path = os.path.join(tmpdir, self.basename)
shutil.copy(self.filepath, new_path)
# Copy dojoreport file if present.
root, ext = os.path.splitext(self.filepath)
djrepo = root + ".djrepo"
if os.path.exists(djrepo):
shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))
# Build new object and copy dojo_report if present.
new = self.__class__.from_file(new_path)
if self.has_dojo_report: new.dojo_report = self.dojo_report.deepcopy()
return new
@property
def has_dojo_report(self):
"""True if the pseudo has an associated `DOJO_REPORT` section."""
return hasattr(self, "dojo_report") and bool(self.dojo_report)
@property
def djrepo_path(self):
"""The path of the djrepo file. None if file does not exist."""
root, ext = os.path.splitext(self.filepath)
path = root + ".djrepo"
return path
#if os.path.exists(path): return path
#return None
def hint_for_accuracy(self, accuracy="normal"):
"""
Returns a :class:`Hint` object with the suggested values of ecut [Ha] and
pawecutdg [Ha] for the given accuracy.
ecut and pawecutdg are set to zero if no hint is available.
Args:
accuracy: ["low", "normal", "high"]
"""
if not self.has_dojo_report:
return Hint(ecut=0., pawecutdg=0.)
# Get hints from dojoreport. Try first in hints then in ppgen_hints.
if "hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["hints"][accuracy])
elif "ppgen_hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["ppgen_hints"][accuracy])
return Hint(ecut=0., pawecutdg=0.)
@property
def has_hints(self):
"""
True if self provides hints on the cutoff energy.
"""
for acc in ["low", "normal", "high"]:
try:
if self.hint_for_accuracy(acc) is None:
return False
except KeyError:
return False
return True
def open_pspsfile(self, ecut=20, pawecutdg=None):
"""
Calls Abinit to compute the internal tables for the application of the
pseudopotential part. Returns :class:`PspsFile` object providing methods
        to plot and analyze the data, or None if the file is not found or not readable.
Args:
ecut: Cutoff energy in Hartree.
pawecutdg: Cutoff energy for the PAW double grid.
"""
from pymatgen.io.abinit.tasks import AbinitTask
from abipy.core.structure import Structure
from abipy.abio.factories import gs_input
from abipy.electrons.psps import PspsFile
# Build fake structure.
lattice = 10 * np.eye(3)
structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])
if self.ispaw and pawecutdg is None: pawecutdg = ecut * 4
inp = gs_input(structure, pseudos=[self], ecut=ecut, pawecutdg=pawecutdg,
spin_mode="unpolarized", kppa=1)
# Add prtpsps = -1 to make Abinit print the PSPS.nc file and stop.
inp["prtpsps"] = -1
# Build temporary task and run it (ignore retcode because we don't exit cleanly)
task = AbinitTask.temp_shell_task(inp)
task.start_and_wait()
filepath = task.outdir.has_abiext("_PSPS.nc")
if not filepath:
logger.critical("Cannot find PSPS.nc file in %s" % task.outdir)
return None
# Open the PSPS.nc file.
try:
return PspsFile(filepath)
except Exception as exc:
logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc)))
return None
class NcPseudo(six.with_metaclass(abc.ABCMeta, object)):
"""
Abstract class defining the methods that must be implemented
by the concrete classes representing norm-conserving pseudopotentials.
"""
@property
@abc.abstractmethod
def nlcc_radius(self):
"""
        Radius at which the core charge vanishes (i.e. cut-off in a.u.).
Returns 0.0 if nlcc is not used.
"""
@property
def has_nlcc(self):
"""True if the pseudo is generated with non-linear core correction."""
return self.nlcc_radius > 0.0
@property
def rcore(self):
"""Radius of the pseudization sphere in a.u."""
try:
return self._core
except AttributeError:
return None
class PawPseudo(six.with_metaclass(abc.ABCMeta, object)):
"""
Abstract class that defines the methods that must be implemented
by the concrete classes representing PAW pseudopotentials.
"""
#def nlcc_radius(self):
# """
# Radius at which the core charge vanish (i.e. cut-off in a.u.).
# Returns 0.0 if nlcc is not used.
# """
# return 0.0
#
#@property
#def has_nlcc(self):
# """True if the pseudo is generated with non-linear core correction."""
# return True
@property
@abc.abstractmethod
def paw_radius(self):
"""Radius of the PAW sphere in a.u."""
@property
def rcore(self):
"""Alias of paw_radius."""
return self.paw_radius
class AbinitPseudo(Pseudo):
"""
An AbinitPseudo is a pseudopotential whose file contains an abinit header.
"""
def __init__(self, path, header):
"""
Args:
path: Filename.
header: :class:`AbinitHeader` instance.
"""
self.path = path
self.header = header
self._summary = header.summary
# Build xc from header.
self.xc = XcFunc.from_abinit_ixc(header["pspxc"])
        for attr_name, value in header.items():
            # Hide these attributes since one should always use the public interface.
            setattr(self, "_" + attr_name, value)
@property
def summary(self):
"""Summary line reported in the ABINIT header."""
return self._summary.strip()
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
return self._zion
@property
def l_max(self):
return self._lmax
@property
def l_local(self):
return self._lloc
@property
def supports_soc(self):
        # Treat ONCVPSP pseudos.
if self._pspcod == 8:
switch = self.header["extension_switch"]
if switch in (0, 1): return False
if switch in (2, 3): return True
raise ValueError("Don't know how to handle extension_switch: %s" % switch)
# TODO Treat HGH HGHK pseudos
# As far as I know, other Abinit pseudos do not support SOC.
return False
class NcAbinitPseudo(NcPseudo, AbinitPseudo):
"""Norm-conserving pseudopotential in the Abinit format."""
@property
def summary(self):
return self._summary.strip()
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
return self._zion
@property
def l_max(self):
return self._lmax
@property
def l_local(self):
return self._lloc
@property
def nlcc_radius(self):
return self._rchrg
class PawAbinitPseudo(PawPseudo, AbinitPseudo):
"""Paw pseudopotential in the Abinit format."""
@property
def paw_radius(self):
return self._r_cut
#def orbitals(self):
@property
def supports_soc(self):
return True
class Hint(object):
"""
Suggested value for the cutoff energy [Hartree units]
and the cutoff energy for the dense grid (only for PAW pseudos).
"""
def __init__(self, ecut, pawecutdg=None):
self.ecut = ecut
self.pawecutdg = ecut if pawecutdg is None else pawecutdg
def __str__(self):
if self.pawecutdg is not None:
return "ecut: %s, pawecutdg: %s" % (self.ecut, self.pawecutdg)
else:
return "ecut: %s" % (self.ecut)
@pmg_serialize
def as_dict(self):
return dict(ecut=self.ecut, pawecutdg=self.pawecutdg)
@classmethod
def from_dict(cls, d):
return cls(**{k: v for k, v in d.items() if not k.startswith("@")})
def _dict_from_lines(lines, key_nums, sep=None):
"""
Helper function to parse formatted text structured like:
value1 value2 ... sep key1, key2 ...
key_nums is a list giving the number of keys for each line. 0 if line should be skipped.
sep is a string denoting the character that separates the keys from the value (None if
no separator is present).
Returns:
dict{key1 : value1, key2 : value2, ...}
Raises:
ValueError if parsing fails.
"""
if is_string(lines):
lines = [lines]
    if not isinstance(key_nums, collections.Iterable):
        key_nums = [key_nums]
if len(lines) != len(key_nums):
err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums))
raise ValueError(err_msg)
kwargs = Namespace()
for (i, nk) in enumerate(key_nums):
if nk == 0: continue
line = lines[i]
tokens = [t.strip() for t in line.split()]
values, keys = tokens[:nk], "".join(tokens[nk:])
        # Sanitize keys: in some cases we might get strings in the form: foo[,bar]
        keys = keys.replace("[", "").replace("]", "")
keys = keys.split(",")
if sep is not None:
check = keys[0][0]
if check != sep:
raise ValueError("Expecting separator %s, got %s" % (sep, check))
keys[0] = keys[0][1:]
if len(values) != len(keys):
msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (line, keys, values)
raise ValueError(msg)
kwargs.update(zip(keys, values))
return kwargs
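# Worked example (line taken from the FHI header documented below): calling
#   _dict_from_lines(["1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well"], [6])
# returns a dict-like Namespace mapping pspcod -> '1', pspxc -> '1', lmax -> '2',
# lloc -> '0', mmax -> '2001', r2well -> '.00000'. Values stay strings here;
# the header classes cast them to the proper types afterwards.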
class AbinitHeader(dict):
"""Dictionary whose keys can be also accessed as attributes."""
def __getattr__(self, name):
try:
# Default behaviour
return super(AbinitHeader, self).__getattribute__(name)
except AttributeError:
try:
# Try in the dictionary.
return self[name]
except KeyError as exc:
raise AttributeError(str(exc))
def _int_from_str(string):
"""
    Convert a string into an integer.
    Raises:
        ValueError if the string cannot be interpreted as a number.
"""
float_num = float(string)
int_num = int(float_num)
if float_num == int_num:
return int_num
else:
# Needed to handle pseudos with fractional charge
int_num = np.rint(float_num)
logger.warning("Converting float %s to int %s" % (float_num, int_num))
return int_num
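# Examples (hypothetical inputs): _int_from_str("21.00000") returns 21, while a
# fractional charge such as "20.5" is rounded via np.rint and a warning is logged.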
class NcAbinitHeader(AbinitHeader):
"""The abinit header found in the NC pseudopotential files."""
_attr_desc = namedtuple("att", "default astype")
_VARS = {
# Mandatory
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"mmax": _attr_desc(None, float),
# Optional variables for non linear-core correction. HGH does not have it.
"rchrg": _attr_desc(0.0, float), # radius at which the core charge vanish (i.e. cut-off in a.u.)
"fchrg": _attr_desc(0.0, float),
"qchrg": _attr_desc(0.0, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super(NcAbinitHeader, self).__init__()
# pseudos generated by APE use llocal instead of lloc.
if "llocal" in kwargs:
kwargs["lloc"] = kwargs.pop("llocal")
self.summary = summary.strip()
for key, desc in NcAbinitHeader._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
                    except Exception:
raise RuntimeError("Conversion Error for key %s, value %s" % (key, value))
self[key] = value
# Add remaining arguments, e.g. extension_switch
if kwargs:
self.update(kwargs)
@staticmethod
def fhi_header(filename, ppdesc):
"""
Parse the FHI abinit header. Example:
Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
21.00000 3.00000 940714 zatom, zion, pspdat
1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, 4)
try:
header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
except ValueError:
# The last record with rchrg ... seems to be optional.
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def hgh_header(filename, ppdesc):
"""
Parse the HGH abinit header. Example:
Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998)
10 8 010605 zatom,zion,pspdat
3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well
"""
lines = _read_nlines(filename, 3)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def gth_header(filename, ppdesc):
"""
Parse the GTH abinit header. Example:
Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996
1 1 960508 zatom,zion,pspdat
2 1 0 0 2001 0. pspcod,pspxc,lmax,lloc,mmax,r2well
0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4
0 0 0 rs, h1s, h2s
0 0 rp, h1p
1.36 .2 0.6 rcutoff, rloc
"""
lines = _read_nlines(filename, 7)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def oncvpsp_header(filename, ppdesc):
"""
Parse the ONCVPSP abinit header. Example:
Li ONCVPSP r_core= 2.01 3.02
3.0000 3.0000 140504 zatom,zion,pspd
8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well
5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg
2 2 0 0 0 nproj
0 extension_switch
0 -2.5000025868368D+00 -1.2006906995331D+00
1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00
2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02
"""
lines = _read_nlines(filename, 6)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
        # Replace pspd with pspdat.
header.update({'pspdat': header['pspd']})
header.pop('pspd')
# Read extension switch
header["extension_switch"] = int(lines[5].split()[0])
return NcAbinitHeader(summary, **header)
@staticmethod
def tm_header(filename, ppdesc):
"""
Parse the TM abinit header. Example:
Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994
100.00000 14.00000 940714 zatom, zion, pspdat
1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, -1)
header = []
for lineno, line in enumerate(lines):
header.append(line)
if lineno == 2:
# Read lmax.
tokens = line.split()
pspcod, pspxc, lmax, lloc = map(int, tokens[:4])
mmax, r2well = map(float, tokens[4:6])
#if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well":
# raise RuntimeError("%s: Invalid line\n %s" % (filename, line))
lines = lines[3:]
break
# TODO
# Parse the section with the projectors.
#0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
#.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
projectors = OrderedDict()
for idx in range(2*(lmax+1)):
line = lines[idx]
if idx % 2 == 0: proj_info = [line,]
if idx % 2 == 1:
proj_info.append(line)
d = _dict_from_lines(proj_info, [5,4])
projectors[int(d["l"])] = d
# Add the last line with info on nlcc.
header.append(lines[idx+1])
summary = header[0]
header = _dict_from_lines(header, [0,3,6,3])
return NcAbinitHeader(summary, **header)
class PawAbinitHeader(AbinitHeader):
"""The abinit header found in the PAW pseudopotential files."""
_attr_desc = namedtuple("att", "default astype")
_VARS = {
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"mmax": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"pspfmt": _attr_desc(None, str),
"creatorID": _attr_desc(None, int),
"basis_size": _attr_desc(None, int),
"lmn_size": _attr_desc(None, int),
"orbitals": _attr_desc(None, list),
"number_of_meshes": _attr_desc(None, int),
"r_cut": _attr_desc(None, float), # r_cut(PAW) in the header
"shape_type": _attr_desc(None, int),
"rshape": _attr_desc(None, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super(PawAbinitHeader, self).__init__()
self.summary = summary.strip()
for key, desc in self._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
                    except Exception:
raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value))
self[key] = value
if kwargs:
raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))
@staticmethod
def paw_header(filename, ppdesc):
"""
Parse the PAW abinit header. Examples:
Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5
28.000 18.000 20061204 : zatom,zion,pspdat
7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw3 1305 : pspfmt,creatorID
5 13 : basis_size,lmn_size
0 0 1 1 2 : orbitals
3 : number_of_meshes
1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step]
2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step]
3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step]
2.3000000000 : r_cut(SPH)
2 0.
Another format:
C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0
6.000 4.000 20090106 : zatom,zion,pspdat
7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw4 2230 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step]
1.5550009124 : r_cut(PAW)
3 0. : shape_type,rshape
        Yet another one:
Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1
14.000 4.000 20120814 : zatom,zion,pspdat
7 11 1 0 663 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw5 1331 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step]
1.5669671236 : r_cut(PAW)
2 0. : shape_type,rshape
"""
supported_formats = ["paw3", "paw4", "paw5"]
if ppdesc.format not in supported_formats:
raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
lines = _read_nlines(filename, -1)
summary = lines[0]
header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
lines = lines[5:]
# TODO
# Parse orbitals and number of meshes.
header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
#print filename, header
        # Skip the mesh lines.
lines = lines[2+num_meshes:]
#for midx in range(num_meshes):
# l = midx + 1
#print lines[0]
header["r_cut"] = float(lines[0].split(":")[0])
#print lines[1]
header.update(_dict_from_lines(lines[1], [2], sep=":"))
#print("PAW header\n", header)
return PawAbinitHeader(summary, **header)
class PseudoParserError(Exception):
"""Base Error class for the exceptions raised by :class:`PseudoParser`"""
class PseudoParser(object):
"""
Responsible for parsing pseudopotential files and returning pseudopotential objects.
Usage::
pseudo = PseudoParser().parse("filename")
"""
Error = PseudoParserError
# Supported values of pspcod
ppdesc = namedtuple("ppdesc", "pspcod name psp_type format")
# TODO Recheck
_PSPCODES = OrderedDict( {
1: ppdesc(1, "TM", "NC", None),
2: ppdesc(2, "GTH", "NC", None),
3: ppdesc(3, "HGH", "NC", None),
4: ppdesc(4, "Teter", "NC", None),
#5: ppdesc(5, "NC", , None),
6: ppdesc(6, "FHI", "NC", None),
        7: ppdesc(7, "PAW_abinit_text", "PAW", None),
8: ppdesc(8, "ONCVPSP", "NC", None),
10: ppdesc(10, "HGHK", "NC", None),
})
del ppdesc
    # Renumber functionals from oncvpsp. TODO: confirm that 3 is 2.
#_FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'},
# 2: {'n': 5, 'name': 'HL'},
# 3: {'n': 2, 'name': 'PWCA'},
# 4: {'n': 11, 'name': 'PBE'}}
def __init__(self):
        # List of files that have been parsed successfully.
self._parsed_paths = []
        # List of files that could not be parsed.
self._wrong_paths = []
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
"""
Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects.
"""
        # Normalize extensions so that each one starts with a dot.
        # (exclude_exts may be a tuple, so build a new list instead of mutating it.)
        exclude_exts = [e.strip() if e.strip().startswith(".") else "." + e.strip()
                        for e in exclude_exts]
# Exclude files depending on the extension.
paths = []
for fname in os.listdir(dirname):
root, ext = os.path.splitext(fname)
path = os.path.join(dirname, fname)
if (ext in exclude_exts or fname in exclude_fnames or
fname.startswith(".") or not os.path.isfile(path)): continue
paths.append(path)
pseudos = []
for path in paths:
# Parse the file and generate the pseudo.
try:
pseudo = self.parse(path)
            except Exception:
pseudo = None
if pseudo is not None:
pseudos.append(pseudo)
                self._parsed_paths.append(path)
else:
                self._wrong_paths.append(path)
return pseudos
def read_ppdesc(self, filename):
"""
Read the pseudopotential descriptor from file filename.
Returns:
Pseudopotential descriptor. None if filename is not a valid pseudopotential file.
Raises:
`PseudoParserError` if fileformat is not supported.
"""
if filename.endswith(".xml"):
raise self.Error("XML pseudo not supported yet")
else:
# Assume file with the abinit header.
lines = _read_nlines(filename, 80)
for lineno, line in enumerate(lines):
if lineno == 2:
try:
tokens = line.split()
pspcod, pspxc = map(int, tokens[:2])
                    except Exception:
msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (filename, line)
logger.critical(msg)
return None
#if tokens[-1].strip().replace(" ","") not in ["pspcod,pspxc,lmax,lloc,mmax,r2well",
# "pspcod,pspxc,lmax,llocal,mmax,r2well"]:
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
if pspcod not in self._PSPCODES:
raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod))
ppdesc = self._PSPCODES[pspcod]
if pspcod == 7:
# PAW -> need to know the format pspfmt
tokens = lines[lineno+1].split()
pspfmt, creatorID = tokens[:2]
#if tokens[-1].strip() != "pspfmt,creatorID":
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
ppdesc = ppdesc._replace(format = pspfmt)
return ppdesc
return None
def parse(self, filename):
"""
Read and parse a pseudopotential file. Main entry point for client code.
Returns:
pseudopotential object or None if filename is not a valid pseudopotential file.
"""
path = os.path.abspath(filename)
# Only PAW supports XML at present.
if filename.endswith(".xml"):
return PawXmlSetup(path)
ppdesc = self.read_ppdesc(path)
if ppdesc is None:
logger.critical("Cannot find ppdesc in %s" % path)
return None
psp_type = ppdesc.psp_type
parsers = {
"FHI": NcAbinitHeader.fhi_header,
"GTH": NcAbinitHeader.gth_header,
"TM": NcAbinitHeader.tm_header,
"Teter": NcAbinitHeader.tm_header,
"HGH": NcAbinitHeader.hgh_header,
"HGHK": NcAbinitHeader.hgh_header,
"ONCVPSP": NcAbinitHeader.oncvpsp_header,
"PAW_abinit_text": PawAbinitHeader.paw_header,
}
try:
header = parsers[ppdesc.name](path, ppdesc)
except Exception:
raise self.Error(path + ":\n" + straceback())
if psp_type == "NC":
pseudo = NcAbinitPseudo(path, header)
elif psp_type == "PAW":
pseudo = PawAbinitPseudo(path, header)
else:
raise NotImplementedError("psp_type not in [NC, PAW]")
return pseudo
#TODO use RadialFunction from pseudo_dojo.
class RadialFunction(namedtuple("RadialFunction", "mesh values")):
pass
class PawXmlSetup(Pseudo, PawPseudo):
def __init__(self, filepath):
self.path = os.path.abspath(filepath)
        # Get the XML root (this trick is used so that the object is picklable).
root = self.root
# Get the version of the XML format
self.paw_setup_version = root.get("version")
# Info on the atom.
atom_attrib = root.find("atom").attrib
#self._symbol = atom_attrib["symbol"]
self._zatom = int(float(atom_attrib["Z"]))
self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]])
# Build xc from header.
xc_info = root.find("xc_functional").attrib
self.xc = XcFunc.from_type_name(xc_info["type"], xc_info["name"])
# Old XML files do not define this field!
# In this case we set the PAW radius to None.
#self._paw_radius = float(root.find("PAW_radius").attrib["rpaw"])
#self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()}
pawr_element = root.find("PAW_radius")
self._paw_radius = None
if pawr_element is not None:
self._paw_radius = float(pawr_element.attrib["rpaw"])
#<valence_states>
# <state n="2" l="0" f="2" rc="1.10" e="-0.6766" id="N-2s"/>
# <state n="2" l="1" f="3" rc="1.10" e="-0.2660" id="N-2p"/>
# <state l="0" rc="1.10" e=" 0.3234" id="N-s1"/>
# <state l="1" rc="1.10" e=" 0.7340" id="N-p1"/>
# <state l="2" rc="1.10" e=" 0.0000" id="N-d1"/>
#</valence_states>
#
# The valence_states element contains several state elements.
# For this setup, the first two lines describe bound eigenstates
# with occupation numbers and principal quantum numbers.
# Notice, that the three additional unbound states should have no f and n attributes.
# In this way, we know that only the first two bound states (with f and n attributes)
# should be used for constructing an initial guess for the wave functions.
self.valence_states = {}
for node in root.find("valence_states"):
attrib = AttrDict(node.attrib)
assert attrib.id not in self.valence_states
self.valence_states[attrib.id] = attrib
#print(self.valence_states)
# Parse the radial grids
self.rad_grids = {}
for node in root.findall("radial_grid"):
grid_params = node.attrib
gid = grid_params["id"]
assert gid not in self.rad_grids
self.rad_grids[gid] = self._eval_grid(grid_params)
def __getstate__(self):
"""
        Return the state to be pickled as the contents of the instance.
        We simply drop the XML root element, since Element objects cannot be pickled.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_root"]}
@property
def root(self):
try:
return self._root
except AttributeError:
from xml.etree import cElementTree as Et
tree = Et.parse(self.filepath)
self._root = tree.getroot()
return self._root
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
return self.valence
# FIXME
@property
def l_max(self):
"""Maximum angular momentum."""
return None
@property
def l_local(self):
"""Angular momentum used for the local part."""
return None
@property
def summary(self):
"""String summarizing the most important properties."""
return ""
@property
def paw_radius(self):
return self._paw_radius
@property
def supports_soc(self):
"""
Here I assume that the ab-initio code can treat the SOC within the on-site approximation
"""
return True
@staticmethod
def _eval_grid(grid_params):
"""
This function receives a dictionary with the parameters defining the
radial mesh and returns a `ndarray` with the mesh
"""
eq = grid_params.get("eq").replace(" ", "")
istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend"))
indices = list(range(istart, iend+1))
if eq == 'r=a*exp(d*i)':
a, d = float(grid_params['a']), float(grid_params['d'])
mesh = [a * np.exp(d * i) for i in indices]
elif eq == 'r=a*i/(n-i)':
a, n = float(grid_params['a']), float(grid_params['n'])
mesh = [a * i / (n - i) for i in indices]
elif eq == 'r=a*(exp(d*i)-1)':
a, d = float(grid_params['a']), float(grid_params['d'])
mesh = [a * (np.exp(d * i) - 1.0) for i in indices]
elif eq == 'r=d*i':
d = float(grid_params['d'])
mesh = [d * i for i in indices]
elif eq == 'r=(i/n+a)^5/a-a^4':
a, n = float(grid_params['a']), float(grid_params['n'])
mesh = [(i / n + a)**5 / a - a**4 for i in indices]
else:
raise ValueError('Unknown grid type: %s' % eq)
return np.array(mesh)
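    # Example (hypothetical grid parameters): the log grid r = a*exp(d*i) with
    # a=1e-4, d=0.01, istart=0, iend=2 evaluates to approximately
    # [1.000e-04, 1.010e-04, 1.020e-04].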
def _parse_radfunc(self, func_name):
"""Parse the first occurence of func_name in the XML file."""
node = self.root.find(func_name)
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
return self.rad_grids[grid], values, node.attrib
def _parse_all_radfuncs(self, func_name):
"""Parse all the nodes with tag func_name in the XML file."""
for node in self.root.findall(func_name):
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
yield self.rad_grids[grid], values, node.attrib
@property
def ae_core_density(self):
"""The all-electron radial density."""
try:
return self._ae_core_density
except AttributeError:
mesh, values, attrib = self._parse_radfunc("ae_core_density")
self._ae_core_density = RadialFunction(mesh, values)
return self._ae_core_density
@property
def pseudo_core_density(self):
"""The pseudized radial density."""
try:
return self._pseudo_core_density
except AttributeError:
mesh, values, attrib = self._parse_radfunc("pseudo_core_density")
self._pseudo_core_density = RadialFunction(mesh, values)
return self._pseudo_core_density
@property
def ae_partial_waves(self):
"""Dictionary with the AE partial waves indexed by state."""
try:
return self._ae_partial_waves
except AttributeError:
self._ae_partial_waves = {}
for mesh, values, attrib in self._parse_all_radfuncs("ae_partial_wave"):
state = attrib["state"]
#val_state = self.valence_states[state]
self._ae_partial_waves[state] = RadialFunction(mesh, values)
return self._ae_partial_waves
@property
def pseudo_partial_waves(self):
"""Dictionary with the pseudo partial waves indexed by state."""
try:
return self._pseudo_partial_waves
except AttributeError:
self._pseudo_partial_waves = {}
for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
state = attrib["state"]
#val_state = self.valence_states[state]
self._pseudo_partial_waves[state] = RadialFunction(mesh, values)
return self._pseudo_partial_waves
@property
def projector_functions(self):
"""Dictionary with the PAW projectors indexed by state."""
try:
return self._projector_functions
except AttributeError:
self._projector_functions = {}
for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"):
state = attrib["state"]
#val_state = self.valence_states[state]
self._projector_functions[state] = RadialFunction(mesh, values)
return self._projector_functions
@add_fig_kwargs
def plot_densities(self, ax=None, **kwargs):
"""
Plot the PAW densities.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel('r [Bohr]')
#ax.set_ylabel('density')
for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
rden = getattr(self, den_name)
label = "$n_c$" if i == 1 else "$\\tilde{n}_c$"
ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)
ax.legend(loc="best")
return fig
@add_fig_kwargs
def plot_waves(self, ax=None, **kwargs):
"""
Plot the AE and the pseudo partial waves.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
ax.set_ylabel("$r\\phi,\\, r\\tilde\\phi\\, [Bohr]^{-\\frac{1}{2}}$")
ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
#ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.pseudo_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state)
for state, rfunc in self.ae_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state)
ax.legend(loc="best")
return fig
@add_fig_kwargs
def plot_projectors(self, ax=None, **kwargs):
"""
Plot the PAW projectors.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
title = kwargs.pop("title", "Projectors")
ax.grid(True)
ax.set_xlabel('r [Bohr]')
ax.set_ylabel("$r\\tilde p\\, [Bohr]^{-\\frac{1}{2}}$")
ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
#ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.projector_functions.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)
ax.legend(loc="best")
return fig
#@add_fig_kwargs
#def plot_potentials(self, **kwargs):
# """
# ================ ==============================================================
# kwargs Meaning
# ================ ==============================================================
# title Title of the plot (Default: None).
# show True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps' to save the figure to a file.
# ================ ==============================================================
# Returns:
# `matplotlib` figure
# """
# title = kwargs.pop("title", "Potentials")
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.grid(True)
# ax.set_xlabel('r [Bohr]')
# ax.set_ylabel('density')
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
# for state, rfunc in self.potentials.items():
# ax.plot(rfunc.mesh, rfunc.values, label="TPROJ: " + state)
# ax.legend(loc="best")
# if title is not None: fig.suptitle(title)
# if show: plt.show()
# if savefig: fig.savefig(savefig)
# return fig
class PseudoTable(six.with_metaclass(abc.ABCMeta, collections.Sequence, MSONable, object)):
"""
Define the pseudopotentials from the element table.
    Individual elements are accessed by name, symbol or atomic number.
For example, the following all retrieve iron:
print elements[26]
Fe
print elements.Fe
Fe
print elements.symbol('Fe')
Fe
print elements.name('iron')
Fe
print elements.isotope('Fe')
Fe
"""
@classmethod
def as_table(cls, items):
"""
Return an instance of :class:`PseudoTable` from the iterable items.
"""
if isinstance(items, cls): return items
return cls(items)
@classmethod
def from_dir(cls, top, exts=None, exclude_dirs="_*"):
"""
Find all pseudos in the directory tree starting from top.
Args:
top: Top of the directory tree
            exts: List of file extensions. If exts == "all_files",
                we try to open all files in top.
exclude_dirs: Wildcard used to exclude directories.
return: :class:`PseudoTable` sorted by atomic number Z.
"""
pseudos = []
if exts == "all_files":
for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
if os.path.isfile(f):
try:
p = Pseudo.from_file(f)
if p:
pseudos.append(p)
else:
logger.info('Skipping file %s' % f)
                    except Exception:
logger.info('Skipping file %s' % f)
if not pseudos:
logger.warning('No pseudopotentials parsed from folder %s' % top)
return None
logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos))
else:
            if exts is None: exts = ("psp8",)
for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
try:
pseudos.append(Pseudo.from_file(p))
except Exception as exc:
logger.critical("Error in %s:\n%s" % (p, exc))
return cls(pseudos).sort_by_z()
def __init__(self, pseudos):
"""
Args:
pseudos: List of pseudopotentials or filepaths
"""
# Store pseudos in a default dictionary with z as key.
        # Note that we can have more than one pseudo for a given z,
        # hence the values are lists of pseudos.
if not isinstance(pseudos, collections.Iterable):
pseudos = [pseudos]
if len(pseudos) and is_string(pseudos[0]):
pseudos = list_strings(pseudos)
self._pseudos_with_z = defaultdict(list)
for pseudo in pseudos:
if not isinstance(pseudo, Pseudo):
pseudo = Pseudo.from_file(pseudo)
if pseudo is not None:
self._pseudos_with_z[pseudo.Z].append(pseudo)
for z in self.zlist:
pseudo_list = self._pseudos_with_z[z]
symbols = [p.symbol for p in pseudo_list]
symbol = symbols[0]
if any(symb != symbol for symb in symbols):
raise ValueError("All symbols must be equal while they are: %s" % str(symbols))
setattr(self, symbol, pseudo_list)
def __getitem__(self, Z):
"""
Retrieve pseudos for the atomic number z. Accepts both int and slice objects.
"""
if isinstance(Z, slice):
assert Z.stop is not None
pseudos = []
for znum in iterator_from_slice(Z):
pseudos.extend(self._pseudos_with_z[znum])
return self.__class__(pseudos)
else:
return self.__class__(self._pseudos_with_z[Z])
def __len__(self):
return len(list(self.__iter__()))
def __iter__(self):
"""Process the elements in Z order."""
for z in self.zlist:
for pseudo in self._pseudos_with_z[z]:
yield pseudo
def __repr__(self):
return "<%s at %s>" % (self.__class__.__name__, id(self))
def __str__(self):
return self.to_table()
@property
def allnc(self):
"""True if all pseudos are norm-conserving."""
return all(p.isnc for p in self)
@property
def allpaw(self):
"""True if all pseudos are PAW."""
return all(p.ispaw for p in self)
@property
def zlist(self):
"""Ordered list with the atomic numbers available in the table."""
return sorted(list(self._pseudos_with_z.keys()))
#def max_ecut_pawecutdg(self, accuracy):
#"""Return the maximum value of ecut and pawecutdg based on the hints available in the pseudos."""
# ecut = max(p.hint_for_accuracy(accuracy=accuracy).ecut for p in self)
# pawecutdg = max(p.hint_for_accuracy(accuracy=accuracy).pawecutdg for p in self)
# return ecut, pawecutdg
def as_dict(self, **kwargs):
d = {}
for p in self:
            k, count = str(p.element), 1
            # Handle multiple pseudos with the same name!
            while k in d:
                k = k.split("#")[0] + "#" + str(count)
count += 1
d.update({k: p.as_dict()})
d['@module'] = self.__class__.__module__
d['@class'] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
pseudos = []
dec = MontyDecoder()
for k, v in d.items():
if not k.startswith('@'):
pseudos.append(dec.process_decoded(v))
return cls(pseudos)
def is_complete(self, zmax=118):
"""
        True if table is complete i.e. all elements with Z < zmax have at least one pseudopotential.
"""
for z in range(1, zmax):
if not self[z]: return False
return True
def all_combinations_for_elements(self, element_symbols):
"""
        Return a list with all the possible combinations of pseudos
        for the given list of element_symbols.
Each item is a list of pseudopotential objects.
Example::
table.all_combinations_for_elements(["Li", "F"])
"""
d = OrderedDict()
for symbol in element_symbols:
d[symbol] = self.select_symbols(symbol, ret_list=True)
from itertools import product
return list(product(*d.values()))
def pseudo_with_symbol(self, symbol, allow_multi=False):
"""
Return the pseudo with the given chemical symbol.
Args:
            symbol: String with the chemical symbol of the element.
allow_multi: By default, the method raises ValueError
if multiple occurrences are found. Use allow_multi to prevent this.
Raises:
            ValueError if symbol is not found, or if multiple occurrences are present and allow_multi is False.
"""
pseudos = self.select_symbols(symbol, ret_list=True)
if not pseudos or (len(pseudos) > 1 and not allow_multi):
raise ValueError("Found %d occurrences of symbol %s" % (len(pseudos), symbol))
if not allow_multi:
return pseudos[0]
else:
return pseudos
def pseudos_with_symbols(self, symbols):
"""
Return the pseudos with the given chemical symbols.
Raises:
            ValueError if one of the symbols is not found or multiple occurrences are present.
"""
pseudos = self.select_symbols(symbols, ret_list=True)
found_symbols = [p.symbol for p in pseudos]
duplicated_elements = [s for s, o in collections.Counter(found_symbols).items() if o > 1]
if duplicated_elements:
raise ValueError("Found multiple occurrences of symbol(s) %s" % ', '.join(duplicated_elements))
missing_symbols = [s for s in symbols if s not in found_symbols]
if missing_symbols:
raise ValueError("Missing data for symbol(s) %s" % ', '.join(missing_symbols))
return pseudos
def select_symbols(self, symbols, ret_list=False):
"""
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
"""
symbols = list_strings(symbols)
exclude = symbols[0].startswith("-")
if exclude:
if not all(s.startswith("-") for s in symbols):
raise ValueError("When excluding symbols, all strings must start with `-`")
symbols = [s[1:] for s in symbols]
symbols = set(symbols)
pseudos = []
for p in self:
if exclude:
if p.symbol in symbols: continue
else:
if p.symbol not in symbols: continue
pseudos.append(p)
if ret_list:
return pseudos
else:
return self.__class__(pseudos)
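    # Usage sketch (hypothetical table): select_symbols(["Si", "O"]) keeps only the
    # Si and O pseudos, while select_symbols(["-Si", "-O"]) keeps everything else.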
def get_pseudos_for_structure(self, structure):
"""
Return the list of :class:`Pseudo` objects to be used for this :class:`Structure`.
Args:
structure: pymatgen :class:`Structure`.
Raises:
`ValueError` if one of the chemical symbols is not found or
            multiple occurrences are present in the table.
"""
return self.pseudos_with_symbols(structure.symbol_set)
def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream)
def to_table(self, filter_function=None):
"""Return string with data in tabular form."""
table = []
for p in self:
            if filter_function is not None and not filter_function(p): continue
            table.append([p.basename, p.symbol, p.Z_val, p.l_max, p.l_local, p.xc, p.type])
        return tabulate(table, headers=["basename", "symbol", "Z_val", "l_max", "l_local", "XC", "type"],
                        tablefmt="grid")
def sorted(self, attrname, reverse=False):
"""
Sort the table according to the value of attribute attrname.
Return:
New class:`PseudoTable` object
"""
        attrs = []
        for i, pseudo in enumerate(self):
            try:
                a = getattr(pseudo, attrname)
            except AttributeError:
                a = np.inf
            attrs.append((i, a))
        # Sort attrs, and build a new table with the pseudos in sorted order.
        pseudos = list(self)
        return self.__class__([pseudos[i] for i, _ in sorted(attrs, key=lambda t: t[1], reverse=reverse)])
def sort_by_z(self):
"""Return a new :class:`PseudoTable` with pseudos sorted by Z"""
return self.__class__(sorted(self, key=lambda p: p.Z))
def select(self, condition):
"""
Select only those pseudopotentials for which condition is True.
Return new class:`PseudoTable` object.
Args:
condition:
Function that accepts a :class:`Pseudo` object and returns True or False.
"""
return self.__class__([p for p in self if condition(p)])
def with_dojo_report(self):
"""Select pseudos containing the DOJO_REPORT section. Return new class:`PseudoTable` object."""
return self.select(condition=lambda p: p.has_dojo_report)
def select_rows(self, rows):
"""
Return new class:`PseudoTable` object with pseudos in the given rows of the periodic table.
        rows can be either an int or a list of integers.
"""
if not isinstance(rows, (list, tuple)): rows = [rows]
return self.__class__([p for p in self if p.element.row in rows])
def select_family(self, family):
# e.g element.is_alkaline
return self.__class__([p for p in self if getattr(p.element, "is_" + family)])
|
matk86/pymatgen
|
pymatgen/io/abinit/pseudos.py
|
Python
|
mit
| 63,625
|
[
"ABINIT",
"pymatgen"
] |
0d7f6db916fc196315e88fa7a0b955fb3f99f4849569e2ad30910b913251006e
|
"""
authors: Xi Chen, Eric García de Ceca, Jaime Mendizábal Roche
This module is an advanced version of our MLP using TensorFlow.
It provides several optimization methods: "SGD",
"momentum", "adagrad", "RMS_prop", "adadelta", "nesterov" and "adam".
"""
from __future__ import print_function, division
import sys
from datetime import datetime
import tensorflow as tf
import numpy as np
NOW = datetime.utcnow().strftime("%Y%m%d%H%M%S")
ROOT_LOGDIR = 'tf_logs'
LOG_DIR = "{}/run-{}".format(ROOT_LOGDIR, NOW)
class NetConstructor(object):
"""
In the class NetConstructor, we construct a Convolutional Neural Network.
"""
def __init__(self, layers):
tf.reset_default_graph()
if type(layers[0]['dim']) is int:
layers[0]['dim'] = (layers[0]['dim'],)
self.layers = layers
self.activations_dict = {'relu': tf.nn.relu,
'sigmoid': tf.nn.sigmoid,
'tanh': tf.nn.tanh,
'identity': tf.identity,
'softmax': tf.nn.softmax}
self.create_net()
def create_net(self):
"""
        Create the Convolutional Neural Network: its placeholders, layers and loss.
"""
def init_tn(shape, w_or_b):
"""
Outputs random values from a truncated normal distribution.
"""
return tf.truncated_normal(shape, stddev=layer['stddev_' + w_or_b])
def init_zeros(shape, w_or_b=None):
"""
Create a zero matrix.
"""
return tf.zeros(shape)
def reduce_mul(vector):
"""
Compute the product of all numbers in a vector.
"""
ret = 1
for i in range(len(vector)):
ret *= vector[i]
return ret
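        # Note: for a non-empty vector of ints, reduce_mul is equivalent to
        # int(np.prod(vector)); it is used below to flatten a tensor shape
        # into the input dimension of the fully connected layer.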
def fc_layer():
"""
The Fully Connected layer is a traditional Multi Layer Perceptron that uses
an activation function in the output layer.
The term “Fully Connected” implies that every neuron in the previous layer is
connected to every neuron on the next layer. Their activations can hence be computed
with a matrix multiplication followed by a bias offset. The purpose of the Fully
Connected layer is to use these features for classifying the input image into
various classes based on the training dataset.
            First, we read dim, the activation and the weight/bias initializers from the layer dict.
            Second, we flatten the incoming unit to a matrix of shape (batch, features).
            Third, we create the weight and bias variables with tf.Variable.
            Finally, the affine map Wx + b followed by the activation h gives the new unit.
"""
with tf.name_scope('fc_layer'):
dim = layer['dim']
h = self.activations_dict[layer['activation']]
init_w = init_dict[layer['init_w']]
init_b = init_dict[layer['init_b']]
unit_dim = [-1] + [reduce_mul(unit.get_shape().as_list()[1:])]
reshaped_unit = tf.reshape(unit, unit_dim)
weights_shape = (unit_dim[1], dim)
weights = tf.Variable(init_w(weights_shape, w_or_b='w'), name='weights')
bias = tf.Variable(init_b(dim, w_or_b='b'), name='bias')
activ = tf.add(tf.matmul(reshaped_unit, weights), bias, name='activation')
return h(activ, name='unit')
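        # A layer dict consumed by fc_layer could look like (hypothetical values):
        #   {'type': 'fc', 'dim': 50, 'activation': 'relu',
        #    'init_w': 'truncated_normal', 'stddev_w': 0.1, 'init_b': 'zeros'}
        # where 'stddev_w' is read by init_tn when 'truncated_normal' is selected.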
def conv_layer():
"""
Convolutional Layer
The Conv layer is the core building block of a Convolutional Network
that does most of the computational heavy lifting.
The process is : do the product of part of unit(filter_size) and filter,
then compute the sum and add the biases.
"""
with tf.name_scope('conv_layer'):
h = self.activations_dict[layer['activation']]
init_w = init_dict[layer['init_w']]
init_b = init_dict[layer['init_b']]
filter_size = layer['k_size']+(int(unit.get_shape()[3]), layer['channels'])
filter = tf.Variable(init_w(filter_size, w_or_b='w'), name='filter')
aux = tf.nn.conv2d(input=unit,
filter=filter,
strides=(1,) + layer['strides'] + (1,),
padding=layer['padding'],
name='activation_before_bias')
biases = tf.Variable(init_b(layer['channels'], w_or_b='b'), name='biases')
activ = tf.nn.bias_add(aux, biases, name='activation')
return h(activ, name='unit')
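        # A matching conv layer dict (hypothetical values):
        #   {'type': 'conv', 'k_size': (3, 3), 'channels': 16, 'strides': (1, 1),
        #    'padding': 'SAME', 'activation': 'relu',
        #    'init_w': 'truncated_normal', 'stddev_w': 0.1, 'init_b': 'zeros'}
        # k_size and strides must be tuples, since they are concatenated with
        # other tuples when building filter_size and the strides argument.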
def maxpool_layer():
"""
            Performs max pooling (takes the largest value in each window) on the input.
value: A Tensor. Unit
ksize: A list of size of the window for each dimension of the input tensor.
strides: A list of the stride of the sliding window for each dimension of
the input tensor.
            padding: A string, either 'VALID' or 'SAME'.
name: Optional name for the operation.
"""
with tf.name_scope('maxpool_layer'):
return tf.nn.max_pool(value=unit,
ksize=(1,) + layer['k_size'] + (1,),
strides=(1,) + layer['strides'] + (1,),
padding=layer['padding'],
name='maxpool_layer')
def dropout_layer():
"""
Computes dropout.
With probability keep_prob, outputs the input element scaled up by keep_prob,
otherwise outputs 0. The scaling is so that the expected sum is unchanged.
By default, each element is kept or dropped independently.
x: A tensor.
keep_prob: A scalar Tensor with the same type as x.
The probability that each element is kept.
name: A name for this operation (optional).
"""
with tf.name_scope('dropout_layer'):
return tf.nn.dropout(x=unit,
keep_prob=layer['prob'],
name='dropout_layer')
def LRN_layer():
"""
            Local Response Normalization as defined by TensorFlow. The 4-D input tensor
is treated as a 3-D array of 1-D vectors (along the last dimension),
and each vector is normalized independently. Within a given vector,
each component is divided by the weighted, squared sum of inputs
within depth_radius. In detail,
sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
output = input / (bias + alpha * sqr_sum) ** beta
These values will be given by our layer:
input: A Tensor. Our unit.
depth_radius: An optional int.
bias: An optional float. An offset (usually positive to avoid dividing by 0).
alpha: An optional float. A scale factor, usually positive.
beta: An optional float. Defaults to 0.5. An exponent.
name: A name for the operation (optional). We use here 'LRN_layer'
"""
with tf.name_scope('LRN_layer'):
return tf.nn.local_response_normalization(
input=unit,
bias=layer['k'],
alpha=layer['alpha'],
beta=layer['beta'],
depth_radius=layer['r'],
name='LRN_layer')
def BN_layer():
"""
Batch normalization: Normalizes a tensor by mean and variance,
and applies (optionally) a scale to it, as well as an offset:
(scale * (input - mean) / variance) + offset
"""
with tf.name_scope('BN_layer'):
mean, variance = tf.nn.moments(unit, [0], keep_dims=True)
offset = tf.Variable(tf.zeros(mean.get_shape()), name='offset')
scale = tf.Variable(tf.ones(variance.get_shape()), name='scale')
return tf.nn.batch_normalization(
x=unit,
mean=mean,
variance=variance,
offset=offset,
scale=scale,
variance_epsilon=1e-8,
name='BN_layer')
layer_dict = {'fc': fc_layer,
'conv': conv_layer,
'maxpool': maxpool_layer,
'dropout': dropout_layer,
'LRN': LRN_layer,
'BN': BN_layer}
init_dict = {'truncated_normal': init_tn,
'zeros': init_zeros}
        loss_dict = {'softmax': tf.nn.softmax_cross_entropy_with_logits,
                     # tf.nn.l2_loss takes a single tensor, so wrap it to match the
                     # (logits, labels) keyword signature used below.
                     'identity': lambda logits, labels: tf.nn.l2_loss(logits - labels),
                     'sigmoid': tf.nn.sigmoid_cross_entropy_with_logits}
self.x = tf.placeholder(tf.float32, shape=(None,)+self.layers[0]['dim'], name='x')
self.y_ = tf.placeholder(tf.float32, shape=(None, self.layers[-1]['dim']), name='y_')
        unit = self.x
for layer in self.layers[1:-1]:
layer_funct = layer_dict[layer['type']]
unit = layer_funct()
layer = self.layers[-1]
layer_funct = layer_dict[layer['type']]
self.last_activation = layer['activation']
layer['activation'] = 'identity'
self.logits = layer_funct()
layer['activation'] = self.last_activation
with tf.name_scope('loss'):
loss_funct = loss_dict[self.last_activation]
self.loss = tf.reduce_mean(loss_funct(logits=self.logits,
labels=self.y_),
name='loss')
self.saver = tf.train.Saver()
file_writer = tf.summary.FileWriter(LOG_DIR, tf.get_default_graph())
def train(self, x_train, t_train,
nb_epochs=1000,
batch_size=10,
method=('SGD', {'eta': 0.1}),
              seed=1,
use_validation=False,
x_val=None,
t_val=None,
show_cost=True,
load=False):
"""
        It is the principal function of the MLP: it trains on the input data with
        our Convolutional Neural Network. After training with this
        function, we will have the results and enough data to print them.
Args:
x_train: input value. data for train
t_train: List of correct results.
"1" means red_point and "0" means black_point
nb_epochs: number of the epochs of train
batch_size: size of the data to train in every epoch
method: name of method and the parameters, like eta (learning rate),
beta (a value used to compute the gradients), gamma (fraction of the update vector
or momentum term), beta_1 (fraction of estimate of the first moment (the mean) of
the gradients), beta_2 (fraction of estimate of the second moment), epsilon (a
smoothing term that avoids division by zero), etc.
seed: A Python integer. Used to create random seeds.
            use_validation: boolean value that selects whether the cost is evaluated
                on the validation set (x_val, t_val) instead of the training set.
            show_cost: boolean value to print the cost or not.
"""
def SGD_adapter(params, name):
"""
We use the function defined by tensorflow.
Args:
params: It is a tuple which has the values of learning_rate, beta1, beta2,
epsilon, decay, rho and momentum. We only use learning_rate here, which
is a Tensor or a floating point value.(eta)
name: Optional name for the operations created when applying gradients.
Defaults to "GradientDescent".
Returns:
A new gradient descent optimizer
"""
return tf.train.GradientDescentOptimizer(
learning_rate=params['eta'],
name=name)
def adam_adapter(params, name):
"""
We use the function defined by tensorflow.
Args:
params: It is a tuple which has the values of learning_rate, beta1, beta2,
epsilon, decay, rho and momentum. We use:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates
beta2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam".
Returns:
A new Adam optimizer
"""
return tf.train.AdamOptimizer(
learning_rate=params['eta'],
beta1=params['beta_1'],
beta2=params['beta_2'],
epsilon=params['epsilon'],
name=name)
def adagrad_adapter(params, name):
"""
We use the function defined by tensorflow.
Args:
params: It is a tuple which has the values of learning_rate, beta1, beta2,
epsilon, decay, rho and momentum. We use:
learning_rate: A Tensor or a floating point value. The learning rate.
name: Optional name prefix for the operations created when applying gradients.
Defaults to "Adagrad".
Returns:
A new Adagrad optimizer.
"""
return tf.train.AdagradOptimizer(
learning_rate=params['eta'],
name=name)
def RMS_adapter(params, name):
"""
We use the function defined by tensorflow.
Args:
params: It is a tuple which has the values of learning_rate, beta1, beta2,
epsilon, decay, rho and momentum. We use:
learning_rate: A Tensor or a floating point value. The learning rate.
decay: Discounting factor for the history/coming gradient
epsilon: Small value to avoid zero denominator.
name: Optional name prefix for the operations created when applying gradients.
Defaults to "RMSProp".
Returns:
A new RMSProp optimizer.
"""
return tf.train.RMSPropOptimizer(
learning_rate=params['eta'],
decay=params['gamma'],
epsilon=params['epsilon'],
name=name)
def adadelta_adapter(params, name):
"""
We use the function defined by tensorflow.
Args:
params: It is a tuple which has the values of learning_rate, beta1, beta2,
epsilon, decay, rho and momentum. We use:
learning_rate: A Tensor or a floating point value. The learning rate.
rho: A Tensor or a floating point value. The decay rate.
epsilon: A Tensor or a floating point value.
A constant epsilon used to better conditioning the grad update.
name: Optional name prefix for the operations created when applying gradients.
Defaults to "Adadelta".
Returns:
A new Adadelta optimizer.
"""
return tf.train.AdadeltaOptimizer(
learning_rate=params['eta'],
rho=params['gamma'],
epsilon=params['epsilon'],
name=name)
def momentum_adapter(params, name):
"""
We use the function defined by tensorflow.
Args:
params: It is a tuple which has the values of learning_rate, beta1, beta2,
epsilon, decay, rho and momentum. We use:
learning_rate: A Tensor or a floating point value. The learning rate.
momentum: A Tensor or a floating point value. The momentum.
name: Optional name prefix for the operations created when applying gradients.
Defaults to "Momentum".
Returns:
A new Momentum optimizer
"""
return tf.train.MomentumOptimizer(
learning_rate=params['eta'],
momentum=params['gamma'],
name=name)
def nesterov_adapter(params, name):
"""
We use the function defined by tensorflow. We need 4 arguments for this function.
Args:
params: It is a tuple which has the values of learning_rate, beta1, beta2,
epsilon, decay, rho and momentum. We use:
learning_rate: A Tensor or a floating point value. The learning rate.
momentum: A Tensor or a floating point value. The momentum.
use_nesterov: If True use Nesterov Momentum
name: Optional name prefix for the operations created when applying gradients.
Defaults to "Momentum".
Returns:
A new Nesterov optimizer
"""
return tf.train.MomentumOptimizer(
learning_rate=params['eta'],
momentum=params['gamma'],
use_nesterov=True,
name=name)
optimizers_dict = {'SGD': SGD_adapter,
'adam': adam_adapter,
'adagrad': adagrad_adapter,
'RMS_prop': RMS_adapter,
'adadelta': adadelta_adapter,
'momentum': momentum_adapter,
'nesterov': nesterov_adapter}
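        # Example method tuples (hypothetical hyper-parameters):
        #   ('SGD', {'eta': 0.1})
        #   ('momentum', {'eta': 0.1, 'gamma': 0.9})
        #   ('adam', {'eta': 1e-3, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-8})
        # Each adapter above reads exactly the keys shown here.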
with tf.name_scope('train'):
method_class = optimizers_dict[method[0]]
optimizer = method_class(method[1], name='optimizer')
self.train_step = optimizer.minimize(self.loss, name='train_step')
        tf.set_random_seed(seed)
        self.init = tf.global_variables_initializer()
nb_data = x_train.shape[0]
index_list = np.arange(nb_data)
nb_batches = nb_data // batch_size
with tf.Session() as sess:
if load:
sess.run(self.init)
self.saver.restore(sess, "./MLP.ckpt")
else:
sess.run(self.init)
for epoch in range(nb_epochs):
np.random.shuffle(index_list)
for batch in range(nb_batches):
batch_indices = index_list[batch * batch_size:
(batch + 1) * batch_size]
x_batch = x_train[batch_indices, :]
t_batch = t_train[batch_indices, :]
sess.run(self.train_step,
feed_dict={self.x: x_batch,
self.y_: t_batch})
cost = 0.0
if use_validation:
cost = sess.run(self.loss, feed_dict={self.x: x_val,
self.y_: t_val})
else:
if show_cost:
cost = sess.run(self.loss, feed_dict={self.x: x_train,
self.y_: t_train})
sys.stdout.write('cost=%f %d\r' % (cost, epoch))
sys.stdout.flush()
self.saver.save(sess, "./MLP.ckpt")
def predict(self, x_test):
with tf.Session() as sess:
self.saver.restore(sess, "./MLP.ckpt")
pred = self.activations_dict[self.last_activation](self.logits)
y_pred = sess.run(pred, feed_dict={self.x: x_test})
return y_pred
if __name__ == "__main__":
nb_black = 15
nb_red = 15
nb_data = nb_black + nb_red
x_data_black = np.random.randn(nb_black, 2) + np.array([0, 0])
x_data_red = np.random.randn(nb_red, 2) + np.array([10, 10])
x_data = np.vstack((x_data_black, x_data_red))
t_data = np.asarray([0]*nb_black + [1]*nb_red).reshape(nb_data, 1)
layer_0 = {'dim': 2}
layer_1 = {'type': 'fc', 'dim': 50, 'activation': 'sigmoid', 'init': 'he'}
layer_2 = {'type': 'fc', 'dim': 1, 'activation': 'sigmoid', 'init': 'xavier'}
layer_list = [layer_0, layer_1, layer_2]
net = NetConstructor(layer_list)
net.train(x_data, t_data)
|
daxadal/Computational-Geometry
|
Practica_4/net_constructor_documented.py
|
Python
|
apache-2.0
| 21,096
|
[
"NEURON"
] |
d6e34011dc8b9a67288f326a22d0bb362340147df5dd777eabfe453e1ea0409d
|
class InvalidIntervalException(Exception):
def __init__(self, l, u):
self.l = l
self.u = u
def __str__(self):
return "Invalid interval [" + str(l) + ", " + str(u) + "]"
class ArithmeticException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def lt(x, y):
if x == y:
return False
elif x == '-' or y == '+':
return True
elif y == '-' or x == '+':
return False
else:
return x < y
def leq(x, y):
if x == y or x == '-' or y == '+':
return True
elif y == '-' or x == '+':
return False
else:
return x < y
def gt(x, y):
if x == y:
return False
elif x == '+' or y == '-':
return True
elif y == '+' or x == '-':
return False
else:
return x > y
def geq(x, y):
if x == y or x == '+' or y == '-':
return True
elif y == '+' or x == '-':
return False
else:
return x > y
def min(x, y):
if lt(x, y):
return x
else:
return y
def max(x, y):
if gt(x, y):
return x
else:
return y
def add(x, y):
if (x == '-' and y == '+') or (y == '-' and x == '+'):
raise ArithmeticException("Adding minus and plus infinity.")
elif x == '-' or y == '-':
return '-'
elif x == '+' or y == '+':
return '+'
else:
return x + y
def mul(x, y):
    if (x == 0 or y == 0):
return 0
elif (x == '-' and lt(y, 0)) or (y == '-' and lt(x, 0)):
return '+'
elif (x == '+' and gt(y, 0)) or (y == '+' and gt(x, 0)):
return '+'
elif (x == '-' and gt(y, 0)) or (y == '-' and gt(x, 0)):
return '-'
elif (x == '+' and lt(y, 0)) or (y == '+' and lt(x, 0)):
return '-'
else:
return x * y
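# Behavior sketch (added for illustration, not part of the original file) of
# the extended arithmetic above, where '-' and '+' denote minus and plus
# infinity:
#   add(3, 4)     -> 7
#   add('-', 5)   -> '-'    # minus infinity absorbs any finite value
#   mul('-', -2)  -> '+'    # a negative number times minus infinity
#   mul(0, '+')   -> 0      # zero annihilates even infinity
#   lt('-', 0)    -> True   # minus infinity is below everything
#   add('-', '+')           # raises ArithmeticException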
class Interval:
def __init__(self, l='-', u='+'):
if (l != '-' and u != '+'):
if (l > u):
raise InvalidIntervalException(l, u)
        self.l = l
        self.u = u
def intersection(self, i):
if (gt(self.l, i.u) or lt(self.u, i.l)):
return BottomInterval()
else:
return Interval(max(self.l, i.l), min(self.u, i.u))
def __str__(self):
return "[" + str(self.l) + ", " + str(self.u) + "]"
class SymbolicInterval(Interval):
"""This is an interval that contains a symbolic limit, which is given by
the bounds of a program name, e.g.: [-inf, ub(b) + 1]"""
def __init__(self, bound, op='=='):
self.bound = bound
self.op = op
self.l = '-'
self.u = '+'
def __str__(self):
if self.op == '==':
return "[lb(" + self.bound.name + "), ub(" + self.bound.name + ")]"
elif self.op == '<=':
return "[-, ub(" + self.bound.name + ")]"
elif self.op == '<':
return "[-, ub(" + self.bound.name + ") - 1]"
elif self.op == '>=':
return "[lb(" + self.bound.name + "), +]"
elif self.op == '>':
return "[lb(" + self.bound.name + ") + 1, +]"
else:
return "[" + str(self.l) + ", " + str(self.u) + "]"
class BottomInterval(Interval):
"""This interval is used to represent the empty interval. It arises, for
instance, from the intersection of disjoint intervals."""
def __str__(self):
return "[., .]"
class VarNode:
"""A VarNode represents a program variable."""
def __init__(self, name, interval=Interval()):
self.name = name
self.interval = interval
def __str__(self):
return self.name + str(self.interval)
class UnaryOp:
"""A constraint like sink = a * source + b \intersec [l, u]"""
def __init__(self, source, sink, a=1, b=0, interval=Interval('-', '+')):
self.source = source
self.sink = sink
self.a = a
self.b = b
self.i = interval
def getUseList(self):
"""Returns the list containting the single variable used in this unary
operation"""
return [self.source]
def __str__(self):
        self_str = str(self.sink) + " = " + str(self.a) + " * " + str(self.source)
        self_str += " + " + str(self.b) + " \int " + str(self.i)
return self_str
def eval(self):
"""Read the interval in source, apply the operation on it, and return it."""
l = add(mul(self.a, self.source.interval.l), self.b)
u = add(mul(self.a, self.source.interval.u), self.b)
if gt(l, u):
auxInterval = Interval(u, l)
else:
auxInterval = Interval(l, u)
return auxInterval.intersection(self.i)
def fixIntersects(self):
"""Replace symbolic intervals with hard-wired constants."""
if isinstance(self.i, SymbolicInterval):
l = self.i.bound.interval.l
u = self.i.bound.interval.u
if self.i.op == '==':
self.i = Interval(l, u)
elif self.i.op == '<=':
self.i = Interval(self.i.l, u)
elif self.i.op == '<':
self.i = Interval(self.i.l, add(u, -1))
elif self.i.op == '>=':
self.i = Interval(l, self.i.u)
elif self.i.op == '>':
self.i = Interval(add(l, 1), self.i.u)
else:
self.i = Interval()
def toDotStr(self):
lb = " " + str(hash(self)) + " [shape=box,label =\""
space = ""
if self.a != 1:
lb += "*(" + str(self.a) + ")"
space = " "
if self.b != 0:
lb += space + "+(" + str(self.b) + ")"
space = " "
if isinstance(self.i, SymbolicInterval) or self.i.l != '-' or self.i.u != '+':
lb += space + "INT" + str(self.i)
lb += "\"]\n"
lb += " " + self.source.name + " -> " + str(hash(self)) + "\n"
lb += " " + str(hash(self)) + " -> " + self.sink.name + "\n"
return lb
class PlusOp:
"""A constraint like sink = src1 + src2"""
def __init__(self, src1, src2, sink):
self.src1 = src1
self.src2 = src2
self.sink = sink
def getUseList(self):
"""Returns the list of variables used in this binary operation."""
return [self.src1, self.src2]
def __str__(self):
return self.sink.name + " = " + self.src1.name + " + " + self.src2.name
def eval(self):
"""Read the interval in source, apply the operation on it, and return it."""
int1 = self.src1.interval
int2 = self.src2.interval
return Interval(add(int1.l, int2.l), add(int1.u, int2.u))
def fixIntersects(self):
"""Replace symbolic intervals with hard-wired constants. Normally this
kind of operations have no intersect to fix, but the method is here so
that we can invoke it on any kind of operation."""
def toDotStr(self):
lb = " " + str(hash(self)) + " [shape=box,label =\" + \"]\n"
lb += " " + self.src1.name + " -> " + str(hash(self)) + "\n"
lb += " " + self.src2.name + " -> " + str(hash(self)) + "\n"
lb += " " + str(hash(self)) + " -> " + self.sink.name + "\n"
return lb
class PhiOp:
"""A constraint like sink = phi(src1, src2)"""
def __init__(self, src1, src2, sink):
self.src1 = src1
self.src2 = src2
self.sink = sink
def getUseList(self):
"""Return the variables used in this phi node."""
return [self.src1, self.src2]
def __str__(self):
return str(self.sink) + " =phi (" + str(self.src1) + ", " + str(self.src2) + ")"
def eval(self):
"""The result of evaluating the phi-function is the union of the ranges of
every variable used in the phi."""
int1 = self.src1.interval
int2 = self.src2.interval
        # Remember, the union of bottom and anything is anything:
if isinstance(int1, BottomInterval):
return Interval(int2.l, int2.u)
elif isinstance(int2, BottomInterval):
return Interval(int1.l, int1.u)
return Interval(min(int1.l, int2.l), max(int1.u, int2.u))
def fixIntersects(self):
"""Replace symbolic intervals with hard-wired constants. Normally this
kind of operations have no intersect to fix, but the method is here so
that we can invoke it on any kind of operation."""
def toDotStr(self):
lb = " " + str(hash(self)) + " [shape=box,label =\" phi \"]\n"
lb += " " + self.src1.name + " -> " + str(hash(self)) + "\n"
lb += " " + self.src2.name + " -> " + str(hash(self)) + "\n"
lb += " " + str(hash(self)) + " -> " + self.sink.name + "\n"
return lb
def toDot(Title, Variables, Operations):
"""Print the edges in dot format."""
print 'digraph "' + Title + '" {'
for v, k in Variables.iteritems():
print " ", v, "[label=\"", str(k), "\"]"
for op in Operations:
print op.toDotStr()
print '}'
def buildUseMap(Variables, Operations):
"""This method builds a map that binds each variable label to the operations
where this variable is used."""
    useMap = {}
    for var in Variables.values():
        uses = []
        for op in Operations:
            if var in op.getUseList():
                uses.append(op)
        useMap[var.name] = uses
    return useMap
def processGraph(fileName, findIntervals):
"""This method finds the intervals of the graph in the input file."""
try:
f = open(fileName, 'r')
strFile = f.read()
exec(strFile)
UseMap = buildUseMap(Variables, Operations)
EntryPoints = [var.name for var in Variables.values() if not isinstance(var.interval, BottomInterval)]
findIntervals(Variables, Operations, UseMap, EntryPoints)
except IOError:
print fileName, " is not a valid file name."
def readGraph(findIntervals):
    """Reads file names from the user and processes each corresponding graph."""
    fileName = raw_input("Please, enter a file name: ")
    while fileName != "":
        processGraph(fileName, findIntervals)
        fileName = raw_input("Please, enter a file name: ")
# Finds the SCCs of the constraint graph via Nuutila's algorithm.
class Nuutila:
def __init__(self, Variables, UseMap, EntryPoints):
"""Finds the strongly connected components in the constraint graph formed by
Variables and UseMap."""
self.index = 0
self.dfs = {}
self.root = {}
self.inComponent = set()
self.components = {}
self.worklist = []
for var in Variables.keys():
self.dfs[var] = -1
for var in EntryPoints:
self.visit(var, [], UseMap)
def visit(self, v, stack, UseMap):
"""Finds SCCs using Nuutila's algorithm."""
self.dfs[v] = self.index
self.index += 1
self.root[v] = v
# Visit every node defined in an instruction that uses v:
for w in UseMap[v]:
if self.dfs[w.sink.name] < 0:
self.visit(w.sink.name, stack, UseMap)
if not w.sink.name in self.inComponent and self.dfs[self.root[v]] >= self.dfs[self.root[w.sink.name]]:
self.root[v] = self.root[w.sink.name]
# The second phase of the algorithm assigns components to stacked nodes:
if self.root[v] == v:
# This worklist is not part of Nuutila's original algorithm. It is just
# a speedy way to get the topological ordering of SCCs.
self.worklist.append(v)
SCC = [v]
self.inComponent.add(v)
while len(stack) > 0 and self.dfs[stack[-1]] > self.dfs[v]:
node = stack[-1]
stack.pop()
self.inComponent.add(node)
SCC.append(node)
self.components[v] = SCC
else:
stack.append(v)
def printSCCs(self):
"""Prints the result of Nuutila's algorithm."""
        for i in range(len(self.worklist) - 1, -1, -1):
v = self.worklist[i]
print "SCC of:", v
SCC = self.components[v]
for w in SCC:
print " ", w
|
vhscampos/range-analysis
|
prototype/PythonRangeAnalysis/bck/DS1.py
|
Python
|
gpl-2.0
| 11,116
|
[
"VisIt"
] |
c7204696ec29b5def20f1441fa6c35aa496e9f7f31d94ec71e26efba8b2364cc
|
"""
This file contains Python code illustrating the creation and manipulation of
vtkTable objects.
"""
from vtk import *
#------------------------------------------------------------------------------
# Script Entry Point (i.e., main() )
#------------------------------------------------------------------------------
if __name__ == "__main__":
""" Main entry point of this python script """
print "vtkTable Example 1: Building a vtkTable from scratch."
#----------------------------------------------------------
# Create an empty table
T = vtkTable()
#----------------------------------------------------------
# Create Column 1 (IDs)
col1 = vtkIntArray()
col1.SetName("ID")
for i in range(1, 8):
col1.InsertNextValue(i)
T.AddColumn(col1)
#----------------------------------------------------------
# Create Column 2 (Names)
namesList = ['Bob', 'Ann', 'Sue', 'Bill', 'Joe', 'Jill', 'Rick']
col2 = vtkStringArray()
col2.SetName("Name")
for val in namesList:
col2.InsertNextValue(val)
T.AddColumn(col2)
#----------------------------------------------------------
# Create Column 3 (Ages)
agesList = [12, 25, 72, 11, 31, 36, 32]
col3 = vtkIntArray()
col3.SetName("Age")
for val in agesList:
col3.InsertNextValue(val)
T.AddColumn(col3)
T.Dump(6)
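    #----------------------------------------------------------
    # Illustrative follow-up (not part of the original example): individual
    # cells can be read back with vtkTable.GetValue(row, column), which
    # returns a vtkVariant, e.g.:
    #   print T.GetValue(0, 1).ToString()   # row 0 of the "Name" column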
print "vtkTable Example 1: Finished."
|
berendkleinhaneveld/VTK
|
Examples/Infovis/Python/tables1.py
|
Python
|
bsd-3-clause
| 1,422
|
[
"VTK"
] |
9c30b52102788b9e68141197f9081fd31b3f46460b4a285b2dec2b99dff7084c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""transforms.py -- This module contains parameter transformations that may be
useful to transform from parameters that are easier to _sample_ in to the
parameters required for building SED models.
They can be used as ``"depends_on"`` entries in parameter specifications.
"""
import numpy as np
from ..sources.constants import cosmo
__all__ = ["stellar_logzsol", "delogify_mass",
"tburst_from_fage", "tage_from_tuniv", "zred_to_agebins",
"dustratio_to_dust1",
"logsfr_ratios_to_masses", "logsfr_ratios_to_sfrs",
"logsfr_ratios_to_masses_flex", "logsfr_ratios_to_agebins",
"zfrac_to_masses", "zfrac_to_sfrac", "zfrac_to_sfr", "masses_to_zfrac",
"sfratio_to_sfr", "sfratio_to_mass"]
# --------------------------------------
# --- Basic Convenience Transforms ---
# --------------------------------------
def stellar_logzsol(logzsol=0.0, **extras):
"""Simple function that takes an argument list and returns the value of the
`logzsol` argument (i.e. the stellar metallicity)
Parameters
----------
logzsol : float
        FSPS stellar metallicity parameter.
Returns
-------
logzsol: float
The same.
"""
return logzsol
def delogify_mass(logmass=0.0, **extras):
"""Simple function that takes an argument list including a `logmass`
parameter and returns the corresponding linear mass.
Parameters
----------
logmass : float
The log10(mass)
Returns
-------
mass : float
The mass in linear units
"""
return 10**logmass
def total_mass(mass=0.0, **extras):
"""Simple function that takes an argument list uncluding a `mass`
parameter and returns the corresponding total mass.
Parameters
----------
mass : ndarray of shape ``(N_bins,)``
Vector of masses in bins
Returns
-------
total_mass : float
Total mass in linear units
"""
return mass.sum()
# --------------------------------------
# Fancier transforms
# --------------------------------------
def tburst_from_fage(tage=0.0, fage_burst=0.0, **extras):
"""This function transfroms from a fractional age of a burst to an absolute
age. With this transformation one can sample in ``fage_burst`` without
worry about the case ``tburst`` > ``tage``.
Parameters
----------
tage : float, Gyr
The age of the host galaxy.
fage_burst : float between 0 and 1
The fraction of the host age at which the burst occurred.
Returns
-------
tburst : float, Gyr
The age of the host when the burst occurred (i.e. the FSPS ``tburst``
parameter)
"""
return tage * fage_burst
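# Worked example (added for clarity): a host galaxy with ``tage`` = 10 Gyr and
# ``fage_burst`` = 0.25 gives ``tburst`` = 10 * 0.25 = 2.5 Gyr, which by
# construction never exceeds ``tage``.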
def tage_from_tuniv(zred=0.0, tage_tuniv=1.0, **extras):
"""This function calculates a galaxy age from the age of the universe at
``zred`` and the age given as a fraction of the age of the universe. This
allows for both ``zred`` and ``tage`` parameters without ``tage`` exceeding
the age of the universe.
Parameters
----------
zred : float
Cosmological redshift.
tage_tuniv : float between 0 and 1
The ratio of ``tage`` to the age of the universe at ``zred``.
Returns
-------
tage : float
The stellar population age, in Gyr
"""
tuniv = cosmo.age(zred).value
tage = tage_tuniv * tuniv
return tage
def zred_to_agebins(zred=0.0, agebins=[], **extras):
"""Set the nonparameteric SFH age bins depending on the age of the universe
at ``zred``. The first bin is not altered and the last bin is always 15% of
the upper edge of the oldest bin, but the intervening bins are evenly
spaced in log(age).
Parameters
----------
zred : float
Cosmological redshift. This sets the age of the universe.
agebins : ndarray of shape ``(nbin, 2)``
The SFH bin edges in log10(years).
Returns
-------
agebins : ndarray of shape ``(nbin, 2)``
The new SFH bin edges.
"""
tuniv = cosmo.age(zred).value * 1e9
tbinmax = tuniv * 0.85
ncomp = len(agebins)
agelims = list(agebins[0]) + np.linspace(agebins[1][1], np.log10(tbinmax), ncomp-2).tolist() + [np.log10(tuniv)]
return np.array([agelims[:-1], agelims[1:]]).T
def dustratio_to_dust1(dust2=0.0, dust_ratio=0.0, **extras):
"""Set the value of dust1 from the value of dust2 and dust_ratio
Parameters
----------
dust2 : float
The diffuse dust V-band optical depth (the FSPS ``dust2`` parameter.)
dust_ratio : float
The ratio of the extra optical depth towards young stars to the diffuse
optical depth affecting all stars.
Returns
-------
dust1 : float
The extra optical depth towards young stars (the FSPS ``dust1``
parameter.)
"""
return dust2 * dust_ratio
# --------------------------------------
# --- Transforms for the continuity non-parametric SFHs used in (Leja et al. 2018) ---
# --------------------------------------
def logsfr_ratios_to_masses(logmass=None, logsfr_ratios=None, agebins=None,
**extras):
"""This converts from an array of log_10(SFR_j / SFR_{j+1}) and a value of
log10(\Sum_i M_i) to values of M_i. j=0 is the most recent bin in lookback
time.
"""
nbins = agebins.shape[0]
sratios = 10**np.clip(logsfr_ratios, -100, 100) # numerical issues...
dt = (10**agebins[:, 1] - 10**agebins[:, 0])
coeffs = np.array([ (1. / np.prod(sratios[:i])) * (np.prod(dt[1: i+1]) / np.prod(dt[: i]))
for i in range(nbins)])
m1 = (10**logmass) / coeffs.sum()
return m1 * coeffs
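# Worked example (added for clarity): for two equal-width bins, logmass = 8
# and logsfr_ratios = [0.0] (i.e. SFR_0 / SFR_1 = 1) give coeffs = [1, 1], so
# each bin is assigned half of the 10**8 Msun total.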
def logsfr_ratios_to_sfrs(logmass=None, logsfr_ratios=None, agebins=None, **extras):
"""Convenience function
"""
masses = logsfr_ratios_to_masses(logmass=logmass, logsfr_ratios=logsfr_ratios,
agebins=agebins)
dt = (10**agebins[:, 1] - 10**agebins[:, 0])
return masses / dt
# --------------------------------------
# --- Transforms for the flexible agebin continuity non-parametric SFHs used in (Leja et al. 2018) ---
# --------------------------------------
def logsfr_ratios_to_masses_flex(logmass=None, logsfr_ratios=None,
logsfr_ratio_young=None, logsfr_ratio_old=None,
**extras):
logsfr_ratio_young = np.clip(logsfr_ratio_young, -100, 100)
logsfr_ratio_old = np.clip(logsfr_ratio_old, -100, 100)
abins = logsfr_ratios_to_agebins(logsfr_ratios=logsfr_ratios, **extras)
nbins = abins.shape[0] - 2
syoung, sold = 10**logsfr_ratio_young, 10**logsfr_ratio_old
dtyoung, dt1 = (10**abins[:2, 1] - 10**abins[:2, 0])
dtn, dtold = (10**abins[-2:, 1] - 10**abins[-2:, 0])
mbin = (10**logmass) / (syoung*dtyoung/dt1 + sold*dtold/dtn + nbins)
myoung = syoung * mbin * dtyoung / dt1
mold = sold * mbin * dtold/dtn
n_masses = np.full(nbins, mbin)
return np.array(myoung.tolist() + n_masses.tolist() + mold.tolist())
def logsfr_ratios_to_agebins(logsfr_ratios=None, agebins=None, **extras):
"""This transforms from SFR ratios to agebins by assuming a constant amount
of mass forms in each bin agebins = np.array([NBINS,2])
use equation:
delta(t1) = tuniv / (1 + SUM(n=1 to n=nbins-1) PROD(j=1 to j=n) Sn)
where Sn = SFR(n) / SFR(n+1) and delta(t1) is width of youngest bin
"""
# numerical stability
logsfr_ratios = np.clip(logsfr_ratios, -100, 100)
# calculate delta(t) for oldest, youngest bins (fixed)
lower_time = (10**agebins[0, 1] - 10**agebins[0, 0])
upper_time = (10**agebins[-1, 1] - 10**agebins[-1, 0])
tflex = (10**agebins[-1,-1] - upper_time - lower_time)
# figure out other bin sizes
n_ratio = logsfr_ratios.shape[0]
sfr_ratios = 10**logsfr_ratios
dt1 = tflex / (1 + np.sum([np.prod(sfr_ratios[:(i+1)]) for i in range(n_ratio)]))
# translate into agelims vector (time bin edges)
agelims = [1, lower_time, dt1+lower_time]
for i in range(n_ratio):
agelims += [dt1*np.prod(sfr_ratios[:(i+1)]) + agelims[-1]]
#agelims += [tuniv[0]]
agelims += [10**agebins[-1, 1]]
agebins = np.log10([agelims[:-1], agelims[1:]]).T
return agebins
# --------------------------------------
# -- Transforms for the fixed+flexible non-parametric SFHs used in (Suess et al. 2021) --
# --------------------------------------
def logsfr_ratios_to_masses_psb(logmass=None, logsfr_ratios=None,
logsfr_ratio_young=None, logsfr_ratio_old=None,
tlast=None, tflex=None, nflex=None, nfixed=None,
agebins=None, **extras):
"""This is a modified version of logsfr_ratios_to_masses_flex above. This now
assumes that there are nfixed fixed-edge timebins at the beginning of
the universe, followed by nflex flexible timebins that each form an equal
stellar mass. The final bin has variable width and variable SFR; the width
of the bin is set by the parameter tlast.
The major difference between this and the transform above is that
logsfr_ratio_old is a vector.
"""
# clip for numerical stability
nflex = nflex[0]; nfixed = nfixed[0]
logsfr_ratio_young = np.clip(logsfr_ratio_young[0], -7, 7)
logsfr_ratio_old = np.clip(logsfr_ratio_old, -7, 7)
syoung, sold = 10**logsfr_ratio_young, 10**logsfr_ratio_old
sratios = 10.**np.clip(logsfr_ratios, -7, 7) # numerical issues...
# get agebins
abins = psb_logsfr_ratios_to_agebins(logsfr_ratios=logsfr_ratios,
agebins=agebins, tlast=tlast, tflex=tflex, nflex=nflex, nfixed=nfixed, **extras)
    # find the mass formed in each bin
dtyoung, dt1 = (10**abins[:2, 1] - 10**abins[:2, 0])
dtold = 10**abins[-nfixed-1:, 1] - 10**abins[-nfixed-1:, 0]
old_factor = np.zeros(nfixed)
for i in range(nfixed):
old_factor[i] = (1. / np.prod(sold[:i+1]) * np.prod(dtold[1:i+2]) / np.prod(dtold[:i+1]))
mbin = 10**logmass / (syoung*dtyoung/dt1 + np.sum(old_factor) + nflex)
myoung = syoung * mbin * dtyoung / dt1
mold = mbin * old_factor
n_masses = np.full(nflex, mbin)
return np.array(myoung.tolist() + n_masses.tolist() + mold.tolist())
def psb_logsfr_ratios_to_agebins(logsfr_ratios=None, agebins=None,
tlast=None, tflex=None, nflex=None, nfixed=None, **extras):
"""This is a modified version of logsfr_ratios_to_agebins above. This now
assumes that there are nfixed fixed-edge timebins at the beginning of
the universe, followed by nflex flexible timebins that each form an equal
stellar mass. The final bin has variable width and variable SFR; the width
of the bin is set by the parameter tlast.
For the flexible bins, we again use the equation:
delta(t1) = tuniv / (1 + SUM(n=1 to n=nbins-1) PROD(j=1 to j=n) Sn)
where Sn = SFR(n) / SFR(n+1) and delta(t1) is width of youngest bin
"""
# dumb way to de-arrayify values...
tlast = tlast[0]; tflex = tflex[0]
try: nflex = nflex[0]
except IndexError: pass
try: nfixed = nfixed[0]
except IndexError: pass
# numerical stability
logsfr_ratios = np.clip(logsfr_ratios, -7, 7)
# flexible time is t_flex - youngest bin (= tlast, which we fit for)
# this is also equal to tuniv - upper_time - lower_time
tf = (tflex - tlast) * 1e9
# figure out other bin sizes
n_ratio = logsfr_ratios.shape[0]
sfr_ratios = 10**logsfr_ratios
dt1 = tf / (1 + np.sum([np.prod(sfr_ratios[:(i+1)]) for i in range(n_ratio)]))
# translate into agelims vector (time bin edges)
agelims = [1, (tlast*1e9), dt1+(tlast*1e9)]
for i in range(n_ratio):
agelims += [dt1*np.prod(sfr_ratios[:(i+1)]) + agelims[-1]]
agelims += list(10**agebins[-nfixed:,1])
abins = np.log10([agelims[:-1], agelims[1:]]).T
return abins
# --------------------------------------
# --- Transforms for Dirichlet non-parametric SFH used in (Leja et al. 2017) ---
# --------------------------------------
def zfrac_to_sfrac(z_fraction=None, **extras):
"""This transforms from independent dimensionless `z` variables to sfr
fractions. The transformation is such that sfr fractions are drawn from a
Dirichlet prior. See Betancourt et al. 2010 and Leja et al. 2017
Parameters
----------
z_fraction : ndarray of shape ``(Nbins-1,)``
latent variables drawn from a specific set of Beta distributions. (see
Betancourt 2010)
Returns
-------
sfrac : ndarray of shape ``(Nbins,)``
The star formation fractions (See Leja et al. 2017 for definition).
"""
sfr_fraction = np.zeros(len(z_fraction) + 1)
sfr_fraction[0] = 1.0 - z_fraction[0]
for i in range(1, len(z_fraction)):
sfr_fraction[i] = np.prod(z_fraction[:i]) * (1.0 - z_fraction[i])
sfr_fraction[-1] = 1 - np.sum(sfr_fraction[:-1])
return sfr_fraction
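# Worked example (added for clarity): z_fraction = [0.5, 0.5] yields
# sfr_fraction = [0.5, 0.25, 0.25]; each latent variable breaks off a fraction
# of the remaining "stick", and the resulting fractions sum to one.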
def zfrac_to_masses(total_mass=None, z_fraction=None, agebins=None, **extras):
"""This transforms from independent dimensionless `z` variables to sfr
fractions and then to bin mass fractions. The transformation is such that
sfr fractions are drawn from a Dirichlet prior. See Betancourt et al. 2010
and Leja et al. 2017
Parameters
----------
total_mass : float
The total mass formed over all bins in the SFH.
z_fraction : ndarray of shape ``(Nbins-1,)``
latent variables drawn from a specific set of Beta distributions. (see
Betancourt 2010)
Returns
-------
masses : ndarray of shape ``(Nbins,)``
The stellar mass formed in each age bin.
"""
# sfr fractions
sfr_fraction = np.zeros(len(z_fraction) + 1)
sfr_fraction[0] = 1.0 - z_fraction[0]
for i in range(1, len(z_fraction)):
sfr_fraction[i] = np.prod(z_fraction[:i]) * (1.0 - z_fraction[i])
sfr_fraction[-1] = 1 - np.sum(sfr_fraction[:-1])
# convert to mass fractions
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
mass_fraction = sfr_fraction * np.array(time_per_bin)
mass_fraction /= mass_fraction.sum()
masses = total_mass * mass_fraction
return masses
# -- version of above for arrays of fractions --
#zf = np.atleast_2d(z_fraction)
#shape = list(zf.shape)
#shape[-1] += 1
#sfr_fraction = np.zeros(shape)
#sfr_fraction[..., 0] = 1.0 - z_fraction[..., 0]
#for i in range(1, shape[-1]-1):
# sfr_fraction[..., i] = (np.prod(z_fraction[..., :i], axis=-1) *
# (1.0 - z_fraction[...,i]))
#sfr_fraction[..., -1] = 1 - np.sum(sfr_fraction[..., :-1], axis=-1)
#sfr_fraction = np.squeeze(sfr_fraction)
#
# convert to mass fractions
#time_per_bin = np.diff(10**agebins, axis=-1)[:,0]
#sfr_fraction *= np.array(time_per_bin)
#mtot = np.atleast_1d(sfr_fraction.sum(axis=-1))
#mass_fraction = sfr_fraction / mtot[:, None]
#
#masses = np.atleast_2d(total_mass) * mass_fraction.T
#return masses.T
def zfrac_to_sfr(total_mass=None, z_fraction=None, agebins=None, **extras):
"""This transforms from independent dimensionless `z` variables to SFRs.
:returns sfrs:
The SFR in each age bin (msun/yr).
"""
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
masses = zfrac_to_masses(total_mass, z_fraction, agebins)
return masses / time_per_bin
def masses_to_zfrac(mass=None, agebins=None, **extras):
"""The inverse of :py:func:`zfrac_to_masses`, for setting mock parameters
based on mock bin masses.
Returns
-------
total_mass : float
The total mass
zfrac : ndarray of shape ``(Nbins-1,)``
latent variables drawn from a specific set of Beta distributions. (see
Betancourt 2010) related to the fraction of mass formed in each bin.
"""
total_mass = mass.sum()
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
sfr_fraction = mass / time_per_bin
sfr_fraction /= sfr_fraction.sum()
z_fraction = np.zeros(len(sfr_fraction) - 1)
z_fraction[0] = 1 - sfr_fraction[0]
for i in range(1, len(z_fraction)):
z_fraction[i] = 1.0 - sfr_fraction[i] / np.prod(z_fraction[:i])
return total_mass, z_fraction
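# Consistency sketch (added for illustration): this function inverts
# :py:func:`zfrac_to_masses`, so
#   total_mass, zfrac = masses_to_zfrac(mass=masses, agebins=agebins)
# followed by zfrac_to_masses(total_mass, zfrac, agebins) recovers ``masses``
# up to floating point error.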
# --------------------------------------
# --- Transforms for SFR ratio based nonparametric SFH ---
# --------------------------------------
def sfratio_to_sfr(sfr_ratio=None, sfr0=None, **extras):
    raise NotImplementedError
def sfratio_to_mass(sfr_ratio=None, sfr0=None, agebins=None, **extras):
    raise NotImplementedError
|
bd-j/prospector
|
prospect/models/transforms.py
|
Python
|
mit
| 16,746
|
[
"Galaxy"
] |
83747e5d29fec6dc5cd934a1056d672aefa6f58370d8f50e1639d970f4d0ca15
|
# Copyright (C) 2017 Martin Nilsson
# This file is part of the Memtran compiler.
#
# The Memtran compiler is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The Memtran compiler is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Memtran compiler. If not, see http://www.gnu.org/licenses/ .
import util  # provides util.log_error(), used by get_definition() below

NOT_IMPLEMENTED = "You should implement this!"
class NIdentifier:
# public long lineNr;
# public long rowNr;
# String name;
def __init__(self, lineNr,
rowNr,
name):
self.lineNr = lineNr
self.rowNr = rowNr
self.name = name
def print_it(self):
print("$", end='')
print(self.name, end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NIdentifier(self.lineNr, self.rowNr, self.name)
def visit_children(self, visitor):
return True # leaf element
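# Illustrative sketch (not part of the compiler source): every AST node in
# this module follows the same protocol -- print_it(), get_line_nr(),
# get_row_nr(), create_copy(), and visit_children(visitor), where the visitor
# must expose a visit(node) method that returns False to abort traversal.
# A minimal visitor could look like:
#
#   class CountingVisitor:
#       def __init__(self):
#           self.count = 0
#       def visit(self, node):
#           self.count += 1
#           return node.visit_children(self)  # recurse; propagate aborts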
##################### TYPES ###################################
class NType:
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract NType createCopy();
# abstract boolean visit_children(AbstractASTVisitor visitor);
pass
class NIdentifierType(NType):
# long lineNr;
# long rowNr;
# NIdentifier moduleNameOrNull;
# NIdentifier name;
def __init__(self, lineNr,
rowNr,
moduleNameOrNull,
name):
self.lineNr = lineNr
self.rowNr = rowNr
self.moduleNameOrNull = moduleNameOrNull
self.name = name
def print_it(self):
if not self.moduleNameOrNull is None:
self.moduleNameOrNull.print_it()
print("..", end='')
self.name.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
if self.moduleNameOrNull is None:
return NIdentifierType(self.lineNr, self.rowNr, None, self.name.create_copy())
else:
return NIdentifierType(self.lineNr, self.rowNr, self.moduleNameOrNull.create_copy(), self.name.create_copy())
def visit_children(self, visitor):
if not self.moduleNameOrNull is None:
success = visitor.visit(self.moduleNameOrNull)
if success == False:
return False
success = visitor.visit(self.name)
if success == False:
return False
return True
def get_definition(self, typeDict, directlyImportedTypesDict, otherImportedModulesTypeDictDict):
if self.moduleNameOrNull is None:
if self.name.name in typeDict:
return typeDict[self.name.name].theType
elif self.name.name in directlyImportedTypesDict:
return directlyImportedTypesDict[self.name.name].theType
else:
util.log_error(self.lineNr, self.rowNr, "Type match: named type's definition not found. SHOULD NOT HAPPEN.")
return NStructType(self.lineNr, self.rowNr, NIdentifier(self.lineNr, self.rowNr, "ERRORRR"), [])
else:
if self.moduleNameOrNull.name in otherImportedModulesTypeDictDict:
moduleTypeDict = otherImportedModulesTypeDictDict[self.moduleNameOrNull.name]
if self.name.name in moduleTypeDict:
return moduleTypeDict[self.name.name].theType
else:
util.log_error(self.lineNr, self.rowNr, "Named type's definition not found for type match. SHOULD NOT HAPPEN #18")
return NStructType(self.lineNr, self.rowNr, NIdentifier(self.lineNr, self.rowNr, "ERRORRR"), [])
else:
util.log_error(self.lineNr, self.rowNr, "Module not found for named type match. SHOULD NOT HAPPEN #8")
return NStructType(self.lineNr, self.rowNr, NIdentifier(self.lineNr, self.rowNr, "ERRORRR"), [])
class NNilType(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("Nil", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NNilType(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NBoolType(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("Bool", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NBoolType(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NI8Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("I8", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NI8Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NI16Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("I16", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NI16Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NI32Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("I32", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NI32Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NI64Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("I64", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NI64Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NISizeType(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("Int", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NISizeType(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NU8Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("U8", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NU8Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NU16Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("U16", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NU16Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NU32Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("U32", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NU32Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NU64Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("U64", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NU64Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NUSizeType(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("UInt", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NUSizeType(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NF32Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("F32", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NF32Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NF64Type(NType):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
def print_it(self):
print("F64", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NF64Type(self.lineNr, self.rowNr)
def visit_children(self, visitor):
return True # concrete element
class NDynamicArrayType(NType):
# long lineNr;
# long rowNr;
# NType valueType;
def __init__ (self, lineNr,
rowNr,
valueType):
self.lineNr = lineNr
self.rowNr = rowNr
self.valueType = valueType
def print_it(self):
print("([]", end='')
self.valueType.print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NDynamicArrayType(self.lineNr, self.rowNr, self.valueType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.valueType)
if success == False:
return False
return True
class NStructTypeMember:
# long lineNr;
# long rowNr;
# NIdentifier name;
# NType theType;
def __init__(self, lineNr,
rowNr,
name,
theType):
self.lineNr = lineNr
self.rowNr = rowNr
self.name = name
self.theType = theType
def print_it(self):
self.name.print_it()
print(" : ", end='')
self.theType.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NStructTypeMember(self.lineNr, self.rowNr, self.name.create_copy(), self.theType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.name)
if success == False:
return False
success = visitor.visit(self.theType)
if success == False:
return False
return True
class NStructType(NType):
# long lineNr;
# long rowNr;
# NIdentifier tag;
# ArrayList<NStructTypeMember> members;
def __init__(self, lineNr,
rowNr,
tag,
members):
self.lineNr = lineNr
self.rowNr = rowNr
self.tag = tag
self.members = members
def print_it(self):
print("'", end='')
self.tag.print_it()
print("'{", end='')
for m in self.members:
m.print_it()
print("}", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
membersCopy = []
for m in self.members: # I guess there is a prettier way available.....
membersCopy.append(m.create_copy())
return NStructType(self.lineNr, self.rowNr, self.tag.create_copy(), membersCopy)
def visit_children(self, visitor):
success = visitor.visit(self.tag)
if success == False:
return False
for member in self.members:
success = visitor.visit(member)
if success == False:
return False
return True
class NVariantBoxType(NType):
# long lineNr;
# long rowNr;
# ArrayList<NType> types;
def __init__(self, lineNr,
rowNr,
types):
self.lineNr = lineNr
self.rowNr = rowNr
self.types = types
def print_it(self):
print("(", end='')
if len(self.types) < 2:
print("ERRATIC VARIANT-BOX TYPE!", end='')
else:
self.types[0].print_it()
for i in range(1, len(self.types)):
print(" / ", end='')
self.types[i].print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
typesCopy = []
for t in self.types:
typesCopy.append(t.create_copy()) # (Surely cooler ways to do this, but nevermind)
return NVariantBoxType(self.lineNr, self.rowNr, typesCopy)
def visit_children(self, visitor):
for t in self.types:
success = visitor.visit(t)
if success == False:
return False
return True
class NTypeArg:
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract NTypeArg createCopy();
# abstract Any accept_visitor(AbstractASTVisitor visitor);
pass
class NNormalTypeArg(NTypeArg):
# long lineNr;
# long rowNr;
# boolean isMu;
# boolean isConstruand;
# NType argType;
def __init__(self, lineNr,
rowNr,
isMu,
isConstruand,
argType):
self.lineNr = lineNr
self.rowNr = rowNr
self.isMu = isMu
self.isConstruand = isConstruand
self.argType = argType
def print_it(self):
if self.isMu:
print("mu ", end='')
if self.isConstruand:
print("construand ", end='')
self.argType.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NNormalTypeArg(self.lineNr, self.rowNr, self.isMu, self.isConstruand, self.argType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.argType)
if success == False:
return False
return True
class NRefTypeArg(NTypeArg):
# long lineNr;
# long rowNr;
# NType argType;
def __init__(
self,
lineNr,
rowNr,
argType
):
self.lineNr = lineNr
self.rowNr = rowNr
self.argType = argType
def print_it(self):
print("ref ", end='')
self.argType.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NRefTypeArg(self.lineNr, self.rowNr, self.argType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.argType)
if success == False:
return False
return True
class NFunctionType(NType):
# long lineNr;
# long rowNr;
# ArrayList<NTypeArg> typeArgs;
# ArrayList<NType> returnTypes;
def __init__(self, lineNr,
rowNr,
typeArgs,
returnTypes):
self.lineNr = lineNr
self.rowNr = rowNr
self.typeArgs = typeArgs
self.returnTypes = returnTypes
def print_it(self):
print("Fn(", end='')
if len(self.typeArgs) > 0:
self.typeArgs[0].print_it()
for i in range(1, len(self.typeArgs)):
print(", ", end='')
self.typeArgs[i].print_it()
if len(self.returnTypes) > 0:
print(" => ", end='')
for i in range(0, len(self.returnTypes) - 1):
self.returnTypes[i].print_it()
print(", ", end='')
self.returnTypes[len(self.returnTypes) - 1].print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
typeArgsCopy = []
for ta in self.typeArgs:
typeArgsCopy.append(ta.create_copy()) # (There are cooler ways to do it definitely)
returnTypesCopy = []
for rt in self.returnTypes:
returnTypesCopy.append(rt.create_copy())
return NFunctionType(self.lineNr, self.rowNr, typeArgsCopy, returnTypesCopy)
    def visit_children(self, visitor):
for typeArg in self.typeArgs:
success = visitor.visit(typeArg)
if success == False:
return False
for returnType in self.returnTypes:
success = visitor.visit(returnType)
if success == False:
return False
return True
class NParametrizedIdentifierType(NType):
# long lineNr;
# long rowNr;
# NIdentifier moduleNameOrNull;
# NIdentifier name;
# ArrayList<NType> params;
def __init__(self, lineNr,
rowNr,
moduleNameOrNull,
name,
params):
self.lineNr = lineNr
self.rowNr = rowNr
self.moduleNameOrNull = moduleNameOrNull
self.name = name
self.params = params
def print_it(self):
if not self.moduleNameOrNull is None:
self.moduleNameOrNull.print_it()
print("::", end='')
self.name.print_it()
print("(", end='')
if len(self.params) < 1:
print("ERRATIC PARAMETRIZED TYPE!", end='')
else:
for i in range(0, len(self.params) - 1):
self.params[i].print_it()
print(", ", end='')
self.params[len(self.params) - 1].print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
paramsCopy = []
for p in self.params:
paramsCopy.append(p.create_copy())
if self.moduleNameOrNull is None:
return NParametrizedIdentifierType(self.lineNr, self.rowNr, None, self.name.create_copy(), paramsCopy)
else:
return NParametrizedIdentifierType(self.lineNr, self.rowNr, self.moduleNameOrNull.create_copy(), self.name.create_copy(), paramsCopy)
def visit_children(self, visitor):
if not self.moduleNameOrNull is None:
success = visitor.visit(self.moduleNameOrNull)
if success == False:
return False
success = visitor.visit(self.name)
if success == False:
return False
for param in self.params:
success = visitor.visit(param)
if success == False:
return False
return True
#################### EXPRESSIONS ##############################
class NExpression:
pass
# NType inferredType = null;
# boolean isValidLValue;
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract NExpression create_copy();
# abstract boolean visit_children(AbstractASTVisitor visitor);
class NIdentifierExpression(NExpression):
# public long lineNr;
# public long rowNr;
# public NIdentifier moduleNameOrNull;
# public NIdentifier name;
# public ArrayList<NIndexingIndex> indexings;
# TODO: Find out and document how the indexings are supposed to be parsed/used
# String mangledName
def __init__(self, lineNr,
rowNr,
moduleNameOrNull,
name,
indexings
):
self.lineNr = lineNr
self.rowNr = rowNr
self.moduleNameOrNull = moduleNameOrNull
self.name = name
self.indexings = indexings
self.isValidLValue = True # TODO: remove all these, not needed for parsing!
def print_it(self):
if not self.moduleNameOrNull is None:
self.moduleNameOrNull.print_it()
print("::", end='')
self.name.print_it()
for ii in self.indexings:
ii.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
indexingsCopy = []
for indexing in self.indexings:
indexingsCopy.append(indexing.create_copy())
if self.moduleNameOrNull is None:
result = NIdentifierExpression(self.lineNr, self.rowNr, None, self.name.create_copy(), indexingsCopy)
if hasattr(self, "mangledName"):
result.mangledName = self.mangledName
return result
else:
result = NIdentifierExpression(self.lineNr, self.rowNr, self.moduleNameOrNull.create_copy(), self.name.create_copy(), indexingsCopy)
if hasattr(self, "mangledName"):
result.mangledName = self.mangledName
return result
def visit_children(self, visitor):
if not self.moduleNameOrNull is None:
success = visitor.visit(self.moduleNameOrNull)
if success == False:
return False
success = visitor.visit(self.name)
if success == False:
return False
for indexing in self.indexings:
success = visitor.visit(indexing)
if success == False:
return False
return True
class NIndexingIndex:
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract NIndexingIndex create_copy()
# abstract boolean visit_children(AbstractASTVisitor visitor);
pass
class NNilExpression(NExpression):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
self.isValidLValue = False
def print_it(self):
print("nil", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NNilExpression(self.lineNr, self.rowNr) # no copying actually needed for literals, but whatever
def visit_children(self, visitor):
return True # concrete node
class NTrueExpression(NExpression):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
self.isValidLValue = False
def print_it(self):
print("true", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NTrueExpression(self.lineNr, self.rowNr) # no copying actually needed for literals, but whatever
def visit_children(self, visitor):
return True # concrete node
class NFalseExpression(NExpression):
# long lineNr;
# long rowNr;
def __init__(self, lineNr,
rowNr):
self.lineNr = lineNr
self.rowNr = rowNr
self.isValidLValue = False
def print_it(self):
print("false", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NFalseExpression(self.lineNr, self.rowNr) # no copying actually needed for literals, but whatever
def visit_children(self, visitor):
return True # concrete node
class NIntegerExpression(NExpression):
# public long lineNr;
# public long rowNr;
# public String value;
# public boolean isNegative;
def __init__(self, lineNr,
rowNr,
value,
isNegative):
self.lineNr = lineNr
self.rowNr = rowNr
self.value = value
self.isNegative = isNegative
self.isValidLValue = False
def print_it(self):
if self.isNegative:
print("-", end='')
print("#", end='')
print(self.value, end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NIntegerExpression(self.lineNr, self.rowNr, self.value, self.isNegative)
def visit_children(self, visitor):
return True # concrete node
class NFloatingPointNumberExpression(NExpression):
# long lineNr;
# long rowNr;
# String value;
# boolean isNegative;
def __init__(self, lineNr,
rowNr,
value,
isNegative ):
self.lineNr = lineNr
self.rowNr = rowNr
self.value = value
self.isNegative = isNegative
self.isValidLValue = False
def print_it(self):
if self.isNegative:
print("-", end='')
print("##", end='')
print(self.value, end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NFloatingPointNumberExpression(self.lineNr, self.rowNr, self.value, self.isNegative)
def visit_children(self, visitor):
return True # concrete node
class NStringExpression(NExpression):
# long lineNr;
# long rowNr;
# String value;
def __init__(self, lineNr,
rowNr,
value ):
self.lineNr = lineNr
self.rowNr = rowNr
self.value = value
self.isValidLValue = False
def print_it(self):
print("\"", end='')
print(self.value, end='')
print("\"", end='') # funny printing of cr:s and newlines though
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NStringExpression(self.lineNr, self.rowNr, self.value)
def visit_children(self, visitor):
return True # concrete node
class NArrayExpressionIndividualValues(NExpression):
# long lineNr;
# long rowNr;
# ArrayList<NExpression> values;
def __init__(
self,
lineNr,
rowNr,
values
):
self.lineNr = lineNr
self.rowNr = rowNr
self.values = values
self.isValidLValue = False
def print_it(self):
print("#[", end='')
if len(self.values) > 0:
for i in range(0, len(self.values) - 1):
self.values[i].print_it()
print(", ", end='')
self.values[len(self.values) - 1].print_it()
print("]", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
valuesCopy = []
for value in self.values:
valuesCopy.append(value.create_copy())
return NArrayExpressionIndividualValues(self.lineNr, self.rowNr, valuesCopy)
def visit_children(self, visitor):
for value in self.values:
success = visitor.visit(value)
if success == False:
return False
return True
class NArrayExpressionNoInitialization(NExpression):
# long lineNr;
# long rowNr;
# boolean isUninitialized; // if false, it is "trash"
# NExpression length;
def __init__(
self,
lineNr,
rowNr,
isUninitialized,
length
):
self.lineNr = lineNr
self.rowNr = rowNr
self.isUninitialized = isUninitialized
self.length = length
self.isValidLValue = False
def print_it(self):
print("#[", end='')
if self.isUninitialized:
print("uninitialized ", end='')
else:
print("trash ", end='')
self.length.print_it()
print("]", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
        return NArrayExpressionNoInitialization(self.lineNr, self.rowNr, self.isUninitialized, self.length.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.length)
if success == False:
return False
return True
class NArrayExpressionRepeatedValue(NExpression):
# long lineNr;
# long rowNr;
# NExpression repeatedValue;
# NExpression length;
def __init__(
self,
lineNr,
rowNr,
repeatedValue,
length
):
self.lineNr = lineNr
self.rowNr = rowNr
self.repeatedValue = repeatedValue
self.length = length
self.isValidLValue = False
def print_it(self):
print("#[", end='')
self.repeatedValue.print_it()
print(" repeat ", end='')
self.length.print_it()
print("]", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
        return NArrayExpressionRepeatedValue(self.lineNr, self.rowNr, self.repeatedValue.create_copy(), self.length.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.repeatedValue)
if success == False:
return False
success = visitor.visit(self.length)
if success == False:
return False
return True
class NStructExpressionPost:
# long lineNr;
# long rowNr;
# NIdentifier name;
# NExpression value;
def __init__(
self,
lineNr,
rowNr,
name,
value
):
self.lineNr = lineNr
self.rowNr = rowNr
self.name = name
self.value = value
def print_it(self):
self.name.print_it()
print(" = ", end='')
self.value.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NStructExpressionPost(self.lineNr, self.rowNr, self.name.create_copy(), self.value.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.name)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NStructExpression(NExpression):
# long lineNr;
# long rowNr;
# NIdentifier tag;
# ArrayList<NStructExpressionPost> posts;
def __init__(
self,
lineNr,
rowNr,
tag,
posts
):
self.lineNr = lineNr
self.rowNr = rowNr
self.tag = tag
self.posts = posts
self.isValidLValue = False
def print_it(self):
print("#'", end='')
self.tag.print_it()
print("'{", end='')
for post in self.posts:
post.print_it()
print("}", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
postsCopy = []
for post in self.posts:
postsCopy.append(post.create_copy())
return NStructExpression(self.lineNr, self.rowNr, self.tag.create_copy(), postsCopy)
def visit_children(self, visitor):
success = visitor.visit(self.tag) # probably too deep but whatever
if success == False:
return False
for post in self.posts:
success = visitor.visit(post)
if success == False:
return False
return True
class NVariantBoxExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression expression;
def __init__(self, lineNr, rowNr, expression):
self.lineNr = lineNr
self.rowNr = rowNr
self.expression = expression
def print_it(self):
print("#/", end='')
self.expression.print_it()
print("\\", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NVariantBoxExpression(self.lineNr, self.rowNr, self.expression.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.expression)
if success == False:
return False
return True
class NTypeClarifiedExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression expression;
# NType theType;
def __init__(self, lineNr,
rowNr,
expression,
theType):
self.lineNr = lineNr
self.rowNr = rowNr
self.expression = expression
self.theType = theType
self.isValidLValue = False # because it is only used when "expression" isn't a simple identifier expression
def print_it(self):
print("(", end='')
self.expression.print_it()
print(" ::: ", end='')
self.theType.print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NTypeClarifiedExpression(self.lineNr, self.rowNr, self.expression.create_copy(), self.theType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.expression)
if success == False:
return False
success = visitor.visit(self.theType)
if success == False:
return False
return True
# These classes have an NIndexingIndex:
#
# - NIdentifierExpression: type clarification indexings, array indexings, struct indexings, ortype cast indexings
# - NIndexing : same kinds of indexings -- used for general indexing on expressions
#
# Subclasses:
#
# -NArrayIndexingIndex
# -NStructIndexingIndex
# -NTypeClarificationIndex // this should maybe be allowed in lvalues too, for interesting reasons
# -NOrtypeCastIndex
class NArrayIndexing(NExpression):
# -- this class is to be used in the "general case"
# long lineNr;
# long rowNr;
# NExpression arrayExpression;
# NExpression indexExpression;
def __init__(
self,
lineNr,
rowNr,
arrayExpression,
indexExpression
):
self.lineNr = lineNr
self.rowNr = rowNr
self.arrayExpression = arrayExpression
self.indexExpression = indexExpression
self.isValidLValue = False # because it is only used when "arrayExpression" isn't a valid lvalue
def print_it(self):
print("(", end='')
self.arrayExpression.print_it()
print("[", end='')
self.indexExpression.print_it()
print("])", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NArrayIndexing(self.lineNr, self.rowNr, self.arrayExpression.create_copy(), self.indexExpression.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.arrayExpression)
if success == False:
return False
success = visitor.visit(self.indexExpression)
if success == False:
return False
return True
class NArrayIndexingIndex(NIndexingIndex):
# long lineNr;
# long rowNr;
# NExpression indexExpression;
def __init__ (self, lineNr,
rowNr,
indexExpression):
self.lineNr = lineNr
self.rowNr = rowNr
self.indexExpression = indexExpression
def print_it(self):
print("[", end='')
self.indexExpression.print_it()
print("]", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NArrayIndexingIndex(self.lineNr, self.rowNr, self.indexExpression.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.indexExpression)
if success == False:
return False
return True
class NStructIndexing(NExpression):
# long lineNr;
# long rowNr;
# NExpression structExpression;
# NIdentifier indexName;
def __init__(self, lineNr,
rowNr,
structExpression,
indexName):
self.lineNr = lineNr
self.rowNr = rowNr
self.structExpression = structExpression
self.indexName = indexName
self.isValidLValue = False # because it is only used when "structExpression" isn't a valid lvalue
def print_it(self):
print("(", end='')
self.structExpression.print_it()
print(".", end='')
self.indexName.print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NStructIndexing(self.lineNr, self.rowNr, self.structExpression.create_copy(), self.indexName.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.structExpression)
if success == False:
return False
success = visitor.visit(self.indexName)
if success == False:
return False
return True
class NStructIndexingIndex(NIndexingIndex):
# long lineNr;
# long rowNr;
# NIdentifier indexName;
def __init__(self, lineNr,
rowNr,
indexName):
self.lineNr = lineNr
self.rowNr = rowNr
self.indexName = indexName
def print_it(self):
print(".", end='')
self.indexName.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NStructIndexingIndex(self.lineNr, self.rowNr, self.indexName.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.indexName)
if success == False:
return False
return True
class NVariantBoxCastIndex(NIndexingIndex):
# long lineNr;
# long rowNr;
# NType theType;
def __init__(self, lineNr,
rowNr,
theType):
self.lineNr = lineNr
self.rowNr = rowNr
self.theType = theType
def print_it(self):
print(" ==> ", end='')
self.theType.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NVariantBoxCastIndex(self.lineNr, self.rowNr, self.theType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.theType)
if success == False:
return False
return True
class NTypeClarificationIndex(NIndexingIndex):
# long lineNr;
# long rowNr;
# NType theType;
def __init__(self, lineNr,
rowNr,
theType):
self.lineNr = lineNr
self.rowNr = rowNr
self.theType = theType
def print_it(self):
print(" ::: ", end='')
self.theType.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NTypeClarificationIndex(self.lineNr, self.rowNr, self.theType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.theType)
if success == False:
return False
return True
# NOT USED CURRENTLY:
# class NIndexing(NExpression):
#
# long lineNr;
# long rowNr;
# NExpression expression;
# ArrayList<NIndexingIndex> indexingIndices;
# def __init__(self, lineNr, rowNr, expression, indexingIndices):
#
# self.lineNr = lineNr
# self.rowNr = rowNr
# self.expression = expression
# self.indexingIndices = indexingIndices
# self.isValidLValue = self.expression.isValidLValue
#
#
#
# def print_it(self):
# self.expression.print_it()
# for ii in indexingIndices:
# ii.print_it()
#
#
#
# def get_line_nr(self):
# return self.lineNr
#
# def get_row_nr(self):
# return self.rowNr
#
# # createCopy() here????
class NVariantBoxCastExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression expression;
# NType theType;
def __init__(self, lineNr,
rowNr,
expression,
theType):
self.lineNr = lineNr
self.rowNr = rowNr
self.expression = expression
self.theType = theType
self.isValidLValue = False # because it is only used when "expression" isn't a valid lvalue
def print_it(self):
print("(", end='')
self.expression.print_it()
print(" ==> ", end='')
self.theType.print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NVariantBoxCastExpression(self.lineNr, self.rowNr, self.expression.create_copy(), self.theType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.expression)
if success == False:
return False
success = visitor.visit(self.theType)
        if success == False:
return False
return True
class NArg:
pass
class NNormalArg(NArg):
# long lineNr;
# long rowNr;
# NIdentifier argNameOrNull;
# NExpression argExpression;
def __init__(self, lineNr,
rowNr,
argNameOrNull,
argExpression):
self.lineNr = lineNr
self.rowNr = rowNr
self.argNameOrNull = argNameOrNull
self.argExpression = argExpression
def print_it(self):
if not self.argNameOrNull is None:
self.argNameOrNull.print_it()
print(" = ", end='')
self.argExpression.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
if self.argNameOrNull is None:
return NNormalArg(self.lineNr, self.rowNr, None, self.argExpression.create_copy())
else:
return NNormalArg(self.lineNr, self.rowNr, self.argNameOrNull.create_copy(), self.argExpression.create_copy())
def visit_children(self, visitor):
if not self.argNameOrNull is None:
success = visitor.visit(self.argNameOrNull)
if success == False:
return False
success = visitor.visit(self.argExpression)
if success == False:
return False
return True
class NRefArg(NArg):
# long lineNr;
# long rowNr;
# NIdentifier argNameOrNull;
# NLValueContainer lValueContainer;
def __init__(self, lineNr,
rowNr,
argNameOrNull,
lValueContainer):
self.lineNr = lineNr
self.rowNr = rowNr
self.argNameOrNull = argNameOrNull
self.lValueContainer = lValueContainer
def print_it(self):
print("ref ", end='')
if not self.argNameOrNull is None:
self.argNameOrNull.print_it()
print(" = ", end='')
self.lValueContainer.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
if self.argNameOrNull is None:
return NRefArg(self.lineNr, self.rowNr, None, self.lValueContainer.create_copy())
else:
return NRefArg(self.lineNr, self.rowNr, self.argNameOrNull.create_copy(), self.lValueContainer.create_copy())
def visit_children(self, visitor):
if not self.argNameOrNull is None:
success = visitor.visit(self.argNameOrNull)
if success == False:
return False
success = visitor.visit(self.lValueContainer)
if success == False:
return False
return True
class NAndSymbolExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression leftExpression;
# NExpression rightExpression;
def __init__(
self,
lineNr,
rowNr,
leftExpression,
rightExpression
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftExpression = leftExpression
self.rightExpression = rightExpression
self.isValidLValue = False
def print_it(self):
print("(", end='')
self.leftExpression.print_it()
print(" && ", end='')
self.rightExpression.print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NAndSymbolExpression(self.lineNr, self.rowNr, self.leftExpression.create_copy(), self.rightExpression.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.leftExpression)
if success == False:
return False
success = visitor.visit(self.rightExpression)
if success == False:
return False
return True
class NOrSymbolExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression leftExpression;
# NExpression rightExpression;
def __init__(
self,
lineNr,
rowNr,
leftExpression,
rightExpression
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftExpression = leftExpression
self.rightExpression = rightExpression
self.isValidLValue = False
def print_it(self):
print("(", end='')
self.leftExpression.print_it()
print(" || ", end='')
self.rightExpression.print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NOrSymbolExpression(self.lineNr, self.rowNr, self.leftExpression.create_copy(), self.rightExpression.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.leftExpression)
if success == False:
return False
success = visitor.visit(self.rightExpression)
if success == False:
return False
return True
class NEndExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression expansion;
def __init__(
self,
lineNr,
rowNr
):
self.lineNr = lineNr
self.rowNr = rowNr
self.isValidLValue = False
def print_it(self):
if hasattr(self, 'expansion'):
self.expansion.print_it()
else:
print("end", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
result = NEndExpression(self.lineNr, self.rowNr)
if hasattr(self, 'expansion'):
result.expansion = self.expansion.create_copy()
        return result  # TODO: create_copy must handle annotations like this on every node type that can carry them
def visit_children(self, visitor):
# this seems reasonable to do...
if hasattr(self, "expansion"):
success = visitor.visit(self.expansion)
if success == False:
return False
return True
else:
return True
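# Hedged sketch (editor's addition): `expansion` above is attached to an
# NEndExpression after construction rather than in __init__, which is why
# create_copy and visit_children probe it with hasattr. A rewriting pass could
# annotate a node roughly like this (the names are illustrative only):
def _example_annotate_end_expression(end_expression, resolved_expression):
    end_expression.expansion = resolved_expression  # picked up later via hasattr
    return end_expression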
class NFunctionCall(NExpression):
# long lineNr;
# long rowNr;
# NExpression functionExpression;
# ArrayList<NArg> args;
def __init__(self, lineNr,
rowNr,
functionExpression,
args):
self.lineNr = lineNr
self.rowNr = rowNr
self.functionExpression = functionExpression
self.args = args
self.isValidLValue = False
def print_it(self):
self.functionExpression.print_it()
print("(", end='')
if len(self.args) > 0:
self.args[0].print_it()
for i in range(1, len(self.args)):
print(", ", end='')
self.args[i].print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
argsCopy = []
for arg in self.args:
argsCopy.append(arg.create_copy())
result = NFunctionCall(self.lineNr, self.rowNr, self.functionExpression.create_copy(), argsCopy)
return result
def visit_children(self, visitor):
success = visitor.visit(self.functionExpression)
if success == False:
return False
for arg in self.args:
success = visitor.visit(arg)
if success == False:
return False
return True
class NIFExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression condition;
# NExpression thenExpression;
# NExpression elseExpression;
def __init__(self, lineNr, rowNr, condition, thenExpression, elseExpression):
self.lineNr = lineNr
self.rowNr = rowNr
self.condition = condition
self.thenExpression = thenExpression
self.elseExpression = elseExpression
        self.isValidLValue = False  # deliberately False -- changing this would be risky
def print_it(self):
print("IF ", end='')
self.condition.print_it()
print(" ", end='')
self.thenExpression.print_it()
print(" ELSE ", end='')
self.elseExpression.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
result = NIFExpression(self.lineNr, self.rowNr, self.condition.create_copy(), self.thenExpression.create_copy(), self.elseExpression.create_copy())
return result
def visit_children(self, visitor):
success = visitor.visit(self.condition)
if success == False:
return False
success = visitor.visit(self.thenExpression)
if success == False:
return False
success = visitor.visit(self.elseExpression)
if success == False:
return False
return True
class NSWITCHNormalCase:
# long lineNr;
# long rowNr;
# ArrayList<NExpression> caseValues; // at least one of these!
# NExpression value;
def __init__(self, lineNr, rowNr, caseValues, value):
self.lineNr = lineNr
self.rowNr = rowNr
self.caseValues = caseValues
self.value = value
def print_it(self):
print(" CASE ", end='')
if len(self.caseValues) == 0:
print("MISSING CASE VALUE!!!", end='')
else:
self.caseValues[0].print_it()
for i in range(1, len(self.caseValues)):
print(", ", end='')
self.caseValues[i].print_it()
print(" ", end='')
self.value.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
caseValuesCopy = []
for caseValue in self.caseValues:
caseValuesCopy.append(caseValue.create_copy())
result = NSWITCHNormalCase(self.lineNr, self.rowNr, caseValuesCopy, self.value.create_copy())
return result
def visit_children(self, visitor):
for caseValue in self.caseValues:
success = visitor.visit(caseValue)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NCONTENTTYPENormalCase:
# long lineNr;
# long rowNr;
# ArrayList<NType> typeCases;
# NExpression value;
def __init__(self, lineNr, rowNr, typeCases, value):
self.lineNr = lineNr
self.rowNr = rowNr
self.typeCases = typeCases
self.value = value
def print_it(self):
print(" CASE ", end='')
if len(self.typeCases) == 0:
print("MISSING TYPE CASE!!!", end='')
else:
self.typeCases[0].print_it()
for i in range(1, len(self.typeCases)):
print(",", end='')
self.typeCases[i].print_it()
print(" ", end='')
self.value.print_it()
    def get_line_nr(self):
        return self.lineNr
    def get_row_nr(self):
        return self.rowNr
    def create_copy(self):
typeCasesCopy = []
for typeCase in self.typeCases:
typeCasesCopy.append(typeCase.create_copy())
result = NCONTENTTYPENormalCase(self.lineNr, self.rowNr, typeCasesCopy, self.value.create_copy())
return result
def visit_children(self, visitor):
for typeCase in self.typeCases:
success = visitor.visit(typeCase)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NSWITCHExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression switchValue;
# ArrayList<NSWITCHNormalCase> cases;
# NExpression defaultCase;
def __init__(self, lineNr, rowNr, switchValue, cases, defaultCase):
self.lineNr = lineNr
self.rowNr = rowNr
self.switchValue = switchValue
self.cases = cases
self.defaultCase = defaultCase
self.isValidLValue = False
def print_it(self):
print("SWITCH ", end='')
self.switchValue.print_it()
for c in self.cases:
c.print_it()
print(" DEFAULT ", end='')
self.defaultCase.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
casesCopy = []
for case in self.cases:
casesCopy.append(case.create_copy())
        result = NSWITCHExpression(self.lineNr, self.rowNr, self.switchValue.create_copy(), casesCopy, self.defaultCase.create_copy())
return result
def visit_children(self, visitor):
success = visitor.visit(self.switchValue)
if success == False:
return False
for case in self.cases:
success = visitor.visit(case)
if success == False:
return False
success = visitor.visit(self.defaultCase)
if success == False:
return False
return True
class NCONTENTTYPEExpression(NExpression):
# long lineNr;
# long rowNr;
# NExpression switchValue;
# ArrayList<NCONTENTTYPENormalCase> cases;
# NExpression defaultCaseOrNull;
def __init__(self, lineNr, rowNr, switchValue, cases, defaultCaseOrNull):
self.lineNr = lineNr
self.rowNr = rowNr
self.switchValue = switchValue
self.cases = cases
self.defaultCaseOrNull = defaultCaseOrNull
self.isValidLValue = False
def print_it(self):
print("CONTENTTYPE ", end='')
self.switchValue.print_it()
for c in self.cases:
c.print_it()
if not self.defaultCaseOrNull is None:
print(" DEFAULT ", end='')
self.defaultCaseOrNull.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
casesCopy = []
        for case in self.cases:
casesCopy.append(case.create_copy())
if self.defaultCaseOrNull is None:
result = NCONTENTTYPEExpression(self.lineNr, self.rowNr, self.switchValue.create_copy(), casesCopy, None)
else:
result = NCONTENTTYPEExpression(self.lineNr, self.rowNr, self.switchValue.create_copy(), casesCopy, self.defaultCaseOrNull.create_copy())
return result
def visit_children(self, visitor):
success = visitor.visit(self.switchValue)
if success == False:
return False
for case in self.cases:
success = visitor.visit(case)
if success == False:
return False
if not self.defaultCaseOrNull is None:
success = visitor.visit(self.defaultCaseOrNull)
if success == False:
return False
return True
##################### LVALUES ETC. #################################
class NLValueOrVariableDeclaration:
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract boolean visit_children(AbstractASTVisitor visitor);
pass
class NLValueContainer(NLValueOrVariableDeclaration):
# long lineNr;
# long rowNr;
# NExpression lValueExpression;
def __init__(self, lineNr,
rowNr,
lValueExpression):
self.lineNr = lineNr
self.rowNr = rowNr
self.lValueExpression = lValueExpression
def print_it(self):
print("(LVALUE ", end='')
self.lValueExpression.print_it()
print(")", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NLValueContainer(self.lineNr, self.rowNr, self.lValueExpression.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.lValueExpression)
if success == False:
return False
return True
##################### STATEMENTS ##############################
class NStatement:
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract boolean visit_children(AbstractASTVisitor visitor);
pass
class NTypeDeclarationWithDefinition(NStatement):
# long lineNr;
# long rowNr;
# NIdentifier name;
# ArrayList<NIdentifier> paramsOrNull;
# NType theType;
# str mangledName;
def __init__(
self,
lineNr,
rowNr,
name,
paramsOrNull, # this one should have at least 1 element if not None
theType
):
self.lineNr = lineNr
self.rowNr = rowNr
self.name = name
self.paramsOrNull = paramsOrNull
self.theType = theType
def print_it(self):
print("type ", end='')
self.name.print_it()
if not self.paramsOrNull is None:
if len(self.paramsOrNull) > 0:
print("(", end='')
self.paramsOrNull[0].print_it()
for i in range(1, len(self.paramsOrNull)):
print(", ", end='')
self.paramsOrNull[i].print_it()
print(")", end='')
print(" = ", end='')
self.theType.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self): # this method needs not exist for all kinds of statements, but on this one it is needed
paramsOrNullCopy = None
if not self.paramsOrNull is None:
paramsOrNullCopy = []
for param in self.paramsOrNull:
paramsOrNullCopy.append(param.create_copy())
if paramsOrNullCopy is None:
result = NTypeDeclarationWithDefinition(self.lineNr, self.rowNr, self.name.create_copy(), None, self.theType.create_copy())
if hasattr(self, "mangledName"):
result.mangledName = self.mangledName
return result
else:
result = NTypeDeclarationWithDefinition(self.lineNr, self.rowNr, self.name.create_copy(), paramsOrNullCopy, self.theType.create_copy())
if hasattr(self, "mangledName"):
result.mangledName = self.mangledName
return result
def visit_children(self, visitor):
success = visitor.visit(self.name)
if success == False:
return False
if not self.paramsOrNull is None:
for param in self.paramsOrNull:
success = visitor.visit(param)
if success == False:
return False
success = visitor.visit(self.theType)
if success == False:
return False
return True
class NBlock(NStatement):
# long lineNr;
# long rowNr;
# ArrayList<NStatement> statements;
# String blockEntryNumStr; // added by slot collection pass
def __init__(
self,
lineNr,
rowNr,
statements
):
self.lineNr = lineNr
self.rowNr = rowNr
self.statements = statements
def print_it(self):
print("{", end='')
for statement in self.statements:
statement.print_it()
print("} ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for statement in self.statements:
success = visitor.visit(statement)
if success == False:
return False
return True
class NElseIfClause:
# long lineNr;
# long rowNr;
# NExpression condition;
# NBlock block;
def __init__(self, lineNr, rowNr, condition, block):
self.lineNr = lineNr
self.rowNr = rowNr
self.condition = condition
self.block = block
def print_it(self):
print("else if ", end='')
self.condition.print_it()
print(" ", end='')
self.block.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.condition)
if success == False:
return False
success = visitor.visit(self.block)
if success == False:
return False
return True
class NIfStatement(NStatement):
# long lineNr;
# long rowNr;
# NExpression condition;
# NBlock ifBlock;
# ArrayList<NElseIfClause> elseIfClauses;
# NBlock elseBlockOrNull;
def __init__(self, lineNr, rowNr, condition, ifBlock, elseIfClauses, elseBlockOrNull):
self.lineNr = lineNr
self.rowNr = rowNr
self.condition = condition
self.ifBlock = ifBlock
self.elseIfClauses = elseIfClauses
self.elseBlockOrNull = elseBlockOrNull
def print_it(self):
print("if ", end='')
self.condition.print_it()
print(" ", end='')
self.ifBlock.print_it()
for clause in self.elseIfClauses:
clause.print_it()
if not self.elseBlockOrNull is None:
print("else ", end='')
self.elseBlockOrNull.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.condition)
if success == False:
return False
success = visitor.visit(self.ifBlock)
if success == False:
return False
for elseIfClause in self.elseIfClauses:
success = visitor.visit(elseIfClause)
if success == False:
return False
if not self.elseBlockOrNull is None:
success = visitor.visit(self.elseBlockOrNull)
if success == False:
return False
return True
class NParam:
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract Param create_copy();
# abstract boolean visit_children(AbstractASTVisitor visitor);
pass
class NNormalParam(NParam):
# long lineNr;
# long rowNr;
# boolean isMut;
# boolean isConstruand;
# NIdentifier name;
# NType theType;
def __init__(
self,
lineNr,
rowNr,
isMut,
        isConstruand,  # TODO: can both flags be set? Yes -- but then "mu" takes precedence for implementation purposes
name,
theType
):
self.lineNr = lineNr
self.rowNr = rowNr
self.isMut = isMut
self.isConstruand = isConstruand
self.name = name
self.theType = theType
def print_it(self):
if self.isMut:
print("mu ", end='')
if self.isConstruand:
print("construand ", end='')
self.name.print_it()
print(" : ", end='')
self.theType.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NNormalParam(self.lineNr, self.rowNr, self.isMut, self.isConstruand, self.name.create_copy(), self.theType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.name)
if success == False:
return False
success = visitor.visit(self.theType)
if success == False:
return False
return True
class NRefParam(NParam):
# long lineNr;
# long rowNr;
# NIdentifier name;
# NType theType;
def __init__(
self,
lineNr,
rowNr,
name,
theType
):
self.lineNr = lineNr
self.rowNr = rowNr
self.name = name
self.theType = theType
def print_it(self):
print("ref ", end='')
self.name.print_it()
print(" : ", end='')
self.theType.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def create_copy(self):
return NRefParam(self.lineNr, self.rowNr, self.name.create_copy(), self.theType.create_copy())
def visit_children(self, visitor):
success = visitor.visit(self.name)
if success == False:
return False
success = visitor.visit(self.theType)
if success == False:
return False
return True
class NActualFunctionDeclarationWithDefinition(NStatement):
# long lineNr;
# long rowNr;
# boolean isInternal;
# boolean isInline;
# NIdentifier name;
# ArrayList<NParam> params;
# ArrayList<NType> returnTypes;
# NBlock body;
# String mangledName;
def __init__(
self,
lineNr,
rowNr,
isInternal,
isInline,
name,
params,
returnTypes,
body
):
self.lineNr = lineNr
self.rowNr = rowNr
self.isInternal = isInternal
self.isInline = isInline
self.name = name
self.params = params
self.returnTypes = returnTypes
self.body = body
def print_it(self):
if self.isInternal:
print("internal ", end='')
if self.isInline:
print("inline ", end='')
print("fn ", end='')
self.name.print_it()
print("(", end='')
if len(self.params) > 0:
self.params[0].print_it()
for i in range(1, len(self.params)):
print(", ", end='')
self.params[i].print_it()
if len(self.returnTypes) > 0:
print(" => ", end='')
self.returnTypes[0].print_it()
for i in range(1, len(self.returnTypes)):
print(", ", end='')
self.returnTypes[i].print_it()
print(") ", end='')
self.body.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.name)
if success == False:
return False
for param in self.params:
success = visitor.visit(param)
if success == False:
return False
for returnType in self.returnTypes:
success = visitor.visit(returnType)
if success == False:
return False
success = visitor.visit(self.body)
if success == False:
return False
return True
class NRefToFunctionDeclarationWithDefinition(NStatement):
# int funsIndex;
# ArrayList<NActualFunctionDeclarationWithDefinition> funs; // need this ref for printing, sadly, in order to conform to the NStatement abstract
# int blockNumberStr
def __init__ (
self,
funsIndex,
funs
):
self.funsIndex = funsIndex
self.funs = funs
def print_it(self):
self.funs[self.funsIndex].print_it()
def get_line_nr(self):
return self.funs[self.funsIndex].get_line_nr()
def get_row_nr(self):
return self.funs[self.funsIndex].get_row_nr()
def visit_children(self, visitor):
# this seems the reasonable thing to do for our purposes:
success = visitor.visit(self.funs[self.funsIndex])
if success == False:
return False
return True
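# Hedged example (editor's addition): NRefToFunctionDeclarationWithDefinition
# keeps one shared list of actual declarations and lets many statements point
# into it by index. A collection pass could register a declaration roughly like
# this (the helper name is illustrative):
def _example_register_function(funs, actual_declaration):
    funs.append(actual_declaration)
    return NRefToFunctionDeclarationWithDefinition(len(funs) - 1, funs)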
class NActualTemplateDeclarationWithDefinition(NStatement):
# long lineNr;
# long rowNr;
# boolean isInternal;
# boolean isInline;
# ArrayList<NIdentifier> templateParams; // must have at least length 1
# NIdentifier name;
# ArrayList<NParam> params;
# ArrayList<NType> returnTypes;
# NBlock body;
def __init__(
self,
lineNr,
rowNr,
isInternal,
isInline,
templateParams,
name,
params,
returnTypes,
body
):
self.lineNr = lineNr
self.rowNr = rowNr
self.isInternal = isInternal
self.isInline = isInline
self.templateParams = templateParams
self.name = name
self.params = params
self.returnTypes = returnTypes
self.body = body
def print_it(self):
if self.isInternal:
print("private ", end='')
if self.isInline:
print("inline ", end='')
print("fn(", end='')
        if len(self.templateParams) > 0:  # should always be true -- templateParams must have at least one element
self.templateParams[0].print_it()
for i in range(1, len(self.templateParams)):
print(", ", end='')
self.templateParams[i].print_it()
print(") ", end='')
self.name.print_it()
print("(", end='')
if len(self.params) > 0:
self.params[0].print_it()
for i in range(1, len(self.params)):
print(", ", end='')
self.params[i].print_it()
if len(self.returnTypes) > 0:
print(" => ", end='')
self.returnTypes[0].print_it()
for i in range(1, len(self.returnTypes)):
print(", ", end='')
self.returnTypes[i].print_it()
print(") ", end='')
self.body.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for templateParam in self.templateParams:
success = visitor.visit(templateParam)
if success == False:
return False
success = visitor.visit(self.name)
if success == False:
return False
for param in self.params:
success = visitor.visit(param)
if success == False:
return False
for returnType in self.returnTypes:
success = visitor.visit(returnType)
if success == False:
return False
success = visitor.visit(self.body)
if success == False:
return False
return True
class NRefToTemplateDeclarationWithDefinition(NStatement):
# int templatesIndex;
# ArrayList<NActualTemplateDeclarationWithDefinition> templates; // need this ref for printing, sadly, in order to conform to the NStatement abstract
def __init__(
self,
templatesIndex,
templates
):
self.templatesIndex = templatesIndex
self.templates = templates
def print_it(self):
self.templates[self.templatesIndex].print_it()
def get_line_nr(self):
return self.templates[self.templatesIndex].get_line_nr()
def get_row_nr(self):
return self.templates[self.templatesIndex].get_row_nr()
def visit_children(self, visitor):
success = visitor.visit(self.templates[self.templatesIndex])
if success == False:
return False
return True
class NVariableDeclaration(NLValueOrVariableDeclaration):
# long lineNr;
# long rowNr;
# boolean isMut;
# boolean isInternal;
# NIdentifier name;
# NType theType;
def __init__(
self,
lineNr,
rowNr,
isMut,
isInternal,
name,
theType
):
self.lineNr = lineNr
self.rowNr = rowNr
self.isMut = isMut
self.isInternal = isInternal
self.name = name
self.theType = theType
def print_it(self):
if self.isInternal:
print("internal ", end='')
if self.isMut:
print("mu ", end='')
self.name.print_it()
print(" : ", end='')
self.theType.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.name)
if success == False:
return False
success = visitor.visit(self.theType)
if success == False:
return False
return True
class NAssignment(NStatement):
pass
class NNormalAssignment(NAssignment):
# long lineNr;
# long rowNr;
# ArrayList<NLValueOrVariableDeclaration> leftHandSide;
# NExpression value;
def __init__(
self,
lineNr,
rowNr,
leftHandSide, # should have at least 1 element
value
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftHandSide = leftHandSide
self.value = value
def print_it(self):
self.leftHandSide[0].print_it()
for i in range(1, len(self.leftHandSide)):
print(", ", end='')
self.leftHandSide[i].print_it()
print(" = ", end='')
self.value.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for lhsEntry in self.leftHandSide:
success = visitor.visit(lhsEntry)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NModuloAssignment(NAssignment):
# long lineNr;
# long rowNr;
# ArrayList<NLValueContainer> leftHandSide;
# NExpression value;
def __init__(
self,
lineNr,
rowNr,
leftHandSide, # should have at least 1 element
value
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftHandSide = leftHandSide
self.value = value
def print_it(self):
self.leftHandSide[0].print_it()
for i in range(1, len(self.leftHandSide)):
print(", ", end='')
self.leftHandSide[i].print_it()
print(" %= ", end='')
self.value.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for lhsEntry in self.leftHandSide:
success = visitor.visit(lhsEntry)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NAdditionAssignment(NAssignment):
# long lineNr;
# long rowNr;
# ArrayList<NLValueContainer> leftHandSide;
# NExpression value;
def __init__(
self,
lineNr,
rowNr,
leftHandSide, # should have at least 1 element
value
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftHandSide = leftHandSide
self.value = value
def print_it(self):
self.leftHandSide[0].print_it()
for i in range(1, len(self.leftHandSide)):
print(", ", end='')
self.leftHandSide[i].print_it()
print(" += ", end='')
self.value.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for lhsEntry in self.leftHandSide:
success = visitor.visit(lhsEntry)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NSubtractionAssignment(NAssignment):
# long lineNr;
# long rowNr;
# ArrayList<NLValueContainer> leftHandSide;
# NExpression value;
def __init__(
self,
lineNr,
rowNr,
leftHandSide, # should have at least 1 element
value
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftHandSide = leftHandSide
self.value = value
def print_it(self):
self.leftHandSide[0].print_it()
for i in range(1, len(self.leftHandSide)):
print(", ", end='')
self.leftHandSide[i].print_it()
print(" -= ", end='')
self.value.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for lhsEntry in self.leftHandSide:
success = visitor.visit(lhsEntry)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NMultiplicationAssignment(NAssignment):
# long lineNr;
# long rowNr;
# ArrayList<NLValueContainer> leftHandSide;
# NExpression value;
    def __init__(
self,
lineNr,
rowNr,
leftHandSide, # should have at least 1 element
value
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftHandSide = leftHandSide
self.value = value
def print_it(self):
self.leftHandSide[0].print_it()
for i in range(1, len(self.leftHandSide)):
print(", ", end='')
self.leftHandSide[i].print_it()
print(" *= ", end='')
self.value.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for lhsEntry in self.leftHandSide:
success = visitor.visit(lhsEntry)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NDivisionAssignment(NAssignment):
# long lineNr;
# long rowNr;
# ArrayList<NLValueContainer> leftHandSide;
# NExpression value;
def __init__(
self,
lineNr,
rowNr,
leftHandSide, # should have at least 1 element
value
):
self.lineNr = lineNr
self.rowNr = rowNr
self.leftHandSide = leftHandSide
self.value = value
def print_it(self):
self.leftHandSide[0].print_it()
for i in range(1, len(self.leftHandSide)):
print(", ", end='')
self.leftHandSide[i].print_it()
print(" /= ", end='')
        self.value.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for lhsEntry in self.leftHandSide:
success = visitor.visit(lhsEntry)
if success == False:
return False
success = visitor.visit(self.value)
if success == False:
return False
return True
class NLoopStatement(NStatement):
# long lineNr;
# long rowNr;
# NBlock block;
# NIdentifier labelOrNull;
def __init__(
self,
lineNr,
rowNr,
block,
labelOrNull
):
self.lineNr = lineNr
self.rowNr = rowNr
self.block = block
self.labelOrNull = labelOrNull
def print_it(self):
print("loop ", end='')
if not self.labelOrNull is None:
print("label ", end='')
self.labelOrNull.print_it()
print(" ", end='')
self.block.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.block)
if success == False:
return False
        if not self.labelOrNull is None:
            success = visitor.visit(self.labelOrNull)
            if success == False:
                return False
        return True
class NRange:
# long lineNr;
# long rowNr;
# NIdentifier counterName;
# NType counterType;
# NExpression rangeFrom;
# boolean isDownto;
# NExpression rangeTo;
def __init__(
self,
lineNr,
rowNr,
counterName,
counterType,
rangeFrom,
isDownto,
rangeTo
):
self.lineNr = lineNr
self.rowNr = rowNr
self.counterName = counterName
self.counterType = counterType
self.rangeFrom = rangeFrom
self.isDownto = isDownto
self.rangeTo = rangeTo
def print_it(self):
self.counterName.print_it()
print(" : ", end='')
self.counterType.print_it()
print(" = ", end='')
self.rangeFrom.print_it()
if self.isDownto:
print(" downto ", end='')
else:
print(" to ", end='')
self.rangeTo.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.counterName)
if success == False:
return False
success = visitor.visit(self.counterType)
if success == False:
return False
success = visitor.visit(self.rangeFrom)
if success == False:
return False
success = visitor.visit(self.rangeTo)
if success == False:
return False
return True
class NIteration:
# abstract void print();
# abstract long getLineNr();
# abstract long getRowNr();
# abstract boolean visit_children(AbstractASTVisitor visitor);
pass
class NIterationIn(NIteration):
# long lineNr;
# long rowNr;
# NIdentifier itName;
# NType itTypeOrNull;
# NExpression arrayExpression;
# NExpression indexfactorOrNull;
# NExpression indexoffsetOrNull;
def __init__(
self,
lineNr,
rowNr,
itName,
itTypeOrNull,
arrayExpression,
indexfactorOrNull,
indexoffsetOrNull
):
self.lineNr = lineNr
self.rowNr = rowNr
self.itName = itName
self.itTypeOrNull = itTypeOrNull
self.arrayExpression = arrayExpression
self.indexfactorOrNull = indexfactorOrNull
self.indexoffsetOrNull = indexoffsetOrNull
def print_it(self):
self.itName.print_it()
if not self.itTypeOrNull is None:
print(" : ", end='')
self.itTypeOrNull.print_it()
print(" in ", end='')
self.arrayExpression.print_it()
if not self.indexfactorOrNull is None:
print(" indexfactor ", end='')
self.indexfactorOrNull.print_it()
if not self.indexoffsetOrNull is None:
print(" indexoffset ", end='')
self.indexoffsetOrNull.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.itName)
if success == False:
return False
if not self.itTypeOrNull is None:
success = visitor.visit(self.itTypeOrNull)
if success == False:
return False
success = visitor.visit(self.arrayExpression)
if success == False:
return False
if not self.indexfactorOrNull is None:
success = visitor.visit(self.indexfactorOrNull)
if success == False:
return False
if not self.indexoffsetOrNull is None:
success = visitor.visit(self.indexoffsetOrNull)
if success == False:
return False
return True
class NIterationOver(NIteration):
# long lineNr;
# long rowNr;
# NIdentifier itName;
# NType itTypeOrNull;
# NLValueContainer arrayLValue;
# NExpression indexfactorOrNull;
# NExpression indexoffsetOrNull;
def __init__(
self,
lineNr,
rowNr,
itName,
itTypeOrNull,
arrayLValue,
indexfactorOrNull,
indexoffsetOrNull
):
self.lineNr = lineNr
self.rowNr = rowNr
self.itName = itName
self.itTypeOrNull = itTypeOrNull
self.arrayLValue = arrayLValue
self.indexfactorOrNull = indexfactorOrNull
self.indexoffsetOrNull = indexoffsetOrNull
def print_it(self):
self.itName.print_it()
if not self.itTypeOrNull is None:
print(" : ", end='')
self.itTypeOrNull.print_it()
print(" over ", end='')
self.arrayLValue.print_it()
if not self.indexfactorOrNull is None:
print(" indexfactor ", end='')
self.indexfactorOrNull.print_it()
if not self.indexoffsetOrNull is None:
print(" offsetvalue ", end='')
self.indexoffsetOrNull.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.itName)
if success == False:
return False
if not self.itTypeOrNull is None:
success = visitor.visit(self.itTypeOrNull)
if success == False:
return False
success = visitor.visit(self.arrayLValue)
if success == False:
return False
if not self.indexfactorOrNull is None:
success = visitor.visit(self.indexfactorOrNull)
if success == False:
return False
if not self.indexoffsetOrNull is None:
success = visitor.visit(self.indexoffsetOrNull)
if success == False:
return False
return True
class NForStatement(NStatement): # check this during validation so that there is either range or at least 1 iteration...
# long lineNr;
# long rowNr;
# NRange rangeOrNull;
# ArrayList<NIteration> iterations;
# NBlock block;
# NIdentifier labelOrNull;
def __init__(
self,
lineNr,
rowNr,
rangeOrNull,
iterations,
block,
labelOrNull
):
self.lineNr = lineNr
self.rowNr = rowNr
self.rangeOrNull = rangeOrNull
self.iterations = iterations
self.block = block
self.labelOrNull = labelOrNull
def print_it(self):
print("for ", end='')
if not self.labelOrNull is None:
print("label ", end='')
self.labelOrNull.print_it()
print(" ", end='')
if not self.rangeOrNull is None:
self.rangeOrNull.print_it()
if len(self.iterations) > 0:
print(", ", end='')
if len(self.iterations) > 0:
self.iterations[0].print_it()
for i in range(1, len(self.iterations)):
print(", ", end='')
self.iterations[i].print_it()
self.block.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
if not self.rangeOrNull is None:
success = visitor.visit(self.rangeOrNull)
if success == False:
return False
for iteration in self.iterations:
success = visitor.visit(iteration)
if success == False:
return False
success = visitor.visit(self.block)
if success == False:
return False
if not self.labelOrNull is None:
success = visitor.visit(self.labelOrNull)
if success == False:
return False
return True
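# Hedged sketch (editor's addition): the comment on NForStatement says a
# validation pass must ensure there is either a range or at least one
# iteration. That rule could be checked roughly like this:
def _example_for_statement_is_valid(for_statement):
    return not (for_statement.rangeOrNull is None and len(for_statement.iterations) == 0)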
class NFunctionCallStatement(NStatement):
# long lineNr;
# long rowNr;
# NFunctionCall functionCall;
def __init__(
self,
lineNr,
rowNr,
functionCall
):
self.lineNr = lineNr
self.rowNr = rowNr
self.functionCall = functionCall
def print_it(self):
self.functionCall.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.functionCall)
if success == False:
return False
return True
class NReturnStatement(NStatement):
# long lineNr;
# long rowNr;
# ArrayList<NExpression> returnExpressions;
def __init__(
self,
lineNr,
rowNr,
returnExpressions
):
self.lineNr = lineNr
self.rowNr = rowNr
self.returnExpressions = returnExpressions
def print_it(self):
print("return", end='')
if len(self.returnExpressions) > 0:
print(" ", end='')
self.returnExpressions[0].print_it()
for i in range(1, len(self.returnExpressions)):
print(", ", end='')
self.returnExpressions[i].print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for returnExpression in self.returnExpressions:
success = visitor.visit(returnExpression)
if success == False:
return False
return True
class NBreakStatement(NStatement):
# long lineNr;
# long rowNr;
# NIdentifier labelOrNull;
def __init__(
self,
lineNr,
rowNr,
labelOrNull
):
self.lineNr = lineNr
self.rowNr = rowNr
self.labelOrNull = labelOrNull
def print_it(self):
print("break", end='')
if not self.labelOrNull is None:
print(" label ", end='')
self.labelOrNull.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
if not self.labelOrNull is None:
success = visitor.visit(self.labelOrNull)
if success == False:
return False
return True
class NContinueStatement(NStatement):
# long lineNr;
# long rowNr;
# NIdentifier labelOrNull;
def __init__(
self,
lineNr,
rowNr,
labelOrNull
):
self.lineNr = lineNr
self.rowNr = rowNr
self.labelOrNull = labelOrNull
def print_it(self):
print("continue", end='')
if not self.labelOrNull is None:
print(" label ", end='')
self.labelOrNull.print_it()
print("; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
if not self.labelOrNull is None:
success = visitor.visit(self.labelOrNull)
if success == False:
return False
return True
class NSwitchNormalCase:
# long lineNr;
# long rowNr;
# ArrayList<NExpression> caseValues; // at least one of these!
# NBlock block;
def __init__(self, lineNr, rowNr, caseValues, block):
self.lineNr = lineNr
self.rowNr = rowNr
self.caseValues = caseValues
self.block = block
def print_it(self):
print("case ", end='')
if len(self.caseValues) == 0:
print("MISSING CASE VALUE!!!", end='');
else:
self.caseValues[0].print_it()
for i in range(1, len(self.caseValues)):
print(", ", end='')
self.caseValues[i].print_it()
print(" ", end='')
self.block.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for caseValue in self.caseValues:
success = visitor.visit(caseValue)
if success == False:
return False
success = visitor.visit(self.block)
if success == False:
return False
return True
class NContenttypeNormalCase:
# long lineNr;
# long rowNr;
# ArrayList<NType> caseTypes; // should have at least 1 element
# NBlock block;
def __init__(
self,
lineNr,
rowNr,
caseTypes,
block
):
self.lineNr = lineNr
self.rowNr = rowNr
self.caseTypes = caseTypes
self.block = block
def print_it(self):
print("case ", end='')
if len(self.caseTypes) == 0:
print("MISSING TYPE CASE!!!", end='');
else:
self.caseTypes[0].print_it()
for i in range(1, len(self.caseTypes)):
print(", ", end='')
self.caseTypes[i].print_it()
print(" ", end='')
self.block.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
for caseType in self.caseTypes:
success = visitor.visit(caseType)
if success == False:
return False
success = visitor.visit(self.block)
if success == False:
return False
return True
class NSwitchStatement(NStatement):
# long lineNr;
# long rowNr;
# NExpression switchValue
# ArrayList<NSwitchNormalCase> cases;
# NBlock defaultCaseOrNull;
def __init__(self, lineNr, rowNr, switchValue, cases, defaultCaseOrNull):
self.lineNr = lineNr
self.rowNr = rowNr
self.switchValue = switchValue
self.cases = cases
self.defaultCaseOrNull = defaultCaseOrNull
def print_it(self):
print("switch ", end='')
self.switchValue.print_it()
print(" ", end='')
for c in self.cases:
c.print_it()
if not self.defaultCaseOrNull is None:
print("default ", end='')
self.defaultCaseOrNull.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.switchValue)
if success == False:
return False
for case in self.cases:
success = visitor.visit(case)
if success == False:
return False
if not self.defaultCaseOrNull is None:
success = visitor.visit(self.defaultCaseOrNull)
if success == False:
return False
return True
class NContenttypeStatement(NStatement):
# long lineNr;
# long rowNr;
# NExpression switchValue;
# ArrayList<NContenttypeNormalCase> cases; // can have 0 length!
# NBlock defaultCaseOrNull;
# but must have either default or 1 normal case, check this during validation!
def __init__(
self,
lineNr,
rowNr,
switchValue,
cases,
defaultCaseOrNull
):
self.lineNr = lineNr
self.rowNr = rowNr
self.switchValue = switchValue
self.cases = cases
self.defaultCaseOrNull = defaultCaseOrNull
def print_it(self):
print("contenttype ", end='')
self.switchValue.print_it()
print(" ", end='')
for c in self.cases:
c.print_it()
if not self.defaultCaseOrNull is None:
print("default ", end='')
self.defaultCaseOrNull.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
success = visitor.visit(self.switchValue)
if success == False:
return False
for case in self.cases:
success = visitor.visit(case)
if success == False:
return False
if not self.defaultCaseOrNull is None:
success = visitor.visit(self.defaultCaseOrNull)
if success == False:
return False
return True
class NImportStatement:
# long lineNr;
# long rowNr;
# boolean isPrefixImport;
# String path;
def __init__(self, lineNr, rowNr, isPrefixImport, path):
self.lineNr = lineNr
self.rowNr = rowNr
self.isPrefixImport = isPrefixImport
self.path = path
def print_it(self):
if self.isPrefixImport:
print("prefiximport\"", end='')
else:
print("import\"", end='')
print(self.path, end='') # will print newlines and escapes in a funny way though...
print("\"; ", end='')
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
class NProgram:
# long lineNr;
# long rowNr;
# ArrayList<NImportStatement> importStatements;
# ArrayList<NStatement> statements;
def __init__(
self,
lineNr,
rowNr,
importStatements,
statements
):
self.lineNr = lineNr
self.rowNr = rowNr
self.importStatements = importStatements
self.statements = statements
def print_it(self):
for importStatement in self.importStatements:
importStatement.print_it()
for statement in self.statements:
statement.print_it()
def get_line_nr(self):
return self.lineNr
def get_row_nr(self):
return self.rowNr
def visit_children(self, visitor):
# we don't bother about the import statements!
for statement in self.statements:
success = visitor.visit(statement)
if success == False:
return False
return True
######################################### VISITOR #########################################
class AbstractASTVisitor:
def visit(self, node):
raise NotImplementedError(NOT_IMPLEMENTED)
class AbstractASTExpressionVisitor(AbstractASTVisitor):
def visit_expression(self, node):
raise NotImplementedError(NOT_IMPLEMENTED)
def visit(self, node):
if (isinstance(node, NProgram) or
isinstance(node, NStatement) or isinstance(node, NLValueContainer) or isinstance(node, NRange) or isinstance(node, NIteration) or
isinstance(node, NLValueOrVariableDeclaration) or isinstance(node, NElseIfClause) or
isinstance(node, NSwitchNormalCase) or isinstance(node, NContenttypeNormalCase)
):
return node.visit_children(self)
elif isinstance(node, NExpression):
return self.visit_expression(node)
elif isinstance(node, NArrayIndexingIndex) or isinstance(node, NArg) or isinstance(node, NSWITCHNormalCase) or isinstance(node, NCONTENTTYPENormalCase):
# we only reach these through expressions though
return node.visit_children(self)
else:
return True
class AbstractASTTypeVisitor(AbstractASTVisitor):
def visit_type(self, node):
raise NotImplementedError(NOT_IMPLEMENTED)
def visit(self, node):
if (isinstance(node, NProgram) or
isinstance(node, NStatement) or isinstance(node, NLValueContainer) or isinstance(node, NRange) or isinstance(node, NIteration) or
isinstance(node, NLValueOrVariableDeclaration) or isinstance(node, NElseIfClause) or
isinstance(node, NSwitchNormalCase) or isinstance(node, NContenttypeNormalCase) or
isinstance(node, NParam) or
isinstance(node, NTypeClarifiedExpression) or isinstance(node, NVariantBoxCastExpression) or
isinstance(node, NTypeClarificationIndex) or isinstance(node, NVariantBoxCastIndex)
):
return node.visit_children(self)
elif isinstance(node, NType):
return self.visit_type(node)
else:
return True
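# Hedged example (editor's addition): a minimal concrete expression visitor that
# counts NFunctionCall nodes in a subtree. Returning True keeps the traversal
# alive, matching the convention used by visit_children throughout this file.
class _ExampleFunctionCallCounter(AbstractASTExpressionVisitor):
    def __init__(self):
        self.count = 0
    def visit_expression(self, node):
        if isinstance(node, NFunctionCall):
            self.count += 1
        return node.visit_children(self)  # keep descending into subexpressions
# Usage sketch: counter = _ExampleFunctionCallCounter(); counter.visit(program); counter.count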
|
LJMNilsson/memtran
|
src/ast.py
|
Python
|
gpl-3.0
| 106,488
|
[
"VisIt"
] |
12d4436c5b29ded605c7af7783211e61196bc5364176b7142b66fad05fd9470a
|
#! /usr/bin/env python
"""
Protein-Ligand Interaction Profiler - Analyze and visualize protein-ligand interactions in PDB files.
plipcmd - Main script for PLIP command line execution.
Copyright 2014-2015 Sebastian Salentin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
modified by Li Tuan
"""
# Compatibility
from __future__ import print_function
# Own modules
try:
from plip.modules.preparation import *
from plip.modules.visualize import visualize_in_pymol
from plip.modules.plipremote import VisualizerData
from plip.modules.report import StructureReport,__version__
from plip.modules import config
from plip.modules.mp import parallel_fn
from plip.modules.webservices import check_pdb_status, fetch_pdb
    print('import se')
except ImportError:
from modules.preparation import *
from modules.visualize import visualize_in_pymol
from modules.plipremote import VisualizerData
from modules.report import StructureReport, __version__
from modules import config
from modules.mp import parallel_fn
from modules.webservices import check_pdb_status, fetch_pdb
# Python standard library
import sys
import argparse
from argparse import ArgumentParser
import time
import multiprocessing
import json
# External libraries
import lxml.etree as et
descript = "Protein-Ligand Interaction Profiler (PLIP) v%s " \
"is a command-line based tool to analyze interactions in a protein-ligand complex. " \
"If you are using PLIP in your work, please cite: " \
"Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler. " \
"Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315" % __version__
def threshold_limiter(aparser, arg):
arg = float(arg)
if arg <= 0:
aparser.error("All thresholds have to be values larger than zero.")
return arg
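# Hedged usage sketch (editor's addition): threshold_limiter is an argparse-style
# validator. It could be wired to a parser roughly like this (the option name is
# illustrative only):
# parser = ArgumentParser(description=descript)
# parser.add_argument('--bs_dist', type=lambda val: threshold_limiter(parser, val))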
def process_pdb(pdbfile, outpath):
    """Analysis of a single PDB file. Can generate textual reports, XML, PyMOL session files, and images as output."""
    write_message(outpath)
startmessage = '\nStarting analysis of %s\n' % pdbfile.split('/')[-1]
write_message(startmessage)
write_message('='*len(startmessage)+'\n')
mol = PDBComplex()
mol.output_path = outpath
mol.load_pdb(pdbfile)
# #@todo Offers possibility for filter function from command line (by ligand chain, position, hetid)
for ligand in mol.ligands:
mol.characterize_complex(ligand)
create_folder_if_not_exists(outpath)
# Generate the report files
streport = StructureReport(mol)
config.MAXTHREADS = min(config.MAXTHREADS, len(mol.interaction_sets))
######################################
# PyMOL Visualization (parallelized) #
######################################
if config.PYMOL or config.PICS:
complexes = [VisualizerData(mol, site) for site in sorted(mol.interaction_sets)
if not len(mol.interaction_sets[site].interacting_res) == 0]
if config.MAXTHREADS > 1:
write_message('\nGenerating visualizations in parallel on %i cores ...' % config.MAXTHREADS)
parfn = parallel_fn(visualize_in_pymol)
parfn(complexes, processes=config.MAXTHREADS)
else:
[visualize_in_pymol(plcomplex) for plcomplex in complexes]
if config.XML: # Generate report in xml format
streport.write_xml()
if config.TXT: # Generate report in txt (rst) format
streport.write_txt()
def download_structure(inputpdbid):
"""Given a PDB ID, downloads the corresponding PDB structure.
Checks for validity of ID and handles error while downloading.
Returns the path of the downloaded file."""
try:
if len(inputpdbid) != 4 or extract_pdbid(inputpdbid.lower()) == 'UnknownProtein':
sysexit(3, 'Invalid PDB ID (Wrong format)\n')
pdbfile, pdbid = fetch_pdb(inputpdbid.lower())
pdbpath = tilde_expansion('%s/%s.pdb' % (config.BASEPATH.rstrip('/'), pdbid))
create_folder_if_not_exists(config.BASEPATH)
with open(pdbpath, 'w') as g:
g.write(pdbfile)
write_message('file downloaded as %s\n\n' % pdbpath)
return pdbpath, pdbid
    except ValueError:  # Invalid PDB ID, cannot fetch from RCSB server
sysexit(3, 'Invalid PDB ID (Entry does not exist)\n')
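# Hedged usage sketch (editor's addition): fetching a structure by its
# four-letter PDB ID; the ID below is illustrative.
# pdbpath, pdbid = download_structure('1arj')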
def remove_duplicates(slist):
"""Checks input lists for duplicates and returns
a list with unique entries"""
unique = list(set(slist))
difference = len(slist) - len(unique)
if difference == 1:
write_message("Removed one duplicate entry from input list.\n")
if difference > 1:
write_message("Removed %i duplicate entries from input list.\n" % difference)
return unique
def plip_main(inputstructs, inputpdbids):
"""Main function. Calls functions for processing, report generation and visualization."""
pdbid, pdbpath = None, None
# #@todo For multiprocessing, implement better stacktracing for errors
# Print title and version
title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
write_message('\n' + '*' * len(title) + '\n')
write_message(title)
write_message('\n' + '*' * len(title) + '\n\n')
if inputstructs is not None: # Process PDB file(s)
num_structures = len(inputstructs)
inputstructs = remove_duplicates(inputstructs)
for inputstruct in inputstructs:
if os.path.getsize(inputstruct) == 0:
sysexit(2, 'Empty PDB file\n') # Exit if input file is empty
if num_structures > 1:
basename = inputstruct.split('.')[0].split('/')[-1]
config.OUTPATH = '/'.join([config.BASEPATH, basename])
process_pdb(inputstruct, config.OUTPATH)
    else:  # Try to fetch the current PDB structure(s) directly from the RCSB server
num_pdbids = len(inputpdbids)
        inputpdbids = remove_duplicates(inputpdbids)
for inputpdbid in inputpdbids:
pdbpath, pdbid = download_structure(inputpdbid)
if num_pdbids > 1:
# config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
config.OUTPATH = '/'.join([config.BASEPATH, pdbid.upper()])
process_pdb(pdbpath, config.OUTPATH)
if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
if config.BASEPATH in ['.', './']:
write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
else:
write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH)
# if __name__ == '__main__':
##############################
# Parse command line arguments
##############################
def myplip(pdbf):
    # pdbf: path to a single PDB file, e.g. '/some/dir/1nex.pdb'
    file_path, file_name = os.path.split(pdbf)
    pathname = file_path + '/' + file_name.split('.')[0].upper()
config.VERBOSE = True
config.DEBUG = False
config.MAXTHREADS = 1
config.XML = True
config.TXT = True
config.PICS = True
config.PYMOL = True
config.OUTPATH = pathname
config.OUTPATH = tilde_expansion("".join([config.OUTPATH, '/'])
if not config.OUTPATH.endswith('/') else config.OUTPATH)
config.BASEPATH = config.OUTPATH # Used for batch processing
# config.BASEPATH = 'basetest'
config.BREAKCOMPOSITE = True
# config.ALTLOC = arguments.altlocation
# config.PEPTIDES = False
# config.INTRA = arguments.intra
# config.NOFIX = arguments.nofix
config.KEEPMOD = True
# expanded_path = tilde_expansion(arguments.input) if arguments.input is not None else None
# plip_main(expanded_path, arguments.pdbid) # Start main script
plip_main([pdbf], None) # Start main script,pdbids is a list
if __name__ == "__main__":
    for root, dirs, files in os.walk(sys.argv[-1]):
        for f in files:
            if f.endswith('.pdb'):
                f = os.path.join(root, f)
                myplip(f)
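# Hedged usage note (editor's addition): invoked with a directory as the last
# argument, the script walks it recursively and profiles every .pdb file found,
# e.g. (the path is illustrative):
#     python plip_phos_local.py /data/pdb_structures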
|
lituan/tools
|
plip/plip_phos_local.py
|
Python
|
cc0-1.0
| 8,562
|
[
"PyMOL"
] |
247c2d9bdeb99c6d5398678d2f64c126708b6425c0d88e86c35caf4740e29d79
|
"""
Signal handling functions for use with external commerce service.
"""
from __future__ import unicode_literals
import json
import logging
from urlparse import urljoin
import requests
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from commerce.models import CommerceConfiguration
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, is_commerce_service_configured
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from request_cache.middleware import RequestCache
from student.models import REFUND_ORDER
log = logging.getLogger(__name__)
# pylint: disable=unused-argument
@receiver(REFUND_ORDER)
def handle_refund_order(sender, course_enrollment=None, **kwargs):
"""
Signal receiver for unenrollments, used to automatically initiate refunds
when applicable.
"""
if not is_commerce_service_configured():
return
if course_enrollment and course_enrollment.refundable():
try:
request_user = get_request_user() or course_enrollment.user
if isinstance(request_user, AnonymousUser):
# Assume the request was initiated via server-to-server
# API call (presumably Otto). In this case we cannot
# construct a client to call Otto back anyway, because
# the client does not work anonymously, and furthermore,
# there's certainly no need to inform Otto about this request.
return
refund_seat(course_enrollment)
except: # pylint: disable=bare-except
# don't assume the signal was fired with `send_robust`.
# avoid blowing up other signal handlers by gracefully
# trapping the Exception and logging an error.
log.exception(
"Unexpected exception while attempting to initiate refund for user [%s], course [%s]",
course_enrollment.user.id,
course_enrollment.course_id,
)
def get_request_user():
"""
Helper to get the authenticated user from the current HTTP request (if
applicable).
If the requester of an unenrollment is not the same person as the student
being unenrolled, we authenticate to the commerce service as the requester.
"""
request = RequestCache.get_current_request()
return getattr(request, 'user', None)
def refund_seat(course_enrollment):
"""
Attempt to initiate a refund for any orders associated with the seat being unenrolled, using the commerce service.
Arguments:
course_enrollment (CourseEnrollment): a student enrollment
Returns:
A list of the external service's IDs for any refunds that were initiated
(may be empty).
Raises:
exceptions.SlumberBaseException: for any unhandled HTTP error during communication with the E-Commerce Service.
exceptions.Timeout: if the attempt to reach the commerce service timed out.
"""
User = get_user_model() # pylint:disable=invalid-name
course_key_str = unicode(course_enrollment.course_id)
enrollee = course_enrollment.user
service_user = User.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
api_client = ecommerce_api_client(service_user)
log.info('Attempting to create a refund for user [%s], course [%s]...', enrollee.id, course_key_str)
refund_ids = api_client.refunds.post({'course_id': course_key_str, 'username': enrollee.username})
if refund_ids:
log.info('Refund successfully opened for user [%s], course [%s]: %r', enrollee.id, course_key_str, refund_ids)
config = CommerceConfiguration.current()
if config.enable_automatic_refund_approval:
refunds_requiring_approval = []
for refund_id in refund_ids:
try:
# NOTE: Approve payment only because the user has already been unenrolled. Additionally, this
# ensures we don't tie up an additional web worker when the E-Commerce Service tries to unenroll
# the learner
api_client.refunds(refund_id).process.put({'action': 'approve_payment_only'})
log.info('Refund [%d] successfully approved.', refund_id)
except: # pylint: disable=bare-except
log.exception('Failed to automatically approve refund [%d]!', refund_id)
refunds_requiring_approval.append(refund_id)
else:
refunds_requiring_approval = refund_ids
if refunds_requiring_approval:
# XCOM-371: this is a temporary measure to suppress refund-related email
# notifications to students and support for free enrollments. This
# condition should be removed when the CourseEnrollment.refundable() logic
# is updated to be more correct, or when we implement better handling (and
# notifications) in Otto for handling reversal of $0 transactions.
if course_enrollment.mode != 'verified':
# 'verified' is the only enrollment mode that should presently
# result in opening a refund request.
log.info(
'Skipping refund email notification for non-verified mode for user [%s], course [%s], mode: [%s]',
course_enrollment.user.id,
course_enrollment.course_id,
course_enrollment.mode,
)
else:
try:
send_refund_notification(course_enrollment, refunds_requiring_approval)
except: # pylint: disable=bare-except
# don't break, just log a warning
log.warning('Could not send email notification for refund.', exc_info=True)
else:
log.info('No refund opened for user [%s], course [%s]', enrollee.id, course_key_str)
return refund_ids
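# Illustrative sketch only (sender value assumed): an unenrollment flow
# elsewhere in the LMS fires the signal, which handle_refund_order() above
# receives and, when applicable, turns into a refund_seat() call:
#
#     REFUND_ORDER.send(sender=None, course_enrollment=enrollment)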
def create_zendesk_ticket(requester_name, requester_email, subject, body, tags=None):
""" Create a Zendesk ticket via API. """
if not (settings.ZENDESK_URL and settings.ZENDESK_USER and settings.ZENDESK_API_KEY):
log.debug('Zendesk is not configured. Cannot create a ticket.')
return
# Copy the tags to avoid modifying the original list.
tags = list(tags or [])
tags.append('LMS')
# Remove duplicates
tags = list(set(tags))
data = {
'ticket': {
'requester': {
'name': requester_name,
'email': requester_email
},
'subject': subject,
'comment': {'body': body},
'tags': tags
}
}
# Encode the data to create a JSON payload
payload = json.dumps(data)
# Set the request parameters
url = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')
user = '{}/token'.format(settings.ZENDESK_USER)
pwd = settings.ZENDESK_API_KEY
headers = {'content-type': 'application/json'}
try:
response = requests.post(url, data=payload, auth=(user, pwd), headers=headers)
# Check for HTTP codes other than 201 (Created)
if response.status_code != 201:
log.error('Failed to create ticket. Status: [%d], Body: [%s]', response.status_code, response.content)
else:
log.debug('Successfully created ticket.')
except Exception: # pylint: disable=broad-except
log.exception('Failed to create ticket.')
return
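# Example invocation (values assumed), producing a POST to
# <ZENDESK_URL>/api/v2/tickets.json with token-based basic auth:
#
#     create_zendesk_ticket(
#         requester_name='Jane Doe',
#         requester_email='jane@example.com',
#         subject='[Refund] User-Requested Refund',
#         body='A refund request has been initiated...',
#         tags=['auto_refund'],
#     )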
def generate_refund_notification_body(student, refund_ids): # pylint: disable=invalid-name
""" Returns a refund notification message body. """
msg = _(
"A refund request has been initiated for {username} ({email}). "
"To process this request, please visit the link(s) below."
).format(username=student.username, email=student.email)
ecommerce_url_root = configuration_helpers.get_value(
'ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT,
)
refund_urls = [urljoin(ecommerce_url_root, '/dashboard/refunds/{}/'.format(refund_id))
for refund_id in refund_ids]
return '{msg}\n\n{urls}'.format(msg=msg, urls='\n'.join(refund_urls))
def send_refund_notification(course_enrollment, refund_ids):
""" Notify the support team of the refund request. """
tags = ['auto_refund']
if theming_helpers.is_request_in_themed_site():
# this is not presently supported with the external service.
raise NotImplementedError("Unable to send refund processing emails to support teams.")
student = course_enrollment.user
subject = _("[Refund] User-Requested Refund")
body = generate_refund_notification_body(student, refund_ids)
requester_name = student.profile.name or student.username
create_zendesk_ticket(requester_name, student.email, subject, body, tags)
|
pepeportela/edx-platform
|
lms/djangoapps/commerce/signals.py
|
Python
|
agpl-3.0
| 9,151
|
[
"VisIt"
] |
ef16f93a5b4809fea36a66d2a12f063c5e502944c48c0ed69d95b6cc6302fd2c
|
import os, re
import subprocess
from multiprocessing import Pool
import Config.ADNI_RecurserConfig as arc
import Config.StudyConfig as sc
from Recursor.ScanSession import ScanSession
from Utils.PipelineLogger import PipelineLogger
class DIANRecursor:
def __init__(self, study, recurse_folder):
self.study = study
self.root_folder = recurse_folder
self.only_allowed_scan_types_to_move = ['MPR', 'MPRAGE', 'PIB', 'FDG', 'TAU', 'IRFSPGR', 'FSPGR']
self.pool = Pool()
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['pool']
return self_dict
def __setstate__(self, state):
self.__dict__.update(state)
def recurse(self):
directories_list, filenames = self.listRootFolderContents()
instancesList = []
sessions = self.pool.map(self.createNewScanSession, zip(directories_list, filenames))
for newSession in sessions:
if newSession:
instancesList.append(newSession)
return instancesList
def get_visit_info(self, file_name):
cmd = "/data/data02/sulantha/bin/gdcmbin/bin/gdcmdump {0} ".format(file_name)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tmp = proc.stdout.read().decode("utf-8")
for line in tmp.split("\n"):
if '(0010,0020)' in line:
pid = line[line.find('[') + len('['):line.find(']')]
visit = pid.split('_')[1]
                if len(visit) == 3 and visit.startswith('v'):
return visit
else:
return None
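    # Illustrative example (patient-ID layout assumed): a PID such as
    # '1234_v02_FDG' yields visit == 'v02'; any other layout returns None.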
def getScanDateTimeDCMHeader(self, file_name):
cmd = "/data/data02/sulantha/bin/gdcmbin/bin/gdcmdump {0} ".format(file_name)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tmp = proc.stdout.read().decode("utf-8")
time_str = None
date_str = None
for line in tmp.split("\n"):
if '(0008,0020)' in line:
date_str = line[line.find('[') + len('['):line.find(']')]
for line in tmp.split("\n"):
if '(0008,0030)' in line:
time_str = line[line.find('[') + len('['):line.find(']')]
        if time_str is not None:
            time_str = time_str.strip().split('.')[0]
return date_str, time_str
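    # Illustrative sketch of the bracket parsing above (gdcmdump output format
    # assumed): a line such as
    #     (0008,0020) DA [20150312]
    # yields date_str == '20150312', and
    #     (0008,0030) TM [101530.000000]
    # yields time_str == '101530' once the fractional seconds are stripped.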
def createNewScanSession(self, tup):
down_most_folder, filelist = tup
# Return parts of the folder path, the ones of interest
        folder = down_most_folder.replace(self.root_folder, "")
        filelist = [x for x in filelist if 'xml' not in x]
if len(filelist) == 0: # If no file in folder, ignore and skip
return None
try:
folder_parts = folder.split("/") # List containing each parts/folders of the full path
filename_parts = filelist[0].split(".") # Takes the first filename and create a list of its parts
rid = filename_parts[0]
file_type = self.determineExtension(filename_parts)
scan_date_str, scan_time_str = self.getScanDateTimeDCMHeader('{0}/{1}'.format(down_most_folder, filelist[0]))
scan_date = '{0}-{1}-{2}'.format(scan_date_str[:4], scan_date_str[4:6], scan_date_str[6:8])
scan_time = '{0}:{1}:{2}'.format(scan_time_str[:2], scan_time_str[2:4], scan_time_str[4:6])
visit = self.get_visit_info('{0}/{1}'.format(down_most_folder, filelist[0]))
if not visit:
return None
scan_type = self.determineScanType('{0}/{1}'.format(down_most_folder, filelist[0]))
if not scan_type or scan_type not in self.only_allowed_scan_types_to_move:
return None
i_identifier = '{0}{1}{2}{3}x{4}'.format(rid, scan_type, visit, scan_date_str,
re.sub(r'[\W_]+', '', folder_parts[-2]))
s_identifier = 'ySy'
download_folder = down_most_folder
raw_folder = '{0}/{1}/{2}/{3}/{4}_{5}_{6}/raw'.format(sc.studyDatabaseRootDict[self.study], self.study, scan_type, rid, scan_date, s_identifier, i_identifier)
except Exception as e:
PipelineLogger.log('root', 'exception', 'File recurse error on Folder - {0}, \n Filelist - {1}'.format(folder, filelist))
PipelineLogger.log('root', 'exception', 'Exception - {0}'.format(e))
return None
newScanSession = ScanSession(self.study, rid, scan_type, scan_date, scan_time,
s_identifier, i_identifier, download_folder, raw_folder, file_type)
return newScanSession
def determineExtension(self, filename):
fileEnding = filename[-1].split('.')[-1]
if fileEnding == "gz":
fileEnding = filename[-1].split('.', 1)[-1]
return arc.fileExtensionDict[fileEnding]
def getScanTypeFromPID(self, pid):
try:
st = pid.lower().split('_')[2]
if st in ['fdg', 'pib', 'tau']:
return st.upper()
else:
return 'unknown'
except:
return 'unknown'
def getDIAN_MR_ScanType(self, st):
if 'dti' in st.lower():
return 'DTI'
if 'mprage' in st.lower():
return 'MPRAGE'
if 'rsfmri' in st.lower() or 'rsf mri' in st.lower():
return 'rsFMRI'
if 'fmri' in st.lower():
return 'FMRI'
if 'ir-fspgr' in st.lower():
return 'IRFSPGR'
if 'fspgr' in st.lower():
return 'FSPGR'
if 't2' in st.lower():
return 'T2'
if 'perfusion_weighted' in st.lower():
return 'DTI'
if 'resting_state' in st.lower():
return 'rsFMRI'
if 'dwi' in st.lower():
return 'DTI'
if 'mpr' in st.lower():
return 'MPR'
def determineScanType(self, file_name):
cmd = "/data/data02/sulantha/bin/gdcmbin/bin/gdcmdump {0} ".format(file_name)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tmp = proc.stdout.read().decode("utf-8")
        scanTypeRaw = 'unknown'
        modality, img_type = '', ''  # defaults in case the expected DICOM tags are absent
for line in tmp.split("\n"):
if '(0008,0060)' in line:
modality = line[line.find('[')+len('['):line.find(']')]
break
for line in tmp.split("\n"):
if '(0008,0008)' in line:
img_type = line[line.find('[')+len('['):line.find(']')]
break
for line in tmp.split("\n"):
if '(0010,0020)' in line:
pid = line[line.find('[')+len('['):line.find(']')]
if 'fdg' in pid.lower() or 'pib' in pid.lower() or 'tau' in pid.lower():
if (modality == 'CT' or modality == 'PT') and img_type == r'ORIGINAL\PRIMARY':
return self.getScanTypeFromPID(pid)
else:
if modality == 'MR':
break
else:
return None
for line in tmp.split("\n"):
if '(0008,103e)' in line:
scanTypeRaw = line[line.find('[')+len('['):line.find(']')]
break
return self.getDIAN_MR_ScanType(scanTypeRaw)
def listRootFolderContents(self):
# Reach down-most directories and return a list
down_most_directories_list = []
filenames_list = []
for dirpath, dirnames, filenames in os.walk(self.root_folder):
if not dirnames: # Down-most directory
down_most_directories_list.append(dirpath)
filenames_list.append([x for x in filenames if x.endswith(arc.fileExtensionTuple)])
return down_most_directories_list, filenames_list
if __name__ == '__main__':
c = DIANRecursor('DIAN', '/data/data02/sulantha/DIAN_EXT_MR_DICOM/DIANDF')
k = c.recurse()
print(len(k))
s = set(k)
print(len(s))
|
sulantha2006/Processing_Pipeline
|
Recursor/DIAN/DIANRecursor.py
|
Python
|
apache-2.0
| 8,040
|
[
"VisIt"
] |
49dede003ed6618f83f9659d5ae339435f09b3ec7a18502425fdc345af468ca1
|
# Code from Chapter 6 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
# Demonstration of PCA and kernel PCA on the circular dataset
import pylab as pl
import numpy as np
import pca
import kernelpca
data = np.zeros((150,2))
theta = np.random.normal(0,np.pi,50)
r = np.random.normal(0,0.1,50)
data[0:50,0] = r*np.cos(theta)
data[0:50,1] = r*np.sin(theta)
theta = np.random.normal(0,np.pi,50)
r = np.random.normal(2,0.1,50)
data[50:100,0] = r*np.cos(theta)
data[50:100,1] = r*np.sin(theta)
theta = np.random.normal(0,np.pi,50)
r = np.random.normal(5,0.1,50)
data[100:150,0] = r*np.cos(theta)
data[100:150,1] = r*np.sin(theta)
pl.figure()
pl.plot(data[:50,0],data[:50,1],'ok')
pl.plot(data[50:100,0],data[50:100,1],'^k')
pl.plot(data[100:150,0],data[100:150,1],'vk')
pl.title('Original dataset')
x,y,evals,evecs = pca.pca(data,2)
pl.figure()
pl.plot(x[:50,0],x[:50,1],'ok')
pl.plot(x[50:100,0],x[50:100,1],'^k')
pl.plot(x[100:150,0],x[100:150,1],'vk')
pl.title('Reconstructed points after PCA')
pl.figure()
y = kernelpca.kernelpca(data,'gaussian',2)
pl.plot(y[:50,0],y[:50,1],'ok')
pl.plot(y[50:100,0],y[50:100,1],'^k')
pl.plot(y[100:150,0],y[100:150,1],'vk')
pl.title('Reconstructed points after kernel PCA')
pl.show()
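# A minimal sketch of the Gaussian (RBF) kernel matrix that kernel PCA
# eigendecomposes; the function name and sigma default are assumptions for
# illustration, and the kernelpca module shipped with the book may build it
# differently.
def gaussian_kernel(X, sigma=1.0):
    # Pairwise squared distances via ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 x_i.x_j
    sq = np.sum(X**2, axis=1)
    d2 = sq[:, None] + sq[None, :] - 2.0*np.dot(X, X.T)
    return np.exp(-d2/(2.0*sigma**2))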
|
Anderson-Lab/anderson-lab.github.io
|
csc_466_2021_spring/MLCode/Ch6/kpcademo.py
|
Python
|
mit
| 1,531
|
[
"Gaussian"
] |
6e6bdac58ce429f5fd570d7a2c8e29745b77101c86b77a3b8ad6f707b2e1f533
|
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from os import path
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
from Tools.GetEcmInfo import GetEcmInfo
from Tools.Hex2strColor import Hex2strColor
from Poll import Poll
from skin import parameters
caid_data = (
("0x100", "0x1ff", "Seca", "S", True),
("0x500", "0x5ff", "Via", "V", True),
("0x600", "0x6ff", "Irdeto", "I", True),
("0x900", "0x9ff", "NDS", "Nd", True),
("0xb00", "0xbff", "Conax", "Co", True),
("0xd00", "0xdff", "CryptoW", "Cw", True),
("0xe00", "0xeff", "PowerVU", "P", False),
("0x1000", "0x10FF", "Tandberg", "TB", False),
("0x1700", "0x17ff", "Beta", "B", True),
("0x1800", "0x18ff", "Nagra", "N", True),
("0x2600", "0x2600", "Biss", "Bi", False),
("0x2700", "0x2710", "Dre3", "D3", False),
("0x4ae0", "0x4ae1", "Dre", "D", False),
("0x4aee", "0x4aee", "BulCrypt", "B1", False),
("0x5581", "0x5581", "BulCrypt", "B2", False)
)
# stream type to codec map
codec_data = {
-1: "N/A",
0: "MPEG2",
1: "AVC",
2: "H263",
3: "VC1",
4: "MPEG4-VC",
5: "VC1-SM",
6: "MPEG1",
7: "HEVC",
8: "VP8",
9: "VP9",
10: "XVID",
11: "N/A 11",
12: "N/A 12",
13: "DIVX 3.11",
14: "DIVX 4",
15: "DIVX 5",
16: "AVS",
17: "N/A 17",
18: "VP6",
19: "N/A 19",
20: "N/A 20",
21: "SPARK",
}
# Dynamic range ("gamma") value to text
gamma_data = {
0: "SDR",
1: "HDR",
2: "HDR10",
3: "HLG",
}
def addspace(text):
if text:
text += " "
return text
class PliExtraInfo(Poll, Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.type = type
self.poll_interval = 1000
self.poll_enabled = True
self.info_fields = {
# Field combinations accessible from skin
"All": (
( # config.usage.show_cryptoinfo.value <= 0
"ProviderName",
"TransponderInfo",
"TransponderName",
"NewLine",
"CryptoBar",
"CryptoCurrentSource",
"NewLine",
"CryptoSpecial",
"VideoCodec",
"ResolutionString",
), ( # config.usage.show_cryptoinfo.value > 0
"ProviderName",
"TransponderInfo",
"TransponderName",
"NewLine",
"CryptoBar",
"CryptoSpecial",
"NewLine",
"PIDInfo",
"VideoCodec",
"ResolutionString",
)
),
"CryptoInfo": (
( # config.usage.show_cryptoinfo.value <= 0
"CryptoBar",
"CryptoCurrentSource",
"CryptoSpecial",
), ( # config.usage.show_cryptoinfo.value > 0
"CryptoBar",
"CryptoSpecial",
)
),
"ServiceInfo": (
"ProviderName",
"TunerSystem",
"TransponderFrequency",
"TransponderPolarization",
"TransponderSymbolRate",
"TransponderFEC",
"TransponderModulation",
"OrbitalPosition",
"TransponderName",
"VideoCodec",
"ResolutionString",
),
"TransponderInfo": (
( # not feraw
"StreamURLInfo",
),
( # feraw and "DVB-T" not in feraw.get("tuner_type", "")
"TunerSystem",
"TransponderFrequencyMHz",
"TransponderPolarization",
"TransponderSymbolRate",
"TransponderFEC",
"TransponderModulation",
"OrbitalPosition",
"TransponderInfoMisPls",
),
( # feraw and "DVB-T" in feraw.get("tuner_type", "")
"TunerSystem",
"TerrestrialChannelNumber",
"TransponderFrequencyMHz",
"TransponderPolarization",
"TransponderSymbolRate",
"TransponderFEC",
"TransponderModulation",
"OrbitalPosition",
)
),
"TransponderInfo2line": (
"ProviderName",
"TunerSystem",
"TransponderName",
"NewLine",
"TransponderFrequencyMHz",
"TransponderPolarization",
"TransponderSymbolRate",
"TransponderModulationFEC",
),
"User": (),
}
self.ca_table = (
("CryptoCaidSecaAvailable", "S", False),
("CryptoCaidViaAvailable", "V", False),
("CryptoCaidIrdetoAvailable", "I", False),
("CryptoCaidNDSAvailable", "Nd", False),
("CryptoCaidConaxAvailable", "Co", False),
("CryptoCaidCryptoWAvailable", "Cw", False),
("CryptoCaidPowerVUAvailable", "P", False),
("CryptoCaidBetaAvailable", "B", False),
("CryptoCaidNagraAvailable", "N", False),
("CryptoCaidBissAvailable", "Bi", False),
("CryptoCaidDre3Available", "D3", False),
("CryptoCaidDreAvailable", "D", False),
("CryptoCaidBulCrypt1Available", "B1", False),
("CryptoCaidBulCrypt2Available", "B2", False),
("CryptoCaidTandbergAvailable", "T", False),
("CryptoCaidSecaSelected", "S", True),
("CryptoCaidViaSelected", "V", True),
("CryptoCaidIrdetoSelected", "I", True),
("CryptoCaidNDSSelected", "Nd", True),
("CryptoCaidConaxSelected", "Co", True),
("CryptoCaidCryptoWSelected", "Cw", True),
("CryptoCaidPowerVUSelected", "P", True),
("CryptoCaidBetaSelected", "B", True),
("CryptoCaidNagraSelected", "N", True),
("CryptoCaidBissSelected", "Bi", True),
("CryptoCaidDre3Selected", "D3", True),
("CryptoCaidDreSelected", "D", True),
("CryptoCaidBulCrypt1Selected", "B1", True),
("CryptoCaidBulCrypt2Selected", "B2", True),
("CryptoCaidTandbergSelected", "T", True)
)
self.type = self.type.split(',')
if self.type[0] == "User":
self.info_fields[self.type[0]] = tuple(self.type[1:])
self.type = self.type[0]
self.ecmdata = GetEcmInfo()
self.feraw = self.fedata = self.updateFEdata = None
self.recursionCheck = set()
self.cryptocolors = parameters.get("PliExtraInfoCryptoColors", (0x004C7D3F, 0x009F9F9F, 0x00EEEE00, 0x00FFFFFF))
def getCryptoInfo(self, info):
if info.getInfo(iServiceInformation.sIsCrypted) == 1:
data = self.ecmdata.getEcmData()
self.current_source = data[0]
self.current_caid = data[1]
self.current_provid = data[2]
self.current_ecmpid = data[3]
else:
self.current_source = ""
self.current_caid = "0"
self.current_provid = "0"
self.current_ecmpid = "0"
def createCryptoBar(self, info):
res = ""
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
colors = parameters.get("PliExtraInfoColors", (0x0000FF00, 0x00FFFF00, 0x007F7F7F, 0x00FFFFFF)) # "found", "not found", "available", "default" colors
for caid_entry in caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
color = Hex2strColor(colors[0]) # green
else:
color = Hex2strColor(colors[2]) # grey
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
color = Hex2strColor(colors[1]) # yellow
except:
pass
if color != Hex2strColor(colors[2]) or caid_entry[4]:
if res:
res += " "
res += color + caid_entry[3]
        res += Hex2strColor(colors[3])  # white (this acts as a color "reset" for following strings)
return res
def createCryptoSeca(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x100', 16) <= int(self.current_caid, 16) <= int('0x1ff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x100', 16) <= caid <= int('0x1ff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'S'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoVia(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x500', 16) <= int(self.current_caid, 16) <= int('0x5ff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x500', 16) <= caid <= int('0x5ff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'V'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoIrdeto(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x600', 16) <= int(self.current_caid, 16) <= int('0x6ff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x600', 16) <= caid <= int('0x6ff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'I'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoNDS(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x900', 16) <= int(self.current_caid, 16) <= int('0x9ff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x900', 16) <= caid <= int('0x9ff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'NDS'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoConax(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xb00', 16) <= int(self.current_caid, 16) <= int('0xbff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0xb00', 16) <= caid <= int('0xbff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'CO'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoCryptoW(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xd00', 16) <= int(self.current_caid, 16) <= int('0xdff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0xd00', 16) <= caid <= int('0xdff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'CW'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoPowerVU(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xe00', 16) <= int(self.current_caid, 16) <= int('0xeff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0xe00', 16) <= caid <= int('0xeff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'P'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoTandberg(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1010', 16) <= int(self.current_caid, 16) <= int('0x1010', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x1010', 16) <= caid <= int('0x1010', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'T'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoBeta(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1700', 16) <= int(self.current_caid, 16) <= int('0x17ff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x1700', 16) <= caid <= int('0x17ff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'B'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoNagra(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1800', 16) <= int(self.current_caid, 16) <= int('0x18ff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x1800', 16) <= caid <= int('0x18ff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'N'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoBiss(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x2600', 16) <= int(self.current_caid, 16) <= int('0x26ff', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x2600', 16) <= caid <= int('0x26ff', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'BI'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoDre(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x4ae0', 16) <= int(self.current_caid, 16) <= int('0x4ae1', 16):
color = Hex2strColor(self.cryptocolors[0])
else:
color = Hex2strColor(self.cryptocolors[1])
try:
for caid in available_caids:
if int('0x4ae0', 16) <= caid <= int('0x4ae1', 16):
color = Hex2strColor(self.cryptocolors[2])
except:
pass
res = color + 'DC'
res += Hex2strColor(self.cryptocolors[3])
return res
def createCryptoSpecial(self, info):
caid_name = "FTA"
try:
for caid_entry in caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x:%04x:%04x" % (int(self.current_caid, 16), int(self.current_provid, 16), info.getInfo(iServiceInformation.sSID))
except:
pass
return ""
def createCryptoNameCaid(self, info):
caid_name = "FTA"
if int(self.current_caid, 16) == 0:
return caid_name
try:
            for caid_entry in caid_data:  # module-level table, not an attribute
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x" % (int(self.current_caid, 16))
except:
pass
return ""
def createResolution(self, info):
video_height = 0
video_width = 0
video_pol = " "
video_rate = 0
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
try:
video_height = int(f.read(), 16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
try:
video_width = int(f.read(), 16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/progressive"):
f = open("/proc/stb/vmpeg/0/progressive", "r")
try:
video_pol = "p" if int(f.read(), 16) else "i"
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/framerate"):
f = open("/proc/stb/vmpeg/0/framerate", "r")
try:
video_rate = int(f.read())
except:
pass
f.close()
fps = str((video_rate + 500) / 1000)
gamma = gamma_data.get(info.getInfo(iServiceInformation.sGamma), "")
return str(video_width) + "x" + str(video_height) + video_pol + fps + gamma
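    # Example (proc values assumed): xres 0x780 (1920), yres 0x438 (1080),
    # progressive scan, framerate 50000 and an HDR stream yield "1920x1080p50HDR".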
def createVideoCodec(self, info):
return codec_data.get(info.getInfo(iServiceInformation.sVideoType), _("N/A"))
def createServiceRef(self, info):
return info.getInfoString(iServiceInformation.sServiceref)
def createPIDInfo(self, info):
vpid = info.getInfo(iServiceInformation.sVideoPID)
apid = info.getInfo(iServiceInformation.sAudioPID)
pcrpid = info.getInfo(iServiceInformation.sPCRPID)
sidpid = info.getInfo(iServiceInformation.sSID)
tsid = info.getInfo(iServiceInformation.sTSID)
onid = info.getInfo(iServiceInformation.sONID)
if vpid < 0:
vpid = 0
if apid < 0:
apid = 0
if pcrpid < 0:
pcrpid = 0
if sidpid < 0:
sidpid = 0
if tsid < 0:
tsid = 0
if onid < 0:
onid = 0
return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid)
def createInfoString(self, fieldGroup, fedata, feraw, info):
if fieldGroup in self.recursionCheck:
return _("?%s-recursive?") % fieldGroup
self.recursionCheck.add(fieldGroup)
fields = self.info_fields[fieldGroup]
if fields and isinstance(fields[0], (tuple, list)):
if fieldGroup == "TransponderInfo":
fields = fields[feraw and int("DVB-T" in feraw.get("tuner_type", "")) + 1 or 0]
else:
fields = fields[int(config.usage.show_cryptoinfo.value) > 0]
ret = ""
vals = []
for field in fields:
val = None
if field == "CryptoCurrentSource":
self.getCryptoInfo(info)
vals.append(self.current_source)
elif field == "StreamURLInfo":
val = self.createStreamURLInfo(info)
elif field == "TransponderModulationFEC":
val = self.createModulation(fedata) + '-' + self.createFEC(fedata, feraw)
elif field == "TransponderName":
val = self.createTransponderName(feraw)
elif field == "ProviderName":
val = self.createProviderName(info)
elif field in ("NewLine", "NL"):
ret += " ".join(vals) + "\n"
vals = []
else:
val = self.getTextByType(field)
if val:
vals.append(val)
return ret + " ".join(vals)
def createStreamURLInfo(self, info):
refstr = info.getInfoString(iServiceInformation.sServiceref)
if "%3a//" in refstr.lower():
return refstr.split(":")[10].replace("%3a", ":").replace("%3A", ":")
return ""
def createFrequency(self, fedata):
frequency = fedata.get("frequency")
if frequency:
return str(frequency)
return ""
def createChannelNumber(self, fedata, feraw):
return "DVB-T" in feraw.get("tuner_type", "") and fedata.get("channel") or ""
def createSymbolRate(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type", ""):
bandwidth = fedata.get("bandwidth")
if bandwidth:
return bandwidth
else:
symbolrate = fedata.get("symbol_rate")
if symbolrate:
return str(symbolrate)
return ""
def createPolarization(self, fedata):
return fedata.get("polarization_abbreviation") or ""
def createFEC(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type", ""):
code_rate_lp = fedata.get("code_rate_lp")
code_rate_hp = fedata.get("code_rate_hp")
guard_interval = fedata.get('guard_interval')
if code_rate_lp and code_rate_hp and guard_interval:
return code_rate_lp + "-" + code_rate_hp + "-" + guard_interval
else:
fec = fedata.get("fec_inner")
if fec:
return fec
return ""
def createModulation(self, fedata):
if fedata.get("tuner_type") == _("Terrestrial"):
constellation = fedata.get("constellation")
if constellation:
return constellation
else:
modulation = fedata.get("modulation")
if modulation:
return modulation
return ""
def createTunerType(self, feraw):
return feraw.get("tuner_type") or ""
def createTunerSystem(self, fedata):
return fedata.get("system") or ""
def createOrbPos(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W"
elif orbpos > 0:
return str((float(orbpos)) / 10.0) + "\xc2\xb0 E"
return ""
def createOrbPosOrTunerSystem(self, fedata, feraw):
orbpos = self.createOrbPos(feraw)
if orbpos is not "":
return orbpos
return self.createTunerSystem(fedata)
def createTransponderName(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos is None: # Not satellite
return ""
freq = feraw.get("frequency")
if freq and freq < 10700000: # C-band
if orbpos > 1800:
orbpos += 1
else:
orbpos -= 1
sat_names = {
30: 'Rascom/Eutelsat 3E',
48: 'SES 5',
70: 'Eutelsat 7E',
90: 'Eutelsat 9E',
100: 'Eutelsat 10E',
130: 'Hot Bird',
160: 'Eutelsat 16E',
192: 'Astra 1KR/1L/1M/1N',
200: 'Arabsat 20E',
216: 'Eutelsat 21.5E',
235: 'Astra 3',
255: 'Eutelsat 25.5E',
260: 'Badr 4/5/6',
282: 'Astra 2E/2F/2G',
305: 'Arabsat 30.5E',
315: 'Astra 5',
330: 'Eutelsat 33E',
360: 'Eutelsat 36E',
380: 'Paksat',
390: 'Hellas Sat',
400: 'Express 40E',
420: 'Turksat',
450: 'Intelsat 45E',
480: 'Afghansat',
490: 'Yamal 49E',
530: 'Express 53E',
570: 'NSS 57E',
600: 'Intelsat 60E',
620: 'Intelsat 62E',
685: 'Intelsat 68.5E',
705: 'Eutelsat 70.5E',
720: 'Intelsat 72E',
750: 'ABS',
765: 'Apstar',
785: 'ThaiCom',
800: 'Express 80E',
830: 'Insat',
851: 'Intelsat/Horizons',
880: 'ST2',
900: 'Yamal 90E',
915: 'Mesat',
950: 'NSS/SES 95E',
1005: 'AsiaSat 100E',
1030: 'Express 103E',
1055: 'Asiasat 105E',
1082: 'NSS/SES 108E',
1100: 'BSat/NSAT',
1105: 'ChinaSat',
1130: 'KoreaSat',
1222: 'AsiaSat 122E',
1380: 'Telstar 18',
1440: 'SuperBird',
2310: 'Ciel',
2390: 'Echostar/Galaxy 121W',
2410: 'Echostar/DirectTV 119W',
2500: 'Echostar/DirectTV 110W',
2630: 'Galaxy 97W',
2690: 'NIMIQ 91W',
2780: 'NIMIQ 82W',
2830: 'Echostar/QuetzSat',
2880: 'AMC 72W',
2900: 'Star One',
2985: 'Echostar 61.5W',
2990: 'Amazonas',
3020: 'Intelsat 58W',
3045: 'Intelsat 55.5W',
3070: 'Intelsat 53W',
3100: 'Intelsat 50W',
3150: 'Intelsat 45W',
3169: 'Intelsat 43.1W',
3195: 'SES 40.5W',
3225: 'NSS/Telstar 37W',
3255: 'Intelsat 34.5W',
3285: 'Intelsat 31.5W',
3300: 'Hispasat',
3325: 'Intelsat 27.5W',
3355: 'Intelsat 24.5W',
3380: 'SES 22W',
3400: 'NSS 20W',
3420: 'Intelsat 18W',
3450: 'Telstar 15W',
3460: 'Express 14W',
3475: 'Eutelsat 12.5W',
3490: 'Express 11W',
3520: 'Eutelsat 8W',
3530: 'Nilesat/Eutelsat 7W',
3550: 'Eutelsat 5W',
3560: 'Amos',
3592: 'Thor/Intelsat'
}
if orbpos in sat_names:
return sat_names[orbpos]
elif orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "W"
else:
return str((float(orbpos)) / 10.0) + "E"
def createProviderName(self, info):
return info.getInfoString(iServiceInformation.sProvider)
def createMisPls(self, fedata):
tmp = ""
if fedata.get("is_id") > -1:
tmp = "MIS %d" % fedata.get("is_id")
if fedata.get("pls_code") > 0:
tmp = addspace(tmp) + "%s %d" % (fedata.get("pls_mode"), fedata.get("pls_code"))
if fedata.get("t2mi_plp_id") > -1:
tmp = addspace(tmp) + "T2MI %d PID %d" % (fedata.get("t2mi_plp_id"), fedata.get("t2mi_pid"))
return tmp
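    # Example (values assumed): is_id 1, pls_mode "Gold", pls_code 8 and no
    # T2MI stream gives "MIS 1 Gold 8".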
@cached
def getText(self):
self.recursionCheck.clear()
return self.getTextByType(self.type)
def getTextByType(self, textType):
service = self.source.service
if service is None:
return ""
info = service and service.info()
if not info:
return ""
if textType == "CryptoBar":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBar(info)
else:
return ""
if textType == "CryptoSeca":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoSeca(info)
else:
return ""
if textType == "CryptoVia":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoVia(info)
else:
return ""
if textType == "CryptoIrdeto":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoIrdeto(info)
else:
return ""
if textType == "CryptoNDS":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNDS(info)
else:
return ""
if textType == "CryptoConax":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoConax(info)
else:
return ""
if textType == "CryptoCryptoW":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoCryptoW(info)
else:
return ""
if textType == "CryptoBeta":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBeta(info)
else:
return ""
if textType == "CryptoNagra":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNagra(info)
else:
return ""
if textType == "CryptoBiss":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBiss(info)
else:
return ""
if textType == "CryptoDre":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoDre(info)
else:
return ""
if textType == "CryptoTandberg":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoTandberg(info)
else:
return ""
if textType == "CryptoSpecial":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoSpecial(info)
else:
return ""
if textType == "CryptoNameCaid":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNameCaid(info)
else:
return ""
if textType == "ResolutionString":
return self.createResolution(info)
if textType == "VideoCodec":
return self.createVideoCodec(info)
if self.updateFEdata:
feinfo = service.frontendInfo()
if feinfo:
self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings")
if self.feraw:
self.fedata = ConvertToHumanReadable(self.feraw)
feraw = self.feraw
if not feraw:
feraw = info.getInfoObject(iServiceInformation.sTransponderData)
fedata = ConvertToHumanReadable(feraw)
else:
fedata = self.fedata
if textType in self.info_fields:
return self.createInfoString(textType, fedata, feraw, info)
if textType == "PIDInfo":
return self.createPIDInfo(info)
if textType == "ServiceRef":
return self.createServiceRef(info)
if not feraw:
return ""
if textType == "TransponderFrequency":
return self.createFrequency(feraw)
if textType == "TransponderFrequencyMHz":
return self.createFrequency(fedata)
if textType == "TransponderSymbolRate":
return self.createSymbolRate(fedata, feraw)
if textType == "TransponderPolarization":
return self.createPolarization(fedata)
if textType == "TransponderFEC":
return self.createFEC(fedata, feraw)
if textType == "TransponderModulation":
return self.createModulation(fedata)
if textType == "OrbitalPosition":
return self.createOrbPos(feraw)
if textType == "TunerType":
return self.createTunerType(feraw)
if textType == "TunerSystem":
return self.createTunerSystem(fedata)
if self.type == "OrbitalPositionOrTunerSystem":
return self.createOrbPosOrTunerSystem(fedata, feraw)
if textType == "TerrestrialChannelNumber":
return self.createChannelNumber(fedata, feraw)
if textType == "TransponderInfoMisPls":
return self.createMisPls(fedata)
return _("?%s?") % textType
text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
request_caid = None
for x in self.ca_table:
if x[0] == self.type:
request_caid = x[1]
request_selected = x[2]
break
if request_caid is None:
return False
if info.getInfo(iServiceInformation.sIsCrypted) != 1:
return False
data = self.ecmdata.getEcmData()
if data is None:
return False
current_caid = data[1]
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in caid_data:
if caid_entry[3] == request_caid:
if request_selected:
if int(caid_entry[0], 16) <= int(current_caid, 16) <= int(caid_entry[1], 16):
return True
else: # request available
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
return True
except:
pass
return False
boolean = property(getBool)
def changed(self, what):
if what[0] == self.CHANGED_SPECIFIC:
self.updateFEdata = False
if what[1] == iPlayableService.evNewProgramInfo:
self.updateFEdata = True
if what[1] == iPlayableService.evEnd:
self.feraw = self.fedata = None
Converter.changed(self, what)
elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
self.updateFEdata = False
Converter.changed(self, what)
|
TwolDE2/enigma2
|
lib/python/Components/Converter/PliExtraInfo.py
|
Python
|
gpl-2.0
| 28,072
|
[
"Galaxy"
] |
1e9f9d5b5dacdd41c4a27cbb50db096bfbbc4e5a0e998d15723b7b9d305288da
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic NMR shielding tensor
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import _vhf
from pyscf.scf import ucphf
from pyscf.scf import _response_functions
from pyscf.ao2mo import _ao2mo
from pyscf.prop.nmr import rhf as rhf_nmr
def dia(nmrobj, gauge_orig=None, shielding_nuc=None, dm0=None):
if dm0 is None: dm0 = nmrobj._scf.make_rdm1()
if not (isinstance(dm0, numpy.ndarray) and dm0.ndim == 2):
dm0 = dm0[0] + dm0[1]
return rhf_nmr.dia(nmrobj, gauge_orig, shielding_nuc, dm0)
def para(nmrobj, mo10=None, mo_coeff=None, mo_occ=None,
shielding_nuc=None):
if mo_coeff is None: mo_coeff = nmrobj._scf.mo_coeff
if mo_occ is None: mo_occ = nmrobj._scf.mo_occ
if shielding_nuc is None: shielding_nuc = nmrobj.shielding_nuc
mol = nmrobj.mol
para_vir = numpy.empty((len(shielding_nuc),3,3))
para_occ = numpy.empty((len(shielding_nuc),3,3))
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = mo_occ[0] == 0
viridxb = mo_occ[1] == 0
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,viridxa]
orbvb = mo_coeff[1][:,viridxb]
nao = mo_coeff[0].shape[0]
dm10_oo = numpy.empty((3,nao,nao))
dm10_vo = numpy.empty((3,nao,nao))
for i in range(3):
dm10_oo[i] = reduce(numpy.dot, (orboa, mo10[0][i][occidxa], orboa.conj().T))
dm10_oo[i]+= reduce(numpy.dot, (orbob, mo10[1][i][occidxb], orbob.conj().T))
dm10_vo[i] = reduce(numpy.dot, (orbva, mo10[0][i][viridxa], orboa.conj().T))
dm10_vo[i]+= reduce(numpy.dot, (orbvb, mo10[1][i][viridxb], orbob.conj().T))
for n, atm_id in enumerate(shielding_nuc):
mol.set_rinv_origin(mol.atom_coord(atm_id))
h01 = mol.intor_asymmetric('int1e_prinvxp', 3)
para_occ[n] = numpy.einsum('xji,yij->xy', dm10_oo, h01) * 2
para_vir[n] = numpy.einsum('xji,yij->xy', dm10_vo, h01) * 2
msc_para = para_occ + para_vir
return msc_para, para_vir, para_occ
def make_h10(mol, dm0, gauge_orig=None, verbose=logger.WARN):
log = logger.new_logger(mol, verbose=verbose)
if gauge_orig is None:
        # A10_i dot p + p dot A10_i is consistent with <p^2 g>
        # A10_j dot p + p dot A10_j is consistent with <g p^2>
# A10_j dot p + p dot A10_j => i/2 (rjxp - pxrj) = irjxp
log.debug('First-order GIAO Fock matrix')
h1 = -.5 * mol.intor('int1e_giao_irjxp', 3) + make_h10giao(mol, dm0)
else:
with mol.with_common_origin(gauge_orig):
h1 = -.5 * mol.intor('int1e_cg_irxp', 3)
h1 = (h1, h1)
return h1
def make_h10giao(mol, dm0):
vj, vk = rhf_nmr.get_jk(mol, dm0)
h1 = vj[0] + vj[1] - vk
h1 -= mol.intor_asymmetric('int1e_ignuc', 3)
if mol.has_ecp():
h1 -= mol.intor_asymmetric('ECPscalar_ignuc', 3)
h1 -= mol.intor('int1e_igkin', 3)
return h1
def get_fock(nmrobj, dm0=None, gauge_orig=None):
r'''First order partial derivatives of Fock matrix wrt external magnetic
field. \frac{\partial F}{\partial B}
'''
if dm0 is None: dm0 = nmrobj._scf.make_rdm1()
if gauge_orig is None: gauge_orig = nmrobj.gauge_orig
log = logger.Logger(nmrobj.stdout, nmrobj.verbose)
h1 = make_h10(nmrobj.mol, dm0, gauge_orig, log)
if nmrobj.chkfile:
lib.chkfile.dump(nmrobj.chkfile, 'nmr/h1', h1)
return h1
def _solve_mo1_uncoupled(mo_energy, mo_occ, h1, s1):
'''uncoupled first order equation'''
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
nocca = numpy.count_nonzero(occidxa)
noccb = numpy.count_nonzero(occidxb)
nmoa, nmob = mo_occ[0].size, mo_occ[1].size
eai_a = mo_energy[0][viridxa,None] - mo_energy[0][occidxa]
eai_b = mo_energy[1][viridxb,None] - mo_energy[1][occidxb]
dim0 = len(s1[0])
s1_a = s1[0].reshape(dim0,nmoa,nocca)
s1_b = s1[1].reshape(dim0,nmob,noccb)
hs_a = mo1_a = h1[0].reshape(dim0,nmoa,nocca) - s1_a * mo_energy[0][occidxa]
hs_b = mo1_b = h1[1].reshape(dim0,nmob,noccb) - s1_b * mo_energy[1][occidxb]
mo_e1_a = hs_a[:,occidxa].copy()
mo_e1_b = hs_b[:,occidxb].copy()
mo1_a[:,viridxa]/= -eai_a
mo1_b[:,viridxb]/= -eai_b
mo1_a[:,occidxa] = -s1_a[:,occidxa] * .5
mo1_b[:,occidxb] = -s1_b[:,occidxb] * .5
mo_e1_a += mo1_a[:,occidxa] * (mo_energy[0][occidxa,None] - mo_energy[0][occidxa])
mo_e1_b += mo1_b[:,occidxb] * (mo_energy[1][occidxb,None] - mo_energy[1][occidxb])
return (mo1_a, mo1_b), (mo_e1_a, mo_e1_b)
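# Sketch of the closed-form expression implemented above: with orbital
# energies e, the uncoupled occupied->virtual rotation is
#     U_ai = -(h1_ai - s1_ai * e_i) / (e_a - e_i)
# while the occupied-occupied block is fixed at -s1_ij / 2.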
def solve_mo1(nmrobj, mo_energy=None, mo_coeff=None, mo_occ=None,
h1=None, s1=None, with_cphf=None):
'''Solve the first order equation
Kwargs:
with_cphf : boolean or function(dm_mo) => v1_mo
If a boolean value is given, the value determines whether CPHF
equation will be solved or not. The induced potential will be
generated by the function gen_vind.
If a function is given, CPHF equation will be solved, and the
given function is used to compute induced potential
'''
cput1 = (time.clock(), time.time())
log = logger.Logger(nmrobj.stdout, nmrobj.verbose)
if mo_energy is None: mo_energy = nmrobj._scf.mo_energy
if mo_coeff is None: mo_coeff = nmrobj._scf.mo_coeff
if mo_occ is None: mo_occ = nmrobj._scf.mo_occ
if with_cphf is None: with_cphf = nmrobj.cphf
mol = nmrobj.mol
orboa = mo_coeff[0][:,mo_occ[0]>0]
orbob = mo_coeff[1][:,mo_occ[1]>0]
if h1 is None:
dm0 = nmrobj._scf.make_rdm1(mo_coeff, mo_occ)
h1 = nmrobj.get_fock(dm0)
h1 = (lib.einsum('xpq,pi,qj->xij', h1[0], mo_coeff[0].conj(), orboa),
lib.einsum('xpq,pi,qj->xij', h1[1], mo_coeff[1].conj(), orbob))
cput1 = log.timer('first order Fock matrix', *cput1)
if s1 is None:
s1 = nmrobj.get_ovlp(mol)
s1 = (lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[0].conj(), orboa),
lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[1].conj(), orbob))
if with_cphf:
if callable(with_cphf):
vind = with_cphf
else:
vind = gen_vind(nmrobj._scf, mo_coeff, mo_occ)
mo10, mo_e10 = ucphf.solve(vind, mo_energy, mo_occ, h1, s1,
nmrobj.max_cycle_cphf, nmrobj.conv_tol,
verbose=log)
else:
mo10, mo_e10 = _solve_mo1_uncoupled(mo_energy, mo_occ, h1, s1)
logger.timer(nmrobj, 'solving mo1 eqn', *cput1)
return mo10, mo_e10
def gen_vind(mf, mo_coeff, mo_occ):
'''Induced potential'''
vresp = mf.gen_response(hermi=2)
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
nocca = orboa.shape[1]
noccb = orbob.shape[1]
nao, nmo = mo_coeff[0].shape
nvira = nmo - nocca
def vind(mo1):
mo1a = mo1.reshape(3,-1)[:,:nocca*nmo].reshape(3,nmo,nocca)
mo1b = mo1.reshape(3,-1)[:,nocca*nmo:].reshape(3,nmo,noccb)
dm1a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T.conj())) for x in mo1a]
dm1b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T.conj())) for x in mo1b]
dm1 = numpy.asarray(([d1-d1.conj().T for d1 in dm1a],
[d1-d1.conj().T for d1 in dm1b]))
v1ao = vresp(dm1)
v1a = [reduce(numpy.dot, (mo_coeff[0].T.conj(), x, orboa)) for x in v1ao[0]]
v1b = [reduce(numpy.dot, (mo_coeff[1].T.conj(), x, orbob)) for x in v1ao[1]]
v1mo = numpy.hstack((numpy.asarray(v1a).reshape(3,-1),
numpy.asarray(v1b).reshape(3,-1)))
return v1mo.ravel()
return vind
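# Note on the packed layout implied above: the CPHF solver passes mo1
# flattened to shape (3, nmo*nocca + nmo*noccb) with the alpha block first,
# and vind() returns the induced potential raveled in the same layout.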
class NMR(rhf_nmr.NMR):
def shielding(self, mo1=None):
if getattr(self._scf, 'spin_square', None):
s2 = self._scf.spin_square()[0]
if s2 > 1e-4:
logger.warn(self, '<S^2> = %s. UHF-NMR shielding may have large error.\n'
'paramagnetic NMR should include this result plus '
'g-tensor and HFC tensors.', s2)
return rhf_nmr.NMR.shielding(self, mo1)
dia = dia
para = para
get_fock = get_fock
solve_mo1 = solve_mo1
from pyscf import scf
scf.uhf.UHF.NMR = lib.class_as_method(NMR)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom.extend([
[1 , (0. , 0. , .917)],
['F' , (0. , 0. , 0.)], ])
mol.nucmod = {'F': 2} # gaussian nuclear model
mol.basis = {'H': '6-31g',
'F': '6-31g',}
mol.build()
mf = scf.UHF(mol).run()
nmr = mf.NMR()
nmr.cphf = True
#nmr.gauge_orig = (0,0,0)
msc = nmr.kernel() # _xx,_yy = 375.232839, _zz = 483.002139
print(lib.finger(msc) - -132.22895063293751)
nmr.cphf = True
nmr.gauge_orig = (1,1,1)
msc = nmr.shielding()
print(lib.finger(msc) - -108.48536089934709)
nmr.cphf = False
nmr.gauge_orig = None
msc = nmr.shielding()
print(lib.finger(msc) - -133.26526049655627)
mol.atom.extend([
[1 , (1. , 0.3, .417)],
[1 , (0.2, 1. , 0.)],])
mol.build()
mf = scf.UHF(mol).run()
nmr = NMR(mf)
nmr.cphf = False
nmr.gauge_orig = None
msc = nmr.shielding()
print(lib.finger(msc) - -123.98596361883168)
|
gkc1000/pyscf
|
pyscf/prop/nmr/uhf.py
|
Python
|
apache-2.0
| 10,174
|
[
"Gaussian",
"PySCF"
] |
2a75d21f70b8c343f10fcfd375a35bb9a4007fad071066ca2a7758e9645ff694
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Zhi-Hao Cui <zhcui0408@gmail.com>
#
"""
Restricted DFT+U with kpoint sampling.
Based on KRHF routine.
Refs: PRB, 1998, 57, 1505.
"""
import copy
import itertools as it
import numpy as np
import scipy.linalg as la
from functools import reduce
from pyscf import lib
from pyscf.lib import logger
from pyscf import __config__
from pyscf.pbc.dft import krks
from pyscf.data.nist import HARTREE2EV
from pyscf import lo
from pyscf.lo import iao
from pyscf.pbc import gto as pgto
def get_veff(ks, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
kpts=None, kpts_band=None):
"""
Coulomb + XC functional + Hubbard U terms.
.. note::
This is a replica of pyscf.dft.rks.get_veff with kpts added.
This function will change the ks object.
Args:
ks : an instance of :class:`RKS`
XC functional are controlled by ks.xc attribute. Attribute
ks.grids might be initialized.
dm : ndarray or list of ndarrays
A density matrix or a list of density matrices
Returns:
Veff : (nkpts, nao, nao) or (*, nkpts, nao, nao) ndarray
Veff = J + Vxc + V_U.
"""
if cell is None: cell = ks.cell
if dm is None: dm = ks.make_rdm1()
if kpts is None: kpts = ks.kpts
# J + V_xc
vxc = krks.get_veff(ks, cell=cell, dm=dm, dm_last=dm_last,
vhf_last=vhf_last, hermi=hermi, kpts=kpts,
kpts_band=kpts_band)
# V_U
C_ao_lo = ks.C_ao_lo
ovlp = ks.get_ovlp()
nkpts = len(kpts)
nlo = C_ao_lo.shape[-1]
rdm1_lo = np.zeros((nkpts, nlo, nlo), dtype=np.complex128)
for k in range(nkpts):
C_inv = np.dot(C_ao_lo[k].conj().T, ovlp[k])
rdm1_lo[k] = mdot(C_inv, dm[k], C_inv.conj().T)
E_U = 0.0
weight = 1.0 / nkpts
logger.info(ks, "-" * 79)
with np.printoptions(precision=5, suppress=True, linewidth=1000):
for idx, val, lab in zip(ks.U_idx, ks.U_val, ks.U_lab):
lab_string = " "
for l in lab:
lab_string += "%9s" %(l.split()[-1])
lab_sp = lab[0].split()
logger.info(ks, "local rdm1 of atom %s: ",
" ".join(lab_sp[:2]) + " " + lab_sp[2][:2])
U_mesh = np.ix_(idx, idx)
P_loc = 0.0
for k in range(nkpts):
S_k = ovlp[k]
C_k = C_ao_lo[k][:, idx]
P_k = rdm1_lo[k][U_mesh]
SC = np.dot(S_k, C_k)
vxc[k] += mdot(SC, (np.eye(P_k.shape[-1]) - P_k)
* (val * 0.5), SC.conj().T).astype(vxc[k].dtype,copy=False)
E_U += (val * 0.5) * (P_k.trace() - np.dot(P_k, P_k).trace() * 0.5)
P_loc += P_k
P_loc = P_loc.real / nkpts
logger.info(ks, "%s\n%s", lab_string, P_loc)
logger.info(ks, "-" * 79)
E_U *= weight
if E_U.real < 0.0 and all(np.asarray(ks.U_val) > 0):
logger.warn(ks, "E_U (%s) is negative...", E_U.real)
vxc = lib.tag_array(vxc, E_U=E_U)
return vxc
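# Sketch of the Dudarev-form on-site correction accumulated above, with P_I
# the local density matrix of LO block I at each k-point:
#     E_U = (1/nkpts) * sum_{k,I} (U_I/2) * [Tr(P_I) - Tr(P_I P_I)/2]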
def energy_elec(ks, dm_kpts=None, h1e_kpts=None, vhf=None):
"""
Electronic energy for KRKSpU.
"""
if h1e_kpts is None: h1e_kpts = ks.get_hcore(ks.cell, ks.kpts)
if dm_kpts is None: dm_kpts = ks.make_rdm1()
if vhf is None or getattr(vhf, 'ecoul', None) is None:
vhf = ks.get_veff(ks.cell, dm_kpts)
weight = 1.0 / len(h1e_kpts)
e1 = weight * np.einsum('kij,kji', h1e_kpts, dm_kpts)
tot_e = e1 + vhf.ecoul + vhf.exc + vhf.E_U
ks.scf_summary['e1'] = e1.real
ks.scf_summary['coul'] = vhf.ecoul.real
ks.scf_summary['exc'] = vhf.exc.real
ks.scf_summary['E_U'] = vhf.E_U.real
logger.debug(ks, 'E1 = %s Ecoul = %s Exc = %s EU = %s', e1, vhf.ecoul,
vhf.exc, vhf.E_U)
return tot_e.real, vhf.ecoul + vhf.exc + vhf.E_U
def set_U(ks, U_idx, U_val):
"""
Regularize the U_idx and U_val to each atom,
and set ks.U_idx, ks.U_val, ks.U_lab.
"""
assert len(U_idx) == len(U_val)
ks.U_val = []
ks.U_idx = []
ks.U_lab = []
lo_labels = np.asarray(ks.cell.ao_labels())
for i, idx in enumerate(U_idx):
if isinstance(idx, str):
lab_idx = ks.cell.search_ao_label(idx)
labs = lo_labels[lab_idx]
labs = zip(lab_idx, labs)
for j, idxj in it.groupby(labs, key=lambda x: x[1].split()[0]):
ks.U_idx.append(list(list(zip(*idxj))[0]))
ks.U_val.append(U_val[i])
else:
ks.U_idx.append(copy.deepcopy(idx))
ks.U_val.append(U_val[i])
ks.U_val = np.asarray(ks.U_val) / HARTREE2EV
logger.info(ks, "-" * 79)
logger.debug(ks, 'U indices and values: ')
for idx, val in zip(ks.U_idx, ks.U_val):
ks.U_lab.append(lo_labels[idx])
logger.debug(ks, '%6s [%.6g eV] ==> %-100s', format_idx(idx),
val * HARTREE2EV, "".join(lo_labels[idx]))
logger.info(ks, "-" * 79)
def make_minao_lo(ks, minao_ref):
"""
Construct minao local orbitals.
"""
cell = ks.cell
nao = cell.nao_nr()
kpts = ks.kpts
nkpts = len(kpts)
ovlp = ks.get_ovlp()
C_ao_minao, labels = proj_ref_ao(cell, minao=minao_ref, kpts=kpts,
return_labels=True)
for k in range(nkpts):
C_ao_minao[k] = lo.vec_lowdin(C_ao_minao[k], ovlp[k])
labels = np.asarray(labels)
C_ao_lo = np.zeros((nkpts, nao, nao), dtype=np.complex128)
for idx, lab in zip(ks.U_idx, ks.U_lab):
idx_minao = [i for i, l in enumerate(labels) if l in lab]
assert len(idx_minao) == len(idx)
C_ao_sub = C_ao_minao[:, :, idx_minao]
C_ao_lo[:, :, idx] = C_ao_sub
return C_ao_lo
def proj_ref_ao(mol, minao='minao', kpts=None, return_labels=False):
"""
Get a set of reference AO spanned by the calculation basis.
Not orthogonalized.
Args:
return_labels: if True, return the labels as well.
"""
nkpts = len(kpts)
pmol = iao.reference_mol(mol, minao)
s1 = np.asarray(mol.pbc_intor('int1e_ovlp', hermi=1, kpts=kpts))
s2 = np.asarray(pmol.pbc_intor('int1e_ovlp', hermi=1, kpts=kpts))
s12 = np.asarray(pgto.cell.intor_cross('int1e_ovlp', mol, pmol, kpts=kpts))
#s21 = np.swapaxes(s12, -1, -2).conj()
C_ao_lo = np.zeros((nkpts, s1.shape[-1], s2.shape[-1]), dtype=np.complex128)
for k in range(nkpts):
s1cd_k = la.cho_factor(s1[k])
#s2cd_k = la.cho_factor(s2[k])
C_ao_lo[k] = la.cho_solve(s1cd_k, s12[k])
if return_labels:
labels = pmol.ao_labels()
return C_ao_lo, labels
else:
return C_ao_lo
def mdot(*args):
return reduce(np.dot, args)
def format_idx(idx_list):
string = ''
for k, g in it.groupby(enumerate(idx_list), lambda ix: ix[0] - ix[1]):
g = list(g)
if len(g) > 1:
string += '%d-%d, '%(g[0][1], g[-1][1])
else:
string += '%d, '%(g[0][1])
return string[:-2]
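# Illustrative behaviour: consecutive indices collapse into ranges, e.g.
#     format_idx([0, 1, 2, 5, 7, 8]) -> '0-2, 5, 7-8'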
class KRKSpU(krks.KRKS):
"""
RKSpU class adapted for PBCs with k-point sampling.
"""
def __init__(self, cell, kpts=np.zeros((1,3)), xc='LDA,VWN',
exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald'),
U_idx=[], U_val=[], C_ao_lo='minao', **kwargs):
"""
DFT+U args:
U_idx: can be
list of list: each sublist is a set of LO indices to add U.
list of string: each string is one kind of LO orbitals,
e.g. ['Ni 3d', '1 O 2pz'], in this case,
LOs should be arranged in ao_labels order.
or a combination of these two.
U_val: a list of effective U [in eV], i.e. U-J in Dudarev's DFT+U.
each U corresponds to one kind of LO orbitals, should have
the same length as U_idx.
C_ao_lo: LO coefficients, can be
np.array, shape ((spin,), nkpts, nao, nlo),
string, in 'minao'.
Kwargs:
minao_ref: reference for minao orbitals, default is 'MINAO'.
"""
try:
krks.KRKS.__init__(self, cell, kpts, xc=xc, exxdiv=exxdiv)
except TypeError:
# backward compatibility
krks.KRKS.__init__(self, cell, kpts)
self.xc = xc
self.exxdiv = exxdiv
set_U(self, U_idx, U_val)
if isinstance(C_ao_lo, str):
if C_ao_lo == 'minao':
minao_ref = kwargs.get("minao_ref", "MINAO")
self.C_ao_lo = make_minao_lo(self, minao_ref)
else:
raise NotImplementedError
else:
self.C_ao_lo = np.asarray(C_ao_lo)
if self.C_ao_lo.ndim == 4:
self.C_ao_lo = self.C_ao_lo[0]
self._keys = self._keys.union(["U_idx", "U_val", "C_ao_lo", "U_lab"])
get_veff = get_veff
energy_elec = energy_elec
def nuc_grad_method(self):
raise NotImplementedError
if __name__ == '__main__':
from pyscf.pbc import gto
np.set_printoptions(3, linewidth=1000, suppress=True)
cell = gto.Cell()
cell.unit = 'A'
cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
cell.a = '''0. 1.7834 1.7834
1.7834 0. 1.7834
1.7834 1.7834 0. '''
cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.verbose = 7
cell.build()
kmesh = [2, 1, 1]
kpts = cell.make_kpts(kmesh, wrap_around=True)
#U_idx = ["2p", "2s"]
#U_val = [5.0, 2.0]
U_idx = ["1 C 2p"]
U_val = [5.0]
mf = KRKSpU(cell, kpts, U_idx=U_idx, U_val=U_val, C_ao_lo='minao',
minao_ref='gth-szv')
mf.conv_tol = 1e-10
print(mf.U_idx)
print(mf.U_val)
print(mf.C_ao_lo.shape)
print(mf.kernel())
|
sunqm/pyscf
|
pyscf/pbc/dft/krkspu.py
|
Python
|
apache-2.0
| 10,564
|
[
"PySCF"
] |
87cc6a163d8e1813fc563fc8e49a2bfa1ef7f57b178913aef3b673dbf6a6c411
|
"""The core Python FlickrAPI module.
This module contains most of the FlickrAPI code. It is well tested and
documented.
"""
from __future__ import print_function
import logging
import six
import functools
from . import tokencache, auth
from flickrapi.xmlnode import XMLNode
from flickrapi.exceptions import *
from flickrapi.cache import SimpleCache
from flickrapi.call_builder import CallBuilder
LOG = logging.getLogger(__name__)
def make_bytes(dictionary):
"""Encodes all Unicode strings in the dictionary to UTF-8 bytes. Converts
all other objects to regular bytes.
Returns a copy of the dictionary, doesn't touch the original.
"""
result = {}
for (key, value) in six.iteritems(dictionary):
# Keep binary data as-is.
if isinstance(value, six.binary_type):
result[key] = value
continue
# If it's not a string, convert it to one.
if not isinstance(value, six.text_type):
value = six.text_type(value)
result[key] = value.encode('utf-8')
return result
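# Illustrative example (hypothetical values, a sketch only):
#     make_bytes({'title': u'caf\xe9', 'count': 3, 'raw': b'\x00'})
#     ->  {'title': b'caf\xc3\xa9', 'count': b'3', 'raw': b'\x00'}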
def debug(method):
"""Method decorator for debugging method calls.
Using this automatically sets the log level to DEBUG.
"""
def debugged(*args, **kwargs):
LOG.debug("Call: %s(%s, %s)" % (method.__name__, args,
kwargs))
result = method(*args, **kwargs)
LOG.debug("\tResult: %s" % result)
return result
return debugged
# REST parsers, {format: (parser_method, request format), ...}. Filled via the
# @rest_parser(format) function decorator.
rest_parsers = {}
def rest_parser(parsed_format, request_format='rest'):
"""Method decorator, use this to mark a function as the parser for
REST as returned by Flickr.
"""
def decorate_parser(method):
rest_parsers[parsed_format] = (method, request_format)
return method
return decorate_parser
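# Example registration (a sketch mirroring the parsers defined below):
#     @rest_parser('etree')
#     def parse_etree(self, rest_xml): ...
# leaves rest_parsers == {'etree': (parse_etree, 'rest')}.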
def require_format(required_format):
"""Method decorator, raises a ValueError when the decorated method
is called if the default format is not set to ``required_format``.
"""
def decorator(method):
@functools.wraps(method)
def decorated(self, *args, **kwargs):
# If everything is okay, call the method
if self.default_format == required_format:
return method(self, *args, **kwargs)
# Otherwise raise an exception
msg = 'Function %s requires that you use ' \
'ElementTree ("etree") as the communication format, ' \
'while the current format is set to "%s".'
raise ValueError(msg % (method.__name__, self.default_format))
return decorated
return decorator
def authenticator(method):
"""Method wrapper, assumed the wrapped method has a 'perms' parameter.
Only calls the wrapped method if the token cache doesn't contain a valid token.
"""
@functools.wraps(method)
def decorated(self, *args, **kwargs):
assert isinstance(self, FlickrAPI)
if 'perms' in kwargs:
perms = kwargs['perms']
elif len(args):
perms = args[0]
else:
perms = 'read'
if self.token_valid(perms=perms):
# Token is valid, and for the expected permissions, so no
# need to continue authentication.
return
method(self, *args, **kwargs)
return decorated
class FlickrAPI(object):
"""Encapsulates Flickr functionality.
Example usage::
flickr = flickrapi.FlickrAPI(api_key)
photos = flickr.photos_search(user_id='73509078@N00', per_page='10')
sets = flickr.photosets_getList(user_id='73509078@N00')
"""
REST_URL = 'https://api.flickr.com/services/rest/'
UPLOAD_URL = 'https://up.flickr.com/services/upload/'
REPLACE_URL = 'https://up.flickr.com/services/replace/'
def __init__(self, api_key, secret, username=None,
token=None, format='etree', store_token=True,
cache=False, token_cache_location=None,
timeout=None):
"""Construct a new FlickrAPI instance for a given API key
and secret.
api_key
The API key as obtained from Flickr.
secret
The secret belonging to the API key.
username
Used to identify the appropriate authentication token for a
certain user.
token
If you already have an authentication token, you can give
it here. It won't be stored on disk by the FlickrAPI instance.
format
The response format. Use either "xmlnode" or "etree" to get a parsed
response, or use any response format supported by Flickr to get an
unparsed response from method calls. It's also possible to pass the
``format`` parameter on individual calls.
store_token
Disables the on-disk token cache if set to False (default is True).
Use this to ensure that tokens aren't read nor written to disk, for
example in web applications that store tokens in cookies.
cache
Enables in-memory caching of FlickrAPI calls - set to ``True`` to
use. If you don't want to use the default settings, you can
instantiate a cache yourself too:
>>> f = FlickrAPI(u'123', u'123')
>>> f.cache = SimpleCache(timeout=5, max_entries=100)
token_cache_location
If not None, determines where the authentication tokens are stored.
timeout
Optional request timeout as float in seconds.
"""
self.default_format = format
self._handler_cache = {}
if isinstance(api_key, six.binary_type):
api_key = api_key.decode('ascii')
if isinstance(secret, six.binary_type):
secret = secret.decode('ascii')
if token:
assert isinstance(token, auth.FlickrAccessToken)
# Use a memory-only token cache
self.token_cache = tokencache.SimpleTokenCache()
self.token_cache.token = token
elif not store_token:
# Use an empty memory-only token cache
self.token_cache = tokencache.SimpleTokenCache()
else:
# Use a real token cache
self.token_cache = tokencache.OAuthTokenCache(api_key, username or '',
path=token_cache_location)
self.flickr_oauth = auth.OAuthFlickrInterface(api_key, secret, self.token_cache,
default_timeout=timeout)
if cache:
self.cache = SimpleCache()
else:
self.cache = None
def __repr__(self):
"""Returns a string representation of this object."""
return '[FlickrAPI for key "%s"]' % self.flickr_oauth.key
__str__ = __repr__
def trait_names(self):
"""Returns a list of method names as supported by the Flickr
API. Used for tab completion in IPython.
"""
try:
rsp = self.reflection_getMethods(format='etree')
except FlickrError:
return None
return [m.text[7:] for m in rsp.getiterator('method')]
@rest_parser('xmlnode')
def parse_xmlnode(self, rest_xml):
"""Parses a REST XML response from Flickr into an XMLNode object."""
rsp = XMLNode.parse(rest_xml, store_xml=True)
if rsp['stat'] == 'ok':
return rsp
err = rsp.err[0]
raise FlickrError(six.u('Error: %(code)s: %(msg)s') % err, code=err['code'])
@rest_parser('parsed-json', 'json')
def parse_json(self, json_string):
"""Parses a JSON response from Flickr."""
if isinstance(json_string, six.binary_type):
json_string = json_string.decode('utf-8')
import json
parsed = json.loads(json_string)
if parsed.get('stat', '') == 'fail':
raise FlickrError(six.u('Error: %(code)s: %(message)s') % parsed,
code=parsed['code'])
return parsed
@rest_parser('etree')
def parse_etree(self, rest_xml):
"""Parses a REST XML response from Flickr into an ElementTree object."""
try:
from lxml import etree as ElementTree
LOG.info('REST Parser: using lxml.etree')
except ImportError:
try:
import xml.etree.cElementTree as ElementTree
LOG.info('REST Parser: using xml.etree.cElementTree')
except ImportError:
try:
import xml.etree.ElementTree as ElementTree
LOG.info('REST Parser: using xml.etree.ElementTree')
except ImportError:
try:
import elementtree.cElementTree as ElementTree
LOG.info('REST Parser: elementtree.cElementTree')
except ImportError:
try:
import elementtree.ElementTree as ElementTree
except ImportError:
raise ImportError("You need to install "
"ElementTree to use the etree format")
rsp = ElementTree.fromstring(rest_xml)
if rsp.attrib['stat'] == 'ok':
return rsp
err = rsp.find('err')
code = err.attrib.get('code', None)
raise FlickrError(six.u('Error: %(code)s: %(msg)s') % err.attrib, code=code)
def __getattr__(self, method_name):
"""Returns a CallBuilder for the given method name."""
# Refuse to do anything with special methods
if method_name.startswith('_'):
raise AttributeError(method_name)
# Compatibility with old way of calling, i.e. flickrobj.photos_getInfo(...)
if '_' in method_name:
method_name = method_name.replace('_', '.')
return CallBuilder(self, method_name='flickr.' + method_name)
def do_flickr_call(self, method_name, timeout=None, **kwargs):
"""Handle all the regular Flickr API calls.
Example::
etree = flickr.photos.getInfo(photo_id='1234')
etree = flickr.photos.getInfo(photo_id='1234', format='etree')
xmlnode = flickr.photos.getInfo(photo_id='1234', format='xmlnode')
json = flickr.photos.getInfo(photo_id='1234', format='json')
"""
params = kwargs.copy()
# Set some defaults
defaults = {'method': method_name,
'format': self.default_format}
if 'jsoncallback' not in kwargs:
defaults['nojsoncallback'] = 1
params = self._supply_defaults(params, defaults)
LOG.info('Calling %s', defaults)
return self._wrap_in_parser(self._flickr_call,
parse_format=params['format'],
timeout=timeout,
**params)
def _supply_defaults(self, args, defaults):
"""Returns a new dictionary containing ``args``, augmented with defaults
from ``defaults``.
Defaults can be overridden, or completely removed by setting the
appropriate value in ``args`` to ``None``.
"""
result = args.copy()
for key, default_value in six.iteritems(defaults):
# Set the default if the parameter wasn't passed
if key not in args:
result[key] = default_value
for key, value in six.iteritems(result.copy()):
# You are able to remove a default by assigning None, and we can't
# pass None to Flickr anyway.
if value is None:
del result[key]
return result
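# Illustrative example (hypothetical values): defaults fill in missing keys,
# and a None value in ``args`` removes the corresponding default:
#     _supply_defaults({'format': None, 'user_id': 'me'},
#                      {'format': 'etree', 'method': 'x'})
#     ->  {'user_id': 'me', 'method': 'x'}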
def _flickr_call(self, timeout=None, **kwargs):
"""Performs a Flickr API call with the given arguments. The method name
itself should be passed as the 'method' parameter.
Returns the unparsed data from Flickr::
data = self._flickr_call(method='flickr.photos.getInfo',
photo_id='123', format='rest')
"""
LOG.debug("Calling %s" % kwargs)
# Return value from cache if available
if self.cache and self.cache.get(kwargs):
return self.cache.get(kwargs)
reply = self.flickr_oauth.do_request(self.REST_URL, kwargs, timeout=timeout)
# Store in cache, if we have one
if self.cache is not None:
self.cache.set(kwargs, reply)
return reply
def _wrap_in_parser(self, wrapped_method, parse_format, *args, **kwargs):
"""Wraps a method call in a parser.
The parser is looked up by the ``parse_format`` specifier. If there
is a parser and ``kwargs['format']`` is set, the format is replaced by
the parser's request format (usually ``rest``), and the response of the
method is parsed before it's returned.
"""
# Find the parser, and set the format to rest if we're supposed to
# parse it.
if parse_format in rest_parsers and 'format' in kwargs:
kwargs['format'] = rest_parsers[parse_format][1]
LOG.debug('Wrapping call %s(self, %s, %s)' % (wrapped_method, args, kwargs))
data = wrapped_method(*args, **kwargs)
# Just return if we have no parser
if parse_format not in rest_parsers:
return data
# Return the parsed data
parser = rest_parsers[parse_format][0]
return parser(self, data)
def _extract_upload_response_format(self, kwargs):
"""Returns the response format given in kwargs['format'], or
the default format if there is no such key.
If kwargs contains 'format', it is removed from kwargs.
If the format isn't compatible with Flickr's upload response
type, a FlickrError exception is raised.
"""
# Figure out the response format
response_format = kwargs.get('format', self.default_format)
if response_format not in rest_parsers and response_format != 'rest':
raise FlickrError('Format %s not supported for uploading '
'photos' % response_format)
# The format shouldn't be used in the request to Flickr.
if 'format' in kwargs:
del kwargs['format']
return response_format
def upload(self, filename, fileobj=None, timeout=None, **kwargs):
"""Upload a file to flickr.
Be extra careful you spell the parameters correctly, or you will
get a rather cryptic "Invalid Signature" error on the upload!
Supported parameters:
filename
name of a file to upload
fileobj
an optional file-like object from which the data can be read
title
title of the photo
description
description a.k.a. caption of the photo
tags
space-delimited list of tags, ``'''tag1 tag2 "long tag"'''``
is_public
"1" or "0" for a public resp. private photo
is_friend
"1" or "0" whether friends can see the photo while it's
marked as private
is_family
"1" or "0" whether family can see the photo while it's
marked as private
content_type
Set to "1" for Photo, "2" for Screenshot, or "3" for Other.
hidden
Set to "1" to keep the photo in global search results, "2"
to hide from public searches.
format
The response format. You can only choose between the
parsed responses or 'rest' for plain REST.
timeout
Optional timeout for the HTTP request, as float in seconds.
The ``fileobj`` parameter can be used to monitor progress via
a callback method. For example::
class FileWithCallback(object):
def __init__(self, filename, callback):
self.file = open(filename, 'rb')
self.callback = callback
# the following attributes and methods are required
self.len = os.path.getsize(filename)
self.fileno = self.file.fileno
self.tell = self.file.tell
def read(self, size):
if self.callback:
self.callback(self.tell() * 100 // self.len)
return self.file.read(size)
fileobj = FileWithCallback(filename, callback)
rsp = flickr.upload(filename, fileobj=fileobj, **parameters)
The callback method takes one parameter:
``def callback(progress)``
Progress is a number between 0 and 100.
"""
return self._upload_to_form(self.UPLOAD_URL, filename, fileobj, timeout=timeout, **kwargs)
def replace(self, filename, photo_id, fileobj=None, timeout=None, **kwargs):
"""Replace an existing photo.
Supported parameters:
filename
name of a file to upload
fileobj
an optional file-like object from which the data can be read
photo_id
the ID of the photo to replace
format
The response format. You can only choose between the
parsed responses or 'rest' for plain REST. Defaults to the
format passed to the constructor.
timeout
Optional timeout for the HTTP request, as float in seconds.
"""
if not photo_id:
raise IllegalArgumentException("photo_id must be specified")
kwargs['photo_id'] = photo_id
return self._upload_to_form(self.REPLACE_URL, filename, fileobj, timeout=timeout, **kwargs)
def _upload_to_form(self, form_url, filename, fileobj=None, timeout=None, **kwargs):
"""Uploads a photo - can be used to either upload a new photo
or replace an existing one.
form_url must be either ``FlickrAPI.flickr_replace_form`` or
``FlickrAPI.flickr_upload_form``.
"""
if not filename:
raise IllegalArgumentException("filename must be specified")
if not self.token_cache.token:
raise IllegalArgumentException("Authentication is required")
kwargs['api_key'] = self.flickr_oauth.key
# Figure out the response format
response_format = self._extract_upload_response_format(kwargs)
# Convert to UTF-8 if an argument is an Unicode string
kwargs = make_bytes(kwargs)
return self._wrap_in_parser(self.flickr_oauth.do_upload, response_format,
filename, form_url, kwargs, fileobj, timeout=timeout)
def token_valid(self, perms=u'read'):
"""Verifies the cached token with Flickr.
If the token turns out to be invalid, or to carry lower permissions than required,
the token is erased from the token cache.
@return: True if the token is valid for the requested parameters, False otherwise.
"""
token = self.token_cache.token
if not token:
return False
# Check token for validity
self.flickr_oauth.token = token
try:
resp = self.auth.oauth.checkToken(format='etree')
token_perms = resp.findtext('oauth/perms')
if token_perms == token.access_level and token.has_level(perms):
# Token is valid, and for the expected permissions.
return True
except FlickrError:
# There was an error talking to Flickr, we assume this is due
# to an invalid token.
pass
# Token was for other permissions, so erase it as it is
# not usable for this request.
self.flickr_oauth.token = None
del self.token_cache.token
return False
@authenticator
def authenticate_console(self, perms=u'read'):
"""Performs the authentication/authorization, assuming a console program.
Shows the URL the user should visit on stdout, then waits for the user to authorize
the program.
"""
if isinstance(perms, six.binary_type):
perms = six.u(perms)
self.flickr_oauth.get_request_token()
self.flickr_oauth.auth_via_console(perms=perms)
token = self.flickr_oauth.get_access_token()
self.token_cache.token = token
@authenticator
def authenticate_via_browser(self, perms=u'read'):
"""Performs the authentication/authorization, assuming a console program.
Starts the browser and waits for the user to authorize the app before continuing.
"""
if isinstance(perms, six.binary_type):
perms = six.u(perms)
self.flickr_oauth.get_request_token()
self.flickr_oauth.auth_via_browser(perms=perms)
token = self.flickr_oauth.get_access_token()
self.token_cache.token = token
@authenticator
def authenticate_for_test(self, perms=u'read'):
"""Skips a bit of the authentication/authorization, for unit tests.
"""
if isinstance(perms, six.binary_type):
perms = six.u(perms)
self.flickr_oauth.get_request_token()
self.flickr_oauth.auth_for_test(perms=perms)
token = self.flickr_oauth.get_access_token()
self.token_cache.token = token
def get_request_token(self, oauth_callback=None):
"""Requests a new request token.
Updates this OAuthFlickrInterface object to use the request token on the following
authentication calls.
@param oauth_callback: the URL the user is sent to after granting the token access.
If the callback is None, a local web server is started on a random port, and the
callback will be http://localhost:randomport/
If you do not have a web-app and you also do not want to start a local web server,
pass oauth_callback='oob' and have your application accept the verifier from the
user instead.
"""
self.flickr_oauth.get_request_token(oauth_callback=oauth_callback)
def auth_url(self, perms=u'read'):
"""Returns the URL the user should visit to authenticate the given oauth Token.
Use this method in webapps, where you can redirect the user to the returned URL.
After authorization by the user, the browser is redirected to the callback URL,
which will contain the OAuth verifier. Set the 'verifier' property on this object
in order to use it.
In stand-alone apps, authenticate_via_browser(...) may be easier instead.
"""
return self.flickr_oauth.auth_url(perms=perms)
def get_access_token(self, verifier=None):
"""Exchanges the request token for an access token.
Also stores the access token for easy authentication of subsequent calls.
@param verifier: the verifier code, in case you used out-of-band communication
of the verifier code.
"""
if verifier is not None:
self.flickr_oauth.verifier = verifier
self.token_cache.token = self.flickr_oauth.get_access_token()
@require_format('etree')
def data_walker(self, method, searchstring='*/photo', **params):
"""Calls 'method' with page=0, page=1 etc. until the total
number of pages has been visited. Yields the photos
returned.
Assumes that ``method(page=n, **params).findall(searchstring)``
results in a list of interesting elements (defaulting to photos),
and that the toplevel element of the result contains a 'pages'
attribute with the total number of pages.
"""
page = 1
total = 1 # We don't know that yet, update when needed
while page <= total:
# Fetch a single page of photos
LOG.debug('Calling %s(page=%i of %i, %s)' %
(method.__name__, page, total, params))
rsp = method(page=page, **params)
photoset = rsp.getchildren()[0]
total = int(photoset.get('pages'))
photos = rsp.findall(searchstring)
# Yield each photo
for photo in photos:
yield photo
# Ready to get the next page
page += 1
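# Usage sketch (not part of the original module; requires the etree format):
#     for photo in flickr.data_walker(flickr.photos_search, tags='sunset'):
#         print(photo.get('id'))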
@require_format('etree')
def walk_contacts(self, per_page=50, **kwargs):
"""walk_contacts(self, per_page=50, ...) -> \
generator, yields each contact of the calling user.
:Parameters:
per_page
the number of contacts that are fetched in one call to
Flickr.
Other arguments can be passed, as documented in the
flickr.contacts.getList_ API call in the Flickr API
documentation, except for ``page`` because all pages will be
returned eventually.
.. _flickr.contacts.getList:
http://www.flickr.com/services/api/flickr.contacts.getList.html
Uses the ElementTree format, incompatible with other formats.
"""
return self.data_walker(self.contacts_getList, searchstring='*/contact',
per_page=per_page, **kwargs)
@require_format('etree')
def walk_photosets(self, per_page=50, **kwargs):
"""walk_photosets(self, per_page=50, ...) -> \
generator, yields each photoset belonging to a user.
:Parameters:
per_page
the number of photosets that are fetched in one call to
Flickr.
Other arguments can be passed, as documented in the
flickr.photosets.getList_ API call in the Flickr API
documentation, except for ``page`` because all pages will be
returned eventually.
.. _flickr.photosets.getList:
http://www.flickr.com/services/api/flickr.photosets.getList.html
Uses the ElementTree format, incompatible with other formats.
"""
return self.data_walker(self.photosets_getList, searchstring='*/photoset',
per_page=per_page, **kwargs)
@require_format('etree')
def walk_set(self, photoset_id, per_page=50, **kwargs):
"""walk_set(self, photoset_id, per_page=50, ...) -> \
generator, yields each photo in a single set.
:Parameters:
photoset_id
the photoset ID
per_page
the number of photos that are fetched in one call to
Flickr.
Other arguments can be passed, as documented in the
flickr.photosets.getPhotos_ API call in the Flickr API
documentation, except for ``page`` because all pages will be
returned eventually.
.. _flickr.photosets.getPhotos:
http://www.flickr.com/services/api/flickr.photosets.getPhotos.html
Uses the ElementTree format, incompatible with other formats.
"""
return self.data_walker(self.photosets_getPhotos,
photoset_id=photoset_id, per_page=per_page, **kwargs)
@require_format('etree')
def walk_user(self, user_id='me', per_page=50, **kwargs):
"""walk_user(self, user_id, per_page=50, ...) -> \
generator, yields each photo in a user's photostream.
:Parameters:
user_id
the user ID, or 'me'
per_page
the number of photos that are fetched in one call to
Flickr.
Other arguments can be passed, as documented in the
flickr.people.getPhotos_ API call in the Flickr API
documentation, except for ``page`` because all pages will be
returned eventually.
.. _flickr.people.getPhotos:
http://www.flickr.com/services/api/flickr.people.getPhotos.html
Uses the ElementTree format, incompatible with other formats.
"""
return self.data_walker(self.people_getPhotos,
user_id=user_id, per_page=per_page, **kwargs)
@require_format('etree')
def walk_user_updates(self, min_date, per_page=50, **kwargs):
"""walk_user_updates(self, user_id, per_page=50, ...) -> \
generator, yields each photo in a user's photostream updated \
after ``min_date``
:Parameters:
min_date
    only yield photos updated after this date; see the Flickr API
    documentation for accepted date formats.
per_page
the number of photos that are fetched in one call to
Flickr.
Other arguments can be passed, as documented in the
flickr.photos.recentlyUpdated API call in the Flickr API
documentation, except for ``page`` because all pages will be
returned eventually.
.. _flickr.photos.recentlyUpdated:
http://www.flickr.com/services/api/flickr.photos.recentlyUpdated.html
Uses the ElementTree format, incompatible with other formats.
"""
return self.data_walker(self.photos_recentlyUpdated,
min_date=min_date, per_page=per_page, **kwargs)
@require_format('etree')
def walk(self, per_page=50, **kwargs):
"""walk(self, user_id=..., tags=..., ...) -> generator, \
yields each photo in a search query result
Accepts the same parameters as flickr.photos.search_ API call,
except for ``page`` because all pages will be returned
eventually.
.. _flickr.photos.search:
http://www.flickr.com/services/api/flickr.photos.search.html
Also see `walk_set`.
"""
return self.data_walker(self.photos.search,
per_page=per_page, **kwargs)
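# Overall usage sketch (hypothetical key and secret, not part of the
# original module):
#     flickr = FlickrAPI(u'api_key', u'api_secret', format='parsed-json')
#     if not flickr.token_valid(perms=u'read'):
#         flickr.authenticate_via_browser(perms=u'read')
#     info = flickr.photos.getInfo(photo_id='1234')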
|
search5/nanumlectures
|
lib/flickrapi/core.py
|
Python
|
apache-2.0
| 29,860
|
[
"VisIt"
] |
08ad0c07cd8fc5d63dd845c716dbb847b14d415bd75591b2881d95d8fba127dc
|
#!/usr/bin/env python
##nex2phy.py
##written 6/26/14 by Groves Dixon
Description = '''
Description:
Uses biopython to convert an alignment in nexus format to phylip format
'''
AdditionalProgramInfo = '''
Additional Program Information:
'''
##Import Modules
import argparse
from sys import argv
from sys import exit
from Bio import AlignIO
##Set Up Argument Parsing
parser = argparse.ArgumentParser(description=Description, epilog=AdditionalProgramInfo) ##create argument parser that will automatically return help texts from global variables above
parser.add_argument('-i', required = True, dest = 'input', help = 'The nexus file')
parser.add_argument('-o', required = False, default = "none_given", dest = 'out', help = 'The desired name for the output file name. Default = inputName.phy')
parser.add_argument('-partitions', required = False, default = "y", dest = 'part', help = 'Default is to output a partitions file along with the phylip file. Set this argument to "n" to disable this feature.')
args = parser.parse_args()
#Assign Arguments
InfileName = args.input
OutfileName = args.out
Part = args.part
if OutfileName == "none_given":
OutfileName = "".join(InfileName.split(".")[0:-1]) + ".phy"
def convert(InfileName, OutfileName):
'''Uses Biophython to convert the file
'''
count = AlignIO.convert(InfileName, "nexus", OutfileName, "phylip")
print("\nConverted %i alignments" % count)
print("\nOutput saved as %s" % OutfileName)
def handle_partitions(InfileName, OutfileName):
"""Gets the partitions from the input nexus file and
outputs a partitions file to go along with the phylip output"""
partitionOutName = "".join(OutfileName.split(".")[0:-1]) + "_partitions.txt"
partitionList = []
with open(InfileName, 'r') as infile:
record = False
for line in infile:
if line == 'begin assumptions;\n':
record = True
continue
if record == False:
continue
if 'charset' in line:
partition = line.split()[-1]
partition = partition.strip(";\n")
partitionList.append(partition)
with open(partitionOutName, 'w') as out:
count = 0
for i in partitionList:
count += 1
if count == 1:
out.write("DNA, p{}={}".format(count, i))
else:
out.write("\nDNA, p{}={}".format(count, i))
print("\nFound %i partitions in the nexus file" % count)
print("\nOutput partitions file as %s" %partitionOutName)
def main():
print "\n\nConverting nexus file {} to phylip file {}...".format(InfileName, OutfileName)
convert(InfileName, OutfileName)
if Part == "y":
handle_partitions(InfileName, OutfileName)
print()
main()
|
grovesdixon/metaTranscriptomes
|
scripts/nex2phy.py
|
Python
|
mit
| 2,840
|
[
"Biopython"
] |
819f81b03c71f79ef72c933b4232e4b61b71338357108ccec38f3194821382d3
|
""" The FileCatalogClient is a class representing the client of the DIRAC File Catalog
"""
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.TransferClient import TransferClient
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOMSAttributeForGroup, getDNForUsername
from DIRAC.Resources.Catalog.Utilities import checkCatalogArguments
from DIRAC.Resources.Catalog.FileCatalogClientBase import FileCatalogClientBase
__RCSID__ = "$Id$"
class FileCatalogClient(FileCatalogClientBase):
""" Client code to the DIRAC File Catalogue
"""
# The list of methods below is defining the client interface
READ_METHODS = FileCatalogClientBase.READ_METHODS + \
['isFile', 'getFileMetadata',
'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory', 'getDirectoryReplicas',
'listDirectory', 'getDirectoryMetadata', 'getDirectorySize', 'getDirectoryContents',
'getLFNForPFN', 'getLFNForGUID', 'findFilesByMetadata', 'getMetadataFields',
'findDirectoriesByMetadata', 'getReplicasByMetadata', 'findFilesByMetadataDetailed',
'findFilesByMetadataWeb', 'getCompatibleMetadata', 'getMetadataSet', 'getDatasets',
'getFileDescendents', 'getFileAncestors', 'getDirectoryUserMetadata', 'getFileUserMetadata',
'checkDataset', 'getDatasetParameters', 'getDatasetFiles', 'getDatasetAnnotation']
WRITE_METHODS = [
'createLink',
'removeLink',
'addFile',
'setFileStatus',
'addReplica',
'removeReplica',
'removeFile',
'setReplicaStatus',
'setReplicaHost',
'setReplicaProblematic',
'createDirectory',
'setDirectoryStatus',
'removeDirectory',
'changePathMode',
'changePathOwner',
'changePathGroup',
'addMetadataField',
'deleteMetadataField',
'setMetadata',
'setMetadataBulk',
'removeMetadata',
'addMetadataSet',
'addDataset',
'addDatasetAnnotation',
'removeDataset',
'updateDataset',
'freezeDataset',
'releaseDataset',
'addUser',
'deleteUser',
'addGroup',
'deleteGroup',
'repairCatalog',
'rebuildDirectoryUsage']
NO_LFN_METHODS = [
'findFilesByMetadata',
'addMetadataField',
'deleteMetadataField',
'getMetadataFields',
'setMetadata',
'setMetadataBulk',
'removeMetadata',
'getDirectoryUserMetadata',
'findDirectoriesByMetadata',
'getReplicasByMetadata',
'findFilesByMetadataDetailed',
'findFilesByMetadataWeb',
'getCompatibleMetadata',
'addMetadataSet',
'getMetadataSet',
'getFileUserMetadata',
'getLFNForGUID',
'addUser',
'deleteUser',
'addGroup',
'deleteGroup',
'repairCatalog',
'rebuildDirectoryUsage']
ADMIN_METHODS = ['addUser', 'deleteUser', 'addGroup', 'deleteGroup', 'getUsers', 'getGroups',
'getCatalogCounters', 'repairCatalog', 'rebuildDirectoryUsage']
def __init__(self, url=None, **kwargs):
""" Constructor function.
"""
self.serverURL = 'DataManagement/FileCatalog' if not url else url
super(FileCatalogClient, self).__init__(self.serverURL, **kwargs)
##################################################################################
#
##################################################################################
@checkCatalogArguments
def getReplicas(self, lfns, allStatus=False, timeout=120):
""" Get the replicas of the given files
"""
rpcClient = self._getRPC(timeout=timeout)
result = rpcClient.getReplicas(lfns, allStatus)
if not result['OK']:
return result
vo = getVOfromProxyGroup().get('Value', None)
lfnDict = result['Value']
seDict = result['Value'].get('SEPrefixes', {})
for lfn in lfnDict['Successful']:
for se in lfnDict['Successful'][lfn]:
if not lfnDict['Successful'][lfn][se]:
# The PFN was not returned, construct it on the fly
# For some VO's the prefix can be non-standard
voPrefix = seDict.get("VOPrefix", {}).get(se, {}).get(vo)
sePrefix = seDict.get(se, '')
prefix = voPrefix if voPrefix else sePrefix
lfnDict['Successful'][lfn][se] = prefix + lfn
return S_OK(lfnDict)
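# Usage sketch (hypothetical LFN, not part of the original module):
#     res = FileCatalogClient().getReplicas(['/vo/user/file.txt'])
#     if res['OK']:
#         replicas = res['Value']['Successful']  # {lfn: {SE: PFN}}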
@checkCatalogArguments
def setReplicaProblematic(self, lfns, revert=False):
"""
Set replicas to problematic.
:param dict lfns: has to be formatted this way:
{ lfn : { se1 : pfn1, se2 : pfn2, ...}, ...}
:param revert: If True, remove the problematic flag
:return: { 'Successful' : { lfn : [ ses ] }, 'Failed' : { lfn : { se : msg } } }
"""
# This method does a batch treatment because the setReplicaStatus can only take one replica per lfn at once
#
# Illustration :
#
# lfns {'L2': {'S1': 'P3'}, 'L3': {'S3': 'P5', 'S2': 'P4', 'S4': 'P6'}, 'L1': {'S2': 'P2', 'S1': 'P1'}}
#
# loop1: lfnSEs {'L2': ['S1'], 'L3': ['S3', 'S2', 'S4'], 'L1': ['S2', 'S1']}
# loop1 : batch {'L2': {'Status': 'P', 'SE': 'S1', 'PFN': 'P3'},
# 'L3': {'Status': 'P', 'SE': 'S4', 'PFN': 'P6'},
# 'L1': {'Status': 'P', 'SE': 'S1', 'PFN': 'P1'}}
#
# loop2: lfnSEs {'L2': [], 'L3': ['S3', 'S2'], 'L1': ['S2']}
# loop2 : batch {'L3': {'Status': 'P', 'SE': 'S2', 'PFN': 'P4'}, 'L1': {'Status': 'P', 'SE': 'S2', 'PFN': 'P2'}}
#
# loop3: lfnSEs {'L3': ['S3'], 'L1': []}
# loop3 : batch {'L3': {'Status': 'P', 'SE': 'S3', 'PFN': 'P5'}}
#
# loop4: lfnSEs {'L3': []}
# loop4 : batch {}
successful = {}
failed = {}
status = 'AprioriGood' if revert else 'Trash'
# { lfn : [ se1, se2, ...], ...}
lfnsSEs = dict((lfn, [se for se in lfns[lfn]]) for lfn in lfns)
while lfnsSEs:
# { lfn : { 'SE' : se1, 'PFN' : pfn1, 'Status' : status }, ... }
batch = {}
for lfn in lfnsSEs.keys():
# If there are still some Replicas (SE) for the given LFN, we put it in the next batch
# else we remove the entry from the lfnsSEs dict
if lfnsSEs[lfn]:
se = lfnsSEs[lfn].pop()
batch[lfn] = {'SE': se, 'PFN': lfns[lfn][se], 'Status': status}
else:
del lfnsSEs[lfn]
# Happens when there is nothing to treat anymore
if not batch:
break
res = self.setReplicaStatus(batch)
if not res['OK']:
for lfn in batch:
failed.setdefault(lfn, {})[batch[lfn]['SE']] = res['Message']
continue
for lfn in res['Value']['Failed']:
failed.setdefault(lfn, {})[batch[lfn]['SE']] = res['Value']['Failed'][lfn]
for lfn in res['Value']['Successful']:
successful.setdefault(lfn, []).append(batch[lfn]['SE'])
return S_OK({'Successful': successful, 'Failed': failed})
@checkCatalogArguments
def listDirectory(self, lfn, verbose=False, timeout=120):
""" List the given directory's contents
"""
rpcClient = self._getRPC(timeout=timeout)
result = rpcClient.listDirectory(lfn, verbose)
if not result['OK']:
return result
# Force returned directory entries to be LFNs
for entryType in ['Files', 'SubDirs', 'Links']:
for path in result['Value']['Successful']:
entryDict = result['Value']['Successful'][path][entryType]
for fname in entryDict.keys():
detailsDict = entryDict.pop(fname)
lfn = os.path.join(path, os.path.basename(fname))
entryDict[lfn] = detailsDict
return result
@checkCatalogArguments
def getDirectoryMetadata(self, lfns, timeout=120):
''' Get standard directory metadata
'''
rpcClient = self._getRPC(timeout=timeout)
result = rpcClient.getDirectoryMetadata(lfns)
if not result['OK']:
return result
# Add some useful fields
for path in result['Value']['Successful']:
owner = result['Value']['Successful'][path]['Owner']
group = result['Value']['Successful'][path]['OwnerGroup']
res = getDNForUsername(owner)
if res['OK']:
result['Value']['Successful'][path]['OwnerDN'] = res['Value'][0]
else:
result['Value']['Successful'][path]['OwnerDN'] = ''
result['Value']['Successful'][path]['OwnerRole'] = getVOMSAttributeForGroup(group)
return result
@checkCatalogArguments
def removeDirectory(self, lfn, recursive=False, timeout=120):
""" Remove the directory from the File Catalog. The recursive keyword is for the ineterface.
"""
rpcClient = self._getRPC(timeout=timeout)
return rpcClient.removeDirectory(lfn)
@checkCatalogArguments
def getDirectoryReplicas(self, lfns, allStatus=False, timeout=120):
""" Find all the given directories' replicas
"""
rpcClient = self._getRPC(timeout=timeout)
result = rpcClient.getDirectoryReplicas(lfns, allStatus)
if not result['OK']:
return result
seDict = result['Value'].get('SEPrefixes', {})
for path in result['Value']['Successful']:
pathDict = result['Value']['Successful'][path]
for fname in pathDict.keys():
detailsDict = pathDict.pop(fname)
lfn = '%s/%s' % (path, os.path.basename(fname))
for se in detailsDict:
if not detailsDict[se] and se in seDict:
detailsDict[se] = seDict[se] + lfn
pathDict[lfn] = detailsDict
return result
def findFilesByMetadata(self, metaDict, path='/', timeout=120):
""" Find files given the meta data query and the path
"""
rpcClient = self._getRPC(timeout=timeout)
result = rpcClient.findFilesByMetadata(metaDict, path)
if not result['OK']:
return result
if isinstance(result['Value'], list):
return result
elif isinstance(result['Value'], dict):
# Process into the lfn list
fileList = []
for dir_, fList in result['Value'].items():
for fi in fList:
fileList.append(dir_ + '/' + fi)
result['Value'] = fileList
return result
else:
return S_ERROR('Illegal return value type %s' % type(result['Value']))
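# Usage sketch (hypothetical metadata fields, not from the original source):
#     res = fc.findFilesByMetadata({'Year': 2024, 'Type': 'RAW'}, path='/vo/data')
#     # on success, res['Value'] is a flat list of matching LFNs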
def getFileUserMetadata(self, path, timeout=120):
"""Get the meta data attached to a file, but also to
the its corresponding directory
"""
directory = "/".join(path.split("/")[:-1])
rpcClient = self._getRPC(timeout=timeout)
result = rpcClient.getFileUserMetadata(path)
if not result['OK']:
return result
fmeta = result['Value']
result = rpcClient.getDirectoryUserMetadata(directory)
if not result['OK']:
return result
fmeta.update(result['Value'])
return S_OK(fmeta)
########################################################################
# Path operations (not updated)
#
@checkCatalogArguments
def changePathOwner(self, lfns, recursive=False, timeout=120):
""" Get replica info for the given list of LFNs
"""
return self._getRPC(timeout=timeout).changePathOwner(lfns, recursive)
@checkCatalogArguments
def changePathGroup(self, lfns, recursive=False, timeout=120):
""" Get replica info for the given list of LFNs
"""
return self._getRPC(timeout=timeout).changePathGroup(lfns, recursive)
@checkCatalogArguments
def changePathMode(self, lfns, recursive=False, timeout=120):
""" Get replica info for the given list of LFNs
"""
return self._getRPC(timeout=timeout).changePathMode(lfns, recursive)
########################################################################
# ACL Operations
#
@checkCatalogArguments
def getPathPermissions(self, lfns, timeout=120):
""" Determine the ACL information for a supplied path
"""
return self._getRPC(timeout=timeout).getPathPermissions(lfns)
@checkCatalogArguments
def hasAccess(self, paths, opType, timeout=120):
""" Determine if the given op can be performed on the paths
The OpType is all the operations exported
"""
return self._getRPC(timeout=timeout).hasAccess(paths, opType)
###################################################################
#
# User/Group write operations
#
def addUser(self, userName, timeout=120):
""" Add a new user to the File Catalog """
return self._getRPC(timeout=timeout).addUser(userName)
def deleteUser(self, userName, timeout=120):
""" Delete user from the File Catalog """
return self._getRPC(timeout=timeout).deleteUser(userName)
def addGroup(self, groupName, timeout=120):
""" Add a new group to the File Catalog """
return self._getRPC(timeout=timeout).addGroup(groupName)
def deleteGroup(self, groupName, timeout=120):
""" Delete group from the File Catalog """
return self._getRPC(timeout=timeout).deleteGroup(groupName)
###################################################################
#
# User/Group read operations
#
def getUsers(self, timeout=120):
""" Get all the users defined in the File Catalog """
return self._getRPC(timeout=timeout).getUsers()
def getGroups(self, timeout=120):
""" Get all the groups defined in the File Catalog """
return self._getRPC(timeout=timeout).getGroups()
########################################################################
#
# Path read operations
#
@checkCatalogArguments
def exists(self, lfns, timeout=120):
""" Check whether the supplied paths exists """
return self._getRPC(timeout=timeout).exists(lfns)
########################################################################
#
# File write operations
#
@checkCatalogArguments
def addFile(self, lfns, timeout=120):
""" Register supplied files """
return self._getRPC(timeout=timeout).addFile(lfns)
@checkCatalogArguments
def removeFile(self, lfns, timeout=120):
""" Remove the supplied lfns """
return self._getRPC(timeout=timeout).removeFile(lfns)
@checkCatalogArguments
def setFileStatus(self, lfns, timeout=120):
""" Remove the supplied lfns """
return self._getRPC(timeout=timeout).setFileStatus(lfns)
@checkCatalogArguments
def addReplica(self, lfns, timeout=120):
""" Register supplied replicas """
return self._getRPC(timeout=timeout).addReplica(lfns)
@checkCatalogArguments
def removeReplica(self, lfns, timeout=120):
""" Remove the supplied replicas """
return self._getRPC(timeout=timeout).removeReplica(lfns)
@checkCatalogArguments
def setReplicaStatus(self, lfns, timeout=120):
""" Set the status for the supplied replicas """
return self._getRPC(timeout=timeout).setReplicaStatus(lfns)
@checkCatalogArguments
def setReplicaHost(self, lfns, timeout=120):
""" Change the registered SE for the supplied replicas """
return self._getRPC(timeout=timeout).setReplicaHost(lfns)
@checkCatalogArguments
def addFileAncestors(self, lfns, timeout=120):
""" Add file ancestor information for the given list of LFNs """
return self._getRPC(timeout=timeout).addFileAncestors(lfns)
########################################################################
#
# File read operations
#
@checkCatalogArguments
def isFile(self, lfns, timeout=120):
""" Check whether the supplied lfns are files """
return self._getRPC(timeout=timeout).isFile(lfns)
@checkCatalogArguments
def getFileSize(self, lfns, timeout=120):
""" Get the size associated to supplied lfns """
return self._getRPC(timeout=timeout).getFileSize(lfns)
@checkCatalogArguments
def getFileMetadata(self, lfns, timeout=120):
""" Get the metadata associated to supplied lfns """
return self._getRPC(timeout=timeout).getFileMetadata(lfns)
@checkCatalogArguments
def getReplicaStatus(self, lfns, timeout=120):
""" Get the status for the supplied replicas """
return self._getRPC(timeout=timeout).getReplicaStatus(lfns)
@checkCatalogArguments
def getFileAncestors(self, lfns, depths, timeout=120):
""" Get the status for the supplied replicas """
return self._getRPC(timeout=timeout).getFileAncestors(lfns, depths)
@checkCatalogArguments
def getFileDescendents(self, lfns, depths, timeout=120):
""" Get the status for the supplied replicas """
return self._getRPC(timeout=timeout).getFileDescendents(lfns, depths)
def getLFNForGUID(self, guids, timeout=120):
"""Get the matching lfns for given guids"""
return self._getRPC(timeout=timeout).getLFNForGUID(guids)
########################################################################
#
# Directory write operations
#
@checkCatalogArguments
def createDirectory(self, lfns, timeout=120):
""" Create the supplied directories """
return self._getRPC(timeout=timeout).createDirectory(lfns)
########################################################################
#
# Directory read operations
#
@checkCatalogArguments
def isDirectory(self, lfns, timeout=120):
""" Determine whether supplied path is a directory """
return self._getRPC(timeout=timeout).isDirectory(lfns)
@checkCatalogArguments
def getDirectorySize(self, lfns, longOut=False, fromFiles=False, timeout=120):
""" Get the size of the supplied directory """
return self._getRPC(timeout=timeout).getDirectorySize(lfns, longOut, fromFiles)
########################################################################
#
# Administrative database operations
#
def getCatalogCounters(self, timeout=120):
""" Get the number of registered directories, files and replicas in various tables """
return self._getRPC(timeout=timeout).getCatalogCounters()
def rebuildDirectoryUsage(self, timeout=120):
""" Rebuild DirectoryUsage table from scratch """
return self._getRPC(timeout=timeout).rebuildDirectoryUsage()
def repairCatalog(self, timeout=120):
""" Repair the catalog inconsistencies """
return self._getRPC(timeout=timeout).repairCatalog()
########################################################################
# Metadata Catalog Operations
#
def addMetadataField(self, fieldName, fieldType, metaType='-d', timeout=120):
""" Add a new metadata field of the given type
"""
return self._getRPC(timeout=timeout).addMetadataField(fieldName, fieldType, metaType)
def deleteMetadataField(self, fieldName, timeout=120):
""" Delete the metadata field
"""
return self._getRPC(timeout=timeout).deleteMetadataField(fieldName)
def getMetadataFields(self, timeout=120):
""" Get all the metadata fields
"""
return self._getRPC(timeout=timeout).getMetadataFields()
def setMetadata(self, path, metadatadict, timeout=120):
""" Set metadata parameter for the given path
"""
return self._getRPC(timeout=timeout).setMetadata(path, metadatadict)
def setMetadataBulk(self, pathMetadataDict, timeout=120):
""" Set metadata parameter for the given path
"""
return self._getRPC(timeout=timeout).setMetadataBulk(pathMetadataDict)
def removeMetadata(self, pathMetadataDict, timeout=120):
""" Remove the specified metadata for the given path
"""
return self._getRPC(timeout=timeout).removeMetadata(pathMetadataDict)
def getDirectoryUserMetadata(self, path, timeout=120):
""" Get all the metadata valid for the given directory path
"""
return self._getRPC(timeout=timeout).getDirectoryUserMetadata(path)
def findDirectoriesByMetadata(self, metaDict, path='/', timeout=120):
""" Find all the directories satisfying the given metadata set
"""
return self._getRPC(timeout=timeout).findDirectoriesByMetadata(metaDict, path)
def getReplicasByMetadata(self, metaDict, path='/', allStatus=False, timeout=120):
""" Find all the files satisfying the given metadata set
"""
return self._getRPC(timeout=timeout).getReplicasByMetadata(metaDict, path, allStatus)
def findFilesByMetadataDetailed(self, metaDict, path='/', timeout=120):
""" Find all the files satisfying the given metadata set
"""
return self._getRPC(timeout=timeout).findFilesByMetadataDetailed(metaDict, path)
def findFilesByMetadataWeb(self, metaDict, path, startItem, maxItems, timeout=120):
""" Find files satisfying the given metadata set
"""
return self._getRPC(timeout=timeout).findFilesByMetadataWeb(metaDict, path, startItem, maxItems)
def getCompatibleMetadata(self, metaDict, path='/', timeout=120):
""" Get metadata values compatible with the given metadata subset
"""
return self._getRPC(timeout=timeout).getCompatibleMetadata(metaDict, path)
def addMetadataSet(self, setName, setDict, timeout=120):
""" Add a new metadata set
"""
return self._getRPC(timeout=timeout).addMetadataSet(setName, setDict)
def getMetadataSet(self, setName, expandFlag, timeout=120):
""" Add a new metadata set
"""
return self._getRPC(timeout=timeout).getMetadataSet(setName, expandFlag)
#########################################################################################
#
# Dataset manipulation methods
#
@checkCatalogArguments
def addDataset(self, datasets, timeout=120):
""" Add a new dynamic dataset defined by its meta query
"""
return self._getRPC(timeout=timeout).addDataset(datasets)
@checkCatalogArguments
def addDatasetAnnotation(self, datasetDict, timeout=120):
""" Add annotation to an already created dataset
"""
return self._getRPC(timeout=timeout).addDatasetAnnotation(datasetDict)
@checkCatalogArguments
def removeDataset(self, datasets, timeout=120):
""" Check the given dynamic dataset for changes since its definition
"""
return self._getRPC(timeout=timeout).removeDataset(datasets)
@checkCatalogArguments
def checkDataset(self, datasets, timeout=120):
""" Check the given dynamic dataset for changes since its definition
"""
return self._getRPC(timeout=timeout).checkDataset(datasets)
@checkCatalogArguments
def updateDataset(self, datasets, timeout=120):
""" Update the given dynamic dataset for changes since its definition
"""
return self._getRPC(timeout=timeout).updateDataset(datasets)
@checkCatalogArguments
def getDatasets(self, datasets, timeout=120):
""" Get parameters of the given dynamic dataset as they are stored in the database
"""
return self._getRPC(timeout=timeout).getDatasets(datasets)
@checkCatalogArguments
def getDatasetParameters(self, datasets, timeout=120):
""" Get parameters of the given dynamic dataset as they are stored in the database
"""
return self._getRPC(timeout=timeout).getDatasetParameters(datasets)
@checkCatalogArguments
def getDatasetAnnotation(self, datasets, timeout=120):
""" Get annotation of the given datasets
"""
return self._getRPC(timeout=timeout).getDatasetAnnotation(datasets)
@checkCatalogArguments
def freezeDataset(self, datasets, timeout=120):
""" Freeze the contents of the dataset making it effectively static
"""
return self._getRPC(timeout=timeout).freezeDataset(datasets)
@checkCatalogArguments
def releaseDataset(self, datasets, timeout=120):
""" Release the contents of the frozen dataset allowing changes in its contents
"""
return self._getRPC(timeout=timeout).releaseDataset(datasets)
@checkCatalogArguments
def getDatasetFiles(self, datasets, timeout=120):
""" Get lfns in the given dataset
two lines !
"""
return self._getRPC(timeout=timeout).getDatasetFiles(datasets)
#############################################################################
def getSEDump(self, seName, outputFilename):
"""
Dump the content of an SE in the given file.
The file contains a list of [lfn,checksum,size] dumped as csv,
separated by '|'
:param seName: name of the StorageElement
:param outputFilename: path to the file where to dump it
:returns: result from the TransferClient
"""
dfc = TransferClient(self.serverURL)
return dfc.receiveFile(outputFilename, seName)
|
petricm/DIRAC
|
Resources/Catalog/FileCatalogClient.py
|
Python
|
gpl-3.0
| 23,994
|
[
"DIRAC"
] |
877aa6233097adb48957eb4e2525dd8e878356fadb73e75d03bb66a57b042417
|
#!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse, sys, fnmatch, os, dma_resolver, shlex, pickle, re
import shutil
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'--bootloader', action='store_true', default=False, help='configure for bootloader')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
args = parser.parse_args()
# output variables for each pin
vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of configs by label
bylabel = {}
# list of SPI devices
spidev = []
# list of ROMFS files
romfs = []
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
# allow for extra env vars
env_vars = {}
# build flags for ChibiOS makefiles
build_flags = []
mcu_type = None
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_mcu_lib(mcu):
'''get library file for the chosen MCU'''
import importlib
try:
return importlib.import_module(mcu)
except ImportError:
error("Unable to find module for MCU %s" % mcu)
def setup_mcu_type_defaults():
'''setup defaults for given mcu type'''
global pincount, ports, portmap
lib = get_mcu_lib(mcu_type)
if hasattr(lib, 'pincount'):
pincount = lib.pincount
ports = pincount.keys()
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_alt_function(mcu, pin, function):
'''return alternative function number for a pin'''
lib = get_mcu_lib(mcu)
alt_map = lib.AltFunction_map
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'SDMMC', 'OTG', 'JT', 'TIM', 'CAN']
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if not s in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
def have_type_prefix(ptype):
'''return True if we have a peripheral starting with the given peripheral type'''
for t in bytype.keys():
if t.startswith(ptype):
return True
return False
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if not pin in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is a RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return "PIN_MODE_%s(%uU)" % (v, self.pin)
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return "PIN_OTYPE_%s(%uU)" % (v, self.pin)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return "PIN_O%s(%uU)" % (v, self.pin)
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
if (self.type.startswith('USART') or
self.type.startswith('UART')) and (
(self.label.endswith('_TX') or
self.label.endswith('_RX') or
self.label.endswith('_CTS') or
self.label.endswith('_RTS'))):
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return "PIN_PUPDR_%s(%uU)" % (v, self.pin)
def get_ODR(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_AFIO(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, af)
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
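# Illustrative example (hypothetical hwdef line 'PA4 MPU_CS CS'): a CS pin
# takes the OUTPUT mode and a PULLUP, so get_MODER() returns
# 'PIN_MODE_OUTPUT(4U)' and get_PUPDR() returns 'PIN_PUPDR_PULLUP(4U)'.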
def get_config(name, column=0, required=True, default=None, type=None, spaces=False):
'''get a value from config dictionary'''
if not name in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if len(config[name]) < column + 1:
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
if spaces:
ret = ' '.join(config[name][column:])
else:
ret = config[name][column]
if type is not None:
if type == int and ret.startswith('0x'):
try:
ret = int(ret,16)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
else:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
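# Usage sketch (illustrative values, assuming config was populated from a
# hwdef.dat containing 'MCU STM32F7xx STM32F767xx' and 'FLASH_SIZE_KB 2048'):
#   get_config('MCU')                                 # -> 'STM32F7xx' (column 0)
#   get_config('MCU', 1)                              # -> 'STM32F767xx'
#   get_config('FLASH_SIZE_KB', type=int)             # -> 2048
#   get_config('MISSING', required=False, default=0)  # -> 0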
def get_mcu_config(name, required=False):
'''get a value from the mcu dictionary'''
lib = get_mcu_lib(mcu_type)
if not hasattr(lib, 'mcu'):
error("Missing mcu config for %s" % mcu_type)
if not name in lib.mcu:
if required:
error("Missing required mcu config %s for %s" % (name, mcu_type))
return None
return lib.mcu[name]
def enable_can(f):
'''setup for a CAN enabled board'''
f.write('#define HAL_WITH_UAVCAN 1\n')
env_vars['HAL_WITH_UAVCAN'] = '1'
def has_sdcard_spi():
'''check for sdcard connected to spi bus'''
for dev in spidev:
if(dev[0] == 'sdcard'):
return True
return False
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
f.write('#define %s\n\n' % get_config('MCU', 1))
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
if get_config('STDOUT_SERIAL', required=False):
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config('STDOUT_BAUDRATE', type=int))
if have_type_prefix('SDIO'):
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
build_flags.append('USE_FATFS=yes')
elif have_type_prefix('SDMMC'):
f.write('// SDMMC available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
f.write('#define STM32_SDC_USE_SDMMC1 TRUE\n')
build_flags.append('USE_FATFS=yes')
elif has_sdcard_spi():
f.write('// MMC via SPI available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_MMC_SPI TRUE\n')
f.write('#define HAL_USE_SDC FALSE\n')
f.write('#define HAL_SDCARD_SPI_HOOK TRUE\n')
build_flags.append('USE_FATFS=yes')
else:
f.write('#define HAL_USE_SDC FALSE\n')
build_flags.append('USE_FATFS=no')
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
if have_type_prefix('CAN'):
enable_can(f)
# write any custom STM32 defines
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
f.write('#define %s\n' % d[7:])
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
f.write('#define CRT1_AREAS_NUMBER 1\n')
# get core-coupled memory if available (not DMA capable)
ccm_size = get_mcu_config('CCM_RAM_SIZE_KB')
if ccm_size is not None:
f.write('\n// core-coupled memory\n')
f.write('#define CCM_RAM_SIZE_KB %u\n' % ccm_size)
f.write('#define CCM_BASE_ADDRESS 0x%08x\n' % get_mcu_config('CCM_BASE_ADDRESS', True))
# get DTCM memory if available (DMA-capable with no cache flush/invalidate)
dtcm_size = get_mcu_config('DTCM_RAM_SIZE_KB')
if dtcm_size is not None:
f.write('\n// DTCM memory\n')
f.write('#define DTCM_RAM_SIZE_KB %u\n' % dtcm_size)
f.write('#define DTCM_BASE_ADDRESS 0x%08x\n' % get_mcu_config('DTCM_BASE_ADDRESS', True))
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
f.write('\n// location of loaded firmware\n')
f.write('#define FLASH_LOAD_ADDRESS 0x%08x\n' % (0x08000000 + flash_reserve_start*1024))
f.write('\n')
ram_size_kb = get_mcu_config('RAM_SIZE_KB', True)
ram_base_address = get_mcu_config('RAM_BASE_ADDRESS', True)
f.write('// main memory size and address\n')
f.write('#define HAL_RAM_SIZE_KB %uU\n' % ram_size_kb)
f.write('#define HAL_RAM_BASE_ADDRESS 0x%08x\n' % ram_base_address)
f.write('\n// CPU serial number (12 bytes)\n')
f.write('#define UDID_START 0x%08x\n\n' % get_mcu_config('UDID_START', True))
f.write('\n// APJ board ID (for bootloaders)\n')
f.write('#define APJ_BOARD_ID %s\n' % get_config('APJ_BOARD_ID'))
lib = get_mcu_lib(mcu_type)
build_info = lib.build
# setup build variables
for v in build_info.keys():
build_flags.append('%s=%s' % (v, build_info[v]))
# setup for bootloader build
if args.bootloader:
f.write('''
#define HAL_BOOTLOADER_BUILD TRUE
#define HAL_USE_ADC FALSE
#define HAL_USE_EXT FALSE
#define HAL_NO_UARTDRIVER
#define HAL_NO_PRINTF
#define HAL_NO_CCM
#define CH_DBG_STATISTICS FALSE
#define CH_CFG_USE_TM FALSE
#define CH_CFG_USE_REGISTRY FALSE
#define CH_CFG_USE_WAITEXIT FALSE
#define CH_CFG_USE_DYNAMIC FALSE
#define CH_CFG_USE_MEMPOOLS FALSE
#define CH_CFG_USE_OBJ_FIFOS FALSE
#define CH_DBG_FILL_THREADS FALSE
#define CH_CFG_USE_SEMAPHORES FALSE
#define CH_CFG_USE_HEAP FALSE
#define CH_CFG_USE_MUTEXES FALSE
#define CH_CFG_USE_CONDVARS FALSE
#define CH_CFG_USE_CONDVARS_TIMEOUT FALSE
#define CH_CFG_USE_EVENTS FALSE
#define CH_CFG_USE_EVENTS_TIMEOUT FALSE
#define CH_CFG_USE_MESSAGES FALSE
#define CH_CFG_USE_MAILBOXES FALSE
#define CH_CFG_USE_FACTORY FALSE
#define CH_CFG_USE_MEMCORE FALSE
#define HAL_USE_I2C FALSE
#define HAL_USE_PWM FALSE
''')
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_USE_MAX_KB', type=int, default=0)
if flash_size == 0:
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
env_vars['FLASH_RESERVE_START_KB'] = str(flash_reserve_start)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram size
ram_size = get_mcu_config('RAM_SIZE_KB', True)
ram_base = get_mcu_config('RAM_BASE_ADDRESS', True)
flash_base = 0x08000000 + flash_reserve_start * 1024
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
print("Generating ldscript.ld")
f = open(fname, 'w')
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x%08x, len = %uk
}
INCLUDE common.ld
''' % (flash_base, flash_length, ram_base, ram_size))
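# Worked example (illustrative): with FLASH_SIZE_KB 1024 and the default
# FLASH_RESERVE_START_KB of 16, the script emits
#   flash : org = 0x08004000, len = 1008K
# i.e. org = 0x08000000 + 16*1024 and len = 1024 - (16 + 0) KiB.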
def copy_common_linkerscript(outdir, hwdef):
dirpath = os.path.dirname(hwdef)
shutil.copy(os.path.join(dirpath, "../common/common.ld"),
os.path.join(outdir, "common.ld"))
def write_USB_config(f):
'''write USB config defines'''
if not have_type_prefix('OTG'):
return
f.write('// USB configuration\n')
f.write('#define HAL_USB_VENDOR_ID %s\n' % get_config('USB_VENDOR', default=0x0483)) # default to ST
f.write('#define HAL_USB_PRODUCT_ID %s\n' % get_config('USB_PRODUCT', default=0x5740))
f.write('#define HAL_USB_STRING_MANUFACTURER "%s"\n' % get_config("USB_STRING_MANUFACTURER", default="ArduPilot"))
default_product = "%BOARD%"
if args.bootloader:
default_product += "-BL"
f.write('#define HAL_USB_STRING_PRODUCT "%s"\n' % get_config("USB_STRING_PRODUCT", default=default_product))
f.write('#define HAL_USB_STRING_SERIAL "%s"\n' % get_config("USB_STRING_SERIAL", default="%SERIAL%"))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
print("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or not bus in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if not cs in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if not mode in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
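# Illustrative output (hypothetical SPIDEV line
# 'SPIDEV ms5611 SPI1 DEVID3 MS5611_CS MODE3 20*MHZ 20*MHZ' with MS5611_CS
# on PD7 and SPI1 first in spi_list; padding approximate):
#   #define HAL_SPI_DEVICE0 SPIDesc("ms5611", 0, 3, PAL_LINE(GPIOD,7U), SPIDEV_MODE3, 20*MHZ, 20*MHZ)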
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in bytype.keys():
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_TX_DMA_STREAM, STM32_SPI_SPI%u_RX_DMA_STREAM }\n'
% (n, n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def write_UART_config(f):
'''write UART config defines'''
get_config('UART_ORDER')
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibiOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
idx = 0
for dev in uart_list:
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
idx += 1
for idx in range(len(uart_list), 7):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
devlist = []
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
if dev + "_RTS" in bylabel:
p = bylabel[dev + '_RTS']
rts_line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
rts_line = "0"
if dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s}\n" %
(dev, dev, rts_line))
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver and not args.bootloader:
f.write('#define HAL_USE_SERIAL FALSE\n')
def write_UART_config_bootloader(f):
'''write UART config defines'''
get_config('UART_ORDER')
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
devlist = []
have_uart = False
for u in uart_list:
if u.startswith('OTG'):
devlist.append('(BaseChannel *)&SDU1')
else:
unum = int(u[-1])
devlist.append('(BaseChannel *)&SD%u' % unum)
have_uart = True
f.write('#define BOOTLOADER_DEV_LIST %s\n' % ','.join(devlist))
if not have_uart:
f.write('#define HAL_USE_SERIAL FALSE\n')
def write_I2C_config(f):
'''write I2C config defines'''
if not have_type_prefix('I2C'):
print("No I2C peripherals")
f.write('#define HAL_USE_I2C FALSE\n')
return
if not 'I2C_ORDER' in config:
error("Missing I2C_ORDER config")
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
error("I2C_ORDER invalid")
devlist = []
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
f.write('''
#if defined(STM32_I2C_I2C%u_RX_DMA_STREAM) && defined(STM32_I2C_I2C%u_TX_DMA_STREAM)
#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM }
#else
#define HAL_I2C%u_CONFIG { &I2CD%u, SHARED_DMA_NONE, SHARED_DMA_NONE }
#endif
'''
% (n, n, n, n, n, n, n, n))
if dev + "_SCL" in bylabel:
p = bylabel[dev + "_SCL"]
f.write(
'#define HAL_%s_SCL_AF %d\n' % (dev, p.af)
)
f.write('\n#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def parse_timer(str):
'''parse timer channel string, e.g. TIM8_CH2N'''
result = re.match(r'TIM([0-9]*)_CH([1234])(N?)', str)
if result:
tim = int(result.group(1))
chan = int(result.group(2))
compl = result.group(3) == 'N'
if tim < 1 or tim > 17:
error("Bad timer number %s in %s" % (tim, str))
return (tim, chan, compl)
else:
error("Bad timer definition %s" % str)
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
rc_in_int = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('RCININT'):
rc_in_int = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if not pwm_out:
print("No PWM output defined")
f.write('#define HAL_USE_PWM FALSE\n')
if rc_in is not None:
(n, chan, compl) = parse_timer(rc_in.label)
if compl:
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
if chan not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write('#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % chan)
f.write('#define STM32_RCIN_DMA_STREAM STM32_TIM_TIM%u_CH%u_DMA_STREAM\n' % (n, chan))
f.write('#define STM32_RCIN_DMA_CHANNEL STM32_TIM_TIM%u_CH%u_DMA_CHAN\n' % (n, chan))
f.write('\n')
if rc_in_int is not None:
(n, chan, compl) = parse_timer(rc_in_int.label)
if compl:
error('Complementary channel is not supported for RCININT %s' % rc_in_int.label)
f.write('// RC input config\n')
f.write('#define HAL_USE_EICU TRUE\n')
f.write('#define STM32_EICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCININT_EICU_TIMER EICUD%u\n' % n)
f.write('#define RCININT_EICU_CHANNEL EICU_CHANNEL_%u\n' % chan)
f.write('\n')
if alarm is not None:
(n, chan, compl) = parse_timer(alarm.label)
if compl:
error("Complementary channel is not supported for ALARM %s" % alarm.label)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 1ms. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3:])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3:])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
alt_functions = [ 0, 0, 0, 0 ]
pal_lines = [ '0', '0', '0', '0' ]
for p in pwm_out:
if p.type != t:
continue
(n, chan, compl) = parse_timer(p.label)
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
if compl:
chan_mode[chan - 1] = 'PWM_COMPLEMENTARY_OUTPUT_ACTIVE_HIGH'
else:
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
alt_functions[chan - 1] = p.af
pal_lines[chan - 1] = 'PAL_LINE(GPIO%s, %uU)' % (p.port, p.pin)
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#ifdef STM32_TIM_TIM%u_UP_DMA_STREAM
# define HAL_PWM%u_DMA_CONFIG true, STM32_TIM_TIM%u_UP_DMA_STREAM, STM32_TIM_TIM%u_UP_DMA_CHAN
#else
# define HAL_PWM%u_DMA_CONFIG false, 0, 0
#endif\n''' % (n, n, n, n, n))
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u, \\
HAL_PWM%u_DMA_CONFIG, \\
{ %u, %u, %u, %u }, \\
{ %s, %s, %s, %s }}\n''' %
(group, advanced_timer,
chan_list[0], chan_list[1], chan_list[2], chan_list[3],
pwm_clock, period,
chan_mode[0], chan_mode[1], chan_mode[2], chan_mode[3],
n, n,
alt_functions[0], alt_functions[1], alt_functions[2], alt_functions[3],
pal_lines[0], pal_lines[1], pal_lines[2], pal_lines[3]))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
for (gpio, pwm, port, pin, p) in gpios:
f.write('#define HAL_GPIO_LINE_GPIO%u PAL_LINE(GPIO%s, %2uU)\n' % (gpio, port, pin))
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU)}, /* %s */ \\\n' %
(gpio, pwm, port, pin, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
last_label = None
for l in sorted(list(set(bylabel.keys()))):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
if label == last_label:
continue
last_label = label
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
def bootloader_path():
# always embed a bootloader if it is available
this_dir = os.path.realpath(__file__)
rootdir = os.path.relpath(os.path.join(this_dir, "../../../../.."))
hwdef_dirname = os.path.basename(os.path.dirname(args.hwdef))
bootloader_filename = "%s_bl.bin" % (hwdef_dirname,)
bootloader_path = os.path.join(rootdir,
"Tools",
"bootloaders",
bootloader_filename)
if os.path.exists(bootloader_path):
return os.path.realpath(bootloader_path)
return None
def add_bootloader():
'''add bootloader to ROMFS'''
bp = bootloader_path()
if bp is not None:
romfs.append( ("bootloader.bin", bp) )
def write_ROMFS(outdir):
'''create ROMFS embedded header'''
env_vars['ROMFS_FILES'] = romfs
def write_prototype_file():
'''write the prototype file for apj generation'''
pf = open(os.path.join(outdir, "apj.prototype"), "w")
pf.write('''{
"board_id": %s,
"magic": "PX4FWv1",
"description": "Firmware for the %s board",
"image": "",
"build_time": 0,
"summary": "PX4FMUv3",
"version": "0.1",
"image_size": 0,
"git_identity": "",
"board_revision": 0
}
''' % (get_config('APJ_BOARD_ID'),
get_config('APJ_BOARD_TYPE', default=mcu_type)))
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(bytype.keys()):
if type.startswith('USART') or type.startswith('UART'):
f.write('#define STM32_SERIAL_USE_%-6s TRUE\n' % type)
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph not in bylabel:
continue
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
''')
write_mcu_config(f)
write_USB_config(f)
write_SPI_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_peripheral_enable(f)
write_prototype_file()
dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY',default='TIM* SPI*', spaces=True),
dma_noshare=get_config('DMA_NOSHARE',default='', spaces=True))
if not args.bootloader:
write_PWM_config(f)
write_I2C_config(f)
write_UART_config(f)
else:
write_UART_config_bootloader(f)
add_bootloader()
if len(romfs) > 0:
f.write('#define HAL_HAVE_AP_ROMFS_EMBEDDED_H 1\n')
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
for p in allpins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
ptx = type + "_TX"
prx = type + "_RX"
peripherals.append(ptx)
peripherals.append(prx)
if not ptx in bylabel:
bylabel[ptx] = p
if not prx in bylabel:
bylabel[prx] = p
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO') or type.startswith('SDMMC'):
peripherals.append(type)
if type.startswith('TIM'):
if p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
elif not p.has_extra('ALARM') and not p.has_extra('RCININT'):
# get the TIMn_UP DMA channels for DShot
label = type + '_UP'
if not label in peripherals and not p.has_extra('NODMA'):
peripherals.append(label)
done.add(type)
return peripherals
def write_env_py(filename):
'''write out env.py for environment variables to control the build process'''
# see if board has a defaults.parm file
defaults_filename = os.path.join(os.path.dirname(args.hwdef), 'defaults.parm')
if os.path.exists(defaults_filename) and not args.bootloader:
print("Adding defaults.parm")
env_vars['DEFAULT_PARAMETERS'] = os.path.abspath(defaults_filename)
# CHIBIOS_BUILD_FLAGS is passed to the ChibiOS makefile
env_vars['CHIBIOS_BUILD_FLAGS'] = ' '.join(build_flags)
pickle.dump(env_vars, open(filename, "wb"))
def romfs_add(romfs_filename, filename):
'''add a file to ROMFS'''
romfs.append((romfs_filename, filename))
def romfs_wildcard(pattern):
'''add a set of files to ROMFS by wildcard'''
base_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
(pattern_dir, pattern) = os.path.split(pattern)
for f in os.listdir(os.path.join(base_path, pattern_dir)):
if fnmatch.fnmatch(f, pattern):
romfs.append((f, os.path.join(pattern_dir, f)))
def process_line(line):
'''process one line of pin definition file'''
global allpins
a = shlex.split(line)
# keep all config lines for later use
alllines.append(line)
if a[0].startswith('P') and a[0][1] in ports and a[0] in config:
error("Pin %s redefined" % a[0])
config[a[0]] = a[1:]
if a[0] == 'MCU':
global mcu_type
mcu_type = a[2]
setup_mcu_type_defaults()
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
portmap[port][pin] = p
allpins.append(p)
if not type in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
if a[0] == 'SPIDEV':
spidev.append(a[1:])
if a[0] == 'ROMFS':
romfs_add(a[1],a[2])
if a[0] == 'ROMFS_WILDCARD':
romfs_wildcard(a[1])
if a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1],'')
bylabel.pop(a[1],'')
# also remove all occurrences of defines in previous lines if any
for line in alllines[:]:
if line.startswith('define') and a[1] in line:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1]:
continue
if pin.label == a[1]:
continue
if pin.portpin == a[1]:
continue
newpins.append(pin)
allpins = newpins
if a[0] == 'env':
print("Adding environment %s" % ' '.join(a[1:]))
if len(a[1:]) < 2:
error("Bad env line for %s" % a[0])
env_vars[a[1]] = ' '.join(a[2:])
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
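# Illustrative hwdef.dat fragment (hypothetical board) showing the line forms
# handled by process_line() above:
#   MCU STM32F7xx STM32F767xx
#   PA10 USART1_RX USART1
#   PC4 BATT_VOLTAGE_SENS ADC1 SCALE(1)
#   SPIDEV ms5611 SPI1 DEVID3 MS5611_CS MODE3 20*MHZ 20*MHZ
#   undef BATT_VOLTAGE_SENS
#   env OPTIMIZE -O2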
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if not "MCU" in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list of peripherals for the DMA resolver
periph_list = build_peripheral_list()
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
write_ROMFS(outdir)
# copy the shared linker script into the build directory; it must
# exist in the same directory as the ldscript.ld file we generate.
copy_common_linkerscript(outdir, args.hwdef)
write_env_py(os.path.join(outdir, "env.py"))
| tatsuy/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py | Python | gpl-3.0 | 44,294 | ["CRYSTAL"] | e438c968353742c9fd40fb90665a68998321b4614640a5c34a49327f8a8bcada |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
import warnings
from operator import itemgetter
from tabulate import tabulate
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen import Structure, Lattice, Element, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string import str_delimited
"""
This module defines classes for reading/manipulating/writing the main sections
of the FEFF input file (feff.inp), namely HEADER, ATOMS, POTENTIAL and the
program control tags.
XANES and EXAFS input files are available for the non-spin case at this time.
"""
__author__ = "Alan Dozier, Kiran Mathew"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__status__ = "Beta"
__date__ = "April 7, 2013"
# **Non-exhaustive** list of valid Feff.inp tags
VALID_FEFF_TAGS = ("CONTROL", "PRINT", "ATOMS", "POTENTIALS", "RECIPROCAL",
"REAL", "MARKER", "LATTICE", "TITLE", "RMULTIPLIER",
"SGROUP", "COORDINATES", "EQUIVALENCE", "CIF", "CGRID",
"CFAVERAGE", "OVERLAP", "EXAFS", "XANES", "ELNES", "EXELFS",
"LDOS", "ELLIPTICITY", "MULTIPOLE", "POLARIZATION",
"RHOZZP", "DANES", "FPRIME", "NRIXS", "XES", "XNCD",
"XMCD", "XNCDCONTROL", "END", "KMESH", "PRINT", "EGRID",
"DIMS", "AFOLP", "EDGE", "COMPTON", "DANES",
"FPRIME" "MDFF", "HOLE", "COREHOLE", "S02", "CHBROAD",
"EXCHANGE", "FOLP", "NOHOLE", "RGRID", "SCF",
"UNFREEZEF", "CHSHIFT", "DEBYE",
"INTERSTITIAL", "CHWIDTH", "EGAP", "EPS0", "EXTPOT",
"ION", "JUMPRM", "EXPOT", "SPIN", "LJMAX", "LDEC", "MPSE",
"PLASMON", "RPHASES", "RSIGMA", "PMBSE", "TDLDA", "FMS",
"DEBYA", "OPCONS", "PREP", "RESTART", "SCREEN", "SETE",
"STRFACTORS", "BANDSTRUCTURE", "RPATH", "NLEG", "PCRITERIA",
"SYMMETRY", "SS", "CRITERIA", "IORDER", "NSTAR", "ABSOLUTE",
"CORRECTIONS", "SIG2", "SIG3", "MBCONV", "SFCONV", "RCONV",
"SELF", "SFSE", "MAGIC", "TARGET", "STRFAC")
class Header(MSONable):
"""
Creates Header for the FEFF input file.
Has the following format::
* This feff.inp file generated by pymatgen, www.materialsproject.org
TITLE comment:
TITLE Source: CoO19128.cif
TITLE Structure Summary: (Co2 O2)
TITLE Reduced formula: CoO
TITLE space group: P1, space number: 1
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.0 90.0 120.0
TITLE sites: 4
* 1 Co 0.666666 0.333332 0.496324
* 2 Co 0.333333 0.666667 0.996324
* 3 O 0.666666 0.333332 0.878676
* 4 O 0.333333 0.666667 0.378675
Args:
struct: Structure object, See pymatgen.core.structure.Structure.
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: Comment for first header line
"""
def __init__(self, struct, source='', comment=''):
if struct.is_ordered:
self.struct = struct
self.source = source
sym = SpacegroupAnalyzer(struct)
data = sym.get_symmetry_dataset()
self.space_number = data["number"]
self.space_group = data["international"]
self.comment = comment or "None given"
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
@staticmethod
def from_cif_file(cif_file, source='', comment=''):
"""
Static method to create Header object from cif_file
Args:
cif_file: cif_file path and name
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: User comment that goes in header
Returns:
Header Object
"""
r = CifParser(cif_file)
structure = r.get_structures()[0]
return Header(structure, source, comment)
@property
def structure_symmetry(self):
"""
Returns space number and space group
Returns:
Space number and space group list
"""
return self.space_group, self.space_number
@property
def formula(self):
"""
Formula of structure
"""
return self.struct.composition.formula
@staticmethod
def from_file(filename):
"""
Returns Header object from file
"""
hs = Header.header_string_from_file(filename)
return Header.from_string(hs)
@staticmethod
def header_string_from_file(filename='feff.inp'):
"""
Reads Header string from either a HEADER file or a feff.inp file.
Will also read a header from a non-pymatgen generated feff.inp file.
Args:
filename: File name containing the Header data.
Returns:
Header string.
"""
with zopen(filename, "r") as fobject:
f = fobject.readlines()
feff_header_str = []
ln = 0
# Checks to see if generated by pymatgen
try:
feffpmg = f[0].find("pymatgen")
if feffpmg == -1:
feffpmg = False
except IndexError:
feffpmg = False
# Reads pymatgen generated header or feff.inp file
if feffpmg:
nsites = int(f[8].split()[2])
for line in f:
ln += 1
if ln <= nsites + 9:
feff_header_str.append(line)
else:
# Reads the header from a feff.inp file from an unknown
# source
end = 0
for line in f:
if (line[0] == "*" or line[0] == "T") and end == 0:
feff_header_str.append(line.replace("\r", ""))
else:
end = 1
return ''.join(feff_header_str)
@staticmethod
def from_string(header_str):
"""
Reads Header string and returns Header object if header was
generated by pymatgen.
Note: Checks whether the header was generated by pymatgen; if not, a
Structure object cannot be built, so no Header object can be created
and a message string is returned instead.
Args:
header_str: pymatgen generated feff.inp header
Returns:
Structure object.
"""
lines = tuple(clean_lines(header_str.split("\n"), False))
comment1 = lines[0]
feffpmg = comment1.find("pymatgen")
if feffpmg == -1:
feffpmg = False
if feffpmg:
comment2 = ' '.join(lines[1].split()[2:])
source = ' '.join(lines[2].split()[2:])
basis_vec = lines[6].split(":")[-1].split()
# a, b, c
a = float(basis_vec[0])
b = float(basis_vec[1])
c = float(basis_vec[2])
lengths = [a, b, c]
# alpha, beta, gamma
basis_ang = lines[7].split(":")[-1].split()
alpha = float(basis_ang[0])
beta = float(basis_ang[1])
gamma = float(basis_ang[2])
angles = [alpha, beta, gamma]
lattice = Lattice.from_lengths_and_angles(lengths, angles)
natoms = int(lines[8].split(":")[-1].split()[0])
atomic_symbols = []
for i in range(9, 9 + natoms):
atomic_symbols.append(lines[i].split()[2])
# read the atomic coordinates
coords = []
for i in range(natoms):
toks = lines[i + 9].split()
coords.append([float(s) for s in toks[3:]])
struct = Structure(lattice, atomic_symbols, coords, False,
False, False)
h = Header(struct, source, comment2)
return h
else:
return "Header not generated by pymatgen, cannot return header object"
def __str__(self):
"""
String representation of Header.
"""
to_s = lambda x: "%0.6f" % x
output = ["* This FEFF.inp file generated by pymatgen",
''.join(["TITLE comment: ", self.comment]),
''.join(["TITLE Source: ", self.source]),
"TITLE Structure Summary: {}"
.format(self.struct.composition.formula),
"TITLE Reduced formula: {}"
.format(self.struct.composition.reduced_formula),
"TITLE space group: ({}), space number: ({})"
.format(self.space_group, self.space_number),
"TITLE abc:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.abc])),
"TITLE angles:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.angles])),
"TITLE sites: {}".format(self.struct.num_sites)]
for i, site in enumerate(self.struct):
output.append(" ".join(["*", str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(output)
def write_file(self, filename='HEADER'):
"""
Writes Header into filename on disk.
Args:
filename: Filename and path for file to be written to disk
"""
with open(filename, "w") as f:
f.write(str(self) + "\n")
class Atoms(MSONable):
"""
Atomic cluster centered around the absorbing atom.
"""
def __init__(self, struct, absorbing_atom, radius):
"""
Args:
struct (Structure): input structure
absorbing_atom (str/int): Symbol for absorbing atom or site index
radius (float): radius of the atom cluster in Angstroms.
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, self.center_index = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
self.radius = radius
self._cluster = self._set_cluster()
def _set_cluster(self):
"""
Compute and set the cluster of atoms as a Molecule object. The site
coordinates are translated such that the absorbing atom (aka central
atom) is at the origin.
Returns:
Molecule
"""
center = self.struct[self.center_index].coords
sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
symbols = [self.absorbing_atom]
coords = [[0, 0, 0]]
for i, site_dist in enumerate(sphere):
site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
symbols.append(site_symbol)
coords.append(site_dist[0].coords - center)
return Molecule(symbols, coords)
@property
def cluster(self):
"""
Returns the atomic cluster as a Molecule object.
"""
return self._cluster
@staticmethod
def atoms_string_from_file(filename):
"""
Reads atomic shells from file such as feff.inp or ATOMS file
The lines are arranged as follows:
x y z ipot Atom Symbol Distance Number
with distance being the shell radius and ipot an integer identifying
the potential used.
Args:
filename: File name containing atomic coord data.
Returns:
Atoms string.
"""
with zopen(filename, "rt") as fobject:
f = fobject.readlines()
coords = 0
atoms_str = []
for line in f:
if coords == 0:
find_atoms = line.find("ATOMS")
if find_atoms >= 0:
coords = 1
if coords == 1 and not ("END" in line):
atoms_str.append(line.replace("\r", ""))
return ''.join(atoms_str)
@staticmethod
def cluster_from_file(filename):
"""
Parse the feff input file and return the atomic cluster as a Molecule
object.
Args:
filename (str): path the feff input file
Returns:
Molecule: the atomic cluster as Molecule object. The absorbing atom
is the one at the origin.
"""
atoms_string = Atoms.atoms_string_from_file(filename)
line_list = [l.split() for l in atoms_string.splitlines()[3:]]
coords = []
symbols = []
for l in line_list:
if l:
coords.append([float(i) for i in l[:3]])
symbols.append(l[4])
return Molecule(symbols, coords)
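# Illustrative usage (assuming an existing feff.inp containing an ATOMS
# section): the parser skips the first three lines of the block (the ATOMS
# tag and two header rows) and reads x, y, z from columns 0-2 and the
# element symbol from column 4 of each remaining row, e.g.
#   mol = Atoms.cluster_from_file('feff.inp')  # Molecule, absorber at origin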
def get_lines(self):
"""
Returns a list of string representations of the atomic configuration
information (x, y, z, ipot, atom_symbol, distance, id).
Returns:
list: list of strings, sorted by the distance from the absorbing
atom.
"""
lines = [["{:f}".format(self._cluster[0].x),
"{:f}".format(self._cluster[0].y),
"{:f}".format(self._cluster[0].z),
0, self.absorbing_atom, "0.0", 0]]
for i, site in enumerate(self._cluster[1:]):
site_symbol = re.sub(r"[^aA-zZ]+", "", site.species_string)
ipot = self.pot_dict[site_symbol]
lines.append(["{:f}".format(site.x), "{:f}".format(site.y),
"{:f}".format(site.z), ipot, site_symbol,
"{:f}".format(self._cluster.get_distance(0, i + 1)), i + 1])
return sorted(lines, key=itemgetter(5))
def __str__(self):
"""
String representation of Atoms file.
"""
lines_sorted = self.get_lines()
# TODO: remove the formatting and update the unittests
lines_formatted = str(tabulate(lines_sorted,
headers=["* x", "y", "z", "ipot",
"Atom", "Distance", "Number"]))
atom_list = lines_formatted.replace("--", "**")
return ''.join(["ATOMS\n", atom_list, "\nEND\n"])
def write_file(self, filename='ATOMS'):
"""
Write Atoms list to file.
Args:
filename: path for file to be written
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Tags(dict):
"""
FEFF control parameters.
"""
def __init__(self, params=None):
"""
Args:
params: A set of input parameters as a dictionary.
"""
super().__init__()
if params:
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair. Warns if parameter is not in list of valid
Feff tags. Also cleans the parameter and val by stripping leading and
trailing white spaces.
Arg:
key: dict key value
value: value associated with key in dictionary
"""
if key.strip().upper() not in VALID_FEFF_TAGS:
warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
super().__setitem__(key.strip(),
Tags.proc_val(key.strip(), val.strip())
if isinstance(val, str) else val)
def as_dict(self):
"""
Dict representation.
Returns:
Dictionary of parameters from fefftags object
"""
tags_dict = dict(self)
tags_dict['@module'] = self.__class__.__module__
tags_dict['@class'] = self.__class__.__name__
return tags_dict
@staticmethod
def from_dict(d):
"""
Creates Tags object from a dictionary.
Args:
d: Dict of feff parameters and values.
Returns:
Tags object
"""
i = Tags()
for k, v in d.items():
if k not in ("@module", "@class"):
i[k] = v
return i
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the Tags. The reason why this
method is different from the __str__ method is to provide options
for pretty printing.
Args:
sort_keys: Set to True to sort the Feff parameters alphabetically.
Defaults to False.
pretty: Set to True for pretty aligned output. Defaults to False.
Returns:
String representation of Tags.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if isinstance(self[k], dict):
if k in ["ELNES", "EXELFS"]:
lines.append([k, self._stringify_val(self[k]["ENERGY"])])
beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
beam_energy_list = beam_energy.split()
if int(beam_energy_list[1]) == 0: # aver=0, specific beam direction
lines.append([beam_energy])
lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
else:
# no cross terms for orientation averaged spectrum
beam_energy_list[2] = str(0)
lines.append([self._stringify_val(beam_energy_list)])
lines.append([self._stringify_val(self[k]["ANGLES"])])
lines.append([self._stringify_val(self[k]["MESH"])])
lines.append([self._stringify_val(self[k]["POSITION"])])
else:
lines.append([k, self._stringify_val(self[k])])
if pretty:
return tabulate(lines)
else:
return str_delimited(lines, None, " ")
@staticmethod
def _stringify_val(val):
"""
Convert the given value to string.
"""
if isinstance(val, list):
return " ".join([str(i) for i in val])
else:
return str(val)
def __str__(self):
return self.get_string()
def write_file(self, filename='PARAMETERS'):
"""
Write Tags to a Feff parameter tag file.
Args:
filename: filename and path to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__() + "\n")
@staticmethod
def from_file(filename="feff.inp"):
"""
Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
Args:
filename: Filename for either PARAMETER or feff.inp file
Returns:
Feff_tag object
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
params = {}
eels_params = []
ieels = -1
ieels_max = -1
for i, line in enumerate(lines):
m = re.match(r"([A-Z]+\d*\d*)\s*(.*)", line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Tags.proc_val(key, val)
if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
if key in ["ELNES", "EXELFS"]:
ieels = i
ieels_max = ieels + 5
else:
params[key] = val
if ieels >= 0:
if i >= ieels and i <= ieels_max:
if i == ieels + 1:
if int(line.split()[1]) == 1:
ieels_max -= 1
eels_params.append(line)
if eels_params:
if len(eels_params) == 6:
eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
else:
eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
for k, v in zip(eels_keys, eels_params[1:]):
eels_dict[k] = str(v)
params[str(eels_params[0].split()[0])] = eels_dict
return Tags(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert Feff parameters to proper types, e.g.
integers, floats, lists, etc.
Args:
key: Feff parameter key
val: Actual value of Feff parameter.
"""
list_type_keys = list(VALID_FEFF_TAGS)
del list_type_keys[list_type_keys.index("ELNES")]
del list_type_keys[list_type_keys.index("EXELFS")]
boolean_type_keys = ()
float_type_keys = ("S02", "EXAFS", "RPATH")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key.lower() == 'cif':
m = re.search(r"\w+.cif", val)
return m.group(0)
if key in list_type_keys:
output = list()
toks = re.split(r"\s+", val)
for tok in toks:
m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok)
if m:
output.extend([smart_int_or_float(m.group(2))] *
int(m.group(1)))
else:
output.append(smart_int_or_float(tok))
return output
if key in boolean_type_keys:
m = re.search(r"^\W+([TtFf])", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_type_keys:
return float(val)
except ValueError:
return val.capitalize()
return val.capitalize()
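# Illustrative proc_val() behaviour (hypothetical tag values):
#   Tags.proc_val('CONTROL', '1 1 1 0 0 0')  # -> [1, 1, 1, 0, 0, 0]
#   Tags.proc_val('KMESH', '3*4')            # -> [4, 4, 4]  (N*value expansion)
#   Tags.proc_val('CIF', 'CoO19128.cif')     # -> 'CoO19128.cif'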
def diff(self, other):
"""
Diff function. Compares two PARAMETER files and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other: The other PARAMETER dictionary to compare to.
Returns:
Dict of the format {"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different} Note that the
parameters are return as full dictionaries of values.
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": "Default"}
elif v1 != other[k1]:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"FEFF_TAGS1": "Default",
"FEFF_TAGS2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another Tags object to this object
Facilitates the use of "standard" Tags
"""
params = dict(self)
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Tags have conflicting values!")
else:
params[k] = v
return Tags(params)
class Potential(MSONable):
"""
FEFF atomic potential.
"""
def __init__(self, struct, absorbing_atom):
"""
Args:
struct (Structure): Structure object.
absorbing_atom (str/int): Absorbing atom symbol or site index
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, _ = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
@staticmethod
def pot_string_from_file(filename='feff.inp'):
"""
Reads Potential parameters from a feff.inp or FEFFPOT file.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Args:
filename: file name containing potential data.
Returns:
FEFFPOT string.
"""
with zopen(filename, "rt") as f_object:
f = f_object.readlines()
ln = -1
pot_str = ["POTENTIALS\n"]
pot_tag = -1
pot_data = 0
pot_data_over = 1
sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),
re.compile('^[*]+.*[*]+$')]
for line in f:
if pot_data_over == 1:
ln += 1
if pot_tag == -1:
pot_tag = line.find("POTENTIALS")
ln = 0
if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
try:
if len(sep_line_pattern[0].findall(line)) > 0 or \
len(sep_line_pattern[1].findall(line)) > 0:
pot_str.append(line)
elif int(line.split()[0]) == pot_data:
pot_data += 1
pot_str.append(line.replace("\r", ""))
except (ValueError, IndexError):
if pot_data > 0:
pot_data_over = 0
return ''.join(pot_str).rstrip('\n')
@staticmethod
def pot_dict_from_string(pot_data):
"""
Creates forward and reverse atomic symbol to potential number
dictionaries.
Arg:
pot_data: potential data in string format
Returns:
forward and reverse atom symbol and potential number dictionaries.
"""
pot_dict = {}
pot_dict_reverse = {}
begin = 0
ln = -1
for line in pot_data.split("\n"):
try:
if begin == 0 and line.split()[0] == "0":
begin += 1
ln = 0
if begin == 1:
ln += 1
if ln > 0:
atom = line.split()[2]
index = int(line.split()[0])
pot_dict[atom] = index
pot_dict_reverse[index] = atom
except (ValueError, IndexError):
pass
return pot_dict, pot_dict_reverse
def __str__(self):
"""
Returns a string representation of potential parameters to be used in
the feff.inp file, determined from the structure object.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Returns:
String representation of Atomic Coordinate Shells.
"""
central_element = Element(self.absorbing_atom)
ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, .0001, 0]]
for el, amt in self.struct.composition.items():
ipot = self.pot_dict[el.symbol]
ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])
ipot_sorted = sorted(ipotrow, key=itemgetter(0))
ipotrow = str(tabulate(ipot_sorted,
headers=["*ipot", "Z", "tag", "lmax1",
"lmax2", "xnatph(stoichometry)",
"spinph"]))
ipotlist = ipotrow.replace("--", "**")
ipotlist = ''.join(["POTENTIALS\n", ipotlist])
return ipotlist
def write_file(self, filename='POTENTIALS'):
"""
Write to file.
Args:
filename: filename and path to write potential file to.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Paths(MSONable):
"""
Set FEFF scattering paths('paths.dat' file used by the 'genfmt' module).
"""
def __init__(self, atoms, paths, degeneracies=None):
"""
Args:
atoms (Atoms): Atoms object
paths (list(list)): list of paths. Each path is a list of atom indices in the atomic
cluster(the molecular cluster created by Atoms class).
e.g. [[0, 1, 2], [5, 9, 4, 1]] -> 2 paths: one with 3 legs and the other with 4 legs.
degeneracies (list): list of degeneracies, one for each path. Set to 1 if not specified.
"""
self.atoms = atoms
self.paths = paths
self.degeneracies = degeneracies or [1] * len(paths)
assert len(self.degeneracies) == len(self.paths)
def __str__(self):
lines = ["PATH", "---------------"]
# to avoid name collisions, start from the max possible path index and count down.
path_index = 9999
for i, legs in enumerate(self.paths):
lines.append("{} {} {}".format(path_index, len(legs), self.degeneracies[i]))
lines.append("x y z ipot label")
for l in legs:
coords = self.atoms.cluster[l].coords.tolist()
tmp = "{:.6f} {:.6f} {:.6f}".format(*tuple(coords))
element = str(self.atoms.cluster[l].specie.name)
# the potential index for the absorbing atom (the one at the cluster origin) is 0
potential = 0 if np.linalg.norm(coords) <= 1e-6 else self.atoms.pot_dict[element]
tmp = "{} {} {}".format(tmp, potential, element)
lines.append(tmp)
path_index -= 1
return "\n".join(lines)
def write_file(self, filename="paths.dat"):
"""
Write paths.dat.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class FeffParserError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "FeffParserError : " + self.msg
def get_atom_map(structure):
"""
Returns a dict that maps each atomic symbol to a unique integer starting
from 1.
Args:
structure (Structure)
Returns:
dict
"""
syms = [site.specie.symbol for site in structure]
unique_pot_atoms = []
for i in syms:
    if i not in unique_pot_atoms:
        unique_pot_atoms.append(i)
atom_map = {}
for i, atom in enumerate(unique_pot_atoms):
atom_map[atom] = i + 1
return atom_map
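# Illustrative example (hypothetical structure): for a structure whose sites
# are [Co, Co, O, O], get_atom_map() returns {'Co': 1, 'O': 2}; potential
# number 0 is reserved for the absorbing atom.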
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
"""
Return the absorbing atom symbol and site index in the given structure.
Args:
absorbing_atom (str/int): symbol or site index
structure (Structure)
Returns:
str, int: symbol and site index
"""
if isinstance(absorbing_atom, str):
return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]
elif isinstance(absorbing_atom, int):
return str(structure[absorbing_atom].specie), absorbing_atom
else:
raise ValueError("absorbing_atom must be either specie symbol or site index")
| blondegeek/pymatgen | pymatgen/io/feff/inputs.py | Python | mit | 32,854 | ["FEFF", "pymatgen"] | 5a8d4568af0090bd12920331d8bd4f20e0c9d4587a4a23d84cf769f6d544026b |
|
################################################################################
# Copyright (C) 2013-2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `gaussian` module.
"""
import numpy as np
from scipy import special
from numpy import testing
from .. import gaussian
from bayespy.nodes import (Gaussian,
GaussianARD,
GaussianGamma,
Gamma,
Wishart)
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestGaussianFunctions(TestCase):
def test_rotate_covariance(self):
"""
Test the Gaussian array covariance rotation.
"""
# Check matrix
R = np.random.randn(2,2)
Cov = np.random.randn(2,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R),
np.einsum('ik,kl,lj', R, Cov, R.T))
# Check matrix with plates
R = np.random.randn(2,2)
Cov = np.random.randn(4,3,2,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R),
np.einsum('...ik,...kl,...lj', R, Cov, R.T))
# Check array, first axis
R = np.random.randn(2,2)
Cov = np.random.randn(2,3,3,2,3,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-3),
np.einsum('...ik,...kablcd,...lj->...iabjcd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=0),
np.einsum('...ik,...kablcd,...lj->...iabjcd',
R,
Cov,
R.T))
# Check array, middle axis
R = np.random.randn(2,2)
Cov = np.random.randn(3,2,3,3,2,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-2),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=1),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
# Check array, last axis
R = np.random.randn(2,2)
Cov = np.random.randn(3,3,2,3,3,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-1),
np.einsum('...ik,...abkcdl,...lj->...abicdj',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=2),
np.einsum('...ik,...abkcdl,...lj->...abicdj',
R,
Cov,
R.T))
# Check array, middle axis with plates
R = np.random.randn(2,2)
Cov = np.random.randn(4,4,3,2,3,3,2,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-2),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=1),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
pass
class TestGaussianARD(TestCase):
def test_init(self):
"""
Test the constructor of GaussianARD
"""
def check_init(true_plates, true_shape, mu, alpha, **kwargs):
X = GaussianARD(mu, alpha, **kwargs)
self.assertEqual(X.dims, (true_shape, true_shape+true_shape),
msg="Constructed incorrect dimensionality")
self.assertEqual(X.plates, true_plates,
msg="Constructed incorrect plates")
#
# Create from constant parents
#
# Use ndim=0 for constant mu
check_init((),
(),
0,
1)
check_init((3,2),
(),
np.zeros((3,2,)),
np.ones((2,)))
check_init((4,2,2,3),
(),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)))
# Use ndim
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
ndim=2)
# Use shape
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
shape=(2,3))
# Use ndim and shape
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
ndim=2,
shape=(2,3))
#
# Create from node parents
#
# ndim=0 by default
check_init((3,),
(),
GaussianARD(0, 1,
plates=(3,)),
Gamma(1, 1,
plates=(3,)))
check_init((4,2,2,3),
(),
GaussianARD(np.zeros((2,1,3)),
np.ones((2,1,3)),
ndim=3),
Gamma(np.ones((4,1,2,3)),
np.ones((4,1,2,3))))
# Use ndim
check_init((4,),
(2,2,3),
GaussianARD(np.zeros((4,1,2,3)),
np.ones((4,1,2,3)),
ndim=2),
Gamma(np.ones((4,2,1,3)),
np.ones((4,2,1,3))),
ndim=3)
# Use shape
check_init((4,),
(2,2,3),
GaussianARD(np.zeros((4,1,2,3)),
np.ones((4,1,2,3)),
ndim=2),
Gamma(np.ones((4,2,1,3)),
np.ones((4,2,1,3))),
shape=(2,2,3))
# Use ndim and shape
check_init((4,2),
(2,3),
GaussianARD(np.zeros((2,1,3)),
np.ones((2,1,3)),
ndim=2),
Gamma(np.ones((4,1,2,3)),
np.ones((4,1,2,3))),
ndim=2,
shape=(2,3))
# Test for a found bug
check_init((),
(3,),
np.ones(3),
1,
ndim=1)
# Parent mu has more axes
check_init(
(2,),
(3,),
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
np.ones((2,3)),
ndim=1
)
        # Do NOT silently add missing axes to mu; the constructor must raise an error instead
self.assertRaises(
ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
1,
ndim=3
)
#
# Errors
#
# Inconsistent shapes
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=1),
np.ones((4,3)),
ndim=2)
# Inconsistent dims of mu and alpha
self.assertRaises(ValueError,
GaussianARD,
np.zeros((2,3)),
np.ones((2,)))
# Inconsistent plates of mu and alpha
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((3,2,3)),
np.ones((3,2,3)),
ndim=2),
np.ones((3,4,2,3)),
ndim=3)
# Inconsistent ndim and shape
self.assertRaises(ValueError,
GaussianARD,
np.zeros((2,3)),
np.ones((2,)),
shape=(2,3),
ndim=1)
# Incorrect shape
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
np.ones((2,3)),
shape=(2,2))
pass
def test_message_to_child(self):
"""
Test moments of GaussianARD.
"""
# Check that moments have full shape when broadcasting
X = GaussianARD(np.zeros((2,)),
np.ones((3,2)),
shape=(4,3,2))
(u0, u1) = X._message_to_child()
self.assertEqual(np.shape(u0),
(4,3,2))
self.assertEqual(np.shape(u1),
(4,3,2,4,3,2))
# Check the formula
X = GaussianARD(2, 3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2)
self.assertAllClose(u1, 2**2 + 1/3)
# Check the formula for multidimensional arrays
X = GaussianARD(2*np.ones((2,1,4)),
3*np.ones((2,3,1)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu
X = GaussianARD(2*np.ones((3,1)),
3*np.ones((2,3,4)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted alpha
X = GaussianARD(2*np.ones((2,3,4)),
3*np.ones((3,1)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu and alpha
X = GaussianARD(2*np.ones((3,1)),
3*np.ones((3,1)),
shape=(2,3,4))
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu with plates
mu = GaussianARD(2*np.ones((5,1,3,4)),
np.ones((5,1,3,4)),
shape=(3,4),
plates=(5,1))
X = GaussianARD(mu,
3*np.ones((5,2,3,4)),
shape=(2,3,4),
plates=(5,))
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((5,2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((5,2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check posterior
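        # Conjugate update: prior N(2, 1/3), observation 10 with noise
        # precision 1, so the posterior precision is 3+1 and the posterior
        # mean is (3*2 + 1*10)/(3+1).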
X = GaussianARD(2, 3)
Y = GaussianARD(X, 1)
Y.observe(10)
X.update()
(u0, u1) = X._message_to_child()
self.assertAllClose(u0,
1/(3+1) * (3*2 + 1*10))
self.assertAllClose(u1,
(1/(3+1) * (3*2 + 1*10))**2 + 1/(3+1))
pass
def test_message_to_parent_mu(self):
"""
Test that GaussianARD computes the message to the 1st parent correctly.
"""
# Check formula with uncertain parent alpha
mu = GaussianARD(0, 1)
alpha = Gamma(2,1)
X = GaussianARD(mu,
alpha)
X.observe(3)
(m0, m1) = mu._message_from_children()
#(m0, m1) = X._message_to_parent(0)
self.assertAllClose(m0,
2*3)
self.assertAllClose(m1,
-0.5*2)
# Check formula with uncertain node
mu = GaussianARD(1, 1e10)
X = GaussianARD(mu, 2)
Y = GaussianARD(X, 1)
Y.observe(5)
X.update()
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2 * 1/(2+1)*(2*1+1*5))
self.assertAllClose(m1,
-0.5*2)
# Check alpha larger than mu
mu = GaussianARD(np.zeros((2,3)), 1e10, shape=(2,3))
X = GaussianARD(mu,
2*np.ones((3,2,3)))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3 * np.ones((2,3)))
self.assertAllClose(m1,
-0.5 * 3 * 2*misc.identity(2,3))
# Check mu larger than alpha
mu = GaussianARD(np.zeros((3,2,3)), 1e10, shape=(3,2,3))
X = GaussianARD(mu,
2*np.ones((2,3)))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2 * 3 * np.ones((3,2,3)))
self.assertAllClose(m1,
-0.5 * 2*misc.identity(3,2,3))
# Check node larger than mu and alpha
mu = GaussianARD(np.zeros((2,3)), 1e10, shape=(2,3))
X = GaussianARD(mu,
2*np.ones((3,)),
shape=(3,2,3))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3*np.ones((2,3)))
self.assertAllClose(m1,
-0.5 * 2 * 3*misc.identity(2,3))
# Check broadcasting of dimensions
mu = GaussianARD(np.zeros((2,1)), 1e10, shape=(2,1))
X = GaussianARD(mu,
2*np.ones((2,3)),
shape=(2,3))
X.observe(3*np.ones((2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3*np.ones((2,1)))
self.assertAllClose(m1,
-0.5 * 2 * 3*misc.identity(2,1))
# Check plates for smaller mu than node
mu = GaussianARD(0,1,
shape=(3,),
plates=(4,1,1))
X = GaussianARD(mu,
2*np.ones((3,)),
shape=(2,3),
plates=(4,5))
X.observe(3*np.ones((4,5,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0 * np.ones((4,1,1,3)),
2*3 * 5*2*np.ones((4,1,1,3)))
self.assertAllClose(m1 * np.ones((4,1,1,3,3)),
-0.5*2 * 5*2*misc.identity(3) * np.ones((4,1,1,3,3)))
# Check mask
mu = GaussianARD(np.zeros((2,1,3)), 1e10, shape=(3,))
X = GaussianARD(mu,
2*np.ones((2,4,3)),
shape=(3,),
plates=(2,4,))
X.observe(3*np.ones((2,4,3)), mask=[[True, True, True, False],
[False, True, False, True]])
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
(2*3 * np.ones((2,1,3))
* np.array([[[3]], [[2]]])))
self.assertAllClose(m1,
(-0.5*2 * misc.identity(3)
* np.ones((2,1,1,1))
* np.array([[[[3]]], [[[2]]]])))
# Check mask with different shapes
mu = GaussianARD(np.zeros((2,1,3)), 1e10, shape=())
X = GaussianARD(mu,
2*np.ones((2,4,3)),
shape=(3,),
plates=(2,4,))
mask = np.array([[True, True, True, False],
[False, True, False, True]])
X.observe(3*np.ones((2,4,3)), mask=mask)
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * np.sum(np.ones((2,4,3))*mask[...,None],
axis=-2,
keepdims=True))
self.assertAllClose(m1,
(-0.5*2 * np.sum(np.ones((2,4,3))*mask[...,None],
axis=-2,
keepdims=True)))
# Check non-ARD Gaussian child
mu = np.array([1,2])
Mu = GaussianARD(mu, 1e10, shape=(2,))
alpha = np.array([3,4])
Lambda = np.array([[1, 0.5],
[0.5, 1]])
X = GaussianARD(Mu, alpha, ndim=1)
Y = Gaussian(X, Lambda)
y = np.array([5,6])
Y.observe(y)
X.update()
(m0, m1) = Mu._message_from_children()
mean = np.dot(np.linalg.inv(np.diag(alpha)+Lambda),
np.dot(np.diag(alpha), mu)
+ np.dot(Lambda, y))
self.assertAllClose(m0,
np.dot(np.diag(alpha), mean))
self.assertAllClose(m1,
-0.5*np.diag(alpha))
# Check broadcasted variable axes
mu = GaussianARD(np.zeros(1), 1e10, shape=(1,))
X = GaussianARD(mu,
2,
shape=(3,))
X.observe(3*np.ones(3))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * np.sum(np.ones(3), axis=-1, keepdims=True))
self.assertAllClose(m1,
-0.5*2 * np.sum(np.identity(3),
axis=(-1,-2),
keepdims=True))
pass
def test_message_to_parent_alpha(self):
"""
        Test the message from GaussianARD to the 2nd parent (alpha).
"""
# Check formula with uncertain parent mu
mu = GaussianARD(1,1)
tau = Gamma(0.5*1e10, 1e10)
X = GaussianARD(mu,
tau)
X.observe(3)
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(3**2 - 2*3*1 + 1**2+1))
self.assertAllClose(m1,
0.5)
# Check formula with uncertain node
tau = Gamma(1e10, 1e10)
X = GaussianARD(2, tau)
Y = GaussianARD(X, 1)
Y.observe(5)
X.update()
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(1/(1+1)+3.5**2 - 2*3.5*2 + 2**2))
self.assertAllClose(m1,
0.5)
# Check alpha larger than mu
alpha = Gamma(np.ones((3,2,3))*1e10, 1e10)
X = GaussianARD(np.ones((2,3)),
alpha,
ndim=3)
X.observe(2*np.ones((3,2,3)))
(m0, m1) = alpha._message_from_children()
self.assertAllClose(m0 * np.ones((3,2,3)),
-0.5*(2**2 - 2*2*1 + 1**2) * np.ones((3,2,3)))
self.assertAllClose(m1*np.ones((3,2,3)),
0.5*np.ones((3,2,3)))
# Check mu larger than alpha
tau = Gamma(np.ones((2,3))*1e10, 1e10)
X = GaussianARD(np.ones((3,2,3)),
tau,
ndim=3)
X.observe(2*np.ones((3,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(2**2 - 2*2*1 + 1**2) * 3 * np.ones((2,3)))
self.assertAllClose(m1 * np.ones((2,3)),
0.5 * 3 * np.ones((2,3)))
# Check node larger than mu and alpha
tau = Gamma(np.ones((3,))*1e10, 1e10)
X = GaussianARD(np.ones((2,3)),
tau,
shape=(3,2,3))
X.observe(2*np.ones((3,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones(3),
-0.5*(2**2 - 2*2*1 + 1**2) * 6 * np.ones((3,)))
self.assertAllClose(m1 * np.ones(3),
0.5 * 6 * np.ones(3))
# Check plates for smaller mu than node
tau = Gamma(np.ones((4,1,2,3))*1e10, 1e10)
X = GaussianARD(GaussianARD(1, 1,
shape=(3,),
plates=(4,1,1)),
tau,
shape=(2,3),
plates=(4,5))
X.observe(2*np.ones((4,5,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones((4,1,2,3)),
(-0.5 * (2**2 - 2*2*1 + 1**2+1)
* 5*np.ones((4,1,2,3))))
self.assertAllClose(m1 * np.ones((4,1,2,3)),
5*0.5 * np.ones((4,1,2,3)))
# Check mask
tau = Gamma(np.ones((4,3))*1e10, 1e10)
X = GaussianARD(np.ones(3),
tau,
shape=(3,),
plates=(2,4,))
X.observe(2*np.ones((2,4,3)), mask=[[True, False, True, False],
[False, True, True, False]])
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones((4,3)),
(-0.5 * (2**2 - 2*2*1 + 1**2)
* np.ones((4,3))
* np.array([[1], [1], [2], [0]])))
self.assertAllClose(m1 * np.ones((4,3)),
0.5 * np.array([[1], [1], [2], [0]]) * np.ones((4,3)))
# Check non-ARD Gaussian child
mu = np.array([1,2])
alpha = np.array([3,4])
Alpha = Gamma(alpha*1e10, 1e10)
Lambda = np.array([[1, 0.5],
[0.5, 1]])
X = GaussianARD(mu, Alpha, ndim=1)
Y = Gaussian(X, Lambda)
y = np.array([5,6])
Y.observe(y)
X.update()
(m0, m1) = Alpha._message_from_children()
Cov = np.linalg.inv(np.diag(alpha)+Lambda)
mean = np.dot(Cov, np.dot(np.diag(alpha), mu)
+ np.dot(Lambda, y))
self.assertAllClose(m0 * np.ones(2),
-0.5 * np.diag(
np.outer(mean, mean) + Cov
- np.outer(mean, mu)
- np.outer(mu, mean)
+ np.outer(mu, mu)))
self.assertAllClose(m1 * np.ones(2),
0.5 * np.ones(2))
pass
def test_lowerbound(self):
"""
Test the variational Bayesian lower bound term for GaussianARD.
"""
# Test vector formula with full noise covariance
m = np.random.randn(2)
alpha = np.random.rand(2)
y = np.random.randn(2)
X = GaussianARD(m, alpha, ndim=1)
V = np.array([[3,1],[1,3]])
Y = Gaussian(X, V)
Y.observe(y)
X.update()
Cov = np.linalg.inv(np.diag(alpha) + V)
mu = np.dot(Cov, np.dot(V, y) + alpha*m)
x2 = np.outer(mu, mu) + Cov
logH_X = (+ 2*0.5*(1+np.log(2*np.pi))
+ 0.5*np.log(np.linalg.det(Cov)))
logp_X = (- 2*0.5*np.log(2*np.pi)
+ 0.5*np.log(np.linalg.det(np.diag(alpha)))
- 0.5*np.sum(np.diag(alpha)
* (x2
- np.outer(mu,m)
- np.outer(m,mu)
+ np.outer(m,m))))
self.assertAllClose(logp_X + logH_X,
X.lower_bound_contribution())
def check_lower_bound(shape_mu, shape_alpha, plates_mu=(), **kwargs):
M = GaussianARD(np.ones(plates_mu + shape_mu),
np.ones(plates_mu + shape_mu),
shape=shape_mu,
plates=plates_mu)
if not ('ndim' in kwargs or 'shape' in kwargs):
kwargs['ndim'] = len(shape_mu)
X = GaussianARD(M,
2*np.ones(shape_alpha),
**kwargs)
Y = GaussianARD(X,
3*np.ones(X.get_shape(0)),
**kwargs)
Y.observe(4*np.ones(Y.get_shape(0)))
X.update()
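            # Elementwise conjugate update: prior N(1, 1/2) and likelihood
            # precision 3 with observation 4 give posterior precision 2+3
            # and posterior mean (2*1 + 3*4)/(2+3).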
Cov = 1/(2+3)
mu = Cov * (2*1 + 3*4)
x2 = mu**2 + Cov
logH_X = (+ 0.5*(1+np.log(2*np.pi))
+ 0.5*np.log(Cov))
logp_X = (- 0.5*np.log(2*np.pi)
+ 0.5*np.log(2)
- 0.5*2*(x2 - 2*mu*1 + 1**2+1))
r = np.prod(X.get_shape(0))
self.assertAllClose(r * (logp_X + logH_X),
X.lower_bound_contribution())
# Test scalar formula
check_lower_bound((), ())
# Test array formula
check_lower_bound((2,3), (2,3))
# Test dim-broadcasting of mu
check_lower_bound((3,1), (2,3,4))
# Test dim-broadcasting of alpha
check_lower_bound((2,3,4), (3,1))
# Test dim-broadcasting of mu and alpha
check_lower_bound((3,1), (3,1),
shape=(2,3,4))
# Test dim-broadcasting of mu with plates
check_lower_bound((), (),
plates_mu=(),
shape=(),
plates=(5,))
# BUG: Scalar parents for array variable caused einsum error
check_lower_bound((), (),
shape=(3,))
# BUG: Log-det was summed over plates
check_lower_bound((), (),
shape=(3,),
plates=(4,))
pass
def test_rotate(self):
"""
Test the rotation of Gaussian ARD arrays.
"""
def check(shape, plates, einsum_x, einsum_xx, axis=-1):
# TODO/FIXME: Improve by having non-diagonal precision/covariance
# parameter for the Gaussian X
D = shape[axis]
X = GaussianARD(np.random.randn(*(plates+shape)),
np.random.rand(*(plates+shape)),
shape=shape,
plates=plates)
(x, xx) = X.get_moments()
R = np.random.randn(D,D)
X.rotate(R, axis=axis)
(rx, rxxr) = X.get_moments()
self.assertAllClose(rx,
np.einsum(einsum_x, R, x))
self.assertAllClose(rxxr,
np.einsum(einsum_xx, R, xx, R))
pass
# Rotate vector
check((3,), (),
'...jk,...k->...j',
'...mk,...kl,...nl->...mn')
check((3,), (2,4),
'...jk,...k->...j',
'...mk,...kl,...nl->...mn')
# Rotate array
check((2,3,4), (),
'...jc,...abc->...abj',
'...mc,...abcdef,...nf->...abmden',
axis=-1)
check((2,3,4), (5,6),
'...jc,...abc->...abj',
'...mc,...abcdef,...nf->...abmden',
axis=-1)
check((2,3,4), (),
'...jb,...abc->...ajc',
'...mb,...abcdef,...ne->...amcdnf',
axis=-2)
check((2,3,4), (5,6),
'...jb,...abc->...ajc',
'...mb,...abcdef,...ne->...amcdnf',
axis=-2)
check((2,3,4), (),
'...ja,...abc->...jbc',
'...ma,...abcdef,...nd->...mbcnef',
axis=-3)
check((2,3,4), (5,6),
'...ja,...abc->...jbc',
'...ma,...abcdef,...nd->...mbcnef',
axis=-3)
pass
def test_rotate_plates(self):
# Basic test for Gaussian vectors
X = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
(u0, u1) = X.get_moments()
Cov = u1 - linalg.outer(u0, u0, ndim=1)
Q = np.random.randn(3,3)
Qu0 = np.einsum('ik,kj->ij', Q, u0)
QCov = np.einsum('k,kij->kij', np.sum(Q, axis=0)**2, Cov)
Qu1 = QCov + linalg.outer(Qu0, Qu0, ndim=1)
X.rotate_plates(Q, plate_axis=-1)
(u0, u1) = X.get_moments()
self.assertAllClose(u0, Qu0)
self.assertAllClose(u1, Qu1)
# Test full covariance, that is, with observations
X = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
Y = Gaussian(X, [[2.0, 1.5], [1.5, 3.0]],
plates=(3,))
Y.observe(np.random.randn(3,2))
X.update()
(u0, u1) = X.get_moments()
Cov = u1 - linalg.outer(u0, u0, ndim=1)
Q = np.random.randn(3,3)
Qu0 = np.einsum('ik,kj->ij', Q, u0)
QCov = np.einsum('k,kij->kij', np.sum(Q, axis=0)**2, Cov)
Qu1 = QCov + linalg.outer(Qu0, Qu0, ndim=1)
X.rotate_plates(Q, plate_axis=-1)
(u0, u1) = X.get_moments()
self.assertAllClose(u0, Qu0)
self.assertAllClose(u1, Qu1)
pass
def test_initialization(self):
"""
Test initialization methods of GaussianARD
"""
X = GaussianARD(1, 2, shape=(2,), plates=(3,))
# Prior initialization
mu = 1 * np.ones((3, 2))
alpha = 2 * np.ones((3, 2))
X.initialize_from_prior()
u = X._message_to_child()
self.assertAllClose(u[0]*np.ones((3,2)),
mu)
self.assertAllClose(u[1]*np.ones((3,2,2)),
linalg.outer(mu, mu, ndim=1) +
misc.diag(1/alpha, ndim=1))
# Parameter initialization
mu = np.random.randn(3, 2)
alpha = np.random.rand(3, 2)
X.initialize_from_parameters(mu, alpha)
u = X._message_to_child()
self.assertAllClose(u[0], mu)
self.assertAllClose(u[1], linalg.outer(mu, mu, ndim=1) +
misc.diag(1/alpha, ndim=1))
# Value initialization
x = np.random.randn(3, 2)
X.initialize_from_value(x)
u = X._message_to_child()
self.assertAllClose(u[0], x)
self.assertAllClose(u[1], linalg.outer(x, x, ndim=1))
# Random initialization
X.initialize_from_random()
pass
class TestGaussianGamma(TestCase):
"""
Unit tests for GaussianGamma node.
"""
def test_init(self):
"""
Test the creation of GaussianGamma node
"""
# Simple construction
X_alpha = GaussianGamma([1,2,3], np.identity(3), 2, 10)
self.assertEqual(X_alpha.plates, ())
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates
X_alpha = GaussianGamma([1,2,3], np.identity(3), 2, 10, plates=(4,))
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates in mu
X_alpha = GaussianGamma(np.ones((4,3)), np.identity(3), 2, 10)
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates in Lambda
X_alpha = GaussianGamma(np.ones(3), np.ones((4,3,3))*np.identity(3), 2, 10)
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates in a
X_alpha = GaussianGamma(np.ones(3), np.identity(3), np.ones(4), 10)
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
        # Plates in b
X_alpha = GaussianGamma(np.ones(3), np.identity(3), 2, np.ones(4))
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Inconsistent plates
self.assertRaises(ValueError,
GaussianGamma,
np.ones((4,3)),
np.identity(3),
2,
10,
plates=())
# Inconsistent plates
self.assertRaises(ValueError,
GaussianGamma,
np.ones((4,3)),
np.identity(3),
2,
10,
plates=(5,))
# Unknown parameters
mu = Gaussian(np.zeros(3), np.identity(3))
Lambda = Wishart(10, np.identity(3))
b = Gamma(1, 1)
X_alpha = GaussianGamma(mu, Lambda, 2, b)
self.assertEqual(X_alpha.plates, ())
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# mu is Gaussian-gamma
mu_tau = GaussianGamma(np.ones(3), np.identity(3), 5, 5)
X_alpha = GaussianGamma(mu_tau, np.identity(3), 5, 5)
self.assertEqual(X_alpha.plates, ())
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
pass
def test_message_to_child(self):
"""
Test the message to child of GaussianGamma node.
"""
# Simple test
mu = np.array([1,2,3])
Lambda = np.identity(3)
a = 2
b = 10
X_alpha = GaussianGamma(mu, Lambda, a, b)
u = X_alpha._message_to_child()
self.assertEqual(len(u), 4)
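        # The four Gaussian-gamma moments are <tau x>, <tau x x^T>, <tau> and
        # <log tau>, with <tau> = a/b and <tau x x^T> = inv(Lambda) + <tau> mu mu^T.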
tau = np.array(a/b)
self.assertAllClose(u[0],
tau[...,None] * mu)
self.assertAllClose(u[1],
(linalg.inv(Lambda)
+ tau[...,None,None] * linalg.outer(mu, mu)))
self.assertAllClose(u[2],
tau)
self.assertAllClose(u[3],
-np.log(b) + special.psi(a))
# Test with unknown parents
mu = Gaussian(np.arange(3), 10*np.identity(3))
Lambda = Wishart(10, np.identity(3))
a = 2
b = Gamma(3, 15)
X_alpha = GaussianGamma(mu, Lambda, a, b)
u = X_alpha._message_to_child()
(mu, mumu) = mu._message_to_child()
Cov_mu = mumu - linalg.outer(mu, mu)
(Lambda, _) = Lambda._message_to_child()
(b, _) = b._message_to_child()
(tau, logtau) = Gamma(a, b + 0.5*np.sum(Lambda*Cov_mu))._message_to_child()
self.assertAllClose(u[0],
tau[...,None] * mu)
self.assertAllClose(u[1],
(linalg.inv(Lambda)
+ tau[...,None,None] * linalg.outer(mu, mu)))
self.assertAllClose(u[2],
tau)
self.assertAllClose(u[3],
logtau)
# Test with plates
mu = Gaussian(np.reshape(np.arange(3*4), (4,3)),
10*np.identity(3),
plates=(4,))
Lambda = Wishart(10, np.identity(3))
a = 2
b = Gamma(3, 15)
X_alpha = GaussianGamma(mu, Lambda, a, b, plates=(4,))
u = X_alpha._message_to_child()
(mu, mumu) = mu._message_to_child()
Cov_mu = mumu - linalg.outer(mu, mu)
(Lambda, _) = Lambda._message_to_child()
(b, _) = b._message_to_child()
(tau, logtau) = Gamma(a,
b + 0.5*np.sum(Lambda*Cov_mu,
axis=(-1,-2)))._message_to_child()
self.assertAllClose(u[0] * np.ones((4,1)),
np.ones((4,1)) * tau[...,None] * mu)
self.assertAllClose(u[1] * np.ones((4,1,1)),
np.ones((4,1,1)) * (linalg.inv(Lambda)
+ tau[...,None,None] * linalg.outer(mu, mu)))
self.assertAllClose(u[2] * np.ones(4),
np.ones(4) * tau)
self.assertAllClose(u[3] * np.ones(4),
np.ones(4) * logtau)
pass
def test_mask_to_parent(self):
"""
Test the mask handling in GaussianGamma node
"""
pass
class TestGaussianGradient(TestCase):
"""Numerically check Riemannian gradient of several nodes.
Using VB-EM update equations will take a unit length step to the
Riemannian gradient direction. Thus, the change caused by a VB-EM
update and the Riemannian gradient should be equal.
"""
def test_riemannian_gradient(self):
"""Test Riemannian gradient of a Gaussian node."""
D = 3
#
# Without observations
#
# Construct model
mu = np.random.randn(D)
Lambda = random.covariance(D)
X = Gaussian(mu, Lambda)
# Random initialization
mu0 = np.random.randn(D)
Lambda0 = random.covariance(D)
X.initialize_from_parameters(mu0, Lambda0)
# Initial parameters
phi0 = X.phi
# Gradient
g = X.get_riemannian_gradient()
# Parameters after VB-EM update
X.update()
phi1 = X.phi
# Check
self.assertAllClose(g[0],
phi1[0] - phi0[0])
self.assertAllClose(g[1],
phi1[1] - phi0[1])
# TODO/FIXME: Actually, gradient should be zero because cost function
# is zero without observations! Use the mask!
#
# With observations
#
# Construct model
mu = np.random.randn(D)
Lambda = random.covariance(D)
X = Gaussian(mu, Lambda)
V = random.covariance(D)
Y = Gaussian(X, V)
Y.observe(np.random.randn(D))
# Random initialization
mu0 = np.random.randn(D)
Lambda0 = random.covariance(D)
X.initialize_from_parameters(mu0, Lambda0)
# Initial parameters
phi0 = X.phi
# Gradient
g = X.get_riemannian_gradient()
# Parameters after VB-EM update
X.update()
phi1 = X.phi
# Check
self.assertAllClose(g[0],
phi1[0] - phi0[0])
self.assertAllClose(g[1],
phi1[1] - phi0[1])
pass
def test_gradient(self):
"""Test standard gradient of a Gaussian node."""
D = 3
np.random.seed(42)
#
# Without observations
#
# Construct model
mu = np.random.randn(D)
Lambda = random.covariance(D)
X = Gaussian(mu, Lambda)
# Random initialization
mu0 = np.random.randn(D)
Lambda0 = random.covariance(D)
X.initialize_from_parameters(mu0, Lambda0)
Q = VB(X)
# Initial parameters
phi0 = X.phi
# Gradient
rg = X.get_riemannian_gradient()
g = X.get_gradient(rg)
# Numerical gradient
eps = 1e-6
p0 = X.get_parameters()
l0 = Q.compute_lowerbound(ignore_masked=False)
g_num = [np.zeros(D), np.zeros((D,D))]
for i in range(D):
e = np.zeros(D)
e[i] = eps
p1 = p0[0] + e
X.set_parameters([p1, p0[1]])
l1 = Q.compute_lowerbound(ignore_masked=False)
g_num[0][i] = (l1 - l0) / eps
for i in range(D):
for j in range(i+1):
e = np.zeros((D,D))
e[i,j] += eps
e[j,i] += eps
p1 = p0[1] + e
X.set_parameters([p0[0], p1])
l1 = Q.compute_lowerbound(ignore_masked=False)
g_num[1][i,j] = (l1 - l0) / (2*eps)
g_num[1][j,i] = (l1 - l0) / (2*eps)
# Check
self.assertAllClose(g[0],
g_num[0])
self.assertAllClose(g[1],
g_num[1])
#
# With observations
#
# Construct model
mu = np.random.randn(D)
Lambda = random.covariance(D)
X = Gaussian(mu, Lambda)
# Random initialization
mu0 = np.random.randn(D)
Lambda0 = random.covariance(D)
X.initialize_from_parameters(mu0, Lambda0)
V = random.covariance(D)
Y = Gaussian(X, V)
Y.observe(np.random.randn(D))
Q = VB(Y, X)
# Initial parameters
phi0 = X.phi
# Gradient
rg = X.get_riemannian_gradient()
g = X.get_gradient(rg)
# Numerical gradient
eps = 1e-6
p0 = X.get_parameters()
l0 = Q.compute_lowerbound()
g_num = [np.zeros(D), np.zeros((D,D))]
for i in range(D):
e = np.zeros(D)
e[i] = eps
p1 = p0[0] + e
X.set_parameters([p1, p0[1]])
l1 = Q.compute_lowerbound()
g_num[0][i] = (l1 - l0) / eps
for i in range(D):
for j in range(i+1):
e = np.zeros((D,D))
e[i,j] += eps
e[j,i] += eps
p1 = p0[1] + e
X.set_parameters([p0[0], p1])
l1 = Q.compute_lowerbound()
g_num[1][i,j] = (l1 - l0) / (2*eps)
g_num[1][j,i] = (l1 - l0) / (2*eps)
# Check
self.assertAllClose(g[0],
g_num[0])
self.assertAllClose(g[1],
g_num[1])
#
# With plates
#
# Construct model
K = D+1
mu = np.random.randn(D)
Lambda = random.covariance(D)
X = Gaussian(mu, Lambda, plates=(K,))
V = random.covariance(D, size=(K,))
Y = Gaussian(X, V)
Y.observe(np.random.randn(K,D))
Q = VB(Y, X)
# Random initialization
mu0 = np.random.randn(*(X.get_shape(0)))
Lambda0 = random.covariance(D, size=X.plates)
X.initialize_from_parameters(mu0, Lambda0)
# Initial parameters
phi0 = X.phi
# Gradient
rg = X.get_riemannian_gradient()
g = X.get_gradient(rg)
# Numerical gradient
eps = 1e-6
p0 = X.get_parameters()
l0 = Q.compute_lowerbound()
g_num = [np.zeros(X.get_shape(0)), np.zeros(X.get_shape(1))]
for k in range(K):
for i in range(D):
e = np.zeros(X.get_shape(0))
e[k,i] = eps
p1 = p0[0] + e
X.set_parameters([p1, p0[1]])
l1 = Q.compute_lowerbound()
g_num[0][k,i] = (l1 - l0) / eps
for i in range(D):
for j in range(i+1):
e = np.zeros(X.get_shape(1))
e[k,i,j] += eps
e[k,j,i] += eps
p1 = p0[1] + e
X.set_parameters([p0[0], p1])
l1 = Q.compute_lowerbound()
g_num[1][k,i,j] = (l1 - l0) / (2*eps)
g_num[1][k,j,i] = (l1 - l0) / (2*eps)
# Check
self.assertAllClose(g[0],
g_num[0])
self.assertAllClose(g[1],
g_num[1])
pass
|
dungvtdev/upsbayescpm
|
bayespy/inference/vmp/nodes/tests/test_gaussian.py
|
Python
|
mit
| 46,041
|
[
"Gaussian"
] |
6b544421586c95bf55c5090db488f866cf690f8586c6965e1098e54c0066f332
|
#!/usr/bin/env python
import re
import argparse
import sys
def create_field_dict(line):
field_dict = dict()
for field in line.rstrip().split('\t'):
if not field.startswith('@'):
key, value = field.split(':', 1)
field_dict[key] = value
return field_dict
def create_field_set(line):
field_set = set()
for field in line.rstrip().split('\t'):
if not field.startswith('@') and not field.startswith('DT:') and not field.startswith('PI:') and not field.startswith('DS:'):
field_set.add(field)
return frozenset(field_set)
class ValidateReadgroups(object):
def __init__(self, readgroupfiles):
self.invalid_lines = set()
self.valid_lines = set()
self.expected_lines = set()
for rgfile in readgroupfiles:
with open(rgfile, 'r') as f:
for line in f:
self.expected_lines.add(create_field_set(line))
def __call__(self, line):
field_set = create_field_set(line)
if field_set in self.expected_lines:
self.valid_lines.add(line)
else:
print line,
self.invalid_lines.add(line)
def verdict(self):
assert len(self.valid_lines) == len(self.expected_lines), 'Missing expected @RG lines'
assert len(self.invalid_lines) == 0, 'Invalid @RG lines found'
class ValidateSq(object):
def __init__(self, reference_path):
self.invalid_ah = False
self.invalid_name = False
self.expected_names = self._parse_index_file(reference_path + '.fai')
self.alt_names = self._parse_alt_file(reference_path + '.alt')
def _parse_index_file(self, path):
expected_names = set()
with open(path, 'r') as f:
for line in f:
fields = line.rstrip().split('\t')
expected_names.add(fields[0])
return expected_names
def _parse_alt_file(self, path):
alt_names = set()
with open(path, 'r') as f:
for line in f:
fields = line.rstrip().split('\t')
if not line.startswith('@SQ'):
                    # Not a primary reference name; record it as an ALT contig name
alt_names.add(fields[0])
return alt_names
def __call__(self, line):
field_dict = create_field_dict(line)
seq_name = field_dict['SN']
if seq_name in self.expected_names:
has_ah = 'AH' in field_dict
is_alt = seq_name in self.alt_names
if (is_alt != has_ah):
print 'Invalid AH tag for reference name: {0}'.format(seq_name)
self.invalid_ah = True
else:
print 'Unexpected reference name: {0}'.format(seq_name)
self.invalid_name = True
def verdict(self):
        assert not self.invalid_ah, 'No AH tag found on the @SQ record of an expected ALT'
assert not self.invalid_name, 'Unexpected reference name detected in @SQ record'
class ValidateBwa(object):
def __init__(self):
self.found_bwa = False
self.all_bwa_had_proper_params = True
def __call__(self, line):
field_dict = create_field_dict(line)
if field_dict['ID'].startswith('bwa') and (field_dict['PN'] == 'bwa' or field_dict['PN'] == 'bwamem'):
self.found_bwa = True
self.all_bwa_had_proper_params = (self.all_bwa_had_proper_params and
field_dict['VN'] == '0.7.15-r1140' and
' -Y ' in field_dict['CL'] and
' -K 100000000 ' in field_dict['CL'])
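        # -Y keeps supplementary alignments soft-clipped and -K fixes the
        # input batch size, making bwa mem output reproducible across
        # different thread counts.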
def verdict(self):
assert self.found_bwa, 'No bwa @PG entries found'
assert self.all_bwa_had_proper_params, 'Improper bwa params (version, -Y or -K)'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Validate a CCDG CRAM file header')
parser.add_argument('readgroupfile', metavar='FILE', type=str, nargs='+', help='path to file containing all expected @RG lines')
parser.add_argument('--ref', metavar='FILE', type=str, help='path to reference file to use for determining expected chromosome names and alts. Needs .alt and .fai file')
args = parser.parse_args()
rg_validator = ValidateReadgroups(args.readgroupfile)
sq_validator = ValidateSq(args.ref)
bwa_validator = ValidateBwa()
def noop(x):
pass
validator_dict = {
'@SQ': sq_validator,
'@PG': bwa_validator,
'@RG': rg_validator
}
for line in sys.stdin:
v = validator_dict.get(line.split('\t')[0], noop)
v(line)
rv = 0
for validator in validator_dict.values():
try:
validator.verdict()
except AssertionError as e:
rv = 1
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.exit(rv)
|
ernfrid/oneoffs
|
ccdg_synch/validate_header.py
|
Python
|
mit
| 4,856
|
[
"BWA"
] |
eb2036c3e97ff133ef779fe7f3e4826e58540d1583d3b20503e32fe14cdfd235
|
from __future__ import print_function, division
inp = """
#File names and Format
#Directory to save output files
direc 'Al1000'
stem 'Al1000'
make_image 0
output '.gve' '.flt' '.par' '.ubi'
# Structural parameters
structure_phase_0 'Al.cif'
# Crystal/grains parameters
no_grains 1000
# Total number of grains summed over all phases to be simulated.
# Need to match the number of U_Grains_X key word
gen_size 1 -0.05 0.01 0.2
#Grain phase : If you want to let the PolyXSim appoint which grain
# belongs to which phase the following keyword can be used.
gen_phase 1 1 0
# Grain orientation : random (1) or specify specific orientation matrices (0)
gen_U 0
# 1 random, box or cylinder
gen_pos 0 0
# pos_grains_0 0.1 -0.1 0.05
#sample_xyz 1.0 1.0 1.0
gen_eps 1 0 0 0 0
# Instrumentation
# Detector pixel size [mm]
y_size 0.05
z_size 0.05
#Detector size [pixels]
dety_size 2048
detz_size 2048
#Distance from sample to detector [mm]
distance 300
#Detector tilt
tilt_x 0.005
tilt_y 0.01
tilt_z 0.008
#Detector orientation
o11 1
o12 0
o21 0
o22 -1
#Noise
noise 0
#Reflection
intensity_const 1
lorentz_apply 1
beampol_apply 1
peakshape 0
#Instrumental
#Beam specs, Pt edge
wavelength 0.158154
beamflux 1e-12
beampol_factor 1
beampol_direct 0
#Beam center on detector [pixels]
dety_center 1022.1
detz_center 1028.3
#Omega scan range [degrees]
omega_start 0
omega_end 180
#Omega step size [degrees]
omega_step 0.25
#Omega rotation direction [degrees]
omega_sign 1
#Wedge angle of omega axis
wedge 0.02
"""
"U_grains_0 7.712806e-01 -6.337184e-01 5.939117e-02 6.130920e-01 7.146102e-01 -3.368241e-01 1.710101e-01 2.961981e-01 9.396926e-01"
import numpy as np, xfab.tools
# Generate a 10x10x10 grid of grain positions centred on the origin,
# then add a small deterministic jitter to each grain.
x,y,z = np.mgrid[0:10,0:10,0:10]-5
dx = np.sin(np.arange(1000))/3 # +/- 1
dy = np.cos(np.arange(1000))/5
dz = np.sin(np.arange(1000))/7
t = np.array( (x.ravel()+dx, y.ravel()+dy, z.ravel()+dz ) )/10
# print(t.shape)
np.savetxt("t",t.T)
# Orientations: t is in the range [-0.5, 0.5], so reuse it as Rodrigues
# vectors and convert each one to an orientation matrix.
u = [xfab.tools.rod_to_u( v) for v in t.T]
f=open("Al1000.inp","w")
f.write(inp)
for i,v in enumerate(t.T):
f.write("pos_grains_%d %f %f %f\n"%(i,v[0],v[1],v[2]))
for i,v in enumerate(u):
f.write("U_grains_%d %f %f %f %f %f %f %f %f %f\n"%(i,
v[0,0],v[0,1],v[0,2],v[1,0],v[1,1],v[1,2],v[2,0],v[2,1],v[2,2] ))
|
jonwright/ImageD11
|
test/simul_1000_grains/make_u_t.py
|
Python
|
gpl-2.0
| 2,397
|
[
"CRYSTAL"
] |
4abe66a9ef61b1ae3042e4c8ae4139585b2f5ac4a2413db7cc28c3d311f68056
|
# -*- coding: utf-8 -*-
__author__ = 'xuanwo'
a = 100
print('''
a
ad
adf
a
sgds
afg
asdf
as
d
''')
|
Xuanwo/chineseregion
|
chineseregion/learn.py
|
Python
|
mit
| 100
|
[
"ADF"
] |
f2f0e962e017349bc1515b7083745b6c98647b2c73906a2f6a5fc1fe5bf29265
|
from ase.lattice.spacegroup import crystal
a = 4.6
c = 2.95
rutile =crystal(['Ti', 'O'], basis=[(0, 0, 0), (0.3, 0.3, 0.0)],
spacegroup=136, cellpar=[a, a, c, 90, 90, 90])
|
grhawk/ASE
|
tools/doc/tutorials/spacegroup/spacegroup-rutile.py
|
Python
|
gpl-2.0
| 189
|
[
"ASE",
"CRYSTAL"
] |
89d28b9139ac2ce62260b420d337595729cbe18d3ef9c30646d021e2d7219fd8
|
#!/usr/bin/python
# ORF prediction for the sequences without homologs
from Bio import SeqIO
from Bio.Seq import reverse_complement, transcribe, back_transcribe, translate
from Bio.Alphabet import IUPAC
import getopt, sys
import string
# getopt reads in my input file
input_file = ""
#print "All arguments: ", sys.argv
shortOptions = 'hf:t:'
longOptions = ['help', 'filename=', 'threshold=']
#==============================================================================
def usage():
print """
%s
-h\t--help\tdisplay help.
-f\t--filename\tpath to FASTA input file in which to predict ORFs
-t\t--threshold\tminimum length of predicted ORF (in amino acids) to be considered
""" % sys.argv[0]
#==============================================================================
def get_parameters():
if len(sys.argv) == 1:
usage()
sys.exit()
    input_file = ""
    threshold = 0
    opts = []
    args = []
try:
opts, args = getopt.getopt(sys.argv[1:], shortOptions, longOptions)
except getopt.GetoptError:
print "ERR: At least one option is not available!"
usage()
sys.exit()
for o, a in opts:
if o == "--help" or o == "-h":
print "HELP"
usage()
elif o == "--filename" or o == "-f":
# print "Filename:", a
input_file = a
elif o == "--threshold" or o == "-t":
threshold = int(a)
for a in args:
print "Additional argument, no option: ", a
#end of getopt stuff! now it becomes even more exciting!!!
return input_file, threshold
def get_input_sequences(input_file):
ids2seqs = {}
for seq_record in SeqIO.parse(open(input_file), "fasta"):
ids2seqs[seq_record.id] = seq_record.seq
return ids2seqs
############################ now the real programme starts ############################
input_file, threshold = get_parameters()
ids2seqs = get_input_sequences(input_file)
#header = ['id', 'frame', 'startpos', 'endpos', 'cds', 'protein', 'evidence']
#print "#" + string.join(header, "\t")
# iterate input sequences
for key, dna_sequence_direction1 in ids2seqs.iteritems():
# direction1 is the direction we originally have had, 2 is the antisense strand
dna_sequence_direction2 = dna_sequence_direction1.reverse_complement()
# TRANSLATE ALL POSSIBLE ORFs, do not stop at STOP codons
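    # Six reading frames: '1'..'3' shift the sense strand by 0-2 nt and
    # '-1'..'-3' do the same on the reverse complement.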
translations = {}
translations['1'] = translate(dna_sequence_direction1)
translations['-1'] = translate(dna_sequence_direction2)
translations['2'] = translate(dna_sequence_direction1[1:])
translations['-2'] = translate(dna_sequence_direction2[1:])
translations['3'] = translate(dna_sequence_direction1[2:])
translations['-3'] = translate(dna_sequence_direction2[2:])
polypeptides = {}
for frame, translation in translations.iteritems():
peptides = translation.split('*')
startpos = 0
for peptide in peptides:
polypeptides[peptide.tostring()] = [frame, startpos]
startpos += len(peptide)+1
# get longest ORF with startpos and frame
peptides = polypeptides.keys()
peptides.sort(key=len)
longestpeptide = peptides[-1]
frame, startpos = polypeptides[longestpeptide]
if len(longestpeptide) < threshold: continue
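    # Map the peptide back to nucleotide coordinates within its frame:
    # 3 nt per residue, plus one extra codon for the trailing STOP.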
start_nt = startpos *3
stop_nt = start_nt + ((len(longestpeptide)+1)*3)
if frame.startswith('-'):
cds = dna_sequence_direction2.tostring()
else:
cds = dna_sequence_direction1.tostring()
cds = cds[start_nt:stop_nt+1]
if frame.startswith('-'):
start_nt, stop_nt = stop_nt, start_nt
outlist = [key, frame, str(start_nt), str(stop_nt), cds, longestpeptide, "2"]
print string.join(outlist, "\t")
|
lotharwissler/bioinformatics
|
python/openreadingframe/orf_prediction_part2.py
|
Python
|
mit
| 3,626
|
[
"exciting"
] |
f7a3f72b7e2c12e4a8b0977b3eb991c7c97674b506e8f5a3168f964763b14aa4
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""eprimer3.py
Code to conduct primer prediction with ePrimer3
(c) The James Hutton Institute 2016-2019
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2016-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from Bio import SeqIO
from Bio.Emboss.Applications import Primer3Commandline
def build_commands(collection, eprimer3_exe, eprimer3_dir, existingfiles, argdict=None):
"""Builds and returns a list of command-lines to run ePrimer3
:param collection: PDPCollection to generate command-lines for
:param eprimer3_exe: path to EMBOSS ePrimer3 executable
:param eprimer3_dir: path to output directory
:param existingfiles: iterable of existing output files to be reused.
The commands will run on each sequence in the passed PDPCollection.
"""
clines = [] # Holds command-lines
# Ensure output directory exists
os.makedirs(eprimer3_dir, exist_ok=True)
for g in collection.data:
stempath = os.path.split(os.path.splitext(g.seqfile)[0])
stem = os.path.join(eprimer3_dir, stempath[-1])
# Are we using the filter region information for this design?
# Two ways we won't use the filter region: the --filter argument/
# ep_filter argument is not set; the --filter argument/ep_filter
# argument *is* set, but there's no filtered genome sequence file.
if argdict["ep_filter"] and g.filtered_seqfile is not None:
seqfile = g.filtered_seqfile
else:
seqfile = g.seqfile
cline = build_command(eprimer3_exe, seqfile, stem, argdict)
g.cmds["ePrimer3"] = cline
if os.path.split(cline.outfile)[-1] not in existingfiles:
clines.append(cline)
return clines
def build_command(eprimer3_exe, seqfile, filestem, argdict=None):
"""Builds and returns ePrimer3 command line.
The ePrimer3 command uses the Biopython interface
"""
cline = Primer3Commandline(cmd=eprimer3_exe)
cline.sequence = seqfile
cline.auto = True
cline.outfile = filestem + ".eprimer3"
if argdict is not None:
prange = [0, 200]
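        # ePrimer3 expects the product size as a single "min-max" range, so
        # collect ep_psizemin/ep_psizemax here and join them after the loop.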
args = [(a[3:], v) for a, v in argdict.items() if a.startswith("ep_")]
for arg, val in args:
if "psizemin" == arg:
prange[0] = val
elif "psizemax" == arg:
prange[1] = val
else:
setattr(cline, arg, val)
setattr(cline, "prange", "%d-%d" % tuple(prange))
return cline
|
widdowquinn/find_differential_primers
|
diagnostic_primers/eprimer3.py
|
Python
|
mit
| 3,730
|
[
"Biopython"
] |
9b531fe2cd5442dfc1fb91ff17ac4f8ff4b794f5487055c58fc9c4e8ef802ca3
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006, 2008-2010, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""handle diagram generation options for class diagram or default diagrams
"""
from six.moves import builtins
import astroid
from pylint.pyreverse.diagrams import PackageDiagram, ClassDiagram
from pylint.pyreverse.utils import LocalsVisitor
BUILTINS_NAME = builtins.__name__
# diagram generators ##########################################################
class DiaDefGenerator(object):
"""handle diagram generation options"""
def __init__(self, linker, handler):
"""common Diagram Handler initialization"""
self.config = handler.config
self._set_default_options()
self.linker = linker
self.classdiagram = None # defined by subclasses
def get_title(self, node):
"""get title for objects"""
title = node.name
if self.module_names:
title = '%s.%s' % (node.root().name, title)
return title
def _set_option(self, option):
"""activate some options if not explicitly deactivated"""
# if we have a class diagram, we want more information by default;
# so if the option is None, we return True
if option is None:
return bool(self.config.classes)
return option
def _set_default_options(self):
"""set different default options with _default dictionary"""
self.module_names = self._set_option(self.config.module_names)
all_ancestors = self._set_option(self.config.all_ancestors)
all_associated = self._set_option(self.config.all_associated)
anc_level, ass_level = (0, 0)
if all_ancestors:
anc_level = -1
if all_associated:
ass_level = -1
if self.config.show_ancestors is not None:
anc_level = self.config.show_ancestors
if self.config.show_associated is not None:
ass_level = self.config.show_associated
self.anc_level, self.ass_level = anc_level, ass_level
def _get_levels(self):
"""help function for search levels"""
return self.anc_level, self.ass_level
def show_node(self, node):
"""true if builtins and not show_builtins"""
if self.config.show_builtin:
return True
return node.root().name != BUILTINS_NAME
def add_class(self, node):
"""visit one class and add it to diagram"""
self.linker.visit(node)
self.classdiagram.add_object(self.get_title(node), node)
def get_ancestors(self, node, level):
"""return ancestor nodes of a class node"""
if level == 0:
return
for ancestor in node.ancestors(recurs=False):
if not self.show_node(ancestor):
continue
yield ancestor
def get_associated(self, klass_node, level):
"""return associated nodes of a class node"""
if level == 0:
return
for ass_nodes in list(klass_node.instance_attrs_type.values()) + \
list(klass_node.locals_type.values()):
for ass_node in ass_nodes:
if isinstance(ass_node, astroid.Instance):
ass_node = ass_node._proxied
if not (isinstance(ass_node, astroid.ClassDef)
and self.show_node(ass_node)):
continue
yield ass_node
def extract_classes(self, klass_node, anc_level, ass_level):
"""extract recursively classes related to klass_node"""
if self.classdiagram.has_node(klass_node) or not self.show_node(klass_node):
return
self.add_class(klass_node)
for ancestor in self.get_ancestors(klass_node, anc_level):
self.extract_classes(ancestor, anc_level-1, ass_level)
for ass_node in self.get_associated(klass_node, ass_level):
self.extract_classes(ass_node, anc_level, ass_level-1)
class DefaultDiadefGenerator(LocalsVisitor, DiaDefGenerator):
"""generate minimum diagram definition for the project :
* a package diagram including project's modules
* a class diagram including project's classes
"""
def __init__(self, linker, handler):
DiaDefGenerator.__init__(self, linker, handler)
LocalsVisitor.__init__(self)
def visit_project(self, node):
"""visit an pyreverse.utils.Project node
create a diagram definition for packages
"""
mode = self.config.mode
if len(node.modules) > 1:
self.pkgdiagram = PackageDiagram('packages %s' % node.name, mode)
else:
self.pkgdiagram = None
self.classdiagram = ClassDiagram('classes %s' % node.name, mode)
def leave_project(self, node): # pylint: disable=unused-argument
"""leave the pyreverse.utils.Project node
return the generated diagram definition
"""
if self.pkgdiagram:
return self.pkgdiagram, self.classdiagram
return self.classdiagram,
def visit_module(self, node):
"""visit an astroid.Module node
add this class to the package diagram definition
"""
if self.pkgdiagram:
self.linker.visit(node)
self.pkgdiagram.add_object(node.name, node)
def visit_classdef(self, node):
"""visit an astroid.Class node
add this class to the class diagram definition
"""
anc_level, ass_level = self._get_levels()
self.extract_classes(node, anc_level, ass_level)
def visit_importfrom(self, node):
"""visit astroid.From and catch modules for package diagram
"""
if self.pkgdiagram:
self.pkgdiagram.add_from_depend(node, node.modname)
class ClassDiadefGenerator(DiaDefGenerator):
"""generate a class diagram definition including all classes related to a
given class
"""
def __init__(self, linker, handler):
DiaDefGenerator.__init__(self, linker, handler)
def class_diagram(self, project, klass):
"""return a class diagram definition for the given klass and its
related klasses
"""
self.classdiagram = ClassDiagram(klass, self.config.mode)
if len(project.modules) > 1:
module, klass = klass.rsplit('.', 1)
module = project.get_module(module)
else:
module = project.modules[0]
klass = klass.split('.')[-1]
klass = next(module.ilookup(klass))
anc_level, ass_level = self._get_levels()
self.extract_classes(klass, anc_level, ass_level)
return self.classdiagram
# diagram handler #############################################################
class DiadefsHandler(object):
"""handle diagram definitions :
get it from user (i.e. xml files) or generate them
"""
def __init__(self, config):
self.config = config
def get_diadefs(self, project, linker):
"""Get the diagrams configuration data
:param project:The pyreverse project
:type project: pyreverse.utils.Project
:param linker: The linker
:type linker: pyreverse.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
:returns: The list of diagram definitions
:rtype: list(:class:`pylint.pyreverse.diagrams.ClassDiagram`)
"""
# read and interpret diagram definitions (Diadefs)
diagrams = []
generator = ClassDiadefGenerator(linker, self)
for klass in self.config.classes:
diagrams.append(generator.class_diagram(project, klass))
if not diagrams:
diagrams = DefaultDiadefGenerator(linker, self).visit(project)
for diagram in diagrams:
diagram.extract_relationships()
return diagrams
|
axbaretto/beam
|
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/pyreverse/diadefslib.py
|
Python
|
apache-2.0
| 8,344
|
[
"VisIt"
] |
b808821a73b81cc21c6907cb52d6770c978c0a51fe7b4ee33b9e613bc1227eb7
|
from __future__ import unicode_literals, division, absolute_import
import logging
import re
from datetime import datetime, timedelta
from dateutil.parser import parse as dateutil_parse
from sqlalchemy import Table, Column, Integer, String, Unicode, Boolean, Date, DateTime, Time, or_, func
from sqlalchemy.orm import relation, object_session
from sqlalchemy.schema import ForeignKey
from flexget import db_schema
from flexget import plugin
from flexget import options
from flexget.db_schema import upgrade
from flexget.event import event
from flexget.manager import Session
from flexget.plugin import get_plugin_by_name
from flexget.utils import requests
from flexget.utils.database import with_session
from flexget.utils.simple_persistence import SimplePersistence
from flexget.logger import console
Base = db_schema.versioned_base('api_trakt', 3)
log = logging.getLogger('api_trakt')
# Production Site
CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af'
CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2'
API_URL = 'https://api-v2launch.trakt.tv/'
PIN_URL = 'http://trakt.tv/pin/346'
# Stores the last time we checked for updates for shows/movies
updated = SimplePersistence('api_trakt')
# Oauth account authentication
class TraktUserAuth(Base):
__tablename__ = 'trakt_user_auth'
account = Column(Unicode, primary_key=True)
access_token = Column(Unicode)
refresh_token = Column(Unicode)
created = Column(DateTime)
expires = Column(DateTime)
def __init__(self, account, access_token, refresh_token, created, expires):
self.account = account
self.access_token = access_token
self.refresh_token = refresh_token
self.expires = token_expire_date(expires)
self.created = token_created_date(created)
def token_expire_date(expires):
return datetime.now() + timedelta(seconds=expires)
def token_created_date(created):
return datetime.fromtimestamp(created)
def get_access_token(account, token=None, refresh=False, re_auth=False):
"""
Gets authorization info from a pin or refresh token.
:param account: Arbitrary account name to attach authorization to.
:param unicode token: The pin or refresh token, as supplied by the trakt website.
:param bool refresh: If True, refresh the access token using refresh_token from db.
:raises RequestException: If there is a network error while authorizing.
"""
data = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob'
}
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if acc and datetime.now() < acc.expires and not refresh and not re_auth:
return acc.access_token
else:
if acc and refresh and not re_auth:
data['refresh_token'] = acc.refresh_token
data['grant_type'] = 'refresh_token'
elif token:
data['code'] = token
data['grant_type'] = 'authorization_code'
else:
raise plugin.PluginError('Account %s not found in db and no pin specified.' % account)
try:
r = requests.post(get_api_url('oauth/token'), data=data).json()
if acc:
acc.access_token = r.get('access_token')
acc.refresh_token = r.get('refresh_token')
acc.expires = token_expire_date(r.get('expires_in'))
acc.created = token_created_date(r.get('created_at'))
else:
acc = TraktUserAuth(account, r.get('access_token'), r.get('refresh_token'), r.get('created_at'),
r.get('expires_in'))
session.add(acc)
return r.get('access_token')
except requests.RequestException as e:
raise plugin.PluginError('Token exchange with trakt failed: %s' % e.args[0])
def make_list_slug(name):
"""Return the slug for use in url for given list name."""
slug = name.lower()
# These characters are just stripped in the url
for char in '!@#$%^*()[]{}/=?+\\|':
slug = slug.replace(char, '')
# These characters get replaced
slug = slug.replace('&', 'and')
slug = slug.replace(' ', '-')
return slug
def get_session(account=None, token=None):
"""
Creates a requests session ready to talk to trakt API with FlexGet's api key.
Can also add user level authentication if `account` parameter is given.
:param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be
authenticated for that account.
"""
# default to username if account name is not specified
session = requests.Session()
session.headers = {
'Content-Type': 'application/json',
'trakt-api-version': 2,
'trakt-api-key': CLIENT_ID,
}
if account:
access_token = get_access_token(account, token) if account else None
if access_token:
session.headers.update({'Authorization': 'Bearer %s' % access_token})
return session
def get_api_url(*endpoint):
"""
Get the address of a trakt API endpoint.
:param endpoint: Can by a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist')
Multiple parameters can also be specified instead of a single iterable.
:returns: The absolute url to the specified API endpoint.
"""
if len(endpoint) == 1 and not isinstance(endpoint[0], basestring):
endpoint = endpoint[0]
# Make sure integer portions are turned into strings first too
url = API_URL + '/'.join(map(unicode, endpoint))
return url
@upgrade('api_trakt')
def upgrade_database(ver, session):
if ver <= 2:
raise db_schema.UpgradeImpossible
return ver
def get_entry_ids(entry):
"""Creates a trakt ids dict from id fields on an entry. Prefers already populated info over lazy lookups."""
ids = {}
for lazy in [False, True]:
if entry.get('trakt_movie_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_movie_id']
elif entry.get('trakt_show_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_show_id']
elif entry.get('trakt_episode_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_episode_id']
if entry.get('tmdb_id', eval_lazy=lazy):
ids['tmdb'] = entry['tmdb_id']
if entry.get('tvdb_id', eval_lazy=lazy):
ids['tvdb'] = entry['tvdb_id']
if entry.get('imdb_id', eval_lazy=lazy):
ids['imdb'] = entry['imdb_id']
if entry.get('tvrage_id', eval_lazy=lazy):
ids['tvrage'] = entry['tvrage_id']
if ids:
break
return ids
class TraktGenre(Base):
__tablename__ = 'trakt_genres'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Unicode)
show_genres_table = Table('trakt_show_genres', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(show_genres_table)
movie_genres_table = Table('trakt_movie_genres', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(movie_genres_table)
def get_db_genres(genres, session):
"""Takes a list of genres as strings, returns the database instances for them."""
db_genres = []
for genre in genres:
genre = genre.replace('-', ' ')
db_genre = session.query(TraktGenre).filter(TraktGenre.name == genre).first()
if not db_genre:
db_genre = TraktGenre(name=genre)
session.add(db_genre)
db_genres.append(db_genre)
return db_genres
class TraktActor(Base):
__tablename__ = 'trakt_actors'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Unicode, nullable=False)
imdb_id = Column(Unicode)
trakt_id = Column(Unicode)
tmdb_id = Column(Unicode)
def __init__(self, name, trakt_id, imdb_id=None, tmdb_id=None):
self.name = name
self.trakt_id = trakt_id
self.imdb_id = imdb_id
self.tmdb_id = tmdb_id
show_actors_table = Table('trakt_show_actors', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(show_actors_table)
movie_actors_table = Table('trakt_movie_actors', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(movie_actors_table)
def get_db_actors(id, style):
actors = []
url = get_api_url(style + 's', id, 'people')
req_session = get_session()
try:
results = req_session.get(url).json()
with Session() as session:
for result in results.get('cast'):
name = result.get('person').get('name')
ids = result.get('person').get('ids')
trakt_id = ids.get('trakt')
imdb_id = ids.get('imdb')
tmdb_id = ids.get('tmdb')
actor = session.query(TraktActor).filter(TraktActor.trakt_id == trakt_id).first()
if not actor:
actor = TraktActor(name, trakt_id, imdb_id, tmdb_id)
actors.append(actor)
return actors
except requests.RequestException as e:
log.debug('Error searching for actors for trakt id %s' % e)
return
def list_actors(actors):
res = {}
for actor in actors:
info = {}
info['name'] = actor.name
info['imdb_id'] = str(actor.imdb_id)
info['tmdb_id'] = str(actor.tmdb_id)
res[str(actor.trakt_id)] = info
return res
class TraktEpisode(Base):
__tablename__ = 'trakt_episodes'
id = Column(Integer, primary_key=True, autoincrement=False)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
title = Column(Unicode)
season = Column(Integer)
number = Column(Integer)
number_abs = Column(Integer)
overview = Column(Unicode)
first_aired = Column(DateTime)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
def __init__(self, trakt_episode):
super(TraktEpisode, self).__init__()
self.update(trakt_episode)
def update(self, trakt_episode):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_episode['ids']['trakt']:
raise Exception('Tried to update db ep with different ep data')
elif not self.id:
self.id = trakt_episode['ids']['trakt']
self.imdb_id = trakt_episode['ids']['imdb']
self.tmdb_id = trakt_episode['ids']['tmdb']
self.tvrage_id = trakt_episode['ids']['tvrage']
self.tvdb_id = trakt_episode['ids']['tvdb']
self.first_aired = None
if trakt_episode.get('first_aired'):
self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True)
self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True)
self.cached_at = datetime.now()
for col in ['title', 'season', 'number', 'number_abs', 'overview']:
setattr(self, col, trakt_episode.get(col))
@property
def expired(self):
# TODO should episode have its own expiration function?
return False
class TraktShow(Base):
__tablename__ = 'trakt_shows'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
overview = Column(Unicode)
first_aired = Column(DateTime)
air_day = Column(Unicode)
air_time = Column(Time)
runtime = Column(Integer)
certification = Column(Unicode)
network = Column(Unicode)
country = Column(Unicode)
status = Column(String)
rating = Column(Integer)
votes = Column(Integer)
language = Column(Unicode)
aired_episodes = Column(Integer)
episodes = relation(TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic')
genres = relation(TraktGenre, secondary=show_genres_table)
_actors = relation(TraktActor, secondary=show_actors_table)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
def __init__(self, trakt_show, session):
super(TraktShow, self).__init__()
self.update(trakt_show, session)
def update(self, trakt_show, session):
"""Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
if self.id and self.id != trakt_show['ids']['trakt']:
raise Exception('Tried to update db show with different show data')
elif not self.id:
self.id = trakt_show['ids']['trakt']
self.slug = trakt_show['ids']['slug']
self.imdb_id = trakt_show['ids']['imdb']
self.tmdb_id = trakt_show['ids']['tmdb']
self.tvrage_id = trakt_show['ids']['tvrage']
self.tvdb_id = trakt_show['ids']['tvdb']
if trakt_show.get('air_time'):
self.air_time = dateutil_parse(trakt_show.get('air_time'), ignoretz=True)
else:
self.air_time = None
if trakt_show.get('first_aired'):
self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
else:
self.first_aired = None
self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
        for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year', 'air_day',
                    'certification', 'network', 'country', 'status', 'aired_episodes']:
setattr(self, col, trakt_show.get(col))
self.genres[:] = get_db_genres(trakt_show.get('genres', []), session)
self.cached_at = datetime.now()
def get_episode(self, season, number, only_cached=False):
# TODO: Does series data being expired mean all episode data should be refreshed?
episode = self.episodes.filter(TraktEpisode.season == season).filter(TraktEpisode.number == number).first()
if not episode or self.expired:
url = get_api_url('shows', self.id, 'seasons', season, 'episodes', number, '?extended=full')
if only_cached:
raise LookupError('Episode %s %s not found in cache' % (season, number))
            log.debug('Episode %s %s not found in cache or expired, looking up from trakt.' % (season, number))
try:
ses = get_session()
data = ses.get(url).json()
except requests.RequestException:
raise LookupError('Error Retrieving Trakt url: %s' % url)
if not data:
raise LookupError('No data in response from trakt %s' % url)
episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first()
if episode:
episode.update(data)
else:
episode = TraktEpisode(data)
self.episodes.append(episode)
return episode
@property
def expired(self):
"""
        :return: True if show details are considered to be expired, i.e. in need of an update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.cached_at is None:
log.debug('cached_at is None: %s' % self)
return True
refresh_interval = 2
# if show has been cancelled or ended, then it is unlikely to be updated often
if self.year and (self.status == 'ended' or self.status == 'canceled'):
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('show `%s` age %i expires in %i days' % (self.title, age, refresh_interval))
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
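    # Worked example of the heuristic above (values hypothetical): an ended
    # show from 2010 checked in 2015 has age 5, so refresh_interval becomes
    # 2 + 5 * 5 = 27 and the cached record is treated as fresh for 27 days.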
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'show')
return self._actors
def __repr__(self):
return '<name=%s, id=%s>' % (self.title, self.id)
class TraktMovie(Base):
__tablename__ = 'trakt_movies'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tagline = Column(Unicode)
overview = Column(Unicode)
released = Column(Date)
runtime = Column(Integer)
rating = Column(Integer)
votes = Column(Integer)
language = Column(Unicode)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
genres = relation(TraktGenre, secondary=movie_genres_table)
_actors = relation(TraktActor, secondary=movie_actors_table)
def __init__(self, trakt_movie, session):
super(TraktMovie, self).__init__()
self.update(trakt_movie, session)
def update(self, trakt_movie, session):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_movie['ids']['trakt']:
raise Exception('Tried to update db movie with different movie data')
elif not self.id:
self.id = trakt_movie['ids']['trakt']
self.slug = trakt_movie['ids']['slug']
self.imdb_id = trakt_movie['ids']['imdb']
self.tmdb_id = trakt_movie['ids']['tmdb']
for col in ['title', 'overview', 'runtime', 'rating', 'votes', 'language', 'tagline', 'year']:
setattr(self, col, trakt_movie.get(col))
        if trakt_movie.get('released'):
self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True)
self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
self.genres[:] = get_db_genres(trakt_movie.get('genres', []), session)
self.cached_at = datetime.now()
@property
def expired(self):
"""
        :return: True if movie details are considered to be expired, i.e. in need of an update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
        if self.cached_at is None:
            log.debug('cached_at is None: %s' % self)
return True
refresh_interval = 2
if self.year:
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('movie `%s` age %i expires in %i days' % (self.title, age, refresh_interval))
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'movie')
return self._actors
class TraktShowSearchResult(Base):
__tablename__ = 'trakt_show_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True)
series = relation(TraktShow, backref='search_strings')
class TraktMovieSearchResult(Base):
__tablename__ = 'trakt_movie_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True)
movie = relation(TraktMovie, backref='search_strings')
def split_title_year(title):
"""Splits title containing a year into a title, year pair."""
# We only recognize years from the 2nd and 3rd millennium, FlexGetters from the year 3000 be damned!
match = re.search(r'[\s(]([12]\d{3})\)?$', title)
if match:
title = title[:match.start()].strip()
year = int(match.group(1))
else:
year = None
return title, year
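# Illustrative behaviour of split_title_year (doctest-style, not executed):
# >>> split_title_year('The Flash (2014)')
# ('The Flash', 2014)
# >>> split_title_year('Cleopatra 1963')
# ('Cleopatra', 1963)
# >>> split_title_year('1984')  # no separator before the year, so no match
# ('1984', None)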
@with_session
def get_cached(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None, session=None):
"""
Get the cached info for a given show/movie from the database.
    :param style: Either 'show' or 'movie'
"""
ids = {
'id': trakt_id,
'slug': trakt_slug,
'tmdb_id': tmdb_id,
'imdb_id': imdb_id,
}
if style == 'show':
ids['tvdb_id'] = tvdb_id
ids['tvrage_id'] = tvrage_id
model = TraktShow
else:
model = TraktMovie
result = None
if any(ids.values()):
result = session.query(model).filter(
or_(getattr(model, col) == val for col, val in ids.iteritems() if val)).first()
elif title:
title, y = split_title_year(title)
year = year or y
query = session.query(model).filter(model.title == title)
if year:
query = query.filter(model.year == year)
result = query.first()
return result
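# Example usage (hypothetical values): id-based lookups take precedence over
# title matching, so these are two independent ways to hit the cache.
#   movie = get_cached('movie', imdb_id='tt0133093')
#   show = get_cached('show', title='The Flash (2014)')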
def get_trakt(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None):
"""Returns the matching media object from trakt api."""
# TODO: Better error messages
# Trakt api accepts either id or slug (there is a rare possibility for conflict though, e.g. 24)
trakt_id = trakt_id or trakt_slug
req_session = get_session()
last_search_query = None # used if no results are found
last_search_type = None
if not trakt_id:
# Try finding trakt_id based on other ids
ids = {
'imdb': imdb_id,
'tmdb': tmdb_id
}
if style == 'show':
ids['tvdb'] = tvdb_id
ids['tvrage'] = tvrage_id
for id_type, identifier in ids.iteritems():
if not identifier:
continue
try:
last_search_query = identifier
last_search_type = id_type
results = req_session.get(get_api_url('search'), params={'id_type': id_type, 'id': identifier}).json()
except requests.RequestException as e:
log.debug('Error searching for trakt id %s' % e)
continue
for result in results:
if result['type'] != style:
continue
trakt_id = result[style]['ids']['trakt']
break
if trakt_id:
break
if not trakt_id and title:
last_search_query = title
last_search_type = 'title'
# Try finding trakt id based on title and year
if style == 'show':
parsed_title, y = split_title_year(title)
y = year or y
else:
title_parser = get_plugin_by_name('parsing').instance.parse_movie(title)
y = year or title_parser.year
parsed_title = title_parser.name
try:
results = req_session.get(get_api_url('search'), params={'query': parsed_title, 'type': style,
'year': y}).json()
except requests.RequestException as e:
raise LookupError('Searching trakt for %s failed with error: %s' % (title, e))
for result in results:
if year and result[style]['year'] != year:
continue
if parsed_title.lower() == result[style]['title'].lower():
trakt_id = result[style]['ids']['trakt']
break
# grab the first result if there is no exact match
if not trakt_id and results and results[0]['score'] >= 34:
trakt_id = results[0][style]['ids']['trakt']
if not trakt_id:
raise LookupError('Unable to find %s="%s" on trakt.' % (last_search_type, last_search_query))
# Get actual data from trakt
try:
return req_session.get(get_api_url(style + 's', trakt_id), params={'extended': 'full'}).json()
except requests.RequestException as e:
raise LookupError('Error getting trakt data for id %s: %s' % (trakt_id, e))
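# Example usage (illustrative): get_trakt returns the raw trakt.tv JSON for
# the best match, or raises LookupError when nothing suitable is found.
#   data = get_trakt('show', title='Breaking Bad')
#   data['ids']['trakt']  # trakt id of the matched show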
class ApiTrakt(object):
@staticmethod
@with_session
def lookup_series(session=None, only_cached=None, **lookup_params):
series = get_cached('show', session=session, **lookup_params)
title = lookup_params.get('title', '')
found = None
if not series and title:
found = session.query(TraktShowSearchResult).filter(func.lower(TraktShowSearchResult.search) ==
title.lower()).first()
if found and found.series:
log.debug('Found %s in previous search results as %s' % (title, found.series.title))
series = found.series
if only_cached:
if series:
return series
            raise LookupError('Series %s not found in cache' % lookup_params)
if series and not series.expired:
return series
try:
trakt_show = get_trakt('show', **lookup_params)
except LookupError as e:
if series:
log.debug('Error refreshing show data from trakt, using cached. %s' % e)
return series
raise
series = session.query(TraktShow).filter(TraktShow.id == trakt_show['ids']['trakt']).first()
if series:
series.update(trakt_show, session)
else:
series = TraktShow(trakt_show, session)
session.add(series)
if series and title.lower() == series.title.lower():
return series
elif series and not found:
if not session.query(TraktShowSearchResult).filter(func.lower(TraktShowSearchResult.search) ==
title.lower()).first():
log.debug('Adding search result to db')
session.add(TraktShowSearchResult(search=title, series=series))
elif series and found:
log.debug('Updating search result in db')
found.series = series
return series
@staticmethod
@with_session
def lookup_movie(session=None, only_cached=None, **lookup_params):
movie = get_cached('movie', session=session, **lookup_params)
title = lookup_params.get('title', '')
found = None
if not movie and title:
found = session.query(TraktMovieSearchResult).filter(func.lower(TraktMovieSearchResult.search) ==
title.lower()).first()
if found and found.movie:
log.debug('Found %s in previous search results as %s' % (title, found.movie.title))
movie = found.movie
if only_cached:
if movie:
return movie
            raise LookupError('Movie %s not found in cache' % lookup_params)
if movie and not movie.expired:
return movie
try:
trakt_movie = get_trakt('movie', **lookup_params)
except LookupError as e:
if movie:
log.debug('Error refreshing movie data from trakt, using cached. %s' % e)
return movie
raise
movie = session.query(TraktMovie).filter(TraktMovie.id == trakt_movie['ids']['trakt']).first()
if movie:
movie.update(trakt_movie, session)
else:
movie = TraktMovie(trakt_movie, session)
session.add(movie)
if movie and title.lower() == movie.title.lower():
return movie
if movie and not found:
if not session.query(TraktMovieSearchResult).filter(func.lower(TraktMovieSearchResult.search) ==
title.lower()).first():
log.debug('Adding search result to db')
session.add(TraktMovieSearchResult(search=title, movie=movie))
elif movie and found:
log.debug('Updating search result in db')
found.movie = movie
return movie
@staticmethod
def collected(username, style, trakt_data, title, account=None):
url = get_api_url('users', username, 'collection', style + 's')
session = get_session(account=account)
try:
log.debug('Opening %s' % url)
data = session.get(url).json()
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
if not data:
log.warning('No collection data returned from trakt.')
return
log.verbose('Received %d records from trakt.tv %s\'s collection' % (len(data), username))
in_collection = False
if style == 'show':
for series in data:
if trakt_data.show.id == series['show']['ids']['trakt']:
for s in series['seasons']:
if s['number'] == trakt_data.season:
# extract all episode numbers currently in collection for the season number
episodes = [ep['number'] for ep in s['episodes']]
in_collection = trakt_data.number in episodes
break
log.debug('The result for entry "%s" is: %s' % (title,
'Owned' if in_collection else 'Not owned'))
else:
for movie in data:
if trakt_data.id == movie['movie']['ids']['trakt']:
in_collection = True
break
log.debug('The result for entry "%s" is: %s' % (title,
'Owned' if in_collection else 'Not owned'))
return in_collection
@staticmethod
def watched(username, style, trakt_data, title, account=None):
url = get_api_url('users', username, 'history', style + 's', trakt_data.id)
session = get_session(account=account)
try:
log.debug('Opening %s' % url)
data = session.get(url).json()
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
if not data:
log.warning('No data returned from trakt.')
return
log.verbose('Received %d records from trakt.tv %s\'s history' % (len(data), username))
watched = False
if style == 'episode':
for ep in data:
if trakt_data.show.id == ep['show']['ids']['trakt']:
ep_data = ep['episode']
if ep_data['season'] == trakt_data.season and ep_data['number'] == trakt_data.number:
watched = True
break
log.debug('The result for entry "%s" is: %s' % (title,
'Watched' if watched else 'Not watched'))
else:
for movie in data:
if trakt_data.id == movie['movie']['ids']['trakt']:
watched = True
break
log.debug('The result for entry "%s" is: %s' % (title,
'Watched' if watched else 'Not watched'))
return watched
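    # Illustrative call (hypothetical variables): check whether an episode
    # entry has been watched by a trakt user.
    #   ApiTrakt.watched('someuser', 'episode', trakt_episode,
    #                    'Show S01E01', account='myaccount')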
def delete_account(account):
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if not acc:
raise plugin.PluginError('Account %s not found.' % account)
session.delete(acc)
def do_cli(manager, options):
if options.action == 'auth':
if not (options.account and options.pin):
console('You must specify an account (local identifier) so we know where to save your access token! '
'Visit %s to get a pin code and authorize flexget to access your trakt account.' % PIN_URL)
return
try:
get_access_token(options.account, options.pin, re_auth=True)
console('Successfully authorized Flexget app on Trakt.tv. Enjoy!')
return
except plugin.PluginError as e:
console('Authorization failed: %s' % e)
elif options.action == 'show':
with Session() as session:
if not options.account:
# Print all accounts
accounts = session.query(TraktUserAuth).all()
if not accounts:
console('No trakt authorizations stored in database.')
return
console('{:-^21}|{:-^28}|{:-^28}'.format('Account', 'Created', 'Expires'))
for auth in accounts:
console('{:<21}|{:>28}|{:>28}'.format(
auth.account, auth.created.strftime('%Y-%m-%d'), auth.expires.strftime('%Y-%m-%d')))
return
# Show a specific account
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == options.account).first()
if acc:
console('Authorization expires on %s' % acc.expires)
else:
console('Flexget has not been authorized to access your account.')
elif options.action == 'refresh':
if not options.account:
console('Please specify an account')
return
try:
get_access_token(options.account, refresh=True)
console('Successfully refreshed your access token.')
return
except plugin.PluginError as e:
console('Authorization failed: %s' % e)
elif options.action == 'delete':
if not options.account:
console('Please specify an account')
return
try:
delete_account(options.account)
console('Successfully deleted your access token.')
return
except plugin.PluginError as e:
console('Deletion failed: %s' % e)
@event('options.register')
def register_parser_arguments():
acc_text = 'local identifier which should be used in your config to refer these credentials'
# Register subcommand
    parser = options.register_command('trakt', do_cli, help='view and manage trakt authentication. '
                                                            'Please visit %s to retrieve your pin code.' % PIN_URL)
# Set up our subparsers
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='action')
auth_parser = subparsers.add_parser('auth', help='authorize Flexget to access your Trakt.tv account')
auth_parser.add_argument('account', metavar='<account>', help=acc_text)
auth_parser.add_argument('pin', metavar='<pin>', help='get this by authorizing FlexGet to use your trakt account '
'at %s' % PIN_URL)
show_parser = subparsers.add_parser('show', help='show expiration date for Flexget authorization(s) (don\'t worry, '
'they will automatically refresh when expired)')
show_parser.add_argument('account', metavar='<account>', nargs='?', help=acc_text)
refresh_parser = subparsers.add_parser('refresh', help='manually refresh your access token associated with your'
' --account <name>')
refresh_parser.add_argument('account', metavar='<account>', help=acc_text)
delete_parser = subparsers.add_parser('delete', help='delete the specified <account> name from local database')
delete_parser.add_argument('account', metavar='<account>', help=acc_text)
@event('plugin.register')
def register_plugin():
plugin.register(ApiTrakt, 'api_trakt', api_ver=2)
|
lildadou/Flexget
|
flexget/plugins/api_trakt.py
|
Python
|
mit
| 36,575
|
[
"VisIt"
] |
d5994f86fba028b7e2f63667ec174601d43d8ca28b9bbab3a189762108695c39
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import MooseDocs
from MooseDocs.extensions import core, devel
from MooseDocs import common
from MooseDocs.common import exceptions
class TestLoadExtensions(unittest.TestCase):
def testLoadFromModule(self):
ext = common.load_extensions([core])
self.assertIsInstance(ext, list)
self.assertIsInstance(ext[0], core.CoreExtension)
def testLoadFromModuleWithConfig(self):
ext = common.load_extensions([devel],
{'MooseDocs.extensions.devel':{'test':False}})
self.assertFalse(ext[0]['test'])
def testLoadFromStr(self):
ext = common.load_extensions(['MooseDocs.extensions.core'])
self.assertIsInstance(ext, list)
self.assertIsInstance(ext[0], core.CoreExtension)
def testLoadFromStrWithConfig(self):
ext = common.load_extensions(['MooseDocs.extensions.devel'],
{'MooseDocs.extensions.devel':{'test':False}})
self.assertFalse(ext[0]['test'])
def testMissingMakeExtension(self):
with self.assertRaises(exceptions.MooseDocsException) as e:
common.load_extensions([MooseDocs.extensions])
self.assertIn("does not contain the required 'make_extension'", str(e.exception))
def testBadModuleName(self):
with self.assertRaises(exceptions.MooseDocsException) as e:
common.load_extensions(['not.a.module'])
self.assertIn("Failed to import the supplied", str(e.exception))
def testBadModuleType(self):
with self.assertRaises(exceptions.MooseDocsException) as e:
common.load_extensions([42])
self.assertIn("must be a module type", str(e.exception))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
nuclear-wizard/moose
|
python/MooseDocs/test/common/test_load_extensions.py
|
Python
|
lgpl-2.1
| 2,108
|
[
"MOOSE"
] |
8e3c417375483b7887d03e57c4284224f160a1132e122aa7ca70895753e66215
|
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import errno
import json
import operator
import time
from collections import defaultdict
from operator import itemgetter
from random import random
import os
import six
from six.moves.urllib.parse import quote
from eventlet import Timeout
from contextlib import contextmanager
from swift.common import internal_client
from swift.common.constraints import check_drive, AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.direct_client import (direct_put_container,
DirectClientException)
from swift.common.exceptions import DeviceUnavailable
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER
from swift.common.ring.utils import is_local_device
from swift.common.swob import str_to_wsgi
from swift.common.utils import get_logger, config_true_value, \
dump_recon_cache, whataremyips, Timestamp, ShardRange, GreenAsyncPile, \
config_positive_int_value, quorum_size, parse_override_options, \
Everything, config_auto_int_value, ShardRangeList, config_percent_value
from swift.container.backend import ContainerBroker, \
RECORD_TYPE_SHARD, UNSHARDED, SHARDING, SHARDED, COLLAPSED, \
SHARD_UPDATE_STATES
from swift.container.replicator import ContainerReplicator
CLEAVE_SUCCESS = 0
CLEAVE_FAILED = 1
CLEAVE_EMPTY = 2
def sharding_enabled(broker):
# NB all shards will by default have been created with
# X-Container-Sysmeta-Sharding set and will therefore be candidates for
# sharding, along with explicitly configured root containers.
sharding = broker.metadata.get('X-Container-Sysmeta-Sharding')
if sharding and config_true_value(sharding[0]):
return True
# if broker has been marked deleted it will have lost sysmeta, but we still
# need to process the broker (for example, to shrink any shard ranges) so
# fallback to checking if it has any shard ranges
if broker.get_shard_ranges():
return True
return False
def make_shard_ranges(broker, shard_data, shards_account_prefix):
timestamp = Timestamp.now()
shard_ranges = []
for data in shard_data:
# Make a copy so we don't mutate the original
kwargs = data.copy()
path = ShardRange.make_path(
shards_account_prefix + broker.root_account,
broker.root_container, broker.container,
timestamp, kwargs.pop('index'))
shard_ranges.append(ShardRange(path, timestamp, **kwargs))
return shard_ranges
def find_missing_ranges(shard_ranges):
"""
Find any ranges in the entire object namespace that are not covered by any
shard range in the given list.
    :param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`
:return: a list of missing ranges
"""
gaps = []
if not shard_ranges:
return ((ShardRange.MIN, ShardRange.MAX),)
if shard_ranges[0].lower > ShardRange.MIN:
gaps.append((ShardRange.MIN, shard_ranges[0].lower))
for first, second in zip(shard_ranges, shard_ranges[1:]):
if first.upper < second.lower:
gaps.append((first.upper, second.lower))
if shard_ranges[-1].upper < ShardRange.MAX:
gaps.append((shard_ranges[-1].upper, ShardRange.MAX))
return gaps
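# Illustrative example (hypothetical ranges): two shard ranges covering the
# namespaces ''..'m' and 'p'..'' leave one uncovered span, so the function
# would conceptually return [('m', 'p')]; with an empty input the whole
# namespace ((ShardRange.MIN, ShardRange.MAX),) is reported as missing.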
def find_overlapping_ranges(shard_ranges):
"""
Find all pairs of overlapping ranges in the given list.
    :param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`
:return: a set of tuples, each tuple containing ranges that overlap with
each other.
"""
result = set()
for i, shard_range in enumerate(shard_ranges):
overlapping = [
sr for sr in shard_ranges[i + 1:]
if shard_range.name != sr.name and shard_range.overlaps(sr)]
if overlapping:
overlapping.append(shard_range)
overlapping.sort(key=ShardRange.sort_key)
result.add(tuple(overlapping))
return result
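# Illustrative example (hypothetical ranges): if sr_a spans 'a'..'m' and sr_b
# spans 'k'..'z' they overlap, so find_overlapping_ranges([sr_a, sr_b]) would
# return {(sr_a, sr_b)}; fully disjoint ranges yield an empty set.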
def is_sharding_candidate(shard_range, threshold):
# note: use *object* count as the condition for sharding: tombstones will
# eventually be reclaimed so should not trigger sharding
return (shard_range.state == ShardRange.ACTIVE and
shard_range.object_count >= threshold)
def is_shrinking_candidate(shard_range, shrink_threshold, expansion_limit,
states=None):
# typically shrink_threshold < expansion_limit but check both just in case
# note: use *row* count (objects plus tombstones) as the condition for
# shrinking to avoid inadvertently moving large numbers of tombstones into
# an acceptor
states = states or (ShardRange.ACTIVE,)
return (shard_range.state in states and
shard_range.row_count < shrink_threshold and
shard_range.row_count <= expansion_limit)
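# Worked example (using this module's default thresholds): with
# shrink_threshold=100000 and expansion_limit=750000, an ACTIVE shard with
# row_count=50000 is a shrinking candidate, while one with row_count=100000
# is not, because the row count must be strictly below the threshold.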
def find_sharding_candidates(broker, threshold, shard_ranges=None):
# this should only execute on root containers; the goal is to find
# large shard containers that should be sharded.
# First cut is simple: assume root container shard usage stats are good
# enough to make decision.
if shard_ranges is None:
shard_ranges = broker.get_shard_ranges(states=[ShardRange.ACTIVE])
candidates = []
for shard_range in shard_ranges:
if not is_sharding_candidate(shard_range, threshold):
continue
shard_range.update_state(ShardRange.SHARDING,
state_timestamp=Timestamp.now())
shard_range.epoch = shard_range.state_timestamp
candidates.append(shard_range)
return candidates
def find_shrinking_candidates(broker, shrink_threshold, expansion_limit):
# this is only here to preserve a legacy public function signature;
# superseded by find_compactible_shard_sequences
merge_pairs = {}
# restrict search to sequences with one donor
results = find_compactible_shard_sequences(broker, shrink_threshold,
expansion_limit, 1, -1,
include_shrinking=True)
for sequence in results:
# map acceptor -> donor list
merge_pairs[sequence[-1]] = sequence[-2]
return merge_pairs
def find_compactible_shard_sequences(broker,
shrink_threshold,
expansion_limit,
max_shrinking,
max_expanding,
include_shrinking=False):
"""
Find sequences of shard ranges that could be compacted into a single
acceptor shard range.
This function does not modify shard ranges.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param shrink_threshold: the number of rows below which a shard may be
considered for shrinking into another shard
:param expansion_limit: the maximum number of rows that an acceptor shard
range should have after other shard ranges have been compacted into it
:param max_shrinking: the maximum number of shard ranges that should be
compacted into each acceptor; -1 implies unlimited.
:param max_expanding: the maximum number of acceptors to be found (i.e. the
maximum number of sequences to be returned); -1 implies unlimited.
:param include_shrinking: if True then existing compactible sequences are
included in the results; default is False.
:returns: A list of :class:`~swift.common.utils.ShardRangeList` each
containing a sequence of neighbouring shard ranges that may be
compacted; the final shard range in the list is the acceptor
"""
# this should only execute on root containers that have sharded; the
# goal is to find small shard containers that could be retired by
# merging with a neighbour.
# First cut is simple: assume root container shard usage stats are good
# enough to make decision; only merge with upper neighbour so that
# upper bounds never change (shard names include upper bound).
shard_ranges = broker.get_shard_ranges()
own_shard_range = broker.get_own_shard_range()
def sequence_complete(sequence):
# a sequence is considered complete if any of the following are true:
# - the final shard range has more objects than the shrink_threshold,
# so should not be shrunk (this shard will be the acceptor)
# - the max number of shard ranges to be compacted (max_shrinking) has
# been reached
# - the total number of objects in the sequence has reached the
# expansion_limit
if (sequence and
(not is_shrinking_candidate(
sequence[-1], shrink_threshold, expansion_limit,
states=(ShardRange.ACTIVE, ShardRange.SHRINKING)) or
0 < max_shrinking < len(sequence) or
sequence.row_count >= expansion_limit)):
return True
return False
compactible_sequences = []
index = 0
expanding = 0
while ((max_expanding < 0 or expanding < max_expanding) and
index < len(shard_ranges)):
if not is_shrinking_candidate(
shard_ranges[index], shrink_threshold, expansion_limit,
states=(ShardRange.ACTIVE, ShardRange.SHRINKING)):
# this shard range cannot be the start of a new or existing
# compactible sequence, move on
index += 1
continue
# start of a *possible* sequence
sequence = ShardRangeList([shard_ranges[index]])
for shard_range in shard_ranges[index + 1:]:
# attempt to add contiguous shard ranges to the sequence
if sequence.upper < shard_range.lower:
# found a gap! break before consuming this range because it
# could become the first in the next sequence
break
if shard_range.state not in (ShardRange.ACTIVE,
ShardRange.SHRINKING):
# found? created? sharded? don't touch it
break
if shard_range.state == ShardRange.SHRINKING:
# already shrinking: add to sequence unconditionally
sequence.append(shard_range)
elif (sequence.row_count + shard_range.row_count
<= expansion_limit):
# add to sequence: could be a donor or acceptor
sequence.append(shard_range)
if sequence_complete(sequence):
break
else:
break
index += len(sequence)
if (index == len(shard_ranges) and
len(shard_ranges) == len(sequence) and
not sequence_complete(sequence) and
sequence.includes(own_shard_range)):
# special case: only one sequence has been found, which consumes
# all shard ranges, encompasses the entire namespace, has no more
# than expansion_limit records and whose shard ranges are all
# shrinkable; all the shards in the sequence can be shrunk to the
# root, so append own_shard_range to the sequence to act as an
# acceptor; note: only shrink to the root when *all* the remaining
# shard ranges can be simultaneously shrunk to the root.
sequence.append(own_shard_range)
if len(sequence) < 2 or sequence[-1].state not in (ShardRange.ACTIVE,
ShardRange.SHARDED):
# this sequence doesn't end with a suitable acceptor shard range
continue
# all valid sequences are counted against the max_expanding allowance
# even if the sequence is already shrinking
expanding += 1
if (all([sr.state != ShardRange.SHRINKING for sr in sequence]) or
include_shrinking):
compactible_sequences.append(sequence)
return compactible_sequences
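# Sketch of typical usage (values hypothetical), mirroring the legacy helper
# above: find donor/acceptor sequences, then split each sequence.
#   sequences = find_compactible_shard_sequences(
#       broker, shrink_threshold=100000, expansion_limit=750000,
#       max_shrinking=1, max_expanding=-1)
#   for seq in sequences:
#       donors, acceptor = seq[:-1], seq[-1]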
def finalize_shrinking(broker, acceptor_ranges, donor_ranges, timestamp):
"""
Update donor shard ranges to shrinking state and merge donors and acceptors
to broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param acceptor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be acceptors.
:param donor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be donors; these will have their state and timestamp
updated.
:param timestamp: timestamp to use when updating donor state
"""
for donor in donor_ranges:
if donor.update_state(ShardRange.SHRINKING):
# Set donor state to shrinking state_timestamp defines new epoch
donor.epoch = donor.state_timestamp = timestamp
broker.merge_shard_ranges(acceptor_ranges + donor_ranges)
def process_compactible_shard_sequences(broker, sequences):
"""
Transform the given sequences of shard ranges into a list of acceptors and
a list of shrinking donors. For each given sequence the final ShardRange in
the sequence (the acceptor) is expanded to accommodate the other
ShardRanges in the sequence (the donors). The donors and acceptors are then
merged into the broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param sequences: A list of :class:`~swift.common.utils.ShardRangeList`
"""
timestamp = Timestamp.now()
acceptor_ranges = []
shrinking_ranges = []
for sequence in sequences:
donors = sequence[:-1]
shrinking_ranges.extend(donors)
acceptor = sequence[-1]
if acceptor.expand(donors):
# Update the acceptor container with its expanded bounds to prevent
# it treating objects cleaved from the donor as misplaced.
acceptor.timestamp = timestamp
if acceptor.update_state(ShardRange.ACTIVE):
# Ensure acceptor state is ACTIVE (when acceptor is root)
acceptor.state_timestamp = timestamp
acceptor_ranges.append(acceptor)
finalize_shrinking(broker, acceptor_ranges, shrinking_ranges, timestamp)
def find_paths(shard_ranges):
"""
Returns a list of all continuous paths through the shard ranges. An
individual path may not necessarily span the entire namespace, but it will
span a continuous namespace without gaps.
:param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`.
:return: A list of :class:`~swift.common.utils.ShardRangeList`.
"""
# A node is a point in the namespace that is used as a bound of any shard
# range. Shard ranges form the edges between nodes.
# First build a dict mapping nodes to a list of edges that leave that node
# (in other words, shard ranges whose lower bound equals the node)
node_successors = collections.defaultdict(list)
for shard_range in shard_ranges:
if shard_range.state == ShardRange.SHRINKING:
# shrinking shards are not a viable edge in any path
continue
node_successors[shard_range.lower].append(shard_range)
paths = []
def clone_path(other=None):
# create a new path, possibly cloning another path, and add it to the
# list of all paths through the shards
path = ShardRangeList() if other is None else ShardRangeList(other)
paths.append(path)
return path
# we need to keep track of every path that ends at each node so that when
# we visit the node we can extend those paths, or clones of them, with the
# edges that leave the node
paths_to_node = collections.defaultdict(list)
# visit the nodes in ascending order by name...
for node, edges in sorted(node_successors.items()):
if not edges:
# this node is a dead-end, so there's no path updates to make
continue
if not paths_to_node[node]:
# this is either the first node to be visited, or it has no paths
# leading to it, so we need to start a new path here
paths_to_node[node].append(clone_path([]))
for path_to_node in paths_to_node[node]:
# extend each path that arrives at this node with all of the
            # possible edges that leave the node; if more than one edge leaves the
            # node then we will make clones of the path to the node and extend
            # those clones, adding to the collection of all paths through the
# shards
for i, edge in enumerate(edges):
if i == len(edges) - 1:
# the last edge is used to extend the original path to the
# node; there is nothing special about the last edge, but
# doing this last means the original path to the node can
# be cloned for all other edges before being modified here
path = path_to_node
else:
# for all but one of the edges leaving the node we need to
# make a clone the original path
path = clone_path(path_to_node)
# extend the path with the edge
path.append(edge)
# keep track of which node this path now arrives at
paths_to_node[edge.upper].append(path)
return paths
def rank_paths(paths, shard_range_to_span):
"""
Sorts the given list of paths such that the most preferred path is the
first item in the list.
:param paths: A list of :class:`~swift.common.utils.ShardRangeList`.
:param shard_range_to_span: An instance of
:class:`~swift.common.utils.ShardRange` that describes the namespace
that would ideally be spanned by a path. Paths that include this
namespace will be preferred over those that do not.
:return: A sorted list of :class:`~swift.common.utils.ShardRangeList`.
"""
def sort_key(path):
# defines the order of preference for paths through shards
return (
# complete path for the namespace
path.includes(shard_range_to_span),
# most cleaving progress
path.find_lower(lambda sr: sr.state not in (
ShardRange.CLEAVED, ShardRange.ACTIVE)),
# largest object count
path.object_count,
# fewest timestamps
-1 * len(path.timestamps),
# newest timestamp
sorted(path.timestamps)[-1]
)
paths.sort(key=sort_key, reverse=True)
return paths
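# Illustrative use (assumed variables): the most preferred path comes first,
# so cleaving code can simply take the head of the ranked list.
#   best_path = rank_paths(find_paths(shard_ranges), own_shard_range)[0]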
class CleavingContext(object):
def __init__(self, ref, cursor='', max_row=None, cleave_to_row=None,
last_cleave_to_row=None, cleaving_done=False,
misplaced_done=False, ranges_done=0, ranges_todo=0):
self.ref = ref
self._cursor = None
self.cursor = cursor
self.max_row = max_row
self.cleave_to_row = cleave_to_row
self.last_cleave_to_row = last_cleave_to_row
self.cleaving_done = cleaving_done
self.misplaced_done = misplaced_done
self.ranges_done = ranges_done
self.ranges_todo = ranges_todo
def __iter__(self):
yield 'ref', self.ref
yield 'cursor', self.cursor
yield 'max_row', self.max_row
yield 'cleave_to_row', self.cleave_to_row
yield 'last_cleave_to_row', self.last_cleave_to_row
yield 'cleaving_done', self.cleaving_done
yield 'misplaced_done', self.misplaced_done
yield 'ranges_done', self.ranges_done
yield 'ranges_todo', self.ranges_todo
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(
'%s=%r' % prop for prop in self))
def _encode(cls, value):
if value is not None and six.PY2 and isinstance(value, six.text_type):
return value.encode('utf-8')
return value
@property
def cursor(self):
return self._cursor
@cursor.setter
def cursor(self, value):
self._cursor = self._encode(value)
@property
def marker(self):
return self.cursor + '\x00'
@classmethod
def _make_ref(cls, broker):
return broker.get_info()['id']
@classmethod
def load_all(cls, broker):
"""
Returns all cleaving contexts stored in the broker.
:param broker:
:return: list of tuples of (CleavingContext, timestamp)
"""
brokers = broker.get_brokers()
sysmeta = brokers[-1].get_sharding_sysmeta_with_timestamps()
contexts = []
for key, (val, timestamp) in sysmeta.items():
# If the value is blank, then the metadata is
# marked for deletion
if key.startswith("Context-") and val:
try:
contexts.append((cls(**json.loads(val)), timestamp))
except ValueError:
continue
return contexts
@classmethod
def load(cls, broker):
"""
Returns a context dict for tracking the progress of cleaving this
broker's retiring DB. The context is persisted in sysmeta using a key
that is based off the retiring db id and max row. This form of
key ensures that a cleaving context is only loaded for a db that
matches the id and max row when the context was created; if a db is
modified such that its max row changes then a different context, or no
context, will be loaded.
:return: A dict to which cleave progress metadata may be added. The
dict initially has a key ``ref`` which should not be modified by
any caller.
"""
brokers = broker.get_brokers()
ref = cls._make_ref(brokers[0])
data = brokers[-1].get_sharding_sysmeta('Context-' + ref)
data = json.loads(data) if data else {}
data['ref'] = ref
data['max_row'] = brokers[0].get_max_row()
return cls(**data)
def store(self, broker):
broker.set_sharding_sysmeta('Context-' + self.ref,
json.dumps(dict(self)))
def reset(self):
self.cursor = ''
self.ranges_done = 0
self.ranges_todo = 0
self.cleaving_done = False
self.misplaced_done = False
self.last_cleave_to_row = self.cleave_to_row
def start(self):
self.cursor = ''
self.ranges_done = 0
self.ranges_todo = 0
self.cleaving_done = False
self.cleave_to_row = self.max_row
def range_done(self, new_cursor):
self.ranges_done += 1
self.ranges_todo -= 1
self.cursor = new_cursor
def done(self):
return all((self.misplaced_done, self.cleaving_done,
self.max_row == self.cleave_to_row))
def delete(self, broker):
# These will get reclaimed when `_reclaim_metadata` in
# common/db.py is called.
broker.set_sharding_sysmeta('Context-' + self.ref, '')
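# Illustrative round trip (assumed `broker` variable): a context is persisted
# under a sysmeta key derived from the retiring db id.
#   ctx = CleavingContext.load(broker)
#   ctx.range_done(new_cursor='m')
#   ctx.store(broker)  # saved as sysmeta 'Context-<db id>' -> JSON dict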
class ContainerSharderConf(object):
def __init__(self, conf=None):
conf = conf if conf else {}
def get_val(key, validator, default):
"""
Get a value from conf and validate it.
:param key: key to lookup value in the ``conf`` dict.
            :param validator: A function that will be passed the value from the
                ``conf`` dict and should return the value to be set. This
                function should raise a ValueError if the ``conf`` value is not
valid.
:param default: value to use if ``key`` is not found in ``conf``.
:raises: ValueError if the value read from ``conf`` is invalid.
:returns: the configuration value.
"""
try:
return validator(conf.get(key, default))
except ValueError as err:
raise ValueError('Error setting %s: %s' % (key, err))
self.shard_container_threshold = get_val(
'shard_container_threshold', config_positive_int_value, 1000000)
self.max_shrinking = get_val(
'max_shrinking', int, 1)
self.max_expanding = get_val(
'max_expanding', int, -1)
self.shard_scanner_batch_size = get_val(
'shard_scanner_batch_size', config_positive_int_value, 10)
self.cleave_batch_size = get_val(
'cleave_batch_size', config_positive_int_value, 2)
self.cleave_row_batch_size = get_val(
'cleave_row_batch_size', config_positive_int_value, 10000)
self.broker_timeout = get_val(
'broker_timeout', config_positive_int_value, 60)
self.recon_candidates_limit = get_val(
'recon_candidates_limit', int, 5)
self.recon_sharded_timeout = get_val(
'recon_sharded_timeout', int, 43200)
self.conn_timeout = get_val(
'conn_timeout', float, 5)
self.auto_shard = get_val(
'auto_shard', config_true_value, False)
# deprecated percent options still loaded...
self.shrink_threshold = get_val(
'shard_shrink_point', self.percent_of_threshold, 10)
self.expansion_limit = get_val(
'shard_shrink_merge_point', self.percent_of_threshold, 75)
# ...but superseded by absolute options if present in conf
self.shrink_threshold = get_val(
'shrink_threshold', int, self.shrink_threshold)
self.expansion_limit = get_val(
'expansion_limit', int, self.expansion_limit)
self.rows_per_shard = get_val(
'rows_per_shard', config_positive_int_value,
max(self.shard_container_threshold // 2, 1))
self.minimum_shard_size = get_val(
'minimum_shard_size', config_positive_int_value,
max(self.rows_per_shard // 5, 1))
def percent_of_threshold(self, val):
return int(config_percent_value(val) * self.shard_container_threshold)
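    # Worked example of the deprecated percent options above (module defaults):
    # with shard_container_threshold=1000000, shard_shrink_point=10 yields
    # shrink_threshold = int(0.10 * 1000000) = 100000 rows, and
    # shard_shrink_merge_point=75 yields expansion_limit = 750000 rows.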
@classmethod
def validate_conf(cls, namespace):
ops = {'<': operator.lt,
'<=': operator.le}
checks = (('minimum_shard_size', '<=', 'rows_per_shard'),
('shrink_threshold', '<=', 'minimum_shard_size'),
('rows_per_shard', '<', 'shard_container_threshold'),
('expansion_limit', '<', 'shard_container_threshold'))
for key1, op, key2 in checks:
try:
val1 = getattr(namespace, key1)
val2 = getattr(namespace, key2)
except AttributeError:
# swift-manage-shard-ranges uses a subset of conf options for
# each command so only validate those actually in the namespace
continue
if not ops[op](val1, val2):
raise ValueError('%s (%d) must be %s %s (%d)'
% (key1, val1, op, key2, val2))
DEFAULT_SHARDER_CONF = vars(ContainerSharderConf())
class ContainerSharder(ContainerSharderConf, ContainerReplicator):
"""Shards containers."""
log_route = 'container-sharder'
def __init__(self, conf, logger=None):
logger = logger or get_logger(conf, log_route=self.log_route)
ContainerReplicator.__init__(self, conf, logger=logger)
ContainerSharderConf.__init__(self, conf)
ContainerSharderConf.validate_conf(self)
if conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
auto_create_account_prefix = \
self.conf['auto_create_account_prefix']
else:
auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.shards_account_prefix = (auto_create_account_prefix + 'shards_')
self.sharding_candidates = []
self.shrinking_candidates = []
replica_count = self.ring.replica_count
quorum = quorum_size(replica_count)
self.shard_replication_quorum = config_auto_int_value(
conf.get('shard_replication_quorum'), quorum)
if self.shard_replication_quorum > replica_count:
self.logger.warning(
'shard_replication_quorum of %s exceeds replica count %s'
', reducing to %s', self.shard_replication_quorum,
replica_count, replica_count)
self.shard_replication_quorum = replica_count
self.existing_shard_replication_quorum = config_auto_int_value(
conf.get('existing_shard_replication_quorum'),
self.shard_replication_quorum)
if self.existing_shard_replication_quorum > replica_count:
self.logger.warning(
'existing_shard_replication_quorum of %s exceeds replica count'
' %s, reducing to %s', self.existing_shard_replication_quorum,
replica_count, replica_count)
self.existing_shard_replication_quorum = replica_count
# internal client
request_tries = config_positive_int_value(
conf.get('request_tries', 3))
internal_client_conf_path = conf.get('internal_client_conf_path',
'/etc/swift/internal-client.conf')
try:
self.int_client = internal_client.InternalClient(
internal_client_conf_path,
'Swift Container Sharder',
request_tries,
allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': '%s-ic' % conf.get(
'log_name', self.log_route)})
except (OSError, IOError) as err:
if err.errno != errno.ENOENT and \
not str(err).endswith(' not found'):
raise
raise SystemExit(
'Unable to load internal client from config: %r (%s)' %
(internal_client_conf_path, err))
self.stats_interval = float(conf.get('stats_interval', '3600'))
self.reported = 0
def _zero_stats(self):
"""Zero out the stats."""
super(ContainerSharder, self)._zero_stats()
# all sharding stats that are additional to the inherited replicator
# stats are maintained under the 'sharding' key in self.stats
self.stats['sharding'] = defaultdict(lambda: defaultdict(int))
self.sharding_candidates = []
self.shrinking_candidates = []
def _append_stat(self, category, key, value):
if not self.stats['sharding'][category][key]:
self.stats['sharding'][category][key] = list()
self.stats['sharding'][category][key].append(value)
def _min_stat(self, category, key, value):
current = self.stats['sharding'][category][key]
if not current:
self.stats['sharding'][category][key] = value
else:
self.stats['sharding'][category][key] = min(current, value)
def _max_stat(self, category, key, value):
current = self.stats['sharding'][category][key]
if not current:
self.stats['sharding'][category][key] = value
else:
self.stats['sharding'][category][key] = max(current, value)
def _increment_stat(self, category, key, step=1, statsd=False):
self.stats['sharding'][category][key] += step
if statsd:
statsd_key = '%s_%s' % (category, key)
self.logger.increment(statsd_key)
def _make_stats_info(self, broker, node, own_shard_range):
try:
file_size = os.stat(broker.db_file).st_size
except OSError:
file_size = None
return {'path': broker.db_file,
'node_index': node.get('index'),
'account': broker.account,
'container': broker.container,
'root': broker.root_path,
'object_count': own_shard_range.object_count,
'meta_timestamp': own_shard_range.meta_timestamp.internal,
'file_size': file_size}
def _identify_sharding_candidate(self, broker, node):
own_shard_range = broker.get_own_shard_range()
if is_sharding_candidate(
own_shard_range, self.shard_container_threshold):
self.sharding_candidates.append(
self._make_stats_info(broker, node, own_shard_range))
def _identify_shrinking_candidate(self, broker, node):
sequences = find_compactible_shard_sequences(
broker, self.shrink_threshold, self.expansion_limit,
self.max_shrinking, self.max_expanding)
        # compactible_ranges counts every range apart from the final acceptor
        # in each sequence
compactible_ranges = sum(len(seq) - 1 for seq in sequences)
if compactible_ranges:
own_shard_range = broker.get_own_shard_range()
shrink_candidate = self._make_stats_info(
broker, node, own_shard_range)
# The number of ranges/donors that can be shrunk if the
# tool is used with the current max_shrinking, max_expanding
# settings.
shrink_candidate['compactible_ranges'] = compactible_ranges
self.shrinking_candidates.append(shrink_candidate)
def _transform_candidate_stats(self, category, candidates, sort_keys):
category['found'] = len(candidates)
candidates.sort(key=itemgetter(*sort_keys), reverse=True)
if self.recon_candidates_limit >= 0:
category['top'] = candidates[:self.recon_candidates_limit]
else:
category['top'] = candidates
def _record_sharding_progress(self, broker, node, error):
own_shard_range = broker.get_own_shard_range()
db_state = broker.get_db_state()
if (db_state in (UNSHARDED, SHARDING, SHARDED)
and own_shard_range.state in (ShardRange.SHARDING,
ShardRange.SHARDED)):
if db_state == SHARDED:
contexts = CleavingContext.load_all(broker)
if not contexts:
return
context_ts = max(float(ts) for c, ts in contexts)
if context_ts + self.recon_sharded_timeout \
< Timestamp.now().timestamp:
# last context timestamp too old for the
# broker to be recorded
return
info = self._make_stats_info(broker, node, own_shard_range)
info['state'] = own_shard_range.state_text
info['db_state'] = broker.get_db_state()
states = [ShardRange.FOUND, ShardRange.CREATED,
ShardRange.CLEAVED, ShardRange.ACTIVE]
shard_ranges = broker.get_shard_ranges(states=states)
state_count = {}
for state in states:
state_count[ShardRange.STATES[state]] = 0
for shard_range in shard_ranges:
state_count[shard_range.state_text] += 1
info.update(state_count)
info['error'] = error and str(error)
self._append_stat('sharding_in_progress', 'all', info)
def _report_stats(self):
# report accumulated stats since start of one sharder cycle
default_stats = ('attempted', 'success', 'failure')
category_keys = (
('visited', default_stats + ('skipped', 'completed')),
('scanned', default_stats + ('found', 'min_time', 'max_time')),
('created', default_stats),
('cleaved', default_stats + ('min_time', 'max_time',)),
('misplaced', default_stats + ('found', 'placed', 'unplaced')),
('audit_root', default_stats + ('has_overlap', 'num_overlap')),
('audit_shard', default_stats),
)
now = time.time()
last_report = time.ctime(self.stats['start'])
elapsed = now - self.stats['start']
sharding_stats = self.stats['sharding']
for category, keys in category_keys:
stats = sharding_stats[category]
msg = ' '.join(['%s:%s' % (k, str(stats[k])) for k in keys])
self.logger.info('Since %s %s - %s', last_report, category, msg)
# transform the sharding and shrinking candidate states
# first sharding
category = self.stats['sharding']['sharding_candidates']
self._transform_candidate_stats(category, self.sharding_candidates,
sort_keys=('object_count',))
# next shrinking
category = self.stats['sharding']['shrinking_candidates']
self._transform_candidate_stats(category, self.shrinking_candidates,
sort_keys=('compactible_ranges',))
dump_recon_cache(
{'sharding_stats': self.stats,
'sharding_time': elapsed,
'sharding_last': now},
self.rcache, self.logger)
self.reported = now
def _periodic_report_stats(self):
if (time.time() - self.reported) >= self.stats_interval:
self._report_stats()
def _check_node(self, node):
"""
:return: The path to the device, if the node is mounted.
Returns False if the node is unmounted.
"""
if not node:
return False
if not is_local_device(self.ips, self.port,
node['replication_ip'],
node['replication_port']):
return False
try:
return check_drive(self.root, node['device'], self.mount_check)
except ValueError:
self.logger.warning(
'Skipping %(device)s as it is not mounted' % node)
return False
def _fetch_shard_ranges(self, broker, newest=False, params=None,
include_deleted=False):
path = self.int_client.make_path(broker.root_account,
broker.root_container)
params = params or {}
params.setdefault('format', 'json')
headers = {'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Deleted': 'true',
'X-Backend-Include-Deleted': str(include_deleted)}
if newest:
headers['X-Newest'] = 'true'
try:
try:
resp = self.int_client.make_request(
'GET', path, headers, acceptable_statuses=(2,),
params=params)
except internal_client.UnexpectedResponse as err:
self.logger.warning("Failed to get shard ranges from %s: %s",
quote(broker.root_path), err)
return None
record_type = resp.headers.get('x-backend-record-type')
if record_type != 'shard':
err = 'unexpected record type %r' % record_type
self.logger.error("Failed to get shard ranges from %s: %s",
quote(broker.root_path), err)
return None
try:
data = json.loads(resp.body)
if not isinstance(data, list):
raise ValueError('not a list')
return [ShardRange.from_dict(shard_range)
for shard_range in data]
except (ValueError, TypeError, KeyError) as err:
self.logger.error(
"Failed to get shard ranges from %s: invalid data: %r",
quote(broker.root_path), err)
return None
finally:
self.logger.txn_id = None
def _put_container(self, node, part, account, container, headers, body):
try:
direct_put_container(node, part, account, container,
conn_timeout=self.conn_timeout,
response_timeout=self.node_timeout,
headers=headers, contents=body)
except DirectClientException as err:
self.logger.warning(
'Failed to put shard ranges to %s:%s/%s: %s',
node['ip'], node['port'], node['device'], err.http_status)
except (Exception, Timeout) as err:
self.logger.exception(
'Failed to put shard ranges to %s:%s/%s: %s',
node['ip'], node['port'], node['device'], err)
else:
return True
return False
def _send_shard_ranges(self, account, container, shard_ranges,
headers=None):
body = json.dumps([dict(sr, reported=0)
for sr in shard_ranges]).encode('ascii')
part, nodes = self.ring.get_nodes(account, container)
headers = headers or {}
headers.update({'X-Backend-Record-Type': RECORD_TYPE_SHARD,
USE_REPLICATION_NETWORK_HEADER: 'True',
'User-Agent': 'container-sharder %s' % os.getpid(),
'X-Timestamp': Timestamp.now().normal,
'Content-Length': len(body),
'Content-Type': 'application/json'})
pool = GreenAsyncPile(len(nodes))
for node in nodes:
pool.spawn(self._put_container, node, part, account,
container, headers, body)
results = pool.waitall(None)
return results.count(True) >= quorum_size(self.ring.replica_count)
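    # Worked example of the success condition above (hypothetical ring): with
    # replica_count=3, quorum_size(3) == 2, so the send is reported successful
    # when at least 2 of the 3 container replicas accept the shard ranges.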
def _get_shard_broker(self, shard_range, root_path, policy_index):
"""
Get a broker for a container db for the given shard range. If one of
the shard container's primary nodes is a local device then that will be
chosen for the db, otherwise the first of the shard container's handoff
nodes that is local will be chosen.
:param shard_range: a :class:`~swift.common.utils.ShardRange`
:param root_path: the path of the shard's root container
:param policy_index: the storage policy index
:returns: a tuple of ``(part, broker, node_id)`` where ``part`` is the
shard container's partition, ``broker`` is an instance of
:class:`~swift.container.backend.ContainerBroker`,
``node_id`` is the id of the selected node.
"""
part = self.ring.get_part(shard_range.account, shard_range.container)
node = self.find_local_handoff_for_part(part)
put_timestamp = Timestamp.now().internal
if not node:
raise DeviceUnavailable(
'No mounted devices found suitable for creating shard broker '
'for %s in partition %s' % (quote(shard_range.name), part))
shard_broker = ContainerBroker.create_broker(
os.path.join(self.root, node['device']), part, shard_range.account,
shard_range.container, epoch=shard_range.epoch,
storage_policy_index=policy_index, put_timestamp=put_timestamp)
# Get the valid info into the broker.container, etc
shard_broker.get_info()
shard_broker.merge_shard_ranges(shard_range)
shard_broker.set_sharding_sysmeta('Quoted-Root', quote(root_path))
# NB: we *used* to do
# shard_broker.set_sharding_sysmeta('Root', root_path)
# but that isn't safe for container names with nulls or newlines (or
# possibly some other characters). We consciously *don't* make any
# attempt to set the old meta; during an upgrade, some shards may think
# they are in fact roots, but it cleans up well enough once everyone's
# upgraded.
shard_broker.update_metadata({
'X-Container-Sysmeta-Sharding':
('True', Timestamp.now().internal)})
return part, shard_broker, node['id'], put_timestamp
def _audit_root_container(self, broker):
# This is the root container, and therefore the tome of knowledge,
# all we can do is check there is nothing screwy with the ranges
self._increment_stat('audit_root', 'attempted')
warnings = []
own_shard_range = broker.get_own_shard_range()
if own_shard_range.state in (ShardRange.SHARDING, ShardRange.SHARDED):
shard_ranges = [sr for sr in broker.get_shard_ranges()
if sr.state != ShardRange.SHRINKING]
missing_ranges = find_missing_ranges(shard_ranges)
if missing_ranges:
warnings.append(
'missing range(s): %s' %
' '.join(['%s-%s' % (lower, upper)
for lower, upper in missing_ranges]))
for state in ShardRange.STATES:
if state == ShardRange.SHRINKING:
# Shrinking is how we resolve overlaps; we've got to
# allow multiple shards in that state
continue
shard_ranges = broker.get_shard_ranges(states=state)
overlaps = find_overlapping_ranges(shard_ranges)
if overlaps:
self._increment_stat('audit_root', 'has_overlap')
self._increment_stat('audit_root', 'num_overlap',
step=len(overlaps))
all_overlaps = ', '.join(
[' '.join(['%s-%s' % (sr.lower, sr.upper)
for sr in overlapping_ranges])
for overlapping_ranges in sorted(list(overlaps))])
warnings.append(
'overlapping ranges in state %r: %s' %
(ShardRange.STATES[state], all_overlaps))
        # We've seen a case in production where the root's own_shard_range
        # epoch is reset to None, and its state set to ACTIVE (i.e.
        # re-defaulted). The epoch is important to sharding, so we want to
        # detect if this happens: 1. so we can alert, and 2. to see how
        # common it is.
if own_shard_range.epoch is None and broker.db_epoch:
warnings.append('own_shard_range reset to None should be %s'
% broker.db_epoch)
if warnings:
self.logger.warning(
'Audit failed for root %s (%s): %s',
broker.db_file, quote(broker.path), ', '.join(warnings))
self._increment_stat('audit_root', 'failure', statsd=True)
return False
self._increment_stat('audit_root', 'success', statsd=True)
return True
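    # Illustrative sketch (standalone, not part of the original module): gap
    # detection over contiguous (lower, upper] bounds, the idea behind the
    # find_missing_ranges call above. Consecutive ranges must chain exactly
    # from one range's upper bound to the next range's lower bound:
    #
    #   def _gaps(bounds):
    #       gaps, last = [], ''
    #       for lower, upper in bounds:
    #           if lower != last:
    #               gaps.append((last, lower))
    #           last = upper
    #       return gaps
    #
    #   _gaps([('', 'c'), ('d', '')]) == [('c', 'd')]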
def _audit_shard_container(self, broker):
self._increment_stat('audit_shard', 'attempted')
warnings = []
errors = []
if not broker.account.startswith(self.shards_account_prefix):
warnings.append('account not in shards namespace %r' %
self.shards_account_prefix)
own_shard_range = broker.get_own_shard_range(no_default=True)
shard_ranges = own_shard_range_from_root = None
if own_shard_range:
# Get the root view of the world, at least that part of the world
# that overlaps with this shard's namespace. The
# 'states=auditing' parameter will cause the root to include
# its own shard range in the response, which is necessary for the
# particular case when this shard should be shrinking to the root
# container; when not shrinking to root, but to another acceptor,
# the root range should be in sharded state and will not interfere
# with cleaving, listing or updating behaviour.
shard_ranges = self._fetch_shard_ranges(
broker, newest=True,
params={'marker': str_to_wsgi(own_shard_range.lower_str),
'end_marker': str_to_wsgi(own_shard_range.upper_str),
'states': 'auditing'},
include_deleted=True)
if shard_ranges:
for shard_range in shard_ranges:
# look for this shard range in the list of shard ranges
# received from root; the root may have different lower and
# upper bounds for this shard (e.g. if this shard has been
# expanded in the root to accept a shrinking shard) so we
# only match on name.
if shard_range.name == own_shard_range.name:
own_shard_range_from_root = shard_range
break
else:
# this is not necessarily an error - some replicas of the
# root may not yet know about this shard container
warnings.append('root has no matching shard range')
elif not own_shard_range.deleted:
warnings.append('unable to get shard ranges from root')
# else, our shard range is deleted, so root may have reclaimed it
else:
errors.append('missing own shard range')
if warnings:
self.logger.warning(
'Audit warnings for shard %s (%s): %s',
broker.db_file, quote(broker.path), ', '.join(warnings))
if errors:
self.logger.warning(
'Audit failed for shard %s (%s) - skipping: %s',
broker.db_file, quote(broker.path), ', '.join(errors))
self._increment_stat('audit_shard', 'failure', statsd=True)
return False
if own_shard_range_from_root:
            # iff we find our own shard range in the root response, merge it
            # and reload own shard range (note: own_shard_range_from_root may
            # not necessarily be 'newer' than the own shard range we already
            # have, but merging will get us to the 'newest' state)
self.logger.debug('Updating own shard range from root')
broker.merge_shard_ranges(own_shard_range_from_root)
orig_own_shard_range = own_shard_range
own_shard_range = broker.get_own_shard_range()
if (orig_own_shard_range != own_shard_range or
orig_own_shard_range.state != own_shard_range.state):
self.logger.debug(
'Updated own shard range from %s to %s',
orig_own_shard_range, own_shard_range)
if own_shard_range.state in (ShardRange.SHRINKING,
ShardRange.SHRUNK):
# If the up-to-date state is shrinking, save off *all* shards
# returned because these may contain shards into which this
# shard is to shrink itself; shrinking is the only case when we
# want to learn about *other* shard ranges from the root.
# We need to include shrunk state too, because one replica of a
# shard may already have moved the own_shard_range state to
# shrunk while another replica may still be in the process of
# shrinking.
other_shard_ranges = [sr for sr in shard_ranges
if sr is not own_shard_range_from_root]
self.logger.debug('Updating %s other shard range(s) from root',
len(other_shard_ranges))
broker.merge_shard_ranges(other_shard_ranges)
delete_age = time.time() - self.reclaim_age
deletable_states = (ShardRange.SHARDED, ShardRange.SHRUNK)
if (own_shard_range.state in deletable_states and
own_shard_range.deleted and
own_shard_range.timestamp < delete_age and
broker.empty()):
broker.delete_db(Timestamp.now().internal)
self.logger.debug('Deleted shard container %s (%s)',
broker.db_file, quote(broker.path))
self._increment_stat('audit_shard', 'success', statsd=True)
return True
def _audit_cleave_contexts(self, broker):
now = Timestamp.now()
for context, last_mod in CleavingContext.load_all(broker):
last_mod = Timestamp(last_mod)
is_done = context.done() and last_mod.timestamp + \
self.recon_sharded_timeout < now.timestamp
is_stale = last_mod.timestamp + self.reclaim_age < now.timestamp
if is_done or is_stale:
context.delete(broker)
def _audit_container(self, broker):
if broker.is_deleted():
if broker.is_old_enough_to_reclaim(time.time(), self.reclaim_age) \
and not broker.is_empty_enough_to_reclaim():
self.logger.warning(
'Reclaimable db stuck waiting for shrinking: %s (%s)',
broker.db_file, quote(broker.path))
# if the container has been marked as deleted, all metadata will
# have been erased so no point auditing. But we want it to pass, in
# case any objects exist inside it.
return True
self._audit_cleave_contexts(broker)
if broker.is_root_container():
return self._audit_root_container(broker)
return self._audit_shard_container(broker)
def yield_objects(self, broker, src_shard_range, since_row=None):
"""
Iterates through all objects in ``src_shard_range`` in name order
        yielding them in lists of up to ``cleave_row_batch_size`` objects.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param src_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the source range.
:param since_row: include only items whose ROWID is greater than
the given row id; by default all rows are included.
:return: a generator of tuples of (list of objects, broker info dict)
"""
for include_deleted in (False, True):
marker = src_shard_range.lower_str
while True:
info = broker.get_info()
info['max_row'] = broker.get_max_row()
start = time.time()
objects = broker.get_objects(
self.cleave_row_batch_size,
marker=marker,
end_marker=src_shard_range.end_marker,
include_deleted=include_deleted,
since_row=since_row)
if objects:
self.logger.debug('got %s objects from %s in %ss',
len(objects), broker.db_file,
time.time() - start)
yield objects, info
if len(objects) < self.cleave_row_batch_size:
break
marker = objects[-1]['name']
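    # Illustrative sketch (standalone, not part of the original module): the
    # marker-based paging pattern used by yield_objects, with a plain sorted
    # list standing in for broker.get_objects:
    #
    #   def paged(items, batch_size):
    #       marker = ''
    #       while True:
    #           batch = [x for x in items if x > marker][:batch_size]
    #           if batch:
    #               yield batch
    #           if len(batch) < batch_size:
    #               break
    #           marker = batch[-1]
    #
    #   list(paged(['a', 'b', 'c'], 2)) == [['a', 'b'], ['c']]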
def yield_objects_to_shard_range(self, broker, src_shard_range,
dest_shard_ranges):
"""
        Iterates through all objects in ``src_shard_range`` to place them in
        destination shard ranges provided by the ``dest_shard_ranges``
        function. Yields tuples of (object list, destination shard range,
        broker info dict). Note that the same destination shard range may be
        referenced in more than one yielded tuple.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param src_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the source range.
:param dest_shard_ranges: A function which should return a list of
destination shard ranges in name order.
:return: a generator of tuples of
(object list, shard range, broker info dict)
"""
dest_shard_range_iter = dest_shard_range = None
for objs, info in self.yield_objects(broker, src_shard_range):
if not objs:
return
def next_or_none(it):
try:
return next(it)
except StopIteration:
return None
if dest_shard_range_iter is None:
dest_shard_range_iter = iter(dest_shard_ranges())
dest_shard_range = next_or_none(dest_shard_range_iter)
unplaced = False
last_index = next_index = 0
for obj in objs:
if dest_shard_range is None:
# no more destinations: yield remainder of batch and bail
# NB there may be more batches of objects but none of them
# will be placed so no point fetching them
yield objs[last_index:], None, info
return
if obj['name'] <= dest_shard_range.lower:
unplaced = True
elif unplaced:
# end of run of unplaced objects, yield them
yield objs[last_index:next_index], None, info
last_index = next_index
unplaced = False
while (dest_shard_range and
obj['name'] > dest_shard_range.upper):
if next_index != last_index:
# yield the objects in current dest_shard_range
yield (objs[last_index:next_index],
dest_shard_range,
info)
last_index = next_index
dest_shard_range = next_or_none(dest_shard_range_iter)
next_index += 1
if next_index != last_index:
# yield tail of current batch of objects
# NB there may be more objects for the current
# dest_shard_range in the next batch from yield_objects
yield (objs[last_index:next_index],
None if unplaced else dest_shard_range,
info)
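    # Illustrative sketch (standalone, not part of the original module): the
    # core idea of yield_objects_to_shard_range is bucketing sorted names
    # into half-open (lower, upper] ranges, with names outside every range
    # mapped to None. A simplified, hypothetical version with no batching:
    #
    #   def bucket(names, ranges):
    #       # ranges: sorted (lower, upper) pairs, '' meaning unbounded
    #       it = iter(ranges)
    #       cur = next(it, None)
    #       for name in names:
    #           while cur and cur[1] and name > cur[1]:
    #               cur = next(it, None)
    #           if cur and name > cur[0]:
    #               yield name, cur
    #           else:
    #               yield name, None
    #
    #   list(bucket(['b', 'd'], [('', 'c'), ('c', '')]))
    #   == [('b', ('', 'c')), ('d', ('c', ''))]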
def _post_replicate_hook(self, broker, info, responses):
# override superclass behaviour
pass
def _replicate_and_delete(self, broker, dest_shard_range, part,
dest_broker, node_id, info):
success, responses = self._replicate_object(
part, dest_broker.db_file, node_id)
quorum = quorum_size(self.ring.replica_count)
if not success and responses.count(True) < quorum:
self.logger.warning(
'Failed to sufficiently replicate misplaced objects: %s in %s '
'(not removing)', dest_shard_range, quote(broker.path))
return False
if broker.get_info()['id'] != info['id']:
# the db changed - don't remove any objects
success = False
else:
# remove objects up to the max row of the db sampled prior to
# the first object yielded for this destination; objects added
# after that point may not have been yielded and replicated so
# it is not safe to remove them yet
broker.remove_objects(
dest_shard_range.lower_str,
dest_shard_range.upper_str,
max_row=info['max_row'])
success = True
if not success:
self.logger.warning(
'Refused to remove misplaced objects: %s in %s',
dest_shard_range, quote(broker.path))
return success
def _move_objects(self, src_broker, src_shard_range, policy_index,
shard_range_fetcher):
# move objects from src_shard_range in src_broker to destination shard
# ranges provided by shard_range_fetcher
dest_brokers = {} # map shard range -> broker
placed = unplaced = 0
success = True
for objs, dest_shard_range, info in self.yield_objects_to_shard_range(
src_broker, src_shard_range, shard_range_fetcher):
if not dest_shard_range:
unplaced += len(objs)
success = False
continue
if dest_shard_range.name == src_broker.path:
self.logger.debug(
'Skipping source as misplaced objects destination')
# in shrinking context, the misplaced objects might actually be
# correctly placed if the root has expanded this shard but this
# broker has not yet been updated
continue
if dest_shard_range not in dest_brokers:
part, dest_broker, node_id, _junk = self._get_shard_broker(
dest_shard_range, src_broker.root_path, policy_index)
# save the broker info that was sampled prior to the *first*
# yielded objects for this destination
destination = {'part': part,
'dest_broker': dest_broker,
'node_id': node_id,
'info': info}
dest_brokers[dest_shard_range] = destination
else:
destination = dest_brokers[dest_shard_range]
destination['dest_broker'].merge_items(objs)
placed += len(objs)
if unplaced:
self.logger.warning(
'Failed to find destination for at least %s misplaced objects '
'in %s', unplaced, quote(src_broker.path))
# TODO: consider executing the replication jobs concurrently
for dest_shard_range, dest_args in dest_brokers.items():
self.logger.debug('moving misplaced objects found in range %s' %
dest_shard_range)
success &= self._replicate_and_delete(
src_broker, dest_shard_range, **dest_args)
self._increment_stat('misplaced', 'placed', step=placed)
self._increment_stat('misplaced', 'unplaced', step=unplaced)
return success, placed + unplaced
def _make_shard_range_fetcher(self, broker, src_shard_range):
        # returns a function that will lazily load shard ranges on demand;
        # this means only one lookup is made for all misplaced ranges.
outer = {}
def shard_range_fetcher():
if not outer:
if broker.is_root_container():
ranges = broker.get_shard_ranges(
marker=src_shard_range.lower_str,
end_marker=src_shard_range.end_marker,
states=SHARD_UPDATE_STATES)
else:
# TODO: the root may not yet know about shard ranges to
# which a shard is sharding, but those could come from
# the broker
ranges = self._fetch_shard_ranges(
broker, newest=True,
params={'states': 'updating',
'marker': str_to_wsgi(
src_shard_range.lower_str),
'end_marker': str_to_wsgi(
src_shard_range.end_marker)})
outer['ranges'] = iter(ranges)
return outer['ranges']
return shard_range_fetcher
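    # Illustration (not part of the original module): the closure above uses
    # a mutable outer dict to defer an expensive lookup until first call and
    # then cache it, the classic load-once pattern:
    #
    #   def make_fetcher(load):
    #       cache = {}
    #       def fetcher():
    #           if 'value' not in cache:
    #               cache['value'] = load()
    #           return cache['value']
    #       return fetcher
    #
    #   f = make_fetcher(some_expensive_lookup)  # hypothetical callable
    #   f(); f()  # some_expensive_lookup runs only once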
def _make_default_misplaced_object_bounds(self, broker):
# Objects outside of this container's own range are misplaced.
own_shard_range = broker.get_own_shard_range()
bounds = []
if own_shard_range.lower:
bounds.append(('', own_shard_range.lower))
if own_shard_range.upper:
bounds.append((own_shard_range.upper, ''))
return bounds
def _make_misplaced_object_bounds(self, broker):
bounds = []
state = broker.get_db_state()
if state == SHARDED:
# Anything in the object table is treated as a misplaced object.
bounds.append(('', ''))
if not bounds and state == SHARDING:
# Objects outside of this container's own range are misplaced.
# Objects in already cleaved shard ranges are also misplaced.
cleave_context = CleavingContext.load(broker)
if cleave_context.cursor:
bounds.append(('', cleave_context.cursor))
own_shard_range = broker.get_own_shard_range()
if own_shard_range.upper:
bounds.append((own_shard_range.upper, ''))
return bounds or self._make_default_misplaced_object_bounds(broker)
def _move_misplaced_objects(self, broker, src_broker=None,
src_bounds=None):
"""
Search for objects in the given broker that do not belong in that
broker's namespace and move those objects to their correct shard
container.
:param broker: An instance of :class:`swift.container.ContainerBroker`.
:param src_broker: optional alternative broker to use as the source
of misplaced objects; if not specified then ``broker`` is used as
the source.
:param src_bounds: optional list of (lower, upper) namespace bounds to
use when searching for misplaced objects
:return: True if all misplaced objects were sufficiently replicated to
their correct shard containers, False otherwise
"""
self.logger.debug('Looking for misplaced objects in %s (%s)',
quote(broker.path), broker.db_file)
self._increment_stat('misplaced', 'attempted')
src_broker = src_broker or broker
if src_bounds is None:
src_bounds = self._make_misplaced_object_bounds(broker)
# (ab)use ShardRange instances to encapsulate source namespaces
src_ranges = [ShardRange('dont/care', Timestamp.now(), lower, upper)
for lower, upper in src_bounds]
self.logger.debug('misplaced object source bounds %s' % src_bounds)
policy_index = broker.storage_policy_index
success = True
num_found = 0
for src_shard_range in src_ranges:
part_success, part_num_found = self._move_objects(
src_broker, src_shard_range, policy_index,
self._make_shard_range_fetcher(broker, src_shard_range))
success &= part_success
num_found += part_num_found
if num_found:
self._increment_stat('misplaced', 'found', statsd=True)
self.logger.debug('Moved %s misplaced objects' % num_found)
self._increment_stat('misplaced', 'success' if success else 'failure')
self.logger.debug('Finished handling misplaced objects')
return success
def _find_shard_ranges(self, broker):
"""
Scans the container to find shard ranges and adds them to the shard
ranges table. If there are existing shard ranges then scanning starts
from the upper bound of the uppermost existing shard range.
:param broker: An instance of :class:`swift.container.ContainerBroker`
        :return: the number of shard ranges found; the scan is complete when
            the last (uppermost) shard range has been found.
"""
own_shard_range = broker.get_own_shard_range()
shard_ranges = broker.get_shard_ranges()
if shard_ranges and shard_ranges[-1].upper >= own_shard_range.upper:
self.logger.debug('Scan for shard ranges already completed for %s',
quote(broker.path))
return 0
self.logger.info('Starting scan for shard ranges on %s',
quote(broker.path))
self._increment_stat('scanned', 'attempted')
start = time.time()
shard_data, last_found = broker.find_shard_ranges(
self.rows_per_shard, limit=self.shard_scanner_batch_size,
existing_ranges=shard_ranges,
minimum_shard_size=self.minimum_shard_size)
elapsed = time.time() - start
if not shard_data:
if last_found:
self.logger.info("Already found all shard ranges")
self._increment_stat('scanned', 'success', statsd=True)
else:
# we didn't find anything
self.logger.warning("No shard ranges found")
self._increment_stat('scanned', 'failure', statsd=True)
return 0
shard_ranges = make_shard_ranges(
broker, shard_data, self.shards_account_prefix)
broker.merge_shard_ranges(shard_ranges)
num_found = len(shard_ranges)
self.logger.info(
"Completed scan for shard ranges: %d found", num_found)
self._increment_stat('scanned', 'found', step=num_found)
self._min_stat('scanned', 'min_time', round(elapsed / num_found, 3))
self._max_stat('scanned', 'max_time', round(elapsed / num_found, 3))
if last_found:
self.logger.info("Final shard range reached.")
self._increment_stat('scanned', 'success', statsd=True)
return num_found
def _create_shard_containers(self, broker):
# Create shard containers that are ready to receive redirected object
# updates. Do this now, so that redirection can begin immediately
# without waiting for cleaving to complete.
found_ranges = broker.get_shard_ranges(states=ShardRange.FOUND)
created_ranges = []
for shard_range in found_ranges:
self._increment_stat('created', 'attempted')
shard_range.update_state(ShardRange.CREATED)
headers = {
'X-Backend-Storage-Policy-Index': broker.storage_policy_index,
'X-Container-Sysmeta-Shard-Quoted-Root': quote(
broker.root_path),
'X-Container-Sysmeta-Sharding': 'True',
'X-Backend-Auto-Create': 'True'}
# NB: we *used* to send along
# 'X-Container-Sysmeta-Shard-Root': broker.root_path
# but that isn't safe for container names with nulls or newlines
# (or possibly some other characters). We consciously *don't* make
# any attempt to set the old meta; during an upgrade, some shards
# may think they are in fact roots, but it cleans up well enough
# once everyone's upgraded.
success = self._send_shard_ranges(
shard_range.account, shard_range.container,
[shard_range], headers=headers)
if success:
self.logger.debug('PUT new shard range container for %s',
shard_range)
self._increment_stat('created', 'success', statsd=True)
else:
self.logger.error(
'PUT of new shard container %r failed for %s.',
shard_range, quote(broker.path))
self._increment_stat('created', 'failure', statsd=True)
# break, not continue, because elsewhere it is assumed that
# finding and cleaving shard ranges progresses linearly, so we
# do not want any subsequent shard ranges to be in created
# state while this one is still in found state
break
created_ranges.append(shard_range)
if created_ranges:
broker.merge_shard_ranges(created_ranges)
if not broker.is_root_container():
self._send_shard_ranges(
broker.root_account, broker.root_container, created_ranges)
self.logger.info(
"Completed creating shard range containers: %d created.",
len(created_ranges))
return len(created_ranges)
def _cleave_shard_broker(self, broker, cleaving_context, shard_range,
own_shard_range, shard_broker, put_timestamp,
shard_part, node_id):
start = time.time()
# only cleave from the retiring db - misplaced objects handler will
# deal with any objects in the fresh db
source_broker = broker.get_brokers()[0]
# if this range has been cleaved before but replication
# failed then the shard db may still exist and it may not be
# necessary to merge all the rows again
source_db_id = source_broker.get_info()['id']
source_max_row = source_broker.get_max_row()
sync_point = shard_broker.get_sync(source_db_id)
if sync_point < source_max_row or source_max_row == -1:
sync_from_row = max(cleaving_context.last_cleave_to_row or -1,
sync_point)
objects = None
for objects, info in self.yield_objects(
source_broker, shard_range,
since_row=sync_from_row):
shard_broker.merge_items(objects)
if objects is None:
self.logger.info("Cleaving '%s': %r - zero objects found",
quote(broker.path), shard_range)
if shard_broker.get_info()['put_timestamp'] == put_timestamp:
# This was just created; don't need to replicate this
# SR because there was nothing there. So cleanup and
# remove the shard_broker from its hand off location.
self.delete_db(shard_broker)
cleaving_context.range_done(shard_range.upper_str)
if shard_range.upper >= own_shard_range.upper:
# cleaving complete
cleaving_context.cleaving_done = True
cleaving_context.store(broker)
                    # Because nothing was here we won't count it in the
                    # shard batch count.
return CLEAVE_EMPTY
# Else, it wasn't newly created by us, and
# we don't know what's in it or why. Let it get
# replicated and counted in the batch count.
# Note: the max row stored as a sync point is sampled *before*
# objects are yielded to ensure that is less than or equal to
# the last yielded row. Other sync points are also copied from the
# source broker to the shards; if another replica of the source
# happens to subsequently cleave into a primary replica of the
# shard then it will only need to cleave rows after its last sync
# point with this replica of the source broker.
shard_broker.merge_syncs(
[{'sync_point': source_max_row, 'remote_id': source_db_id}] +
source_broker.get_syncs())
else:
self.logger.debug("Cleaving '%s': %r - shard db already in sync",
quote(broker.path), shard_range)
replication_quorum = self.existing_shard_replication_quorum
if own_shard_range.state in (ShardRange.SHRINKING, ShardRange.SHRUNK):
if shard_range.includes(own_shard_range):
# When shrinking to a single acceptor that completely encloses
# this shard's namespace, include deleted own (donor) shard
# range in the replicated db so that when acceptor next updates
# root it will atomically update its namespace *and* delete the
# donor. This reduces the chance of a temporary listing gap if
# this shard fails to update the root with its SHRUNK/deleted
# state. Don't do this when sharding a shard or shrinking to
# multiple acceptors because in those cases the donor namespace
# should not be deleted until *all* shards are cleaved.
if own_shard_range.update_state(ShardRange.SHRUNK):
own_shard_range.set_deleted()
broker.merge_shard_ranges(own_shard_range)
shard_broker.merge_shard_ranges(own_shard_range)
elif shard_range.state == ShardRange.CREATED:
# The shard range object stats may have changed since the shard
# range was found, so update with stats of objects actually
# copied to the shard broker. Only do this the first time each
# shard range is cleaved.
info = shard_broker.get_info()
shard_range.update_meta(
info['object_count'], info['bytes_used'])
# Update state to CLEAVED; only do this when sharding, not when
# shrinking
shard_range.update_state(ShardRange.CLEAVED)
shard_broker.merge_shard_ranges(shard_range)
replication_quorum = self.shard_replication_quorum
self.logger.info(
'Replicating new shard container %s for %s',
quote(shard_broker.path), own_shard_range)
success, responses = self._replicate_object(
shard_part, shard_broker.db_file, node_id)
replication_successes = responses.count(True)
if (not success and (not responses or
replication_successes < replication_quorum)):
# insufficient replication or replication not even attempted;
# break because we don't want to progress the cleave cursor
# until each shard range has been successfully cleaved
self.logger.warning(
'Failed to sufficiently replicate cleaved shard %s for %s: '
'%s successes, %s required.', shard_range, quote(broker.path),
replication_successes, replication_quorum)
self._increment_stat('cleaved', 'failure', statsd=True)
return CLEAVE_FAILED
elapsed = round(time.time() - start, 3)
self._min_stat('cleaved', 'min_time', elapsed)
self._max_stat('cleaved', 'max_time', elapsed)
broker.merge_shard_ranges(shard_range)
cleaving_context.range_done(shard_range.upper_str)
if shard_range.upper >= own_shard_range.upper:
# cleaving complete
cleaving_context.cleaving_done = True
cleaving_context.store(broker)
self.logger.info(
'Cleaved %s for shard range %s in %gs.',
quote(broker.path), shard_range, elapsed)
self._increment_stat('cleaved', 'success', statsd=True)
return CLEAVE_SUCCESS
def _cleave_shard_range(self, broker, cleaving_context, shard_range,
own_shard_range):
self.logger.info("Cleaving '%s' from row %s into %s for %r",
quote(broker.path),
cleaving_context.last_cleave_to_row,
quote(shard_range.name), shard_range)
self._increment_stat('cleaved', 'attempted')
policy_index = broker.storage_policy_index
try:
shard_part, shard_broker, node_id, put_timestamp = \
self._get_shard_broker(shard_range, broker.root_path,
policy_index)
except DeviceUnavailable as duex:
self.logger.warning(str(duex))
self._increment_stat('cleaved', 'failure', statsd=True)
return CLEAVE_FAILED
else:
return self._cleave_shard_broker(
broker, cleaving_context, shard_range, own_shard_range,
shard_broker, put_timestamp, shard_part, node_id)
def _cleave(self, broker):
# Returns True if misplaced objects have been moved and the entire
# container namespace has been successfully cleaved, False otherwise
if broker.is_sharded():
self.logger.debug('Passing over already sharded container %s',
quote(broker.path))
return True
cleaving_context = CleavingContext.load(broker)
if not cleaving_context.misplaced_done:
# ensure any misplaced objects in the source broker are moved; note
            # that this invocation of _move_misplaced_objects is targeted at
# the *retiring* db.
self.logger.debug(
'Moving any misplaced objects from sharding container: %s',
quote(broker.path))
bounds = self._make_default_misplaced_object_bounds(broker)
cleaving_context.misplaced_done = self._move_misplaced_objects(
broker, src_broker=broker.get_brokers()[0],
src_bounds=bounds)
cleaving_context.store(broker)
if cleaving_context.cleaving_done:
self.logger.debug('Cleaving already complete for container %s',
quote(broker.path))
return cleaving_context.misplaced_done
shard_ranges = broker.get_shard_ranges(marker=cleaving_context.marker)
# Ignore shrinking shard ranges: we never want to cleave objects to a
# shrinking shard. Shrinking shard ranges are to be expected in a root;
# shrinking shard ranges (other than own shard range) are not normally
# expected in a shard but can occur if there is an overlapping shard
# range that has been discovered from the root.
ranges_todo = [sr for sr in shard_ranges
if sr.state != ShardRange.SHRINKING]
if cleaving_context.cursor:
# always update ranges_todo in case shard ranges have changed since
# last visit
cleaving_context.ranges_todo = len(ranges_todo)
self.logger.debug('Continuing to cleave (%s done, %s todo): %s',
cleaving_context.ranges_done,
cleaving_context.ranges_todo,
quote(broker.path))
else:
cleaving_context.start()
own_shard_range = broker.get_own_shard_range()
cleaving_context.cursor = own_shard_range.lower_str
cleaving_context.ranges_todo = len(ranges_todo)
self.logger.debug('Starting to cleave (%s todo): %s',
cleaving_context.ranges_todo, quote(broker.path))
own_shard_range = broker.get_own_shard_range(no_default=True)
if own_shard_range is None:
# A default should never be SHRINKING or SHRUNK but because we
# may write own_shard_range back to broker, let's make sure
# it can't be defaulted.
self.logger.warning('Failed to get own_shard_range for %s',
quote(broker.path))
ranges_todo = [] # skip cleaving
ranges_done = []
for shard_range in ranges_todo:
if cleaving_context.cleaving_done:
# note: there may still be ranges_todo, for example: if this
# shard is shrinking and has merged a root shard range in
# sharded state along with an active acceptor shard range, but
# the root range is irrelevant
break
if len(ranges_done) == self.cleave_batch_size:
break
if shard_range.lower > cleaving_context.cursor:
self.logger.info('Stopped cleave at gap: %r - %r' %
(cleaving_context.cursor, shard_range.lower))
break
if shard_range.state not in (ShardRange.CREATED,
ShardRange.CLEAVED,
ShardRange.ACTIVE):
self.logger.info('Stopped cleave at unready %s', shard_range)
break
cleave_result = self._cleave_shard_range(
broker, cleaving_context, shard_range, own_shard_range)
if cleave_result == CLEAVE_SUCCESS:
ranges_done.append(shard_range)
elif cleave_result == CLEAVE_FAILED:
break
# else: CLEAVE_EMPTY: no errors, but no rows found either. keep
# going, and don't count it against our batch size
# _cleave_shard_range always store()s the context on success; *also* do
# that here in case we hit a failure right off the bat or ended loop
# with skipped ranges
cleaving_context.store(broker)
self.logger.debug(
'Cleaved %s shard ranges for %s',
len(ranges_done), quote(broker.path))
return (cleaving_context.misplaced_done and
cleaving_context.cleaving_done)
def _complete_sharding(self, broker):
cleaving_context = CleavingContext.load(broker)
if cleaving_context.done():
# Move all CLEAVED shards to ACTIVE state and if a shard then
# delete own shard range; these changes will be simultaneously
# reported in the next update to the root container.
own_shard_range = broker.get_own_shard_range(no_default=True)
if own_shard_range is None:
                # This is more of a belt-and-braces check; we shouldn't be
                # able to get this far without an own_shard_range. But
                # because we will be writing own_shard_range back, we need
                # to be sure one exists.
self.logger.warning('Failed to get own_shard_range for %s',
quote(broker.path))
return False
own_shard_range.update_meta(0, 0)
if own_shard_range.state in (ShardRange.SHRINKING,
ShardRange.SHRUNK):
own_shard_range.update_state(ShardRange.SHRUNK)
modified_shard_ranges = []
else:
own_shard_range.update_state(ShardRange.SHARDED)
modified_shard_ranges = broker.get_shard_ranges(
states=ShardRange.CLEAVED)
for sr in modified_shard_ranges:
sr.update_state(ShardRange.ACTIVE)
if (not broker.is_root_container() and not
own_shard_range.deleted):
own_shard_range = own_shard_range.copy(
timestamp=Timestamp.now(), deleted=1)
modified_shard_ranges.append(own_shard_range)
broker.merge_shard_ranges(modified_shard_ranges)
if broker.set_sharded_state():
return True
else:
self.logger.warning(
'Failed to remove retiring db file for %s',
quote(broker.path))
else:
self.logger.warning(
'Repeat cleaving required for %r with context: %s',
broker.db_files[0], dict(cleaving_context))
cleaving_context.reset()
cleaving_context.store(broker)
return False
def _find_and_enable_sharding_candidates(self, broker, shard_ranges=None):
candidates = find_sharding_candidates(
broker, self.shard_container_threshold, shard_ranges)
if candidates:
self.logger.debug('Identified %s sharding candidates',
len(candidates))
broker.merge_shard_ranges(candidates)
def _find_and_enable_shrinking_candidates(self, broker):
if not broker.is_sharded():
self.logger.warning('Cannot shrink a not yet sharded container %s',
quote(broker.path))
return
compactible_sequences = find_compactible_shard_sequences(
broker, self.shrink_threshold, self.expansion_limit,
self.max_shrinking, self.max_expanding, include_shrinking=True)
self.logger.debug('Found %s compactible sequences of length(s) %s' %
(len(compactible_sequences),
[len(s) for s in compactible_sequences]))
process_compactible_shard_sequences(broker, compactible_sequences)
own_shard_range = broker.get_own_shard_range()
for sequence in compactible_sequences:
acceptor = sequence[-1]
donors = ShardRangeList(sequence[:-1])
self.logger.debug(
'shrinking %d objects from %d shard ranges into %s in %s' %
(donors.object_count, len(donors), acceptor, broker.db_file))
if acceptor.name != own_shard_range.name:
self._send_shard_ranges(
acceptor.account, acceptor.container, [acceptor])
acceptor.increment_meta(donors.object_count, donors.bytes_used)
# Now send a copy of the expanded acceptor, with an updated
# timestamp, to each donor container. This forces each donor to
# asynchronously cleave its entire contents to the acceptor and
# delete itself. The donor will pass its own deleted shard range to
# the acceptor when cleaving. Subsequent updates from the donor or
# the acceptor will then update the root to have the deleted donor
# shard range.
for donor in donors:
self._send_shard_ranges(
donor.account, donor.container, [donor, acceptor])
def _update_root_container(self, broker):
own_shard_range = broker.get_own_shard_range(no_default=True)
if not own_shard_range:
return
# do a reclaim *now* in order to get best estimate of tombstone count
# that is consistent with the current object_count
reclaimer = self._reclaim(broker)
tombstones = reclaimer.get_tombstone_count()
self.logger.debug('tombstones in %s = %d',
quote(broker.path), tombstones)
own_shard_range.update_tombstones(tombstones)
if own_shard_range.reported:
return
# persist the reported shard metadata
broker.merge_shard_ranges(own_shard_range)
# now get a consistent list of own and other shard ranges
shard_ranges = broker.get_shard_ranges(
include_own=True,
include_deleted=True)
# send everything
if self._send_shard_ranges(
broker.root_account, broker.root_container, shard_ranges,
{'Referer': quote(broker.path)}):
# on success, mark ourselves as reported so we don't keep
# hammering the root
own_shard_range.reported = True
broker.merge_shard_ranges(own_shard_range)
def _process_broker(self, broker, node, part):
broker.get_info() # make sure account/container are populated
state = broker.get_db_state()
self.logger.debug('Starting processing %s state %s',
quote(broker.path), state)
if not self._audit_container(broker):
return
# now look and deal with misplaced objects.
self._move_misplaced_objects(broker)
if broker.is_deleted():
# This container is deleted so we can skip it. We still want
# deleted containers to go via misplaced items because they may
# have new objects sitting in them that may need to move.
return
is_leader = node['index'] == 0 and self.auto_shard
if state in (UNSHARDED, COLLAPSED):
if is_leader and broker.is_root_container():
# bootstrap sharding of root container
self._find_and_enable_sharding_candidates(
broker, shard_ranges=[broker.get_own_shard_range()])
own_shard_range = broker.get_own_shard_range()
if own_shard_range.state in (ShardRange.SHARDING,
ShardRange.SHRINKING,
ShardRange.SHARDED,
ShardRange.SHRUNK):
if broker.get_shard_ranges():
                    # container has been given shard ranges rather than
                    # having found them itself, e.g. via replication or a
                    # shrink event
if broker.set_sharding_state():
state = SHARDING
elif is_leader:
if broker.set_sharding_state():
state = SHARDING
else:
self.logger.debug(
'Own shard range in state %r but no shard ranges '
'and not leader; remaining unsharded: %s',
own_shard_range.state_text, quote(broker.path))
if state == SHARDING:
if is_leader:
num_found = self._find_shard_ranges(broker)
else:
num_found = 0
# create shard containers for newly found ranges
num_created = self._create_shard_containers(broker)
if num_found or num_created:
# share updated shard range state with other nodes
self._replicate_object(part, broker.db_file, node['id'])
# always try to cleave any pending shard ranges
cleave_complete = self._cleave(broker)
if cleave_complete:
self.logger.info('Completed cleaving of %s',
quote(broker.path))
if self._complete_sharding(broker):
state = SHARDED
self._increment_stat('visited', 'completed', statsd=True)
else:
self.logger.debug('Remaining in sharding state %s',
quote(broker.path))
if state == SHARDED and broker.is_root_container():
# look for shrink stats
self._identify_shrinking_candidate(broker, node)
if is_leader:
self._find_and_enable_shrinking_candidates(broker)
self._find_and_enable_sharding_candidates(broker)
for shard_range in broker.get_shard_ranges(
states=[ShardRange.SHARDING]):
self._send_shard_ranges(
shard_range.account, shard_range.container,
[shard_range])
if not broker.is_root_container():
# Update the root container with this container's shard range
# info; do this even when sharded in case previous attempts
# failed; don't do this if there is no own shard range. When
# sharding a shard, this is when the root will see the new
# shards move to ACTIVE state and the sharded shard
# simultaneously become deleted.
self._update_root_container(broker)
self.logger.debug('Finished processing %s state %s',
quote(broker.path), broker.get_db_state())
def _one_shard_cycle(self, devices_to_shard, partitions_to_shard):
"""
        The main function: everything the sharder does forks from this method.
The sharder loops through each container with sharding enabled and each
sharded container on the server, on each container it:
- audits the container
- checks and deals with misplaced items
- cleaves any shard ranges as required
- if not a root container, reports shard range stats to the root
container
"""
self.logger.info('Container sharder cycle starting, auto-sharding %s',
self.auto_shard)
if isinstance(devices_to_shard, (list, tuple)):
self.logger.info('(Override devices: %s)',
', '.join(str(d) for d in devices_to_shard))
if isinstance(partitions_to_shard, (list, tuple)):
self.logger.info('(Override partitions: %s)',
', '.join(str(p) for p in partitions_to_shard))
self._zero_stats()
self._local_device_ids = set()
dirs = []
self.ips = whataremyips(bind_ip=self.bind_ip)
for node in self.ring.devs:
device_path = self._check_node(node)
if not device_path:
continue
datadir = os.path.join(device_path, self.datadir)
if os.path.isdir(datadir):
# Populate self._local_device_ids so we can find devices for
# shard containers later
self._local_device_ids.add(node['id'])
if node['device'] not in devices_to_shard:
continue
part_filt = self._partition_dir_filter(
node['id'],
partitions_to_shard)
dirs.append((datadir, node, part_filt))
if not dirs:
            self.logger.info('Found no container directories')
for part, path, node in self.roundrobin_datadirs(dirs):
# NB: get_part_nodes always provides an 'index' key;
# this will be used in leader selection
for primary in self.ring.get_part_nodes(int(part)):
if node['id'] == primary['id']:
node = primary
break
else:
# Set index such that we'll *never* be selected as a leader
node['index'] = 'handoff'
broker = ContainerBroker(path, logger=self.logger,
timeout=self.broker_timeout)
error = None
try:
self._identify_sharding_candidate(broker, node)
if sharding_enabled(broker):
self._increment_stat('visited', 'attempted')
self._process_broker(broker, node, part)
self._increment_stat('visited', 'success', statsd=True)
else:
self._increment_stat('visited', 'skipped')
except (Exception, Timeout) as err:
self._increment_stat('visited', 'failure', statsd=True)
self.logger.exception(
'Unhandled exception while processing %s: %s', path, err)
error = err
try:
self._record_sharding_progress(broker, node, error)
except (Exception, Timeout) as error:
self.logger.exception(
'Unhandled exception while dumping progress for %s: %s',
path, error)
self._periodic_report_stats()
self._report_stats()
@contextmanager
def _set_auto_shard_from_command_line(self, **kwargs):
conf_auto_shard = self.auto_shard
auto_shard = kwargs.get('auto_shard', None)
if auto_shard is not None:
self.auto_shard = config_true_value(auto_shard)
try:
yield
finally:
self.auto_shard = conf_auto_shard
def run_forever(self, *args, **kwargs):
"""Run the container sharder until stopped."""
with self._set_auto_shard_from_command_line(**kwargs):
self.reported = time.time()
time.sleep(random() * self.interval)
while True:
begin = time.time()
try:
self._one_shard_cycle(devices_to_shard=Everything(),
partitions_to_shard=Everything())
except (Exception, Timeout):
self.logger.increment('errors')
self.logger.exception('Exception in sharder')
elapsed = time.time() - begin
self.logger.info(
'Container sharder cycle completed: %.02fs', elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the container sharder once."""
self.logger.info('Begin container sharder "once" mode')
override_options = parse_override_options(once=True, **kwargs)
devices_to_shard = override_options.devices or Everything()
partitions_to_shard = override_options.partitions or Everything()
with self._set_auto_shard_from_command_line(**kwargs):
begin = self.reported = time.time()
self._one_shard_cycle(devices_to_shard=devices_to_shard,
partitions_to_shard=partitions_to_shard)
elapsed = time.time() - begin
self.logger.info(
'Container sharder "once" mode completed: %.02fs', elapsed)
|
openstack/swift
|
swift/container/sharder.py
|
Python
|
apache-2.0
| 101,527
|
[
"VisIt"
] |
b4a2748a777acbc5c758089af1b5e9b2f04b4c93caa22ce45391e259e7ac93d7
|
"""
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
it requires graphviz and other prerequisites outside the scope of Caffe.
"""
from caffe.proto import caffe_pb2
"""
pydot is not supported under Python 3 and pydot2 doesn't work properly;
pydotplus works nicely (pip install pydotplus).
"""
try:
# Try to load pydotplus
import pydotplus as pydot
except ImportError:
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
    layer : a caffe.proto.caffe_pb2.LayerParameter protocol buffer.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\\n'
if layer.type == 'Convolution' or layer.type == 'Deconvolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size._values) else 1,
separator,
layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1,
separator,
layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size[0] if len(layer.pooling_param.kernel_size._values) else 1,
separator,
layer.pooling_param.stride[0] if len(layer.pooling_param.stride._values) else 1,
separator,
layer.pooling_param.pad[0] if len(layer.pooling_param.pad._values) else 0)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution' or layertype == 'Deconvolution':
color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
    caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net',
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
if phase is not None:
included = False
if len(layer.include) == 0:
included = True
if len(layer.include) > 0 and len(layer.exclude) > 0:
raise ValueError('layer ' + layer.name + ' has both include '
'and exclude specified.')
for layer_phase in layer.include:
included = included or layer_phase.phase == phase
for layer_phase in layer.exclude:
included = included and not layer_phase.phase == phase
if not included:
continue
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
            # copy so that per-layer color changes don't mutate the shared
            # module-level default style dict
            layer_style = LAYER_STYLE_DEFAULT.copy()
            layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png', phase=None):
"""Draws a caffe net and returns the image string encoded using the given
extension.
    Parameters
    ----------
    caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
    rankdir : {'LR', 'TB', 'BT'}
        Direction of graph layout.
    ext : string, optional
        The image extension (the default is 'png').
    phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
        Include layers from this network phase. If None, include all layers.
        (the default is None)
    Returns
    -------
    string :
        Image data encoded in the format given by ``ext``.
"""
return get_pydot_graph(caffe_net, rankdir, phase=phase).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext, phase))
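# Usage sketch (illustrative; assumes caffe's protobuf bindings and graphviz
# are installed, and 'deploy.prototxt' is a hypothetical path):
#
#   from google.protobuf import text_format
#   net = caffe_pb2.NetParameter()
#   with open('deploy.prototxt') as f:
#       text_format.Merge(f.read(), net)
#   draw_net_to_file(net, 'net.png', rankdir='LR')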
|
KellyChan/python-examples
|
cpp/deeplearning/caffe/python/caffe/draw.py
|
Python
|
mit
| 8,974
|
[
"NEURON"
] |
0cf6364da5e31b02383f1a92542e3ccbcbb5114024ea255a7bb25ba5055d6d14
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
from Bio.PopGen import GenePop
from Bio.PopGen.GenePop import FileParser
import Bio.PopGen.FDist
# Quite a few utility functions could be added (like remove pop,
# add locus, etc...). The recommended strategy is to convert back
# and forth from/to GenePop and use the GenePop utilities.
def convert_genepop_to_fdist(gp_rec, report_pops = None):
"""Converts a GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (either standard or big)
Returns:
FDist record.
"""
if hasattr(gp_rec, "populations"):
return _convert_genepop_to_fdist(gp_rec)
else:
return _convert_genepop_to_fdist_big(gp_rec, report_pops)
def _convert_genepop_to_fdist(gp_rec):
"""Converts a standard GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Standard)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
fd_rec.num_pops = len(gp_rec.populations)
for lc_i in range(len(gp_rec.loci_list)):
alleles = []
pop_data = []
for pop_i in range(len(gp_rec.populations)):
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
alleles.sort() #Dominance requires this
#here we go again (necessary...)
for pop_i in range(len(gp_rec.populations)):
allele_counts = {}
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None:
count = allele_counts.get(al, 0)
allele_counts[al] = count + 1
allele_array = [] #We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
#if lc_i==3:
# print alleles, allele_counts#, pop_data
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
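# Worked example (illustrative): with alleles [1, 2] at a locus and one
# population containing individuals ('i1', [[1, 1]]) and ('i2', [[1, None]]),
# the allele counts are {1: 3}, so the ordered allele_array is [3, 0] and the
# locus entry appended to loci_data becomes (2, [[3, 0]]).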
def _convert_genepop_to_fdist_big(gp_rec, report_pops = None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 1
fd_rec.num_loci = len(gp_rec.loci_list)
num_loci = len(gp_rec.loci_list)
loci = []
for i in range(num_loci):
loci.append(set())
pops = []
work_rec = FileParser.read(gp_rec.fname)
lParser = work_rec.get_individual()
def init_pop():
my_pop = []
for i in range(num_loci):
my_pop.append({})
return my_pop
curr_pop = init_pop()
num_pops = 1
if report_pops:
report_pops(num_pops)
while lParser:
if lParser != True:
for loci_pos in range(num_loci):
for al in lParser[1][loci_pos]:
if al is not None:
loci[loci_pos].add(al)
curr_pop[loci_pos][al]= curr_pop[loci_pos].get(al,0)+1
else:
pops.append(curr_pop)
num_pops += 1
if report_pops:
report_pops(num_pops)
curr_pop = init_pop()
lParser = work_rec.get_individual()
pops.append(curr_pop)
fd_rec.num_pops = num_pops
for loci_pos in range(num_loci):
alleles = list(loci[loci_pos])
alleles.sort()
loci_rec = [len(alleles), []]
for pop in pops:
pop_rec = []
for allele in alleles:
pop_rec.append(pop[loci_pos].get(allele, 0))
loci_rec[1].append(pop_rec)
fd_rec.loci_data.append(tuple(loci_rec))
return fd_rec
def _convert_genepop_to_fdist_big_old(gp_rec, report_loci = None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
def countPops(rec):
f2 = FileParser.read(rec.fname)
popCnt = 1
while f2.skip_population():
popCnt += 1
return popCnt
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
work_rec0 = FileParser.read(gp_rec.fname)
fd_rec.num_pops = countPops(work_rec0)
num_loci = len(gp_rec.loci_list)
for lc_i in range(num_loci):
if report_loci:
report_loci(lc_i, num_loci)
work_rec = FileParser.read(gp_rec.fname)
work_rec2 = FileParser.read(gp_rec.fname)
alleles = []
pop_data = []
lParser = work_rec.get_individual()
while lParser:
if lParser != True:
for al in lParser[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
lParser = work_rec.get_individual()
#here we go again (necessary...)
alleles.sort()
def process_pop(pop_data, alleles, allele_counts):
allele_array = [] #We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
lParser = work_rec2.get_individual()
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None]=0
while lParser:
if lParser == True:
process_pop(pop_data, alleles, allele_counts)
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None]=0
else:
for al in lParser[1][lc_i]:
allele_counts[al] += 1
lParser = work_rec2.get_individual()
process_pop(pop_data, alleles, allele_counts)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def approximate_fst(desired_fst, simulated_fst, parameter_fst,
max_run_fst = 1, min_run_fst = 0, limit = 0.005):
"""Calculates the next Fst attempt in order to approximate a
desired Fst.
"""
if abs(simulated_fst - desired_fst) < limit:
return parameter_fst, max_run_fst, min_run_fst
if simulated_fst > desired_fst:
max_run_fst = parameter_fst
next_parameter_fst = (min_run_fst + parameter_fst)/2
else:
min_run_fst = parameter_fst
next_parameter_fst = (max_run_fst + parameter_fst)/2
return next_parameter_fst, max_run_fst, min_run_fst
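# Worked examples (illustrative): approximate_fst performs one bisection
# step. If the simulation overshot the target, the current parameter becomes
# the new upper bound and the next attempt is halfway down to the lower
# bound; if it undershot, the parameter becomes the new lower bound:
#
#   approximate_fst(0.1, 0.3, 0.5) == (0.25, 0.5, 0)
#   approximate_fst(0.1, 0.05, 0.25, max_run_fst=0.5) == (0.375, 0.5, 0.25)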
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/PopGen/FDist/Utils.py
|
Python
|
gpl-2.0
| 6,997
|
[
"Biopython"
] |
3b4c98eaedd377fd710da086828ba14240006b674d3fbe23d3705e8e840c8f6e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# stopstore - Back end to stop one or more resource store units
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Stop store unit"""
import shared.returnvalues as returnvalues
from shared.conf import get_all_store_names
from shared.findtype import is_owner
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.handlers import correct_handler
from shared.init import initialize_main_variables
from shared.resadm import stop_resource_store
from shared.worker import Worker
def signature():
"""Signature of the main function"""
defaults = {
'unique_resource_name': REJECT_UNSET,
'store_name': [],
'all': [''],
'parallel': [''],
}
return ['text', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
output_objects.append({'object_type': 'text', 'text'
: '--------- Trying to STOP store ----------'})
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
if not correct_handler('POST'):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Only accepting POST requests to prevent unintended updates'})
return (output_objects, returnvalues.CLIENT_ERROR)
unique_resource_name = accepted['unique_resource_name'][-1]
store_name_list = accepted['store_name']
    stop_all = accepted['all'][-1].lower() == 'true'  # avoid shadowing built-in all()
parallel = accepted['parallel'][-1].lower() == 'true'
if not is_owner(client_id, unique_resource_name,
configuration.resource_home, logger):
output_objects.append({'object_type': 'error_text', 'text'
: 'Failure: You must be an owner of '
+ unique_resource_name
+ ' to stop the store!'})
return (output_objects, returnvalues.CLIENT_ERROR)
exit_status = returnvalues.OK
    if stop_all:
store_name_list = get_all_store_names(unique_resource_name)
# take action based on supplied list of stores
if len(store_name_list) == 0:
output_objects.append({'object_type': 'text', 'text'
: "No stores specified and 'all' argument not set to true: Nothing to do!"
})
workers = []
for store_name in store_name_list:
task = Worker(target=stop_resource_store,
args=(unique_resource_name, store_name,
configuration.resource_home, logger))
workers.append((store_name, [task]))
task.start()
if not parallel:
task.join()
for (store_name, task_list) in workers:
(status, msg) = task_list[0].finish()
output_objects.append({'object_type': 'header', 'text'
: 'Stop store'})
if not status:
output_objects.append({'object_type': 'error_text', 'text'
: 'Problems stopping store: %s' % msg})
exit_status = returnvalues.SYSTEM_ERROR
else:
output_objects.append({'object_type': 'text', 'text'
: 'Stop store success: %s' % msg})
return (output_objects, exit_status)
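# Editor's sketch of the start/join pattern above in isolation, using
# threading.Thread as a stand-in for MiG's internal Worker class; the
# stop_store callable passed in is hypothetical, not part of MiG's API.
import threading

def run_tasks(names, stop_store, parallel=False):
    tasks = []
    for name in names:
        task = threading.Thread(target=stop_store, args=(name,))
        tasks.append(task)
        task.start()
        if not parallel:
            # Serial mode: wait for each task before starting the next.
            task.join()
    for task in tasks:
        # In parallel mode this is where completion is finally awaited.
        task.join()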
| heromod/migrid | mig/shared/functionality/stopstore.py | Python | gpl-2.0 | 4,455 | ["Brian"] | 9adde452000f4c281c1c3de89f95cb1feb8bb2751bcfb77a182683f648073bce |
'''
Created on 2013-06-05

@author: brian
'''

class Item(object):
    """Base class for serializable game items."""

    def __init__(self):
        pass

    def load(self, data):
        """Restore this item's state from a dict; subclasses override."""
        pass

    def save(self):
        """Serialize to a dict, recording the concrete class name so the
        matching subclass can be reinstantiated on load."""
        data = {}
        data['type'] = self.__class__.__name__
        return data
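# Editor's sketch of how the 'type' field recorded by save() can drive a
# round-trip. The Sword subclass and item_from_data() helper below are
# hypothetical illustrations, not part of the original module.
class Sword(Item):
    def __init__(self):
        self.damage = 0

    def load(self, data):
        self.damage = data.get('damage', 0)

    def save(self):
        data = Item.save(self)  # records data['type'] == 'Sword'
        data['damage'] = self.damage
        return data

ITEM_CLASSES = {'Item': Item, 'Sword': Sword}

def item_from_data(data):
    # Look up the concrete class by the recorded 'type' and rehydrate it.
    item = ITEM_CLASSES[data['type']]()
    item.load(data)
    return item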
| Greymerk/python-rpg | src/items/item.py | Python | gpl-3.0 | 279 | ["Brian"] | dec5b0c13fa6974b8af286ebe8098061147260a1c612b1cbf5c3f8db24405755 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 18 17:48:12 2016
@author: antlaplante
"""
import argparse
import pandas as pd
from Bio import SeqIO
def get_aln(header):
    (name, _, chrom, pos, qual) = header
    return {'name': name,
            'chrm': chrom,
            'pos': int(pos),
            'qual': int(qual)}
def read_id2idx(reads_fn):
    # Crude format detection: filenames ending in 'q' (.fastq/.fq) are
    # treated as FASTQ, everything else as FASTA.
    if reads_fn[-1] == 'q':
        fmt = 'fastq'
    else:
        fmt = 'fasta'
    reads_idx = {}
    reads_fh = open(reads_fn, "rU")
    num_read = 0
    for record in SeqIO.parse(reads_fh, fmt):
        reads_idx[record.id] = num_read
        num_read += 1
    reads_fh.close()
    return reads_idx
def get_headers(sam_fn):
    # Load the first five tab-separated fields of each SAM line
    headers = []
    fh = open(sam_fn, 'r')
    for line in fh:
        header = line.split('\t')[:5]
        headers.append(header)
    fh.close()
    # Collect reference names from @SQ lines, then skip all header lines
    chr_names = []
    skipline = 0
    for header in headers:
        if header[0] == '@SQ':
            chr_names.append(header[1][3:])
            skipline += 1
        elif header[0][0] == '@':
            skipline += 1
        else:
            break
    headers = headers[skipline:]
    return (headers, chr_names)
def algn_dic(headers, reads_id_dic):
    # Keep only the best-quality alignment seen for each read
    algns = {}
    for header in headers:
        aln = get_aln(header)
        read_idx = reads_id_dic[aln['name']]
        if read_idx in algns and algns[read_idx]['qual'] > aln['qual']:
            continue
        algns[read_idx] = aln
    return algns
def int_to_roman(number):
    """Convert an integer to Roman numerals."""
    if not isinstance(number, int):
        raise TypeError("expected integer, got %s" % type(number))
    if not 0 < number < 4000:
        raise ValueError("Argument must be between 1 and 3999")
    ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
    result = ""
    for i in range(len(ints)):
        count = int(number / ints[i])
        result += nums[i] * count
        number -= ints[i] * count
    return result
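# Quick sanity checks (editor's note, not in the original):
# int_to_roman(4) == 'IV'; int_to_roman(9) == 'IX';
# int_to_roman(1994) == 'MCMXCIV'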
# Inputs
parser = argparse.ArgumentParser()
parser.add_argument("alignments",
                    help="path to .sam file from BWA-bwasw or GraphMap")
parser.add_argument("reads", help="path to reads")
parser.add_argument('-o', '--out', default='reads_position.csv',
                    help='output file with position of reads and '
                    'chromosome to which they belong')
args = parser.parse_args()
(headers, chr_names) = get_headers(args.alignments)
# Map reference names to Roman numerals (chromosome I, II, ...)
names_chr = {name: int_to_roman(idx + 1) for (idx, name) in enumerate(chr_names)}
reads_dic = read_id2idx(args.reads)
algnmts = algn_dic(headers, reads_dic)
for (idx, read_aln) in algnmts.items():
    read_aln['number'] = idx
    if read_aln['chrm'] in names_chr:
        read_aln['chr_nb'] = names_chr[read_aln['chrm']]
    else:
        read_aln['chr_nb'] = '*'
alns_df = pd.DataFrame(list(algnmts.values())).sort_values('number')
alns_df.to_csv(args.out, sep='\t')
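# Example invocation (editor's note; filenames are hypothetical):
#   python extract_pos_from_sam.py alignments.sam reads.fastq -o reads_position.csv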
| antrec/spectrassembler | tools/extract_pos_from_sam.py | Python | mit | 3,127 | ["BWA"] | 91445ba46439f529e9fb75b460274666e39d9a96cf9da68139e22ee71b1ae010 |