| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| string (length 12 – 1.05M) | string (length 5 – 86) | string (length 4 – 191) | string (1 class) | string (15 classes) | int32 (12 – 1.05M) | list (length 1 – 23) | string (length 64) |
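For orientation, each record in this dump carries one value per column of the header above. A minimal sketch of such a record follows; the metadata values are copied from the first row below, and the `text` value is truncated here for brevity:

# Sketch of one record in this dump (the "text" value is abbreviated).
record = {
    "text": '# -*- coding: utf-8 -*-\n# vim: autoindent ...',  # full source file contents
    "repo_name": "marmyshev/item_title",
    "path": "openlp/plugins/alerts/lib/db.py",
    "language": "Python",
    "license": "gpl-2.0",
    "size": 2872,
    "keyword": ["Brian"],
    "text_hash": "505ad17275fe824ac8ee912f75d4d57562c65d652ee88bf97ffac80423638ce4",
}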
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`db` module provides the database and schema that is the backend for the Alerts plugin.
"""
from sqlalchemy import Column, Table, types
from sqlalchemy.orm import mapper
from openlp.core.lib.db import BaseModel, init_db
class AlertItem(BaseModel):
"""
AlertItem model
"""
pass
def init_schema(url):
"""
Set up the alerts database connection and initialise the database schema.
``url``
The database to set up
"""
session, metadata = init_db(url)
alerts_table = Table('alerts', metadata,
Column('id', types.Integer(), primary_key=True),
Column('text', types.UnicodeText, nullable=False))
mapper(AlertItem, alerts_table)
metadata.create_all(checkfirst=True)
return session
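A minimal usage sketch for the module above, assuming a local SQLite URL (the URL and the alert text are illustrative and not part of the original file):

session = init_schema('sqlite:///alerts.sqlite')  # hypothetical database URL
alert = AlertItem()
alert.text = u'Please move the white car blocking the entrance'
session.add(alert)
session.commit()
print(session.query(AlertItem).count())  # at least one stored alert expected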
| marmyshev/item_title | openlp/plugins/alerts/lib/db.py | Python | gpl-2.0 | 2,872 | ["Brian"] | 505ad17275fe824ac8ee912f75d4d57562c65d652ee88bf97ffac80423638ce4 |
"""params_circuit.py: Summarizes parameters of neurons and the network in
dictionary. Each parameter set implements two functions. One specifying
all default parameter and one calculating the parameter that can be
derived from the default parameter. The default parameter will be used
to create a hash when saving results.
Authors: Hannah Bos, Jannis Schuecker
"""
import numpy as np
import hashlib as hl
import h5py_wrapper.wrapper as h5
def get_data_microcircuit(new_params={}):
""" Implements dictionary specifying all parameter of the microcircuit.
Keyword arguments:
new_params: dictionary, overwrites default parameter
Output:
params: dictionary with default and derived parameter
param_keys: list of default parameter
"""
params = {}
params['populations'] = ['23E', '23I', '4E', '4I',
'5E', '5I', '6E', '6I']
# number of neurons in populations
params['N'] = np.array([20683, 5834, 21915, 5479,
4850, 1065, 14395, 2948])
### Neurons
params['C'] = 250.0 # membrane capacitance in pF
params['taum'] = 10.0 # membrane time constant in ms
params['taur'] = 2.0 # refractory time in ms
params['V0'] = -65.0 # reset potential in mV
params['Vth'] = -50.0 # threshold of membrane potential in mV
### Synapses
params['tauf'] = 0.5 # fast synaptic time constant in ms
params['tau_slow'] = 100.0 # slow synaptic time constant in ms
params['de'] = 1.5 # delay of excitatory connections in ms
params['di'] = 0.75 # delay of inhibitory connections in ms
# standard deviation of delay of excitatory connections in ms
params['de_sd'] = params['de']*0.5
# standard deviation of delay of inhibitory connections in ms
params['di_sd'] = params['di']*0.5
# delay distribution, options: 'none', 'gaussian' (standard deviation
# is defined above), 'truncated gaussian' (standard deviation is
# defined above, truncation at zero)
params['delay_dist'] = 'none'
# PSC amplitude in pA
params['w'] = 87.8*0.5
### Connectivity
# indegrees
params['I'] = np.array([
[2.19986486e+03, 1.07932007e+03, 9.79241261e+02, 4.67578108e+02,
1.59240826e+02, 0, 1.09819852e+02, 0],
[2.99000583e+03, 8.60261056e+02, 7.03691807e+02, 2.89693864e+02,
3.80735859e+02, 0, 6.05863901e+01, 0],
[1.59875428e+02, 3.45225188e+01, 1.11717312e+03, 7.94596213e+02,
3.26043349e+01, 3.19552818e-01, 6.67325211e+02, 0],
[1.48097354e+03, 1.69432378e+01, 1.81302026e+03, 9.53325789e+02,
1.60313926e+01, 0, 1.60812283e+03, 0],
[2.18836598e+03, 3.74651134e+02, 1.13562969e+03, 3.13195876e+01,
4.20770722e+02, 4.96471959e+02, 2.96694639e+02, 0],
[1.16566761e+03, 1.59083568e+02, 5.70579343e+02, 1.20666667e+01,
3.00095775e+02, 4.04172770e+02, 1.24332394e+02, 0],
[3.25197985e+02, 3.86320250e+01, 4.67354637e+02, 9.17147621e+01,
2.85670372e+02, 2.11899271e+01, 5.81635915e+02, 7.52183189e+02],
[7.66905020e+02, 5.83683853e+00, 7.46380597e+01, 2.74016282e+00,
1.36240841e+02, 8.55427408e+00, 9.79791723e+02, 4.59402985e+02]])
# ratio of slow synaptic currents
params['xs'] = np.zeros((8,8))
params['xs_ext'] = np.zeros(8)
# ratio of inhibitory to excitatory weights
params['g']=4.0
### External input
params['v_ext'] = 8.0 # in Hz
# number of external neurons
params['Next'] = np.array([1600,1500,2100,1900,2000,1900,2900,2100])
### Neural response
# Transfer function is either calculated analytically ('analytical')
# or approximated by an exponential ('empirical'). In the latter case
# the time constants in response to an incoming impulse ('tau_impulse'),
# as well as the instantaneous rate jumps ('delta_f') have to be
# specified.
params['tf_mode'] = 'analytical'
if params['tf_mode'] == 'empirical':
params['tau_impulse'] = np.asarray([0.0 for i in range(8)])
params['delta_f'] = np.asarray([0.0 for i in range(8)])
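# Illustrative override (not part of the defaults above): to use the empirical
# transfer-function mode, the impulse-response time constants and rate jumps
# have to be supplied explicitly, e.g. via
#   get_data_microcircuit({'tf_mode': 'empirical',
#                          'tau_impulse': 4.0*np.ones(8),
#                          'delta_f': np.zeros(8)})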
# number of modes used when fast response time constants are calculated
params['num_modes'] = 1
# create list of parameter keys that are used to create hashes
param_keys = params.keys()
# Remove delay parameters from the key list since they don't contribute
# when calculating the working point and they are incorporated into
# the transfer function after it has been read from file.
for element in ['de', 'di', 'de_sd', 'di_sd', 'delay_dist']:
param_keys.remove(element)
# file storing results
params['datafile'] = 'results_microcircuit.h5'
# update parameter dictionary with new parameters
params.update(new_params)
# calculate all dependent parameters
params = get_dependend_params_microcircuit(params)
return params, param_keys
def get_dependend_params_microcircuit(params):
"""Returns dictionary with parameter which can be derived from the
default parameter.
"""
# weight matrix, only calculated if not already specified in params
if 'W' not in params:
W = np.ones((8,8))*params['w']
W[1:8:2] *= -params['g']
W = np.transpose(W)
# larger weight for L4E->L23E connections
W[0][2] *= 2.0
params['W'] = W
# delay matrix
D = np.ones((8,8))*params['de']
D[1:8:2] = np.ones(8)*params['di']
D = np.transpose(D)
params['Delay'] = D
# delay standard deviation matrix
D = np.ones((8,8))*params['de_sd']
D[1:8:2] = np.ones(8)*params['di_sd']
D = np.transpose(D)
params['Delay_sd'] = D
# fast and slow synaptic indegree currents
params['I_fast'] = params['I']*(1.-params['xs'])
params['I_slow'] = params['I']*params['xs']
params['Next_fast'] = params['Next']*(1.-params['xs_ext'])
params['Next_slow'] = params['Next']*params['xs_ext']
return params
def create_hashes(params, param_keys):
"""Returns hash of values of parameters listed in param_keys."""
label = ''
for key in param_keys:
value = params[key]
if isinstance(value, (np.ndarray, np.generic)):
label += value.tostring()
else:
label += str(value)
return hl.md5(label).hexdigest()
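A short sketch of how the two entry points above combine (the override of 'g' is illustrative; note the file is Python 2 era code, where dict.keys() returns a list and hashlib accepts str directly):

params, param_keys = get_data_microcircuit(new_params={'g': 5.0})
print(params['W'][0][2])                  # derived L4E->L23E weight (doubled as noted above)
print(create_hashes(params, param_keys))  # MD5 hash over the values of the default parameter keys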
| INM-6/neural_network_meanfield | params_circuit.py | Python | gpl-3.0 | 6,324 | ["Gaussian"] | 8e1a23b95d1af367f687c55fa0b84a7339f3279f150034c05ec4462c41673eed |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
##############################################
# Configuration, please edit
##############################################
# Data about this site
BLOG_AUTHOR = "Your Name"
BLOG_TITLE = "Demo Site"
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "http://nikola.ralsina.com.ar"
# This is the URL where nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://nikola.ralsina.com.ar"
BLOG_EMAIL = "joe@demo.site"
BLOG_DESCRIPTION = "This is a demo site for Nikola."
# Nikola is multilingual!
#
# Currently supported languages are:
# English -> en
# Greek -> gr
# German -> de
# French -> fr
# Polish -> pl
# Russian -> ru
# Spanish -> es
# Italian -> it
# Simplified Chinese -> zh-cn
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (e.g. look at the modules at: ./nikola/data/themes/default/messages/fr.py).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
"en": "",
# Example for another language:
"es": "./es",
}
# Links for the sidebar / navigation bar.
# You should provide a key-value pair for each used language.
SIDEBAR_LINKS = {
DEFAULT_LANG: (
('/archive.html', 'Archives'),
('/categories/index.html', 'Tags'),
),
"es": ()
}
##############################################
# Below this point, everything is optional
##############################################
# post_pages contains (wildcard, destination, template, use_in_feed) tuples.
#
# The wildcard is used to generate a list of reST source files
# (whatever/thing.txt).
# That fragment must have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.txt.es and whatever/thing.meta.es
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is specified in the metadata file.
#
# if use_in_feed is True, then those posts will be added to the site's
# rss feeds.
#
post_pages = (
("posts/*.txt", "posts", "post.tmpl", True),
("stories/*.txt", "stories", "story.tmpl", False),
)
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary mapping "source" to "relative destination".
# Default is:
# FILES_FOLDERS = {'files': '' }
# Which means copy 'files' into 'output'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
post_compilers = {
"rest": ('.txt', '.rst'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm')
}
# Create posts in one-file format by default?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = True
# Final location is output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# Final locations are:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Slugify the tag URL to make it easier for users to type; special characters
# are often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# An HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. Notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
# REDIRECTIONS = []
# Commands to execute to deploy. Can be anything, for example,
# you may use rsync:
# "rsync -rav output/* joe@my.site:/srv/www/site"
# And then do a backup, or ping pingomatic.
# To do manual deployment, set it to []
# DEPLOY_COMMANDS = []
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a single file extension, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, there are no filters.
# FILTERS = {
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
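# As an illustrative sketch (not part of the original file), a filter can also
# be a Python callable that receives the generated filename and rewrites the
# file in place, per the description above:
#
# def collapse_whitespace(filename):
#     import re
#     with open(filename) as handle:
#         data = handle.read()
#     with open(filename, 'w') as handle:
#         handle.write(re.sub(r'[ \t]+', ' ', data))
#
# FILTERS = {
#     ".html": [collapse_whitespace],
# }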
# Create a gzipped copy of each generated file. Cheap server-side optimization.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json')
# #############################################################################
# Image Gallery Options
# #############################################################################
# Galleries are folders in galleries/
# Final location of galleries will be output / GALLERY_PATH / gallery_name
# GALLERY_PATH = "galleries"
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes
# INDEXES_TITLE = "" # If this is empty, the default is BLOG_TITLE
# INDEXES_PAGES = "" # If this is empty, the default is 'old posts page %d' translated
# Name of the theme to use. Themes are located in themes/theme_name
# THEME = 'site'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONGIF_SUBTHEME = 'sky' # You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions between the slides
# THEME_REVEAL_CONGIF_TRANSITION = 'cube' # You can also use: page/concave/linear/none/default
# date format used to display post dates. (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon links like this:
# <link rel="name" href="file" sizes="size"/>
# About favicons, see: http://www.netmagazine.com/features/create-perfect-favicon
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# An HTML fragment describing the license, for the sidebar. Default is "".
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# Default is ''
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="http://nikola.ralsina.com.ar">Nikola</a>'
CONTENT_FOOTER = CONTENT_FOOTER.format(email=BLOG_EMAIL,
author=BLOG_AUTHOR,
date=time.gmtime().tm_year)
# To enable comments via Disqus, you need to create a forum at
# http://disqus.com, and set DISQUS_FORUM to the short name you selected.
# If you want to disable comments, set it to False.
# Default is "nikolademo", used by the demo sites
# DISQUS_FORUM = "nikolademo"
# Create index.html for story folders?
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
#MATHJAX_CONFIG = """
#<script type="text/x-mathjax-config">
#MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ]
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
#});
#</script>
#"""
# Enable Addthis social buttons?
# Defaults to true
# ADD_THIS_BUTTONS = True
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# RSS_LINK is an HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Defaults to True
# RSS_TEASERS = True
# A search form to search this site, for the sidebar. You can use a google
# custom search (http://www.google.com/cse/)
# Or a duckduckgo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where it
# appears on the navigation bar
#SEARCH_FORM = """
#<!-- Custom search -->
#<form method="get" id="search" action="http://duckduckgo.com/"
# class="navbar-form pull-left">
#<input type="hidden" name="sites" value="%s"/>
#<input type="hidden" name="k8" value="#444444"/>
#<input type="hidden" name="k9" value="#D51920"/>
#<input type="hidden" name="kt" value="h"/>
#<input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;"/>
#<input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" />
#</form>
#<!-- End of custom search -->
#""" % BLOG_URL
#
# Also, there is a local search plugin you can use.
# Use content distribution networks for jquery and twitter-bootstrap css and js
# If this is True, jquery is served from the Google CDN and twitter-bootstrap
# is served from the NetDNA CDN
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Google analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# ANALYTICS = ""
# Metadata can be extracted from the filename by using a
# regular expression.
# To make it work you need to use named groups in your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
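# As an illustrative sketch (not from the original file), the named groups in
# such a pattern are what carry the metadata:
#
# import re
# match = re.match(r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md',
#                  '2012-03-30-hello-world-Hello.md')
# if match:
#     print(match.group('date'), match.group('slug'), match.group('title'))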
# Nikola supports Twitter Card summaries / Open Graph.
# Twitter cards make it possible for you to attach media to Tweets
# that link to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit https://dev.twitter.com/form/participate-twitter-cards
#
# Uncomment and modify the following lines to match your accounts.
# Specifying the id for either 'site' or 'creator' will be preferred
# over the cleartext username. Specifying an ID is not necessary.
# Displaying images is currently not supported.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards / Open Graph
# # 'site': '@website', # twitter nick for the website
# # 'site:id': 123456, # Same as site, but the website's Twitter user ID instead.
# # 'creator': '@username', # Username for the content creator / author.
# # 'creator:id': 654321, # Same as creator, but the Twitter user's ID.
# }
# If you want to use formatted post times in W3C-DTF format (e.g. 2012-03-30T23:00:00+02:00),
# set the timezone if you want a localized posted date.
#
# TIMEZONE = 'Europe/Zurich'
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
| servalproject/nikola | tests/data/translated_titles/conf.py | Python | mit | 13,774 | ["VisIt"] | 6cba3acc3601d00edb738c6cbc9ad2aa69ddde40050666a9a31db62ffcc36e5d |
from django.shortcuts import render
from django.views.generic import TemplateView, View
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.db.models import Count, Q, Prefetch, Min, Max
from django.core.cache import cache  # used below via cache.get
from common.diagrams_gpcr import DrawSnakePlot
from common.definitions import AA_PROPENSITY, HYDROPHOBICITY
from common.views import AbsTargetSelection
from common.definitions import AMINO_ACIDS, AMINO_ACID_GROUPS, STRUCTURAL_RULES
from construct.models import *
from construct.functions import *
from construct.tool import *
from protein.models import Protein, ProteinConformation, ProteinSegment
from structure.models import Structure
from mutation.models import Mutation
from residue.models import ResiduePositionSet
from interaction.models import ResidueFragmentInteraction,StructureLigandInteraction
from datetime import datetime
import time
import json
import copy
import re
from collections import OrderedDict
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
# Create your views here.
#@cache_page(60 * 60 * 24)
def detail(request, slug):
# get constructs
c = Construct.objects.defer('schematics','snakecache').get(name=slug)
# get residues
residues = Residue.objects.filter(protein_conformation__protein=c.protein).order_by('sequence_number').prefetch_related(
'protein_segment', 'generic_number', 'display_generic_number')
residues_lookup = {}
for r in residues:
residues_lookup[r.sequence_number] = r
schematics = c.schematic()
chunk_size = 10
context = {'c':c, 'chunk_size': chunk_size, 'annotations': json.dumps(schematics['annotations']), 'schematics': schematics, 'residues_lookup': residues_lookup}
return render(request,'construct/construct_detail.html',context)
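# A hypothetical URLconf entry for the detail view above (illustrative, not
# part of the original file); the slug captured in the URL is passed through
# as the view's `slug` argument:
#
# from django.conf.urls import url
# from construct import views
#
# urlpatterns = [
#     url(r'^construct/(?P<slug>[^/]+)/$', views.detail, name='construct_detail'),
# ]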
class ConstructStatistics(TemplateView):
"""
Fetching construct data for browser
"""
template_name = "construct/statistics.html"
def get_context_data (self, **kwargs):
context = super(ConstructStatistics, self).get_context_data(**kwargs)
cache_temp = cache.get('construct_statistics')
# if cache_temp:
# for key, val in cache_temp.items():
# context[key] = val
# return context
cons = Construct.objects.all().defer('schematics','snakecache').order_by("protein__entry_name","crystal__pdb_code").prefetch_related(
"crystal","mutations__effects","purification","protein__family__parent__parent__parent", "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
"protein__species","structure__pdb_code","structure__publication__web_link", "contributor",
"structure__protein_conformation__protein__parent", "structure__state")
#PREPARE DATA
proteins_ids = Construct.objects.all().values_list('protein', flat = True)
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).filter(residue__display_generic_number__label__in=['1.50x50','7.50x50','8.50x50','5.50x50','6.50x50','3.50x50','4.50x50']).values_list('protein__entry_name','residue__sequence_number','residue__display_generic_number__label')
x50s = {}
for pc in pconfs:
if pc[0] not in x50s:
x50s[pc[0]] = {}
x50s[pc[0]][pc[2].replace(".50","")] = pc[1]
# print(x50s)
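# For orientation: the loop above leaves x50s keyed by receptor entry name,
# mapping each requested reference label ('1x50', '3x50', '4x50', '5x50',
# '6x50', '7x50', '8x50') to its sequence number in that receptor, e.g.
#   x50s['adrb2_human']['3x50'] -> sequence number of the residue at 3.50x50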
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).filter(residue__protein_segment__slug__in=['TM3','TM4','TM5','TM6']).values('protein__entry_name','residue__protein_segment__slug').annotate(start=Min('residue__sequence_number'),GN=Max('residue__display_generic_number__label'),GN2=Min('residue__display_generic_number__label'),end=Max('residue__sequence_number'))
# print(pconfs)
# x50s = {}
track_anamalities = {}
for pc in pconfs:
#print(pc)
entry_name = pc['protein__entry_name']
helix = pc['residue__protein_segment__slug'][-1]
if entry_name not in track_anamalities:
track_anamalities[entry_name] = {}
if helix not in track_anamalities[entry_name]:
track_anamalities[entry_name][helix] = [0,0]
x50 = x50s[entry_name][helix+"x50"]
gn_start = int(pc['GN2'][-2:])
gn_end = int(pc['GN'][-2:])
seq_start = pc['start']
seq_end = pc['end']
seq_range_start = x50-seq_start
seq_range_end = seq_end-x50
gn_range_start = 50-gn_start
gn_range_end = gn_end-50
if seq_range_start!=gn_range_start:
# print(entry_name,"Helix",helix, "has anamolity in start",gn_range_start-seq_range_start)
track_anamalities[entry_name][helix][0] = gn_range_start-seq_range_start
if seq_range_end!=gn_range_end:
# print(entry_name,"Helix",helix, "has anamolity in end",gn_range_end-seq_range_end)
track_anamalities[entry_name][helix][1] = gn_range_end-seq_range_end
#print(pc,helix,x50,gn_start,gn_end,seq_start,seq_end,,x50-seq_start,50-gn_start,gn_end-50)
#print(x50s[entry_name])
# if pc[0] not in x50s:
# x50s[pc[0]] = {}
# x50s[pc[0]][pc[2]] = pc[1]
# print(track_anamalities)
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).prefetch_related('protein').filter(residue__protein_segment__slug='TM1').annotate(start=Min('residue__sequence_number'))
#pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__generic_number__label__in=['1x50']).values_list('protein__entry_name','residue__sequence_number','residue__generic_number__label')
tm1_start = {}
for pc in pconfs:
tm1_start[pc.protein.entry_name] = pc.start
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).prefetch_related('protein').filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number'),end=Max('residue__sequence_number'))
cterm_start = {}
cterm_end = {}
for pc in pconfs:
cterm_start[pc.protein.entry_name] = pc.start
cterm_end[pc.protein.entry_name] = pc.end
#GRAB RESIDUES for mutations
mutations = []
positions = []
proteins = []
full_p_name = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
full_p_name[entry_name] = p.name.replace('receptor','').replace('-adrenoceptor','')
p_class = p.family.slug.split('_')[0]
pdb = c.crystal.pdb_code
pdb = '' # do not count same mutation many times
for mutation in c.mutations.all():
skip = True
for effect in mutation.effects.all():
if effect.slug == 'thermostabilising':
skip = False
if skip:
continue
if p.entry_name not in proteins:
proteins.append(entry_name)
mutations.append((mutation,entry_name,pdb,p_class))
if mutation.sequence_number not in positions:
positions.append(mutation.sequence_number)
rs = Residue.objects.filter(protein_conformation__protein__entry_name__in=proteins, sequence_number__in=positions).prefetch_related('generic_number','protein_conformation__protein','annotations__data_type')
rs_lookup = {}
gns = []
for r in rs:
if not r.generic_number: #skip non gn
continue
entry_name = r.protein_conformation.protein.entry_name
pos = r.sequence_number
# segment = r.protein_segment.slug
if entry_name not in rs_lookup:
rs_lookup[entry_name] = {}
if pos not in rs_lookup[entry_name]:
rs_lookup[entry_name][pos] = r
rs = Residue.objects.filter(protein_conformation__protein__id__in=proteins_ids, protein_segment__slug__in=['N-term','C-term'],annotations__data_type__slug='dynamine').prefetch_related('generic_number','protein_segment','protein_conformation__protein','annotations__data_type')
rs_annotations = {}
for r in rs:
entry_name = r.protein_conformation.protein.entry_name
pos = r.sequence_number
segment = r.protein_segment.slug
if entry_name not in rs_annotations:
rs_annotations[entry_name] = {}
if segment not in rs_annotations[entry_name]:
rs_annotations[entry_name][segment] = {}
if pos not in rs_annotations[entry_name][segment]:
try:
rs_annotations[entry_name][segment][pos] = r.annotations.all()[0].value
except:
print('no dynamine for ',entry_name,pos,r.pk)
# print(rs_annotations)
truncations = {}
truncations_new = {}
truncations['nterm'] = {}
truncations['nterm_fusion'] = {}
truncations_new['nterm'] = OrderedDict()
truncations_new['cterm'] = OrderedDict()
truncations_new['nterm_fusion'] = OrderedDict()
truncations_new['icl3_fusion'] = OrderedDict()
truncations_new['icl2_fusion'] = OrderedDict()
truncations_new['icl3_start'] = OrderedDict()
truncations_new['icl3_end'] = OrderedDict()
truncations_new['icl3_fusion_start'] = OrderedDict()
truncations_new['icl3_fusion_end'] = OrderedDict()
truncations_new['icl2_fusion_start'] = OrderedDict()
truncations_new['icl2_fusion_end'] = OrderedDict()
track_fusions = OrderedDict()
track_fusions2 = OrderedDict()
track_without_fusions = OrderedDict()
truncations_new_possibilties = {}
truncations_maximums = {}
truncations_new_sum = {}
truncations['cterm'] = {}
truncations['icl3'] = {}
truncations['icl3_fusion'] = {}
truncations['icl2'] = {}
truncations['icl2_fusion'] = {}
class_names = {}
states = {}
linkers_exist_before = {}
linkers_exist_after = {}
fusion_by_pdb = {}
fusions_short = {
'Flavodoxin': 'Flav',
'T4 Lysozyme (T4L)': 'T4L',
'Rubredoxin': 'Rubr',
'PGS (Pyrococcus abyssi glycogen synthase)': 'PGS',
'BRIL (b562RIL)': 'BRIL',
'mT4L' : 'mT4L',
'OB1' : 'OB1',
'3A Arrestin': 'Arr'
}
for c in cons:
p = c.protein
entry_name = p.entry_name
pdb_code = c.crystal.pdb_code
entry_name_pdb = entry_name+ "_"+ pdb_code
state = c.structure.state.slug
entry_name_pdb_state = entry_name+ "_"+ pdb_code + "_" +state
crystal_p = c.structure.protein_conformation.protein.parent.entry_name
if entry_name!=crystal_p:
print("ERROR ERROR ERROR",pdb_code,entry_name,crystal_p)
c.protein = c.structure.protein_conformation.protein.parent
c.save()
#print(c.structure.state.slug)
p_class = p.family.slug.split('_')[0]
if p_class not in class_names:
class_names[p_class] = re.sub(r'\([^)]*\)', '', p.family.parent.parent.parent.name)
p_class_name = class_names[p_class].strip()
states[pdb_code] = state
# if state=='active':
# p_class_name += "_active"
# if state=='intermediate':
# p_class_name += "_interm"
fusion_n = False
fusion_icl3 = False
fusion_position, fusions, linkers = c.fusion()
found_nterm = False
found_cterm = False
if p_class_name not in track_fusions:
track_fusions[p_class_name] = OrderedDict()
# print(entry_name_pdb,fusions)
if fusions:
if entry_name_pdb not in track_fusions[p_class_name]:
track_fusions[p_class_name][entry_name_pdb] = {'found':[],'for_print':[], '3_4_length':[], '5_6_length':[], '3_4_deleted':[], '5_6_deleted':[]}
if fusions:
fusion_name = fusions[0][2]
if fusion_name in fusions_short:
fusion_by_pdb[pdb_code] = fusions_short[fusion_name]
else:
fusion_by_pdb[pdb_code] = fusion_name
if fusion_name not in track_fusions2:
track_fusions2[fusion_name] = {'found':[],'for_print':[]}
# if entry_name=='aa2ar_human':
# print(state,p_class_name)
for deletion in c.deletions.all():
# if entry_name=='aa2ar_human':
# print(entry_name,deletion.start,cterm_start[entry_name],c.name) # lpar1_human
if deletion.end <= x50s[entry_name]['1x50']:
found_nterm = True
bw = "1."+str(50-x50s[entry_name]['1x50']+deletion.end)
#bw = bw + " " + str(x50s[entry_name]['1x50']-deletion.end)
from_tm1 = tm1_start[entry_name] - deletion.end-1
if entry_name=='agtr1_human' and pdb_code=='4YAY':
# print(from_tm1,entry_name,c.name,fusion_position)
# This is due to odd situation with 4YAY where they deleted in the middle.
from_tm1 = 14
if pdb_code=='4ZUD':
from_tm1 = 9
position = 'nterm'
if fusion_position=='nterm' or fusion_position=='nterm_icl3':
position = 'nterm_fusion'
if from_tm1 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(from_tm1)
if from_tm1 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(from_tm1)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw] = []
if entry_name_pdb not in truncations[position][p_class_name][bw]:
truncations[position][p_class_name][bw].append(entry_name_pdb)
if position not in truncations_new_possibilties:
truncations_new_possibilties[position] = []
if position not in truncations_maximums:
truncations_maximums[position] = {}
if p_class_name not in truncations_maximums[position]:
truncations_maximums[position][p_class_name] = 0
if from_tm1 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_tm1)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
if tm1_start[entry_name]-1 > truncations_maximums[position][p_class_name]:
truncations_maximums[position][p_class_name] = tm1_start[entry_name]-1
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb_state not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb_state] = [[],[],[tm1_start[entry_name]-1]]
if fusion_position!='nterm' or 1==1:
if from_tm1 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb_state][0]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb_state][0].append(from_tm1)
if from_tm1 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_tm1] = 0
truncations_new_sum[position][p_class_name][from_tm1] += 1
# if from_tm1 not in truncations_new[position][p_class_name]['possiblities']:
# truncations_new[position][p_class_name]['possiblities'].append(from_tm1)
# truncations_new[position][p_class_name]['possiblities'] = sorted(truncations_new[position][p_class_name]['possiblities'])
# if from_tm1==0:
# print(state,entry_name,p_class_name,truncations_new[position][p_class_name]['receptors'][entry_name])
if deletion.start >= x50s[entry_name]['7x50']:
found_cterm = True
import html
# bw = x50s[entry_name]['8x50']-deletion.start
# bw = "8."+str(50-x50s[entry_name]['8x50']+deletion.start)
from_h8 = deletion.start - cterm_start[entry_name]
# print(p_class_name,':',html.unescape(p.family.name),':',entry_name,':',pdb_code,':',deletion.start-x50s[entry_name]['8x50'],':',from_h8)
if p_class_name not in truncations['cterm']:
truncations['cterm'][p_class_name] = {}
if bw not in truncations['cterm'][p_class_name]:
truncations['cterm'][p_class_name][bw] = []
if entry_name_pdb not in truncations['cterm'][p_class_name][bw]:
truncations['cterm'][p_class_name][bw].append(entry_name_pdb)
position = 'cterm'
if deletion.start>1000:
#TODO there are some wrong ones; they can be spotted by positions >1000, which are fusion positions
continue
print(deletion.start,from_h8,cterm_start[entry_name],c.crystal.pdb_code )
if position not in truncations_new_possibilties:
truncations_new_possibilties[position] = []
if position not in truncations_maximums:
truncations_maximums[position] = {}
if p_class_name not in truncations_maximums[position]:
truncations_maximums[position][p_class_name] = 0
if from_h8 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_h8)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
if from_h8 > truncations_maximums[position][p_class_name]:
truncations_maximums[position][p_class_name] = from_h8
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb] = [[],[],[cterm_end[entry_name]-cterm_start[entry_name]+1]]
if from_h8 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb][0].append(from_h8)
if from_h8 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_h8] = 0
truncations_new_sum[position][p_class_name][from_h8] += 1
if deletion.start > x50s[entry_name]['5x50'] and deletion.start < x50s[entry_name]['6x50']:
# if linkers['before']:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
if linkers['before']:
deletion.start += len(linkers['before'])
linkers_exist_before[c.crystal.pdb_code] = len(linkers['before'])
if linkers['after']:
deletion.end -= len(linkers['after'])
linkers_exist_after[c.crystal.pdb_code] = len(linkers['after'])
# if linkers['before']:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start-1
bw = "5x"+str(50-x50s[entry_name]['5x50']+deletion.start+track_anamalities[entry_name]['5'][1]-1)
bw_real = "5."+str(50-x50s[entry_name]['5x50']+deletion.start-1)
bw2 = "6x"+str(50-x50s[entry_name]['6x50']+deletion.end+track_anamalities[entry_name]['6'][0]+1)
bw2_real = "6."+str(50-x50s[entry_name]['6x50']+deletion.end+1)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl3'
del_length = 1+deletion.end-deletion.start
if bw=='5x107':
# Skip these false deletions in melga
continue
# if entry_name=='s1pr1_human':
# print("CHECK",deletion.start,deletion.end, bw,bw2)
if entry_name=='s1pr1_human' and deletion.start==250:
# Skip these false deletions in s1pr1_human (3V2W, 3V2Y)
continue
l_5_6_length = x50s[entry_name]['6x50']-x50s[entry_name]['5x50']
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
position = 'icl3_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
# else:
# print(entry_name,c.name,fusions)
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
#Track those with fusion
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['5_6_deleted']:
track_fusions[p_class_name][entry_name_pdb]['5_6_deleted'].append(del_length)
if l_5_6_length not in track_fusions[p_class_name][entry_name_pdb]['5_6_length']:
track_fusions[p_class_name][entry_name_pdb]['5_6_length'].append(l_5_6_length)
if p_class_name not in truncations_new[position+'_start']:
truncations_new[position+'_start'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_start'][p_class_name]['receptors']:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw]]
if bw not in truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0].append(bw)
if p_class_name not in truncations_new[position+'_end']:
truncations_new[position+'_end'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_end'][p_class_name]['receptors']:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw2]]
if bw not in truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0].append(bw2)
else:
# print('ICL3 CUT WITHOUT FUSION',bw_combine,entry_name,c.name)
if p_class_name not in track_without_fusions:
track_without_fusions[p_class_name] = OrderedDict()
if entry_name_pdb not in track_without_fusions[p_class_name]:
track_without_fusions[p_class_name][entry_name_pdb] = {'found':[],'for_print':[], '3_4_length':[], '5_6_length':[], '3_4_deleted':[], '5_6_deleted':[]}
#Track those without fusion
if bw not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_without_fusions[p_class_name][entry_name_pdb]['5_6_deleted']:
track_without_fusions[p_class_name][entry_name_pdb]['5_6_deleted'].append(del_length)
if l_5_6_length not in track_without_fusions[p_class_name][entry_name_pdb]['5_6_length']:
track_without_fusions[p_class_name][entry_name_pdb]['5_6_length'].append(l_5_6_length)
if p_class_name not in truncations_new[position+'_start']:
truncations_new[position+'_start'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_start'][p_class_name]['receptors']:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw]]
if bw not in truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0].append(bw)
if p_class_name not in truncations_new[position+'_end']:
truncations_new[position+'_end'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_end'][p_class_name]['receptors']:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw2]]
if bw not in truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0].append(bw2)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
if deletion.start > x50s[entry_name]['3x50'] and deletion.start < x50s[entry_name]['4x50']:
# if fusion_icl3:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start
bw = "3x"+str(50-x50s[entry_name]['3x50']+deletion.start+track_anamalities[entry_name]['3'][1]-1)
bw_real = "3."+str(50-x50s[entry_name]['3x50']+deletion.start-1)
bw2 = "4x"+str(50-x50s[entry_name]['4x50']+deletion.end+track_anamalities[entry_name]['4'][0]+1)
bw2_real = "4."+str(50-x50s[entry_name]['4x50']+deletion.end+1)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl2'
del_length = 1+deletion.end-deletion.start
l_3_4_length = x50s[entry_name]['4x50']-x50s[entry_name]['3x50']
# print(fusion_position)
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
position = 'icl2_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
if p_class_name not in truncations_new[position+'_start']:
truncations_new[position+'_start'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_start'][p_class_name]['receptors']:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw]]
if bw not in truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0].append(bw)
if p_class_name not in truncations_new[position+'_end']:
truncations_new[position+'_end'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_end'][p_class_name]['receptors']:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw2]]
if bw not in truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0].append(bw2)
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_deleted'].append(del_length)
if l_3_4_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_length'].append(l_3_4_length)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
if fusions:
if track_fusions[p_class_name][entry_name_pdb] == {'found':[],'for_print':[], '3_4_length':[], '5_6_length':[], '3_4_deleted':[], '5_6_deleted':[]}:
if fusion_position=='nterm' or fusions[0][3].startswith('N-term'):
from_tm1 = tm1_start[entry_name]-1
# print(entry_name_pdb,'Seems to be without truncated N-term, fixme',tm1_start[entry_name])
position = 'nterm_fusion'
if from_tm1 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_tm1)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
if from_tm1 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(from_tm1)
if from_tm1 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(from_tm1)
elif not fusions[0][3].startswith('C-term'):
# print(entry_name_pdb,'NOT FOUND CUT??',fusion_position,fusions)
deletion.start = fusions[0][4] #the next one is "cut"
deletion.end = fusions[0][4]+1 #the 'prev' is cut
if deletion.start > x50s[entry_name]['5x50'] and deletion.start < x50s[entry_name]['6x50']:
# if fusion_icl3:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start
bw = "5x"+str(50-x50s[entry_name]['5x50']+deletion.start+track_anamalities[entry_name]['5'][1])
bw_real = "5."+str(50-x50s[entry_name]['5x50']+deletion.start)
bw2 = "6x"+str(50-x50s[entry_name]['6x50']+deletion.end+track_anamalities[entry_name]['6'][0])
bw2_real = "6."+str(50-x50s[entry_name]['6x50']+deletion.end)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl3'
del_length = 1+deletion.end-deletion.start
l_5_6_length = x50s[entry_name]['6x50']-x50s[entry_name]['5x50']
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
position = 'icl3_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
#Track those with fusion
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['5_6_deleted'].append(del_length)
if l_5_6_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['5_6_length'].append(l_5_6_length)
else:
print('ICL3 CUT WITHOUT FUSION',bw_combine,entry_name,c.name)
if p_class_name not in track_without_fusions:
track_without_fusions[p_class_name] = OrderedDict()
if entry_name_pdb not in track_without_fusions[p_class_name]:
track_without_fusions[p_class_name][entry_name_pdb] = {'found':[],'for_print':[]}
#Track those without fusion
if bw not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
if deletion.start > x50s[entry_name]['3x50'] and deletion.start < x50s[entry_name]['4x50']:
# if fusion_icl3:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start
bw = "3x"+str(50-x50s[entry_name]['3x50']+deletion.start+track_anamalities[entry_name]['3'][1])
bw_real = "3."+str(50-x50s[entry_name]['3x50']+deletion.start)
bw2 = "4x"+str(50-x50s[entry_name]['4x50']+deletion.end+track_anamalities[entry_name]['4'][0])
bw2_real = "4."+str(50-x50s[entry_name]['4x50']+deletion.end)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl2'
del_length = deletion.end-deletion.start-1
l_3_4_length = x50s[entry_name]['4x50']-x50s[entry_name]['3x50']
if fusion_position=='icl3':
position = 'icl2_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_deleted'].append(del_length)
if l_3_4_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_length'].append(l_3_4_length)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
else:
print(entry_name_pdb," is CTERM FUSION")
position = 'nterm'
if fusion_position=='nterm' or fusion_position=='nterm_icl3':
position = 'nterm_fusion'
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
# if entry_name=='aa2ar_human':
# print(found_nterm,entry_name,position,p_class_name)
from_tm1 = tm1_start[entry_name]-1
if not found_nterm:
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if from_tm1 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_tm1] = 0
#if full receptor in xtal
if entry_name_pdb not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb] = [[],[from_tm1],[from_tm1]]
# add one for this position if it is first time receptor is mentioned
truncations_new_sum[position][p_class_name][from_tm1] += 1
else:
if from_tm1 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1].append(from_tm1)
truncations_new_sum[position][p_class_name][from_tm1] += 1
# else:
# #if full was found, fill in the max
# #print(entry_name,found_nterm)
# if from_tm1 not in truncations_new[position][p_class_name]['receptors'][entry_name][1]:
# truncations_new[position][p_class_name]['receptors'][entry_name][2].append(from_tm1)
if position!='nterm_fusion' and from_tm1 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_tm1)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
position = 'cterm'
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if position not in truncations_new_possibilties:
truncations_new_possibilties[position] = []
from_h8 = cterm_end[entry_name] - cterm_start[entry_name]+1
if not found_cterm:
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if from_h8 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_h8] = 0
if entry_name_pdb not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb] = [[],[from_h8],[from_h8]]
# add one for this position if it is the first time the receptor is mentioned
truncations_new_sum[position][p_class_name][from_h8] += 1
else:
if from_h8 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1].append(from_h8)
truncations_new_sum[position][p_class_name][from_h8] += 1
# else:
# if from_h8 not in truncations_new[position][p_class_name]['receptors'][entry_name][1]:
# truncations_new[position][p_class_name]['receptors'][entry_name][1].append(from_h8)
if from_h8 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_h8)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
#print(truncations_new)
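# Condense the per-PDB data for each site and class: group constructs by receptor and cut position,
# count constructs per position, note fusion proteins and linkers, and track the widest position range
# per site so the result tables can later be padded to equal width.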
max_pos_range = {}
max_pos_range2 = {}
max_pos_range3 = {}
site_fusions = {}
for site in truncations_new:
# print(site)
max_pos_range[site] = 0
max_pos_range2[site] = [100,0,0]
max_pos_range3[site] = [0,0]
site_fusions[site] = []
for pclass, val in truncations_new[site].items():
# print(site,pclass)
unique_sites = OrderedDict()
sites = {}
distinct_fusion = {}
min_cut = 0
max_cut = 0
if site not in truncations_new_sum:
truncations_new_sum[site] = {}
if pclass not in truncations_new_sum[site]:
truncations_new_sum[site][pclass] = {}
for r,v in val['receptors'].items():
entry_name = "_".join(r.split("_")[:2])
original_entryname=entry_name
pdbcode = r.split("_")[2]
if len(v[0])>1:
print('multiple cuts?',entry_name,r,v[0])
cut = v[0][0] if v[0] else v[2][0]
if site in truncations_maximums:
if cut < min_cut:
min_cut = cut
if cut > max_cut:
max_cut = cut
# print(site,r,v,pdbcode,entry_name,cut)
entry_name += "_"+str(cut)
if entry_name not in unique_sites:
unique_sites[entry_name] = v
unique_sites[entry_name].append([]) #for pdbs
unique_sites[entry_name].append('') #for GPCR
unique_sites[entry_name].append('') #for Species
unique_sites[entry_name].append({'inactive':'','intermediate':'','active':''}) #for State
unique_sites[entry_name].append('') #for cut
unique_sites[entry_name].append(full_p_name[original_entryname])
unique_sites[entry_name].append([]) #for fusions
unique_sites[entry_name].append([]) #for linkers #10
if cut not in sites:
sites[cut] = 0
sites[cut] += 1
unique_sites[entry_name][3].append(pdbcode)
unique_sites[entry_name][4] = original_entryname.split("_")[0].upper()
unique_sites[entry_name][5] = original_entryname.split("_")[1].lower()
if unique_sites[entry_name][5]=='human':
unique_sites[entry_name][5] = ''
if unique_sites[entry_name][6][states[pdbcode]] != '':
unique_sites[entry_name][6][states[pdbcode]] += 1
else:
unique_sites[entry_name][6][states[pdbcode]] = 1
unique_sites[entry_name][7] = cut
if pdbcode in fusion_by_pdb:
if fusion_by_pdb[pdbcode] not in unique_sites[entry_name][9]:
unique_sites[entry_name][9].append(fusion_by_pdb[pdbcode])
if fusion_by_pdb[pdbcode] not in distinct_fusion:
distinct_fusion[fusion_by_pdb[pdbcode]] = 0
distinct_fusion[fusion_by_pdb[pdbcode]] += 1
if fusion_by_pdb[pdbcode] not in site_fusions[site]:
site_fusions[site].append(fusion_by_pdb[pdbcode])
if site=='icl3_fusion_start' and pdbcode in linkers_exist_before:
# print('FOUND',linkers_exist_before[pdbcode])
unique_sites[entry_name][10].append(str(linkers_exist_before[pdbcode]))
if site=='icl3_fusion_end' and pdbcode in linkers_exist_after:
# print('FOUND',linkers_exist_after[pdbcode])
unique_sites[entry_name][10].append(str(linkers_exist_after[pdbcode]))
# print(sites)
truncations_new_sum[site][pclass] = sites
if site in truncations_maximums:
unique_sites = OrderedDict(sorted(unique_sites.items(), key=lambda x: int(x[0].split("_")[-1])))
else:
unique_sites = OrderedDict(sorted(unique_sites.items(), key=lambda x: x[0].split("_")[-1]))
val['range'] = sorted(list(sites.keys()))
first_range = val['range'][0]
last_range = val['range'][-1]
prefix = val['range'][0].split('x')[0]
start = int(val['range'][0].split('x')[1])
end = int(val['range'][-1].split('x')[1])+1
max_pos_range2[site][2] = prefix
if start < max_pos_range2[site][0]:
max_pos_range2[site][0] = start
if end > max_pos_range2[site][1]:
max_pos_range2[site][1] = end
# print('\n ### doing range',site, sites,max_pos_range2[site],val['range'])
val['receptors'] = unique_sites
val['fusions'] = distinct_fusion
if site in truncations_maximums:
val['range'] = list(range(min_cut,truncations_maximums[site][pclass]+1))
if min_cut < max_pos_range3[site][0]:
max_pos_range3[site][0] = min_cut
if max_cut > max_pos_range3[site][1]:
max_pos_range3[site][1] = max_cut
if 'fusion' in site:
val['range'] = list(range(min_cut,max_cut+1))
if len(val['range'])>300:
val['range'] = val['range'][::2]
if len(val['range'])>max_pos_range[site]:
max_pos_range[site] = len(val['range'])
# Add offset to align tables
for site in truncations_new:
for pclass, val in truncations_new[site].items():
for recp, rval in val['receptors'].items():
if rval[10]:
# print(recp,rval[10])
if len(rval[10])!=len(rval[3]): #if pdbs with linker is not same as amount of linkers
rval[10].append('0')
rval[10] = ','.join(list(set(rval[10])))
else:
rval[10] = '' #no linkers
temp = {}
for fusion in site_fusions[site]:
if fusion in val['fusions']:
temp[fusion] = val['fusions'][fusion]
else:
temp[fusion] = ''
val['fusions'] = temp
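# Pad each class's position range with a 5000 placeholder (presumably treated as empty when rendered)
# so every class spans the same number of columns, then build the matching 'sum' row.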
if 'range' in val:
if len(val['range'])<max_pos_range[site]:
val['range'] = val['range'] + [5000] * (max_pos_range[site]-len(val['range']))
if site in truncations_maximums and 'fusion' in site:
val['range'] = list(range(max_pos_range3[site][0],max_pos_range3[site][1]+1))
if max_pos_range2[site][2] != 0:
temp = []
for x in range(max_pos_range2[site][0],max_pos_range2[site][1]):
temp.append(max_pos_range2[site][2]+"x"+str(x))
val['range'] = temp
temp = []
for x in val['range']:
if x in truncations_new_sum[site][pclass]:
temp.append(truncations_new_sum[site][pclass][x])
else:
temp.append('')
val['sum'] = temp
# print(linkers_exist_before,linkers_exist_after)
# print("NEWCHECK",truncations_new['icl3_start'])
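# Re-key each class's position sums against the full list of observed positions so every class shows
# the same columns; all sites except the C-terminus are listed in reverse order.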
for pos, p_vals in truncations_new_sum.items():
for pclass, c_vals in p_vals.items():
new_list = OrderedDict()
for position in truncations_new_possibilties[pos]:
if position in c_vals:
new_list[position] = c_vals[position]
else:
new_list[position] = ''
# print(pclass,c_vals,new_list)
if pos!='cterm':
truncations_new_sum[pos][pclass] = OrderedDict(reversed(list(new_list.items())))
else:
truncations_new_sum[pos][pclass] = OrderedDict(list(new_list.items()))
# print(truncations_new)
#truncations = OrderedDict(truncations)
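# Order the legacy truncation dict by segment and class, with the positions hit by the most receptors first.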
ordered_truncations = OrderedDict()
for segment, s_vals in sorted(truncations.items()):
#print(segment)
ordered_truncations[segment] = OrderedDict()
for p_class, c_vals in sorted(s_vals.items()):
#print(p_class)
ordered_truncations[segment][p_class] = OrderedDict()
for pos, p_vals in sorted(c_vals.items(),key=lambda x: (len(x[1]),x[0]), reverse=True):
#print(pos, len(p_vals))
ordered_truncations[segment][p_class][pos] = p_vals
fusion_possibilities = truncations_new_possibilties['nterm_fusion'][::-1] + ['_'] + truncations_new_possibilties['icl2_fusion_start'] + ['3_4_length'] + ['3_4_deleted'] + truncations_new_possibilties['icl2_fusion_end'] + ['.'] + truncations_new_possibilties['icl3_fusion_start'] + ['5_6_length'] + ['5_6_deleted'] + truncations_new_possibilties['icl3_fusion_end']
# fusion_possibilities = truncations_new_possibilties['nterm_fusion'][::-1] + truncations_new_possibilties['icl3_start'] + truncations_new_possibilties['icl3_end']
# print('fusion_possibilities',fusion_possibilities)
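# For each receptor, build a printable row marking found positions with 'C' and inserting the ICL2/ICL3
# lengths and deletions, while summing per-class counts for constructs with and without fusions.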
track_fusion_sums = OrderedDict()
track_without_fusion_sums = OrderedDict()
for pclass, receptors in track_fusions.items():
track_fusion_sums[pclass] = OrderedDict()
for p in fusion_possibilities:
track_fusion_sums[pclass][p] = 0
for receptor, vals in receptors.items():
temp = []
for p in fusion_possibilities:
if p in vals['found']:
temp.append('C')
track_fusion_sums[pclass][p] += 1
elif p=='3_4_length' and vals['3_4_length']:
temp.append(vals['3_4_length'][0])
elif p=='3_4_deleted' and vals['3_4_deleted']:
temp.append(vals['3_4_deleted'][0])
elif p=='5_6_length' and vals['5_6_length']:
temp.append(vals['5_6_length'][0])
elif p=='5_6_deleted' and vals['5_6_deleted']:
temp.append(vals['5_6_deleted'][0])
else:
temp.append(0)
vals['for_print'] = temp
# print(receptor,vals)
for pclass, receptors in track_without_fusions.items():
track_without_fusion_sums[pclass] = OrderedDict()
for p in truncations_new_possibilties['icl3_start'] + ['5_6_length'] + ['5_6_deleted'] + truncations_new_possibilties['icl3_end']:
track_without_fusion_sums[pclass][p] = 0
for receptor, vals in receptors.items():
temp = []
for p in truncations_new_possibilties['icl3_start'] + ['5_6_length'] + ['5_6_deleted'] + truncations_new_possibilties['icl3_end']:
if p in vals['found']:
temp.append('C')
track_without_fusion_sums[pclass][p] += 1
elif p=='5_6_length' and vals['5_6_length']:
temp.append(vals['5_6_length'][0])
elif p=='5_6_deleted' and vals['5_6_deleted']:
temp.append(vals['5_6_deleted'][0])
else:
temp.append(0)
vals['for_print'] = temp
# print(track_fusion_sums)
for fusion, vals in track_fusions2.items():
temp = []
for p in fusion_possibilities:
# print(p)
if p in vals['found']:
temp.append(1)
else:
temp.append("")
vals['for_print'] = temp
# print(track_without_fusions)
#truncations = OrderedDict(sorted(truncations.items(), key=lambda x: x[1]['hits'],reverse=True))
#print(ordered_truncations)
# print(track_fusions2)
context['truncations'] = ordered_truncations
context['truncations_new'] = truncations_new
context['truncations_new_possibilties'] = truncations_new_possibilties
context['truncations_new_sum'] = truncations_new_sum
context['fusion_possibilities'] = fusion_possibilities
context['test'] = track_fusions
context['test2'] = track_fusions2
context['track_fusion_sums'] = track_fusion_sums
context['track_without_fusions'] = track_without_fusions
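# Tally construct mutations per generic number, wild-type and mutant residue, and fill a 20x20
# substitution matrix; the first element of each matrix cell is a shading value capped at 1 (count/30).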
mutation_list = OrderedDict()
mutation_type = OrderedDict()
mutation_wt = OrderedDict()
mutation_mut = OrderedDict()
mutation_matrix = OrderedDict()
mutation_track = []
aa_list = list(AMINO_ACIDS.keys())[:20]
mutation_matrix_sum_mut = OrderedDict()
#print(aa_list)
for i, mut in enumerate(AMINO_ACIDS):
if i==20:
break
mutation_matrix[mut] = OrderedDict()
for aa in aa_list:
mutation_matrix[mut][aa] = [0,0]
mutation_matrix[mut][mut] = [0,'-']
mutation_matrix[mut]['sum'] = [0,0]
mutation_matrix_sum_mut[mut] = [0,0]
#print(mutation_matrix)
for mutation in mutations:
wt = mutation[0].wild_type_amino_acid
mut = mutation[0].mutated_amino_acid
entry_name = mutation[1]
pos = mutation[0].sequence_number
p_class = mutation[3]
p_class = class_names[p_class]
pdb = mutation[2]
mut_uniq = entry_name+'_'+str(pos)+'_'+wt+'_'+mut
if mut_uniq not in mutation_track:
# print(mut_uniq)
#do not count the same mutation (from different Xtals) multiple times
mutation_track.append(mut_uniq)
mutation_matrix[wt][mut][1] += 1
mutation_matrix[wt][mut][0] = min(1,round(mutation_matrix[wt][mut][1]/30,2))
mutation_matrix[wt]['sum'][1] += 1
mutation_matrix[wt]['sum'][0] = min(1,round(mutation_matrix[wt]['sum'][1]/30,2))
mutation_matrix_sum_mut[mut][1] += 1
mutation_matrix_sum_mut[mut][0] = min(1,round(mutation_matrix_sum_mut[mut][1]/30,2))
gn = ''
if entry_name in rs_lookup and pos in rs_lookup[entry_name]:
if rs_lookup[entry_name][pos].generic_number:
gn = rs_lookup[entry_name][pos].generic_number.label
# print(entry_name,"\t", pdb,"\t",gn,"\t", pos,"\t", wt,"\t", mut)
if p_class not in mutation_type:
mutation_type[p_class] = OrderedDict()
if wt+"=>"+mut not in mutation_type[p_class]:
mutation_type[p_class][wt+"=>"+mut] = {'hits':0, 'proteins':[]}
if entry_name not in mutation_type[p_class][wt+"=>"+mut]['proteins']:
mutation_type[p_class][wt+"=>"+mut]['proteins'].append(entry_name)
mutation_type[p_class][wt+"=>"+mut]['hits'] += 1
if p_class not in mutation_wt:
mutation_wt[p_class] = OrderedDict()
if wt not in mutation_wt[p_class]:
mutation_wt[p_class][wt] = {'hits':0, 'proteins':[]}
if entry_name not in mutation_wt[p_class][wt]['proteins']:
mutation_wt[p_class][wt]['proteins'].append(entry_name)
mutation_wt[p_class][wt]['hits'] += 1
if p_class not in mutation_mut:
mutation_mut[p_class] = OrderedDict()
if mut not in mutation_mut[p_class]:
mutation_mut[p_class][mut] = {'hits':0, 'proteins':[]}
if entry_name not in mutation_mut[p_class][mut]['proteins']:
mutation_mut[p_class][mut]['proteins'].append(entry_name)
mutation_mut[p_class][mut]['hits'] += 1
if entry_name not in rs_lookup:
continue
if pos not in rs_lookup[entry_name]:
continue
gn = rs_lookup[entry_name][pos].generic_number.label
if p_class not in mutation_list:
mutation_list[p_class] = OrderedDict()
if gn not in mutation_list[p_class]:
mutation_list[p_class][gn] = {'proteins':[], 'hits':0, 'mutation':[]}
if entry_name not in mutation_list[p_class][gn]['proteins']:
mutation_list[p_class][gn]['proteins'].append(entry_name)
mutation_list[p_class][gn]['hits'] += 1
mutation_list[p_class][gn]['mutation'].append((mutation[0].wild_type_amino_acid,mutation[0].mutated_amino_acid))
mutation_matrix_total_sum = sum([v[1] for k,v in mutation_matrix_sum_mut.items()])
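# Sort each per-class tally by number of hits, most frequent first.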
for p_class, values in mutation_list.items():
for gn, vals in values.items():
if vals['hits']<2:
pass
#values.pop(gn, None)
mutation_list[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
#mutation_list = OrderedDict(sorted(mutation_list.items(), key=lambda x: x[1]['hits'],reverse=True))
for p_class, values in mutation_type.items():
mutation_type[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
for p_class, values in mutation_wt.items():
mutation_wt[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
for p_class, values in mutation_mut.items():
mutation_mut[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
context['mutation_list'] = mutation_list
context['mutation_type'] = mutation_type
context['mutation_wt'] = mutation_wt
context['mutation_mut'] = mutation_mut
context['mutation_matrix'] = mutation_matrix
context['mutation_matrix_sum_mut'] = mutation_matrix_sum_mut
context['mutation_matrix_total_sum'] = mutation_matrix_total_sum
context['rs_annotations'] = rs_annotations
for c in cons:
pass
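# Cache everything shown on the statistics page so later requests can skip the heavy queries above.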
cache_temp = {}
cache_temp['truncations'] = ordered_truncations
cache_temp['truncations_new'] = truncations_new
cache_temp['truncations_new_possibilties'] = truncations_new_possibilties
cache_temp['truncations_new_sum'] = truncations_new_sum
cache_temp['fusion_possibilities'] = fusion_possibilities
cache_temp['test'] = track_fusions
cache_temp['test2'] = track_fusions2
cache_temp['track_fusion_sums'] = track_fusion_sums
cache_temp['track_without_fusions'] = track_without_fusions
cache_temp['mutation_list'] = mutation_list
cache_temp['mutation_type'] = mutation_type
cache_temp['mutation_wt'] = mutation_wt
cache_temp['mutation_mut'] = mutation_mut
cache_temp['mutation_matrix'] = mutation_matrix
cache_temp['mutation_matrix_sum_mut'] = mutation_matrix_sum_mut
cache_temp['rs_annotations'] = rs_annotations
cache.set('construct_statistics', cache_temp, 60*60*24*2) #two days
return context
class ConstructTable(TemplateView):
"""
Fetch construct mutation data, with per-position alignment statistics, for the residue table view
"""
template_name = "construct/residuetable.html"
def get_context_data (self, **kwargs):
context = super(ConstructTable, self).get_context_data(**kwargs)
cons = Construct.objects.all().prefetch_related(
"crystal","mutations","purification","protein__family__parent__parent__parent", "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
"protein__species","structure__pdb_code","structure__publication__web_link", "contributor")
#PREPARE DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
#GRAB RESIDUES for mutations
mutations = []
positions = []
proteins = []
class_names = {}
classes = []
for c in cons:
p = c.protein
entry_name = p.entry_name
p_class = p.family.slug.split('_')[0]
if p_class not in classes:
classes.append(p_class)
pdb = c.crystal.pdb_code
for mutation in c.mutations.all():
if p.entry_name not in proteins:
proteins.append(entry_name)
mutations.append((mutation,entry_name,pdb,p_class,c.name))
if mutation.sequence_number not in positions:
positions.append(mutation.sequence_number)
rs = Residue.objects.filter(protein_conformation__protein__entry_name__in=proteins, sequence_number__in=positions).prefetch_related('generic_number','protein_conformation__protein','protein_segment')
#print(classes)
excluded_segment = ['C-term','N-term']
list(settings.REFERENCE_POSITIONS.keys())
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
accessible_in_class = {}
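# For each GPCR class, align the human SWISSPROT receptors and record per-position amino-acid and
# property frequencies, plus which positions belong to the class binding-pocket residue set
# (binding-pocket sets are defined here for classes A, C and F only).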
for c in classes:
#if c=='001':
# continue
amino_acids_stats[c] = {}
amino_acids_groups_stats[c] = {}
accessible_in_class[c] = []
if c =='001':
residue_set_name = 'Class A binding pocket'
elif c=='004':
residue_set_name = 'Class C binding pocket'
elif c=='005':
residue_set_name = 'Class F binding pocket'
else:
residue_set_name = ''
if residue_set_name:
rset = ResiduePositionSet.objects.get(name=residue_set_name)
for residue in rset.residue_position.all():
accessible_in_class[c].append(residue.label)
#print(accessible_in_class)
alignment_proteins = Protein.objects.filter(family__slug__startswith=c, species__common_name='Human', source__name='SWISSPROT')
#print(c,len(alignment_proteins))
a = Alignment()
a.load_proteins(alignment_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
#print(a.amino_acid_stats)
# {% for ns, segments in a.generic_numbers.items %}
# <tr>
# {% for s, num in segments.items %}
# {% for n, dn in num.items %}
# {% if 'Common G-alpha numbering scheme' in a.numbering_schemes.0 %}
# <td class="ali-td-generic-num">{{ dn|make_list|slice:'2:'|join:''}}</td>
# {% else %}
# <td class="ali-td-generic-num">{{ dn|safe }}</td>
# {% endif %}
# {% endfor %}
# <td class="ali-td"> </td>
# {% endfor %}
# </tr>
# {% endfor %}
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[c][n] = temp
amino_acids_groups_stats[c][n] = temp2
a_id += 1
s_id += 1
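# Index residues by receptor entry name and sequence number for quick generic-number lookups.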
rs_lookup = {}
gns = []
for r in rs:
entry_name = r.protein_conformation.protein.entry_name
pos = r.sequence_number
segment = r.protein_segment.slug
if entry_name not in rs_lookup:
rs_lookup[entry_name] = {}
if pos not in rs_lookup[entry_name]:
rs_lookup[entry_name][pos] = r
count_per_gn = {}
for mutation in mutations:
entry_name = mutation[1]
pos = mutation[0].sequence_number
if rs_lookup[entry_name][pos].generic_number:
gn = rs_lookup[entry_name][pos].generic_number.label
if gn not in count_per_gn:
count_per_gn[gn] = {'hits': 0, 'proteins': []}
if entry_name not in count_per_gn[gn]['proteins']:
count_per_gn[gn]['proteins'].append(entry_name)
count_per_gn[gn]['hits'] += 1
#print(count_per_gn)
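# Build one row per construct mutation with segment, generic number, class alignment statistics and
# binding-pocket accessibility where a generic number exists.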
mutation_list = []
for mutation in mutations:
wt = mutation[0].wild_type_amino_acid
mut = mutation[0].mutated_amino_acid
entry_name = mutation[1]
pdb = mutation[2]
cname = mutation[4]
pos = mutation[0].sequence_number
p_class = mutation[3]
if p_class not in class_names:
class_names[p_class] = p.family.parent.parent.parent.name
p_class_name = class_names[p_class]
p_class = class_names[p_class]
if entry_name not in rs_lookup:
continue
if pos not in rs_lookup[entry_name]:
continue
segment = rs_lookup[entry_name][pos].protein_segment.slug
if rs_lookup[entry_name][pos].generic_number:
gn = rs_lookup[entry_name][pos].generic_number.label
stats = amino_acids_stats[mutation[3]][gn]
stats2 = amino_acids_groups_stats[mutation[3]][gn]
if gn in accessible_in_class[mutation[3]]:
accessible = 'yes'
else:
accessible = 'no'
count = count_per_gn[gn]['hits']
else:
gn = ''
stats = ''
stats2 = ''
accessible = 'N/A'
mutation_list.append({'entry_name':entry_name,'pdb':pdb,'cname':cname, 'segment':segment,'pos': pos, 'gn': gn, 'wt': wt, 'mut': mut,'p_class': p_class, 'amino_acids':stats, 'amino_acids_groups':stats2, 'accessible': accessible, 'count': count})
context['amino_acids'] = AMINO_ACIDS
context['amino_groups'] = AMINO_ACID_GROUPS
context['mutation_list'] = mutation_list
#print(mutation_list)
return context
class ConstructMutations(TemplateView):
"""
Fetch construct mutation data, grouped by receptor position and mutated residue, for the mutation browser
"""
template_name = "construct/mutations.html"
def get_context_data (self, **kwargs):
context = super(ConstructMutations, self).get_context_data(**kwargs)
cons = Construct.objects.all().prefetch_related(
"crystal","mutations__effects","purification","protein__family__parent__parent__parent", "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
"protein__species","structure__pdb_code","structure__publication__web_link", "contributor")
#PREPARE DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
#GRAB RESIDUES for mutations
mutations = []
positions = []
proteins = []
class_names = {}
for c in cons:
# print(c)
p = c.protein
entry_name = p.entry_name
p_class = p.family.slug.split('_')[0]
pdb = c.crystal.pdb_code
for mutation in c.mutations.all():
if p.entry_name not in proteins:
proteins.append(entry_name)
mutations.append((mutation,entry_name,pdb,p_class,c.name,p))
if mutation.sequence_number not in positions:
positions.append(mutation.sequence_number)
rs = Residue.objects.filter(protein_conformation__protein__entry_name__in=proteins, sequence_number__in=positions).prefetch_related('generic_number','protein_conformation__protein','protein_segment')
rs_lookup = {}
gns = []
for r in rs:
# print("r",r)
entry_name = r.protein_conformation.protein.entry_name
pos = r.sequence_number
segment = r.protein_segment.slug
if entry_name not in rs_lookup:
rs_lookup[entry_name] = {}
if pos not in rs_lookup[entry_name]:
rs_lookup[entry_name][pos] = r
mutation_list = []
new_mutations = {}
overall_mut_types = set()
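# Merge identical mutations (same receptor, position and mutated residue) across structures,
# pooling their effect annotations and PDB codes.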
for mutation in mutations:
wt = mutation[0].wild_type_amino_acid
mut = mutation[0].mutated_amino_acid
mut_type = mutation[0].effects.all().values('slug')
mut_types = []
for eff in mut_type:
mut_types.append(eff['slug'])
overall_mut_types.add(eff['slug'])
mut_type = ",".join(mut_types)
# # for
# print(mut_type)
# mut_type = ''
entry_name = mutation[1]
pdb = mutation[2]
cname = mutation[4]
pos = mutation[0].sequence_number
p_class = mutation[3]
p = mutation[5]
if p_class not in class_names:
class_names[p_class] = p.family.parent.parent.parent.short
p_class_name = class_names[p_class]
p_class = class_names[p_class]
entry_short = p.entry_short
receptor_short = p.short
if entry_name not in rs_lookup:
continue
if pos not in rs_lookup[entry_name]:
continue
segment = rs_lookup[entry_name][pos].protein_segment.slug
if rs_lookup[entry_name][pos].generic_number:
gn = rs_lookup[entry_name][pos].generic_number.label
else:
gn = ''
key = mutation[1]+"_"+str(mutation[0].sequence_number)+"_"+mutation[0].mutated_amino_acid
if key not in new_mutations:
new_mutations[key] = {'entry_name':entry_short,'receptor_short':receptor_short,'cname':cname, 'segment':segment,'pos': pos, 'gn': gn, 'wt': wt, 'mut': mut,'p_class': p_class, 'type': set(), 'pdbs': set()}
new_mutations[key]['type'].update(mut_types)
new_mutations[key]['pdbs'].add(pdb)
mutation_list.append({'entry_name':entry_name,'pdb':pdb,'cname':cname, 'segment':segment,'pos': pos, 'gn': gn, 'wt': wt, 'mut': mut,'p_class': p_class, 'type': mut_type})
context['mutation_list'] = new_mutations
context['overall_mut_types'] = overall_mut_types
return context
def stabilisation_browser(request):
''' View to display and summarise mutation data for thermostabilising mutational constructs. '''
gpcr_class = ['001','002','003','004','005','006','007']
class_interactions_list = {}
for c in gpcr_class:
class_interactions = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__family__slug__startswith=c, structure_ligand_pair__annotated=True).exclude(interaction_type__slug='acc').prefetch_related(
'rotamer__residue__generic_number','interaction_type',
'rotamer__residue__protein_conformation__protein__parent__family')
generic = {}
for i in class_interactions:
if i.rotamer.residue.generic_number:
gn = i.rotamer.residue.generic_number.label
else:
continue
protein = i.rotamer.residue.protein_conformation.protein.parent.family.slug
if gn not in generic.keys():
generic[gn] = set()
generic[gn].add(protein)
class_interactions_list[c]=generic
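# Hard-coded lookups (apparently derived from complex structures): per generic position, the receptor
# slugs with arrestin (class A) or G protein (class A and B) contacts, used below to flag
# signal-protein interface positions.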
arrestin_data = {'001':
{"12.49x49": {"001_009_001_001"},
"2.37x37": {"001_009_001_001"},
"2.38x38": {"001_009_001_001"},
"2.39x39": {"001_009_001_001"},
"2.40x40": {"001_009_001_001"},
"2.43x43": {"001_009_001_001"},
"3.50x50": {"001_009_001_001"},
"3.54x54": {"001_009_001_001"},
"3.55x55": {"001_009_001_001"},
"3.56x56": {"001_009_001_001"},
"34.50x50": {"001_009_001_001"},
"34.51x51": {"001_009_001_001"},
"34.53x53": {"001_009_001_001"},
"34.54x54": {"001_009_001_001"},
"34.55x55": {"001_009_001_001"},
"34.56x56": {"001_009_001_001"},
"4.38x38": {"001_009_001_001"},
"5.61x61": {"001_009_001_001"},
"5.64x64": {"001_009_001_001"},
"5.68x68": {"001_009_001_001"},
"5.69x69": {"001_009_001_001"},
"5.71x71": {"001_009_001_001"},
"5.72x72": {"001_009_001_001"},
"6.24x24": {"001_009_001_001"},
"6.25x25": {"001_009_001_001"},
"6.26x26": {"001_009_001_001"},
"6.28x28": {"001_009_001_001"},
"6.29x29": {"001_009_001_001"},
"6.32x32": {"001_009_001_001"},
"6.33x33": {"001_009_001_001"},
"6.36x36": {"001_009_001_001"},
"6.37x37": {"001_009_001_001"},
"6.40x40": {"001_009_001_001"},
"8.47x47": {"001_009_001_001"},
"8.48x48": {"001_009_001_001"},
"8.49x49": {"001_009_001_001"},
"8.50x50": {"001_009_001_001"}}}
gprotein_data = {'001':
{"1.60x60": {"001_006_001_001", " 001_006_001_002"},
"12.48x48": {"001_001_003_008", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"12.49x49": {"001_001_003_008", " 001_006_001_002"},
"12.51x51": {"001_006_001_002"},
"2.37x37": {"001_006_001_001"},
"2.39x39": {"001_002_022_003"},
"2.40x40": {"001_006_001_001"},
"3.49x49": {"001_001_003_008", " 001_002_022_003"},
"3.50x50": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"3.53x53": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"3.54x54": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"3.55x55": {"001_001_003_008", " 001_006_001_002"},
"3.56x56": {"001_006_001_002", " 001_009_001_001"},
"34.50x50": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"34.51x51": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"34.52x52": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
"34.53x53": {"001_001_003_008", " 001_006_001_002"},
"34.54x54": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
"34.55x55": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
"34.57x57": {"001_001_001_002", " 001_002_022_003"},
"4.40x40": {"001_002_022_003"},
"5.61x61": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"5.64x64": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
"5.65x65": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"5.67x67": {"001_001_003_008"},
"5.68x68": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"5.69x69": {"001_001_001_002", " 001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
"5.71x71": {"001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
"5.72x72": {"001_001_003_008", " 001_006_001_002", " 001_009_001_001"},
"5.74x74": {"001_001_003_008"},
"6.23x23": {"001_002_022_003"},
"6.24x24": {"001_009_001_001"},
"6.25x25": {"001_002_022_003", " 001_006_001_001", " 001_009_001_001"},
"6.26x26": {"001_002_022_003", " 001_009_001_001"},
"6.28x28": {"001_009_001_001"},
"6.29x29": {"001_001_001_002", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"6.32x32": {"001_001_001_002", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"6.33x33": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"6.36x36": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
"6.37x37": {"001_001_001_002", " 001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
"7.56x56": {"001_001_001_002", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"8.47x47": {"001_001_001_002", " 001_002_022_003", " 001_006_001_001", " 001_009_001_001"},
"8.48x48": {"001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
"8.49x49": {"001_006_001_001", " 001_006_001_002"},
"8.51x51": {"001_006_001_002"},
"8.56x56": {"001_006_001_001"}},
'002':
{"12.48x48": {"002_001_003_003"},
"12.49x49": {"002_001_003_003"},
"2.46x46": {"002_001_001_001", " 002_001_003_003"},
"2.50x50": {"002_001_001_001", " 002_001_003_003"},
"3.53x53": {"002_001_001_001", " 002_001_003_003"},
"3.54x54": {"002_001_001_001", " 002_001_003_003"},
"3.57x57": {"002_001_001_001", " 002_001_003_003"},
"3.58x58": {"002_001_001_001", " 002_001_003_003"},
"3.59x59": {"002_001_001_001", " 002_001_003_003"},
"4.37x38": {"002_001_003_003"},
"4.38x39": {"002_001_003_003"},
"4.39x40": {"002_001_003_003"},
"4.40x41": {"002_001_003_003"},
"5.57x57": {"002_001_003_003"},
"5.61x61": {"002_001_001_001", " 002_001_003_003"},
"5.64x64": {"002_001_001_001", " 002_001_003_003"},
"5.65x65": {"002_001_001_001"},
"6.37x37": {"002_001_003_003"},
"6.41x41": {"002_001_003_003"},
"6.42x42": {"002_001_001_001", " 002_001_003_003"},
"6.45x45": {"002_001_001_001", " 002_001_003_003"},
"6.48x48": {"002_001_003_003"},
"7.56x56": {"002_001_003_003"},
"7.57x57": {"002_001_003_003"},
"7.60x60": {"002_001_001_001", " 002_001_003_003"},
"8.47x47": {"002_001_003_003"},
"8.48x48": {"002_001_001_001", " 002_001_003_003"},
"8.49x49": {"002_001_003_003"},
"8.53x53": {"002_001_003_003"},
"8.56x56": {"002_001_001_001", " 002_001_003_003"},
"8.60x60": {"002_001_001_001", " 002_001_003_003"},
"8.63x63": {"002_001_001_001"},
"8.64x64": {"002_001_003_003"},
"8.67x67": {"002_001_001_001"}}}
# Set up: Restructure the STRUCTURAL_RULES for the constructs into a crude tree-like structure to enable
# quick and concise searching within the for loops below.
structural_rule_tree = create_structural_rule_trees(STRUCTURAL_RULES)
# Get a list of all constructs.
constructs = Construct.objects.all()\
.order_by().only(
"protein__entry_name",
# "mutations__sequence_number",
# "mutations__residue__generic_number",
# "mutations__residue__protein_segment__slug",
# "mutations__mutated_amino_acid",
# "mutations__wild_type_amino_acid",
"protein__family__slug",
"protein__family__parent__parent__parent__name",
"structure__state__name",
"crystal__pdb_code")\
.prefetch_related(
"structure__state",
"mutations__residue__generic_number",
"mutations__residue__protein_segment",
"mutations__effects__bar",
"protein__family__parent__parent__parent",
"crystal")
# Get a list of all relevant proteins and generic numbers
conservation_proteins = constructs.values_list('protein__family__parent__parent__parent__name',
flat=True)\
.distinct()
conservation_gen_nums = constructs.values_list('mutations__residue__generic_number__label', flat=True).distinct()
# Calculate the conservation values for the mutations across their receptor families and protein classes.
# Alignment performed using generic numbers.
conservation = conservation_table(conservation_proteins, conservation_gen_nums)
# For each analysis mode, define the information that is to be used as a unique identifier for grouping data.
# I.e. for position_only, grouping is performed by class and position. Hence each row will have a unique class &
# position. This is used as the unique identifier, or ID -- recorded in 'include_in_id'.
# Each row has some calculated or 'unique' values, as well as the id. This is found below. However, for example,
# the wild type AA is not unique across the pos_and_mut group, and so this must be removed from the row info.
# This is recorded in 'exclude_from_info'
groupings = {
"all":{"include_in_id":['class', 'gen_num', 'wild_type', 'mutant'], "exclude_from_info":['']},
"pos_and_wt":{"include_in_id":['class', 'gen_num', 'wild_type'],
"exclude_from_info":['ala_leu_subset', 'ala_subset', 'mutant']},
"pos_and_mut":{"include_in_id":['class', 'gen_num', 'mutant'],
"exclude_from_info":['ala_leu_subset', 'wild_type']},
"position_only":{"include_in_id":['class', 'gen_num'],
"exclude_from_info":['ala_leu_subset', 'ala_subset', 'wild_type', 'mutant']}
}
# Set up dictionaries to record information.
mutation_groups = {"position_only":{}, "all":{}, "pos_and_wt":{}, "pos_and_mut":{}}
# Grab thermostabilising mutations
mutations_thermo = ConstructMutation.objects.filter(effects__slug='thermostabilising').all()\
.prefetch_related(
"construct__structure__state",
"residue__generic_number",
"residue__protein_segment",
"construct__protein__family__parent__parent__parent",
"construct__crystal")
# For each construct, get the needed information, and add to the context dictionary called mutation_list.
for mutant in mutations_thermo:
# Get info for the construct
struct_id = mutant.construct.structure_id
state = mutant.construct.structure.state.name
prot = mutant.construct.protein
p_class = prot.family.parent.parent.parent.short()
p_class_name = prot.family.parent.parent.parent.name
p_class_slug = prot.family.parent.parent.parent.slug
p_ligand = prot.family.parent.parent.short()
p_receptor = prot.family.parent.short()
# print(p_receptor,'p_receptor')
real_receptor = prot.entry_short
real_receptor_iuphar = prot.short()
pdb = mutant.construct.crystal.pdb_code
# Get the generic number and segment, if known.
generic_number_display = None
try:
if mutant.residue.generic_number is None:
generic_number = u'\u2014'
else:
generic_number = mutant.residue.generic_number.label
generic_number_display = mutant.residue.display_generic_number.label
segment = mutant.residue.protein_segment.slug
except AttributeError:
generic_number = u'\u2014'
segment = u'\u2014'
# Collect the mutation info needed to create a unique group id, and the info relevant to the full row.
mutant_id = {'gen_num':generic_number, 'wild_type':mutant.wild_type_amino_acid,
'mutant':mutant.mutated_amino_acid, 'GPCR_count':0, 'segment':segment, 'class': p_class}
mutant_info = {'pdb':pdb,
'ligand': p_ligand,
'receptor': p_receptor,
'real_receptor': real_receptor,
'real_receptor_iuphar': real_receptor_iuphar,
'wild_type':mutant_id["wild_type"],
'mutant':mutant_id['mutant'],
'state':state,
'struct_id':struct_id}
# Check whether the calculated columns have already been computed for the pos, wt & mut grouping.
# If so, all groups already have the column calculations needed added.
# If not, all other grouping info must be calculated anyway to retrieve the site info for the wt & mut
# grouping.
wt_mut_group_id = ",".join([str(val) for key, val in mutant_id.items()
if key in groupings['all']['include_in_id']])
if wt_mut_group_id not in mutation_groups['all']:
# In here: insert the code to find the site info
calced_cols = get_calculated_columns(structural_rule_tree,
mutant_id['mutant'],
mutant_id['wild_type'],
generic_number,
p_class_name,
p_receptor,
conservation)
# For each group, add the required info.
for group_name, attr in groupings.items():
# Create a dictionary of information pertaining to the whole group to which the mutant belongs
#
group_info = {key:item for key, item in mutant_id.items() if key not in attr['exclude_from_info']}
# Create a group ID (which will be unique for each grouping)
group_id = ",".join([str(val) for key, val in mutant_id.items()
if key in attr['include_in_id']])
# Get the context dict entry for which the mutant should be added.
# If none, create one with the group_info
group = mutation_groups[group_name].setdefault(group_id,
[group_info, {}]
)
# If the group is newly created, calculate the values for the Frequency and Conservation Cols
if group[1] == {}:
# Get propensity and hydrophobicity values.
group[0]['propensity'],\
group[0]['hydro'],\
group[0]["class_cons"],\
group[0]["receptor_fam_cons"],\
group[0]["ionic_lock"],\
group[0]["sodium_ion"],\
group[0]["res_switch"]\
= calced_cols[group_name]
# Add further information to group_info to allow for fast mutation subset filtering.
if group_name == "all":
if mutant_id['mutant'] == 'A':
in_ala_subset = 'ala_subset'
elif mutant_id['wild_type'] == 'A' and mutant_id['mutant'] == 'L':
in_ala_subset = 'ala_subset'
else:
in_ala_subset = 'no_subset'
group[0]['ala_subset'] = in_ala_subset
if generic_number in class_interactions_list[p_class_slug]:
group[0]['ligand_binding'] = len(class_interactions_list[p_class_slug][generic_number])
else:
group[0]['ligand_binding'] = u'\u2014'
group[0]['arrestin_binding'] = u'\u2014'
if p_class_slug in arrestin_data:
if generic_number_display in arrestin_data[p_class_slug]:
group[0]['arrestin_binding'] = len(arrestin_data[p_class_slug][generic_number_display])
group[0]['gprotein_binding'] = u'\u2014'
if p_class_slug in gprotein_data:
if generic_number_display in gprotein_data[p_class_slug]:
group[0]['gprotein_binding'] = len(gprotein_data[p_class_slug][generic_number_display])
# Count the number of construct mutations recorded in the row.
group[0]['GPCR_count'] += 1
# Remove unnecessary items from the mutant info
info = {key:set((item,)) for key, item in mutant_info.items() if key not in attr['include_in_id']}
if group[1] == {}:
# Initialise the dict with the first mutant.
group[1].update(info)
else:
# Add the specific mutant info.
for key, item in info.items():
group[1][key].update(item)
# Remove receptor family conservation info if row refers to >1 receptor family
if len(group[1]['receptor']) != 1:
group[0]["receptor_fam_cons"] = u'\u2014'
# Send the context dictionary to the template to be rendered
return render(request, "construct/stabilisation_browser.html",
{'pos_and_mut': mutation_groups['pos_and_mut'],
'pos_and_wt': mutation_groups['pos_and_wt'],
'all': mutation_groups['all'],
'position_only': mutation_groups["position_only"]})
def conservation_table(prot_classes, gen_nums):
'''Calculate the conservation values needed for the thermostabilisation view'''
table = {}
# Collect residue counts for all residues in the protein classes and at the generic number positions within the
# prot_classes and gen_nums set, grouped by amino acid, generic number, protein receptor family, and protein class.
residues = Residue.objects.order_by()\
.only(
"amino_acid",
"generic_number__label",
"protein_conformation__protein__species_id",
"protein_conformation__protein__source_id",
"protein_conformation__protein__family__parent__parent__parent__name")\
.prefetch_related(
"protein_conformation__protein__family__parent__parent__parent",
"protein_conformation__protein__species",
"protein_conformation__protein__source",
"generic_number")\
.filter(
protein_conformation__protein__family__parent__parent__parent__name__in=list(prot_classes),
protein_conformation__protein__species_id="1", protein_conformation__protein__source_id="1",
generic_number__label__in=list(gen_nums))\
.values(
'amino_acid',
'protein_conformation__protein__family__parent__parent__parent__name',
"protein_conformation__protein__family__parent__name",
"generic_number__label")\
.annotate(Count('amino_acid'))
# Restructure the data into table format, where each row contains the count for an amino acid at generic number
# position, for either a given protein class or receptor family.
for dic in residues:
prot_row = table.setdefault(
(dic['protein_conformation__protein__family__parent__parent__parent__name'], dic['generic_number__label']),
{'total':0})
prot_row['total'] += dic['amino_acid__count']
prot_row.setdefault(dic['amino_acid'], 0)
prot_row[dic['amino_acid']] += dic['amino_acid__count']
rec_row = table.setdefault(
(dic['protein_conformation__protein__family__parent__name'], dic['generic_number__label']), {'total':0})
rec_row['total'] += dic['amino_acid__count']
rec_row.setdefault(dic['amino_acid'], 0)
rec_row[dic['amino_acid']] += dic['amino_acid__count']
# Divide each row by its total to get the frequency of each amino acid across the row (rather than its count).
for _, row in table.items():
for amino_acid, count in row.items():
if amino_acid != 'total':
row[amino_acid] = round(count/row['total'], 2)
return table
def get_calculated_columns(rule_tree, mutant, wild_type, g_n, prot_class, rec_fam, conservation): # pylint: disable=too-many-arguments
''' Calculate the propensity, hydrophobicity and site info for the given mut & wt for each grouping'''
# Get the conservation values for the protein class and receptor family
class_cons = conservation.get((prot_class, g_n), {})
fam_cons = conservation.get((rec_fam, g_n), {})
# Get the part of the structural_rule_tree relevant to the position and generic number (& hence to all groupings).
related_rules = {
'ionic_lock_tree':rule_tree["ionic_lock_tree"].get(prot_class[6], {}).get(g_n, {}),
'sodium_ion_tree':rule_tree["sodium_ion_tree"].get(prot_class[6], {}).get(g_n, {}),
'residue_switch_tree':rule_tree["residue_switch_tree"].get(prot_class[6], {}).get(g_n, {})
}
# print(related_rules,rule_tree["ionic_lock_tree"])
# Return a dictionary consisting of the data and site column entries for each grouping / data analysis mode.
return {
'position_only': get_data_pos_grouping(related_rules),
'pos_and_mut':get_data_mut_grouping(related_rules, mutant, class_cons, fam_cons),
'pos_and_wt':get_data_wt_grouping(related_rules, wild_type, class_cons, fam_cons),
'all': get_data_all_grouping(related_rules, mutant, wild_type, class_cons, fam_cons)
}
def get_data_pos_grouping(rules):
'''
Calculate the Data and Site columns in the browser view for the position only analysis mode
'''
# Note: an empty dictionary evaluates to False in an if statement,
ionic_lock = 'Pos Match' if rules['ionic_lock_tree'] else u'\u2014'
sodium_ion = 'Pos Match' if rules['sodium_ion_tree'] else u'\u2014'
residue_switch = 'Pos Match' if rules['residue_switch_tree'] else u'\u2014'
# There is no mutant or wild type info, so all data cols are returned as u'\u2014'
return (u'\u2014', u'\u2014', u'\u2014', u'\u2014', ionic_lock, sodium_ion, residue_switch)
def get_data_mut_grouping(rules, mutant, class_cons, fam_cons):
'''
Calculate the Data and Site columns in the browser view for the pos & mut analysis mode
'''
# Note: an empty dictionary evaluates to False in an if statement,
# Check that rules exist that apply to the class, position and gn.
if rules['ionic_lock_tree']:
# If so, check if there is a rule relevant to the mutant
if rules['ionic_lock_tree'].get(mutant, {}):
ionic_lock = 'Pos & Mutant AA Match'
else:
ionic_lock = 'Pos Match (But Not Mutant AA)'
else:
ionic_lock = u'\u2014'
if rules['sodium_ion_tree']:
if rules['sodium_ion_tree'].get(mutant, {}):
sodium_ion = 'Pos & AA Mutant Match'
else:
sodium_ion = 'Pos Match (But Not Mutant AA)'
else:
sodium_ion = u'\u2014'
if rules['residue_switch_tree']:
if rules['residue_switch_tree'].get(mutant, {}):
residue_switch = 'Pos & Mutant AA Match'
else:
residue_switch = 'Pos Match (But Not Mutant AA)'
else:
residue_switch = u'\u2014'
return (AA_PROPENSITY.get(mutant, u'\u2014'),
HYDROPHOBICITY.get(mutant, u'\u2014'),
class_cons.get(mutant, u'\u2014'),
fam_cons.get(mutant, u'\u2014'),
ionic_lock, sodium_ion, residue_switch)
def get_data_wt_grouping(rules, wild_type, class_cons, fam_cons):
'''
Calculate the Data and Site columns in the browser view for the pos & wt analysis mode
'''
# Note: an empty dictionary evaluates to False in an if statement.
if rules['ionic_lock_tree']:
# Note: this is the simplest, though not the most concise, approach.
# Acceptable, as this code path is very rarely used.
ionic_lock_set = set()
for _, wt_rule_dict in rules['ionic_lock_tree'].items():
for key in wt_rule_dict:
ionic_lock_set.add(key)
if wild_type in ionic_lock_set:
ionic_lock = 'Pos & Wild Type AA Match'
else:
ionic_lock = 'Pos Match (But Not Wild Type AA)'
else:
ionic_lock = u'\u2014'
# Check that rules exist that apply to the class, position and gn.
if rules['sodium_ion_tree']:
sodium_ion_set = set()
# If so, check if there is a rule relevant to the wild type. The dictionary tree is constructed so that
# the mutant is in the 3rd level and the wild type in the 4th, hence each mutant branch must be checked
# for the wild type.
for _, wt_rule_dict in rules['sodium_ion_tree'].items():
for key in wt_rule_dict:
sodium_ion_set.add(key)
if wild_type in sodium_ion_set:
sodium_ion = 'Pos & Wild Type AA Match'
else:
sodium_ion = 'Pos Match (But Not Wild Type AA)'
else:
sodium_ion = u'\u2014'
if rules['residue_switch_tree']:
residue_switch_set = set()
for _, wt_rule_dict in rules['residue_switch_tree'].items():
for key in wt_rule_dict:
residue_switch_set.add(key)
if wild_type in residue_switch_set:
residue_switch = 'Pos & Wild Type AA Match'
else:
residue_switch = 'Pos Match (But Not Wild Type AA)'
else:
residue_switch = u'\u2014'
return (AA_PROPENSITY.get(wild_type, u'\u2014'),
HYDROPHOBICITY.get(wild_type, u'\u2014'),
class_cons.get(wild_type, u'\u2014'),
fam_cons.get(wild_type, u'\u2014'),
ionic_lock, sodium_ion, residue_switch)
def get_data_all_grouping(rules, mutant, wild_type, class_cons, fam_cons):
'''
Calculate the Data and Site columns in the browser view for the pos, mut & wt analysis mode
'''
# Get propensity fold change where possible
mut = AA_PROPENSITY.get(mutant, u'\u2014')
w_t = AA_PROPENSITY.get(wild_type, u'\u2014')
# Where possible, calculate the fold change
prop = u'\u2014' if isinstance(mut, str) | isinstance(w_t, str) else str(round(mut-w_t, 2))
# Append the mut and wt values to the end.
prop = prop + ' (' + str(mut) + u'\u2212'+ str(w_t) +')'
# Get hydrophobicity fold change where possible
mut = HYDROPHOBICITY.get(mutant, u'\u2014')
w_t = HYDROPHOBICITY.get(wild_type, u'\u2014')
hydro = u'\u2014' if isinstance(mut, str) | isinstance(w_t, str) else str(round(mut-w_t, 2))
hydro = hydro + ' (' + str(mut) + u'\u2212'+ str(w_t) +')'
# Get the receptor family conservation fold change where possible
mut = fam_cons.get(mutant, 0)
w_t = fam_cons.get(wild_type, 0)
rec_cons = u'\u2014' if isinstance(mut, str) | isinstance(w_t, str) else str(round(mut-w_t, 2))
rec_cons += ' ('+str(mut)+u'\u2212'+str(w_t)+')'
# Get the protein class conservation fold change where possible
mut = class_cons.get(mutant, 0)
w_t = class_cons.get(wild_type, 0)
prot_cons = u'\u2014' if isinstance(mut, str) | isinstance(w_t, str) else str(round(mut-w_t, 2))
prot_cons += ' ('+str(mut)+u'\u2212'+str(w_t)+')'
# Get site info from the structural site rules
ionic_lock = rules['ionic_lock_tree'].get(mutant, {}).get(wild_type, u'\u2014')
sodium_ion = rules['sodium_ion_tree'].get(mutant, {}).get(wild_type, u'\u2014')
residue_switch = rules['residue_switch_tree'].get(mutant, {}).get(wild_type, u'\u2014')
return (prop,
hydro,
prot_cons,
rec_cons,
ionic_lock,
sodium_ion,
residue_switch)
def parse_rule_definition(rule_def,rule_result):
'''
Take in a rule definition from the structural rules and parse it so that it is suitable both for display
and for use in the rule dictionaries.
Args:
- rule_def identifies the site and should start with 'Ionic', 'Sodium' or 'State'
- rule_result identifies the action and should end with 'Removal', 'Contraction' or an addition
Returns:
site - the type of site the definition refers to: 'ionic_lock', 'sodium_ion' or 'residue_switch'
result - the action at the site: 'Removed', 'Contracted' or 'Added'
'''
# Get the type of action in the definition
if rule_result[-7:] == 'Removal':
result = 'Removed'
elif rule_result[-11:] == 'Contraction':
result = 'Contracted'
else:
result = 'Added'
# Determine the site the rule applies to from the definition
if rule_def[:5] == 'Ionic':
site = 'ionic_lock'
elif rule_def[:6] == 'Sodium':
site = 'sodium_ion'
elif rule_def[:5] == 'State':
site = 'residue_switch'
else: # Then there is no sensible way to understand this rule.
site = 'other'
result = rule_def # Override previous rule finding.
return (site, result)
def create_structural_rule_trees(rule_dictionary):
'''
Restructure the structural rules from a list of dictionaries to a tree-like nested dictionary,
so that they may be easily and quickly searched.
I.e. each type of site gets its own tree/dictionary, as it has its own column.
This allows for simpler code when querying the rules.
'''
structural_rule_trees = {'ionic_lock_tree':{}, 'sodium_ion_tree':{}, 'residue_switch_tree':{}, 'other_tree':{}}
# List of classes included by the 'All' class designation.
classes = {'A', 'B', 'C', 'F'}
# List of amino acids included by the 'X' amino acid designation.
amino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S',
'T', 'V', 'W', 'Y', 'B', 'Z', 'J']
# For each tree, initialise an inner dictionary for every class.
for _, tree in structural_rule_trees.items():
for prot_class in classes:
tree.setdefault(prot_class, {})
# For each class type in the Structural rules list, iterate through the contained dictionaries.
for item in {'A', 'B', 'C', 'All'}:
for rule in rule_dictionary[item]:
# Get the dictionary to which the rule pertains
site, definition = parse_rule_definition(rule['Design Principle'], rule["Addition / Removal"])
tree = structural_rule_trees[site+"_tree"]
# Get a set of the classes and wild type aas that the rule affects
rule_class = classes if rule['Class'] == 'All' else {rule['Class']}
rule_wt = amino_acids if rule['Wt AA'] == 'X' else rule['Wt AA'].split('/')
# Iterate through the keys in each rule, adding a 'branch' to the nested dictionary, as needed.
for prot_class in rule_class:
node = tree.setdefault(prot_class, {})\
.setdefault(rule['Generic Position'], {})\
.setdefault(rule['Mut AA'], {})
for acid in rule_wt:
# If the rule definition is already stored, append the next definition to it.
# Otherwise, create a new entry, consisting of the rule definition.
acid_node = node.get(acid, "")
if acid_node == "":
# Then no previous rules.
node[acid] = definition
else: # Add to the previous results
node[acid] = acid_node + ", " + definition
return structural_rule_trees
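# Re-import construct annotations from the PDB for every non-refined structure, replacing any existing
# construct entry; failed entries are collected and shown on the rendered page.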
def fetch_all_pdb(request):
structures = Structure.objects.filter(refined=False)
for s in structures:
pdbname = str(s)
print(pdbname)
failed = []
try:
protein = Protein.objects.filter(entry_name=pdbname.lower()).get()
d = fetch_pdb_info(pdbname,protein)
#delete before adding new
Construct.objects.filter(name=d['construct_crystal']['pdb_name']).delete()
add_construct(d)
except:
print(pdbname,'failed')
failed.append(pdbname)
context = {'failed':failed}
return render(request,'pdb_all.html',context)
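# Fetch and display PDB-derived annotation for a single entry; the calls that would save it
# (and clear caches) are commented out.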
def fetch_pdb(request, slug):
try:
protein = Protein.objects.filter(entry_name=slug.lower()).get()
except:
protein = False
d = fetch_pdb_info(slug,protein, ignore_gasper_annotation=True)
#delete before adding new
print(d['construct_crystal']['pdb_name'])
#Construct.objects.filter(name__iexact=d['construct_crystal']['pdb_name']).delete()
#add_construct(d)
#cache.delete(d['construct_crystal']['pdb_name']+'_schematics')
#cache.delete(d['construct_crystal']['pdb_name']+'_snake')
context = {'d':d}
return render(request,'pdb_fetch.html',context)
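# Return the fetched annotation as JSON, converted to the disordered layout used by the construct web form.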
def fetch_pdb_for_webform(request, slug, **response_kwargs):
slug = slug.lower()
protein = Protein.objects.filter(entry_name=slug).get()
d = fetch_pdb_info(slug,protein)
d = convert_ordered_to_disordered_annotation(d)
jsondata = json.dumps(d)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(jsondata, **response_kwargs)
class ConstructBrowser(TemplateView):
"""
Fetch construct data and schematic drawings for the construct browser
"""
template_name = "construct_browser.html"
def get_context_data (self, **kwargs):
context = super(ConstructBrowser, self).get_context_data(**kwargs)
try:
cons = Construct.objects.defer('schematics','snakecache').all().prefetch_related(
"crystal","mutations","purification","protein__family__parent__parent__parent", "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
"protein__species","structure__pdb_code","structure__publication__web_link", "contributor")
context['constructs'] = []
for c in cons:
# c.schematics = c.schematic()
c.wt_schematic = c.wt_schematic()
c.cons_schematic = c.cons_schematic()
context['constructs'].append(c)
except Construct.DoesNotExist as e:
pass
return context
class ExperimentBrowser(TemplateView):
"""
Fetch construct data with expression, solubilisation, purification and crystallisation details for the experimental data browser
"""
template_name = "experimental_browser.html"
def get_context_data (self, **kwargs):
context = super(ExperimentBrowser , self).get_context_data(**kwargs)
try:
cons = Construct.objects.defer('schematics','snakecache').all().prefetch_related(
"crystal","mutations","purification","protein__family__parent__parent__parent", "insertions__insert_type","expression","solubilization", "modifications", "deletions",
"crystallization__crystal_method", "crystallization__crystal_type",
"crystallization__chemical_lists", "crystallization__chemical_lists__chemicals__chemical__chemical_type",
"protein__species","structure__pdb_code","structure__publication__web_link", "contributor",
Prefetch("structure__ligands", queryset=StructureLigandInteraction.objects.filter(
annotated=True).prefetch_related('ligand__properities__ligand_type', 'ligand_role','ligand__properities__web_links__web_resource'))).annotate(pur_count = Count('purification__steps')).annotate(sub_count = Count('solubilization__chemical_list__chemicals'))
#context['constructs'] = cache.get('construct_browser')
#if context['constructs']==None:
context['constructs'] = []
context['schematics'] = []
for c in cons:
# c.schematic_cache = c.schematic()
c.summary = c.chem_summary()
context['constructs'].append(c)
#cache.set('construct_browser', context['constructs'], 60*60*24*2) #two days
# else:
# print('construct_browser used cache')
except Construct.DoesNotExist as e:
pass
return context
class design(AbsTargetSelection):
# Left panel
step = 1
number_of_steps = 1
docs = 'constructs.html#construct-design-tool' # FIXME
# description = 'Select receptors to index by searching or browsing in the middle column. You can select entire' \
# + ' receptor families and/or individual receptors.\n\nSelected receptors will appear in the right column,' \
# + ' where you can edit the list.\n\nSelect which numbering schemes to use in the middle column.\n\nOnce you' \
# + ' have selected all your receptors, click the green button.'
description = '''This is a tool to design structure constructs based on all published GPCR structures.
A modification can be based on the closest template, the most frequent solution or a structural rationale (mutations).'''
# Middle section
numbering_schemes = False
filters = False
search = True
title = "Select a receptor"
template_name = 'designselection.html'
type_of_selection = 'targets'
selection_only_receptors = True
selection_boxes = OrderedDict([
('reference', False),
('targets', False),
('segments', False),
])
# Buttons
buttons = {
'continue': {
'label': 'Show results',
'onclick': 'submitupload()',
'color': 'success',
'url': '/construct/tool/',
#'url': 'calculate/'
}
}
redirect_on_select = True
selection_heading = "Construct Design Tool"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
@csrf_exempt #jquery send post, so no csrf
def align(request):
ids = json.loads(request.POST.get('ids'))
c_ids = []
s_ids = []
for i in ids:
if i.startswith('align'):
s_ids.append(i.split('_')[1])
else:
c_ids.append(i)
cons = Construct.objects.filter(pk__in=c_ids).prefetch_related(
"crystal","mutations","purification","protein__family__parent__parent__parent", "insertions", "modifications", "deletions", "crystallization__chemical_lists",
"protein__species","structure__pdb_code","structure__publication__web_link", "contributor")
proteins = []
constructs = OrderedDict()
annotations = {}
for c in cons:
# print(c)
proteins.append(c.protein)
constructs[c.name] = c.protein.entry_name
annotations[c.name] = c.schematic()['annotations']
# print(annotations)
if len(s_ids):
rs = Residue.objects.filter(protein_conformation__protein__in=proteins, protein_segment__slug__in=s_ids).prefetch_related(
'protein_conformation__protein', 'protein_conformation__state', 'protein_segment',
'generic_number__scheme', 'display_generic_number__scheme')
else:
s_ids = ['N-term','TM1','ICL1','TM2','ECL1','TM3','ICL2','TM4','ECL2','TM5','ICL3','TM6','ECL3','TM7','ICL4','H8','C-term']
rs = Residue.objects.filter(protein_conformation__protein__in=proteins).prefetch_related(
'protein_conformation__protein', 'protein_conformation__state', 'protein_segment',
'generic_number__scheme', 'display_generic_number__scheme')
# print("residues",len(rs))
numbering_schemes = {}
numbering_schemes_list = []
for pc in proteins:
if pc.residue_numbering_scheme.slug not in numbering_schemes:
rnsn = pc.residue_numbering_scheme.name
numbering_schemes[pc.residue_numbering_scheme.slug] = rnsn
numbering_schemes_list.append(rnsn)
# order and convert numbering scheme dict to tuple
numbering_schemes = sorted(numbering_schemes.items(), key=itemgetter(0))
# print(numbering_schemes_list)
distinct_gn = []
ordered_gn = OrderedDict()
distinct_segments = []
overview = OrderedDict()
segment_length = OrderedDict()
for s in s_ids:
overview[s] = OrderedDict()
segment_length[s] = {'aligned':0, 'before':0,'after':0,'total':0}
protein_lookup = {}
# print('build stuff')
segment = ''
protein = ''
track_unaligned = {}
#Find all possible generic numbers, to ensure gaps
for r in rs.order_by('protein_conformation__id','sequence_number'):
if segment!=r.protein_segment.slug or protein!=r.protein_conformation.protein.entry_name:
no_encountered_gn = True
length = 0
length_before = 0
length_after = 0
segment = r.protein_segment.slug
protein = r.protein_conformation.protein.entry_name
if protein not in protein_lookup:
protein_lookup[protein] = {}
track_unaligned[protein] = {}
if segment not in track_unaligned[protein]:
track_unaligned[protein][segment] = {'before':[],'after':[]}
if segment not in distinct_segments:
distinct_segments.append(segment)
overview[segment] = OrderedDict()
if r.generic_number:
no_encountered_gn = False
gn = r.generic_number.label
gn_sort = gn.split('x')[1]
gn_sort = float("0."+gn_sort)
if len(numbering_schemes) == 1:
gn = r.display_generic_number.label
gn_sort = gn.split('x')[1]
gn_sort = float("0."+gn_sort)
protein_lookup[protein][gn] = {'aa':r.amino_acid,'pos':r.sequence_number,'display_gn':r.display_generic_number.label,'scheme':r.display_generic_number.scheme.name}
if gn not in distinct_gn:
distinct_gn.append(gn)
overview[segment][gn_sort] = [gn,{'aa':'-','pos':''}]
length += 1
else:
if no_encountered_gn:
track_unaligned[protein][segment]['before'].append({'aa':r.amino_acid,'pos':r.sequence_number})
length_before += 1
else:
track_unaligned[protein][segment]['after'].append({'aa':r.amino_acid,'pos':r.sequence_number})
length_after += 1
if len(overview[segment])>segment_length[segment]['aligned']:
segment_length[segment]['aligned'] = len(overview[segment])
if length_before>segment_length[segment]['before']:
segment_length[segment]['before'] = length_before
if length_after>segment_length[segment]['after']:
segment_length[segment]['after'] = length_after
if segment_length[segment]['aligned']+segment_length[segment]['before']+segment_length[segment]['after']>segment_length[segment]['total']:
segment_length[segment]['total'] = segment_length[segment]['aligned']+segment_length[segment]['before']+segment_length[segment]['after']
# SORT generic residues to ensure correct order
gn_list = ""
ordered_summary = OrderedDict()
for seg,gns in overview.items():
ordered_summary[seg] = OrderedDict()
#GN LIST
gn_list += """<td class="ali-td ali-residue res-color-X"> </td>"""
if seg!='C-term':
for _ in range(segment_length[seg]['before']):
gn_list += """<td class="ali-td"> </td>"""
for gn in sorted(gns):
ordered_summary[seg][gns[gn][0]] = {'aa':'-','pos':'', 'display_gn':'', 'scheme':''}
gn_list += """<td class="ali-td-generic-num">{}</td>""".format("x"+gns[gn][0].split("x")[1])
if seg=='C-term':
for _ in range(segment_length[seg]['before']):
gn_list += """<td class="ali-td"> </td>"""
for _ in range(segment_length[seg]['after']):
gn_list += """<td class="ali-td"> </td>"""
alignment = OrderedDict()
alignment_print_sequence = ""
for c,p in constructs.items():
alignment[c] = copy.deepcopy(ordered_summary)
alignment_print_sequence += '<tr>'
for seg,gns in alignment[c].items():
if p not in track_unaligned:
track_unaligned[p] = {seg: {'before':[],'after':[]}}
if p not in protein_lookup:
protein_lookup[p] = {}
if seg not in track_unaligned[p]:
track_unaligned[p][seg] = {'before':[],'after':[]}
alignment_print_sequence += """<td class="ali-td ali-residue res-color-_"> </td>"""
if seg!='C-term':
for _ in range(segment_length[seg]['before']-len(track_unaligned[p][seg]['before'])):
alignment_print_sequence += """<td class="ali-td ali-residue res-color-">
-</td>"""
for aa in track_unaligned[p][seg]['before']:
if aa['pos'] in annotations[c]:
annotation = annotations[c][aa['pos']][0]
annotation_text = "<br>"+annotations[c][aa['pos']][1]
else:
annotation = ''
annotation_text = ''
alignment_print_sequence += """<td class="ali-td ali-residue res-color-{}">
<div data-toggle="tooltip" data-placement="top" data-html="true"
title="{}{}{}">{}</div></td>""".format(annotation,aa['aa'],aa['pos'],annotation_text,aa['aa'])
for gn, aa in gns.items():
if gn in protein_lookup[p]:
aa = protein_lookup[p][gn]
alignment[c][seg][gn] = aa
if aa['pos'] in annotations[c]:
annotation = annotations[c][aa['pos']][0]
annotation_text = "<br>"+annotations[c][aa['pos']][1]
else:
annotation = ''
annotation_text = ''
alignment_print_sequence += """<td class="ali-td ali-residue res-color-{}">
<div data-toggle="tooltip" data-placement="top" data-html="true"
title="{}{}<br>{}: {}{}">{}</div></td>""".format(annotation,aa['aa'],aa['pos'],aa['scheme'],aa['display_gn'],annotation_text,aa['aa'])
for aa in track_unaligned[p][seg]['after']:
if aa['pos'] in annotations[c]:
annotation = annotations[c][aa['pos']][0]
annotation_text = "<br>"+annotations[c][aa['pos']][1]
else:
annotation = ''
annotation_text = ''
alignment_print_sequence += """<td class="ali-td ali-residue res-color-{}">
<div data-toggle="tooltip" data-placement="top" data-html="true"
title="{}{}{}">{}</div></td>""".format(annotation,aa['aa'],aa['pos'],annotation_text,aa['aa'])
for _ in range(segment_length[seg]['after']-len(track_unaligned[p][seg]['after'])):
alignment_print_sequence += """<td class="ali-td ali-residue res-color-">
-</td>"""
if seg=='C-term':
for _ in range(segment_length[seg]['before']-len(track_unaligned[p][seg]['before'])):
alignment_print_sequence += """<td class="ali-td ali-residue res-color-">
-</td>"""
alignment_print_sequence += '</tr>'
# print('done',len(alignment_print_sequence),numbering_schemes_list)
context = {'constructs': constructs,'numbering_schemes_list':numbering_schemes_list,'alignment_print_sequence': alignment_print_sequence, 'segment_length' : segment_length, 'gn_list' : gn_list, 'segments': s_ids, 'c_ids': json.dumps(c_ids)} #, 'alignment_print_sequence': alignment_print_sequence
return render(request,'align.html',context)
|
cmunk/protwis
|
construct/views.py
|
Python
|
apache-2.0
| 131,498
|
[
"CRYSTAL"
] |
22e327310e93d5c71ff3fdb52e6f61ab490a7322e07acbc20744fd8fb717f793
|
import numpy as np
from ase import Atom, Atoms
from gpaw import GPAW, Mixer, RMM_DIIS
from gpaw.test import equal
a = 4.0
n = 20
d = 1.0
x = d / 3**0.5
atoms = Atoms([Atom('C', (0.0, 0.0, 0.0)),
Atom('H', (x, x, x)),
Atom('H', (-x, -x, x)),
Atom('H', (x, -x, -x)),
Atom('H', (-x, x, -x))],
cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=4, txt='a.txt',
mixer=Mixer(0.25, 3, 1), eigensolver='rmm-diis')
atoms.set_calculator(calc)
e0 = atoms.get_potential_energy()
niter0 = calc.get_number_of_iterations()
es = RMM_DIIS(blocksize=3)
calc = GPAW(gpts=(n, n, n), nbands=4, txt='b.txt',
mixer=Mixer(0.25, 3, 1), eigensolver=es)
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
equal(e0, e1, 0.000001)
equal(niter0, niter1, 0)
|
robwarm/gpaw-symm
|
gpaw/test/blocked_rmm_diis.py
|
Python
|
gpl-3.0
| 888
|
[
"ASE",
"GPAW"
] |
849f4926720a997c485a1fc5578a2448a7b00432d821cdacb7c94995afba4b91
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest as ut
import espressomd
import espressomd.observables
import espressomd.math
import tests_common
class TestCylindricalObservable(ut.TestCase):
"""
Testcase for the cylindrical observables.
"""
system = espressomd.System(box_l=[15.0, 15.0, 15.0])
system.time_step = 0.01
system.cell_system.skin = 0.4
cyl_transform_params = espressomd.math.CylindricalTransformationParameters(
center=3 * [7.5], axis=[1 / np.sqrt(2), 1 / np.sqrt(2), 0], orientation=[0, 0, 1])
params = {
'ids': None,
'transform_params': cyl_transform_params,
'n_r_bins': 4,
'n_phi_bins': 3,
'n_z_bins': 4,
'min_r': 0.0,
'min_phi': -np.pi,
'min_z': -5.0,
'max_r': 5.0,
'max_phi': np.pi,
'max_z': 5.0,
}
v_r = 0.6
v_phi = 0.7
v_z = 0.8
def tearDown(self):
self.system.part.clear()
def calc_ellipsis_pos_vel(
self, n_part, z_min, z_max, semi_x=1., semi_y=1.):
"""
Calculate positions on an elliptical corkscrew line.
Calculate Cartesian velocities that lead to a
constant velocity in cylindrical coordinates.
"""
zs = np.linspace(z_min, z_max, num=n_part)
angles = np.linspace(-0.99 * np.pi, 0.999 * np.pi, num=n_part)
positions = []
velocities = []
for angle, z in zip(angles, zs):
position = np.array(
[semi_x * np.cos(angle),
semi_y * np.sin(angle),
z])
e_r, e_phi, e_z = tests_common.get_cylindrical_basis_vectors(
position)
velocity = self.v_r * e_r + self.v_phi * e_phi + self.v_z * e_z
positions.append(position)
velocities.append(velocity)
return np.array(positions), np.array(velocities)
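# Illustrative note (not part of the original test): with the default
# semi_x=semi_y=1., calc_ellipsis_pos_vel(100, -5., 5.) returns 100 points
# winding roughly once around the z-axis between z=-5 and z=5, and every
# returned velocity has cylindrical components (self.v_r, self.v_phi,
# self.v_z) by construction, since it is assembled from the local basis
# vectors e_r, e_phi, e_z.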
def align_with_observable_frame(self, vec):
"""
Rotate vectors from the original box frame to the frame of the observables.
"""
# align original z to observable z
vec = tests_common.rodrigues_rot(vec, [1, -1, 0], -np.pi / 2.)
# original x now points along [sqrt(3),-sqrt(3),-sqrt(3)]
# align original x to observable orientation
vec = tests_common.rodrigues_rot(vec, [1, 1, 0], -3. / 4. * np.pi)
return vec
def setup_system_get_np_hist(self):
"""
Pick positions and velocities in the original box frame
and calculate the np histogram.
Then rotate and move the positions and velocities
to the frame of the observables.
After calculating the core observables, the result should be
the same as the np histogram obtained from the original box frame.
"""
positions, velocities = self.calc_ellipsis_pos_vel(100, 0.99 *
self.params['min_z'], 0.9 *
self.params['max_z'], semi_x=0.9 *
self.params['max_r'], semi_y=0.2 *
self.params['max_r'])
# first, get the numpy histogram of the cylinder coordinates
pos_cyl = []
for pos in positions:
pos_cyl.append(
tests_common.transform_pos_from_cartesian_to_polar_coordinates(pos))
np_hist, np_edges = tests_common.get_histogram(
np.array(pos_cyl), self.params, 'cylindrical')
np_dens = tests_common.normalize_cylindrical_hist(
np_hist.copy(), self.params)
# now align the positions and velocities with the frame of reference
# used in the observables
pos_aligned = []
vel_aligned = []
for pos, vel in zip(positions, velocities):
pos_aligned.append(
self.align_with_observable_frame(pos) +
self.cyl_transform_params.center)
vel_aligned.append(self.align_with_observable_frame(vel))
self.system.part.add(pos=pos_aligned, v=vel_aligned)
self.params['ids'] = self.system.part[:].id
return np_dens, np_edges
def check_edges(self, observable, np_edges):
core_edges = observable.call_method("edges")
for core_edge, np_edge in zip(core_edges, np_edges):
np.testing.assert_array_almost_equal(core_edge, np_edge)
def test_density_profile(self):
"""
Check that the result from the observable (in its own frame)
matches the np result from the box frame
"""
np_dens, np_edges = self.setup_system_get_np_hist()
cyl_dens_prof = espressomd.observables.CylindricalDensityProfile(
**self.params)
core_hist = cyl_dens_prof.calculate()
np.testing.assert_array_almost_equal(np_dens, core_hist)
self.check_edges(cyl_dens_prof, np_edges)
def test_vel_profile(self):
"""
Check that the result from the observable (in its own frame)
matches the np result from the box frame
"""
np_dens, np_edges = self.setup_system_get_np_hist()
cyl_vel_prof = espressomd.observables.CylindricalVelocityProfile(
**self.params)
core_hist = cyl_vel_prof.calculate()
core_hist_v_r = core_hist[:, :, :, 0]
core_hist_v_phi = core_hist[:, :, :, 1]
core_hist_v_z = core_hist[:, :, :, 2]
np_hist_binary = np_dens
np_hist_binary[np.nonzero(np_hist_binary)] = 1
np.testing.assert_array_almost_equal(
np_hist_binary * self.v_r, core_hist_v_r)
np.testing.assert_array_almost_equal(
np_hist_binary * self.v_phi, core_hist_v_phi)
np.testing.assert_array_almost_equal(
np_hist_binary * self.v_z, core_hist_v_z)
self.check_edges(cyl_vel_prof, np_edges)
def test_flux_density_profile(self):
"""
Check that the result from the observable (in its own frame)
matches the np result from the box frame
"""
np_dens, np_edges = self.setup_system_get_np_hist()
cyl_flux_dens = espressomd.observables.CylindricalFluxDensityProfile(
**self.params)
core_hist = cyl_flux_dens.calculate()
core_hist_v_r = core_hist[:, :, :, 0]
core_hist_v_phi = core_hist[:, :, :, 1]
core_hist_v_z = core_hist[:, :, :, 2]
np.testing.assert_array_almost_equal(np_dens * self.v_r, core_hist_v_r)
np.testing.assert_array_almost_equal(
np_dens * self.v_phi, core_hist_v_phi)
np.testing.assert_array_almost_equal(np_dens * self.v_z, core_hist_v_z)
self.check_edges(cyl_flux_dens, np_edges)
def test_cylindrical_pid_profile_interface(self):
"""
Test setters and getters of the script interface
"""
params = self.params.copy()
params['n_r_bins'] = 4
params['n_phi_bins'] = 6
params['n_z_bins'] = 8
self.system.part.add(pos=[0, 0, 0], type=0)
self.system.part.add(pos=[0, 0, 0], type=1)
params['ids'] = self.system.part[:].id
observable = espressomd.observables.CylindricalDensityProfile(**params)
# check pids
np.testing.assert_array_equal(np.copy(observable.ids), params['ids'])
with self.assertRaises(RuntimeError):
observable.ids = [observable.ids[0]]
# check bins
self.assertEqual(observable.n_r_bins, params['n_r_bins'])
self.assertEqual(observable.n_phi_bins, params['n_phi_bins'])
self.assertEqual(observable.n_z_bins, params['n_z_bins'])
obs_data = observable.calculate()
np.testing.assert_array_equal(obs_data.shape, [4, 6, 8])
observable.n_r_bins = 1
observable.n_phi_bins = 2
observable.n_z_bins = 3
self.assertEqual(observable.n_r_bins, 1)
self.assertEqual(observable.n_phi_bins, 2)
self.assertEqual(observable.n_z_bins, 3)
obs_data = observable.calculate()
np.testing.assert_array_equal(obs_data.shape, [1, 2, 3])
# check edges lower corner
self.assertEqual(observable.min_r, params['min_r'])
self.assertEqual(observable.min_phi, params['min_phi'])
self.assertEqual(observable.min_z, params['min_z'])
observable.min_r = 4
observable.min_phi = 5
observable.min_z = 6
self.assertEqual(observable.min_r, 4)
self.assertEqual(observable.min_phi, 5)
self.assertEqual(observable.min_z, 6)
obs_bin_edges = observable.bin_edges()
np.testing.assert_array_equal(obs_bin_edges[0, 0, 0], [4, 5, 6])
# check edges upper corner
self.assertEqual(observable.max_r, params['max_r'])
self.assertEqual(observable.max_phi, params['max_phi'])
self.assertEqual(observable.max_z, params['max_z'])
observable.max_r = 7
observable.max_phi = 8
observable.max_z = 9
self.assertEqual(observable.max_r, 7)
self.assertEqual(observable.max_phi, 8)
self.assertEqual(observable.max_z, 9)
obs_bin_edges = observable.bin_edges()
np.testing.assert_array_equal(obs_bin_edges[-1, -1, -1], [7, 8, 9])
# check center, axis, orientation
ctp = espressomd.math.CylindricalTransformationParameters(
center=[1, 2, 3], axis=[0, 1, 0], orientation=[0, 0, 1])
observable.transform_params = ctp
for attr_name in ['center', 'axis', 'orientation']:
np.testing.assert_array_almost_equal(np.copy(ctp.__getattr__(attr_name)),
np.copy(observable.transform_params.__getattr__(attr_name)))
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/observable_cylindrical.py
|
Python
|
gpl-3.0
| 10,523
|
[
"ESPResSo"
] |
80e5b4d2675cecdca6e3a5956f96896ab8c3f9c41bd651e8e6e306cfb8c2a5ea
|
#!/usr/bin/python
# Copyright 2003-2016 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
__author__ = "Thomas de Grenier de Latour (tgl), " + \
"modular re-write by: Brian Dolbec (dol-sen)"
__email__ = "degrenier@easyconnect.fr, " + \
"brian.dolbec@gmail.com"
__version__ = "git"
__productname__ = "eclean"
__description__ = "A cleaning tool for Gentoo distfiles and binaries."
import os
import sys
import re
import time
import getopt
import portage
from portage.output import white, yellow, turquoise, green
import gentoolkit.pprinter as pp
from gentoolkit.eclean.search import (DistfilesSearch,
findPackages, port_settings, pkgdir)
from gentoolkit.eclean.exclude import (parseExcludeFile,
ParseExcludeFileException)
from gentoolkit.eclean.clean import CleanUp
from gentoolkit.eclean.output import OutputControl
#from gentoolkit.eclean.dbapi import Dbapi
from gentoolkit.eprefix import EPREFIX
def printVersion():
"""Output the version info."""
print( "%s (%s) - %s" \
% (__productname__, __version__, __description__))
print()
print("Author: %s <%s>" % (__author__,__email__))
print("Copyright 2003-2009 Gentoo Foundation")
print("Distributed under the terms of the GNU General Public License v2")
def printUsage(_error=None, help=None):
"""Print help message. May also print partial help to stderr if an
error from {'options','actions'} is specified."""
out = sys.stdout
if _error:
out = sys.stderr
if not _error in ('actions', 'global-options', \
'packages-options', 'distfiles-options', \
'merged-packages-options', 'merged-distfiles-options', \
'time', 'size'):
_error = None
if not _error and not help: help = 'all'
if _error == 'time':
print( pp.error("Wrong time specification"), file=out)
print( "Time specification should be an integer followed by a"+
" single letter unit.", file=out)
print( "Available units are: y (years), m (months), w (weeks), "+
"d (days) and h (hours).", file=out)
print( "For instance: \"1y\" is \"one year\", \"2w\" is \"two"+
" weeks\", etc. ", file=out)
return
if _error == 'size':
print( pp.error("Wrong size specification"), file=out)
print( "Size specification should be an integer followed by a"+
" single letter unit.", file=out)
print( "Available units are: G, M, K and B.", file=out)
print("For instance: \"10M\" is \"ten megabytes\", \"200K\" "+
"is \"two hundreds kilobytes\", etc.", file=out)
return
if _error in ('global-options', 'packages-options', 'distfiles-options', \
'merged-packages-options', 'merged-distfiles-options',):
print( pp.error("Wrong option on command line."), file=out)
print( file=out)
elif _error == 'actions':
print( pp.error("Wrong or missing action name on command line."), file=out)
print( file=out)
print( white("Usage:"), file=out)
if _error in ('actions','global-options', 'packages-options', \
'distfiles-options') or help == 'all':
print( " "+turquoise(__productname__),
yellow("[global-option] ..."),
green("<action>"),
yellow("[action-option] ..."), file=out)
if _error == 'merged-distfiles-options' or help in ('all','distfiles'):
print( " "+turquoise(__productname__+'-dist'),
yellow("[global-option, distfiles-option] ..."), file=out)
if _error == 'merged-packages-options' or help in ('all','packages'):
print( " "+turquoise(__productname__+'-pkg'),
yellow("[global-option, packages-option] ..."), file=out)
if _error in ('global-options', 'actions'):
print( " "+turquoise(__productname__),
yellow("[--help, --version]"), file=out)
if help == 'all':
print( " "+turquoise(__productname__+"(-dist,-pkg)"),
yellow("[--help, --version]"), file=out)
if _error == 'merged-packages-options' or help == 'packages':
print( " "+turquoise(__productname__+'-pkg'),
yellow("[--help, --version]"), file=out)
if _error == 'merged-distfiles-options' or help == 'distfiles':
print( " "+turquoise(__productname__+'-dist'),
yellow("[--help, --version]"), file=out)
print(file=out)
if _error in ('global-options', 'merged-packages-options', \
'merged-distfiles-options') or help:
print( "Available global", yellow("options")+":", file=out)
print( yellow(" -C, --nocolor")+
" - turn off colors on output", file=out)
print( yellow(" -d, --deep")+
" - only keep the minimum for a reinstallation", file=out)
print( yellow(" -e, --exclude-file=<path>")+
" - path to the exclusion file", file=out)
print( yellow(" -i, --interactive")+
" - ask confirmation before deletions", file=out)
print( yellow(" -n, --package-names")+
" - protect all versions (when --deep)", file=out)
print( yellow(" -p, --pretend")+
" - only display what would be cleaned", file=out)
print( yellow(" -q, --quiet")+
" - be as quiet as possible", file=out)
print( yellow(" -t, --time-limit=<time>")+
" - don't delete files modified since "+yellow("<time>"), file=out)
print( " "+yellow("<time>"), "is a duration: \"1y\" is"+
" \"one year\", \"2w\" is \"two weeks\", etc. ", file=out)
print( " "+"Units are: y (years), m (months), w (weeks), "+
"d (days) and h (hours).", file=out)
print( yellow(" -h, --help")+ \
" - display the help screen", file=out)
print( yellow(" -V, --version")+
" - display version info", file=out)
print( file=out)
if _error == 'actions' or help == 'all':
print( "Available", green("actions")+":", file=out)
print( green(" packages")+
" - clean outdated binary packages from PKGDIR", file=out)
print( green(" distfiles")+
" - clean outdated packages sources files from DISTDIR", file=out)
print( file=out)
if _error in ('packages-options','merged-packages-options') \
or help in ('all','packages'):
print( "Available", yellow("options"),"for the",
green("packages"),"action:", file=out)
print( yellow(" -i, --ignore-failure")+
" - ignore failure to locate PKGDIR", file=out)
print( file=out)
if _error in ('distfiles-options', 'merged-distfiles-options') \
or help in ('all','distfiles'):
print("Available", yellow("options"),"for the",
green("distfiles"),"action:", file=out)
print( yellow(" -f, --fetch-restricted")+
" - protect fetch-restricted files (when --deep)", file=out)
print( yellow(" -s, --size-limit=<size>")+
" - don't delete distfiles bigger than "+yellow("<size>"), file=out)
print( " "+yellow("<size>"), "is a size specification: "+
"\"10M\" is \"ten megabytes\", \"200K\" is", file=out)
print( " "+"\"two hundreds kilobytes\", etc. Units are: "+
"G, M, K and B.", file=out)
print( file=out)
print( "More detailed instruction can be found in",
turquoise("`man %s`" % __productname__), file=out)
class ParseArgsException(Exception):
"""For parseArgs() -> main() communications."""
def __init__(self, value):
self.value = value # error/help key that main() uses to pick the right usage text
def __str__(self):
return repr(self.value)
def parseSize(size):
"""Convert a file size "Xu" ("X" is an integer, and "u" in
[G,M,K,B]) into an integer (file size in Bytes).
@raise ParseArgsException: in case of failure
"""
units = {
'G': (1024**3),
'M': (1024**2),
'K': 1024,
'B': 1
}
try:
match = re.match(r"^(?P<value>\d+)(?P<unit>[GMKBgmkb])?$",size)
size = int(match.group('value'))
if match.group('unit'):
size *= units[match.group('unit').capitalize()]
except:
raise ParseArgsException('size')
return size
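# Illustrative sketch (not part of the original module): with the units table
# above, parseSize("10M") == 10 * 1024**2, parseSize("200K") == 200 * 1024,
# and a bare integer such as parseSize("512") is returned unchanged in bytes;
# anything else (e.g. "10X") raises ParseArgsException('size').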
def parseTime(timespec):
"""Convert a duration "Xu" ("X" is an int, and "u" a time unit in
[Y,M,W,D,H]) into a timestamp (seconds since the epoch) that far in the past.
Raises ParseArgsException('time') in case of failure.
(yep, big approximations inside... who cares?).
"""
units = {'H' : (60 * 60)}
units['D'] = units['H'] * 24
units['W'] = units['D'] * 7
units['M'] = units['D'] * 30
units['Y'] = units['D'] * 365
try:
# parse the time specification
match = re.match(r"^(?P<value>\d+)(?P<unit>[YMWDHymwdh])?$",timespec)
value = int(match.group('value'))
if not match.group('unit'): unit = 'D'
else: unit = match.group('unit').capitalize()
except:
raise ParseArgsException('time')
return time.time() - (value * units[unit])
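# Illustrative sketch (not part of the original module): parseTime("2w")
# evaluates to time.time() - 2 * 7 * 24 * 3600, i.e. a timestamp two weeks in
# the past, and a bare "3" defaults to days (time.time() - 3 * 24 * 3600).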
def parseArgs(options={}):
"""Parse the command line arguments. Raise exceptions on
errors or non-action modes (help/version). Returns an action, and affect
the options dict.
"""
def optionSwitch(option,opts,action=None):
"""local function for interpreting command line options
and setting options accordingly"""
return_code = True
do_help = False
for o, a in opts:
if o in ("-h", "--help"):
do_help = True
elif o in ("-V", "--version"):
raise ParseArgsException('version')
elif o in ("-C", "--nocolor"):
options['nocolor'] = True
pp.output.nocolor()
elif o in ("-d", "--deep", "--destructive"):
options['destructive'] = True
elif o in ("-D", "--deprecated"):
options['deprecated'] = True
elif o in ("-i", "--interactive") and not options['pretend']:
options['interactive'] = True
elif o in ("-p", "--pretend"):
options['pretend'] = True
options['interactive'] = False
elif o in ("-q", "--quiet"):
options['quiet'] = True
options['verbose'] = False
elif o in ("-t", "--time-limit"):
options['time-limit'] = parseTime(a)
elif o in ("-e", "--exclude-file"):
print("cli --exclude option")
options['exclude-file'] = a
elif o in ("-n", "--package-names"):
options['package-names'] = True
elif o in ("-f", "--fetch-restricted"):
options['fetch-restricted'] = True
elif o in ("-s", "--size-limit"):
options['size-limit'] = parseSize(a)
elif o in ("-v", "--verbose") and not options['quiet']:
options['verbose'] = True
elif o in ("-i", "--ignore-failure"):
options['ignore-failure'] = True
else:
return_code = False
# sanity check of --deep only options:
for opt in ('fetch-restricted', 'package-names'):
if (not options['destructive']) and options[opt]:
if not options['quiet']:
print( pp.error(
"--%s only makes sense in --deep mode." % opt), file=sys.stderr)
options[opt] = False
if do_help:
if action:
raise ParseArgsException('help-'+action)
else:
raise ParseArgsException('help')
return return_code
# here are the different allowed command line options (getopt args)
getopt_options = {'short':{}, 'long':{}}
getopt_options['short']['global'] = "CdDipqe:t:nhVv"
getopt_options['long']['global'] = ["nocolor", "deep", "destructive",
"deprecated", "interactive", "pretend", "quiet", "exclude-file=",
"time-limit=", "package-names", "help", "version", "verbose"]
getopt_options['short']['distfiles'] = "fs:"
getopt_options['long']['distfiles'] = ["fetch-restricted", "size-limit="]
getopt_options['short']['packages'] = "i"
getopt_options['long']['packages'] = ["ignore-failure"]
# set default options, except 'nocolor', which is set in main()
options['interactive'] = False
options['pretend'] = False
options['quiet'] = False
options['accept_all'] = False
options['destructive'] = False
options['deprecated'] = False
options['time-limit'] = 0
options['package-names'] = False
options['fetch-restricted'] = False
options['size-limit'] = 0
options['verbose'] = False
options['ignore-failure'] = False
# if called by a well-named symlink, set the action accordingly:
action = None
# temp print line to ensure it is the svn/branch code running, etc..
#print( "###### svn/branch/gentoolkit_eclean ####### ==> ", os.path.basename(sys.argv[0]))
if os.path.basename(sys.argv[0]).startswith(__productname__+'-pkg') or \
os.path.basename(sys.argv[0]).startswith(__productname__+'-packages'):
action = 'packages'
elif os.path.basename(sys.argv[0]).startswith(__productname__+'-dist') or \
os.path.basename(sys.argv[0]).startswith(__productname__+'-distfiles'):
action = 'distfiles'
# prepare for the first getopt
if action:
short_opts = getopt_options['short']['global'] \
+ getopt_options['short'][action]
long_opts = getopt_options['long']['global'] \
+ getopt_options['long'][action]
opts_mode = 'merged-'+action
else:
short_opts = getopt_options['short']['global']
long_opts = getopt_options['long']['global']
opts_mode = 'global'
# apply getopts to command line, show partial help on failure
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
except:
raise ParseArgsException(opts_mode+'-options')
# set options accordingly
optionSwitch(options,opts,action=action)
# if action was already set, there should be no more args
if action and len(args):
raise ParseArgsException(opts_mode+'-options')
# if action was set, there is nothing left to do
if action:
return action
# So, we are in "eclean --foo action --bar" mode. Parse remaining args...
# Only two actions are allowed: 'packages' and 'distfiles'.
if not len(args) or not args[0] in ('packages','distfiles'):
raise ParseArgsException('actions')
action = args.pop(0)
# parse the action specific options
try:
opts, args = getopt.getopt(args, \
getopt_options['short'][action], \
getopt_options['long'][action])
except:
raise ParseArgsException(action+'-options')
# set options again, for action-specific options
optionSwitch(options,opts,action=action)
# any remaining args? Then die!
if len(args):
raise ParseArgsException(action+'-options')
# returns the action. Options dictionary is modified by side-effect.
return action
def doAction(action,options,exclude={}, output=None):
"""doAction: execute one action, ie display a few message, call the right
find* function, and then call doCleanup with its result."""
# define vocabulary for the output
if action == 'packages':
files_type = "binary packages"
else:
files_type = "distfiles"
saved = {}
deprecated = {}
# find files to delete, depending on the action
if not options['quiet']:
output.einfo("Building file list for "+action+" cleaning...")
if action == 'packages':
clean_me = findPackages(
options,
exclude=exclude,
destructive=options['destructive'],
package_names=options['package-names'],
time_limit=options['time-limit'],
pkgdir=pkgdir,
#port_dbapi=Dbapi(portage.db[portage.root]["porttree"].dbapi),
#var_dbapi=Dbapi(portage.db[portage.root]["vartree"].dbapi),
)
else:
# accept defaults
engine = DistfilesSearch(output=options['verbose-output'],
#portdb=Dbapi(portage.db[portage.root]["porttree"].dbapi),
#var_dbapi=Dbapi(portage.db[portage.root]["vartree"].dbapi),
)
clean_me, saved, deprecated = engine.findDistfiles(
exclude=exclude,
destructive=options['destructive'],
fetch_restricted=options['fetch-restricted'],
package_names=options['package-names'],
time_limit=options['time-limit'],
size_limit=options['size-limit'],
deprecate = options['deprecated']
)
# initialize our cleaner
cleaner = CleanUp(output.progress_controller)
# actually clean files if something was found
if clean_me:
# verbose pretend message
if options['pretend'] and not options['quiet']:
output.einfo("Here are the "+files_type+" that would be deleted:")
# verbose non-pretend message
elif not options['quiet']:
output.einfo("Cleaning " + files_type +"...")
# do the cleanup, and get size of deleted files
if options['pretend']:
clean_size = cleaner.pretend_clean(clean_me)
elif action in ['distfiles']:
clean_size = cleaner.clean_dist(clean_me)
elif action in ['packages']:
clean_size = cleaner.clean_pkgs(clean_me,
pkgdir)
# vocabulary for final message
if options['pretend']:
verb = "would be"
else:
verb = "were"
# display freed space
if not options['quiet']:
output.total('normal', clean_size, len(clean_me), verb, action)
# nothing was found
elif not options['quiet']:
output.einfo("Your "+action+" directory was already clean.")
if saved and not options['quiet']:
print()
print( (pp.emph(" The following ") + yellow("unavailable") +
pp.emph(" files were saved from cleaning due to exclusion file entries")))
output.set_colors('deprecated')
clean_size = cleaner.pretend_clean(saved)
output.total('deprecated', clean_size, len(saved), verb, action)
if deprecated and not options['quiet']:
print()
print( (pp.emph(" The following ") + yellow("unavailable") +
pp.emph(" installed packages were found")))
output.set_colors('deprecated')
output.list_pkgs(deprecated)
def main():
"""Parse command line and execute all actions."""
# set default options
options = {}
options['nocolor'] = (port_settings.get("NOCOLOR") in ('yes','true')
or not sys.stdout.isatty())
if options['nocolor']:
pp.output.nocolor()
# parse command line options and actions
try:
action = parseArgs(options)
# filter exception to know what message to display
except ParseArgsException as e:
if e.value == 'help':
printUsage(help='all')
sys.exit(0)
elif e.value[:5] == 'help-':
printUsage(help=e.value[5:])
sys.exit(0)
elif e.value == 'version':
printVersion()
sys.exit(0)
else:
printUsage(e.value)
sys.exit(2)
output = OutputControl(options)
options['verbose-output'] = lambda x: None
if not options['quiet']:
if options['verbose']:
options['verbose-output'] = output.einfo
# parse the exclusion file
if not 'exclude-file' in options:
# set it to the default exclude file if it exists
exclude_file = "%s/etc/%s/%s.exclude" % (EPREFIX,__productname__ , action)
if os.path.isfile(exclude_file):
options['exclude-file'] = exclude_file
if 'exclude-file' in options:
try:
exclude = parseExcludeFile(options['exclude-file'],
options['verbose-output'])
except ParseExcludeFileException as e:
print( pp.error(str(e)), file=sys.stderr)
print( pp.error(
"Invalid exclusion file: %s" % options['exclude-file']), file=sys.stderr)
print( pp.error(
"See format of this file in `man %s`" % __productname__), file=sys.stderr)
sys.exit(1)
else:
exclude = {}
# security check for non-pretend mode
if not options['pretend'] and portage.secpass == 0:
print( pp.error(
"Permission denied: you must be root or belong to " +
"the portage group."), file=sys.stderr)
sys.exit(1)
# execute action
doAction(action, options, exclude=exclude,
output=output)
if __name__ == "__main__":
"""actually call main() if launched as a script"""
try:
main()
except KeyboardInterrupt:
print( "Aborted.")
sys.exit(130)
sys.exit(0)
|
zmedico/gentoolkit
|
pym/gentoolkit/eclean/cli.py
|
Python
|
gpl-2.0
| 18,562
|
[
"Brian"
] |
f3c76bf3efb5b69521b369af8251110fb17a898de96023afe337ed46be3d7927
|
from collections import Sequence
from distutils.version import LooseVersion
import logging
import warnings
import sys
import os
from os import path as op
import inspect
from functools import wraps
import mayavi
from mayavi import mlab
from mayavi.filters.api import Threshold
import numpy as np
import nibabel as nib
from scipy import sparse
from scipy.spatial.distance import cdist
import matplotlib as mpl
from matplotlib import cm as mpl_cm
from . import cm as surfer_cm
logger = logging.getLogger('surfer')
# Py3k compat
if sys.version[0] == '2':
string_types = basestring # noqa, analysis:ignore
else:
string_types = str
if LooseVersion(mayavi.__version__) == LooseVersion('4.5.0'):
# Monkey-patch Mayavi 4.5:
# In Mayavi 4.5, filters seem to be missing a .point_data attribute that
# Threshold accesses on initialization.
_orig_meth = Threshold._get_data_range
def _patch_func():
return []
def _patch_meth(self):
return []
class _MayaviThresholdPatch(object):
def __enter__(self):
Threshold._get_data_range = _patch_meth
def __exit__(self, exc_type, exc_val, exc_tb):
Threshold._get_data_range = _orig_meth
_mayavi_threshold_patch = _MayaviThresholdPatch()
def threshold_filter(*args, **kwargs):
with _mayavi_threshold_patch:
thresh = mlab.pipeline.threshold(*args, **kwargs)
thresh._get_data_range = _patch_func
return thresh
else:
threshold_filter = mlab.pipeline.threshold
class Surface(object):
"""Container for surface object
Attributes
----------
subject_id : string
Name of subject
hemi : {'lh', 'rh'}
Which hemisphere to load
surf : string
Name of the surface to load (eg. inflated, orig ...)
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment variable.
offset : float | None
If float, align inside edge of each hemisphere to center + offset.
If None, do not change coordinates (default).
units : str
Can be 'm' or 'mm' (default).
"""
def __init__(self, subject_id, hemi, surf, subjects_dir=None,
offset=None, units='mm'):
"""Surface
Parameters
----------
subject_id : string
Name of subject
hemi : {'lh', 'rh'}
Which hemisphere to load
surf : string
Name of the surface to load (eg. inflated, orig ...)
offset : float | None
If 0.0, the surface will be offset such that the medial
wall is aligned with the origin. If None, no offset will
be applied. If != 0.0, an additional offset will be used.
"""
if hemi not in ['lh', 'rh']:
raise ValueError('hemi must be "lh" or "rh"')
self.subject_id = subject_id
self.hemi = hemi
self.surf = surf
self.offset = offset
self.coords = None
self.faces = None
self.nn = None
self.units = _check_units(units)
subjects_dir = _get_subjects_dir(subjects_dir)
self.data_path = op.join(subjects_dir, subject_id)
def load_geometry(self):
surf_path = op.join(self.data_path, "surf",
"%s.%s" % (self.hemi, self.surf))
coords, faces = nib.freesurfer.read_geometry(surf_path)
if self.units == 'm':
coords /= 1000.
if self.offset is not None:
if self.hemi == 'lh':
coords[:, 0] -= (np.max(coords[:, 0]) + self.offset)
else:
coords[:, 0] -= (np.min(coords[:, 0]) + self.offset)
nn = _compute_normals(coords, faces)
if self.coords is None:
self.coords = coords
self.faces = faces
self.nn = nn
else:
self.coords[:] = coords
self.faces[:] = faces
self.nn[:] = nn
@property
def x(self):
return self.coords[:, 0]
@property
def y(self):
return self.coords[:, 1]
@property
def z(self):
return self.coords[:, 2]
def load_curvature(self):
"""Load in curvature values from the ?h.curv file."""
curv_path = op.join(self.data_path, "surf", "%s.curv" % self.hemi)
self.curv = nib.freesurfer.read_morph_data(curv_path)
self.bin_curv = np.array(self.curv > 0, np.int)
def load_label(self, name):
"""Load in a Freesurfer .label file.
Label files are just text files indicating the vertices included
in the label. Each Surface instance has a dictionary of labels, keyed
by the name (which is taken from the file name if not given as an
argument).
"""
label = nib.freesurfer.read_label(op.join(self.data_path, 'label',
'%s.%s.label' % (self.hemi, name)))
label_array = np.zeros(len(self.x), np.int)
label_array[label] = 1
try:
self.labels[name] = label_array
except AttributeError:
self.labels = {name: label_array}
def apply_xfm(self, mtx):
"""Apply an affine transformation matrix to the x,y,z vectors."""
self.coords = np.dot(np.c_[self.coords, np.ones(len(self.coords))],
mtx.T)[:, :3]
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
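# Illustrative sketch (not part of the original module): for small inputs the
# helper simply defers to np.cross, e.g.
# _fast_cross_3d(np.array([[1., 0., 0.]]), np.array([[0., 1., 0.]]))
# returns array([[0., 0., 1.]]); only for >= 500 rows does the explicit
# column-wise formula above kick in.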
def _compute_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
zidx = np.where(size == 0)[0]
size[zidx] = 1.0 # prevent ugly divide-by-zero
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
nn /= size[:, np.newaxis]
return nn
###############################################################################
# LOGGING (courtesy of mne-python)
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable MNE_LOG_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = "INFO"
elif isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
class WrapStdOut(object):
"""Ridiculous class to work around how doctest captures stdout"""
def __getattr__(self, name):
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
return getattr(sys.stdout, name)
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARN').
output_format : str
Format of the output messages. See the following for examples:
http://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctest uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
if hasattr(inspect, 'signature'): # py35
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
else:
def _get_args(function, varargs=False):
out = inspect.getargspec(function) # args, varargs, keywords, defaults
if varargs:
return out[:2]
else:
return out[0]
def verbose(function):
"""Decorator to allow functions to override default log level
Do not call this function directly to set the global verbosity level,
instead use set_log_level().
Parameters (to decorated function)
----------------------------------
verbose : bool, str, int, or None
The level of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
None defaults to using the current log level [e.g., set using
mne.set_log_level()].
"""
arg_names = _get_args(function)
# this wrap allows decorated functions to be pickled (e.g., for parallel)
@wraps(function)
def dec(*args, **kwargs):
# Check if the first arg is "self", if it has verbose, make it default
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
else:
default_level = None
verbose_level = kwargs.get('verbose', default_level)
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
ret = function(*args, **kwargs)
except Exception:
set_log_level(old_level)
raise
set_log_level(old_level)
return ret
else:
return function(*args, **kwargs)
# set __wrapped__ attribute so ?? in IPython gets the right source
dec.__wrapped__ = function
return dec
###############################################################################
# USEFUL FUNCTIONS
def _check_units(units):
if units not in ('m', 'mm'):
raise ValueError('Units must be "m" or "mm", got %r' % (units,))
return units
def find_closest_vertices(surface_coords, point_coords):
"""Return the vertices on a surface mesh closest to some given coordinates.
The distance metric used is Euclidean distance.
Parameters
----------
surface_coords : numpy array
Array of coordinates on a surface mesh
point_coords : numpy array
Array of coordinates to map to vertices
Returns
-------
closest_vertices : numpy array
Array of mesh vertex ids
"""
point_coords = np.atleast_2d(point_coords)
return np.argmin(cdist(surface_coords, point_coords), axis=0)
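# Illustrative sketch (not part of the original module):
# find_closest_vertices(np.array([[0., 0., 0.], [1., 0., 0.]]),
#                       [0.9, 0.1, 0.])
# returns array([1]), the index of the mesh vertex nearest to the query point.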
def tal_to_mni(coords, units='mm'):
"""Convert Talairach coords to MNI using the Lancaster transform.
Parameters
----------
coords : n x 3 numpy array
Array of Talairach coordinates
units : str
Can be 'm' or 'mm' (default).
Returns
-------
mni_coords : n x 3 numpy array
Array of coordinates converted to MNI space.
"""
coords = np.atleast_2d(coords)
xfm = np.array([[1.06860, -0.00396, 0.00826, 1.07816],
[0.00640, 1.05741, 0.08566, 1.16824],
[-0.01281, -0.08863, 1.10792, -4.17805],
[0.00000, 0.00000, 0.00000, 1.00000]])
units = _check_units(units)
if units == 'm':
xfm[:3, 3] /= 1000.
mni_coords = np.dot(np.c_[coords, np.ones(coords.shape[0])], xfm.T)[:, :3]
return mni_coords
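# Illustrative sketch (not part of the original module): the origin maps to
# the translation column of the transform above, so tal_to_mni([0., 0., 0.])
# returns approximately [[1.078, 1.168, -4.178]] (in mm).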
def mesh_edges(faces):
"""Returns sparse matrix with edges as an adjacency matrix
Parameters
----------
faces : array of shape [n_triangles x 3]
The mesh faces
Returns
-------
edges : sparse matrix
The adjacency matrix
"""
npoints = np.max(faces) + 1
nfaces = len(faces)
a, b, c = faces.T
edges = sparse.coo_matrix((np.ones(nfaces), (a, b)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (b, c)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (c, a)),
shape=(npoints, npoints))
edges = edges + edges.T
edges = edges.tocoo()
return edges
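# Illustrative sketch (not part of the original module): a single triangle
# faces = np.array([[0, 1, 2]]) yields a symmetric 3 x 3 adjacency matrix with
# ones at (0, 1), (1, 2), (2, 0) and their transposes.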
def create_color_lut(cmap, n_colors=256, center=None):
"""Return a colormap suitable for setting as a Mayavi LUT.
Parameters
----------
cmap : string, list of colors, n x 3 or n x 4 array
Input colormap definition. This can be the name of a matplotlib
colormap, a list of valid matplotlib colors, or a suitable
mayavi LUT (possibly missing the alpha channel).
if value is "auto", a default sequential or divergent colormap is
returned
n_colors : int, optional
Number of colors in the resulting LUT. This is ignored if cmap
is a 2d array.
center : double, optional
indicates whether desired colormap should be for divergent values,
currently only used to select default colormap for cmap='auto'
Returns
-------
lut : n_colors x 4 integer array
Color LUT suitable for passing to mayavi
"""
if isinstance(cmap, np.ndarray):
if np.ndim(cmap) == 2:
if cmap.shape[1] == 4:
# This looks likes a LUT that's ready to go
lut = cmap.astype(np.int)
elif cmap.shape[1] == 3:
# This looks like a LUT, but it's missing the alpha channel
alpha = np.ones(len(cmap), np.int) * 255
lut = np.c_[cmap, alpha]
return lut
# choose default colormaps (REMEMBER to change doc, e.g., in
# Brain.add_data, when changing these defaults)
if isinstance(cmap, string_types) and cmap == "auto":
if center is None:
cmap = "rocket"
else:
cmap = "icefire"
surfer_cmaps = ["rocket", "mako", "icefire", "vlag"]
surfer_cmaps += [name + "_r" for name in surfer_cmaps]
if not isinstance(cmap, string_types) and isinstance(cmap, Sequence):
colors = list(map(mpl.colors.colorConverter.to_rgba, cmap))
cmap = mpl.colors.ListedColormap(colors)
elif cmap in surfer_cmaps:
cmap = getattr(surfer_cm, cmap)
else:
try:
# Try to get a named matplotlib colormap
# This will also pass Colormap object back out
cmap = mpl_cm.get_cmap(cmap)
except (TypeError, ValueError):
# If we get here, it's a bad input
# but don't raise the matplotlib error as it is less accurate
raise ValueError("Input %r was not valid for making a lut" % cmap)
# Convert from a matplotlib colormap to a lut array
lut = (cmap(np.linspace(0, 1, n_colors)) * 255).astype(np.int)
return lut
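# Illustrative sketch (not part of the original module), assuming matplotlib
# provides the named "viridis" colormap: create_color_lut("viridis",
# n_colors=4) returns a 4 x 4 integer array with RGB values in 0-255 and a
# fully opaque alpha column of 255s.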
@verbose
def smoothing_matrix(vertices, adj_mat, smoothing_steps=20, verbose=None):
"""Create a smoothing matrix which can be used to interpolate data defined
for a subset of vertices onto a mesh with an adjacency matrix given by
adj_mat.
If smoothing_steps is None, smoothing steps are applied until
the whole mesh is filled with non-zeros. Only use this option if
the vertices correspond to a subsampled version of the mesh.
Parameters
----------
vertices : 1d array
vertex indices
adj_mat : sparse matrix
N x N adjacency matrix of the full mesh
smoothing_steps : int or None
number of smoothing steps (Default: 20)
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
Returns
-------
smooth_mat : sparse matrix
smoothing matrix with size N x len(vertices)
"""
from scipy import sparse
logger.info("Updating smoothing matrix, be patient..")
e = adj_mat.copy()
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = vertices
smooth_mat = 1.0
n_iter = smoothing_steps if smoothing_steps is not None else 1000
for k in range(n_iter):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
scale_mat = sparse.dia_matrix((1 / data1[idx_use], 0),
shape=(len(idx_use), len(idx_use)))
smooth_mat = scale_mat * e_use[idx_use, :] * smooth_mat
logger.info("Smoothing matrix creation, step %d" % (k + 1))
if smoothing_steps is None and len(idx_use) >= n_vertices:
break
# Make sure the smoothing matrix has the right number of rows
# and is in COO format
smooth_mat = smooth_mat.tocoo()
smooth_mat = sparse.coo_matrix((smooth_mat.data,
(idx_use[smooth_mat.row],
smooth_mat.col)),
shape=(n_vertices,
len(vertices)))
return smooth_mat
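# Illustrative sketch (not part of the original module): when `vertices`
# covers the whole mesh and smoothing_steps=1, the returned matrix simply
# row-normalizes each vertex together with its immediate neighbors, so
# smooth_mat.dot(data) averages every value with those of adjacent vertices.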
@verbose
def coord_to_label(subject_id, coord, label, hemi='lh', n_steps=30,
map_surface='white', coord_as_vert=False, units='mm',
verbose=None):
"""Create label from MNI coordinate
Parameters
----------
subject_id : string
Use if file is in register with subject's orig.mgz
coord : numpy array of size 3 | int
One coordinate in MNI space or the vertex index.
label : str
Label name
hemi : [lh, rh]
Hemisphere target
n_steps : int
Number of dilation iterations
map_surface : str
The surface name used to find the closest point
coord_as_vert : bool
whether the coords parameter should be interpreted as vertex ids
units : str
Can be 'm' or 'mm' (default).
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
"""
geo = Surface(subject_id, hemi, map_surface, units=units)
geo.load_geometry()
coords = geo.coords
# work in mm from here on
if geo.units == 'm':
coords = coords * 1000
if coord_as_vert:
coord = coords[coord]
n_vertices = len(coords)
adj_mat = mesh_edges(geo.faces)
foci_vtxs = find_closest_vertices(coords, [coord])
data = np.zeros(n_vertices)
data[foci_vtxs] = 1.
smooth_mat = smoothing_matrix(np.arange(n_vertices), adj_mat, 1)
for _ in range(n_steps):
data = smooth_mat * data
idx = np.where(data.ravel() > 0)[0]
# Write label
label_fname = label + '-' + hemi + '.label'
logger.info("Saving label : %s" % label_fname)
f = open(label_fname, 'w')
f.write('#label at %s from subject %s\n' % (coord, subject_id))
f.write('%d\n' % len(idx))
for i in idx:
x, y, z = coords[i]
f.write('%d %f %f %f 0.000000\n' % (i, x, y, z))
def _get_subjects_dir(subjects_dir=None, raise_error=True):
"""Get the subjects directory from parameter or environment variable
Parameters
----------
subjects_dir : str | None
The subjects directory.
raise_error : bool
If True, raise a ValueError if no value for SUBJECTS_DIR can be found
or the corresponding directory does not exist.
Returns
-------
subjects_dir : str
The subjects directory. If the subjects_dir input parameter is not
None, its value will be returned, otherwise it will be obtained from
the SUBJECTS_DIR environment variable.
"""
if subjects_dir is None:
subjects_dir = os.environ.get("SUBJECTS_DIR", "")
if not subjects_dir and raise_error:
raise ValueError('The subjects directory has to be specified '
'using the subjects_dir parameter or the '
'SUBJECTS_DIR environment variable.')
if raise_error and not os.path.exists(subjects_dir):
raise ValueError('The subjects directory %s does not exist.'
% subjects_dir)
return subjects_dir
def has_fsaverage(subjects_dir=None, raise_error=True, return_why=False):
"""Determine whether the user has a usable fsaverage"""
subjects_dir = _get_subjects_dir(subjects_dir, raise_error=raise_error)
out = ''
if not op.isdir(subjects_dir):
out = 'SUBJECTS_DIR not found: %s' % (subjects_dir,)
else:
fs_dir = op.join(_get_subjects_dir(subjects_dir, False), 'fsaverage')
surf_dir = op.join(fs_dir, 'surf')
if not op.isdir(fs_dir):
out = 'fsaverage not found in SUBJECTS_DIR: %s' % (fs_dir,)
elif not op.isdir(surf_dir):
out = 'fsaverage has no "surf" directory: %s' % (surf_dir,)
out = (out == '', out) if return_why else (out == '')
return out
def requires_fsaverage():
import pytest
has, why = has_fsaverage(raise_error=False, return_why=True)
return pytest.mark.skipif(
not has, reason='Requires fsaverage subject data (%s)' % why)
def requires_imageio():
import pytest
try:
from imageio.plugins.ffmpeg import get_exe # noqa, analysis:ignore
except ImportError:
has = False
else:
has = True
return pytest.mark.skipif(not has, reason="Requires imageio with ffmpeg")
def requires_fs():
import pytest
has = ('FREESURFER_HOME' in os.environ)
return pytest.mark.skipif(
not has, reason='Requires FreeSurfer command line tools')
def _get_extra():
# Get extra label for newer freesurfer
subj_dir = _get_subjects_dir()
fname = op.join(subj_dir, 'fsaverage', 'label', 'lh.BA1.label')
return '_exvivo' if not op.isfile(fname) else '', subj_dir
| mwaskom/PySurfer | surfer/utils.py | Python | bsd-3-clause | 25,506 | ["Mayavi"] | e3ee74187b57007508300da838c7cf71ee3f804fbd4cfa207ad0412356b1c373 |
#!/usr/bin/env python
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""
Usage:
=====
>>> from Rana.fts import fts #
>>> from Rana.Vector import * # Just a way to get a sequence.
>>> from Bio.Seq import Seq # Use your prefer method here.
>>> pbr = fts(pBR322) #
>>> seq = Seq(str(pbr)) #
>>>
>>> from Bio.Restriction import *
>>> a = Analysis(AllEnzymes, seq, linear=False)
>>> b = a.blunt()
>>> a.print_that() # no argument -> print all the results
AasI : 2169, 2582.
AatII : 4289.
Acc16I : 263, 1359, 1457, 3589.
...
More enzymes here.
...
>>> b = a.without_site()
>>> a.print_that(b, '', '\n Enzymes which do not cut pBR322.\n\n')
Enzymes which do not cut pBR322.
AarI AatI Acc65I AcsI AcvI AdeI AflII AgeI
AhlI AleI AloI ApaI ApoI AscI AsiAI AsiSI
Asp718I AspA2I AsuII AvaIII AvrII AxyI BaeI BbrPI
BbvCI BclI BcuI BfrBI BfrI BglII BlnI BlpI
BmgBI BmgI BplI Bpu1102I Bpu14I BsaXI Bse21I BsePI
BseRI BshTI BsiWI Bsp119I Bsp120I Bsp1407I Bsp1720I Bsp19I
BspT104I BspTI BsrGI BssHI BssHII Bst98I BstAUI BstBI
BstEII BstPI BstSNI BstXI Bsu36I BtrI CciNI CelII
Cfr42I Cfr9I CpoI Csp45I CspAI CspCI CspI DraIII
DrdII Ecl136II Eco105I Eco147I Eco72I Eco81I Eco91I EcoICRI
EcoO65I EcoRI EcoT22I EspI FalI FbaI FseI FunII
HpaI KpnI Ksp22I KspAI KspI MabI MfeI MluI
Mph1103I MspCI MssI MunI NcoI NotI NsiI NspV
OliI PacI PaeR7I PasI PauI PceI Pfl23II PinAI
PmaCI PmeI PmlI Ppu10I PsiI Psp124BI PspAI PspCI
PspEI PspLI PspOMI PspXI PsrI RleAI Rsr2I RsrII
SacI SacII SanDI SauI SbfI SciI SdaI SexAI
SfiI Sfr274I Sfr303I SfuI SgfI SgrBI SlaI SmaI
SmiI SnaBI SpeI SplI SrfI Sse232I Sse8387I Sse8647I
SseBI SspBI SstI StuI SunI SwaI TliI UthSI
Vha464I XapI XbaI XcmI XhoI XmaCI XmaI XmaJI
Zsp2I
>>>
"""
from Bio.Restriction.Restriction import *
#
# OK can't put the following code in Bio.Restriction.__init__ unless
# I put everything from Restriction in here.
# or at least the RestrictionBatch class.
#
# The reason for that is that if I do so, I break the __contains__ method of
# the RestrictionBatch in Restriction, which expects to find the name of
# the enzymes in the locals() dictionary when evaluating a string to see if
# it is an enzyme.
#
# This calls for some explanation, I guess:
# When testing for the presence of a Restriction enzyme in a
# RestrictionBatch, the user can use:
#
# 1) a class of type 'RestrictionType'
# 2) a string of the name of the enzyme (it's repr)
# i.e:
# >>> from Bio.Restriction import RestrictionBatch, EcoRI
# >>> MyBatch = RestrictionBatch(EcoRI)
# >>>
# >>> EcoRI in MyBatch # the class EcoRI.
# True
# >>>
# >>> 'EcoRI' in MyBatch # a string representation
# True
#
# OK, that's how it is supposed to work. And I find it quite useful.
#
# Now if I leave the code here I got:
# >>> from Bio.Restriction import RestrictionBatch, EcoRI
# >>> MyBatch = RestrictionBatch(EcoRI)
# >>> EcoRI in MyBatch # the class EcoRI.
# True
# >>> 'EcoRI' in MyBatch # a string.
# False
# There are 5 ways to change that:
# 1) abandon the evaluation of string representation.
# 2) leave the code like that and hack something in RestrictionBatch.
# 3) Move back the code in Bio.Restriction.Restriction
# 4) Move RestrictionBatch here.
# 5) Remove Restriction.Restriction and move all the code in here
#
# 1) no fun in that.
# 2) there is a simpler way to do it.
# 3) I prefer to keep all the code together.
# 4) and 5) both are OK. Only a matter of preference.
#
# So the following code has been moved back to Bio.Restriction.Restriction.
# For the user the result is transparent:
# from Bio.Restriction import * works as before.
#
###
### The restriction enzyme classes are created dynamically when the module is
### imported. Here is the magic which allow the creation of the
### restriction-enzyme classes.
###
### The reason for the two dictionaries in Restriction_Dictionary
### one for the types (which will be called pseudo-type as they really
### correspond to the values that instances of RestrictionType can take)
### and one for the enzymes is efficiency as the bases are evaluated
### once per pseudo-type.
###
### However, Restriction is still a very inefficient module at import. But
### remember that around 660 classes (which is more or less the size of Rebase)
### have to be created dynamically. This processing, however, takes place only
### once.
### This inefficiency is largely compensated by the use of metaclasses,
### which provide a very efficient layout for the classes themselves, mostly
### alleviating the need for if/else loops in the class methods.
###
### It is essential to run Restriction with doc string optimisation (-OO switch)
### as the doc strings of 660 classes take a lot of processing.
###
##CommOnly = RestrictionBatch() # commercial enzymes
##NonComm = RestrictionBatch() # not available commercially
##for TYPE, (bases, enzymes) in typedict.iteritems():
## #
## # The keys are the pseudo-types TYPE (stored as type1, type2...)
## # The names are not important and are only present to differentiate
## # the keys in the dict. All the pseudo-types are in fact RestrictionType.
## # These names will not be used after and the pseudo-types are not
## # kept in the locals() dictionary. It is therefore impossible to
## # import them.
## # Now, if you have look at the dictionary, you will see that not all the
## # types are present as those without corresponding enzymes have been
## # removed by Dictionary_Builder().
## #
## # The values are tuples which contain
## # as first element a tuple of bases (as string) and
## # as second element the names of the enzymes.
## #
## # First eval the bases.
## #
## bases = tuple([eval(x) for x in bases])
## #
## # now create the particular value of RestrictionType for the classes
## # in enzymes.
## #
## T = type.__new__(RestrictionType, 'RestrictionType', bases, {})
## for k in enzymes:
## #
## # Now, we go through all the enzymes and assign them their type.
## # enzymedict[k] contains the values of the attributes for this
## # particular class (self.site, self.ovhg,....).
## #
## newenz = T(k, bases, enzymedict[k])
## #
## # we add the enzymes to the corresponding batch.
## #
## # No need to verify the enzyme is a RestrictionType -> add_nocheck
## #
## if newenz.is_comm() : CommOnly.add_nocheck(newenz)
## else : NonComm.add_nocheck(newenz)
###
### AllEnzymes is a RestrictionBatch with all the enzymes from Rebase.
###
##AllEnzymes = CommOnly | NonComm
###
### Now, place the enzymes in locals so they can be imported.
###
##names = [str(x) for x in AllEnzymes]
##locals().update(dict(map(None, names, AllEnzymes)))
###
### Limit what can be imported by from Restriction import *
### Most of the classes here will never be used outside this module
### (Defined,Palindromic...). It is still possible to request them specifically
###
### also delete the variable that are no longer needed.
###
###
##__all__=['Analysis', 'RestrictionBatch','AllEnzymes','CommOnly','NonComm']+names
##del k, x, enzymes, TYPE, bases, names
| BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/Restriction/__init__.py | Python | gpl-2.0 | 8,744 | ["Biopython"] | bee9a3ff2653e27ae9375cdc2aabda6271e37319cf88436b5ea89e20d50e6300 |
from django.contrib.postgres.aggregates import ArrayAgg
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
#from common.views import AbsTargetSelection
from common.views import AbsTargetSelectionTable
from common.views import AbsSegmentSelection
from common.views import AbsMiscSelection
from common.selection import Selection, SelectionItem
from mutation.models import *
from phylogenetic_trees.PrepareTree import *
from protein.models import ProteinFamily, ProteinSet, Protein, ProteinSegment, ProteinCouplings
from copy import deepcopy
import json
import math
import os, shutil, subprocess, signal
import uuid
from collections import OrderedDict
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
def kill_phylo(): #FIXME, needs better way of handling this!
p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'protdist' in str(line):
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
class TargetSelection(AbsTargetSelectionTable):
step = 1
number_of_steps = 3
title = "SELECT RECEPTORS"
description = "Select receptors in the table (below) or browse the classification tree (right). You can select entire" \
+ " families or individual receptors.\n\nOnce you have selected all your receptors, click the green button."
docs = 'sequences.html#phylogeneric-trees'
selection_boxes = OrderedDict([
("reference", False),
("targets", True),
("segments", False),
])
buttons = {
"continue": {
"label": "Next",
"onclick": "submitSelection('/phylogenetic_trees/segmentselection', 3);",
"color": "success",
},
}
# class TargetSelection(AbsTargetSelection):
# step = 1
# number_of_steps = 3
# docs = 'sequences.html#phylogeneric-trees'
# selection_boxes = OrderedDict([
# ('targets', True),
# ('segments', False),
# ])
#
# buttons = {
# 'continue': {
# 'label': 'Continue to next step',
# 'url': '/phylogenetic_trees/segmentselection',
# 'color': 'success',
# 'onclick': 'return VerifyMinimumSelection(\'receptors\', 3);', # check for a minimum selection of 3 receptors
# },
# }
class SegmentSelection(AbsSegmentSelection):
step = 2
number_of_steps = 3
docs = 'sequences.html#phylogeneric-trees'
selection_boxes = OrderedDict([
('targets', True),
('segments', True),
])
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '/phylogenetic_trees/treesettings',
'color': 'success',
},
}
class TreeSettings(AbsMiscSelection):
step = 3
number_of_steps = 3
docs = 'sequences.html#phylogeneric-trees'
title = 'SELECT TREE OPTIONS'
description = 'Select options for tree generation in the middle column.\nOnce you have selected your' \
+ ' settings, click the green button.'
docs = '/documentation/similarities'
selection_boxes = OrderedDict([
('targets', True),
('segments', True),
])
buttons = {
'continue': {
'label': 'Calculate & draw tree',
'url': '/phylogenetic_trees/render_v3',
'color': 'success',
},
}
tree_settings = True
class Treeclass:
family = {}
def __init__(self):
self.Additional_info={"crystal": {"include":"False", "order":6, "colours":{"crystal_true":"#6dcde1","crystal_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":None, "child": None, "name":"Crystals"},
"class": {"include":"True", "order":0, "colours":{}, "proteins":[], "color_type":"grayscale", "parent":[], "child": ["family,ligand"], "name":"Class"},
"family": {"include":"True", "order":1, "colours":{}, "proteins":[], "color_type":"spectrum", "parent":[], "child": ["ligand"], "name":"Ligand type"},
"ligand": {"include":"True", "order":2, "colours":{}, "proteins":[], "color_type":"spectrum", "parent":["family","class"], "child": [], "name":"Receptor type"},
"mutant": {"include":"False", "order":3, "colours":{"mutant_true":"#6dcde1","mutant_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":[], "child": ["mutant_plus","mutant_minus"], "name":"Mutated proteins"},
"mutant_plus": {"include":"False", "order":4, "colours":{"mutant_plus_true":"#6dcde1","mutant_plus_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":"mutant", "child": [], "name":"Positive affinity mutants"},
"mutant_minus": {"include":"False", "order":5, "colours":{"mutant_minus_true":"#6dcde1","mutant_minus_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":"mutant", "child": [], "name":"Negative affinity mutants"}
}
self.buttons = [(x[1]['order'],x[1]['name']) for x in sorted(self.Additional_info.items(), key= lambda x: x[1]['order']) if x[1]['include']=='True']
self.family = {}
self.phylip = None
self.outtree = None
self.dir = ''
def Prepare_file(self, request,build=False):
self.Tree = PrepareTree(build)
a=Alignment()
sets = ProteinSet.objects.all()
#### Get additional data ####
crysts=[]
for n in sets:
if n.id==1:
for prot in n.proteins.all():
crysts.append(prot.entry_name)
#############################
# get the user selection from session
if build != False:
################################## FOR BUILDING STATISTICS ONLY##########################
build_proteins=[]
if build == '001':
for prot in Protein.objects.filter(sequence_type__slug='consensus', species_id=1):
if prot.family.slug.startswith('001') and len(prot.family.slug.split('_'))==3:
build_proteins.append(prot)
for rset in sets:
if rset.id==1:
for prot in rset.proteins.all():
if prot.family.slug.startswith('001_') and prot.species.latin_name=='Homo sapiens':
build_proteins.append(prot)
else:
for prot in Protein.objects.filter(sequence_type__slug='wt', species_id=1):
if prot.family.slug.startswith(build):
build_proteins.append(prot)
a.load_proteins(build_proteins)
segments = ProteinSegment.objects.all()
a.load_segments(segments)
self.bootstrap,self.UPGMA,self.branches,self.ttype=[0,1,1,0]
##################################################################
else:
simple_selection=request.session.get('selection', False)
a.load_proteins_from_selection(simple_selection, True)
a.load_segments_from_selection(simple_selection)
self.bootstrap,self.UPGMA,self.branches,self.ttype = map(int,simple_selection.tree_settings)
if self.bootstrap!=0:
self.bootstrap=pow(10,self.bootstrap)
#### Create an alignment object
a.build_alignment()
a.calculate_statistics()
a.calculate_similarity()
self.total = len(a.proteins)
total_length = 0
for chain in a.proteins[0].alignment:
total_length += len(a.proteins[0].alignment[chain])
families = ProteinFamily.objects.all()
self.famdict = {}
for n in families:
self.famdict[self.Tree.trans_0_2_A(n.slug)]=n.name
dirname = uuid.uuid4()
os.mkdir('/tmp/%s' %dirname)
infile = open('/tmp/%s/infile' %dirname,'w')
infile.write(' '+str(self.total)+' '+str(total_length)+'\n')
if len(a.proteins) < 3:
return 'More_prots',None, None, None, None,None,None,None,None
####Get additional protein information
accesions = {}
for n in a.proteins:
fam = self.Tree.trans_0_2_A(n.protein.family.slug)
if n.protein.sequence_type.slug == 'consensus':
fam+='_CON'
entry_name = n.protein.entry_name
name = n.protein.name.replace('<sub>','').replace('</sub>','').replace('<i>','').replace('</i>','')
if '&' in name and ';' in name:
name = name.replace('&','').replace(';',' ')
acc = n.protein.accession
if acc:
acc = acc.replace('-','_')
else:
acc = name
spec = str(n.protein.species)
fam += '_'+n.protein.species.common_name.replace(' ','_').upper()
desc = name
if entry_name in crysts:
if not fam in self.Additional_info['crystal']['proteins']:
self.Additional_info['crystal']['proteins'].append(fam)
if len(name)>25:
name=name[:25]+'...'
self.family[entry_name] = {'name':name,'family':fam,'description':desc,'species':spec,'class':'','accession':acc,'ligand':'','type':'','link': entry_name}
accesions[acc]=entry_name
####Write PHYLIP input
sequence = ''
for chain in n.alignment:
for residue in n.alignment[chain]:
sequence += residue[2].replace('_','-')
infile.write(acc+' '*9+sequence+'\n')
infile.close()
####Run bootstrap
if self.bootstrap:
### Write phylip input options
inp = open('/tmp/%s/temp' %dirname,'w')
inp.write('\n'.join(['r',str(self.bootstrap),'y','77','y'])+'\n')
inp.close()
###
try:
subprocess.check_output(['phylip seqboot<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=300)
os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
except:
kill_phylo() #FIXME, needs better way of handling this!
return "too big","too big","too big","too big","too big","too big","too big","too big","too big"
### Write phylip input options
inp = open('/tmp/%s/temp' %dirname,'w')
if self.bootstrap:
inp.write('\n'.join(['m','d',str(self.bootstrap),'y'])+'\n')
else:
inp.write('y\n')
inp.close()
###
try:
subprocess.check_output(['phylip protdist<temp>>log'], shell=True, cwd = '/tmp/%s' %dirname, timeout=300)
except:
kill_phylo() #FIXME, needs better way of handling this!
return "too big","too big","too big","too big","too big","too big","too big","too big","too big"
os.rename('/tmp/%s/infile' %dirname, '/tmp/%s/dupa' %dirname)
os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
inp = open('/tmp/%s/temp' %dirname,'w')
if self.bootstrap:
### Write phylip input options
if self.UPGMA:
inp.write('\n'.join(['N','m',str(self.bootstrap),'111','y'])+'\n')
else:
inp.write('\n'.join(['m',str(self.bootstrap),'111','y'])+'\n')
else:
if self.UPGMA:
inp.write('N\ny\n')
else:
inp.write('y\n')
inp.close()
###
try:
subprocess.check_output(['phylip neighbor<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=300)
except:
kill_phylo() #FIXME, needs better way of handling this!
            return "too big","too big","too big","too big","too big","too big","too big","too big","too big"
if self.bootstrap:
os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
os.rename('/tmp/%s/outtree' %dirname, '/tmp/%s/intree' %dirname)
### Write phylip input options
inp = open('/tmp/%s/temp' %dirname,'w')
inp.write('y\n')
inp.close()
###
try:
subprocess.check_output(['phylip consense<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=300)
except:
kill_phylo() #FIXME, needs better way of handling this!
                return "too big","too big","too big","too big","too big","too big","too big","too big","too big"
self.phylip = open('/tmp/%s/outtree' %dirname).read()
for acc in accesions.keys():
self.phylip=self.phylip.replace(acc,accesions[acc])
# self.phylogeny_output = self.phylip
self.outtree = open('/tmp/%s/outfile' %dirname).read().lstrip()
phylogeny_input = self.get_phylogeny('/tmp/%s/' %dirname)
shutil.rmtree('/tmp/%s' %dirname)
if build != False:
open('static/home/images/'+build+'_legend.svg','w').write(str(self.Tree.legend))
open('static/home/images/'+build+'_tree.xml','w').write(phylogeny_input)
else:
return phylogeny_input, self.branches, self.ttype, self.total, str(self.Tree.legend), self.Tree.box, self.Additional_info, self.buttons, a.proteins
def get_phylogeny(self, dirname):
self.Tree.treeDo(dirname, self.phylip,self.branches,self.family,self.Additional_info, self.famdict)
phylogeny_input = open('%s/out.xml' %dirname,'r').read().replace('\n','')
return phylogeny_input
def get_data(self):
return self.branches, self.ttype, self.total, str(self.Tree.legend), self.Tree.box, self.Additional_info, self.buttons
# DEPRECATED CODE - can be cleaned up
def get_buttons(request):
Tree_class=request.session['Tree']
buttons = [(x[1]['order'],x[1]['name']) for x in sorted(Tree_class.Additional_info.items(), key= lambda x: x[1]['order']) if x[1]['include']=='True']
return render(request, 'phylogenetic_trees/ring_buttons.html', {'but':buttons })
# DEPRECATED CODE - can be cleaned up
def modify_tree(request):
try:
shutil.rmtree('/tmp/modify')
except:
pass
arg = request.GET.getlist('arg[]')
value = request.GET.getlist('value[]')
Tree_class=request.session['Tree']
for n in range(len(arg)):
Tree_class.Additional_info[arg[n].replace('_btn','')]['include']=value[n]
request.session['Tree']=Tree_class
os.mkdir('/tmp/modify')
phylogeny_input = Tree_class.get_phylogeny('/tmp/modify')
branches, ttype, total, legend, box, Additional_info, buttons=Tree_class.get_data()
shutil.rmtree('/tmp/modify')
if ttype == '1':
        count = float(total)/4*100
else:
count = 1900 - 1400/math.sqrt(float(total))
return render(request, 'phylogenetic_trees/main.html', {'phylo': phylogeny_input, 'branch':branches, 'ttype': ttype, 'count':count, 'leg':legend, 'b':box, 'add':Additional_info, 'but':buttons, 'phylip':Tree_class.phylip, 'outtree':Tree_class.outtree})
# DEPRECATED CODE - can be cleaned up
def render_tree(request):
Tree_class=Treeclass()
phylogeny_input, branches, ttype, total, legend, box, Additional_info, buttons, proteins=Tree_class.Prepare_file(request)
if phylogeny_input == 'too big':
return render(request, 'phylogenetic_trees/too_big.html')
if phylogeny_input == 'More_prots':
return render(request, 'phylogenetic_trees/warning.html')
if ttype == '1':
        count = float(total)/4*100
else:
count = 1900 - 1400/math.sqrt(float(total))
request.session['Tree']=Tree_class
return render(request, 'phylogenetic_trees/alignment.html', {'phylo': phylogeny_input, 'branch':branches, 'ttype': ttype, 'count':count, 'leg':legend, 'b':box, 'add':Additional_info, 'but':buttons, 'phylip':Tree_class.phylip, 'outtree':Tree_class.outtree })
# DEPRECATED CODE - can be cleaned up
def render_tree_v2(request):
Tree_class=Treeclass()
phylogeny_input, branches, ttype, total, legend, box, Additional_info, buttons, proteins=Tree_class.Prepare_file(request)
if phylogeny_input == 'too big':
return render(request, 'phylogenetic_trees/too_big.html')
if phylogeny_input == 'More_prots':
return render(request, 'phylogenetic_trees/warning.html')
protein_data = []
#FIXME remove
import random
for pc in proteins:
v = {}
p = pc.protein
v['name'] = p.entry_name
v['GPCR_class'] = p.family.parent.parent.parent.name
v['selectivity'] = ["Gq/G11 family"]
v['ligand_type'] = p.family.parent.parent.name
v['coverage'] = random.uniform(0, 1)
v['receptor_page'] = ''
print(v)
protein_data.append(v)
request.session['Tree']=Tree_class
context = {}
context['phylip'] = Tree_class.phylip.replace('\n', '')
context['protein_data'] = protein_data
return render(request, 'phylogenetic_trees/display.html', context)
# TODO: move this to seqsign
@csrf_exempt
def signature_selection(request):
# create full selection and import simple selection (if it exists)
simple_selection = request.session.get('selection', False)
selection_pos = Selection()
selection_pos.importer(deepcopy(simple_selection))
selection_pos.clear('targets')
selection_neg = Selection()
selection_neg.importer(deepcopy(simple_selection))
selection_neg.clear('targets')
if 'group1' in request.POST and 'group2' in request.POST:
up_names = request.POST['group1'].split('\r')
for up_name in up_names:
try:
selection_object = SelectionItem('protein', Protein.objects.get(entry_name=up_name.strip().lower()))
selection_pos.add('targets', 'protein', selection_object)
except:
continue
down_names = request.POST['group2'].split('\r')
for down_name in down_names:
try:
selection_object = SelectionItem('protein', Protein.objects.get(entry_name=down_name.strip().lower()))
selection_neg.add('targets', 'protein', selection_object)
except:
continue
# Set both the positive and negative target selections
request.session['targets_pos'] = selection_pos.exporter()
request.session['selection'] = selection_neg.exporter()
return JsonResponse({"response": "ok"})
else:
return JsonResponse({"response": "error"})
def render_tree_v3(request):
# Verify the user selection from session and if not present redirect
simple_selection = request.session.get('selection', False)
if simple_selection == False or not simple_selection.targets :
return redirect("/phylogenetic_trees/targetselection")
Tree_class=Treeclass()
phylogeny_input, branches, ttype, total, legend, box, Additional_info, buttons, proteins=Tree_class.Prepare_file(request)
if phylogeny_input == 'too big':
return render(request, 'phylogenetic_trees/too_big.html')
if phylogeny_input == 'More_prots':
return render(request, 'phylogenetic_trees/warning.html')
request.session['Tree'] = Tree_class
# output dictionary
data = {}
data['tree'] = Tree_class.phylip.replace('\n', '')
# context['protein_data'] = protein_data
protein_entries = []
for pc in proteins:
protein_entries.append(pc.protein.entry_name)
# Collect structure annotations
protein_annotations = {}
# Grab all annotations and all the ligand role when present in aggregates
# NOTE: we can probably remove the parent step and go directly via family in the query
annotations = Protein.objects.filter(entry_name__in=protein_entries) \
.values_list('entry_name', 'name', 'family__parent__name', 'family__parent__parent__name', 'family__parent__parent__parent__name', 'family__slug', 'name')
protein_slugs = set()
for an in annotations:
protein_annotations[an[0]] = list(an[1:])
# add slug to lists
slug = protein_annotations[an[0]][4]
protein_slugs.add(slug)
data['annotations'] = protein_annotations
# Grab G-protein coupling profile for all receptors covered by the selection
# TODO: make general cache(s) for these kinds of data
selectivitydata = {}
coupling = ProteinCouplings.objects.filter(protein__family__slug__in=protein_slugs, source="GuideToPharma").values_list('protein__family__slug', 'transduction').annotate(arr=ArrayAgg('g_protein__name'))
for pairing in coupling:
if pairing[0] not in selectivitydata:
selectivitydata[pairing[0]] = {}
selectivitydata[pairing[0]][pairing[1]] = pairing[2]
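    # Resulting structure (illustrative): {<protein family slug>: {<transduction>: [<G protein names>, ...]}}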
data['Gprot_coupling'] = selectivitydata
context = {}
context["data"] = json.dumps(data)
return render(request, 'phylogenetic_trees/phylo_tree.html', context)
| protwis/protwis | phylogenetic_trees/views.py | Python | apache-2.0 | 21,508 | ["CRYSTAL"] | 3fcda4d13494071054a82d9e6c0a08aac1bc580931228b47a04fc99f29798a4f |
"""
Set of programs and tools to read the outputs from RH, 1.5D version
"""
import os
import warnings
import datetime
import numpy as np
import xarray as xr
import h5py
import netCDF4
from io import StringIO
from astropy import units
class Rh15dout:
"""
Class to load and manipulate output from RH 1.5D.
"""
def __init__(self, fdir='.', verbose=True, autoread=True):
self.files = []
self.params = {}
self.verbose = verbose
self.fdir = fdir
if autoread:
for outfile in ["output_aux", "output_indata"]:
OUTFILE = os.path.join(self.fdir, "%s.hdf5" % (outfile))
self.read_groups(OUTFILE)
RAYFILE = os.path.join(self.fdir, "output_ray.hdf5")
self.read_ray(RAYFILE)
def read_groups(self, infile):
''' Reads indata file, group by group. '''
if not os.path.isfile(infile): # See if netCDF file exists
infile = os.path.splitext(infile)[0] + '.ncdf'
if not os.path.isfile(infile):
return
GROUPS = netCDF4.Dataset(infile).groups.keys()
for g in GROUPS:
setattr(self, g, xr.open_dataset(infile, group=g, lock=None))
self.files.append(getattr(self, g))
if self.verbose:
print(('--- Read %s file.' % infile))
def read_ray(self, infile=None):
''' Reads ray file. '''
if infile is None:
infile = '%s/output_ray.hdf5' % self.fdir
if not os.path.isfile(infile): # See if netCDF file exists
infile = os.path.splitext(infile)[0] + '.ncdf'
if not os.path.isfile(infile):
return
self.ray = xr.open_dataset(infile, lock=None)
self.files.append(self.ray)
if self.verbose:
print(('--- Read %s file.' % infile))
def close(self):
''' Closes the open files '''
for f in self.files:
f.close()
def __del__(self):
self.close()
class HDF5Atmos:
"""
Class to load and manipulate RH 1.5D input atmosphere files in HDF5.
"""
def __init__(self, infile):
self.file = read_hdf5(self, infile)
self.closed = False
def close(self):
try:
self.file.close()
self.closed = True
except RuntimeError:
print('(WWW) HDF5Atmos: input file already closed.')
def read(self, infile):
if not self.closed:
self.close()
self.file = read_hdf5(self, infile)
def write_multi(self, outfile, xi, yi, nti=0, writeB=False,
write_dscale=False, zcut=0, depth_optimise=False):
'''
Writes MULTI atmosphere file from a column of the 3D model,
in RH 1.5D HDF5 format. Also writes the binary XDR file with magnetic
fields, if writeB is true.
'''
from .multi import watmos_multi
from .rh import write_B
writeB = writeB and self.params['has_B']
# if only total H available, will have to use rhpy (which is sometimes
# risky...)
if self.params['nhydr'] == 1:
try:
import rhpy
except ImportError:
                raise ValueError("This function depends on rhpy, which is not"
                                 " installed on this system.")
nh = rhpy.nh_lte(self.temperature[nti, xi, yi, zcut:].astype('Float64'),
self.electron_density[
nti, xi, yi, zcut:].astype('Float64'),
self.hydrogen_populations[
nti, 0, xi, yi, zcut:].astype('Float64'))
elif self.params['nhydr'] == 6:
nh = self.hydrogen_populations[nti, :, xi, yi, zcut:]
else:
raise ValueError("(EEE) write_multi: found %i hydrogen levels."
" For multi, need 6 or 1 " % self.params['nhydr'])
M_TO_CM3 = (units.m**-3).to('1 / cm3')
M_TO_KM = units.m.to('km')
temp = self.temperature[nti, xi, yi, zcut:].copy()
ne = self.electron_density[nti, xi, yi, zcut:].copy() / M_TO_CM3
if len(self.z.shape) > 2:
self.z = self.z[:, xi, yi]
z = self.z[nti, zcut:].copy() * M_TO_KM * 1.e5 # in cm
vz = self.velocity_z[nti, xi, yi, zcut:].copy() * M_TO_KM
nh = nh / M_TO_CM3
if writeB:
bx = self.B_x[nti, xi, yi, zcut:].copy()
by = self.B_y[nti, xi, yi, zcut:].copy()
bz = self.B_z[nti, xi, yi, zcut:].copy()
else:
bx = by = bz = None
if depth_optimise:
rho = self.hydrogen_populations[
nti, 0, xi, yi, zcut:] * 2.380491e-24 / M_TO_CM3
res = depth_optim(z, temp, ne, vz, rho, nh=nh, bx=bx, by=by, bz=bz)
z, temp, ne, vz, rho, nh = res[:6]
if writeB:
bx, by, bz = res[6:]
watmos_multi(outfile, temp, ne, z * 1e-5, vz=vz, nh=nh,
write_dscale=write_dscale,
id='%s txy-slice: (t,x,y) = (%i,%i,%i)' %
(self.params['description'], nti, xi, yi))
if writeB:
write_B('%s.B' % outfile, bx, by, bz)
print(('--- Wrote magnetic field to %s.B' % outfile))
def write_multi_3d(self, outfile, nti=0, sx=None, sy=None, sz=None,
big_endian=False):
''' Writes atmosphere in multi_3d format (the same as the
pre-Jorrit multi3d) '''
from . import multi
ul = units.m.to('cm')
uv = (units.m / units.s).to('km / s')
# slicing and unit conversion
if sx is None:
sx = [0, self.nx, 1]
if sy is None:
sy = [0, self.ny, 1]
if sz is None:
sz = [0, self.nz, 1]
if self.params['nhydr'] > 1:
nh = np.mean(self.hydrogen_populations[nti, :, sx[0]:sx[1]:sx[2],
sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]], axis=1) / (ul**3)
else:
nh = self.hydrogen_populations[nti, 0, sx[0]:sx[1]:sx[2],
sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] / (ul**3)
rho = nh * 2.380491e-24 # nH to rho [g cm-3]
x = self.x[sx[0]:sx[1]:sx[2]] * ul
y = self.y[sy[0]:sy[1]:sy[2]] * ul
z = self.z[nti, sz[0]:sz[1]:sz[2]] * ul
ne = self.electron_density[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] / (ul**3)
temp = self.temperature[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]]
vz = self.velocity_z[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] * uv
# write to file
multi.write_atmos3d(outfile, x, y, z, ne, temp, vz, rho=rho,
big_endian=big_endian)
class DataHolder:
def __init__(self):
pass
class AtomFile:
"""
Class to hold data from an RH or MULTI atom file.
Parameters
----------
filename: str
String with atom file name.
format: str
Can be 'RH' (default) or 'MULTI'.
"""
def __init__(self, filename, format='RH'):
self.read_atom(filename, format)
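    # Usage sketch (hypothetical atom file name):
    # >>> atom = AtomFile('CaII.atom', format='RH')
    # >>> atom.nlevel, atom.nline, atom.ncont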
@staticmethod
def read_atom_levels(data, format='RH'):
"""
Reads levels part of atom file
"""
tmp = []
dtype=[('energy', 'f8'), ('g_factor', 'f8'),('label', '|U30'),
('stage', 'i4'), ('level_no','i4')]
if format.upper() == "RH":
extra_cols = 2
elif format.upper() == "MULTI":
extra_cols = 1
dtype = dtype[:-1]
else:
raise ValueError("Format must be RH or MULTI")
for line in data:
buf = line.split("'")
assert len(buf) == 3
tmp.append(tuple(buf[0].split() +
[buf[1].strip()] + buf[2].split()[:extra_cols]))
return np.array(tmp, dtype=dtype)
def read_atom(self, filename, format='RH'):
self.format = format.upper()
data = []
counter = 0
with open(filename, 'r') as atom_file:
for line in atom_file:
tmp = line.strip()
# clean up comments and blank lines
if not tmp:
continue
if tmp[0] in ['#', '*']:
continue
data.append(tmp)
self.element = data[counter]
counter += 1
if self.format == 'RH':
self.units = {'level_energies': units.Unit('J m / cm'),
'line_wavelength': units.Unit('nm'),
'line_stark': units.Unit('m'),
'continua_photoionisation': units.Unit('m2'),
'continua_wavelength': units.Unit('nm'),
'collision_cross_sections': units.Unit('m3')}
elif self.format == 'MULTI':
self.units = {'level_energies': units.Unit('J m / cm'),
'line_wavelength': units.Unit('Angstrom'),
'line_stark': units.Unit('cm'),
'continua_photoionisation': units.Unit('cm2'),
'continua_wavelength': units.Unit('Angstrom'),
'collision_cross_sections': units.Unit('cm3')}
self.abund = data[counter].split()[0]
self.atomic_weight = data[counter].split()[1]
counter += 1
else:
raise ValueError("Unsupported atom format " + format)
nlevel, nline, ncont, nfixed = np.array(data[counter].split(), dtype='i')
self.nlevel = nlevel
self.nline = nline
self.ncont = ncont
self.nfixed = nfixed
counter += 1
# read levels
self.levels = self.read_atom_levels(data[counter:counter + nlevel],
self.format)
counter += nlevel
# read lines
tmp = StringIO('\n'.join(data[counter:counter + nline]))
if self.format == "RH":
data_type = [('level_start', 'i4'), ('level_end', 'i4'),
('f_value', 'f8'), ('type', 'U10'), ('nlambda', 'i'),
('symmetric', 'U10'), ('qcore', 'f8'), ('qwing', 'f8'),
('vdApprox', 'U10'), ('vdWaals', 'f8', (4,)),
('radiative_broadening', 'f8'),
('stark_broadening', 'f8')]
elif self.format == "MULTI":
data_type = [('level_start', 'i4'), ('level_end', 'i4'),
('f_value', 'f8'), ('nlambda', 'i'),
('qwing', 'f8'), ('qcore', 'f8'), ('iw', 'i4'),
('radiative_broadening', 'f8'),
('vdWaals', 'f8', (1,)), ('stark_broadening', 'f8'),
('type', 'U10')]
self.lines = np.genfromtxt(tmp, dtype=data_type)
counter += nline
# read continua
self.continua = []
for _ in range(ncont):
line = data[counter].split()
counter += 1
result = {}
result['level_start'] = int(line[0])
result['level_end'] = int(line[1])
result['edge_cross_section'] = float(line[2])
result['nlambda'] = int(line[3])
if self.format == "RH":
result['wavelength_dependence'] = line[4].upper()
result['wave_min'] = float(line[5])
elif self.format == "MULTI":
if float(line[4]) > 0:
result['wavelength_dependence'] = "HYDROGENIC"
else:
result['wavelength_dependence'] = "EXPLICIT"
if result['wavelength_dependence'] == 'EXPLICIT':
tmp = '\n'.join(data[counter:counter + result['nlambda']])
counter += result['nlambda']
result['cross_section'] = np.genfromtxt(StringIO(tmp))
self.continua.append(result)
# read fixed transitions
self.fixed_transitions = []
for _ in range(nfixed):
line = data[counter].split()
counter += 1
result = {}
result['level_start'] = int(line[0])
result['level_end'] = int(line[1])
result['strength'] = float(line[2])
result['trad'] = float(line[3])
result['trad_option'] = line[4]
self.fixed_transitions.append(result)
# read collisions
### IN MULTI FORMAT COLLISIONS START WITH GENCOL
### Also in MULTI, must merge together lines that are written in
### free format (ie, not prefixed by OMEGA, CE, etc...)
self.collision_temperatures = []
self.collision_tables = []
# Keys for rates given as function of temperature
COLLISION_KEYS_TEMP = ['OHMEGA', 'OMEGA', 'CE', 'CI', 'CP', 'CH',
'CH0', 'CH+', 'CR', 'TEMP']
# Keys for rates written as single line
COLLISION_KEYS_LINE = ['AR85-CEA', 'AR85-CHP', 'AR85-CHH', 'SHULL82',
'BURGESS', 'SUMMERS']
COLLISION_KEYS_OTHER = ['AR85-CDI', 'BADNELL']
ALL_KEYS = (COLLISION_KEYS_TEMP + COLLISION_KEYS_LINE +
COLLISION_KEYS_OTHER)
SINGLE_KEYS = ['GENCOL', 'END']
if self.format == 'MULTI': # merge lines in free FORMAT
collision_data = []
while counter < len(data):
line = data[counter]
key = data[counter].split()[0].upper().strip()
if key in ALL_KEYS:
tmp = line
while True:
counter += 1
key = data[counter].split()[0].upper().strip()
if key in ALL_KEYS + SINGLE_KEYS:
collision_data.append(tmp)
break
else:
tmp += ' ' + data[counter]
elif key in SINGLE_KEYS:
collision_data.append(line)
counter += 1
else:
collision_data = data[counter:]
unread_lines = False
counter = 0
while counter < len(collision_data) - 1:
line = collision_data[counter].split()
key = line[0].upper()
result = {}
if key == 'END':
break
elif key == 'TEMP':
temp_tmp = np.array(line[2:]).astype('f')
self.collision_temperatures.append(temp_tmp)
# Collision rates given as function of temperature
elif key in COLLISION_KEYS_TEMP:
assert self.collision_temperatures, ('No temperature block'
' found before %s table' % (key))
ntemp = len(self.collision_temperatures[-1])
result = {'type': key, 'level_start': int(line[1]),
'level_end': int(line[2]),
'temp_index': len(self.collision_temperatures) - 1,
'data': np.array(line[3:3 + ntemp]).astype('d')} # this will not work in MULTI
assert len(result['data']) == len(temp_tmp), ('Inconsistent '
'number of points between temperature and collision table')
elif key in COLLISION_KEYS_LINE:
if key == "SUMMERS":
result = {'type': key, 'data': float(line[1])}
else:
result = {'type': key, 'level_start': int(line[1]),
'level_end': int(line[2]),
'data': np.array(line[2:]).astype('f')}
elif key in ["AR85-CDI", "BADNELL"]:
assert len(line) >= 4, '%s must have >3 elements' % key
result = {'type': key, 'level_start': int(line[1]),
'level_end': int(line[2])}
if key == "BADNELL":
rows = 2
else:
rows = int(line[3])
if self.format == 'MULTI': # All values in one line
tmp = np.array(line[4:]).astype('d')
assert tmp.shape[0] % rows == 0, ('Inconsistent number of'
' data points for %s' % key)
result['data'] = tmp.reshape((rows, tmp.shape[0] // rows))
counter += 1
else: # For RH, values written in matrix form
tmp = data[counter + 1: counter + 1 + rows]
result['data'] = np.array([l.split() for l in tmp]).astype('d')
counter += rows
elif key == "GENCOL":
pass
else:
unread_lines = True
if result:
self.collision_tables.append(result)
counter += 1
if unread_lines:
warnings.warn("Some lines in collision section were not understood",
RuntimeWarning)
def read_hdf5(inclass, infile):
"""
Reads HDF5/netCDF4 file into inclass, instance of any class.
Variables are read into class attributes, dimensions and attributes
are read into params dictionary.
"""
if not os.path.isfile(infile):
raise IOError('read_hdf5: File %s not found' % infile)
f = h5py.File(infile, mode='r')
if 'params' not in dir(inclass):
inclass.params = {}
# add attributes
attrs = [a for a in f.attrs]
for att in f.attrs:
try:
inclass.params[att] = f.attrs[att]
except OSError: # catch errors where h5py cannot read UTF-8 strings
pass
# add variables and groups
for element in f:
name = element.replace(' ', '_') # sanitise string for spaces
if type(f[element]) == h5py._hl.dataset.Dataset:
setattr(inclass, name, f[element])
# special case for netCDF dimensions, add them to param list
if 'NAME' in f[element].attrs:
if f[element].attrs['NAME'][:20] == b'This is a netCDF dim':
inclass.params[element] = f[element].shape[0]
if type(f[element]) == h5py._hl.group.Group:
setattr(inclass, name, DataHolder())
cur_class = getattr(inclass, name)
cur_class.params = {}
for variable in f[element]: # add group variables
vname = variable.replace(' ', '_')
setattr(cur_class, vname, f[element][variable])
for att in f[element].attrs: # add group attributes
cur_class.params[att] = f[element].attrs[att]
return f
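# Usage sketch (hypothetical file name; variables and attributes end up on the
# passed-in instance, as described in the docstring above):
# holder = DataHolder()
# fobj = read_hdf5(holder, 'output_aux.hdf5')
# print(holder.params)   # file attributes and netCDF dimensions
# fobj.close()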
def make_xarray_atmos(outfile, T, vz, z, nH=None, x=None, y=None, Bz=None, By=None,
Bx=None, rho=None, ne=None, vx=None, vy=None, vturb=None,
desc=None, snap=None, boundary=None, append=False):
"""
Creates HDF5 input file for RH 1.5D using xarray.
Parameters
----------
outfile : string
Name of destination. If file exists it will be wiped.
T : n-D array
Temperature in K. Its shape will determine the output
dimensions. Shape is generally (nt, nx, ny, nz), but any
dimensions except nz can be omitted. Therefore the array can
        be 1D, 2D, 3D, or 4D, but ultimately it will always be saved as 4D.
vz : n-D array
Line of sight velocity in m/s. Same shape as T.
z : n-D array
Height in m. Can have same shape as T (different height scale
for each column) or be only 1D (same height for all columns).
nH : n-D array, optional
Hydrogen populations in m^-3. Shape is (nt, nhydr, nx, ny, nz),
where nt, nx, ny can be omitted but must be consistent with
the shape of T. nhydr can be 1 (total number of protons) or
more (level populations). If nH is not given, rho must be given!
ne : n-D array, optional
Electron density in m^-3. Same shape as T.
rho : n-D array, optional
Density in kg m^-3. Same shape as T. Only used if nH is not given.
vx : n-D array, optional
x velocity in m/s. Same shape as T. Not in use by RH 1.5D.
vy : n-D array, optional
y velocity in m/s. Same shape as T. Not in use by RH 1.5D.
vturb : n-D array, optional
        Turbulent velocity (Microturbulence) in m/s. Not usually needed
for MHD models, and should only be used when a depth dependent
microturbulence is needed (constant microturbulence can be added
in RH).
Bx : n-D array, optional
Magnetic field in x dimension, in Tesla. Same shape as T.
By : n-D array, optional
Magnetic field in y dimension, in Tesla. Same shape as T.
Bz : n-D array, optional
Magnetic field in z dimension, in Tesla. Same shape as T.
x : 1-D array, optional
Grid distances in m. Same shape as first index of T.
y : 1-D array, optional
Grid distances in m. Same shape as second index of T.
snap : array-like, optional
Snapshot number(s).
desc : string, optional
Description of file
boundary : Tuple, optional
Tuple with [bottom, top] boundary conditions. Options are:
0: Zero, 1: Thermalised, 2: Reflective.
append : boolean, optional
If True, will append to existing file (if any).
"""
data = {'temperature': [T, 'K'],
'velocity_z': [vz, 'm / s'],
'velocity_y': [vy, 'm / s'],
'velocity_x': [vx, 'm / s'],
'electron_density': [ne, '1 / m3'],
'hydrogen_populations': [nH, '1 / m3'],
'density': [rho, 'kg / m3'],
'B_x': [Bx, 'T'],
'B_y': [By, 'T'],
'B_z': [Bz, 'T'],
'velocity_turbulent': [vturb, 'm / s'],
'x': [x, 'm'],
'y': [y, 'm'],
'z': [z, 'm']}
VARS4D = ['temperature', 'B_x', 'B_y', 'B_z', 'density', 'velocity_x',
'velocity_y', 'velocity_z', 'velocity_turbulent', 'density',
'electron_density']
# Remove variables not given
data = {key: data[key] for key in data if data[key][0] is not None}
if (nH is None) and (rho is None):
raise ValueError("Missing nH or rho. Need at least one of them")
if (append and not os.path.isfile(outfile)):
append = False
idx = [None] * (4 - len(T.shape)) + [Ellipsis] # empty axes for 1D/2D/3D
for var in data:
if var not in ['x', 'y']: # these are always 1D
data[var][0] = data[var][0][idx]
if len(data['temperature'][0].shape) != 4:
raise ValueError('Invalid shape for T')
nt, nx, ny, nz = data['temperature'][0].shape
if boundary is None:
boundary = [1, 0]
if snap is None:
data['snapshot_number'] = [np.arange(nt, dtype='i4'), '']
else:
data['snapshot_number'] = [np.array([snap], dtype='i4'), '']
if not append:
variables = {}
coordinates = {}
for v in data:
if v in VARS4D:
variables[v] = (('snapshot_number', 'x', 'y', 'depth'),
data[v][0], {'units': data[v][1]})
elif v == 'hydrogen_populations':
variables[v] = (('snapshot_number', 'nhydr', 'x', 'y', 'depth'),
data[v][0], {'units': data[v][1]})
elif v == 'z':
dims = ('snapshot_number', 'depth')
if len(data[v][0].shape) == 1: # extra dim for nt dependency
data[v][0] = data[v][0][None, :]
elif len(data[v][0].shape) == 4:
dims = ('snapshot_number', 'x', 'y', 'depth')
coordinates[v] = (dims, data[v][0], {'units': data[v][1]})
elif v in ['x', 'y', 'snapshot_number']:
coordinates[v] = ((v), data[v][0], {'units': data[v][1]})
attrs = {"comment": ("Created with make_xarray_atmos "
"on %s" % datetime.datetime.now()),
"boundary_top": boundary[1], "boundary_bottom": boundary[0],
"has_B": int(Bz is not None), "description": str(desc),
"nx": nx, "ny": ny, "nz": nz, "nt": nt}
data = xr.Dataset(variables, coordinates, attrs)
data.to_netcdf(outfile, mode='w', format='NETCDF4',
unlimited_dims=('snapshot_number'))
else: # use h5py to append existing file
rootgrp = h5py.File(outfile, mode='a')
nti = int(rootgrp.attrs['nt'])
#rootgrp.attrs['nt'] = nti + nt # add appended number of snapshots
for var in data:
if var in VARS4D + ['hydrogen_populations', 'z', 'snapshot_number']:
rootgrp[var].resize(nti + nt, axis=0)
rootgrp[var][nti:nti + nt] = data[var][0][:]
rootgrp.close()
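# Minimal usage sketch (toy arrays; shapes follow the docstring, (nt, nx, ny, nz)
# for T and (nt, nhydr, nx, ny, nz) for nH; the output file name is hypothetical):
# T = 6000. * np.ones((1, 4, 4, 100))       # K
# vz = np.zeros_like(T)                     # m / s
# z = np.linspace(2.0e6, 0.0, 100)          # m, same height scale for all columns
# nH = 1e20 * np.ones((1, 1, 4, 4, 100))    # m^-3, total proton density only
# make_xarray_atmos('toy_atmos.hdf5', T, vz, z, nH=nH, desc='toy model')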
def depth_optim(height, temp, ne, vz, rho, nh=None, bx=None, by=None, bz=None,
tmax=5e4):
"""
Performs depth optimisation of one single column (as per multi_3d).
IN:
height [cm]
temp [K]
ne [cm-3]
vz [any]
rho [g cm-3]
nh [any] (optional)
bx,by,bz [any] (optional)
tmax [K] maximum temperature of the first point
"""
from scipy.integrate import cumtrapz
import scipy.interpolate as interp
import astropy.constants as const
ndep = len(height)
# calculate optical depth from H-bf only
taumax = 100
grph = 2.26e-24 # grams per hydrogen atom
crhmbf = 2.9256e-17
    ee = const.e.si.value * 1e7
    bk = const.k_B.cgs.value
xhbf = 1.03526e-16 * ne * crhmbf / temp**1.5 * \
np.exp(0.754 * ee / bk / temp) * rho / grph
tau = np.concatenate(([0.], cumtrapz(xhbf, -height)))
idx = (tau < taumax) & (temp < tmax)
# find maximum variance of T, rho, and tau for each depth
tt = temp[idx]
rr = rho[idx]
ta = tau[idx]
tdiv = np.abs(np.log10(tt[1:]) - np.log10(tt[:-1])) / np.log10(1.1)
rdiv = np.abs(np.log10(rr[1:]) - np.log10(rr[:-1])) / np.log10(1.1)
taudiv = np.abs(np.log10(ta[1:]) - np.log10(ta[:-1])) / 0.1
taudiv[0] = 0.
aind = np.concatenate(
([0.], np.cumsum(np.max(np.array([tdiv, rdiv, taudiv]), axis=0))))
aind *= (ndep - 1) / aind[-1]
# interpolate new height so it is constant in aind2
nheight = interp.splev(np.arange(ndep), interp.splrep(
aind, height[idx], k=3, s=0), der=0)
# interpolate quantities for new depth scale
ntemp = np.exp(interp.splev(nheight, interp.splrep(height[::-1], np.log(temp[::-1]),
k=3, s=0), der=0))
nne = np.exp(interp.splev(nheight, interp.splrep(height[::-1], np.log(ne[::-1]),
k=3, s=0), der=0))
nrho = np.exp(interp.splev(nheight, interp.splrep(height[::-1], np.log(rho[::-1]),
k=3, s=0), der=0))
nvz = interp.splev(nheight, interp.splrep(height[::-1], vz[::-1],
k=3, s=0), der=0)
result = [nheight, ntemp, nne, nvz, nrho]
if nh is not None:
for k in range(nh.shape[0]):
nh[k] = np.exp(interp.splev(nheight,
interp.splrep(height[::-1],
np.log(nh[k, ::-1]), k=3,
s=0), der=0))
result += [nh]
if bx is not None:
nbx = interp.splev(nheight, interp.splrep(
height[::-1], bx[::-1], k=3, s=0), der=0)
nby = interp.splev(nheight, interp.splrep(
height[::-1], by[::-1], k=3, s=0), der=0)
nbz = interp.splev(nheight, interp.splrep(
height[::-1], bz[::-1], k=3, s=0), der=0)
result += [nbx, nby, nbz]
return result
def make_wave_file(outfile, start=None, end=None, step=None, new_wave=None,
ewave=None, air=True):
"""
Writes RH wave file (in xdr format). All wavelengths should be in nm.
Parameters
----------
start: number
Starting wavelength.
end: number
Ending wavelength (non-inclusive)
step: number
Wavelength separation
new_wave: 1D array
Alternatively to start/end, one can specify an array of
wavelengths here.
outfile: string
Name of file to write.
ewave: 1-D array, optional
        Array of existing wavelengths. Points of the new grid that fall
        closer than 0.375 * step to any of these existing wavelengths will
        be discarded, so the step is also respected with regard to them.
air: boolean, optional
If true, will at the end convert the wavelengths into vacuum
wavelengths.
"""
import xdrlib
from specutils.utils.wcs_utils import air_to_vac
    if new_wave is None:
        if None in [start, end, step]:
            raise ValueError('Must specify either new_wave, or start, end, '
                             'step. Stopping.')
        new_wave = np.arange(start, end, step)
if step is None:
step = np.median(np.diff(new_wave))
if ewave is not None: # ensure step is kept at most times
keepers = []
for w in new_wave:
if np.min(np.abs(w - ewave)) > step * 0.375:
keepers.append(w)
new_wave = np.array(keepers)
if air:
# RH uses Edlen (1966) to convert from vacuum to air
new_wave = air_to_vac(new_wave * units.nm, method='edlen1966',
scheme='iteration').value
# write file
p = xdrlib.Packer()
nw = len(new_wave)
p.pack_int(nw)
p.pack_farray(nw, new_wave.astype('d'), p.pack_double)
f = open(outfile, 'wb')
f.write(p.get_buffer())
f.close()
print(("Wrote %i wavelengths to file." % nw))
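# Usage sketch (hypothetical output file; wavelengths are in nm, as noted above):
# make_wave_file('wave_ca.wave', start=392.0, end=398.0, step=0.01)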
def read_wave_file(infile):
"""
Reads RH wavelength file.
Parameters
----------
infile : str
Name of wavelength file to read.
Returns
-------
wave : array
Wavelength from file.
"""
import xdrlib
import io
from .rh import read_xdr_var
f = io.open(infile, 'rb')
buf = xdrlib.Unpacker(f.read())
f.close()
nw = read_xdr_var(buf, 'i')
return read_xdr_var(buf, ('d', (nw,)))
def clean_var(data, only_positive=True):
"""
Cleans a 2D or 3D variable filled with NaNs and other irregularities.
"""
from ..utils import utilsfast
data = np.ma.masked_invalid(data, copy=False)
if only_positive:
data = np.ma.masked_less(data, 0., copy=False)
tmp = np.abs(data)
thres = tmp.mean() + tmp.std() * 4 # points more than 4 std away
data = np.ma.masked_where(tmp > thres, data, copy=False)
if data.ndim == 2:
data = data[..., np.newaxis]
for k in range(data.shape[-1]):
tmp = data[..., k].astype("d")
tmp[data[..., k].mask] = np.nan
data[..., k] = utilsfast.replace_nans(tmp, 15, 0.1, 3, "localmean")
return np.squeeze(data)
| ITA-Solar/helita | helita/sim/rh15d.py | Python | bsd-3-clause | 31,586 | ["NetCDF"] | 0b5ece4ef8a2416a95edecae16009a2bca35eac3620b2a88303743a0c912effa |
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, deprecated
from ..utils.extmath import norm, logsumexp, pinvh
from .. import cluster
from .gmm import GMM
def sqnorm(v):
return norm(v) ** 2
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
def log_normalize(v, axis=0):
    """Normalized probabilities from unnormalized log-probabilities"""
v = np.rollaxis(v, axis)
v = v.copy()
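    # The steps below implement a numerically stable softmax along `axis`:
    # out_i = exp(v_i - max(v)) / sum_j exp(v_j - max(v)), with a small eps
    # added and the result renormalised so that no entry is exactly zero.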
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
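    # Formula implemented below, with p = n_features and W ~ Wishart_p(a, B):
    #   E[log|W|] = sum_{i=1..p} psi((a + 1 - i) / 2) + p * log(2) + detB
    # where detB is expected to carry the (log-)determinant term of the scale matrix.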
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
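    # The four terms accumulated below give, with p = n_features:
    #   log Z = (v*p/2)*log(2) + (p*(p-1)/4)*log(pi) + (v/2)*log(dets)
    #           + sum_{j=0..p-1} lgamma((v - j + 1) / 2)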
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
    """Returns a bound term computed from the dof, scale matrix and its
    determinant, used as an upper bound in the variational approximation
    of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
    """Helper function to compute the quadratic form (x - mu).T * A * (x - mu)
    for each row of x (i.e. the squared Mahalanobis distance)"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class DPGMM(GMM):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``. Defaults to 1.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Maximum number of iterations to perform before convergence.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
`weights_` : array, shape (`n_components`,)
Mixing weights for each mixture component.
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`precs_` : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
`converged_` : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
self.verbose = verbose
super(DPGMM, self).__init__(n_components, covariance_type,
random_state=random_state,
thresh=thresh, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params)
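    # Usage sketch (toy data; score_samples is defined below, while fitting comes
    # from the GMM machinery this class builds on):
    # >>> import numpy as np
    # >>> X = np.random.randn(500, 2)
    # >>> dpgmm = DPGMM(n_components=5, alpha=1.0, n_iter=100)
    # >>> dpgmm.fit(X)
    # >>> logprob, responsibilities = dpgmm.score_samples(X)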
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
@deprecated("DPGMM.eval was renamed to DPGMM.score_samples in 0.14 and "
"will be removed in 0.16.")
def eval(self, X):
return self.score_samples(X)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * sqnorm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def fit(self, X):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html#
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
self.random_state = check_random_state(self.random_state)
## initialization step
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_features = X.shape[1]
z = np.ones((X.shape[0], self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + X.shape[0])
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
logprob = []
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
# Expectation step
curr_logprob, z = self.score_samples(X)
logprob.append(curr_logprob.sum() + self._logprior(z))
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
self._set_weights()
return self
class VBGMM(DPGMM):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can. Defaults
to 1.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
`weights_` : array, shape (`n_components`,)
Mixing weights for each mixture component.
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`precs_` : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
`converged_` : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
    DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
thresh=thresh, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = float(alpha) / n_components
@deprecated("VBGMM.eval was renamed to VBGMM.score_samples in 0.14 and"
" will be removed in 0.16.")
def eval(self, X):
return self.score_samples(X)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
p = np.zeros(self.n_components)
bound = np.zeros(X.shape[0])
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
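# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal,
# hypothetical demonstration of the variational mixture API defined above.
# The synthetic data, parameter values, and printed attributes are
# illustrative assumptions only; within scikit-learn this module is normally
# imported rather than executed directly.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # Two well-separated 2D blobs of 100 points each.
    X_demo = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5.0])
    # DPGMM infers an effective number of components up to n_components.
    dp = DPGMM(n_components=5, covariance_type='diag', alpha=1.0,
               n_iter=50).fit(X_demo)
    print("DPGMM converged:", dp.converged_)
    print("DPGMM mixing weights:", dp.weights_)
    # VBGMM uses a fixed number of components for comparison.
    vb = VBGMM(n_components=2, covariance_type='diag', n_iter=50).fit(X_demo)
    print("VBGMM means:", vb.means_)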
|
treycausey/scikit-learn
|
sklearn/mixture/dpgmm.py
|
Python
|
bsd-3-clause
| 31,090
|
[
"Gaussian"
] |
38adeba6945cf052c7cb340abc8affd5d1876c8f8eb1aea12b197cfa6cef534a
|
import bs4
from typing import List, Dict
from collections import namedtuple
from difflib import SequenceMatcher
from .logger import Logger
from .gallery import GenericGallery
from .request_managers import ex_request_manager, chaika_request_manager
ChaikaResult = namedtuple("ChaikaResult", "url title")
class Search(Logger):
BASE_EX_URL = "http://exhentai.org/?inline_set=dm_t&f_doujinshi=1&f_manga=1&f_artistcg=1&f_gamecg=1&f_western=1&f_non-h=1&f_imageset=1&f_cosplay=1&f_asianporn=1&f_misc=0&f_sname=on&adv&f_search=%s&advsearch=1&f_srdd=2&f_apply=Apply+Filter&f_shash=%s&page=%s&fs_smiliar=1&fs_covers=%s"
BASE_CHAIKA_URL = "http://panda.chaika.moe/?title={TITLE}&tags=&posted_from=&posted_to=&filesize_from=&filesize_to=&source_type=&sort=posted&asc_desc=desc&apply=Apply"
@classmethod
def search_ex_by_gallery(cls, gallery: GenericGallery):
cls = cls()
cls.name = gallery.title # For logging
sha_hash = gallery.generate_image_hash(index=0)
hash_search = next(cls.ex_search(sha_hash=sha_hash))
cls.logger.info("EX cover hash search results: %s" % hash_search)
if len(hash_search) == 1:
return hash_search[0]
all_pages_hash = next(cls.ex_search(sha_hash=sha_hash, cover_only=0))
cls.logger.info("EX all pages hash results: %s" % all_pages_hash)
if len(all_pages_hash) == 1:
return all_pages_hash[0]
combined = hash_search + all_pages_hash
if len(combined) == 0:
try:
sha_hash = gallery.generate_image_hash(index=1)
second_hash_search = next(cls.ex_search(sha_hash=sha_hash))
if len(second_hash_search) == 1:
return second_hash_search[0]
else:
hash_search += second_hash_search
combined += hash_search
except IndexError:
pass
if len(combined) == 0:
cls.logger.info("No ex search results for gallery.")
return
intersection = [r for r in hash_search if r in all_pages_hash]
if intersection:
cls.logger.info("Returning ex intersection result.")
return intersection[0]
else:
cls.logger.info("No ex intersection results, picking first available hash.")
return combined[0]
@classmethod
def ex_search(cls, **kwargs):
cls = cls()
recursive = kwargs.get("recursive", False)
page_num = kwargs.get("page_num", 0)
num_pages = kwargs.get("num_pages")
sha_hash = kwargs.get("sha_hash", "")
cover_only = kwargs.get("cover_only", 1)
title = kwargs.get("title", "")
url = kwargs.get("url") or cls.BASE_EX_URL % (title, sha_hash, page_num, cover_only)
response = ex_request_manager.get(url)
html_results = bs4.BeautifulSoup(response, "html.parser")
results = html_results.findAll("div", {"class": "it5"})
result_urls = [r.a.attrs["href"] for r in results]
if num_pages is None:
pages = html_results.find("table", "ptt")
if pages is not None:
try:
num_pages = int(pages.findAll("a")[-2].contents[0]) - 1
except IndexError:
pass
kwargs["num_pages"] = num_pages
yield result_urls
if not recursive or page_num >= num_pages:
return
else:
if page_num == 0:
kwargs["page_num"] = 1
else:
kwargs["page_num"] += 1
yield from cls.ex_search(**kwargs)
@classmethod
def search_chaika_by_gallery(cls, gallery: GenericGallery) -> str:
cls = cls()
chaika_url = cls.search_chaika(gallery)
if chaika_url:
cls.logger.info("{GALLERY} - Chaika url of {URL} found".format(GALLERY=gallery, URL=chaika_url))
gallery_page = bs4.BeautifulSoup(chaika_request_manager.get(chaika_url), "html.parser")
ex_url = gallery_page.find("a", {"rel": "nofollow"})
if ex_url:
return ex_url.get_text()
@classmethod
def search_chaika(cls, gallery: GenericGallery) -> str:
cls = cls()
cls.name = gallery.name
title_results = cls.convert_chaika_results(
chaika_request_manager.get(cls.BASE_CHAIKA_URL.format(TITLE=gallery.title)))
cls.logger.info("Chaika results: {RESULTS}".format(RESULTS=title_results))
for result in title_results:
ratio = SequenceMatcher(None, gallery.name, result.title).ratio()
if ratio >= .6:
return result.url
@classmethod
def convert_chaika_results(cls, results: str) -> List[ChaikaResult]:
base_gallery_url = "http://panda.chaika.moe{PATH}"
results_html = bs4.BeautifulSoup(results, "html.parser").find("table", {"class": "resulttable"})
galleries = bs4.BeautifulSoup(str(results_html), "html.parser").find_all("tr")
return [ChaikaResult(base_gallery_url.format(PATH=gallery.a.attrs["href"]),
gallery.a.get_text()) for gallery in galleries if gallery.a]
|
seanegoodwin/PandaViewer
|
PandaViewer/search.py
|
Python
|
gpl-3.0
| 5,215
|
[
"MOE"
] |
4a9fd25a277db53fc805625041183a3fe885a1a9069fcf32f77a46e699a18ffa
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The layout of the theme
"""
from PyQt4 import QtGui
from openlp.core.lib import translate
from openlp.core.lib.ui import create_button_box
class Ui_ThemeLayoutDialog(object):
"""
The layout of the theme
"""
def setupUi(self, themeLayoutDialog):
"""
Set up the UI
"""
themeLayoutDialog.setObjectName('themeLayoutDialogDialog')
#themeLayoutDialog.resize(300, 200)
self.previewLayout = QtGui.QVBoxLayout(themeLayoutDialog)
self.previewLayout.setObjectName('previewLayout')
self.previewArea = QtGui.QWidget(themeLayoutDialog)
self.previewArea.setObjectName('previewArea')
self.previewAreaLayout = QtGui.QGridLayout(self.previewArea)
self.previewAreaLayout.setMargin(0)
self.previewAreaLayout.setColumnStretch(0, 1)
self.previewAreaLayout.setRowStretch(0, 1)
self.previewAreaLayout.setObjectName('previewAreaLayout')
self.themeDisplayLabel = QtGui.QLabel(self.previewArea)
self.themeDisplayLabel.setFrameShape(QtGui.QFrame.Box)
self.themeDisplayLabel.setScaledContents(True)
self.themeDisplayLabel.setObjectName('themeDisplayLabel')
self.previewAreaLayout.addWidget(self.themeDisplayLabel)
self.previewLayout.addWidget(self.previewArea)
self.mainColourLabel = QtGui.QLabel(self.previewArea)
self.mainColourLabel.setObjectName('mainColourLabel')
self.previewLayout.addWidget(self.mainColourLabel)
self.footerColourLabel = QtGui.QLabel(self.previewArea)
self.footerColourLabel.setObjectName('footerColourLabel')
self.previewLayout.addWidget(self.footerColourLabel)
self.button_box = create_button_box(themeLayoutDialog, 'button_box', ['ok'])
self.previewLayout.addWidget(self.button_box)
self.retranslateUi(themeLayoutDialog)
def retranslateUi(self, themeLayoutDialog):
"""
Translate the UI on the fly
"""
themeLayoutDialog.setWindowTitle(translate('OpenLP.StartTimeForm', 'Theme Layout'))
self.mainColourLabel.setText(translate('OpenLP.StartTimeForm', 'The blue box shows the main area.'))
self.footerColourLabel.setText(translate('OpenLP.StartTimeForm', 'The red box shows the footer.'))
|
marmyshev/item_title
|
openlp/core/ui/themelayoutdialog.py
|
Python
|
gpl-2.0
| 4,397
|
[
"Brian"
] |
d788ba69121933da54e097509f01d4fa3600b3e9a0c60d969b468b6b38124317
|
# Licensed under an MIT open source license - see LICENSE
import numpy as np
import matplotlib.pyplot as p
import scipy.ndimage as nd
from scipy.stats import lognorm
from skimage.morphology import remove_small_objects, medial_axis
from astropy.io import fits
from astropy.table import Table, Column
from astropy import units as u
from astropy.wcs import WCS
from astropy.nddata.utils import overlap_slices
from copy import deepcopy
import os
import time
import warnings
from .pixel_ident import recombine_skeletons, isolateregions
from .utilities import eight_con, round_to_odd, threshold_local, in_ipynb
from .io_funcs import input_data
from .base_conversions import (BaseInfoMixin, UnitConverter,
find_beam_properties, data_unit_check)
from .filament import Filament2D
# The try/except is here to deal with TypeErrors when building the docs on RTD
# This isn't really a solution... but it is lazy and does the job until I
# add astropy_helpers.
try:
FWHM_FACTOR = 2 * np.sqrt(2 * np.log(2.))
except TypeError:
FWHM_FACTOR = np.NaN
class FilFinder2D(BaseInfoMixin):
"""
Extract and analyze filamentary structure from a 2D image.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.io.fits.PrimaryHDU`
A 2D array of the data to be analyzed. If a FITS HDU is passed, the
header is automatically loaded.
header : FITS header, optional
The header from fits file containing the data. If no header is provided,
and it could not be loaded from ``image``, all results will be returned
in pixel units.
beamwidth : float or astropy.units.Quantity, optional
The FWHM beamwidth with an appropriately attached unit. By default,
the beam is read from a provided header. If the beam cannot be read
from the header, or a header is not provided, this input must be
given. If a float is given, it is assumed to be in pixel units.
ang_scale : `~astropy.units.Quantity`, optional
Give the angular to pixel units conversion. If none is given, it will
be read from the header. The units must be a valid angular unit.
distance : float, optional
The distance to the region being examined (in pc). If None, the
analysis is carried out in pixel and angular units. In this case,
    the physical priors used in other optional parameters are meaningless
and each must be specified initially.
mask : numpy.ndarray, optional
A pre-made, boolean mask may be supplied to skip the segmentation
process. The algorithm will skeletonize and run the analysis portions
only.
capture_pre_recombine_masks : bool, optional
If True, will save the pre-`recombine_skeletons()` mask objects and
corners and expose them as attributes. Default is False.
save_name : str, optional
Sets the prefix name that is used for output files. Can be overridden
in ``save_fits`` and ``save_table``. Default is "FilFinder_output".
Examples
--------
>>> from fil_finder import FilFinder2D
>>> from astropy.io import fits
>>> import astropy.units as u
>>> hdu = fits.open("twod.fits")[0] # doctest: +SKIP
>>> filfind = FilFinder2D(hdu, beamwidth=15*u.arcsec, distance=170*u.pc, save_name='twod_filaments') # doctest: +SKIP
>>> filfind.preprocess_image(verbose=False) # doctest: +SKIP
>>> filfind.create_mask(verbose=False) # doctest: +SKIP
>>> filfind.medskel(verbose=False) # doctest: +SKIP
>>> filfind.analyze_skeletons(verbose=False) # doctest: +SKIP
>>> filfind.exec_rht(verbose=False) # doctest: +SKIP
>>> filfind.find_widths(verbose=False) # doctest: +SKIP
>>> fil_table = filfind.output_table(verbose=False) # doctest: +SKIP
>>> branch_table = filfind.branch_tables(verbose=False) # doctest: +SKIP
>>> filfind.save_fits() # doctest: +SKIP
>>> filfind.save_stamp_fits() # doctest: +SKIP
"""
def __init__(self, image, header=None, beamwidth=None, ang_scale=None,
distance=None, mask=None, save_name="FilFinder_output",
capture_pre_recombine_masks=False):
# Accepts a numpy array or fits.PrimaryHDU
output = input_data(image, header)
self._image = output["data"]
if "header" in output:
self._header = output["header"]
elif ang_scale is not None:
if not isinstance(ang_scale, u.Quantity):
raise TypeError("ang_scale must be an "
"astropy.units.Quantity.")
if not ang_scale.unit.is_equivalent(u.deg):
raise u.UnitsError("ang_scale must be given in angular "
"units.")
# Mock up a simple header
hdr_dict = {"NAXIS": 2,
"NAXIS1": self.image.shape[1],
"NAXIS2": self.image.shape[0],
"CDELT1": - ang_scale.to(u.deg).value,
"CDELT2": ang_scale.to(u.deg).value,
'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
}
self._header = fits.Header(hdr_dict)
else:
self._header = None
if self.header is not None:
self._wcs = WCS(self.header)
else:
self._wcs = None
self.converter = UnitConverter(self.wcs, distance)
if beamwidth is None:
if self.header is not None:
major = find_beam_properties(self.header)[0]
else:
major = beamwidth
else:
major = beamwidth
if major is not None:
self._beamwidth = self.converter.to_pixel(major)
else:
warnings.warn("No beam width given. Using 0 pixels.")
self._beamwidth = 0.0 * u.pix
self.save_name = save_name
# If pre-made mask is provided, remove nans if any.
self.mask = None
if mask is not None:
if self.image.shape != mask.shape:
raise ValueError("The given pre-existing mask must have the "
"same shape as the image.")
mask[np.isnan(mask)] = 0.0
self.mask = mask
self.capture_pre_recombine_masks = capture_pre_recombine_masks
self._pre_recombine_mask_objs = None
self._pre_recombine_mask_corners = None
def preprocess_image(self, skip_flatten=False, flatten_percent=None):
'''
Preprocess and flatten the image before running the masking routine.
Parameters
----------
skip_flatten : bool, optional
Skip the flattening step and use the original image to construct
the mask. Default is False.
flatten_percent : int, optional
The percentile of the data (0-100) to set the normalization of the
arctan transform. By default, a log-normal distribution is fit and
the threshold is set to :math:`\mu + 2\sigma`. If the data contains
regions of a much higher intensity than the mean, it is recommended
            this be set above the 95th percentile.
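        Examples
        --------
        A minimal, hypothetical call, assuming ``filfind`` is an initialized
        `FilFinder2D` (as in the class-level example):
        >>> filfind.preprocess_image(flatten_percent=95)  # doctest: +SKIP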
'''
if skip_flatten:
self._flatten_threshold = None
self.flat_img = self.image
else:
# Make flattened image
if flatten_percent is None:
# Fit to a log-normal
fit_vals = lognorm.fit(self.image[~np.isnan(self.image)].value)
median = lognorm.median(*fit_vals)
std = lognorm.std(*fit_vals)
thresh_val = median + 2 * std
else:
thresh_val = np.percentile(self.image[~np.isnan(self.image)],
flatten_percent)
self._flatten_threshold = data_unit_check(thresh_val,
self.image.unit)
# Make the units dimensionless
self.flat_img = thresh_val * \
np.arctan(self.image / self.flatten_threshold) / u.rad
@property
def flatten_threshold(self):
'''
Threshold value used in the arctan transform.
'''
return self._flatten_threshold
def create_mask(self, glob_thresh=None, adapt_thresh=None,
smooth_size=None, size_thresh=None, verbose=False,
test_mode=False, regrid=True, border_masking=True,
border_kwargs={'size': 50 * u.pix**2,
'filt_width': 25 * u.pix, 'eros_iter': 15},
fill_hole_size=None,
use_existing_mask=False, save_png=False):
'''
This runs the complete segmentation process and returns a mask of the
        filaments found. The process is broken into four steps:
        * An arctan transform is taken to flatten extremely bright regions.
          Adaptive thresholding is very sensitive to local intensity changes
          and small, bright objects (i.e. cores) will leave patch-sized holes
          in the mask.
* The flattened image is smoothed over with a median filter.
The size of the patch used here is set to be much smaller than the
          typical filament width. Smoothing is necessary to minimize
extraneous branches when the medial axis transform is taken.
* A binary opening is performed using an 8-connected structure
element. This is very successful at removing small regions around
the edge of the data.
* Objects smaller than a certain threshold (set to be ~1/10 the area
of a small filament) are removed to ensure only regions which are
sufficiently large enough to be real structure remain.
The parameters for this function are as previously defined.
They are included here for fine-tuning purposes only.
Parameters
----------
smooth_size : int, optional
            The patch size (in pixels) used to smooth the flattened image
            before adaptive thresholding is performed. Smoothing is necessary
            to ensure extraneous branches on the skeletons are minimized.
If None, the patch size is set to ~0.05 pc. This ensures the large
scale structure is not affected while smoothing extraneous pixels off
the edges.
size_thresh : int, optional
This sets the lower threshold on the size of objects found in the
adaptive thresholding. If None, the value is set at
            :math:`5\pi (0.1 \text{pc})^2`, which is the area of the minimum dimensions
expected for a filament. Any region smaller than this threshold may be
safely labeled as an artifact of the thresholding.
glob_thresh : float, optional
This is the percentile of the data to mask off. All intensities below
are cut off from being included in the filamentary structure.
adapt_thresh : int, optional
This is the size in pixels of the patch used in the adaptive
thresholding. Bright structure is not very sensitive to the choice of
patch size, but faint structure is very sensitive. If None, the patch
size is set to twice the width of a typical filament (~0.2 pc). As the
width of filaments is somewhat ubiquitous, this patch size generally
segments all filamentary structure in a given image.
verbose : bool, optional
Enables plotting. Default is False.
test_mode : bool, optional
Plot each masking step. Default is False.
regrid : bool, optional
Enables the regridding of the image to larger sizes when the patch
size for the adaptive thresholding is less than 40 pixels. This
decreases fragmentation of regions due to pixellization effects.
Default is True.
border_masking : bool, optional
Dilates a mask of the regions along the edge of the image to remove
            regions dominated by noise. Disabling leads to regions being
            characterized at the image boundaries and should only be used if
            there is no significant noise at the edges. Default is True.
fill_hole_size : int or float, optional
Sets the maximum hole size to fill in the skeletons. If <1,
maximum is that proportion of the total number of pixels in
skeleton. Otherwise, it sets the maximum number of pixels.
Defaults to a square area with length of the beamwidth.
use_existing_mask : bool, optional
If ``mask`` is already specified, enabling this skips
recomputing the mask.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
Attributes
----------
mask : numpy.ndarray
The mask of filaments.
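        Examples
        --------
        A minimal, hypothetical call, assuming ``filfind`` is a `FilFinder2D`
        instance that has already been run through `preprocess_image` (as in
        the class-level example):
        >>> filfind.create_mask(verbose=False, border_masking=True)  # doctest: +SKIP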
'''
if self.mask is not None and use_existing_mask:
warnings.warn("Using inputted mask. Skipping creation of a"
"new mask.")
# Skip if pre-made mask given
self.glob_thresh = 'usermask'
self.adapt_thresh = 'usermask'
self.size_thresh = 'usermask'
self.smooth_size = 'usermask'
if self.capture_pre_recombine_masks:
warnings.warn("Creation of a new mask skipped, pre-"
"recombined masks will not be captured.")
return
if not hasattr(self.converter, 'distance'):
if smooth_size is None:
raise ValueError("Distance not given. Must specify smooth_size"
" in pixel units.")
if adapt_thresh is None:
raise ValueError("Distance not given. Must specify"
"adapt_thresh in pixel units.")
if size_thresh is None:
raise ValueError("Distance not given. Must specify size_thresh"
" in pixel units.")
if glob_thresh is None:
self.glob_thresh = None
else:
self.glob_thresh = data_unit_check(glob_thresh, self.image.unit)
if size_thresh is None:
# Adapt a typical filament area as pi * length * width,
            # with width ~ 0.1 pc, and length = 5 * width
min_fil_area = \
self.converter.to_pixel_area(np.pi * 5 * 0.1**2 * u.pc**2)
# Use a threshold rounded to the nearest pixel
self.size_thresh = round(min_fil_area.value) * u.pix**2
else:
self.size_thresh = self.converter.to_pixel_area(size_thresh)
# Area of ellipse for typical filament size. Divided by 10 to
# incorporate sparsity.
if adapt_thresh is None:
# twice average FWHM for filaments
fil_width = self.converter.to_pixel(0.2 * u.pc)
self.adapt_thresh = round(fil_width.value) * u.pix
else:
self.adapt_thresh = self.converter.to_pixel(adapt_thresh)
if smooth_size is None:
# half average FWHM for filaments
smooth_width = self.converter.to_pixel(0.05 * u.pc)
self.smooth_size = round(smooth_width.value) * u.pix
else:
self.smooth_size = self.converter.to_pixel(smooth_size)
# Check if regridding is even necessary
if self.adapt_thresh >= 40 * u.pix and regrid:
regrid = False
warnings.warn("Adaptive thresholding patch is larger than 40"
"pixels. Regridding has been disabled.")
# Adaptive thresholding can't handle nans, so we create a nan mask
# by finding the large, outer regions, smoothing with a large median
# filter and eroding it.
# Make a copy of the flattened image
flat_copy = self.flat_img.copy()
# Make the nan mask
if border_masking:
nan_mask = np.isnan(flat_copy)
# Convert the size and width to pixel units
border_size_pix = \
self.converter.to_pixel_area(border_kwargs['size'])
border_med_width = \
np.ceil(self.converter.to_pixel(border_kwargs['filt_width']))
nan_mask = remove_small_objects(nan_mask,
min_size=border_size_pix.value,
connectivity=8)
nan_mask = np.logical_not(nan_mask)
nan_mask = nd.median_filter(nan_mask, int(border_med_width.value))
nan_mask = nd.binary_erosion(nan_mask, eight_con(),
iterations=border_kwargs['eros_iter'])
else:
nan_mask = np.logical_not(np.isnan(flat_copy))
# Remove nans in the copy
flat_copy[np.isnan(flat_copy)] = 0.0
# Perform regridding
if regrid:
# Calculate the needed zoom to make the patch size ~40 pixels
ratio = 40 / self.adapt_thresh.value
# Round to the nearest factor of 2
regrid_factor = np.min([2., int(round(ratio / 2.0) * 2.0)])
# Defaults to cubic interpolation
masking_img = nd.zoom(flat_copy, (regrid_factor, regrid_factor))
else:
regrid_factor = 1
ratio = 1
masking_img = flat_copy
med_filter_size = int(round(self.smooth_size.value * ratio))
smooth_img = nd.median_filter(masking_img,
size=med_filter_size)
adapt = threshold_local(smooth_img,
round_to_odd(ratio *
self.adapt_thresh.value),
method="mean")
if regrid:
regrid_factor = float(regrid_factor)
adapt = nd.zoom(adapt, (1 / regrid_factor, 1 / regrid_factor),
order=0)
# Remove areas near the image border
adapt = adapt * nan_mask
if self.glob_thresh is not None:
glob = self.image > self.glob_thresh
adapt = glob * adapt
cleaned = remove_small_objects(adapt, min_size=self.size_thresh.value)
# Remove small holes within the object
if fill_hole_size is None:
fill_hole_size = np.pi * (self.beamwidth / FWHM_FACTOR)**2
else:
fill_hole_size = self.converter.to_pixel_area(fill_hole_size)
mask_objs, num, corners = \
isolateregions(cleaned, fill_hole=True,
rel_size=fill_hole_size.value,
morph_smooth=True, pad_size=1)
if self.capture_pre_recombine_masks:
self._pre_recombine_mask_objs = mask_objs
self._pre_recombine_mask_corners = corners
self.mask = recombine_skeletons(mask_objs,
corners, self.image.shape, 1)
# WARNING!! Setting some image values to 0 to avoid negative weights.
# This may cause issues, however it will allow for proper skeletons
# Through all the testing and deriving science results, this has not
# been an issue! EK
# XXX Check this
# self.image[np.where((self.mask * self.image) < 0.0)] = 0
if test_mode:
fig, ax = p.subplots(3, 2, sharex=True, sharey=True)
im0 = ax[0, 0].imshow(np.log10(self.image.value), origin="lower",
interpolation='nearest',
cmap='binary')
fig.colorbar(im0, ax=ax[0, 0])
im1 = ax[1, 0].imshow(masking_img, origin="lower",
interpolation='nearest',
cmap='binary')
fig.colorbar(im1, ax=ax[1, 0])
im2 = ax[0, 1].imshow(smooth_img, origin="lower",
interpolation='nearest',
cmap='binary')
fig.colorbar(im2, ax=ax[0, 1])
im3 = ax[1, 1].imshow(adapt, origin="lower",
interpolation='nearest',
cmap='binary')
fig.colorbar(im3, ax=ax[1, 1])
im4 = ax[2, 0].imshow(cleaned, origin="lower",
interpolation='nearest',
cmap='binary')
fig.colorbar(im4, ax=ax[2, 0])
im5 = ax[2, 1].imshow(self.mask, origin="lower",
interpolation='nearest',
cmap='binary')
fig.colorbar(im5, ax=ax[2, 1])
p.show()
if verbose or save_png:
vmin = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 20)
vmax = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 90)
p.clf()
p.imshow(self.flat_img.value, interpolation='nearest',
origin="lower", cmap='binary', vmin=vmin, vmax=vmax)
p.contour(self.mask, colors="r")
p.title("Mask on Flattened Image.")
if save_png:
p.savefig(self.save_name + "_mask.png")
if verbose:
p.show()
if in_ipynb():
p.clf()
def medskel(self, verbose=False, save_png=False):
'''
This function performs the medial axis transform (skeletonization)
on the mask. This is essentially a wrapper function of
skimage.morphology.medial_axis with the ability to delete narrow
regions in the mask.
If the distance transform is returned from the transform, it is used
        as a pruning step. Regions whose width is far too small
        (set to >0.01 pc) are deleted. This ensures there are no unnecessary
        connections between filaments.
Parameters
----------
verbose : bool, optional
Enables plotting.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
Attributes
----------
skeleton : numpy.ndarray
The array containing all of the skeletons.
medial_axis_distance : numpy.ndarray
The distance transform used to create the skeletons.
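        Examples
        --------
        A minimal, hypothetical call, assuming ``filfind`` is a `FilFinder2D`
        instance with an existing mask (as in the class-level example):
        >>> filfind.medskel(verbose=False)  # doctest: +SKIP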
'''
self.skeleton, self.medial_axis_distance = \
medial_axis(self.mask, return_distance=True)
self.medial_axis_distance = \
self.medial_axis_distance * self.skeleton * u.pix
# Delete connection smaller than 2 pixels wide. Such a small
# connection is more likely to be from limited pixel resolution
# rather than actual structure.
width_threshold = 1 * u.pix
narrow_pts = np.where(self.medial_axis_distance < width_threshold)
self.skeleton[narrow_pts] = 0 # Eliminate narrow connections
self.medial_axis_distance[narrow_pts] = 0 * u.pix
if verbose or save_png: # For examining results of skeleton
vmin = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 20)
vmax = np.percentile(self.flat_img[np.isfinite(self.flat_img)], 90)
p.clf()
p.imshow(self.flat_img.value, interpolation=None, origin="lower",
cmap='binary', vmin=vmin, vmax=vmax)
p.contour(self.skeleton, colors="r")
if save_png:
p.savefig(self.save_name + "_initial_skeletons.png")
if verbose:
p.show()
if in_ipynb():
p.clf()
def analyze_skeletons(self, prune_criteria='all', relintens_thresh=0.2,
nbeam_lengths=5, branch_nbeam_lengths=3,
skel_thresh=None, branch_thresh=None,
max_prune_iter=10,
verbose=False, save_png=False, save_name=None):
'''
Prune skeleton structure and calculate the branch and longest-path
lengths. See `~Filament2D.skeleton_analysis`.
Parameters
----------
prune_criteria : {'all', 'intensity', 'length'}, optional
Choose the property to base pruning on. 'all' requires that the
branch fails to satisfy the length and relative intensity checks.
relintens_thresh : float, optional
Relative intensity threshold for pruning. Sets the importance
a branch must have in intensity relative to all other branches
in the skeleton. Must be between (0.0, 1.0].
nbeam_lengths : float or int, optional
Sets the minimum skeleton length based on the number of beam
sizes specified.
branch_nbeam_lengths : float or int, optional
Sets the minimum branch length based on the number of beam
sizes specified.
skel_thresh : float, optional
            Given in pixel units. Below this cut-off, skeletons with fewer pixels
will be deleted. The default value is 0.3 pc converted to pixels.
branch_thresh : float, optional
Any branches shorter than this length (in pixels) will be labeled as
extraneous and pruned off. The default value is 3 times the FWHM
beamwidth.
max_prune_iter : int, optional
Maximum number of pruning iterations to apply.
verbose : bool, optional
Enables plotting.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
save_name : str, optional
Prefix for the saved plots.
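        Examples
        --------
        A minimal, hypothetical call with thresholds given in pixel units,
        assuming ``filfind`` is a `FilFinder2D` instance with a computed
        skeleton and ``u`` is `astropy.units` (as in the class-level example):
        >>> filfind.analyze_skeletons(skel_thresh=10 * u.pix, branch_thresh=3 * u.pix)  # doctest: +SKIP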
'''
if relintens_thresh > 1.0 or relintens_thresh <= 0.0:
raise ValueError("relintens_thresh must be set between "
"(0.0, 1.0].")
if not hasattr(self.converter, 'distance') and skel_thresh is None:
raise ValueError("Distance not given. Must specify skel_thresh"
" in pixel units.")
if save_name is None:
save_name = self.save_name
# Set the skeleton length threshold to some factor of the beam width
if skel_thresh is None:
# Double check these defaults.
# min_length = self.converter.to_pixel(0.3 * u.pc)
min_length = nbeam_lengths * self.beamwidth
skel_thresh = round(min_length.value) * u.pix
else:
skel_thresh = self.converter.to_pixel(skel_thresh)
self.skel_thresh = np.ceil(skel_thresh)
# Set the minimum branch length to be the beam size.
if branch_thresh is None:
branch_thresh = branch_nbeam_lengths * self.beamwidth
else:
branch_thresh = self.converter.to_pixel(branch_thresh)
self.branch_thresh = np.ceil(branch_thresh).astype(int)
# Label individual filaments and define the set of filament objects
labels, num = nd.label(self.skeleton, eight_con())
# Find the objects that don't satisfy skel_thresh
if self.skel_thresh > 0.:
obj_sums = nd.sum(self.skeleton, labels, range(1, num + 1))
remove_fils = np.where(obj_sums <= self.skel_thresh.value)[0]
for lab in remove_fils:
self.skeleton[np.where(labels == lab + 1)] = 0
# Relabel after deleting short skeletons.
labels, num = nd.label(self.skeleton, eight_con())
self.filaments = [Filament2D(np.where(labels == lab),
converter=self.converter) for lab in
range(1, num + 1)]
self.number_of_filaments = num
# Now loop over the skeleton analysis for each filament object
for n, fil in enumerate(self.filaments):
savename = "{0}_{1}".format(save_name, n)
if verbose:
print("Filament: %s / %s" % (n + 1, self.number_of_filaments))
fil.skeleton_analysis(self.image, verbose=verbose,
save_png=save_png,
save_name=savename,
prune_criteria=prune_criteria,
relintens_thresh=relintens_thresh,
branch_thresh=self.branch_thresh,
max_prune_iter=max_prune_iter)
self.array_offsets = [fil.pixel_extents for fil in self.filaments]
branch_properties = {}
branch_properties['length'] = [fil.branch_properties['length']
for fil in self.filaments]
branch_properties['intensity'] = [fil.branch_properties['intensity']
for fil in self.filaments]
branch_properties['pixels'] = [fil.branch_properties['pixels']
for fil in self.filaments]
branch_properties['number'] = np.array([fil.branch_properties['number']
for fil in self.filaments])
self.branch_properties = branch_properties
self.filament_extents = [fil.pixel_extents for fil in self.filaments]
long_path_skel = [fil.skeleton(out_type='longpath')
for fil in self.filaments]
final_skel = [fil.skeleton() for fil in self.filaments]
self.skeleton = \
recombine_skeletons(final_skel,
self.array_offsets, self.image.shape,
0)
self.skeleton_longpath = \
recombine_skeletons(long_path_skel,
self.array_offsets, self.image.shape,
0)
def lengths(self, unit=u.pix):
'''
Return longest path lengths of the filaments.
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
pix_lengths = np.array([fil.length().value
for fil in self.filaments]) * u.pix
return self.converter.from_pixel(pix_lengths, unit)
def branch_lengths(self, unit=u.pix):
'''
Return the length of all branches in all filaments.
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
branches = []
for lengths in self.branch_properties['length']:
branches.append(self.converter.from_pixel(lengths, unit))
return branches
def filament_positions(self, world_coord=False):
'''
Return the median pixel or world positions of the filaments.
Parameters
----------
world_coord : bool, optional
Return the world coordinates, defined by the WCS information. If no
WCS information is given, the output stays in pixel units.
Returns
-------
filament positions : list of tuples
The median positions of each filament.
'''
return [fil.position(world_coord=world_coord) for fil in
self.filaments]
@property
def intersec_pts(self):
'''
Intersection pixels for each filament.
'''
return [fil.intersec_pts for fil in self.filaments]
@property
def end_pts(self):
'''
End pixels for each filament.
'''
return [fil.end_pts for fil in self.filaments]
def exec_rht(self, radius=10 * u.pix,
ntheta=180, background_percentile=25,
branches=False, min_branch_length=3 * u.pix,
verbose=False, save_png=False, save_name=None):
'''
Implements the Rolling Hough Transform (Clark et al., 2014).
The orientation of each filament is denoted by the mean value of the
RHT, which from directional statistics can be defined as:
:math:`\\langle\\theta \\rangle = \\frac{1}{2} \\tan^{-1}\\left(\\frac{\\Sigma_i w_i\\sin2\\theta_i}{\\Sigma_i w_i\\cos2\\theta_i}\\right)`
where :math:`w_i` is the normalized value of the RHT at
:math:`\\theta_i`. This definition assumes that :math:`\\Sigma_iw_i=1`.
:math:`\\theta` is defined on :math:`\\left[-\\pi/2, \\pi/2\\right)`.
"Curvature" is represented by the IQR confidence interval about the mean,
:math:`\\langle\\theta \\rangle \\pm \\sin^{-1} \\left( u_{\\alpha} \\sqrt{ \\frac{1-\\alpha}{2R^2} } \\right)`
where :math:`u_{\\alpha}` is the z-score of the two-tail probability,
:math:`\\alpha=\\Sigma_i\\cos{\\left[2w_i\\left(\\theta_i-\\langle\\theta\\rangle\\right)\\right]}`
is the estimated weighted second trigonometric moment and
:math:`R^2=\\left[\\left(\\Sigma_iw_i\\sin{\\theta_i}\\right)^2 +\\left(\\Sigma_iw_i\\cos{\\theta_i}\\right)^2\\right]`
is the weighted length of the vector.
These equations can be found in Fisher & Lewis (1983).
Parameters
----------
radius : int
Sets the patch size that the RHT uses.
ntheta : int, optional
The number of bins to use for the RHT.
        background_percentile : int, optional
RHT distribution often has a constant background. This sets the
percentile to subtract off.
branches : bool, optional
If enabled, runs the RHT on individual branches in the skeleton.
min_branch_length : int, optional
Sets the minimum pixels a branch must have to calculate the RHT
verbose : bool, optional
Enables plotting.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
save_name : str, optional
Prefix for the saved plots.
Attributes
----------
rht_curvature : dict
Contains the median and IQR for each filament.
References
----------
`Clark et al. (2014) <http://adsabs.harvard.edu/abs/2014ApJ...789...82C>`_
`Fisher & Lewis (1983) <http://biomet.oxfordjournals.org/content/70/2/333.short>`_
'''
# Flag branch output
self._rht_branches_flag = False
if branches:
self._rht_branches_flag = True
if save_name is None:
save_name = self.save_name
for n, fil in enumerate(self.filaments):
if verbose:
print("Filament: %s / %s" % (n + 1, self.number_of_filaments))
if branches:
fil.rht_branch_analysis(radius=radius,
ntheta=ntheta,
background_percentile=background_percentile,
min_branch_length=min_branch_length)
else:
fil.rht_analysis(radius=radius, ntheta=ntheta,
background_percentile=background_percentile)
if verbose:
if save_png:
savename = "{0}_{1}_rht.png".format(save_name, n)
else:
save_name = None
fil.plot_rht_distrib(save_name=save_name)
@property
def orientation(self):
'''
Returns the orientations of the filament longest paths computed with
`~FilFinder2D.exec_rht` with `branches=False`.
'''
return [fil.orientation.value for fil in self.filaments] * u.rad
@property
def curvature(self):
'''
        Returns the curvatures of the filament longest paths computed with
`~FilFinder2D.exec_rht` with `branches=False`.
'''
return [fil.curvature.value for fil in self.filaments] * u.rad
@property
def orientation_branches(self):
'''
        Returns the orientations of the filament branches computed with
        `~FilFinder2D.exec_rht` with `branches=True`.
'''
return [fil.orientation_branches for fil in self.filaments]
@property
def curvature_branches(self):
'''
        Returns the curvatures of the filament branches computed with
        `~FilFinder2D.exec_rht` with `branches=True`.
'''
return [fil.curvature_branches for fil in self.filaments]
@property
def pre_recombine_mask_objs(self):
'''
Returns the pre `recombine_skeletons()` mask objects. These objects will
only be captured if `capture_pre_recombine_masks=True` and
`FilFinder2D.create_mask` has been run with `use_existing_mask=False`.
Otherwise will return None. This is useful if there are multiple
filamentary objects in the image and you want to extract the masks for
individual filaments.
'''
return self._pre_recombine_mask_objs
@property
def pre_recombine_mask_corners(self):
'''
Returns the pre-recombine skeletons mask corners. These corners are
needed if you want to use `FilFinder2D.pre_recombine_mask_objs`.
'''
return self._pre_recombine_mask_corners
def find_widths(self, max_dist=10 * u.pix,
pad_to_distance=0 * u.pix,
fit_model='gaussian_bkg',
fitter=None,
try_nonparam=True,
use_longest_path=False,
add_width_to_length=True,
deconvolve_width=True,
fwhm_function=None,
chisq_max=10.,
verbose=False, save_png=False, save_name=None,
xunit=u.pix,
**kwargs):
'''
Create an average radial profile for each filaments and fit a given
model. See `~Filament2D.width_analysis`.
* Radial profiles are created from a Euclidean Distance Transform
on the skeleton.
* A user-specified model is fit to each of the radial profiles.
The default model is a Gaussian with a constant background
('gaussian_bkg'). Other built-in models include a Gaussian with
no background ('gaussian_nobkg') or a non-parametric estimate
('nonparam'). Any 1D astropy model (or compound model) can be
passed for fitting.
Parameters
----------
image : `~astropy.unit.Quantity` or `~numpy.ndarray`
The image from which the filament was extracted.
all_skeleton_array : np.ndarray
An array with the skeletons of other filaments. This is used to
avoid double-counting pixels in the radial profiles in nearby
filaments.
max_dist : `~astropy.units.Quantity`, optional
Largest radius around the skeleton to create the profile from. This
can be given in physical, angular, or physical units.
pad_to_distance : `~astropy.units.Quantity`, optional
Force all pixels within this distance to be kept, even if a pixel
is closer to another skeleton, as given in `all_skeleton_array`.
fit_model : str or `~astropy.modeling.Fittable1DModel`, optional
The model to fit to the profile. Built-in models include
'gaussian_bkg' for a Gaussian with a constant background,
'gaussian_nobkg' for just a Gaussian, 'nonparam' for the
non-parametric estimator. Defaults to 'gaussian_bkg'.
fitter : `~astropy.modeling.fitting.Fitter`, optional
One of the astropy fitting classes. Defaults to a
Levenberg-Marquardt fitter.
try_nonparam : bool, optional
If the chosen model fit fails, fall back to a non-parametric
estimate.
use_longest_path : bool, optional
Only fit profile to the longest path skeleton. Disabled by
default.
add_width_to_length : bool, optional
Add the FWHM to the filament length. This accounts for the
expected shortening in the medial axis transform. Enabled by
default.
deconvolve_width : bool, optional
Deconvolve the beam width from the FWHM. Enabled by default.
fwhm_function : function, optional
Convert the width parameter to the FWHM. Must take the fit model
as an argument and return the FWHM and its uncertainty. If no
function is given, the Gaussian FWHM is used.
chisq_max : float, optional
Enable the fail flag if the reduced chi-squared value is above
this limit.
verbose : bool, optional
Enables plotting.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
save_name : str, optional
Prefix for the saved plots.
xunit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to in the plot.
kwargs : Passed to `~fil_finder.width.radial_profile`.
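        Examples
        --------
        A minimal, hypothetical call, assuming ``filfind`` is a `FilFinder2D`
        instance with analyzed skeletons and ``u`` is `astropy.units` (as in
        the class-level example):
        >>> filfind.find_widths(max_dist=10 * u.pix, fit_model='gaussian_bkg')  # doctest: +SKIP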
'''
if save_name is None:
save_name = self.save_name
for n, fil in enumerate(self.filaments):
if verbose:
print("Filament: %s / %s" % (n + 1, self.number_of_filaments))
fil.width_analysis(self.image, all_skeleton_array=self.skeleton,
max_dist=max_dist,
pad_to_distance=pad_to_distance,
fit_model=fit_model,
fitter=fitter, try_nonparam=try_nonparam,
use_longest_path=use_longest_path,
add_width_to_length=add_width_to_length,
deconvolve_width=deconvolve_width,
beamwidth=self.beamwidth,
fwhm_function=fwhm_function,
chisq_max=chisq_max,
**kwargs)
if verbose:
if save_png:
save_name = "{0}_{1}_radprof.png".format(self.save_name, n)
else:
save_name = None
fil.plot_radial_profile(save_name=save_name, xunit=xunit)
def widths(self, unit=u.pix):
'''
Fitted FWHM of the filaments and their uncertainties.
Parameters
----------
unit : `~astropy.units.Quantity`, optional
The output unit for the FWHM. Default is in pixel units.
'''
pix_fwhm = np.array([fil.radprof_fwhm()[0].value for fil in
self.filaments])
pix_fwhm_err = np.array([fil.radprof_fwhm()[1].value for fil in
self.filaments])
return self.converter.from_pixel(pix_fwhm * u.pix, unit), \
self.converter.from_pixel(pix_fwhm_err * u.pix, unit)
def width_fits(self, xunit=u.pix):
'''
Return an `~astropy.table.Table` of the width fit parameters,
uncertainties, and whether a flag was raised for a bad fit.
Parameters
----------
xunit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
Returns
-------
tab : `~astropy.table.Table`
Table with width fit results.
'''
from astropy.table import vstack as tab_vstack
for i, fil in enumerate(self.filaments):
if i == 0:
tab = fil.radprof_fit_table(unit=xunit)
continue
add_tab = fil.radprof_fit_table(unit=xunit)
# Concatenate the row together
tab = tab_vstack([tab, add_tab])
return tab
def total_intensity(self, bkg_subtract=False, bkg_mod_index=2):
'''
Return the sum of all pixels within the FWHM of the filament.
.. warning::
`fil_finder_2D` multiplied the total intensity by the angular size
of a pixel. This function is just the sum of pixel values. Unit
conversions can be applied on the output if needed.
Parameters
----------
bkg_subtract : bool, optional
Subtract off the fitted background level.
bkg_mod_index : int, optional
Indicate which element in `Filament2D.radprof_params` is the
background level. Defaults to 2 for the Gaussian with background
model.
Returns
-------
total_intensity : `~astropy.units.Quantity`
Array of the total intensities for the filament.
'''
total_intensity = []
for i, fil in enumerate(self.filaments):
total_fil = fil.total_intensity(bkg_subtract=bkg_subtract,
bkg_mod_index=bkg_mod_index)
if i == 0:
unit = total_fil.unit
total_intensity.append(total_fil.value)
return total_intensity * unit
def median_brightness(self):
'''
Returns the median brightness along the skeleton of the filament.
Returns
-------
filament_brightness : list
Average brightness/intensity over the skeleton pixels
for each filament.
'''
if len(self.filaments) == 0:
return np.array([])
med_bright0 = self.filaments[0].median_brightness(self.image)
median_bright = np.zeros(len(self.filaments))
if hasattr(med_bright0, 'unit'):
median_bright = median_bright * med_bright0.unit
median_bright[0] = med_bright0
for i, fil in enumerate(self.filaments):
median_bright[i] = fil.median_brightness(self.image)
return median_bright
def filament_model(self, max_radius=None, bkg_subtract=True,
bkg_mod_index=2):
'''
Returns a model of the diffuse filamentary network based
on the radial profiles.
Parameters
----------
max_radius : `~astropy.units.Quantity`, optional
Number of pixels to extend profiles to. If None is given, each
filament model is computed to 3 * FWHM.
bkg_subtract : bool, optional
Subtract off the fitted background level.
bkg_mod_index : int, optional
Indicate which element in `Filament2D.radprof_params` is the
background level. Defaults to 2 for the Gaussian with background
model.
Returns
-------
model_image : `~numpy.ndarray`
Array of the model
'''
model_image = np.zeros(self.image.shape)
for i, fil in enumerate(self.filaments):
if max_radius is None:
max_rad = 3 * fil.radprof_fwhm()[0]
else:
max_rad = max_radius
fil_model = fil.model_image(max_radius=max_rad,
bkg_subtract=bkg_subtract,
bkg_mod_index=bkg_mod_index)
# Add to the global model.
if i == 0 and hasattr(fil_model, 'unit'):
model_image = model_image * fil_model.unit
pad_size = int(max_rad.value)
arr_cent = [(fil_model.shape[0] - pad_size * 2 - 1) / 2. +
fil.pixel_extents[0][0],
(fil_model.shape[1] - pad_size * 2 - 1) / 2. +
fil.pixel_extents[0][1]]
big_slice, small_slice = overlap_slices(model_image.shape,
fil_model.shape,
arr_cent)
model_image[big_slice] += fil_model[small_slice]
return model_image
def covering_fraction(self, max_radius=None, bkg_subtract=True,
bkg_mod_index=2):
'''
Compute the fraction of the intensity in the image contained in
the filamentary structure.
Parameters
----------
max_radius : `~astropy.units.Quantity`, optional
Number of pixels to extend profiles to. If None is given, each
filament model is computed to 3 * FWHM.
bkg_subtract : bool, optional
Subtract off the fitted background level.
bkg_mod_index : int, optional
Indicate which element in `Filament2D.radprof_params` is the
background level. Defaults to 2 for the Gaussian with background
model.
Returns
-------
covering_fraction : float
Fraction of the total image intensity contained in the
filamentary structure (based on the local, individual fits)
'''
fil_model = self.filament_model(max_radius=max_radius,
bkg_subtract=bkg_subtract,
bkg_mod_index=bkg_mod_index)
frac = np.nansum(fil_model) / np.nansum(self.image)
if hasattr(frac, 'value'):
frac = frac.value
return frac
def ridge_profiles(self):
'''
Return the image values along the longest path of the skeleton.
See `~Filament2D.ridge_profile`.
Returns
-------
ridges : list
List of the ridge values for each filament.
'''
return [fil.ridge_profile(self.image) for fil in self.filaments]
def output_table(self, xunit=u.pix, world_coord=False, **kwargs):
'''
Return the analysis results as an astropy table.
If `~FilFinder2D.exec_rht` was run on the whole skeleton, the
orientation and curvature will be included in the table. If the RHT
was run on individual branches, use `~FilFinder2D.save_branch_tables`
with `include_rht=True` to save the curvature and orientations.
Parameters
----------
xunit : `~astropy.units.Unit`, optional
Unit for spatial properties. Defaults to pixel units.
world_coord : bool, optional
Return the median filament position in world coordinates.
kwargs : Passed to `~FilFinder2D.total_intensity`.
Returns
-------
tab : `~astropy.table.Table`
Table with all analyzed parameters.
'''
tab = Table()
tab["lengths"] = Column(self.lengths(xunit))
tab['branches'] = Column(self.branch_properties["number"])
tab['total_intensity'] = Column(self.total_intensity(**kwargs))
tab['median_brightness'] = Column(self.median_brightness())
if not self._rht_branches_flag:
tab['orientation'] = Column(self.orientation)
tab['curvature'] = Column(self.curvature)
# Return centres
fil_centres = self.filament_positions(world_coord=world_coord)
if fil_centres[0][0].unit == u.pix:
yposn = [centre[0].value for centre in fil_centres] * u.pix
xposn = [centre[1].value for centre in fil_centres] * u.pix
tab['X_posn'] = Column(xposn)
tab['Y_posn'] = Column(yposn)
else:
ra_unit = fil_centres[0][0].unit
ra = [centre[0].value for centre in fil_centres] * ra_unit
dec_unit = fil_centres[0][1].unit
dec = [centre[1].value for centre in fil_centres] * dec_unit
tab['RA'] = Column(ra)
tab['Dec'] = Column(dec)
# Join with the width table
width_table = self.width_fits(xunit=xunit)
from astropy.table import hstack as tab_hstack
tab = tab_hstack([tab, width_table])
return tab
def branch_tables(self, include_rht=False):
'''
Return the branch properties of each filament. If the RHT was run
on individual branches (`branches=True` in `~FilFinder2D.exec_rht`),
the orientation and curvature of each branch can be included in the
saved table.
A table will be returned for each filament in order of the filaments
in `~FilFinder2D.filaments`.
Parameters
----------
include_rht : bool, optional
Include RHT orientation and curvature if `~FilFinder2D.exec_rht`
is run with `branches=True`.
Returns
-------
tables : list
List of `~astropy.table.Table` for each filament.
'''
tables = []
for n, fil in enumerate(self.filaments):
tables.append(fil.branch_table(include_rht=include_rht))
return tables
def save_fits(self, save_name=None,
save_longpath_skeletons=True,
save_model=True,
model_kwargs={}, **kwargs):
'''
Save the mask and the skeleton array as FITS files. The header includes
the settings used to create them.
The mask, skeleton, (optional) longest skeletons, and (optional)
model are included in the outputted file. The skeletons are labeled to
match their order in `~FilFinder2D.filaments`.
Parameters
----------
save_name : str, optional
The prefix for the saved file. If None, the save name specified
when `~FilFinder2D` was first called is used.
save_longpath_skeletons : bool, optional
Save a FITS extension with the longest path skeleton array.
Default is `True`. Requires `~FilFinder2D.analyze_skeletons`
to be run.
save_model : bool, optional
Save a FITS extension with the image of the fitted filament models.
Default is `True`. Requires `~FilFinder2D.find_widths`
to be run.
model_kwargs : dict, optional
Passed to `~FilFinder2D.filament_model`.
kwargs : Passed to `~astropy.io.fits.PrimaryHDU.writeto`.
'''
if save_name is None:
save_name = self.save_name
else:
save_name = os.path.splitext(save_name)[0]
# Create header based off of image header.
if self.header is not None:
new_hdr = deepcopy(self.header)
else:
new_hdr = fits.Header()
new_hdr["NAXIS"] = 2
new_hdr["NAXIS1"] = self.image.shape[1]
new_hdr["NAXIS2"] = self.image.shape[0]
try: # delete the original history
del new_hdr["HISTORY"]
except KeyError:
pass
from fil_finder.version import version
new_hdr["BUNIT"] = ("bool", "")
new_hdr["COMMENT"] = \
"Mask created by fil_finder at {0}. Version {1}"\
.format(time.strftime("%c"), version)
new_hdr["COMMENT"] = \
"See fil_finder documentation for more info on parameter meanings."
new_hdr["COMMENT"] = "Smoothing Filter Size: " + \
str(self.smooth_size) + " pixels"
new_hdr["COMMENT"] = "Area Threshold: " + \
str(self.size_thresh) + " pixels^2"
new_hdr["COMMENT"] = "Global Intensity Threshold: " + \
str(self.glob_thresh) + " %"
new_hdr["COMMENT"] = "Size of Adaptive Threshold Patch: " + \
str(self.adapt_thresh) + " pixels"
new_hdr['BITPIX'] = "8"
mask_hdu = fits.PrimaryHDU(self.mask.astype(int), new_hdr)
out_hdu = fits.HDUList([mask_hdu])
# Skeletons
new_hdr_skel = new_hdr.copy()
new_hdr_skel["BUNIT"] = ("int", "")
new_hdr_skel['BITPIX'] = "16"
new_hdr_skel["COMMENT"] = "Skeleton Size Threshold: " + \
str(self.skel_thresh)
new_hdr_skel["COMMENT"] = "Branch Size Threshold: " + \
str(self.branch_thresh)
# Final Skeletons - create labels which match up with table output
labels = nd.label(self.skeleton, eight_con())[0]
out_hdu.append(fits.ImageHDU(labels, header=new_hdr_skel))
# Longest Paths
if save_longpath_skeletons:
labels_lp = nd.label(self.skeleton_longpath, eight_con())[0]
out_hdu.append(fits.ImageHDU(labels_lp,
header=new_hdr_skel))
if save_model:
model = self.filament_model(**model_kwargs)
if hasattr(model, 'unit'):
model = model.value
model_hdr = new_hdr.copy()
model_hdr['COMMENT'] = "Image generated from fitted filament models."
if self.header is not None:
bunit = self.header.get('BUNIT', None)
if bunit is not None:
model_hdr['BUNIT'] = bunit
else:
model_hdr['BUNIT'] = ""
model_hdr['BITPIX'] = fits.DTYPE2BITPIX[str(model.dtype)]
model_hdu = fits.ImageHDU(model, header=model_hdr)
out_hdu.append(model_hdu)
out_hdu.writeto("{0}_image_output.fits".format(save_name),
**kwargs)
def save_stamp_fits(self, save_name=None, pad_size=20 * u.pix,
model_kwargs={},
**kwargs):
'''
Save stamps of each filament image, skeleton, longest-path skeleton,
and the model image.
A suffix of "stamp_{num}" is added to each file, where the number is
is the order in the list of `~FilFinder2D.filaments`.
Parameters
----------
save_name : str, optional
The prefix for the saved file. If None, the save name specified
when `~FilFinder2D` was first called is used.
pad_size : `~astropy.units.Quantity`, optional
Padding to add around the filament extents when cutting out each stamp.
model_kwargs : dict, optional
Passed to `~FilFinder2D.filament_model`.
kwargs : Passed to `~astropy.io.fits.PrimaryHDU.writeto`.
'''
if save_name is None:
save_name = self.save_name
else:
save_name = os.path.splitext(save_name)[0]
for n, fil in enumerate(self.filaments):
savename = "{0}_stamp_{1}.fits".format(save_name, n)
fil.save_fits(savename, self.image, pad_size=pad_size,
model_kwargs=model_kwargs,
**kwargs)
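# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal,
# hedged example of the width-fitting and output workflow documented in the
# docstrings above. The constructor arguments and the preprocessing calls
# (`preprocess_image`, `create_mask`, `medskel`, `analyze_skeletons`) are
# assumptions based on the rest of the package and are not shown in this
# section; the input file name is a placeholder.
if __name__ == '__main__':
    import astropy.units as u
    from astropy.io import fits

    hdu = fits.open("filaments.fits")[0]  # hypothetical input image
    fils = FilFinder2D(hdu, distance=250. * u.pc, save_name="demo")

    # Masking and skeletonization (assumed API; run before find_widths).
    fils.preprocess_image(flatten_percent=85)
    fils.create_mask(verbose=False)
    fils.medskel(verbose=False)
    fils.analyze_skeletons(skel_thresh=10 * u.pix, branch_thresh=40 * u.pix)

    # Radial-profile width fits, as documented in `find_widths` above.
    fils.find_widths(max_dist=0.2 * u.pc, fit_model='gaussian_bkg')

    # Per-filament results from the methods defined in this class.
    fwhm, fwhm_err = fils.widths(u.pc)
    results = fils.output_table(xunit=u.pc)
    frac = fils.covering_fraction()

    # FITS outputs (mask, skeletons, model) and per-filament stamps.
    fils.save_fits()
    fils.save_stamp_fits()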
|
e-koch/FilFinder
|
fil_finder/filfinder2D.py
|
Python
|
mit
| 58,807
|
[
"Gaussian"
] |
7f14db561d3a674ad4b59d92ea09ffb4cab9f62d3cb894671ea6c8e7d6780c38
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-ban-site
# Author : Stuart Paterson
########################################################################
"""
Remove Site from Active mask for current Setup
Example:
$ dirac-admin-ban-site LCG.IN2P3.fr "Pilot installation problems"
"""
import time
from DIRAC.Core.Base.Script import Script
@Script()
def main():
Script.registerSwitch("E:", "email=", "Boolean True/False (True by default)")
# Registering arguments will automatically add their description to the help menu
Script.registerArgument("Site: Name of the Site")
Script.registerArgument("Comment: Reason of the action")
Script.parseCommandLine(ignoreErrors=True)
from DIRAC import exit as DIRACExit, gConfig, gLogger
from DIRAC.Core.Utilities.PromptUser import promptUser
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
def getBoolean(value):
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
Script.showHelp()
email = True
for switch in Script.getUnprocessedSwitches():
if switch[0] == "email":
email = getBoolean(switch[1])
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
setup = gConfig.getValue("/DIRAC/Setup", "")
if not setup:
print("ERROR: Could not contact Configuration Service")
exitCode = 2
DIRACExit(exitCode)
# result = promptUser(
# 'All the elements that are associated with this site will be banned,'
# 'are you sure about this action?'
# )
# if not result['OK'] or result['Value'] is 'n':
# print 'Script stopped'
# DIRACExit( 0 )
# parseCommandLine shows help when mandatory arguments are not specified or are incorrect
site, comment = Script.getPositionalArgs(group=True)
result = diracAdmin.banSite(site, comment, printOutput=True)
if not result["OK"]:
errorList.append((site, result["Message"]))
exitCode = 2
else:
if email:
userName = diracAdmin._getCurrentUser()
if not userName["OK"]:
print("ERROR: Could not obtain current username from proxy")
exitCode = 2
DIRACExit(exitCode)
userName = userName["Value"]
subject = "%s is banned for %s setup" % (site, setup)
body = "Site %s is removed from site mask for %s setup by %s on %s.\n\n" % (
site,
setup,
userName,
time.asctime(),
)
body += "Comment:\n%s" % comment
addressPath = "EMail/Production"
address = Operations().getValue(addressPath, "")
if not address:
gLogger.notice("'%s' not defined in Operations, can not send Mail\n" % addressPath, body)
else:
result = diracAdmin.sendMail(address, subject, body)
else:
print("Automatic email disabled by flag.")
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit(exitCode)
if __name__ == "__main__":
main()
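# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original script). Hypothetical
# invocations showing the registered positional arguments and the optional
# email switch handled by getBoolean() above; the exact switch syntax is an
# assumption from the registerSwitch() call.
#
#   $ dirac-admin-ban-site LCG.IN2P3.fr "Pilot installation problems"
#   $ dirac-admin-ban-site --email=False LCG.IN2P3.fr "Pilot installation problems"
#
# The second form skips the notification mail and prints
# "Automatic email disabled by flag." instead.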
|
DIRACGrid/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_ban_site.py
|
Python
|
gpl-3.0
| 3,347
|
[
"DIRAC"
] |
d1a4491735803bc8fde3835d14e7008b724ca8cf55feb9d25b01f40326ded2f3
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/SWIG_scripts/B0 case
# Create table
from paravistest import tablesdir
import pvserver as paravis
import pvsimple
# Define script for table creation
script = """
import math
# Get table output
table = self.GetTableOutput()
nb_rows = 10
nb_cols = 200
# Create first column
col1 = vtk.vtkDoubleArray()
col1.SetName('Frequency')
for i in xrange(0, nb_rows):
col1.InsertNextValue(i * 10 + 1)
table.AddColumn(col1)
# Create the remaining columns
for i in xrange(1, nb_cols + 1):
col = vtk.vtkDoubleArray()
col.SetName('Power ' + str(i))
# Fill the next column
for j in xrange(0, nb_rows):
if j % 2 == 1:
col.InsertNextValue(math.log10(j * 30 * math.pi / 180) * 20 + i * 15 + j * 5)
else:
col.InsertNextValue(math.sin(j * 30 * math.pi / 180) * 20 + i * 15 + j * 5)
table.AddColumn(col)
"""
# Creating programmable source (table)
ps = pvsimple.ProgrammableSource()
ps.OutputDataSetType = 'vtkTable'
ps.Script = script
pvsimple.RenameSource("Very useful data", ps)
ps.UpdatePipeline()
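# ---------------------------------------------------------------------------
# Illustrative check (not part of the original test). A minimal, hedged
# sketch of how the generated table could be fetched to the client and
# inspected; `servermanager.Fetch` is assumed to be available from the
# bundled paraview module.
from paraview import servermanager
table = servermanager.Fetch(ps)
print "Rows:", table.GetNumberOfRows()        # expected: 10
print "Columns:", table.GetNumberOfColumns()  # expected: 201 ('Frequency' + 200 'Power i' columns)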
|
FedoraScientific/salome-paravis
|
test/VisuPrs/SWIG_scripts/B0.py
|
Python
|
lgpl-2.1
| 1,920
|
[
"VTK"
] |
c9b38e8ea1131bf70180245d44b316a55da123aafb10dd8abad321459979f3c6
|
##input: /Users/metagenomics/Documents/Databases/uniprot_tremble.dat or uniprot_sprot.dat
import sys
import os
from Bio import SeqIO
#import gzip
#This script uses biopython to get taxonomy from uniprot .dat file
'''
record.annotations record.letter_annotations
record.dbxrefs record.lower
record.description record.name
record.features record.reverse_complement
record.format record.seq
record.id record.upper
'''
def get_taxa(uniprot_dat):
taxa_to_print=[]
for record in SeqIO.parse(uniprot_dat, "swiss"):
# taxa_to_print.append(record.id)
taxa_to_print.append(uniprot_dat.split(".")[0])
taxa_to_print.append(record.annotations['organism'])
for taxa in record.annotations['taxonomy']:
taxa_to_print.append(taxa)
return '\t'.join(taxa_to_print)
uniprot_dat = sys.argv[1:]
for f in uniprot_dat:
taxa = get_taxa(f)
print taxa
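# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original script). Hypothetical
# invocation and output layout: for each input file the script prints one
# tab-separated line holding the file prefix followed by the organism and
# taxonomy lineage fields of every record in that file (values below are
# examples only).
#
#   $ python uniprot_dat.py uniprot_sprot.dat > sprot_taxonomy.tsv
#
#   uniprot_sprot<TAB>Homo sapiens (Human)<TAB>Eukaryota<TAB>Metazoa<TAB>...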
|
fandemonium/code
|
uniprot_dat.py
|
Python
|
mit
| 914
|
[
"Biopython"
] |
cba4dc0a5b12a150d42a34537d0422b7dc531a4ff28b5ded82d60d95cf8396f8
|
#!/usr/bin/env python3
import math
import warnings
from abc import ABC, abstractmethod
from copy import deepcopy
import torch
from .. import settings
from ..distributions import MultivariateNormal, base_distributions
from ..module import Module
from ..utils.quadrature import GaussHermiteQuadrature1D
from ..utils.warnings import GPInputWarning
class _Likelihood(Module, ABC):
def __init__(self, max_plate_nesting=1):
super().__init__()
self.max_plate_nesting = max_plate_nesting
def _draw_likelihood_samples(self, function_dist, *args, sample_shape=None, **kwargs):
if sample_shape is None:
sample_shape = torch.Size(
[settings.num_likelihood_samples.value()]
+ [1] * (self.max_plate_nesting - len(function_dist.batch_shape) - 1)
)
else:
sample_shape = sample_shape[: -len(function_dist.batch_shape) - 1]
if self.training:
num_event_dims = len(function_dist.event_shape)
function_dist = base_distributions.Normal(function_dist.mean, function_dist.variance.sqrt())
function_dist = base_distributions.Independent(function_dist, num_event_dims - 1)
function_samples = function_dist.rsample(sample_shape)
return self.forward(function_samples, *args, **kwargs)
def expected_log_prob(self, observations, function_dist, *args, **kwargs):
likelihood_samples = self._draw_likelihood_samples(function_dist, *args, **kwargs)
res = likelihood_samples.log_prob(observations).mean(dim=0)
return res
@abstractmethod
def forward(self, function_samples, *args, **kwargs):
raise NotImplementedError
def get_fantasy_likelihood(self, **kwargs):
return deepcopy(self)
def log_marginal(self, observations, function_dist, *args, **kwargs):
likelihood_samples = self._draw_likelihood_samples(function_dist, *args, **kwargs)
log_probs = likelihood_samples.log_prob(observations)
res = log_probs.sub(math.log(log_probs.size(0))).logsumexp(dim=0)
return res
def marginal(self, function_dist, *args, **kwargs):
res = self._draw_likelihood_samples(function_dist, *args, **kwargs)
return res
def __call__(self, input, *args, **kwargs):
# Conditional
if torch.is_tensor(input):
return super().__call__(input, *args, **kwargs)
# Marginal
elif isinstance(input, MultivariateNormal):
return self.marginal(input, *args, **kwargs)
# Error
else:
raise RuntimeError(
"Likelihoods expects a MultivariateNormal input to make marginal predictions, or a "
"torch.Tensor for conditional predictions. Got a {}".format(input.__class__.__name__)
)
try:
import pyro
class Likelihood(_Likelihood):
r"""
A Likelihood in GPyTorch specifies the mapping from latent function values
:math:`f(\mathbf X)` to observed labels :math:`y`.
For example, in the case of regression this might be a Gaussian
distribution, as :math:`y(\mathbf x)` is equal to :math:`f(\mathbf x)` plus Gaussian noise:
.. math::
y(\mathbf x) = f(\mathbf x) + \epsilon, \:\:\:\: \epsilon \sim N(0,\sigma^{2}_{n} \mathbf I)
In the case of classification, this might be a Bernoulli distribution,
where the probability that :math:`y=1` is given by the latent function
passed through some sigmoid or probit function:
.. math::
y(\mathbf x) = \begin{cases}
1 & \text{w/ probability} \:\: \sigma(f(\mathbf x)) \\
0 & \text{w/ probability} \:\: 1-\sigma(f(\mathbf x))
\end{cases}
In either case, to implement a likelihood function, GPyTorch only
requires a :attr:`forward` method that computes the conditional distribution
:math:`p(y \mid f(\mathbf x))`.
Calling this object does one of two things:
- If likelihood is called with a :class:`torch.Tensor` object, then it is
assumed that the input is samples from :math:`f(\mathbf x)`. This
returns the *conditional* distribution `p(y|f(\mathbf x))`.
- If likelihood is called with a :class:`~gpytorch.distribution.MultivariateNormal` object,
then it is assumed that the input is the distribution :math:`f(\mathbf x)`.
This returns the *marginal* distribution `p(y|\mathbf x)`.
Args:
:attr:`max_plate_nesting` (int, default=1)
(For Pyro integration only). How many batch dimensions are in the function.
This should be modified if the likelihood uses plated random variables.
"""
@property
def num_data(self):
if hasattr(self, "_num_data"):
return self._num_data
else:
warnings.warn(
"likelihood.num_data isn't set. This might result in incorrect ELBO scaling.", GPInputWarning
)
return ""
@num_data.setter
def num_data(self, val):
self._num_data = val
@property
def name_prefix(self):
if hasattr(self, "_name_prefix"):
return self._name_prefix
else:
return ""
@name_prefix.setter
def name_prefix(self, val):
self._name_prefix = val
def _draw_likelihood_samples(self, function_dist, *args, sample_shape=None, **kwargs):
if self.training:
num_event_dims = len(function_dist.event_shape)
function_dist = base_distributions.Normal(function_dist.mean, function_dist.variance.sqrt())
function_dist = base_distributions.Independent(function_dist, num_event_dims - 1)
plate_name = self.name_prefix + ".num_particles_vectorized"
num_samples = settings.num_likelihood_samples.value()
max_plate_nesting = max(self.max_plate_nesting, len(function_dist.batch_shape))
with pyro.plate(plate_name, size=num_samples, dim=(-max_plate_nesting - 1)):
if sample_shape is None:
function_samples = pyro.sample(self.name_prefix, function_dist.mask(False))
# Deal with the fact that we're not assuming conditional independence over data points here
function_samples = function_samples.squeeze(-len(function_dist.event_shape) - 1)
else:
sample_shape = sample_shape[: -len(function_dist.batch_shape)]
function_samples = function_dist(sample_shape)
if not self.training:
function_samples = function_samples.squeeze(-len(function_dist.event_shape) - 1)
return self.forward(function_samples, *args, **kwargs)
def expected_log_prob(self, observations, function_dist, *args, **kwargs):
r"""
(Used by :obj:`~gpytorch.mlls.VariationalELBO` for variational inference.)
Computes the expected log likelihood, where the expectation is over the GP variational distribution.
.. math::
\sum_{\mathbf x, y} \mathbb{E}_{q\left( f(\mathbf x) \right)}
\left[ \log p \left( y \mid f(\mathbf x) \right) \right]
Args:
:attr:`observations` (:class:`torch.Tensor`)
Values of :math:`y`.
:attr:`function_dist` (:class:`~gpytorch.distributions.MultivariateNormal`)
Distribution for :math:`f(x)`.
:attr:`args`, :attr:`kwargs`
Passed to the `forward` function
Returns
`torch.Tensor` (log probability)
"""
return super().expected_log_prob(observations, function_dist, *args, **kwargs)
@abstractmethod
def forward(self, function_samples, *args, data={}, **kwargs):
r"""
Computes the conditional distribution :math:`p(\mathbf y \mid
\mathbf f, \ldots)` that defines the likelihood.
:param torch.Tensor function_samples: Samples from the function (:math:`\mathbf f`)
:param dict data: (Optional, Pyro integration only) Additional
variables (:math:`\ldots`) that the likelihood needs to condition
on. The keys of the dictionary will correspond to Pyro sample sites
in the likelihood's model/guide.
:param args: Additional args
:param kwargs: Additional kwargs
:return: Distribution object (with same shape as :attr:`function_samples`)
:rtype: :obj:`Distribution`
"""
raise NotImplementedError
def get_fantasy_likelihood(self, **kwargs):
"""
"""
return super().get_fantasy_likelihood(**kwargs)
def log_marginal(self, observations, function_dist, *args, **kwargs):
r"""
(Used by :obj:`~gpytorch.mlls.PredictiveLogLikelihood` for approximate inference.)
Computes the log marginal likelihood of the approximate predictive distribution
.. math::
\sum_{\mathbf x, y} \log \mathbb{E}_{q\left( f(\mathbf x) \right)}
\left[ p \left( y \mid f(\mathbf x) \right) \right]
Note that this differs from :meth:`expected_log_prob` because the :math:`log` is on the outside
of the expectation.
Args:
:attr:`observations` (:class:`torch.Tensor`)
Values of :math:`y`.
:attr:`function_dist` (:class:`~gpytorch.distributions.MultivariateNormal`)
Distribution for :math:`f(x)`.
:attr:`args`, :attr:`kwargs`
Passed to the `forward` function
Returns
`torch.Tensor` (log probability)
"""
return super().log_marginal(observations, function_dist, *args, **kwargs)
def marginal(self, function_dist, *args, **kwargs):
r"""
Computes a predictive distribution :math:`p(y^* | \mathbf x^*)` given either a posterior
distribution :math:`p(\mathbf f | \mathcal D, \mathbf x)` or a
prior distribution :math:`p(\mathbf f|\mathbf x)` as input.
With both exact inference and variational inference, the form of
:math:`p(\mathbf f|\mathcal D, \mathbf x)` or :math:`p(\mathbf f|
\mathbf x)` should usually be Gaussian. As a result, :attr:`function_dist`
should usually be a :obj:`~gpytorch.distributions.MultivariateNormal` specified by the mean and
(co)variance of :math:`p(\mathbf f|...)`.
Args:
:attr:`function_dist` (:class:`~gpytorch.distributions.MultivariateNormal`)
Distribution for :math:`f(x)`.
:attr:`args`, :attr:`kwargs`
Passed to the `forward` function
Returns:
Distribution object (the marginal distribution, or samples from it)
"""
return super().marginal(function_dist, *args, **kwargs)
def pyro_guide(self, function_dist, target, *args, **kwargs):
r"""
(For Pyro integration only).
Part of the guide function for the likelihood.
This should be re-defined if the likelihood contains any latent variables that need to be inferred.
:param ~gpytorch.distributions.MultivariateNormal function_dist: Distribution of latent function
:math:`q(\mathbf f)`.
:param torch.Tensor target: Observed :math:`\mathbf y`.
:param args: Additional args (for :meth:`~forward`).
:param kwargs: Additional kwargs (for :meth:`~forward`).
"""
with pyro.plate(self.name_prefix + ".data_plate", dim=-1):
pyro.sample(self.name_prefix + ".f", function_dist)
def pyro_model(self, function_dist, target, *args, **kwargs):
r"""
(For Pyro integration only).
Part of the model function for the likelihood.
It should return the sampled target values (the result of :meth:`sample_target`).
This should be re-defined if the likelihood contains any latent variables that need to be inferred.
:param ~gpytorch.distributions.MultivariateNormal function_dist: Distribution of latent function
:math:`p(\mathbf f)`.
:param torch.Tensor target: Observed :math:`\mathbf y`.
:param args: Additional args (for :meth:`~forward`).
:param kwargs: Additional kwargs (for :meth:`~forward`).
"""
with pyro.plate(self.name_prefix + ".data_plate", dim=-1):
function_samples = pyro.sample(self.name_prefix + ".f", function_dist)
output_dist = self(function_samples, *args, **kwargs)
return self.sample_target(output_dist, target)
def sample_target(self, output_dist, target):
scale = (self.num_data or output_dist.batch_shape[-1]) / output_dist.batch_shape[-1]
with pyro.poutine.scale(scale=scale):
return pyro.sample(self.name_prefix + ".y", output_dist, obs=target)
def __call__(self, input, *args, **kwargs):
# Conditional
if torch.is_tensor(input):
return super().__call__(input, *args, **kwargs)
# Marginal
elif any(
[
isinstance(input, MultivariateNormal),
isinstance(input, pyro.distributions.Normal),
(
isinstance(input, pyro.distributions.Independent)
and isinstance(input.base_dist, pyro.distributions.Normal)
),
]
):
return self.marginal(input, *args, **kwargs)
# Error
else:
raise RuntimeError(
"Likelihoods expects a MultivariateNormal or Normal input to make marginal predictions, or a "
"torch.Tensor for conditional predictions. Got a {}".format(input.__class__.__name__)
)
except ImportError:
class Likelihood(_Likelihood):
@property
def num_data(self):
warnings.warn("num_data is only used for likehoods that are integrated with Pyro.", RuntimeWarning)
return 0
@num_data.setter
def num_data(self, val):
warnings.warn("num_data is only used for likehoods that are integrated with Pyro.", RuntimeWarning)
@property
def name_prefix(self):
warnings.warn("name_prefix is only used for likehoods that are integrated with Pyro.", RuntimeWarning)
return ""
@name_prefix.setter
def name_prefix(self, val):
warnings.warn("name_prefix is only used for likehoods that are integrated with Pyro.", RuntimeWarning)
class _OneDimensionalLikelihood(Likelihood, ABC):
r"""
A specific case of :obj:`~gpytorch.likelihoods.Likelihood` when the GP represents a one-dimensional
output. (I.e. for a specific :math:`\mathbf x`, :math:`f(\mathbf x) \in \mathbb{R}`.)
Inheriting from this likelihood reduces the variance when computing approximate GP objective functions
by using 1D Gauss-Hermite quadrature.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.quadrature = GaussHermiteQuadrature1D()
def expected_log_prob(self, observations, function_dist, *args, **kwargs):
log_prob_lambda = lambda function_samples: self.forward(function_samples).log_prob(observations)
log_prob = self.quadrature(log_prob_lambda, function_dist)
return log_prob
def log_marginal(self, observations, function_dist, *args, **kwargs):
prob_lambda = lambda function_samples: self.forward(function_samples).log_prob(observations).exp()
prob = self.quadrature(prob_lambda, function_dist)
return prob.log()
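# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). A minimal example of
# the contract described in the docstrings above: a likelihood only needs a
# `forward` method returning the conditional distribution p(y | f(x)). This
# Bernoulli classification likelihood is only a sketch built from the classes
# defined in this file; it is not GPyTorch's built-in BernoulliLikelihood.
class _ExampleBernoulliLikelihood(_OneDimensionalLikelihood):
    def forward(self, function_samples, **kwargs):
        # Squash latent function samples through a sigmoid to get p(y = 1 | f).
        output_probs = torch.sigmoid(function_samples)
        return base_distributions.Bernoulli(probs=output_probs)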
|
jrg365/gpytorch
|
gpytorch/likelihoods/likelihood.py
|
Python
|
mit
| 16,260
|
[
"Gaussian"
] |
7aa3d5fc69a99d17cd1fde09705b84f257c583bbba5c561bf10eb5451f0fbcd7
|
# -*- coding: utf-8 -*-
############################################################################
# Copyright (C) 2010 by Nestor Aguirre #
# nfaguirre@imaff.cfmac.csic.es #
# #
# This program is free software; you can redistribute it and#or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from Molecule import *
from CrystalParser import *
###
# Make a Crystal extending from Molecule
##
class Crystal(Molecule):
###
# Constructor
##
def __init__( this, name="Unknown", atomicNumbers=None, labels=None, xPos=None, yPos=None, zPos=None, charges=None ):
Molecule.__init__( this, name, atomicNumbers, labels, xPos, yPos, zPos, charges )
this.latticeVectors = -numpy.ones( (3,3) )
this.numberOfReplicas = [1,1,1]
###
#
##
def __str__( this ):
output = Molecule.__str__( this )+"\n\n"
output += "Lattice Vectors ="
for i in [0,1,2]:
output += "\n"
for j in [0,1,2]:
output += "%15.7f" % this.latticeVectors[i,j]
output += "\n\n"
output += "Supercell Size = ["
output += "%2d," % this.numberOfReplicas[0]
output += "%2d," % this.numberOfReplicas[1]
output += "%2d" % this.numberOfReplicas[2]
output += " ]"
return output
###
#
##
def __copy__( this ):
output = Crystal()
output.name = this.name
output.symmetryOperators = copy(this.symmetryOperators)
output.orbitalEnergies = copy(this.orbitalEnergies)
for atom in this:
output.append( atom, makeCopy=True, automaticId=False )
#if( atom.real ):
#this.realAtoms.append( this[-1] )
output.latticeVectors = copy(this.latticeVectors)
output.numberOfReplicas = copy(this.numberOfReplicas)
return output
###
# Select the latticeVectors
##
def setLatticeVectors( this, latticeVectors ):
# Checks if the lattice vectors have been chosen,
# if these values have been chosen then the atomic
# positions will be rescaled to the new values
if( not ( this.latticeVectors+1.0 ).any() ):
this.latticeVectors = numpy.matrix( latticeVectors )
else:
oldLatticeVectors = numpy.matrix( this.latticeVectors )
this.latticeVectors = numpy.matrix( latticeVectors )
for atom in this:
atom.x = ( this.latticeVectors[0,0]/oldLatticeVectors[0,0] )*atom.x
atom.y = ( this.latticeVectors[1,1]/oldLatticeVectors[1,1] )*atom.y
atom.z = ( this.latticeVectors[2,2]/oldLatticeVectors[2,2] )*atom.z
###
# Select the supercell dimensions
##
def buildSuperCell( this, nx=1, ny=1, nz=0, fixUnitCell=False, debug=False ):
#print "BUILDING SUPERCELL"
#print "------------------"
#print " NEW SIZE: "+"[ "+str(nx)+", "+str(ny)+", "+str(nz)+" ]"
#print ""
this.numberOfReplicas[0] = nx
this.numberOfReplicas[1] = ny
this.numberOfReplicas[2] = nz
molecule = this[:]
c = this.latticeVectors
toRemove = {}
#for ix in range(nx/2-nx+1, nx/2+1):
#for iy in range(ny/2-ny+1, ny/2+1):
#for iz in range(nz/2-nz+1, nz/2+1):
#for iz in range(1,nz):
iz = 1
#for ix in range(1,nx):
#for iy in range(1,ny):
for ix in range(nx/2-nx+1, nx/2+1):
for iy in range(ny/2-ny+1, ny/2+1):
if( fixUnitCell ):
if( ix == 0 and iy == 0 ):
continue
for atom in molecule:
x = atom.x+float(ix-1)*c[0,0]+float(iy-1)*c[0,1]+float(iz-1)*c[0,2]
y = atom.y+float(ix-1)*c[1,0]+float(iy-1)*c[1,1]+float(iz-1)*c[1,2]
z = atom.z+float(ix-1)*c[2,0]+float(iy-1)*c[2,1]+float(iz-1)*c[2,2]
if( fixUnitCell ):
success = this.append( Atom( x, y, z, charge=atom.charge, label=atom.label, real=False, symGrp=atom.symGrp ), check=True, onlyTest=True, debug=debug )
if( not success ):
toRemove[ atom.id ] = 1
else:
this.append( Atom( x, y, z, charge=atom.charge, label=atom.label, real=False, symGrp=atom.symGrp ), check=True, debug=debug )
if( fixUnitCell ):
if( debug ):
print "List of atoms to remove:"
print toRemove.keys()
this.remove( idList=toRemove.keys() )
#for ix in range(nx/2-nx+1, nx/2+1):
#for iy in range(ny/2-ny+1, ny/2+1):
#if( ix != 0 or iy != 0 ):
#for atom in molecule:
#x = atom.x+ix*c[0,0]
#y = atom.y+iy*c[1,1]
#z = atom.z
#this.append( Atom( x, y, z, charge=atom.charge, label=atom.label, real=False, symGrp=atom.symGrp ), check=False )
###
# Test method
##
@staticmethod
def test():
PIAMOD_HOME = os.getenv("PIAMOD_HOME")
crystal = Crystal("Celda unidad de TiO2")
crystal.append( Atom( 0.000000000000, 0.000000000000, 0.000000000000, label="Ti" ) )
crystal.append( Atom( 1.419489652269, 1.419489652269, 0.000000000000, label="O" ) )
sym1 = [[-1.000, 0.000, 0.000],
[ 0.000, -1.000, 0.000],
[ 0.000, 0.000, 1.000],
[ 0.000, 0.000, 0.000]]
sym2 = [[ 0.100, 0.000, 0.000],
[ 0.000, -0.100, 0.000],
[ 0.000, 0.000, -0.100],
[ 2.319, 2.319, 1.489]]
sym3 = [[-1.000, 0.000, 0.000],
[ 0.000, 1.000, 0.000],
[ 0.000, 0.000, -1.000],
[ 2.319, 2.319, 1.489]]
sym4 = [[ 0.000, -1.000, 0.000],
[-1.000, 0.000, 0.000],
[ 0.000, 0.000, -1.000],
[ 0.000, 0.000, 0.000]]
sym5 = [[ 0.000, 1.000, 0.000],
[ 1.000, 0.000, 0.000],
[ 0.000, 0.000, -1.000],
[ 0.000, 0.000, 0.000]]
sym6 = [[ 0.000, 1.000, 0.000],
[-1.000, 0.000, 0.000],
[ 0.000, 0.000, 1.000],
[ 2.319, 2.319, 1.489]]
sym7 = [[ 0.000, -0.100, 0.000],
[ 0.100, 0.000, 0.000],
[ 0.000, 0.000, 0.100],
[ 2.319, 2.319, 1.489]]
sym8 = [[-0.100, 0.000, 0.000],
[ 0.000, -0.100, 0.000],
[ 0.000, 0.000, -0.100],
[ 0.000, 0.000, 0.000]]
sym9 = [[ 0.100, 0.000, 0.000],
[ 0.000, 0.100, 0.000],
[ 0.000, 0.000, -0.100],
[ 0.000, 0.000, 0.000]]
sym10= [[-0.100, 0.000, 0.000],
[ 0.000, 0.100, 0.000],
[ 0.000, 0.000, 0.100],
[ 2.319, 2.319, 1.489]]
sym11= [[ 0.100, 0.000, 0.000],
[ 0.000, -0.100, 0.000],
[ 0.000, 0.000, 0.100],
[ 2.319, 2.319, 1.489]]
sym12= [[ 0.000, 0.100, 0.000],
[ 0.100, 0.000, 0.000],
[ 0.000, 0.000, 0.100],
[ 0.000, 0.000, 0.000]]
sym13= [[ 0.000, -0.100, 0.000],
[-0.100, 0.000, 0.000],
[ 0.000, 0.000, 0.100],
[ 0.000, 0.000, 0.000]]
sym14= [[ 0.000, -0.100, 0.000],
[ 0.100, 0.000, 0.000],
[ 0.000, 0.000, -0.100],
[ 2.319, 2.319, 1.489]]
sym15= [[ 0.000, 0.100, 0.000],
[-0.100, 0.000, 0.000],
[ 0.000, 0.000, -0.100],
[ 2.319, 2.319, 1.489]]
latticeVectors = [[ 4.64, 0.00, 0.00],
[ 0.00, 4.64, 0.00],
[ 0.00, 0.00, 2.98]]
crystal.setSymetryOperators( [sym1, sym2, sym3, sym4, sym5, sym6, sym7, sym8, sym9, sym10, sym11, sym12, sym13, sym14, sym15] )
crystal.setLatticeVectors( latticeVectors )
crystal.buildSuperCell( 2, 2, 2 )
crystal.save( "final.xyz", format=Molecule.XYZ )
latticeVectors = [[ 1.2*4.64, 0.00, 0.00],
[ 0.00, 1.2*4.64, 0.00],
[ 0.00, 0.00, 1.2*2.98]]
crystal.setLatticeVectors( latticeVectors )
crystal.save( "final2.xyz", format=Molecule.XYZ )
print crystal
|
nfaguirrec/piamod
|
src/Crystal.py
|
Python
|
gpl-2.0
| 8,336
|
[
"CRYSTAL"
] |
c032d01113a35a98927c2c6fbdeeea3ce038195fb4e175b2ca182a168adbfeec
|
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
__author__ = 'wildi.markus@bluewin.ch'
# Transform with libnova
# Python bindings for libnova
#
import numpy as np
from astropy.coordinates import Longitude,Latitude,Angle
from astropy import units as u
from astropy.coordinates import SkyCoord
from ctypes import *
class LN_equ_posn(Structure):
_fields_ = [("ra", c_double),("dec", c_double)]
class LN_hrz_posn(Structure):
_fields_ = [("az", c_double),("alt", c_double)]
class LN_lnlat_posn(Structure):
_fields_ = [("lng", c_double),("lat", c_double)]
class LN_nut(Structure):
_fields_ = [("longitude", c_double),("obliquity", c_double),("ecliptic", c_double)]
# add full path if it is not on LD_PATH
ln=cdll.LoadLibrary("libnova.so")
ln.ln_get_equ_aber.restype = None
ln.ln_get_equ_prec.restype = None
ln.ln_get_equ_from_ecl.restype = None
ln.ln_get_nutation.restype = None
ln.ln_get_refraction_adj.restype = c_double
ln.ln_get_angular_separation.restype = c_double
ln_pos_eq=LN_equ_posn()
ln_pos_eq_ab=LN_equ_posn()
ln_pos_eq_pm=LN_equ_posn()
ln_pos_eq_app=LN_equ_posn()
ln_pos_eq_pr=LN_equ_posn()
ln_pos_aa_ab=LN_hrz_posn()
ln_hrz_posn=LN_hrz_posn()
ln_lnlat_posn=LN_lnlat_posn()
ln_nut= LN_nut()
ln_pos_eq_nut=LN_equ_posn()
class Transformation(object):
def __init__(self, lg=None,obs=None,refraction_method=None):
#
self.lg=lg
self.name='LN Libnova'
self.refraction_method=refraction_method
self.obs=obs
self.ln_obs=LN_lnlat_posn()
self.ln_obs.lng=obs.lon.degree # deg
self.ln_obs.lat=obs.lat.degree # deg
self.ln_hght=obs.height # hm, no .meter?? m, not a libnova quantity
def transform_to_hadec(self,tf=None,sky=None,mount_set_icrs=None):
tem=sky.temperature
pre=sky.pressure
hum=sky.humidity
pre_qfe=pre # to make it clear what is used
aa=self.LN_EQ_to_AltAz(ra=Longitude(tf.ra.radian,u.radian).degree,dec=Latitude(tf.dec.radian,u.radian).degree,ln_pressure_qfe=pre_qfe,ln_temperature=tem,ln_humidity=hum,obstime=tf.obstime)
ha=self.LN_AltAz_to_HA(az=aa.az.degree,alt=aa.alt.degree,obstime=tf.obstime)
return ha
def transform_to_altaz(self,tf=None,sky=None,mount_set_icrs=None):
tem=sky.temperature
pre=sky.pressure
hum=sky.humidity
pre_qfe=pre # to make it clear what is used
aa=self.LN_EQ_to_AltAz(ra=Longitude(tf.ra.radian,u.radian).degree,dec=Latitude(tf.dec.radian,u.radian).degree,ln_pressure_qfe=pre_qfe,ln_temperature=tem,ln_humidity=hum,obstime=tf.obstime,mount_set_icrs=mount_set_icrs)
return aa
def LN_nutation_meeus(self,eq_pr,JD=None):
# first order correction
ln.ln_get_nutation(c_double(JD),byref(ln_nut))
d_psi=ln_nut.longitude /180.*np.pi
epsilon_0=(ln_nut.ecliptic+ln_nut.obliquity)/180.*np.pi # true obliquity
d_epsilon=ln_nut.obliquity/180.*np.pi
ra=Longitude(eq_pr.ra,u.degree).radian
dec=Latitude(eq_pr.dec,u.degree).radian
d_ra=(np.cos(epsilon_0)+np.sin(epsilon_0)*np.sin(ra)*np.tan(dec))*d_psi-np.cos(ra)*np.tan(dec)*d_epsilon
d_dec=(np.sin(epsilon_0)*np.cos(ra))*d_psi+np.sin(ra)*d_epsilon
ln_pos_eq_nut.ra=eq_pr.ra + d_ra * 180./np.pi
ln_pos_eq_nut.dec=eq_pr.dec + d_dec* 180./np.pi
return ln_pos_eq_nut
def LN_EQ_to_AltAz(self,ra=None,dec=None,ln_pressure_qfe=None,ln_temperature=None,ln_humidity=None,obstime=None,mount_set_icrs=False):
ln_pos_eq.ra=ra
ln_pos_eq.dec=dec
if mount_set_icrs:
# ToDo missing see Jean Meeus, Astronomical Algorithms, chapter 23
# proper motion
# annual paralax (0".8)
# gravitational deflection of light (0".003)
ln.ln_get_equ_prec(byref(ln_pos_eq), c_double(obstime.jd), byref(ln_pos_eq_pr))
ln_pos_eq_nut=self.LN_nutation_meeus(eq_pr=ln_pos_eq_pr,JD=obstime.jd)
ln.ln_get_equ_aber(byref(ln_pos_eq_nut), c_double(obstime.jd), byref(ln_pos_eq_ab))
ln.ln_get_hrz_from_equ(byref(ln_pos_eq_ab), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab))
# here we use QFE not pressure at sea level!
# E.g. at Dome-C this formula:
# ln_pressure=ln_see_pres * pow(1. - (0.0065 * ln_alt) / 288.15, (9.80665 * 0.0289644) / (8.31447 * 0.0065));
# is not precise.
if self.refraction_method is None:
d_alt_deg=ln.ln_get_refraction_adj(c_double(ln_pos_aa_ab.alt),c_double(ln_pressure_qfe),c_double(ln_temperature))
else:
d_alt_deg=180./np.pi* self.refraction_method(alt=ln_pos_aa_ab.alt,tem=ln_temperature,pre=ln_pressure_qfe,hum=ln_humidity)
else:
# ... but not for the star position as measured in mount frame
ln.ln_get_hrz_from_equ(byref(ln_pos_eq), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab));
d_alt_deg=0.
a_az=Longitude(ln_pos_aa_ab.az,u.deg)
a_alt=Latitude(ln_pos_aa_ab.alt + d_alt_deg,u.deg)
pos_aa=SkyCoord(az=a_az.radian,alt=a_alt.radian,unit=(u.radian,u.radian),frame='altaz',location=self.obs,obstime=obstime,obswl=0.5*u.micron, pressure=ln_pressure_qfe*u.hPa,temperature=ln_temperature*u.deg_C,relative_humidity=ln_humidity)
return pos_aa
def LN_AltAz_to_HA(self,az=None,alt=None,obstime=None):
ln_hrz_posn.alt=alt
ln_hrz_posn.az=az
ln.ln_get_equ_from_hrz(byref(ln_hrz_posn),byref(self.ln_obs), c_double(obstime.jd),byref(ln_pos_eq))
# calculate HA
ra=Longitude(ln_pos_eq.ra,u.deg)
HA= obstime.sidereal_time('apparent') - ra
# hm, ra=ha a bit ugly
ha=SkyCoord(ra=HA, dec=Latitude(ln_pos_eq.dec,u.deg).radian,unit=(u.radian,u.radian),frame='cirs')
return ha
def LN_ICRS_to_GCRS(self,ra=None,dec=None,ln_pressure_qfe=None,ln_temperature=None,ln_humidity=None,obstime=None):
ln_pos_eq.ra=ra
ln_pos_eq.dec=dec
ln.ln_get_equ_prec(byref(ln_pos_eq), c_double(obstime.jd), byref(ln_pos_eq_pr))
ln_pos_eq_nut=self.LN_nutation_meeus(eq_pr=ln_pos_eq_pr,JD=obstime.jd)
ln.ln_get_equ_aber(byref(ln_pos_eq_nut), c_double(obstime.jd), byref(ln_pos_eq_ab))
ra=Longitude(ln_pos_eq_ab.ra,u.deg)
dec=Latitude(ln_pos_eq_ab.dec,u.deg)
gcrs=SkyCoord(ra=ra.radian,dec=dec.radian,unit=(u.radian,u.radian),frame='gcrs',location=self.obs,obstime=obstime,obswl=0.5*u.micron, pressure=ln_pressure_qfe*u.hPa,temperature=ln_temperature*u.deg_C,relative_humidity=ln_humidity)
return gcrs
def LN_ICRS_to_AltAz(self,ra=None,dec=None,ln_pressure_qfe=None,ln_temperature=None,ln_humidity=None,obstime=None,mount_set_icrs=True):
ln_pos_eq.ra=ra
ln_pos_eq.dec=dec
if mount_set_icrs:
# libnova corrections for catalog data ...
# ToDo missing see Jean Meeus, Astronomical Algorithms, chapter 23
# proper motion
# annual paralax (0".8)
# gravitational deflection of light (0".003)
ln.ln_get_equ_prec(byref(ln_pos_eq), c_double(obstime.jd), byref(ln_pos_eq_pr))
ln_pos_eq_nut=self.LN_nutation_meeus(eq_pr=ln_pos_eq_pr,JD=obstime.jd)
ln.ln_get_equ_aber(byref(ln_pos_eq_nut), c_double(obstime.jd), byref(ln_pos_eq_ab))
ln.ln_get_hrz_from_equ(byref(ln_pos_eq_ab), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab))
# here we use QFE not pressure at sea level!
# E.g. at Dome-C this formula:
# ln_pressure=ln_see_pres * pow(1. - (0.0065 * ln_alt) / 288.15, (9.80665 * 0.0289644) / (8.31447 * 0.0065));
# is not precise.
if self.refraction_method is None:
d_alt_deg=ln.ln_get_refraction_adj(c_double(ln_pos_aa_ab.alt),c_double(ln_pressure_qfe),c_double(ln_temperature))
else:
d_alt_deg=180./np.pi* self.refraction_method(alt=ln_pos_aa_ab.alt,tem=ln_temperature,pre=ln_pressure_qfe,hum=ln_humidity)
else:
# ... but not for the star position as measured in mount frame
ln.ln_get_hrz_from_equ(byref(ln_pos_eq), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab));
d_alt_deg=0.
a_az=Longitude(ln_pos_aa_ab.az,u.deg)
a_az.wrap_at(0.*u.degree)
a_alt=Latitude(ln_pos_aa_ab.alt + d_alt_deg,u.deg)
pos_aa=SkyCoord(az=a_az.radian,alt=a_alt.radian,unit=(u.radian,u.radian),frame='altaz',location=self.obs,obstime=obstime,obswl=0.5*u.micron, pressure=ln_pressure_qfe*u.hPa,temperature=ln_temperature*u.deg_C,relative_humidity=ln_humidity)
return pos_aa
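# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal,
# hedged example of driving the Transformation class defined above; it
# assumes libnova.so is installed and uses libnova's built-in refraction
# (refraction_method=None). The site, epoch and target values are
# placeholders only.
if __name__ == '__main__':
    from astropy.coordinates import EarthLocation
    from astropy.time import Time

    obs = EarthLocation(lon=7.5 * u.deg, lat=47.0 * u.deg, height=500. * u.m)
    tr = Transformation(lg=None, obs=obs, refraction_method=None)

    t = Time('2016-01-01T00:00:00', format='isot', scale='utc')
    # ICRS catalogue position (degrees) -> apparent Alt/Az at the site
    aa = tr.LN_ICRS_to_AltAz(ra=10., dec=20.,
                             ln_pressure_qfe=950., ln_temperature=10.,
                             ln_humidity=0.5, obstime=t, mount_set_icrs=True)
    print('az: {0:.4f} deg, alt: {1:.4f} deg'.format(aa.az.degree, aa.alt.degree))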
|
RTS2/rts2
|
scripts/u_point/transform/u_libnova.py
|
Python
|
lgpl-3.0
| 8,905
|
[
"VisIt"
] |
2c4c8affa2b244c0fa92cc3804f12ef3434f2438c8aed26afadc1ac0eb8fefec
|
from datetime import datetime, timedelta
import time
from dateutil.parser import parser
from django.core.cache import cache
import simplejson
from casexml.apps.case.models import CommCareCase
from corehq.apps.api.es import ReportXFormES, get_report_script_field
from pact.enums import PACT_DOMAIN
from pact.lib.quicksect import IntervalNode
from pact.utils import get_patient_display_cache
import logging
cached_schedules = {}
def get_seconds(d):
return time.mktime(d.utctimetuple())
class CHWPatientSchedule(object):
def __init__(self, username, intervaltrees, raw_schedule):
self.username = username
self.intervals = intervaltrees
self.raw_schedule = raw_schedule
def scheduled_for_date(self, date_val):
"""
For a given date, return the array of pact_ids that are scheduled for visiting. This will check the active date by using the internal interval tree.
Parameter: datetime value
Returns: array of pact_ids
"""
day_of_week = date_val.isoweekday() % 7
if not self.intervals.has_key(day_of_week):
return []
else:
pass
day_tree = self.intervals[day_of_week]
results = []
day_tree.intersect(get_seconds(date_val) - .1, get_seconds(date_val),
lambda x: results.append(x.other))
return results
@classmethod
def get_schedule(cls, chw_username, override_date=None):
"""
Generate schedule object for a given username
"""
cached_schedules = None
if override_date == None:
nowdate = datetime.now()
else:
nowdate = override_date
day_intervaltree = {}
if cached_schedules == None:
#no documents, then we need to load them up
db = CommCareCase.get_db()
chw_schedules = db.view('pact/chw_dot_schedules', key=chw_username).all()
to_cache = []
for item in chw_schedules:
single_sched = item['value']
to_cache.append(single_sched)
cache.set("%s_schedule" % (chw_username), simplejson.dumps(to_cache), 3600)
cached_arr = to_cache
else:
cached_arr = simplejson.loads(cached_schedules)
for single_sched in cached_arr:
day_of_week = int(single_sched['day_of_week'])
if day_intervaltree.has_key(day_of_week):
daytree = day_intervaltree[day_of_week]
else:
#if there's no day of week indication for this, then it's just a null interval node. To start this node, we make it REALLY old.
daytree = IntervalNode(get_seconds(datetime.min),
get_seconds(nowdate + timedelta(days=10)))
if single_sched['ended_date'] == None:
enddate = nowdate + timedelta(days=9)
else:
enddate = datetime.strptime(single_sched['ended_date'], "%Y-%m-%dT%H:%M:%SZ")
startdate = datetime.strptime(single_sched['active_date'], "%Y-%m-%dT%H:%M:%SZ")
case_id = single_sched['case_id']
if single_sched.has_key('error'):
#this is a non-showstopping issue due to quirks with older submissions
logging.error("Error, no pactid: %s" % single_sched['error'])
daytree.insert(get_seconds(startdate), get_seconds(enddate), other=case_id)
day_intervaltree[day_of_week] = daytree
return cls(chw_username, day_intervaltree, cached_arr)
def dots_submissions_by_case(case_id, query_date, username=None):
"""
Actually run query for username submissions
todo: do terms for the pact_ids instead of individual term?
"""
xform_es = ReportXFormES(PACT_DOMAIN)
script_fields = {
"doc_id": get_report_script_field('_id', is_known=True),
"pact_id": get_report_script_field("form.pact_id"),
"encounter_date": get_report_script_field('form.encounter_date'),
"username": get_report_script_field('form.meta.username', is_known=True),
"visit_type": get_report_script_field('form.visit_type'),
"visit_kept": get_report_script_field('form.visit_kept'),
"contact_type": get_report_script_field('form.contact_type'),
"observed_art": get_report_script_field('form.observed_art'),
"observed_non_art": get_report_script_field('form.observed_non_art'),
"observer_non_art_dose": get_report_script_field('form.observed_non_art_dose'),
"observed_art_dose": get_report_script_field('form.observed_art_dose'),
"pillbox_check": get_report_script_field('form.pillbox_check.check'),
"scheduled": get_report_script_field('form.scheduled'),
}
term_block = {'form.#type': 'dots_form'}
if username is not None:
term_block['form.meta.username'] = username
query = xform_es.by_case_id_query(PACT_DOMAIN, case_id, terms=term_block,
date_field='form.encounter_date.#value', startdate=query_date,
enddate=query_date)
query['sort'] = {'received_on': 'asc'}
query['script_fields'] = script_fields
query['size'] = 1
query['from'] = 0
res = xform_es.run_query(query)
print simplejson.dumps(res, indent=2)
return res
def get_schedule_tally(username, total_interval, override_date=None):
"""
Main entry point
For a given username and interval, get a simple array pairing each scheduled visit with the submission that fulfilled it (or None where no submission exists).
returns (schedule_tally_array, patient_array, total_scheduled (int), total_visited(int))
schedule_tally_array = [visit_date, [(patient1, visit1), (patient2, visit2), (patient3, None), (patient4, visit4), ...]]
where visit = XFormInstance
"""
if override_date is None:
nowdate = datetime.now()
chw_schedule = CHWPatientSchedule.get_schedule(username)
else:
nowdate = override_date
chw_schedule = CHWPatientSchedule.get_schedule(username, override_date=nowdate)
patient_case_ids = set([x['case_id'] for x in chw_schedule.raw_schedule])
patient_cache = get_patient_display_cache(list(patient_case_ids))
#got the chw schedule
#now let's walk through the date range, and get the scheduled CHWs per this date.
visit_dates = []
ret = [] #where it's going to be an array of tuples:
#(date, scheduled[], submissions[] - that line up with the scheduled)
total_scheduled = 0
total_visited = 0
for n in range(0, total_interval):
td = timedelta(days=n)
visit_date = nowdate - td
scheduled_case_ids = chw_schedule.scheduled_for_date(visit_date)
patient_case_ids = set(filter(lambda x: x is not None, scheduled_case_ids))
dereferenced_patient_info = [patient_cache.get(x, {}) for x in patient_case_ids]
visited = []
#inefficient, but we need to get the patients in alpha order
#patients = sorted(patients, key=lambda x: x.last_name)
dp = parser()
for case_id in patient_case_ids:
total_scheduled += 1
search_results = dots_submissions_by_case(case_id, visit_date, username=username)
submissions = search_results['hits']['hits']
if len(submissions) > 0:
#calculate if pillbox checked
pillbox_check_str = submissions[0]['fields']['pillbox_check']
if len(pillbox_check_str) > 0:
pillbox_check_data = simplejson.loads(pillbox_check_str)
anchor_date = dp.parse(pillbox_check_data.get('anchor'))
else:
pillbox_check_data = {}
anchor_date = datetime.min
encounter_date = dp.parse(submissions[0]['fields']['encounter_date'])
submissions[0]['fields']['has_pillbox_check'] = 'Yes' if anchor_date.date() == encounter_date.date() else 'No'
visited.append(submissions[0]['fields'])
total_visited += 1
else:
#ok, so no submission from this chw, let's see if there's ANY from anyone on this day.
search_results = dots_submissions_by_case(case_id, visit_date)
other_submissions = search_results['hits']['hits']
if len(other_submissions) > 0:
visited.append(other_submissions[0]['fields'])
total_visited += 1
else:
visited.append(None)
ret.append((visit_date, zip(dereferenced_patient_info, visited)))
return ret, patient_case_ids, total_scheduled, total_visited
def chw_calendar_submit_report(request, username, interval=7):
"""Calendar view of submissions by CHW, overlaid with their scheduled visits, and whether they made them or not."""
return_context = {}
return_context['username'] = username
total_interval = interval
if 'interval' in request.GET:
try:
total_interval = int(request.GET['interval'])
except ValueError:
pass
#secret date ranges
if 'enddate' in request.GET:
end_date_str = request.GET.get('enddate', datetime.utcnow().strftime('%Y-%m-%d'))
end_date = datetime.strptime(end_date_str, '%Y-%m-%d')
else:
end_date = datetime.utcnow()
if 'startdate' in request.GET:
#if there's a startdate, trump interval
start_date_str = request.GET.get('startdate', datetime.utcnow().strftime('%Y-%m-%d'))
start_date = datetime.strptime(start_date_str, '%Y-%m-%d')
total_interval = (end_date - start_date).days
ret, patients, total_scheduled, total_visited = get_schedule_tally(username,
total_interval,
override_date=end_date)
if len(ret) > 0:
return_context['date_arr'] = ret
return_context['total_scheduled'] = total_scheduled
return_context['total_visited'] = total_visited
return_context['start_date'] = ret[0][0]
return_context['end_date'] = ret[-1][0]
else:
return_context['total_scheduled'] = 0
return_context['total_visited'] = 0
return return_context
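# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A hedged,
# commented example of consuming the tuple returned by get_schedule_tally(),
# following the structure documented in its docstring; the username is a
# placeholder and running it requires the full CommCare HQ environment.
#
#   ret, case_ids, total_scheduled, total_visited = get_schedule_tally('chw1', 7)
#   for visit_date, pairs in ret:
#       for patient_info, visit in pairs:
#           kept = 'no submission' if visit is None else visit.get('visit_kept')
#           print visit_date.date(), patient_info, kept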
|
SEL-Columbia/commcare-hq
|
custom/_legacy/pact/reports/chw_schedule.py
|
Python
|
bsd-3-clause
| 10,412
|
[
"VisIt"
] |
defee02e47b04d4fdfcae0ba5c9c3acac2fc0b3b57c1a46385ccc431a00c0077
|
# author: brian dillmann
# for rscs
from context import Input
from context import Output
from context import Timer
from context import AnalogInput
import unittest
class test_device_simple(unittest.TestCase):
def test_naming_convention(self):
import RPi
RPi.GPIO.setmode(RPi.GPIO.BCM)
with self.assertRaises(ValueError):
Input('{', 5)
with self.assertRaises(ValueError):
Output('{', 5)
with self.assertRaises(ValueError):
AnalogInput('{', 5)
with self.assertRaises(ValueError):
Timer('{')
Input('cannamethis123', 5)
Output('cannamethis123', 5)
AnalogInput('cannamethis123', 5)
Timer('cannamethis123')
if __name__ == '__main__':
unittest.main()
|
dillmann/rscs
|
test/devicetests/device_test.py
|
Python
|
mit
| 685
|
[
"Brian"
] |
39270de78ef46b00541fd3143e3854d1e60242d07e791cb06d421792230fd97b
|
# encoding:utf-8
'''
Example of the pairwise2 module
Doc:
http://biopython.org/DIST/docs/api/Bio.pairwise2-module.html
'''
from Bio.pairwise2 import *
'''
global alignment
1 point for a match (identical character)
no points for mismatch or gap
'''
print 'Global alignments:\n'
alignments = align.globalxx('ACCGT', 'ACG')
for a in alignments:
print format_alignment(*a)
# match: 2, mismatch = -1, gap = -0.5, gap extend = -0.1
'''
alignments = align.globalms('ACCGT', 'ACG', 2, -1, -0.5, -0.1)
for a in alignments:
print format_alignment(*a)
'''
print '\nLocal alignments:\n'
# local alignment
alignments = align.localxx('ACCGT', 'CCG')
for a in alignments:
print format_alignment(*a)
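# Editor's addition for illustration (not part of the original script): the same
# local alignment can be scored explicitly with align.localms; the values
# match=2, mismatch=-1, gap open=-0.5, gap extend=-0.1 are assumed, mirroring
# the commented globalms example above.
'''
print '\nLocal alignments with custom scoring:\n'
alignments = align.localms('ACCGT', 'CCG', 2, -1, -0.5, -0.1)
for a in alignments:
    print format_alignment(*a)
'''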
|
marcoscastro/pybr11
|
codigos/17_alinhamentos.py
|
Python
|
gpl-2.0
| 701
|
[
"Biopython"
] |
db609b0f5c732037d039a6e85ca211275eab9eb888b64041f5d1afa04fa0dec9
|
# -*- coding: utf-8 -*-
#
# Gpipe documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 20 11:17:20 2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.extend([os.path.abspath('../scripts'),
os.path.abspath('../CGATPipelines'),
os.path.abspath('python')])
# The data directory
data_dir = os.path.abspath('..')
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.inheritance_diagram',
'sphinxcontrib.programoutput',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'CGATReport.report_directive']
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'cgat': ('https://www.cgat.org/downloads/public/cgat/documentation/',
None)}
# order of autodocumented functions
autodoc_member_order = "bysource"
# autoclass configuration - use both class and __init__ method to
# document methods.
autoclass_content = "both"
# include todos
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'cgatflow'
copyright = '2011, 2012, 2013, 2014, 2015, 2016, 2017 CGAT'
# Included at the end of each rst file
rst_epilog = '''
.. _CGAT Training Programme: http://www.cgat.org
.. _CGAT Pipeline Collection: https://www.cgat.org/downloads/public/CGATPipelines/documentation/
.. _CGAT Code Collection: https://www.cgat.org/downloads/public/cgat/documentation/
.. _pysam: https://github.com/pysam-developers/pysam
.. _samtools: http://samtools.sourceforge.net/
.. _htslib: http://www.htslib.org/
.. _tabix: http://samtools.sourceforge.net/tabix.shtml/
.. _Galaxy: https://main.g2.bx.psu.edu/
.. _cython: http://cython.org/
.. _python: http://python.org/
.. _ipython: http://ipython.org/
.. _pyximport: http://www.prescod.net/pyximport/
.. _sphinx: http://sphinx-doc.org/
.. _ruffus: http://www.ruffus.org.uk/
.. _cgatreport: https://github.com/AndreasHeger/CGATReport/
.. _sqlite: http://www.sqlite.org/
.. _make: http://www.gnu.org/software/make
.. _UCSC: http://genome.ucsc.edu
.. _ENSEMBL: http://www.ensembl.org
.. _GO: http://www.geneontology.org
.. _gwascatalog: http://www.genome.gov/gwastudies/
.. _distlid: http://distild.jensenlab.org/
.. _mysql: https://mariadb.org/
.. _postgres: http://www.postgresql.org/
.. _bedtools: http://bedtools.readthedocs.org/en/latest/
.. _UCSC Tools: http://genome.ucsc.edu/admin/git.html
.. _git: http://git-scm.com/
.. _sge: http://wiki.gridengine.info/wiki/index.php/Main_Page
.. _alignlib: https://github.com/AndreasHeger/alignlib
.. _iGenomes: https://support.illumina.com/sequencing/sequencing_software/igenome.html
'''
sys.path.insert(0, "../scripts")
import version
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = version.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build', 'obsolete']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "CGAT Pipelines",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site navigation",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
#'navbar_links': [
# ("Examples", "examples"),
# ("Link", "http://example.com", True),
#],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page navigation",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "cosmo" or "sandstone".
'bootswatch_theme': "sandstone",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_templates/cgat_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gpipedoc'
# -- Options for LaTeX output ------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [(
'index', 'Gpipe.tex',
r'Gpipe Documentation',
r'Andreas Heger',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
def setup(app):
app.add_stylesheet("my-styles.css") # also can be a full URL
# app.add_stylesheet("ANOTHER.css")
# app.add_stylesheet("AND_ANOTHER.css")
|
CGATOxford/CGATPipelines
|
doc/conf.py
|
Python
|
mit
| 11,285
|
[
"pysam"
] |
a8564059c69d6a8487e9d9780ab65012b874b4e66edc0314fc620294e893a079
|
from pprint import pprint
from dateutil.parser import parse
from arrow import Arrow
import requests
runi = lambda s: s.encode('cp850', errors='replace').decode('cp850')
BASE = 'https://graph.facebook.com/v2.0/'
DAYS = [
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday'
]
def arrow_parse(t):
return Arrow.fromdate(parse(t))
def get_posts(access_token):
r = requests.get(
BASE + 'posts',
params={
'format': 'json',
'id': 'parkdatcurtin',
'access_token': access_token,
'fields': 'updated_time,message',
'limit': 50
}
)
rj = r.json()
if not r.ok:
pprint(rj)
assert r.ok
return rj['data']
def get_date_posts(access_token):
for post in get_posts(access_token):
if 'message' not in post:
# odd. why does this post not have a message?
continue
if 'food truck' not in post['message']:
# misc or entertainment post
continue
# ignore that which has nothing to do with what we need
lines = post['message'].splitlines()
for idx, line in enumerate(lines):
if line.startswith('Monday'):
post['message'] = '\n'.join(lines[idx:])
yield post
def parse_day(day):
for visit in day[1:]:
if visit[0] != '*':
yield ('', visit)
elif '-' not in visit:
# invalid, probably an announcement for that day
continue
else:
location, visitors = visit[1:].split(' - ', 1)
visitors = [
visit
.strip()
.replace(b'\xe2\x80\x99'.decode(), "'")
.replace(b'\xe2\x80\x93'.decode(), '-')
for visit in visitors.split(',')
]
for visitor in visitors:
yield (location, visitor)
def parse_week(days):
for day in days:
day = day.splitlines()
day_name, date = day[0].strip().split(' ', 1)
if day_name.lower() not in DAYS:
continue
yield (
arrow_parse(date),
list(parse_day(day))
)
def get_dates(access_token):
for update in get_date_posts(access_token):
days = update['message'].split('\n\n')
yield parse_week(days)
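# Editor's sketch (not part of the original module): one way this scraper might
# be driven from the command line; ACCESS_TOKEN is a placeholder you would
# supply yourself.
if __name__ == '__main__':
    import sys
    token = sys.argv[1] if len(sys.argv) > 1 else 'ACCESS_TOKEN'
    for week in get_dates(token):
        for day, visits in week:
            print(day.format('YYYY-MM-DD'))
            for location, visitor in visits:
                print('  {} - {}'.format(location or '(no location)', visitor))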
|
Mause/parkd
|
event_posts.py
|
Python
|
mit
| 2,418
|
[
"VisIt"
] |
f2ebb2b752e1d86e5112d55f7dcdc9902919ddcb762495f73703bf4c7bddf429
|
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from outputparser import OutputParser, ValuesMatcher
import re, mx, mx_graal, os, sys, StringIO, subprocess
from os.path import isfile, join, exists
import mx_jvmci
gc = 'UseSerialGC'
dacapoSanityWarmup = {
'avrora': [0, 0, 3, 6, 13],
'batik': [0, 0, 5, 5, 20],
'eclipse': [0, 0, 0, 0, 0],
'fop': [4, 8, 10, 20, 30],
'h2': [0, 0, 5, 5, 8],
'jython': [0, 0, 5, 10, 13],
'luindex': [0, 0, 5, 10, 10],
'lusearch': [0, 4, 5, 5, 8],
'pmd': [0, 0, 5, 10, 13],
'sunflow': [0, 2, 5, 10, 15],
'tomcat': [0, 0, 5, 10, 15],
'tradebeans': [0, 0, 5, 10, 13],
'tradesoap': [0, 0, 5, 10, 15],
'xalan': [0, 0, 5, 10, 18],
}
dacapoScalaSanityWarmup = {
'actors': [0, 0, 2, 5, 5],
'apparat': [0, 0, 2, 5, 5],
'factorie': [0, 0, 2, 5, 5],
'kiama': [0, 4, 3, 13, 15],
'scalac': [0, 0, 5, 15, 20],
'scaladoc': [0, 0, 5, 15, 15],
'scalap': [0, 0, 5, 15, 20],
'scalariform':[0, 0, 6, 15, 20],
'scalatest': [0, 0, 2, 10, 12],
'scalaxb': [0, 0, 5, 15, 25],
# (gdub) specs sometimes returns a non-zero value even though there is no apparent failure
'specs': [0, 0, 0, 0, 0],
'tmt': [0, 0, 3, 10, 12]
}
dacapoGateBuildLevels = {
'avrora': ['product', 'fastdebug', 'debug'],
'batik': ['product', 'fastdebug', 'debug'],
# (lewurm): does not work with JDK8
'eclipse': [],
'fop': ['fastdebug', 'debug'],
'h2': ['product', 'fastdebug', 'debug'],
'jython': ['product', 'fastdebug', 'debug'],
'luindex': ['product', 'fastdebug', 'debug'],
'lusearch': ['product'],
'pmd': ['product', 'fastdebug', 'debug'],
'sunflow': ['fastdebug', 'debug'],
'tomcat': ['product', 'fastdebug', 'debug'],
'tradebeans': ['product', 'fastdebug', 'debug'],
# tradesoap is too unreliable for the gate, often crashing with concurrency problems:
# http://sourceforge.net/p/dacapobench/bugs/99/
'tradesoap': [],
'xalan': ['product', 'fastdebug', 'debug'],
}
dacapoScalaGateBuildLevels = {
'actors': ['product', 'fastdebug', 'debug'],
'apparat': ['product', 'fastdebug', 'debug'],
'factorie': ['product', 'fastdebug', 'debug'],
'kiama': ['fastdebug', 'debug'],
'scalac': ['product', 'fastdebug', 'debug'],
'scaladoc': ['product', 'fastdebug', 'debug'],
'scalap': ['product', 'fastdebug', 'debug'],
'scalariform':['product', 'fastdebug', 'debug'],
'scalatest': ['product', 'fastdebug', 'debug'],
'scalaxb': ['product', 'fastdebug', 'debug'],
'specs': ['product', 'fastdebug', 'debug'],
'tmt': ['product', 'fastdebug', 'debug'],
}
specjvm2008Names = [
'startup.helloworld',
'startup.compiler.compiler',
'startup.compiler.sunflow',
'startup.compress',
'startup.crypto.aes',
'startup.crypto.rsa',
'startup.crypto.signverify',
'startup.mpegaudio',
'startup.scimark.fft',
'startup.scimark.lu',
'startup.scimark.monte_carlo',
'startup.scimark.sor',
'startup.scimark.sparse',
'startup.serial',
'startup.sunflow',
'startup.xml.transform',
'startup.xml.validation',
'compiler.compiler',
'compiler.sunflow',
'compress',
'crypto.aes',
'crypto.rsa',
'crypto.signverify',
'derby',
'mpegaudio',
'scimark.fft.large',
'scimark.lu.large',
'scimark.sor.large',
'scimark.sparse.large',
'scimark.fft.small',
'scimark.lu.small',
'scimark.sor.small',
'scimark.sparse.small',
'scimark.monte_carlo',
'serial',
'sunflow',
'xml.transform',
'xml.validation'
]
def _noneAsEmptyList(a):
if a is None:
return []
return a
class SanityCheckLevel:
Fast, Gate, Normal, Extensive, Benchmark = range(5)
def getSPECjbb2005(benchArgs=None):
benchArgs = [] if benchArgs is None else benchArgs
specjbb2005 = mx.get_env('SPECJBB2005')
if specjbb2005 is None or not exists(join(specjbb2005, 'jbb.jar')):
mx.abort('Please set the SPECJBB2005 environment variable to a SPECjbb2005 directory')
score = re.compile(r"^Valid run, Score is (?P<score>[0-9]+)$", re.MULTILINE)
error = re.compile(r"VALIDATION ERROR")
success = re.compile(r"^Valid run, Score is [0-9]+$", re.MULTILINE)
matcher = ValuesMatcher(score, {'group' : 'SPECjbb2005', 'name' : 'score', 'score' : '<score>'})
classpath = ['jbb.jar', 'check.jar']
return Test("SPECjbb2005", ['spec.jbb.JBBmain', '-propfile', 'SPECjbb.props'] + benchArgs, [success], [error], [matcher], vmOpts=['-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops', '-cp', os.pathsep.join(classpath)], defaultCwd=specjbb2005)
def getSPECjbb2013(benchArgs=None):
specjbb2013 = mx.get_env('SPECJBB2013')
if specjbb2013 is None or not exists(join(specjbb2013, 'specjbb2013.jar')):
mx.abort('Please set the SPECJBB2013 environment variable to a SPECjbb2013 directory')
jops = re.compile(r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$", re.MULTILINE)
# error?
success = re.compile(r"org.spec.jbb.controller: Run finished", re.MULTILINE)
matcherMax = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'max', 'score' : '<max>'})
matcherCritical = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'critical', 'score' : '<critical>'})
return Test("SPECjbb2013", ['-jar', 'specjbb2013.jar', '-m', 'composite'] +
_noneAsEmptyList(benchArgs), [success], [], [matcherCritical, matcherMax],
vmOpts=['-Xmx6g', '-Xms6g', '-Xmn3g', '-XX:+UseParallelOldGC', '-XX:-UseAdaptiveSizePolicy', '-XX:-UseBiasedLocking', '-XX:-UseCompressedOops'], defaultCwd=specjbb2013)
def getSPECjvm2008(benchArgs=None):
specjvm2008 = mx.get_env('SPECJVM2008')
if specjvm2008 is None or not exists(join(specjvm2008, 'SPECjvm2008.jar')):
mx.abort('Please set the SPECJVM2008 environment variable to a SPECjvm2008 directory')
score = re.compile(r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\._]+)( result)?: (?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 Base)? ops/m$", re.MULTILINE)
error = re.compile(r"^Errors in benchmark: ", re.MULTILINE)
# The ' ops/m' at the end of the success string is important : it's how you can tell valid and invalid runs apart
success = re.compile(r"^(Noncompliant c|C)omposite result: [0-9]+((,|\.)[0-9]+)?( SPECjvm2008 (Base|Peak))? ops/m$", re.MULTILINE)
matcher = ValuesMatcher(score, {'group' : 'SPECjvm2008', 'name' : '<benchmark>', 'score' : '<score>'})
return Test("SPECjvm2008", ['-jar', 'SPECjvm2008.jar'] + _noneAsEmptyList(benchArgs), [success], [error], [matcher], vmOpts=['-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops'], defaultCwd=specjvm2008)
def getDacapos(level=SanityCheckLevel.Normal, gateBuildLevel=None, dacapoArgs=None):
checks = []
for (bench, ns) in dacapoSanityWarmup.items():
if ns[level] > 0:
if gateBuildLevel is None or gateBuildLevel in dacapoGateBuildLevels[bench]:
checks.append(getDacapo(bench, ['-n', str(ns[level])] + _noneAsEmptyList(dacapoArgs)))
return checks
def getDacapo(name, dacapoArgs=None):
dacapo = mx.get_env('DACAPO_CP')
if dacapo is None:
l = mx.library('DACAPO', False)
if l is not None:
dacapo = l.get_path(True)
else:
mx.abort('DaCapo 9.12 jar file must be specified with DACAPO_CP environment variable or as DACAPO library')
if not isfile(dacapo) or not dacapo.endswith('.jar'):
mx.abort('Specified DaCapo jar file does not exist or is not a jar file: ' + dacapo)
dacapoSuccess = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====", re.MULTILINE)
dacapoFail = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====", re.MULTILINE)
dacapoTime = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
dacapoTime1 = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup 1 in (?P<time>[0-9]+) msec =====")
dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : 'DaCapo', 'name' : '<benchmark>', 'score' : '<time>'})
dacapoMatcher1 = ValuesMatcher(dacapoTime1, {'group' : 'DaCapo-1stRun', 'name' : '<benchmark>', 'score' : '<time>'})
# Use ipv4 stack for dacapos; tomcat+solaris+ipv6_interface fails (see also: JDK-8072384)
return Test("DaCapo-" + name, ['-jar', mx._cygpathU2W(dacapo), name] + _noneAsEmptyList(dacapoArgs), [dacapoSuccess], [dacapoFail], [dacapoMatcher, dacapoMatcher1], ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops', "-Djava.net.preferIPv4Stack=true"])
def getScalaDacapos(level=SanityCheckLevel.Normal, gateBuildLevel=None, dacapoArgs=None):
checks = []
for (bench, ns) in dacapoScalaSanityWarmup.items():
if ns[level] > 0:
if gateBuildLevel is None or gateBuildLevel in dacapoScalaGateBuildLevels[bench]:
checks.append(getScalaDacapo(bench, ['-n', str(ns[level])] + _noneAsEmptyList(dacapoArgs)))
return checks
def getScalaDacapo(name, dacapoArgs=None):
dacapo = mx.get_env('DACAPO_SCALA_CP')
if dacapo is None:
l = mx.library('DACAPO_SCALA', False)
if l is not None:
dacapo = l.get_path(True)
else:
mx.abort('Scala DaCapo 0.1.0 jar file must be specified with DACAPO_SCALA_CP environment variable or as DACAPO_SCALA library')
if not isfile(dacapo) or not dacapo.endswith('.jar'):
mx.abort('Specified Scala DaCapo jar file does not exist or is not a jar file: ' + dacapo)
dacapoSuccess = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====", re.MULTILINE)
dacapoFail = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) FAILED (warmup|) =====", re.MULTILINE)
dacapoTime = re.compile(r"===== DaCapo 0\.1\.0(-SNAPSHOT)? (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : "Scala-DaCapo", 'name' : '<benchmark>', 'score' : '<time>'})
return Test("Scala-DaCapo-" + name, ['-jar', mx._cygpathU2W(dacapo), name] + _noneAsEmptyList(dacapoArgs), [dacapoSuccess], [dacapoFail], [dacapoMatcher], ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops'])
def getBootstraps():
time = re.compile(r"Bootstrapping Graal\.+ in (?P<time>[0-9]+) ms( \(compiled (?P<methods>[0-9]+) methods\))?")
scoreMatcher = ValuesMatcher(time, {'group' : 'Bootstrap', 'name' : 'BootstrapTime', 'score' : '<time>'})
methodMatcher = ValuesMatcher(time, {'group' : 'Bootstrap', 'name' : 'BootstrapMethods', 'score' : '<methods>'})
scoreMatcherBig = ValuesMatcher(time, {'group' : 'Bootstrap-bigHeap', 'name' : 'BootstrapTime', 'score' : '<time>'})
methodMatcherBig = ValuesMatcher(time, {'group' : 'Bootstrap-bigHeap', 'name' : 'BootstrapMethods', 'score' : '<methods>'})
tests = []
tests.append(Test("Bootstrap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcher, methodMatcher], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
tests.append(Test("Bootstrap-bigHeap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcherBig, methodMatcherBig], vmOpts=['-Xms2g'], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
return tests
class CTWMode:
Full, NoInline = range(2)
def getCTW(vm, mode):
time = re.compile(r"CompileTheWorld : Done \([0-9]+ classes, [0-9]+ methods, (?P<time>[0-9]+) ms\)")
scoreMatcher = ValuesMatcher(time, {'group' : 'CompileTheWorld', 'name' : 'CompileTime', 'score' : '<time>'})
jre = os.environ.get('JAVA_HOME')
if exists(join(jre, 'jre')):
jre = join(jre, 'jre')
rtjar = join(jre, 'lib', 'rt.jar')
args = ['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + rtjar]
if vm == 'jvmci':
args += ['-XX:+BootstrapGraal']
if mode >= CTWMode.NoInline:
if not mx_jvmci.isJVMCIEnabled(vm):
args.append('-XX:-Inline')
else:
args.append('-G:CompileTheWordConfig=-Inline')
return Test("CompileTheWorld", args, successREs=[time], scoreMatchers=[scoreMatcher], benchmarkCompilationRate=False)
class Tee:
def __init__(self):
self.output = StringIO.StringIO()
def eat(self, line):
self.output.write(line)
sys.stdout.write(line)
"""
Encapsulates a single program that is a sanity test and/or a benchmark.
"""
class Test:
def __init__(self, name, cmd, successREs=None, failureREs=None, scoreMatchers=None, vmOpts=None, defaultCwd=None, ignoredVMs=None, benchmarkCompilationRate=False):
self.name = name
self.successREs = _noneAsEmptyList(successREs)
self.failureREs = _noneAsEmptyList(failureREs) + [re.compile(r"Exception occurred in scope: ")]
self.scoreMatchers = _noneAsEmptyList(scoreMatchers)
self.vmOpts = _noneAsEmptyList(vmOpts)
self.cmd = cmd
self.defaultCwd = defaultCwd
self.ignoredVMs = _noneAsEmptyList(ignoredVMs)
self.benchmarkCompilationRate = benchmarkCompilationRate
if benchmarkCompilationRate:
self.vmOpts = self.vmOpts + ['-XX:+CITime']
def __str__(self):
return self.name
def test(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
"""
Run this program as a sanity test.
"""
if vm in self.ignoredVMs:
return True
if cwd is None:
cwd = self.defaultCwd
parser = OutputParser()
jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)")
parser.addMatcher(ValuesMatcher(jvmError, {'jvmError' : '<jvmerror>'}))
for successRE in self.successREs:
parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
for failureRE in self.failureREs:
parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))
tee = Tee()
retcode = mx_graal.run_vm(self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild)
output = tee.output.getvalue()
valueMaps = parser.parse(output)
if len(valueMaps) == 0:
return False
record = {}
for valueMap in valueMaps:
for key, value in valueMap.items():
if record.has_key(key) and record[key] != value:
mx.abort('Inconsistent values returned by test matchers: ' + str(valueMaps))
record[key] = value
jvmErrorFile = record.get('jvmError')
if jvmErrorFile:
mx.log('/!\\JVM Error : dumping error log...')
with open(jvmErrorFile, 'rb') as fp:
mx.log(fp.read())
os.unlink(jvmErrorFile)
return False
if record.get('failed') == '1':
return False
return retcode == 0 and record.get('passed') == '1'
def bench(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
"""
Run this program as a benchmark.
"""
if vm in self.ignoredVMs:
return {}
if cwd is None:
cwd = self.defaultCwd
parser = OutputParser()
for successRE in self.successREs:
parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
for failureRE in self.failureREs:
parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))
for scoreMatcher in self.scoreMatchers:
parser.addMatcher(scoreMatcher)
if self.benchmarkCompilationRate:
if vm == 'jvmci':
bps = re.compile(r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
ibps = re.compile(r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
parser.addMatcher(ValuesMatcher(bps, {'group' : 'ParsedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
else:
ibps = re.compile(r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard")
parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : '<compiler>:' + self.name, 'score' : '<rate>'}))
startDelim = 'START: ' + self.name
endDelim = 'END: ' + self.name
outputfile = os.environ.get('BENCH_OUTPUT', None)
if outputfile:
# Used only to debug output parsing
with open(outputfile) as fp:
output = fp.read()
start = output.find(startDelim)
end = output.find(endDelim, start)
if start == -1 and end == -1:
return {}
output = output[start + len(startDelim + os.linesep): end]
mx.log(startDelim)
mx.log(output)
mx.log(endDelim)
else:
tee = Tee()
mx.log(startDelim)
if mx_graal.run_vm(self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild) != 0:
mx.abort("Benchmark failed (non-zero retcode)")
mx.log(endDelim)
output = tee.output.getvalue()
groups = {}
passed = False
for valueMap in parser.parse(output):
assert (valueMap.has_key('name') and valueMap.has_key('score') and valueMap.has_key('group')) or valueMap.has_key('passed') or valueMap.has_key('failed'), valueMap
if valueMap.get('failed') == '1':
mx.abort("Benchmark failed")
if valueMap.get('passed') == '1':
passed = True
groupName = valueMap.get('group')
if groupName:
group = groups.setdefault(groupName, {})
name = valueMap.get('name')
score = valueMap.get('score')
if name and score:
group[name] = score
if not passed:
mx.abort("Benchmark failed (not passed)")
return groups
|
smarr/GraalCompiler
|
mx.graal/sanitycheck.py
|
Python
|
gpl-2.0
| 19,705
|
[
"VisIt"
] |
ccf055dc3afc5fb028f1feddda76b4a3e0e08eea3eb41b3a247c8b4add7c7ab3
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import random
import ctypes.util
import binascii
import struct
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cffi import FFI
if six.PY3:
unicode = str
_ffi = FFI()
_ffi.cdef('''
/* p11-kit/pkcs11.h */
typedef unsigned long CK_FLAGS;
struct _CK_VERSION
{
unsigned char major;
unsigned char minor;
};
typedef unsigned long CK_SLOT_ID;
typedef CK_SLOT_ID *CK_SLOT_ID_PTR;
typedef unsigned long CK_SESSION_HANDLE;
typedef unsigned long CK_USER_TYPE;
typedef unsigned long CK_OBJECT_HANDLE;
typedef unsigned long CK_OBJECT_CLASS;
typedef unsigned long CK_KEY_TYPE;
typedef unsigned long CK_ATTRIBUTE_TYPE;
typedef unsigned long ck_flags_t;
typedef unsigned char CK_BBOOL;
typedef unsigned long int CK_ULONG;
typedef CK_ULONG *CK_ULONG_PTR;
struct _CK_ATTRIBUTE
{
CK_ATTRIBUTE_TYPE type;
void *pValue;
unsigned long ulValueLen;
};
typedef unsigned long CK_MECHANISM_TYPE;
struct _CK_MECHANISM
{
CK_MECHANISM_TYPE mechanism;
void *pParameter;
unsigned long ulParameterLen;
};
struct _CK_TOKEN_INFO
{
unsigned char label[32];
unsigned char manufacturer_id[32];
unsigned char model[16];
unsigned char serial_number[16];
ck_flags_t flags;
unsigned long max_session_count;
unsigned long session_count;
unsigned long max_rw_session_count;
unsigned long rw_session_count;
unsigned long max_pin_len;
unsigned long min_pin_len;
unsigned long total_public_memory;
unsigned long free_public_memory;
unsigned long total_private_memory;
unsigned long free_private_memory;
struct _CK_VERSION hardware_version;
struct _CK_VERSION firmware_version;
unsigned char utc_time[16];
};
typedef struct _CK_TOKEN_INFO CK_TOKEN_INFO;
typedef CK_TOKEN_INFO *CK_TOKEN_INFO_PTR;
typedef unsigned long CK_RV;
typedef ... *CK_NOTIFY;
struct _CK_FUNCTION_LIST;
typedef CK_RV (*CK_C_Initialize) (void *init_args);
typedef CK_RV (*CK_C_Finalize) (void *pReserved);
typedef ... *CK_C_GetInfo;
typedef ... *CK_C_GetFunctionList;
CK_RV C_GetFunctionList (struct _CK_FUNCTION_LIST **function_list);
typedef CK_RV (*CK_C_GetSlotList) (CK_BBOOL tokenPresent,
CK_SLOT_ID_PTR pSlotList,
CK_ULONG_PTR pulCount);
typedef ... *CK_C_GetSlotInfo;
typedef CK_RV (*CK_C_GetTokenInfo) (CK_SLOT_ID slotID,
CK_TOKEN_INFO_PTR pInfo);
typedef ... *CK_C_WaitForSlotEvent;
typedef ... *CK_C_GetMechanismList;
typedef ... *CK_C_GetMechanismInfo;
typedef ... *CK_C_InitToken;
typedef ... *CK_C_InitPIN;
typedef ... *CK_C_SetPIN;
typedef CK_RV (*CK_C_OpenSession) (CK_SLOT_ID slotID, CK_FLAGS flags,
void *application, CK_NOTIFY notify,
CK_SESSION_HANDLE *session);
typedef CK_RV (*CK_C_CloseSession) (CK_SESSION_HANDLE session);
typedef ... *CK_C_CloseAllSessions;
typedef ... *CK_C_GetSessionInfo;
typedef ... *CK_C_GetOperationState;
typedef ... *CK_C_SetOperationState;
typedef CK_RV (*CK_C_Login) (CK_SESSION_HANDLE session, CK_USER_TYPE user_type,
unsigned char *pin, unsigned long pin_len);
typedef CK_RV (*CK_C_Logout) (CK_SESSION_HANDLE session);
typedef CK_RV (*CK_C_CreateObject) (CK_SESSION_HANDLE session,
struct _CK_ATTRIBUTE *templ,
unsigned long count,
CK_OBJECT_HANDLE *object);
typedef ... *CK_C_CopyObject;
typedef CK_RV (*CK_C_DestroyObject) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE object);
typedef ... *CK_C_GetObjectSize;
typedef CK_RV (*CK_C_GetAttributeValue) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE object,
struct _CK_ATTRIBUTE *templ,
unsigned long count);
typedef CK_RV (*CK_C_SetAttributeValue) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE object,
struct _CK_ATTRIBUTE *templ,
unsigned long count);
typedef CK_RV (*CK_C_FindObjectsInit) (CK_SESSION_HANDLE session,
struct _CK_ATTRIBUTE *templ,
unsigned long count);
typedef CK_RV (*CK_C_FindObjects) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE *object,
unsigned long max_object_count,
unsigned long *object_count);
typedef CK_RV (*CK_C_FindObjectsFinal) (CK_SESSION_HANDLE session);
typedef ... *CK_C_EncryptInit;
typedef ... *CK_C_Encrypt;
typedef ... *CK_C_EncryptUpdate;
typedef ... *CK_C_EncryptFinal;
typedef ... *CK_C_DecryptInit;
typedef ... *CK_C_Decrypt;
typedef ... *CK_C_DecryptUpdate;
typedef ... *CK_C_DecryptFinal;
typedef ... *CK_C_DigestInit;
typedef ... *CK_C_Digest;
typedef ... *CK_C_DigestUpdate;
typedef ... *CK_C_DigestKey;
typedef ... *CK_C_DigestFinal;
typedef ... *CK_C_SignInit;
typedef ... *CK_C_Sign;
typedef ... *CK_C_SignUpdate;
typedef ... *CK_C_SignFinal;
typedef ... *CK_C_SignRecoverInit;
typedef ... *CK_C_SignRecover;
typedef ... *CK_C_VerifyInit;
typedef ... *CK_C_Verify;
typedef ... *CK_C_VerifyUpdate;
typedef ... *CK_C_VerifyFinal;
typedef ... *CK_C_VerifyRecoverInit;
typedef ... *CK_C_VerifyRecover;
typedef ... *CK_C_DigestEncryptUpdate;
typedef ... *CK_C_DecryptDigestUpdate;
typedef ... *CK_C_SignEncryptUpdate;
typedef ... *CK_C_DecryptVerifyUpdate;
typedef CK_RV (*CK_C_GenerateKey) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
struct _CK_ATTRIBUTE *templ,
unsigned long count,
CK_OBJECT_HANDLE *key);
typedef CK_RV (*CK_C_GenerateKeyPair) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
struct _CK_ATTRIBUTE *
public_key_template,
unsigned long
public_key_attribute_count,
struct _CK_ATTRIBUTE *
private_key_template,
unsigned long
private_key_attribute_count,
CK_OBJECT_HANDLE *public_key,
CK_OBJECT_HANDLE *private_key);
typedef CK_RV (*CK_C_WrapKey) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
CK_OBJECT_HANDLE wrapping_key,
CK_OBJECT_HANDLE key,
unsigned char *wrapped_key,
unsigned long *wrapped_key_len);
typedef CK_RV (*CK_C_UnwrapKey) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
CK_OBJECT_HANDLE unwrapping_key,
unsigned char *wrapped_key,
unsigned long wrapped_key_len,
struct _CK_ATTRIBUTE *templ,
unsigned long attribute_count,
CK_OBJECT_HANDLE *key);
typedef ... *CK_C_DeriveKey;
typedef ... *CK_C_SeedRandom;
typedef ... *CK_C_GenerateRandom;
typedef ... *CK_C_GetFunctionStatus;
typedef ... *CK_C_CancelFunction;
struct _CK_FUNCTION_LIST
{
struct _CK_VERSION version;
CK_C_Initialize C_Initialize;
CK_C_Finalize C_Finalize;
CK_C_GetInfo C_GetInfo;
CK_C_GetFunctionList C_GetFunctionList;
CK_C_GetSlotList C_GetSlotList;
CK_C_GetSlotInfo C_GetSlotInfo;
CK_C_GetTokenInfo C_GetTokenInfo;
CK_C_GetMechanismList C_GetMechanismList;
CK_C_GetMechanismInfo C_GetMechanismInfo;
CK_C_InitToken C_InitToken;
CK_C_InitPIN C_InitPIN;
CK_C_SetPIN C_SetPIN;
CK_C_OpenSession C_OpenSession;
CK_C_CloseSession C_CloseSession;
CK_C_CloseAllSessions C_CloseAllSessions;
CK_C_GetSessionInfo C_GetSessionInfo;
CK_C_GetOperationState C_GetOperationState;
CK_C_SetOperationState C_SetOperationState;
CK_C_Login C_Login;
CK_C_Logout C_Logout;
CK_C_CreateObject C_CreateObject;
CK_C_CopyObject C_CopyObject;
CK_C_DestroyObject C_DestroyObject;
CK_C_GetObjectSize C_GetObjectSize;
CK_C_GetAttributeValue C_GetAttributeValue;
CK_C_SetAttributeValue C_SetAttributeValue;
CK_C_FindObjectsInit C_FindObjectsInit;
CK_C_FindObjects C_FindObjects;
CK_C_FindObjectsFinal C_FindObjectsFinal;
CK_C_EncryptInit C_EncryptInit;
CK_C_Encrypt C_Encrypt;
CK_C_EncryptUpdate C_EncryptUpdate;
CK_C_EncryptFinal C_EncryptFinal;
CK_C_DecryptInit C_DecryptInit;
CK_C_Decrypt C_Decrypt;
CK_C_DecryptUpdate C_DecryptUpdate;
CK_C_DecryptFinal C_DecryptFinal;
CK_C_DigestInit C_DigestInit;
CK_C_Digest C_Digest;
CK_C_DigestUpdate C_DigestUpdate;
CK_C_DigestKey C_DigestKey;
CK_C_DigestFinal C_DigestFinal;
CK_C_SignInit C_SignInit;
CK_C_Sign C_Sign;
CK_C_SignUpdate C_SignUpdate;
CK_C_SignFinal C_SignFinal;
CK_C_SignRecoverInit C_SignRecoverInit;
CK_C_SignRecover C_SignRecover;
CK_C_VerifyInit C_VerifyInit;
CK_C_Verify C_Verify;
CK_C_VerifyUpdate C_VerifyUpdate;
CK_C_VerifyFinal C_VerifyFinal;
CK_C_VerifyRecoverInit C_VerifyRecoverInit;
CK_C_VerifyRecover C_VerifyRecover;
CK_C_DigestEncryptUpdate C_DigestEncryptUpdate;
CK_C_DecryptDigestUpdate C_DecryptDigestUpdate;
CK_C_SignEncryptUpdate C_SignEncryptUpdate;
CK_C_DecryptVerifyUpdate C_DecryptVerifyUpdate;
CK_C_GenerateKey C_GenerateKey;
CK_C_GenerateKeyPair C_GenerateKeyPair;
CK_C_WrapKey C_WrapKey;
CK_C_UnwrapKey C_UnwrapKey;
CK_C_DeriveKey C_DeriveKey;
CK_C_SeedRandom C_SeedRandom;
CK_C_GenerateRandom C_GenerateRandom;
CK_C_GetFunctionStatus C_GetFunctionStatus;
CK_C_CancelFunction C_CancelFunction;
CK_C_WaitForSlotEvent C_WaitForSlotEvent;
};
typedef unsigned char CK_BYTE;
typedef unsigned char CK_UTF8CHAR;
typedef CK_BYTE *CK_BYTE_PTR;
typedef CK_OBJECT_HANDLE *CK_OBJECT_HANDLE_PTR;
typedef struct _CK_ATTRIBUTE CK_ATTRIBUTE;
typedef struct _CK_ATTRIBUTE *CK_ATTRIBUTE_PTR;
typedef struct _CK_MECHANISM CK_MECHANISM;
typedef struct _CK_FUNCTION_LIST *CK_FUNCTION_LIST_PTR;
/* p11-kit/uri.h */
typedef enum {
DUMMY /* ..., */
} P11KitUriType;
typedef ... P11KitUri;
CK_ATTRIBUTE_PTR p11_kit_uri_get_attributes (P11KitUri *uri,
CK_ULONG *n_attrs);
int p11_kit_uri_any_unrecognized (P11KitUri *uri);
P11KitUri* p11_kit_uri_new (void);
int p11_kit_uri_parse (const char *string,
P11KitUriType uri_type,
P11KitUri *uri);
void p11_kit_uri_free (P11KitUri *uri);
/* p11helper.c */
struct ck_rsa_pkcs_oaep_params {
CK_MECHANISM_TYPE hash_alg;
unsigned long mgf;
unsigned long source;
void *source_data;
unsigned long source_data_len;
};
typedef struct ck_rsa_pkcs_oaep_params CK_RSA_PKCS_OAEP_PARAMS;
''')
_libp11_kit = _ffi.dlopen(ctypes.util.find_library('p11-kit'))
# utility
NULL = _ffi.NULL
unsigned_char = _ffi.typeof('unsigned char')
unsigned_long = _ffi.typeof('unsigned long')
sizeof = _ffi.sizeof
def new_ptr(ctype, *args):
return _ffi.new(_ffi.getctype(ctype, '*'), *args)
def new_array(ctype, *args):
return _ffi.new(_ffi.getctype(ctype, '[]'), *args)
# p11-kit/pkcs11.h
CK_SESSION_HANDLE = _ffi.typeof('CK_SESSION_HANDLE')
CK_OBJECT_HANDLE = _ffi.typeof('CK_OBJECT_HANDLE')
CKU_USER = 1
CKF_RW_SESSION = 0x2
CKF_SERIAL_SESSION = 0x4
CK_OBJECT_CLASS = _ffi.typeof('CK_OBJECT_CLASS')
CKO_PUBLIC_KEY = 2
CKO_PRIVATE_KEY = 3
CKO_SECRET_KEY = 4
CKO_VENDOR_DEFINED = 0x80000000
CK_KEY_TYPE = _ffi.typeof('CK_KEY_TYPE')
CKK_RSA = 0
CKK_AES = 0x1f
CKA_CLASS = 0
CKA_TOKEN = 1
CKA_PRIVATE = 2
CKA_LABEL = 3
CKA_TRUSTED = 0x86
CKA_KEY_TYPE = 0x100
CKA_ID = 0x102
CKA_SENSITIVE = 0x103
CKA_ENCRYPT = 0x104
CKA_DECRYPT = 0x105
CKA_WRAP = 0x106
CKA_UNWRAP = 0x107
CKA_SIGN = 0x108
CKA_SIGN_RECOVER = 0x109
CKA_VERIFY = 0x10a
CKA_VERIFY_RECOVER = 0x10b
CKA_DERIVE = 0x10c
CKA_MODULUS = 0x120
CKA_MODULUS_BITS = 0x121
CKA_PUBLIC_EXPONENT = 0x122
CKA_VALUE_LEN = 0x161
CKA_EXTRACTABLE = 0x162
CKA_LOCAL = 0x163
CKA_NEVER_EXTRACTABLE = 0x164
CKA_ALWAYS_SENSITIVE = 0x165
CKA_MODIFIABLE = 0x170
CKA_ALWAYS_AUTHENTICATE = 0x202
CKA_WRAP_WITH_TRUSTED = 0x210
CKM_RSA_PKCS_KEY_PAIR_GEN = 0
CKM_RSA_PKCS = 1
CKM_RSA_PKCS_OAEP = 9
CKM_SHA_1 = 0x220
CKM_AES_KEY_GEN = 0x1080
CKR_OK = 0
CKR_ATTRIBUTE_TYPE_INVALID = 0x12
CKR_USER_NOT_LOGGED_IN = 0x101
CKR_BUFFER_TOO_SMALL = 0x150
CK_BYTE = _ffi.typeof('CK_BYTE')
CK_BBOOL = _ffi.typeof('CK_BBOOL')
CK_ULONG = _ffi.typeof('CK_ULONG')
CK_BYTE_PTR = _ffi.typeof('CK_BYTE_PTR')
CK_FALSE = 0
CK_TRUE = 1
CK_OBJECT_HANDLE_PTR = _ffi.typeof('CK_OBJECT_HANDLE_PTR')
CK_ATTRIBUTE = _ffi.typeof('CK_ATTRIBUTE')
CK_MECHANISM = _ffi.typeof('CK_MECHANISM')
CK_FUNCTION_LIST_PTR = _ffi.typeof('CK_FUNCTION_LIST_PTR')
CK_SLOT_ID = _ffi.typeof('CK_SLOT_ID')
CK_TOKEN_INFO = _ffi.typeof('CK_TOKEN_INFO')
NULL_PTR = NULL
# p11-kit/uri.h
P11_KIT_URI_OK = 0
P11_KIT_URI_FOR_OBJECT = 2
p11_kit_uri_get_attributes = _libp11_kit.p11_kit_uri_get_attributes
p11_kit_uri_any_unrecognized = _libp11_kit.p11_kit_uri_any_unrecognized
p11_kit_uri_new = _libp11_kit.p11_kit_uri_new
p11_kit_uri_parse = _libp11_kit.p11_kit_uri_parse
p11_kit_uri_free = _libp11_kit.p11_kit_uri_free
# library.c
def loadLibrary(module):
"""Load the PKCS#11 library"""
# Load PKCS #11 library
if module:
# pylint: disable=no-member
pDynLib = _ffi.dlopen(module, _ffi.RTLD_NOW | _ffi.RTLD_LOCAL)
else:
raise Error("PKCS#11 module name is empty")
# Retrieve the entry point for C_GetFunctionList
pGetFunctionList = pDynLib.C_GetFunctionList
if pGetFunctionList == NULL:
raise Error(
f"Module '{module}' has no function 'C_GetFunctionList'."
)
# Store the handle so we can dlclose it later
return pGetFunctionList, pDynLib
# p11helper.c
# compat TODO
CKM_AES_KEY_WRAP = 0x2109
CKM_AES_KEY_WRAP_PAD = 0x210a
# TODO
CKA_COPYABLE = 0x0017
CKG_MGF1_SHA1 = 0x00000001
CKZ_DATA_SPECIFIED = 0x00000001
CK_RSA_PKCS_OAEP_PARAMS = _ffi.typeof('CK_RSA_PKCS_OAEP_PARAMS')
true_ptr = new_ptr(CK_BBOOL, CK_TRUE)
false_ptr = new_ptr(CK_BBOOL, CK_FALSE)
MAX_TEMPLATE_LEN = 32
#
# Constants
#
CONST_RSA_PKCS_OAEP_PARAMS_ptr = new_ptr(CK_RSA_PKCS_OAEP_PARAMS, dict(
hash_alg=CKM_SHA_1,
mgf=CKG_MGF1_SHA1,
source=CKZ_DATA_SPECIFIED,
source_data=NULL,
source_data_len=0,
))
#
# ipap11helper Exceptions
#
class P11HelperException(Exception):
"""parent class for all exceptions"""
P11HelperException.__name__ = 'Exception'
class Error(P11HelperException):
"""general error"""
class NotFound(P11HelperException):
"""key not found"""
class DuplicationError(P11HelperException):
"""key already exists"""
########################################################################
# Support functions
#
def pyobj_to_bool(pyobj):
if pyobj:
return true_ptr
return false_ptr
def convert_py2bool(mapping):
return tuple(pyobj_to_bool(py_obj) for py_obj in mapping)
def string_to_pybytes_or_none(str, len):
if str == NULL:
return None
return _ffi.buffer(str, len)[:]
def unicode_to_char_array(unicode):
"""
Convert a unicode string to a UTF-8 encoded char array
:param unicode: input python unicode object
"""
try:
utf8_str = unicode.encode('utf-8')
except Exception:
raise Error("Unable to encode UTF-8")
try:
result = new_array(unsigned_char, utf8_str)
except Exception:
raise Error("Unable to get bytes from string")
l = len(utf8_str)
return result, l
def char_array_to_unicode(array, l):
"""
Convert utf-8 encoded char array to unicode object
"""
return _ffi.buffer(array, l)[:].decode('utf-8')
def int_to_bytes(value):
try:
return binascii.unhexlify('{0:x}'.format(value))
except (TypeError, binascii.Error):
return binascii.unhexlify('0{0:x}'.format(value))
def bytes_to_int(value):
return int(binascii.hexlify(value), 16)
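# Illustrative round trip (editor's comment, not part of the original module):
# for the RSA public exponent 65537 used further below, int_to_bytes(65537)
# returns b'\x01\x00\x01' and bytes_to_int(b'\x01\x00\x01') returns 65537.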
def check_return_value(rv, message):
"""
Test the result value of PKCS#11 operations
"""
if rv != CKR_OK:
try:
errmsg = "Error at %s: 0x%x\n" % (message, rv)
except Exception:
raise Error("An error occured during error message generation. "
"Please report this problem. Developers will use "
"a crystal ball to find out the root cause.")
else:
raise Error(errmsg)
def _fill_template_from_parts(attr, template_len, id, id_len, label, label_len,
class_, cka_wrap, cka_unwrap):
"""
Fill template structure with pointers to attributes passed as independent
variables.
Variables with NULL values will be omitted from template.
@warning input variables should not be modified when template is in use
"""
cnt = 0
if label != NULL:
attr[0].type = CKA_LABEL
attr[0].pValue = label
attr[0].ulValueLen = label_len
attr += 1
cnt += 1
assert cnt < template_len[0]
if id != NULL:
attr[0].type = CKA_ID
attr[0].pValue = id
attr[0].ulValueLen = id_len
attr += 1
cnt += 1
assert cnt < template_len[0]
if cka_wrap != NULL:
attr[0].type = CKA_WRAP
attr[0].pValue = cka_wrap
attr[0].ulValueLen = sizeof(CK_BBOOL)
attr += 1
cnt += 1
assert cnt < template_len[0]
if cka_unwrap != NULL:
attr[0].type = CKA_UNWRAP
attr[0].pValue = cka_unwrap
attr[0].ulValueLen = sizeof(CK_BBOOL)
attr += 1
cnt += 1
assert cnt < template_len[0]
if class_ != NULL:
attr[0].type = CKA_CLASS
attr[0].pValue = class_
attr[0].ulValueLen = sizeof(CK_OBJECT_CLASS)
attr += 1
cnt += 1
assert cnt < template_len[0]
template_len[0] = cnt
def _parse_uri(uri_str):
"""
Parse string to P11-kit representation of PKCS#11 URI.
"""
uri = p11_kit_uri_new()
if not uri:
raise Error("Cannot initialize URI parser")
try:
result = p11_kit_uri_parse(uri_str, P11_KIT_URI_FOR_OBJECT, uri)
if result != P11_KIT_URI_OK:
raise Error("Cannot parse URI")
if p11_kit_uri_any_unrecognized(uri):
raise Error("PKCS#11 URI contains unsupported attributes")
except Error:
p11_kit_uri_free(uri)
raise
return uri
def _set_wrapping_mech_parameters(mech_type, mech):
"""
Set default parameter values for the wrapping mechanism
:param mech_type: mechanism type
:param mech: structure to fill with parameters based on the mechanism type
Warning: do not deallocate the parameter values; they are static variables
"""
if mech_type in (CKM_RSA_PKCS, CKM_AES_KEY_WRAP, CKM_AES_KEY_WRAP_PAD):
mech.pParameter = NULL
mech.ulParameterLen = 0
elif mech_type == CKM_RSA_PKCS_OAEP:
# Use the same configuration as openSSL
# https://www.openssl.org/docs/crypto/RSA_public_encrypt.html
mech.pParameter = CONST_RSA_PKCS_OAEP_PARAMS_ptr
mech.ulParameterLen = sizeof(CK_RSA_PKCS_OAEP_PARAMS)
else:
raise Error("Unsupported wrapping mechanism")
mech.mechanism = mech_type
########################################################################
# P11_Helper object
#
class P11_Helper:
@property
def p11(self):
return self.p11_ptr[0]
@property
def session(self):
return self.session_ptr[0]
def _find_key(self, template, template_len):
"""
Find keys matching the specified template.
Returns the matching key handles as a list.
:param template: PKCS#11 template for attribute matching
"""
result_objects = []
result_object_ptr = new_ptr(CK_OBJECT_HANDLE)
objectCount_ptr = new_ptr(CK_ULONG)
rv = self.p11.C_FindObjectsInit(self.session, template, template_len)
check_return_value(rv, "Find key init")
rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
objectCount_ptr)
check_return_value(rv, "Find key")
while objectCount_ptr[0] > 0:
result_objects.append(result_object_ptr[0])
rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
objectCount_ptr)
check_return_value(rv, "Check for duplicated key")
rv = self.p11.C_FindObjectsFinal(self.session)
check_return_value(rv, "Find objects final")
return result_objects
def _id_exists(self, id, id_len, class_):
"""
Test whether an object with the specified label, id and class exists
:param id: key ID (if the value is NULL, it will not be used to find the key)
:param id_len: key ID length
:param class_: key class
:return: True if the object was found, False if it does not exist
"""
object_count_ptr = new_ptr(CK_ULONG)
result_object_ptr = new_ptr(CK_OBJECT_HANDLE)
class_ptr = new_ptr(CK_OBJECT_CLASS, class_)
class_sec_ptr = new_ptr(CK_OBJECT_CLASS, CKO_SECRET_KEY)
template_pub_priv = new_array(CK_ATTRIBUTE, (
(CKA_ID, id, id_len),
(CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
))
template_sec = new_array(CK_ATTRIBUTE, (
(CKA_ID, id, id_len),
(CKA_CLASS, class_sec_ptr, sizeof(CK_OBJECT_CLASS)),
))
template_id = new_array(CK_ATTRIBUTE, (
(CKA_ID, id, id_len),
))
#
# Only one secret key with same ID is allowed
#
if class_ == CKO_SECRET_KEY:
rv = self.p11.C_FindObjectsInit(self.session, template_id, 1)
check_return_value(rv, "id, label exists init")
rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
object_count_ptr)
check_return_value(rv, "id, label exists")
rv = self.p11.C_FindObjectsFinal(self.session)
check_return_value(rv, "id, label exists final")
if object_count_ptr[0] > 0:
return True
return False
#
# Public and private keys can share one ID, but not with a secret key
#
# test whether a secret key with the same ID exists
rv = self.p11.C_FindObjectsInit(self.session, template_sec, 2)
check_return_value(rv, "id, label exists init")
rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
object_count_ptr)
check_return_value(rv, "id, label exists")
rv = self.p11.C_FindObjectsFinal(self.session)
check_return_value(rv, "id, label exists final")
if object_count_ptr[0] > 0:
# object found
return True
# test if pub/private key with same id exists
object_count_ptr[0] = 0
rv = self.p11.C_FindObjectsInit(self.session, template_pub_priv, 2)
check_return_value(rv, "id, label exists init")
rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
object_count_ptr)
check_return_value(rv, "id, label exists")
rv = self.p11.C_FindObjectsFinal(self.session)
check_return_value(rv, "id, label exists final")
if object_count_ptr[0] > 0:
# Object found
return True
# Object not found
return False
def __init__(self, token_label, user_pin, library_path):
self.p11_ptr = new_ptr(CK_FUNCTION_LIST_PTR)
self.session_ptr = new_ptr(CK_SESSION_HANDLE)
self.session_ptr[0] = 0
self.p11_ptr[0] = NULL
self.module_handle = None
# Parse method args
if isinstance(user_pin, unicode):
user_pin = user_pin.encode()
self.token_label = token_label
try:
pGetFunctionList, module_handle = loadLibrary(library_path)
except Exception:
raise Error(f"Could not load the library '{library_path}'.")
self.module_handle = module_handle
#
# Load the function list
#
pGetFunctionList(self.p11_ptr)
#
# Initialize
#
rv = self.p11.C_Initialize(NULL)
check_return_value(rv, "initialize")
#
# Get Slot
#
slot = self.get_slot()
if slot is None:
raise Error("No slot for label {} found".format(self.token_label))
#
# Start session
#
rv = self.p11.C_OpenSession(slot,
CKF_SERIAL_SESSION | CKF_RW_SESSION, NULL,
NULL, self.session_ptr)
check_return_value(rv, "open session")
#
# Login
#
rv = self.p11.C_Login(self.session, CKU_USER, user_pin, len(user_pin))
check_return_value(rv, "log in")
def get_slot(self):
"""Get slot where then token is located
:return: slot number or None when slot not found
"""
object_count_ptr = new_ptr(CK_ULONG)
# get slots ID
slots = None
for _i in range(0, 10):
# try max N times, then die to avoid infinite iteration
rv = self.p11.C_GetSlotList(CK_TRUE, NULL, object_count_ptr)
check_return_value(rv, "get slots IDs - prepare")
result_ids_ptr = new_array(CK_SLOT_ID, object_count_ptr[0])
rv = self.p11.C_GetSlotList(
CK_TRUE, result_ids_ptr, object_count_ptr)
if rv == CKR_BUFFER_TOO_SMALL:
continue
check_return_value(rv, "get slots IDs")
slots = result_ids_ptr
break # we have slots !!!
if slots is None:
raise Error("Failed to get slots")
for slot in slots:
token_info_ptr = new_ptr(CK_TOKEN_INFO)
rv = self.p11.C_GetTokenInfo(slot, token_info_ptr)
check_return_value(rv, 'get token info')
# softhsm always returns a label 32 bytes long, padded with spaces
# (character 32), so we have to rstrip() the padding before comparing.
# The label was created by softhsm-util, so the space padding is not our
# doing (cffi initializes structures with zeroes).
# If this ever stops being the case, keep backward compatibility in mind.
if self.token_label == char_array_to_unicode(
token_info_ptr[0].label, 32).rstrip():
return slot
return None
def finalize(self):
"""
Finalize operations with pkcs11 library
"""
if self.p11 == NULL:
return
#
# Logout
#
rv = self.p11.C_Logout(self.session)
check_return_value(rv, "log out")
#
# End session
#
rv = self.p11.C_CloseSession(self.session)
check_return_value(rv, "close session")
#
# Finalize
#
self.p11.C_Finalize(NULL)
self.p11_ptr[0] = NULL
self.session_ptr[0] = 0
self.module_handle = None
#################################################################
# Methods working with keys
#
def generate_master_key(self, label, id, key_length=16, cka_copyable=True,
cka_decrypt=False, cka_derive=False,
cka_encrypt=False, cka_extractable=True,
cka_modifiable=True, cka_private=True,
cka_sensitive=True, cka_sign=False,
cka_unwrap=True, cka_verify=False, cka_wrap=True,
cka_wrap_with_trusted=False):
"""
Generate master key
:return: master key handle
"""
if isinstance(id, unicode):
id = id.encode()
attrs = (
cka_copyable,
cka_decrypt,
cka_derive,
cka_encrypt,
cka_extractable,
cka_modifiable,
cka_private,
cka_sensitive,
cka_sign,
cka_unwrap,
cka_verify,
cka_wrap,
cka_wrap_with_trusted,
)
key_length_ptr = new_ptr(CK_ULONG, key_length)
master_key_ptr = new_ptr(CK_OBJECT_HANDLE)
label_unicode = label
id_length = len(id)
id_ = new_array(CK_BYTE, id)
# TODO check long overflow
label, label_length = unicode_to_char_array(label_unicode)
# TODO param?
mechanism_ptr = new_ptr(CK_MECHANISM, (
CKM_AES_KEY_GEN, NULL_PTR, 0
))
if key_length not in (16, 24, 32):
raise Error("generate_master_key: key length allowed values are: "
"16, 24 and 32")
if self._id_exists(id_, id_length, CKO_SECRET_KEY):
raise DuplicationError("Master key with same ID already exists")
# Process keyword boolean arguments
(_cka_copyable_ptr, cka_decrypt_ptr, cka_derive_ptr, cka_encrypt_ptr,
cka_extractable_ptr, cka_modifiable_ptr, cka_private_ptr,
cka_sensitive_ptr, cka_sign_ptr, cka_unwrap_ptr, cka_verify_ptr,
cka_wrap_ptr, cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs)
symKeyTemplate = new_array(CK_ATTRIBUTE, (
(CKA_ID, id_, id_length),
(CKA_LABEL, label, label_length),
(CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
(CKA_VALUE_LEN, key_length_ptr, sizeof(CK_ULONG)),
# TODO Softhsm doesn't support it
# (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)),
(CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)),
(CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)),
(CKA_ENCRYPT, cka_encrypt_ptr, sizeof(CK_BBOOL)),
(CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)),
(CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)),
(CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)),
(CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)),
(CKA_VERIFY, cka_verify_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP, cka_wrap_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr,
sizeof(CK_BBOOL)),
))
rv = self.p11.C_GenerateKey(self.session, mechanism_ptr,
symKeyTemplate,
(sizeof(symKeyTemplate) //
sizeof(CK_ATTRIBUTE)), master_key_ptr)
check_return_value(rv, "generate master key")
return master_key_ptr[0]
def generate_replica_key_pair(self, label, id, modulus_bits=2048,
pub_cka_copyable=True, pub_cka_derive=False,
pub_cka_encrypt=False,
pub_cka_modifiable=True,
pub_cka_private=True, pub_cka_trusted=False,
pub_cka_verify=False,
pub_cka_verify_recover=False,
pub_cka_wrap=True,
priv_cka_always_authenticate=False,
priv_cka_copyable=True,
priv_cka_decrypt=False,
priv_cka_derive=False,
priv_cka_extractable=False,
priv_cka_modifiable=True,
priv_cka_private=True,
priv_cka_sensitive=True,
priv_cka_sign=False,
priv_cka_sign_recover=False,
priv_cka_unwrap=True,
priv_cka_wrap_with_trusted=False):
"""
Generate replica keys
:returns: tuple (public_key_handle, private_key_handle)
"""
if isinstance(id, unicode):
id = id.encode()
attrs_pub = (
pub_cka_copyable,
pub_cka_derive,
pub_cka_encrypt,
pub_cka_modifiable,
pub_cka_private,
pub_cka_trusted,
pub_cka_verify,
pub_cka_verify_recover,
pub_cka_wrap,
)
attrs_priv = (
priv_cka_always_authenticate,
priv_cka_copyable,
priv_cka_decrypt,
priv_cka_derive,
priv_cka_extractable,
priv_cka_modifiable,
priv_cka_private,
priv_cka_sensitive,
priv_cka_sign,
priv_cka_sign_recover,
priv_cka_unwrap,
priv_cka_wrap_with_trusted,
)
label_unicode = label
id_ = new_array(CK_BYTE, id)
id_length = len(id)
label, label_length = unicode_to_char_array(label_unicode)
public_key_ptr = new_ptr(CK_OBJECT_HANDLE)
private_key_ptr = new_ptr(CK_OBJECT_HANDLE)
mechanism_ptr = new_ptr(CK_MECHANISM,
(CKM_RSA_PKCS_KEY_PAIR_GEN, NULL_PTR, 0))
if self._id_exists(id_, id_length, CKO_PRIVATE_KEY):
raise DuplicationError("Private key with same ID already exists")
if self._id_exists(id_, id_length, CKO_PUBLIC_KEY):
raise DuplicationError("Public key with same ID already exists")
modulus_bits_ptr = new_ptr(CK_ULONG, modulus_bits)
# Process keyword boolean arguments
(_pub_cka_copyable_ptr, pub_cka_derive_ptr, pub_cka_encrypt_ptr,
pub_cka_modifiable_ptr, pub_cka_private_ptr, pub_cka_trusted_ptr,
pub_cka_verify_ptr, pub_cka_verify_recover_ptr, pub_cka_wrap_ptr,
) = convert_py2bool(attrs_pub)
(priv_cka_always_authenticate_ptr, _priv_cka_copyable_ptr,
priv_cka_decrypt_ptr, priv_cka_derive_ptr, priv_cka_extractable_ptr,
priv_cka_modifiable_ptr, priv_cka_private_ptr, priv_cka_sensitive_ptr,
priv_cka_sign_ptr, _priv_cka_sign_recover_ptr, priv_cka_unwrap_ptr,
priv_cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs_priv)
# 65537 (RFC 6376 section 3.3.1)
public_exponent = new_array(CK_BYTE, (1, 0, 1))
publicKeyTemplate = new_array(CK_ATTRIBUTE, (
(CKA_ID, id_, id_length),
(CKA_LABEL, label, label_length),
(CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
(CKA_MODULUS_BITS, modulus_bits_ptr, sizeof(CK_ULONG)),
(CKA_PUBLIC_EXPONENT, public_exponent, 3),
# TODO Softhsm doesn't support it
# (CKA_COPYABLE, pub_cka_copyable_p, sizeof(CK_BBOOL)),
(CKA_DERIVE, pub_cka_derive_ptr, sizeof(CK_BBOOL)),
(CKA_ENCRYPT, pub_cka_encrypt_ptr, sizeof(CK_BBOOL)),
(CKA_MODIFIABLE, pub_cka_modifiable_ptr, sizeof(CK_BBOOL)),
(CKA_PRIVATE, pub_cka_private_ptr, sizeof(CK_BBOOL)),
(CKA_TRUSTED, pub_cka_trusted_ptr, sizeof(CK_BBOOL)),
(CKA_VERIFY, pub_cka_verify_ptr, sizeof(CK_BBOOL)),
(CKA_VERIFY_RECOVER, pub_cka_verify_recover_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP, pub_cka_wrap_ptr, sizeof(CK_BBOOL)),
))
privateKeyTemplate = new_array(CK_ATTRIBUTE, (
(CKA_ID, id_, id_length),
(CKA_LABEL, label, label_length),
(CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
(CKA_ALWAYS_AUTHENTICATE, priv_cka_always_authenticate_ptr,
sizeof(CK_BBOOL)),
# TODO Softhsm doesn't support it
# (CKA_COPYABLE, priv_cka_copyable_ptr, sizeof(CK_BBOOL)),
(CKA_DECRYPT, priv_cka_decrypt_ptr, sizeof(CK_BBOOL)),
(CKA_DERIVE, priv_cka_derive_ptr, sizeof(CK_BBOOL)),
(CKA_EXTRACTABLE, priv_cka_extractable_ptr, sizeof(CK_BBOOL)),
(CKA_MODIFIABLE, priv_cka_modifiable_ptr, sizeof(CK_BBOOL)),
(CKA_PRIVATE, priv_cka_private_ptr, sizeof(CK_BBOOL)),
(CKA_SENSITIVE, priv_cka_sensitive_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN, priv_cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN_RECOVER, priv_cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_UNWRAP, priv_cka_unwrap_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP_WITH_TRUSTED, priv_cka_wrap_with_trusted_ptr,
sizeof(CK_BBOOL)),
))
rv = self.p11.C_GenerateKeyPair(self.session, mechanism_ptr,
publicKeyTemplate,
(sizeof(publicKeyTemplate) //
sizeof(CK_ATTRIBUTE)),
privateKeyTemplate,
(sizeof(privateKeyTemplate) //
sizeof(CK_ATTRIBUTE)),
public_key_ptr,
private_key_ptr)
check_return_value(rv, "generate key pair")
return public_key_ptr[0], private_key_ptr[0]
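    # Illustrative usage sketch (assumption):
    #   pub, priv = p11.generate_replica_key_pair(u"replica1", key_id,
    #                                             modulus_bits=2048)
    # creates an RSA key pair on the token and returns the (public, private)
    # object handles.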
def find_keys(self, objclass=CKO_VENDOR_DEFINED, label=None, id=None,
cka_wrap=None, cka_unwrap=None, uri=None):
"""
Find key
"""
if isinstance(id, unicode):
id = id.encode()
if isinstance(uri, unicode):
uri = uri.encode()
class_ = objclass
class_ptr = new_ptr(CK_OBJECT_CLASS, class_)
ckawrap = NULL
ckaunwrap = NULL
if id is not None:
id_ = new_array(CK_BYTE, id)
id_length = len(id)
else:
id_ = NULL
id_length = 0
label_unicode, label = label, NULL
cka_wrap_bool = cka_wrap
cka_unwrap_bool = cka_unwrap
label_length = 0
uri_str = uri
uri = NULL
template = new_array(CK_ATTRIBUTE, MAX_TEMPLATE_LEN)
template_len_ptr = new_ptr(CK_ULONG, MAX_TEMPLATE_LEN)
# TODO check long overflow
if label_unicode is not None:
label, label_length = unicode_to_char_array(label_unicode)
if cka_wrap_bool is not None:
if cka_wrap_bool:
ckawrap = true_ptr
else:
ckawrap = false_ptr
if cka_unwrap_bool is not None:
if cka_unwrap_bool:
ckaunwrap = true_ptr
else:
ckaunwrap = false_ptr
if class_ == CKO_VENDOR_DEFINED:
class_ptr = NULL
try:
if uri_str is None:
_fill_template_from_parts(template, template_len_ptr, id_,
id_length, label, label_length,
class_ptr, ckawrap, ckaunwrap)
else:
uri = _parse_uri(uri_str)
template = (p11_kit_uri_get_attributes(uri, template_len_ptr))
# Do not deallocate URI while you are using the template.
# Template contains pointers to values inside URI!
result_list = self._find_key(template, template_len_ptr[0])
return result_list
finally:
if uri != NULL:
p11_kit_uri_free(uri)
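    # Illustrative usage sketch (assumption):
    #   handles = p11.find_keys(KEY_CLASS_SECRET_KEY, label=u"dnssec-master",
    #                           cka_wrap=True)
    # returns the matching object handles; KEY_CLASS_SECRET_KEY is the
    # module-level alias for CKO_SECRET_KEY defined further below.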
def delete_key(self, key_handle):
"""
delete key
"""
# TODO check long overflow
rv = self.p11.C_DestroyObject(self.session, key_handle)
check_return_value(rv, "object deletion")
def _export_RSA_public_key(self, object):
"""
export RSA public key
"""
class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY)
key_type_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA)
obj_template = new_array(CK_ATTRIBUTE, (
(CKA_MODULUS, NULL_PTR, 0),
(CKA_PUBLIC_EXPONENT, NULL_PTR, 0),
(CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
(CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
))
rv = self.p11.C_GetAttributeValue(self.session, object, obj_template,
(sizeof(obj_template) //
sizeof(CK_ATTRIBUTE)))
check_return_value(rv, "get RSA public key values - prepare")
# Set proper size for attributes
modulus = new_array(CK_BYTE,
obj_template[0].ulValueLen * sizeof(CK_BYTE))
obj_template[0].pValue = modulus
exponent = new_array(CK_BYTE,
obj_template[1].ulValueLen * sizeof(CK_BYTE))
obj_template[1].pValue = exponent
rv = self.p11.C_GetAttributeValue(self.session, object, obj_template,
(sizeof(obj_template) //
sizeof(CK_ATTRIBUTE)))
check_return_value(rv, "get RSA public key values")
# Check if the key is RSA public key
if class_ptr[0] != CKO_PUBLIC_KEY:
raise Error("export_RSA_public_key: required public key class")
if key_type_ptr[0] != CKK_RSA:
raise Error("export_RSA_public_key: required RSA key type")
try:
n = bytes_to_int(string_to_pybytes_or_none(
modulus, obj_template[0].ulValueLen))
except Exception:
raise Error("export_RSA_public_key: internal error: unable to "
"convert modulus")
try:
e = bytes_to_int(string_to_pybytes_or_none(
exponent, obj_template[1].ulValueLen))
except Exception:
raise Error("export_RSA_public_key: internal error: unable to "
"convert exponent")
# set modulus and exponent
rsa_ = rsa.RSAPublicNumbers(e, n)
try:
pkey = rsa_.public_key(default_backend())
except Exception:
raise Error("export_RSA_public_key: internal error: "
"EVP_PKEY_set1_RSA failed")
try:
ret = pkey.public_bytes(
format=serialization.PublicFormat.SubjectPublicKeyInfo,
encoding=serialization.Encoding.DER,
)
except Exception:
ret = None
return ret
def export_public_key(self, key_handle):
"""
Export public key
Export public key in SubjectPublicKeyInfo (RFC5280) DER encoded format
"""
object = key_handle
class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY)
key_type_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA)
# TODO check long overflow
obj_template = new_array(CK_ATTRIBUTE, (
(CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
(CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
))
rv = self.p11.C_GetAttributeValue(self.session, object, obj_template,
(sizeof(obj_template) //
sizeof(CK_ATTRIBUTE)))
check_return_value(rv, "export_public_key: get RSA public key values")
if class_ptr[0] != CKO_PUBLIC_KEY:
raise Error("export_public_key: required public key class")
if key_type_ptr[0] == CKK_RSA:
return self._export_RSA_public_key(object)
else:
raise Error("export_public_key: unsupported key type")
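    # Illustrative note (assumption): the DER blob returned above can be loaded
    # back with python-cryptography via
    #   serialization.load_der_public_key(der_bytes, default_backend())
    # which is the inverse of the serialization done in _export_RSA_public_key().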
def _import_RSA_public_key(self, label, label_length, id, id_length, pkey,
cka_copyable, cka_derive, cka_encrypt,
cka_modifiable, cka_private, cka_trusted,
cka_verify, cka_verify_recover, cka_wrap):
"""
Import RSA public key
"""
class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY)
keyType_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA)
cka_token = true_ptr
if not isinstance(pkey, rsa.RSAPublicKey):
raise Error("Required RSA public key")
rsa_ = pkey.public_numbers()
# convert BIGNUM to binary array
modulus = new_array(CK_BYTE, int_to_bytes(rsa_.n))
modulus_len = sizeof(modulus) - 1
if modulus_len == 0:
raise Error("import_RSA_public_key: BN_bn2bin modulus error")
exponent = new_array(CK_BYTE, int_to_bytes(rsa_.e))
exponent_len = sizeof(exponent) - 1
if exponent_len == 0:
raise Error("import_RSA_public_key: BN_bn2bin exponent error")
template = new_array(CK_ATTRIBUTE, (
(CKA_ID, id, id_length),
(CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
(CKA_KEY_TYPE, keyType_ptr, sizeof(CK_KEY_TYPE)),
(CKA_TOKEN, cka_token, sizeof(CK_BBOOL)),
(CKA_LABEL, label, label_length),
(CKA_MODULUS, modulus, modulus_len),
(CKA_PUBLIC_EXPONENT, exponent, exponent_len),
# TODO Softhsm doesn't support it
# (CKA_COPYABLE, cka_copyable, sizeof(CK_BBOOL)),
(CKA_DERIVE, cka_derive, sizeof(CK_BBOOL)),
(CKA_ENCRYPT, cka_encrypt, sizeof(CK_BBOOL)),
(CKA_MODIFIABLE, cka_modifiable, sizeof(CK_BBOOL)),
(CKA_PRIVATE, cka_private, sizeof(CK_BBOOL)),
(CKA_TRUSTED, cka_trusted, sizeof(CK_BBOOL)),
(CKA_VERIFY, cka_verify, sizeof(CK_BBOOL)),
(CKA_VERIFY_RECOVER, cka_verify_recover, sizeof(CK_BBOOL)),
(CKA_WRAP, cka_wrap, sizeof(CK_BBOOL)),
))
object_ptr = new_ptr(CK_OBJECT_HANDLE)
rv = self.p11.C_CreateObject(self.session, template,
(sizeof(template) //
sizeof(CK_ATTRIBUTE)), object_ptr)
check_return_value(rv, "create public key object")
return object_ptr[0]
def import_public_key(self, label, id, data, cka_copyable=True,
cka_derive=False, cka_encrypt=False,
cka_modifiable=True, cka_private=True,
cka_trusted=False, cka_verify=True,
cka_verify_recover=True, cka_wrap=False):
"""
Import RSA public key
"""
if isinstance(id, unicode):
id = id.encode()
if isinstance(data, unicode):
data = data.encode()
label_unicode = label
id_ = new_array(CK_BYTE, id)
id_length = len(id)
attrs_pub = (
cka_copyable,
cka_derive,
cka_encrypt,
cka_modifiable,
cka_private,
cka_trusted,
cka_verify,
cka_verify_recover,
cka_wrap,
)
label, label_length = unicode_to_char_array(label_unicode)
if self._id_exists(id_, id_length, CKO_PUBLIC_KEY):
raise DuplicationError("Public key with same ID already exists")
# Process keyword boolean arguments
(cka_copyable_ptr, cka_derive_ptr, cka_encrypt_ptr, cka_modifiable_ptr,
cka_private_ptr, cka_trusted_ptr, cka_verify_ptr,
cka_verify_recover_ptr, cka_wrap_ptr,) = convert_py2bool(attrs_pub)
# decode from ASN1 DER
try:
pkey = serialization.load_der_public_key(data, default_backend())
except Exception:
raise Error("import_public_key: d2i_PUBKEY error")
if isinstance(pkey, rsa.RSAPublicKey):
ret = self._import_RSA_public_key(label, label_length, id_,
id_length, pkey,
cka_copyable_ptr,
cka_derive_ptr,
cka_encrypt_ptr,
cka_modifiable_ptr,
cka_private_ptr,
cka_trusted_ptr,
cka_verify_ptr,
cka_verify_recover_ptr,
cka_wrap_ptr)
elif isinstance(pkey, dsa.DSAPublicKey):
raise Error("DSA is not supported")
elif isinstance(pkey, ec.EllipticCurvePublicKey):
raise Error("EC is not supported")
else:
raise Error("Unsupported key type")
return ret
def export_wrapped_key(self, key, wrapping_key, wrapping_mech):
"""
Export wrapped key
"""
object_key = key
object_wrapping_key = wrapping_key
wrapped_key_len_ptr = new_ptr(CK_ULONG, 0)
wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0))
        # currently we don't support parameters in the mechanism
# TODO check long overflow
# TODO export method
# fill mech parameters
_set_wrapping_mech_parameters(wrapping_mech_ptr.mechanism,
wrapping_mech_ptr)
rv = self.p11.C_WrapKey(self.session, wrapping_mech_ptr,
object_wrapping_key, object_key, NULL,
wrapped_key_len_ptr)
check_return_value(rv, "key wrapping: get buffer length")
wrapped_key = new_array(CK_BYTE, wrapped_key_len_ptr[0])
rv = self.p11.C_WrapKey(self.session, wrapping_mech_ptr,
object_wrapping_key, object_key, wrapped_key,
wrapped_key_len_ptr)
check_return_value(rv, "key wrapping: wrapping")
result = string_to_pybytes_or_none(wrapped_key, wrapped_key_len_ptr[0])
return result
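    # Illustrative note (assumption): the wrapped blob returned above is meant to
    # be re-imported with import_wrapped_secret_key() or
    # import_wrapped_private_key(), using the same wrapping mechanism constant
    # (for example MECH_AES_KEY_WRAP_PAD or MECH_RSA_PKCS_OAEP).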
def import_wrapped_secret_key(self, label, id, data, unwrapping_key,
wrapping_mech, key_type, cka_copyable=True,
cka_decrypt=False, cka_derive=False,
cka_encrypt=False, cka_extractable=True,
cka_modifiable=True, cka_private=True,
cka_sensitive=True, cka_sign=False,
cka_unwrap=True, cka_verify=False,
cka_wrap=True, cka_wrap_with_trusted=False):
"""
Import wrapped secret key
"""
if isinstance(id, unicode):
id = id.encode()
if isinstance(data, unicode):
data = data.encode()
wrapped_key = new_array(CK_BYTE, data)
wrapped_key_len = len(data)
unwrapping_key_object = unwrapping_key
unwrapped_key_object_ptr = new_ptr(CK_OBJECT_HANDLE, 0)
label_unicode = label
id_ = new_array(CK_BYTE, id)
id_length = len(id)
wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0))
key_class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_SECRET_KEY)
key_type_ptr = new_ptr(CK_KEY_TYPE, key_type)
attrs = (
cka_copyable,
cka_decrypt,
cka_derive,
cka_encrypt,
cka_extractable,
cka_modifiable,
cka_private,
cka_sensitive,
cka_sign,
cka_unwrap,
cka_verify,
cka_wrap,
cka_wrap_with_trusted,
)
_set_wrapping_mech_parameters(wrapping_mech_ptr.mechanism,
wrapping_mech_ptr)
label, label_length = unicode_to_char_array(label_unicode)
if self._id_exists(id_, id_length, key_class_ptr[0]):
raise DuplicationError("Secret key with same ID already exists")
# Process keyword boolean arguments
(_cka_copyable_ptr, cka_decrypt_ptr, cka_derive_ptr, cka_encrypt_ptr,
cka_extractable_ptr, cka_modifiable_ptr, cka_private_ptr,
cka_sensitive_ptr, cka_sign_ptr, cka_unwrap_ptr, cka_verify_ptr,
cka_wrap_ptr, cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs)
template = new_array(CK_ATTRIBUTE, (
(CKA_CLASS, key_class_ptr, sizeof(CK_OBJECT_CLASS)),
(CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
(CKA_ID, id_, id_length),
(CKA_LABEL, label, label_length),
(CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
# TODO Softhsm doesn't support it
# (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)),
(CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)),
(CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)),
(CKA_ENCRYPT, cka_encrypt_ptr, sizeof(CK_BBOOL)),
(CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)),
(CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)),
(CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)),
(CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)),
(CKA_VERIFY, cka_verify_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP, cka_wrap_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr,
sizeof(CK_BBOOL)),
))
rv = self.p11.C_UnwrapKey(self.session, wrapping_mech_ptr,
unwrapping_key_object, wrapped_key,
wrapped_key_len, template,
sizeof(template) // sizeof(CK_ATTRIBUTE),
unwrapped_key_object_ptr)
check_return_value(rv, "import_wrapped_key: key unwrapping")
return unwrapped_key_object_ptr[0]
def import_wrapped_private_key(self, label, id, data, unwrapping_key,
wrapping_mech, key_type,
cka_always_authenticate=False,
cka_copyable=True, cka_decrypt=False,
cka_derive=False, cka_extractable=True,
cka_modifiable=True, cka_private=True,
cka_sensitive=True, cka_sign=True,
cka_sign_recover=True, cka_unwrap=False,
cka_wrap_with_trusted=False):
"""
Import wrapped private key
"""
if isinstance(id, unicode):
id = id.encode()
if isinstance(data, unicode):
data = data.encode()
wrapped_key = new_array(CK_BYTE, data)
wrapped_key_len = len(data)
unwrapping_key_object = unwrapping_key
unwrapped_key_object_ptr = new_ptr(CK_OBJECT_HANDLE, 0)
label_unicode = label
id_ = new_array(CK_BYTE, id)
id_length = len(id)
wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0))
key_class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PRIVATE_KEY)
key_type_ptr = new_ptr(CK_KEY_TYPE, key_type)
attrs_priv = (
cka_always_authenticate,
cka_copyable,
cka_decrypt,
cka_derive,
cka_extractable,
cka_modifiable,
cka_private,
cka_sensitive,
cka_sign,
cka_sign_recover,
cka_unwrap,
cka_wrap_with_trusted,
)
label, label_length = unicode_to_char_array(label_unicode)
if self._id_exists(id_, id_length, CKO_SECRET_KEY):
raise DuplicationError("Secret key with same ID already exists")
# Process keyword boolean arguments
(cka_always_authenticate_ptr, _cka_copyable_ptr, cka_decrypt_ptr,
cka_derive_ptr, cka_extractable_ptr, cka_modifiable_ptr,
cka_private_ptr, cka_sensitive_ptr, cka_sign_ptr,
_cka_sign_recover_ptr, cka_unwrap_ptr, cka_wrap_with_trusted_ptr,
) = convert_py2bool(attrs_priv)
template = new_array(CK_ATTRIBUTE, (
(CKA_CLASS, key_class_ptr, sizeof(CK_OBJECT_CLASS)),
(CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
(CKA_ID, id_, id_length),
(CKA_LABEL, label, label_length),
(CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
(CKA_ALWAYS_AUTHENTICATE, cka_always_authenticate_ptr,
sizeof(CK_BBOOL)),
# TODO Softhsm doesn't support it
# (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)),
(CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)),
(CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)),
(CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)),
(CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)),
(CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)),
(CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN_RECOVER, cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr,
sizeof(CK_BBOOL)),
))
rv = self.p11.C_UnwrapKey(self.session, wrapping_mech_ptr,
unwrapping_key_object, wrapped_key,
wrapped_key_len, template,
sizeof(template) // sizeof(CK_ATTRIBUTE),
unwrapped_key_object_ptr)
check_return_value(rv, "import_wrapped_key: key unwrapping")
return unwrapped_key_object_ptr[0]
def set_attribute(self, key_object, attr, value):
"""
Set object attributes
"""
object = key_object
attribute_ptr = new_ptr(CK_ATTRIBUTE)
attribute_ptr.type = attr
if attr in (CKA_ALWAYS_AUTHENTICATE,
CKA_ALWAYS_SENSITIVE,
CKA_COPYABLE,
CKA_ENCRYPT,
CKA_EXTRACTABLE,
CKA_DECRYPT,
CKA_DERIVE,
CKA_LOCAL,
CKA_MODIFIABLE,
CKA_NEVER_EXTRACTABLE,
CKA_PRIVATE,
CKA_SENSITIVE,
CKA_SIGN,
CKA_SIGN_RECOVER,
CKA_TOKEN,
CKA_TRUSTED,
CKA_UNWRAP,
CKA_VERIFY,
CKA_VERIFY_RECOVER,
CKA_WRAP,
CKA_WRAP_WITH_TRUSTED):
attribute_ptr.pValue = true_ptr if value else false_ptr
attribute_ptr.ulValueLen = sizeof(CK_BBOOL)
elif attr == CKA_ID:
if not isinstance(value, bytes):
raise Error("Bytestring value expected")
attribute_ptr.pValue = new_array(CK_BYTE, value)
attribute_ptr.ulValueLen = len(value)
elif attr == CKA_LABEL:
if not isinstance(value, unicode):
raise Error("Unicode value expected")
label, label_length = unicode_to_char_array(value)
attribute_ptr.pValue = label
attribute_ptr.ulValueLen = label_length
elif attr == CKA_KEY_TYPE:
if not isinstance(value, int):
raise Error("Integer value expected")
attribute_ptr.pValue = new_ptr(unsigned_long, value)
attribute_ptr.ulValueLen = sizeof(unsigned_long)
else:
raise Error("Unknown attribute")
template = new_array(CK_ATTRIBUTE, (attribute_ptr[0],))
rv = self.p11.C_SetAttributeValue(self.session, object, template,
(sizeof(template) //
sizeof(CK_ATTRIBUTE)))
check_return_value(rv, "set_attribute")
def get_attribute(self, key_object, attr):
object = key_object
attribute_ptr = new_ptr(CK_ATTRIBUTE)
attribute_ptr.type = attr
attribute_ptr.pValue = NULL_PTR
attribute_ptr.ulValueLen = 0
template = new_array(CK_ATTRIBUTE, (attribute_ptr[0],))
rv = self.p11.C_GetAttributeValue(self.session, object, template,
(sizeof(template) //
sizeof(CK_ATTRIBUTE)))
if rv == CKR_ATTRIBUTE_TYPE_INVALID or template[0].ulValueLen == -1:
raise NotFound("attribute does not exist")
check_return_value(rv, "get_attribute init")
value = new_array(unsigned_char, template[0].ulValueLen)
template[0].pValue = value
rv = self.p11.C_GetAttributeValue(self.session, object, template,
(sizeof(template) //
sizeof(CK_ATTRIBUTE)))
check_return_value(rv, "get_attribute")
if attr in (CKA_ALWAYS_AUTHENTICATE,
CKA_ALWAYS_SENSITIVE,
CKA_COPYABLE,
CKA_ENCRYPT,
CKA_EXTRACTABLE,
CKA_DECRYPT,
CKA_DERIVE,
CKA_LOCAL,
CKA_MODIFIABLE,
CKA_NEVER_EXTRACTABLE,
CKA_PRIVATE,
CKA_SENSITIVE,
CKA_SIGN,
CKA_SIGN_RECOVER,
CKA_TOKEN,
CKA_TRUSTED,
CKA_UNWRAP,
CKA_VERIFY,
CKA_VERIFY_RECOVER,
CKA_WRAP,
CKA_WRAP_WITH_TRUSTED):
ret = bool(_ffi.cast(_ffi.getctype(CK_BBOOL, '*'), value)[0])
elif attr == CKA_LABEL:
ret = char_array_to_unicode(value, template[0].ulValueLen)
elif attr in (CKA_MODULUS, CKA_PUBLIC_EXPONENT, CKA_ID):
ret = string_to_pybytes_or_none(value, template[0].ulValueLen)
elif attr == CKA_KEY_TYPE:
ret = _ffi.cast(_ffi.getctype(unsigned_long, '*'), value)[0]
else:
raise Error("Unknown attribute")
return ret
# Key Classes
KEY_CLASS_PUBLIC_KEY = CKO_PUBLIC_KEY
KEY_CLASS_PRIVATE_KEY = CKO_PRIVATE_KEY
KEY_CLASS_SECRET_KEY = CKO_SECRET_KEY
# Key types
KEY_TYPE_RSA = CKK_RSA
KEY_TYPE_AES = CKK_AES
# Wrapping mech type
MECH_RSA_PKCS = CKM_RSA_PKCS
MECH_RSA_PKCS_OAEP = CKM_RSA_PKCS_OAEP
MECH_AES_KEY_WRAP = CKM_AES_KEY_WRAP
MECH_AES_KEY_WRAP_PAD = CKM_AES_KEY_WRAP_PAD
def gen_key_id(key_id_len=16):
"""
Generate random softhsm KEY_ID
:param key_id_len: this should be 16
:return: random softhsm KEY_ID in bytes representation
"""
return struct.pack(
"B" * key_id_len, # key_id must be bytes
*(random.randint(0, 255) for _ in range(key_id_len))
)
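# Illustrative note (assumption): gen_key_id() returns 16 random bytes that are
# used as the CKA_ID of newly generated keys and can later be passed to
# P11_Helper.find_keys(id=...) to look the key up again.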
def generate_master_key(p11, keylabel=u"dnssec-master", key_length=16,
disable_old_keys=True):
assert isinstance(p11, P11_Helper)
key_id = None
while True:
        # check if a key with this ID exists in LDAP or SoftHSM
        # the id is 16 bytes long
key_id = gen_key_id()
keys = p11.find_keys(KEY_CLASS_SECRET_KEY,
label=keylabel,
id=key_id)
if not keys:
break # we found unique id
p11.generate_master_key(keylabel,
key_id,
key_length=key_length,
cka_wrap=True,
cka_unwrap=True)
if disable_old_keys:
# set CKA_WRAP=False for old master keys
master_keys = p11.find_keys(KEY_CLASS_SECRET_KEY,
label=keylabel,
cka_wrap=True)
for handle in master_keys:
# don't disable wrapping for new key
# compare IDs not handle
if key_id != p11.get_attribute(handle, CKA_ID):
p11.set_attribute(handle, CKA_WRAP, False)
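# Illustrative usage sketch (assumption; the P11_Helper constructor is not shown
# in this excerpt, so its arguments are placeholders):
#   p11 = ...  # an initialized P11_Helper session against a SoftHSM token
#   generate_master_key(p11, keylabel=u"dnssec-master", key_length=16)
# generates a fresh wrapping master key with a unique CKA_ID and clears CKA_WRAP
# on any older master keys carrying the same label.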
|
encukou/freeipa
|
ipaserver/p11helper.py
|
Python
|
gpl-3.0
| 65,253
|
[
"CRYSTAL"
] |
c3be86b935d3e9f5f5d6b41590260af362c55c0a7bf84d52cd78a68385be53db
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest
short_description: Manages virtual machines in vCenter
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
modify various virtual machine components like network, disk, customization etc.,
rename a virtual machine and remove a virtual machine with associated components.
version_added: '2.2'
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
requirements:
- python >= 2.6
- PyVmomi
notes:
- Please make sure that the user used for vmware_guest has the correct level of privileges.
    - For example, the following is the list of minimum privileges required by users to create virtual machines.
- " DataStore > Allocate Space"
- " Virtual Machine > Configuration > Add New Disk"
- " Virtual Machine > Configuration > Add or Remove Device"
- " Virtual Machine > Inventory > Create New"
- " Network > Assign Network"
- " Resource > Assign Virtual Machine to Resource Pool"
- "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
- Tested on vSphere 5.5, 6.0, 6.5 and 6.7
    - Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller
- "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
options:
state:
description:
- Specify the state the virtual machine should be in.
- 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
configurations conforms to task arguments.'
- 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
is removed with its associated components.'
- 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and the virtual machine does not exist, then the virtual machine is deployed with the given parameters.'
- 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
then the specified virtual machine is powered on.'
- 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
then the specified virtual machine is powered off.'
- 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
- 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
- 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
- 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
default: present
choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
    - 'If multiple virtual machines with the same name exist, then C(folder) is a required parameter to
identify uniqueness of the virtual machine.'
- This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and the virtual machine does not exist.
- This parameter is case sensitive.
required: yes
name_match:
description:
    - If multiple virtual machines match the name, use the first or last found.
default: 'first'
choices: [ first, last ]
uuid:
description:
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
- This is required if C(name) is not supplied.
    - If the virtual machine does not exist, then this parameter is ignored.
- Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
use_instance_uuid:
description:
- Whether to use the VMWare instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
template:
description:
- Template or existing virtual machine used to create new virtual machine.
- If this value is not set, virtual machine is created without using a template.
- If the virtual machine already exists, this parameter will be ignored.
- This parameter is case sensitive.
- You can also specify template or VM UUID for identifying source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as UUID value.
- From version 2.8 onwards, absolute path to virtual machine or template can be used.
aliases: [ 'template_src' ]
is_template:
description:
- Flag the instance as a template.
- This will mark the given virtual machine as template.
default: 'no'
type: bool
version_added: '2.3'
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
- This parameter is required, while deploying new virtual machine. version_added 2.5.
    - 'If multiple machines are found with the same name, this parameter is used to identify
uniqueness of the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
hardware:
description:
- Manage virtual machine's hardware attributes.
- All parameters case sensitive.
- 'Valid attributes are:'
- ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
- ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
version_added: 2.5'
- ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
- ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
- ' - C(num_cpus) (integer): Number of CPUs.'
    - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) should be a multiple of this value.'
- ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
- ' - C(memory_reservation) (integer): Amount of memory in MB to set resource limits for memory. version_added: 2.5'
- " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
will always be equal to the virtual machine's memory size. version_added: 2.5"
- ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
version_added: 2.5.'
- ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
version_added: 2.5'
- ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
machine. Unit is MB. version_added: 2.5'
- ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
version_added: 2.5'
- ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
Unit is MHz. version_added: 2.5'
- ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
Please check VMware documentation for correct virtual machine hardware version.
Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
version then no action is taken. version_added: 2.6'
- ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
Allowed values are "bios" and "efi". version_added: 2.7'
- ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10.
(Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)'
guest_id:
description:
- Set the guest ID.
- This parameter is case sensitive.
- 'Examples:'
- " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
    - " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
- " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
- This field is required when creating a virtual machine.
- >
Valid values are referenced here:
U(http://pubs.vmware.com/vsphere-6-5/topic/com.vmware.wssdk.apiref.doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
version_added: '2.3'
disk:
description:
- A list of disks to add.
- This parameter is case sensitive.
- Shrinking disks is not supported.
- Removing existing disks of the virtual machine is not supported.
- 'Valid attributes are:'
- ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
- ' Default: C(None) thick disk, no eagerzero.'
- ' - C(datastore) (string): The name of datastore which will be used for the disk. If C(autoselect_datastore) is set to True,
      then the least used datastore whose name contains this "disk.datastore" string will be selected.'
    - ' - C(filename) (string): Existing disk image to be used. The file must already exist on the datastore.'
- ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.'
    - ' - C(autoselect_datastore) (bool): select the least used datastore. "disk.datastore" and "disk.autoselect_datastore"
will not be used if C(datastore) is specified outside this C(disk) configuration.'
- ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
- ' - Available options are :'
- ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
- ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
- ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
cdrom:
description:
- A CD-ROM configuration for the virtual machine.
- 'Valid attributes are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM will be disconnected but present.'
- ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso). Required if type is set C(iso).'
version_added: '2.5'
resource_pool:
description:
- Use the given resource pool for virtual machine operation.
- This parameter is case sensitive.
- Resource pool should be child of the selected host parent.
version_added: '2.3'
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the virtual machine.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
- "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
default: 'no'
type: bool
wait_for_customization:
description:
- Wait until vCenter detects all guest customizations as successfully completed.
- When enabled, the VM will automatically be powered on.
default: 'no'
type: bool
version_added: '2.8'
state_change_timeout:
description:
- If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
snapshot_src:
description:
- Name of the existing snapshot to use to create a clone of a virtual machine.
- This parameter is case sensitive.
- While creating linked clone using C(linked_clone) parameter, this parameter is required.
version_added: '2.4'
linked_clone:
description:
- Whether to create a linked clone from the snapshot specified.
- If specified, then C(snapshot_src) is required parameter.
default: 'no'
type: bool
version_added: '2.4'
force:
description:
- Ignore warnings and complete the actions.
- This parameter is useful while removing virtual machine which is powered on state.
- 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
      This is specifically the case for removing a powered-on virtual machine when C(state) is set to C(absent).'
default: 'no'
type: bool
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
cluster:
description:
- The cluster name where the virtual machine will run.
- This is a required parameter, if C(esxi_hostname) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
version_added: '2.3'
esxi_hostname:
description:
- The ESXi hostname where the virtual machine will run.
- This is a required parameter, if C(cluster) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
annotation:
description:
- A note or annotation to include in the virtual machine.
version_added: '2.3'
customvalues:
description:
- Define a list of custom values to set on virtual machine.
- A custom value object takes two fields C(key) and C(value).
- Incorrect key and values will be ignored.
version_added: '2.3'
networks:
description:
- A list of networks (in the order of the NICs).
- Removing NICs is not allowed, while reconfiguring the virtual machine.
    - All parameters and VMware object names are case sensitive.
- 'One of the below parameters is required per entry:'
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
- ' - C(vlan) (integer): VLAN number for this interface.'
- 'Optional parameters per entry (used for virtual hardware):'
- ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
- ' - C(mac) (string): Customize MAC address.'
- ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
This value is required if multiple distributed portgroups exists with the same name. version_added 2.7'
- ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on. version_added: 2.5'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
- ' - C(ip) (string): Static IP address (implies C(type: static)).'
- ' - C(netmask) (string): Static netmask required for C(ip).'
- ' - C(gateway) (string): Static gateway.'
- ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
- ' - C(domain) (string): Domain name for this network interface (Windows).'
- ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
- ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
version_added: '2.3'
customization:
description:
    - Parameters for OS customization, used when cloning from the template or the virtual machine, or applied to the existing virtual machine directly.
- Not all operating systems are supported for customization with respective vCenter version,
please check VMware documentation for respective OS customization.
- For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
- All parameters and VMware object names are case sensitive.
- Linux based OSes requires Perl package to be installed for OS customizations.
- 'Common parameters (Linux/Windows):'
- ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly.
If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8'
- ' - C(dns_servers) (list): List of DNS servers to configure.'
- ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
- ' - C(domain) (string): DNS domain name to use.'
    - ' - C(hostname) (string): Computer hostname (default: shortened C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
and minus, rest of the characters are dropped as per RFC 952.'
- 'Parameters related to Windows customization:'
- ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
- ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
- ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(fullname) (string): Server owner name (default: Administrator).'
- ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
- ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
- ' - C(orgname) (string): Organisation name (default: ACME).'
- ' - C(password) (string): Local administrator password.'
- ' - C(productid) (string): Product ID.'
- ' - C(runonce) (list): List of commands to run at first user logon.'
- ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
version_added: '2.3'
vapp_properties:
description:
- A list of vApp properties
- 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)'
- 'Basic attributes are:'
- ' - C(id) (string): Property id - required.'
- ' - C(value) (string): Property value.'
- ' - C(type) (string): Value type, string type by default.'
- ' - C(operation): C(remove): This attribute is required only when removing properties.'
version_added: '2.6'
customization_spec:
description:
- Unique name identifying the requested customization specification.
- This parameter is case sensitive.
- If set, then overrides C(customization) parameter values.
version_added: '2.6'
datastore:
description:
- Specify datastore or datastore cluster to provision virtual machine.
- 'This parameter takes precedence over "disk.datastore" parameter.'
- 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed
from the template.'
- Please see example for more usage.
version_added: '2.7'
convert:
description:
- Specify convert disk type while cloning template or virtual machine.
choices: [ thin, thick, eagerzeroedthick ]
version_added: '2.8'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create a virtual machine on given ESXi hostname
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /DC1/vm/
name: test_vm_0001
state: poweredon
guest_id: centos64Guest
# This is hostname of particular ESXi server on which user wants VM to be deployed
esxi_hostname: "{{ esxi_hostname }}"
disk:
- size_gb: 10
type: thin
datastore: datastore1
hardware:
memory_mb: 512
num_cpus: 4
scsi: paravirtual
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
ip: 10.10.10.100
netmask: 255.255.255.0
device_type: vmxnet3
wait_for_ip_address: yes
delegate_to: localhost
register: deploy_vm
- name: Create a virtual machine from a template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: template_el7
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
hardware:
memory_mb: 512
num_cpus: 6
num_cpu_cores_per_socket: 3
scsi: paravirtual
memory_reservation: 512
memory_reservation_lock: True
mem_limit: 8096
mem_reservation: 4096
cpu_limit: 8096
cpu_reservation: 4096
max_connections: 5
hotadd_cpu: True
hotremove_cpu: True
hotadd_memory: False
version: 12 # Hardware version of virtual machine
boot_firmware: "efi"
cdrom:
type: iso
iso_path: "[datastore1] livecd.iso"
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Clone a virtual machine from Windows template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: aa:bb:dd:aa:00:14
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
- vlan: 1234
type: dhcp
customization:
autologon: yes
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Clone a virtual machine from Linux template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
state: present
folder: /DC1/vm
template: "{{ template }}"
name: "{{ vm_name }}"
cluster: DC1_C1
networks:
- name: VM Network
ip: 192.168.10.11
netmask: 255.255.255.0
wait_for_ip_address: True
customization:
domain: "{{ guest_domain }}"
dns_servers:
- 8.9.9.9
- 7.8.8.9
dns_suffix:
- example.com
- example2.com
delegate_to: localhost
- name: Rename a virtual machine (requires the virtual machine's uuid)
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
name: new_name
state: present
delegate_to: localhost
- name: Remove a virtual machine by uuid
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: absent
delegate_to: localhost
- name: Manipulate vApp properties
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
state: present
vapp_properties:
- id: remoteIP
category: Backup
label: Backup server IP
type: str
value: 10.10.10.1
- id: old_property
operation: remove
delegate_to: localhost
- name: Set powerstate of a virtual machine to poweroff by using UUID
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: poweredoff
delegate_to: localhost
- name: Deploy a virtual machine in a datastore different from the datastore of the template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: "{{ vm_name }}"
state: present
template: "{{ template_name }}"
# Here datastore can be different which holds template
datastore: "{{ virtual_machine_datastore }}"
hardware:
memory_mb: 512
num_cpus: 2
scsi: paravirtual
delegate_to: localhost
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import re
import time
import string
HAS_PYVMOMI = False
try:
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
pass
from random import randint
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
compile_folder_path_for_object, serialize_spec,
vmware_argument_spec, set_vm_power_state, PyVmomi,
find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip,
wait_for_task, TaskError)
class PyVmomiDeviceHelper(object):
    """ This class is a helper to easily create VMware objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
self.scsi_device_type = {
'lsilogic': vim.vm.device.VirtualLsiLogicController,
'paravirtual': vim.vm.device.ParaVirtualSCSIController,
'buslogic': vim.vm.device.VirtualBusLogicController,
'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
}
def create_scsi_controller(self, scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
scsi_ctl.device = scsi_device()
scsi_ctl.device.busNumber = 0
        # While creating a new SCSI controller, the temporary key value
        # should be a unique negative integer
scsi_ctl.device.key = -randint(1000, 9999)
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
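    # Illustrative note (assumption, not from the upstream docs): the returned
    # VirtualDeviceSpec is appended to the reconfiguration spec's deviceChange
    # list by the caller; vCenter replaces the temporary negative key with a
    # real device key when the spec is applied.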
def is_scsi_controller(self, device):
return isinstance(device, tuple(self.scsi_device_type.values()))
@staticmethod
def create_ide_controller():
ide_ctl = vim.vm.device.VirtualDeviceSpec()
ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_ctl.device = vim.vm.device.VirtualIDEController()
ide_ctl.device.deviceInfo = vim.Description()
        # While creating a new IDE controller, the temporary key value
        # should be a unique negative integer
ide_ctl.device.key = -randint(200, 299)
ide_ctl.device.busNumber = 0
return ide_ctl
@staticmethod
def create_cdrom(ide_ctl, cdrom_type, iso_path=None):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cdrom_spec.device = vim.vm.device.VirtualCdrom()
cdrom_spec.device.controllerKey = ide_ctl.device.key
cdrom_spec.device.key = -1
cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_spec.device.connectable.allowGuestControl = True
cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
if cdrom_type in ["none", "client"]:
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_type == "iso":
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
return cdrom_spec
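    # Illustrative usage sketch (assumption): an ISO-backed CD-ROM spec would be
    # built as
    #   create_cdrom(ide_ctl, "iso", iso_path="[datastore1] path/to/file.iso")
    # and appended by the caller to the virtual machine's reconfiguration spec.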
@staticmethod
def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
if cdrom_type == "none":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
not cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
elif cdrom_type == "client":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
elif cdrom_type == "iso":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
cdrom_device.backing.fileName == iso_path and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
def create_scsi_disk(self, scsi_ctl, disk_index=None):
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.controllerKey = scsi_ctl.device.key
        # Unit number 7 is reserved for the SCSI controller itself and must not
        # be assigned to a disk.
        if self.next_disk_unit_number == 7:
            raise AssertionError("disk unit number 7 is reserved for the SCSI controller")
        if disk_index == 7:
            raise AssertionError("disk unit number 7 is reserved for the SCSI controller")
        # Configure the disk unit number.
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
        # unit number 7 is reserved for the SCSI controller, increase next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
return diskspec
def get_device(self, device_type, name):
nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
vmxnet2=vim.vm.device.VirtualVmxnet2(),
vmxnet3=vim.vm.device.VirtualVmxnet3(),
e1000=vim.vm.device.VirtualE1000(),
e1000e=vim.vm.device.VirtualE1000e(),
sriov=vim.vm.device.VirtualSriovEthernetCard(),
)
if device_type in nic_dict:
return nic_dict[device_type]
else:
self.module.fail_json(msg='Invalid device_type "%s"'
' for network "%s"' % (device_type, name))
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
nic.device = self.get_device(device_type, device_infos['name'])
nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
nic.device.connectable.connected = True
if 'mac' in device_infos and self.is_valid_mac_addr(device_infos['mac']):
nic.device.addressType = 'manual'
nic.device.macAddress = device_infos['mac']
else:
nic.device.addressType = 'generated'
return nic
@staticmethod
def is_valid_mac_addr(mac_addr):
"""
Function to validate MAC address for given string
Args:
mac_addr: string to validate as MAC address
Returns: (Boolean) True if string is valid MAC address, otherwise False
"""
mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
return bool(mac_addr_regex.match(mac_addr))
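    # Illustrative note (assumption): the pattern above only accepts lowercase,
    # colon- or dash-separated addresses, e.g. is_valid_mac_addr('aa:bb:cc:dd:ee:ff')
    # returns True while 'AA:BB:CC:DD:EE:FF' is rejected.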
def integer_value(self, input_value, name):
"""
        Function to return an int value for the given input, else fail with an error
        Args:
            input_value: Input value to retrieve the int value from
            name: Name of the input value (used to build the error message)
        Returns: (int) if an integer value can be obtained, otherwise fails with an error message.
"""
if isinstance(input_value, int):
return input_value
elif isinstance(input_value, str) and input_value.isdigit():
return int(input_value)
else:
self.module.fail_json(msg='"%s" attribute should be an'
' integer value.' % name)
class PyVmomiCache(object):
    """ This class caches references to objects which are requested multiple times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
self.parent_datacenters = {}
def find_obj(self, content, types, name, confine_to_datacenter=True):
""" Wrapper around find_obj to set datacenter context """
result = find_obj(content, types, name)
if result and confine_to_datacenter:
if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
result = None
objects = self.get_all_objs(content, types, confine_to_datacenter=True)
for obj in objects:
if name is None or to_text(obj.name) == to_text(name):
return obj
return result
def get_all_objs(self, content, types, confine_to_datacenter=True):
""" Wrapper around get_all_objs to set datacenter context """
objects = get_all_objs(content, types)
if confine_to_datacenter:
if hasattr(objects, 'items'):
# resource pools come back as a dictionary
# make a copy
tmpobjs = objects.copy()
for k, v in objects.items():
parent_dc = self.get_parent_datacenter(k)
if parent_dc.name != self.dc_name:
tmpobjs.pop(k, None)
objects = tmpobjs
else:
# everything else should be a list
objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
return objects
def get_network(self, network):
if network not in self.networks:
self.networks[network] = self.find_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
    def get_parent_datacenter(self, obj):
        """ Walk the parent tree to find the object's datacenter """
if isinstance(obj, vim.Datacenter):
return obj
if obj in self.parent_datacenters:
return self.parent_datacenters[obj]
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
self.parent_datacenters[obj] = datacenter
return datacenter
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.device_helper = PyVmomiDeviceHelper(self.module)
self.configspec = None
self.change_detected = False # a change was detected and needs to be applied through reconfiguration
self.change_applied = False # a change was applied meaning at least one task succeeded
self.customspec = None
self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
if vm.summary.runtime.powerState.lower() == 'poweredon':
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
"please use 'force' parameter to remove or poweroff VM "
"and try removing VM again." % vm.name)
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
else:
return {'changed': self.change_applied, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.params['template'] and not self.params['guest_id']:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if self.params['guest_id'] and \
(vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_resource_alloc_info(self, vm_obj):
"""
Function to configure resource allocation information about virtual machine
:param vm_obj: VM object in case of reconfigure, None in case of deploy
:return: None
"""
rai_change_detected = False
memory_allocation = vim.ResourceAllocationInfo()
cpu_allocation = vim.ResourceAllocationInfo()
if 'hardware' in self.params:
if 'mem_limit' in self.params['hardware']:
mem_limit = None
try:
mem_limit = int(self.params['hardware'].get('mem_limit'))
except ValueError:
self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
memory_allocation.limit = mem_limit
if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
rai_change_detected = True
if 'mem_reservation' in self.params['hardware']:
mem_reservation = None
try:
mem_reservation = int(self.params['hardware'].get('mem_reservation'))
except ValueError:
self.module.fail_json(msg="hardware.mem_reservation should be an integer value.")
memory_allocation.reservation = mem_reservation
if vm_obj is None or \
memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
rai_change_detected = True
if 'cpu_limit' in self.params['hardware']:
cpu_limit = None
try:
cpu_limit = int(self.params['hardware'].get('cpu_limit'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
cpu_allocation.limit = cpu_limit
if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
rai_change_detected = True
if 'cpu_reservation' in self.params['hardware']:
cpu_reservation = None
try:
cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
cpu_allocation.reservation = cpu_reservation
if vm_obj is None or \
cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
rai_change_detected = True
if rai_change_detected:
self.configspec.memoryAllocation = memory_allocation
self.configspec.cpuAllocation = cpu_allocation
self.change_detected = True
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
try:
num_cpus = int(self.params['hardware']['num_cpus'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
# check VM power state and cpu hot-add/hot-remove state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
"cpuHotRemove is not enabled")
if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
"cpuHotAdd is not enabled")
if 'num_cpu_cores_per_socket' in self.params['hardware']:
try:
num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
"should be an integer value.")
if num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
"of hardware.num_cpu_cores_per_socket")
self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
self.change_detected = True
self.configspec.numCPUs = num_cpus
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
            # num_cpus is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
try:
memory_mb = int(self.params['hardware']['memory_mb'])
except ValueError:
self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
" Please refer the documentation and provide"
" correct value.")
# check VM power state and memory hotadd state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
"operation is not supported")
elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="memoryHotAdd is not enabled")
self.configspec.memoryMB = memory_mb
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
if 'hotadd_memory' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']):
self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
self.change_detected = True
if 'hotadd_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']):
self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
self.change_detected = True
if 'hotremove_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']):
self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
self.change_detected = True
if 'memory_reservation' in self.params['hardware']:
memory_reservation_mb = 0
try:
memory_reservation_mb = int(self.params['hardware']['memory_reservation'])
except ValueError as e:
self.module.fail_json(msg="Failed to set memory_reservation value."
"Valid value for memory_reservation value in MB (integer): %s" % e)
mem_alloc = vim.ResourceAllocationInfo()
mem_alloc.reservation = memory_reservation_mb
self.configspec.memoryAllocation = mem_alloc
if vm_obj is None or self.configspec.memoryAllocation.reservation != vm_obj.config.memoryAllocation.reservation:
self.change_detected = True
if 'memory_reservation_lock' in self.params['hardware']:
self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
self.change_detected = True
if 'boot_firmware' in self.params['hardware']:
# boot firmware re-config can cause boot issue
if vm_obj is not None:
return
boot_firmware = self.params['hardware']['boot_firmware'].lower()
if boot_firmware not in ('bios', 'efi'):
self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
" Need one of ['bios', 'efi']." % boot_firmware)
self.configspec.firmware = boot_firmware
self.change_detected = True
def configure_cdrom(self, vm_obj):
# Configure the VM CD-ROM
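        # Illustrative shape of the expected 'cdrom' parameter (values hypothetical):
        #   cdrom:
        #     type: iso                                  # one of 'none', 'client', 'iso'
        #     iso_path: "[datastore1] iso/example.iso"   # required only when type is 'iso'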
if "cdrom" in self.params and self.params["cdrom"]:
if "type" not in self.params["cdrom"] or self.params["cdrom"]["type"] not in ["none", "client", "iso"]:
self.module.fail_json(msg="cdrom.type is mandatory")
if self.params["cdrom"]["type"] == "iso" and ("iso_path" not in self.params["cdrom"] or not self.params["cdrom"]["iso_path"]):
self.module.fail_json(msg="cdrom.iso_path is mandatory in case cdrom.type is iso")
if vm_obj and vm_obj.config.template:
# Changing CD-ROM settings on a template is not supported
return
cdrom_spec = None
cdrom_device = self.get_vm_cdrom_device(vm=vm_obj)
iso_path = self.params["cdrom"]["iso_path"] if "iso_path" in self.params["cdrom"] else None
if cdrom_device is None:
# Creating new CD-ROM
ide_device = self.get_vm_ide_device(vm=vm_obj)
if ide_device is None:
# Creating new IDE device
ide_device = self.device_helper.create_ide_controller()
self.change_detected = True
self.configspec.deviceChange.append(ide_device)
elif len(ide_device.device) > 3:
self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4 IDE devices of which none are a cdrom")
cdrom_spec = self.device_helper.create_cdrom(ide_ctl=ide_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path)
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
# Updating an existing CD-ROM
if self.params["cdrom"]["type"] in ["client", "none"]:
cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif self.params["cdrom"]["type"] == "iso":
cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_device.connectable.allowGuestControl = True
cdrom_device.connectable.startConnected = (self.params["cdrom"]["type"] != "none")
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_device.connectable.connected = (self.params["cdrom"]["type"] != "none")
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_device
if cdrom_spec:
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
def configure_hardware_params(self, vm_obj):
"""
Function to configure hardware related configuration of virtual machine
Args:
vm_obj: virtual machine object
"""
if 'hardware' in self.params:
if 'max_connections' in self.params['hardware']:
# maxMksConnections == max_connections
self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.hardware.maxMksConnections:
self.change_detected = True
if 'nested_virt' in self.params['hardware']:
self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
self.change_detected = True
if 'version' in self.params['hardware']:
hw_version_check_failed = False
temp_version = self.params['hardware'].get('version', 10)
try:
temp_version = int(temp_version)
except ValueError:
hw_version_check_failed = True
if temp_version not in range(3, 15):
hw_version_check_failed = True
if hw_version_check_failed:
self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
" values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
# Hardware version is denoted as "vmx-10"
version = "vmx-%02d" % temp_version
self.configspec.version = version
if vm_obj is None or self.configspec.version != vm_obj.config.version:
self.change_detected = True
if vm_obj is not None:
# VM exists and we need to update the hardware version
current_version = vm_obj.config.version
# current_version = "vmx-10"
version_digit = int(current_version.split("-", 1)[-1])
if temp_version < version_digit:
self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
" version '%d'. Downgrading hardware version is"
" not supported. Please specify version greater"
" than the current version." % (version_digit,
temp_version))
new_version = "vmx-%02d" % temp_version
try:
task = vm_obj.UpgradeVM_Task(new_version)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
if 'virt_based_security' in self.params['hardware']:
host_version = self.select_host().summary.config.product.version
if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7):
self.module.fail_json(msg="ESXi version %s not support VBS." % host_version)
guest_ids = ['windows9_64Guest', 'windows9Server64Guest']
if vm_obj is None:
guestid = self.configspec.guestId
else:
guestid = vm_obj.summary.config.guestId
if guestid not in guest_ids:
self.module.fail_json(msg="Guest '%s' not support VBS." % guestid)
if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \
(vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)):
self.configspec.flags = vim.vm.FlagInfo()
self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security'])
if bool(self.params['hardware']['virt_based_security']):
self.configspec.flags.vvtdEnabled = True
self.configspec.nestedHVEnabled = True
if (vm_obj is None and self.configspec.firmware == 'efi') or \
(vm_obj and vm_obj.config.firmware == 'efi'):
self.configspec.bootOptions = vim.vm.BootOptions()
self.configspec.bootOptions.efiSecureBootEnabled = True
else:
self.module.fail_json(msg="Not support VBS when firmware is BIOS.")
if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled:
self.change_detected = True
def get_device_by_type(self, vm=None, type=None):
if vm is None or type is None:
return None
for device in vm.config.hardware.device:
if isinstance(device, type):
return device
return None
def get_vm_cdrom_device(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
def get_vm_ide_device(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
def get_vm_network_interfaces(self, vm=None):
device_list = []
if vm is None:
return device_list
nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
for device in vm.config.hardware.device:
if isinstance(device, nw_device_types):
device_list.append(device)
return device_list
def sanitize_network_params(self):
"""
Sanitize user provided network provided params
Returns: A sanitized list of network params, else fails
"""
network_devices = list()
# Clean up user data here
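        # Each entry in self.params['networks'] is expected to look roughly like the
        # following (keys from the validation below, values hypothetical):
        #   {'name': 'VM Network', 'device_type': 'vmxnet3', 'type': 'static',
        #    'ip': '192.168.10.11', 'netmask': '255.255.255.0'}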
for network in self.params['networks']:
if 'name' not in network and 'vlan' not in network:
self.module.fail_json(msg="Please specify at least a network name or"
" a VLAN name under VM network list.")
if 'name' in network and self.cache.get_network(network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
elif 'vlan' in network:
dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
network['name'] = dvp.config.name
break
if 'dvswitch_name' in network and \
dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
if dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
if 'type' in network:
if network['type'] not in ['dhcp', 'static']:
self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
" Valid parameters are ['dhcp', 'static']." % network)
if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
' but "type" is set to "%(type)s".' % network)
else:
                    # 'type' is an optional parameter; if the user provided an IP or netmask,
                    # assume the network type is 'static'
if 'ip' in network or 'netmask' in network:
network['type'] = 'static'
else:
# User wants network type as 'dhcp'
network['type'] = 'dhcp'
if network.get('type') == 'static':
if 'ip' in network and 'netmask' not in network:
self.module.fail_json(msg="'netmask' is required if 'ip' is"
" specified under VM network list.")
if 'ip' not in network and 'netmask' in network:
self.module.fail_json(msg="'ip' is required if 'netmask' is"
" specified under VM network list.")
validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
if 'device_type' in network and network['device_type'] not in validate_device_types:
self.module.fail_json(msg="Device type specified '%s' is not valid."
" Please specify correct device"
" type from ['%s']." % (network['device_type'],
"', '".join(validate_device_types)))
if 'mac' in network and not PyVmomiDeviceHelper.is_valid_mac_addr(network['mac']):
self.module.fail_json(msg="Device MAC address '%s' is invalid."
" Please provide correct MAC address." % network['mac'])
network_devices.append(network)
return network_devices
def configure_network(self, vm_obj):
        # Ignore an empty network list; this permits keeping the existing networks when deploying a template/cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = self.sanitize_network_params()
# List current device for Clone or Idempotency
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
nic_change_detected = False
network_name = network_devices[key]['name']
if key < len(current_net_devices) and (vm_obj or self.params['template']):
# We are editing existing network devices, this is either when
# are cloning from VM or Template
nic = vim.vm.device.VirtualDeviceSpec()
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
nic.device = current_net_devices[key]
if ('wake_on_lan' in network_devices[key] and
nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
nic_change_detected = True
if ('start_connected' in network_devices[key] and
nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
nic.device.connectable.startConnected = network_devices[key].get('start_connected')
nic_change_detected = True
if ('allow_guest_control' in network_devices[key] and
nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
nic_change_detected = True
if nic.device.deviceInfo.summary != network_name:
nic.device.deviceInfo.summary = network_name
nic_change_detected = True
if 'device_type' in network_devices[key]:
device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
device_class = type(device)
if not isinstance(nic.device, device_class):
self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
"The failing device type is %s" % network_devices[key]['device_type'])
# Changing mac address has no effect when editing interface
if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
"The failing new MAC address is %s" % nic.device.macAddress)
else:
# Default device type is vmxnet3, VMWare best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_name), 'portKeys'):
# VDS switch
pg_obj = None
if 'dvswitch_name' in network_devices[key]:
dvs_name = network_devices[key]['dvswitch_name']
dvs_obj = find_dvs_by_name(self.content, dvs_name)
if dvs_obj is None:
self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
pg_obj = find_dvspg_by_name(dvs_obj, network_name)
if pg_obj is None:
self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
else:
pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
if (nic.device.backing and
(not hasattr(nic.device.backing, 'port') or
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
                    # If the user specifies a distributed port group that is not associated with the host system
                    # on which the virtual machine is going to be deployed, the API returns an error, so check
                    # the association between the given distributed port group and the host system up front.
host_system = self.params.get('esxi_hostname')
if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
" virtual portgroup '%s'. Please make sure host system is associated"
" with given distributed virtual portgroup" % (host_system, pg_obj.name))
# TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
# For now, check if we are able to find distributed virtual switch
if not pg_obj.config.distributedVirtualSwitch:
self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
" distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
" the given distributed virtual portgroup." % pg_obj.name)
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
# NSX-T Logical Switch
nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
nic.device.backing.opaqueNetworkId = network_id
nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_name)
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_name:
nic.device.backing.deviceName = network_name
nic_change_detected = True
if nic_change_detected:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def configure_vapp_properties(self, vm_obj):
if len(self.params['vapp_properties']) == 0:
return
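        # Each vApp property dict must carry an 'id'; 'operation' may be 'remove',
        # anything else is treated as edit/add, e.g. (hypothetical values):
        #   {'id': 'ip0', 'value': '10.0.0.5'}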
for x in self.params['vapp_properties']:
if not x.get('id'):
self.module.fail_json(msg="id is required to set vApp property")
new_vmconfig_spec = vim.vApp.VmConfigSpec()
# This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
# each property must have a unique key
# init key counter with max value + 1
all_keys = [x.key for x in orig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
for property_id, property_spec in vapp_properties_to_change.items():
is_property_changed = False
new_vapp_property_spec = vim.vApp.PropertySpec()
if property_id in vapp_properties_current:
if property_spec.get('operation') == 'remove':
new_vapp_property_spec.operation = 'remove'
new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
is_property_changed = True
else:
# this is 'edit' branch
new_vapp_property_spec.operation = 'edit'
new_vapp_property_spec.info = vapp_properties_current[property_id]
try:
for property_name, property_value in property_spec.items():
if property_name == 'operation':
# operation is not an info object property
# if set to anything other than 'remove' we don't fail
continue
# Updating attributes only if needed
if getattr(new_vapp_property_spec.info, property_name) != property_value:
setattr(new_vapp_property_spec.info, property_name, property_value)
is_property_changed = True
except Exception as e:
self.module.fail_json(msg="Failed to set vApp property field='%s' and value='%s'. Error: %s"
% (property_name, property_value, to_text(e)))
else:
if property_spec.get('operation') == 'remove':
                    # attempt to delete a non-existent property
continue
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
if new_vmconfig_spec.property:
self.configspec.vAppConfig = new_vmconfig_spec
self.change_detected = True
def customize_customvalues(self, vm_obj, config_spec):
if len(self.params['customvalues']) == 0:
return
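        # customvalues entries are plain key/value pairs written to extraConfig,
        # e.g. (hypothetical values): {'key': 'guestinfo.build', 'value': '1234'}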
vm_custom_spec = config_spec
vm_custom_spec.extraConfig = []
changed = False
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
if 'key' not in kv or 'value' not in kv:
self.module.exit_json(msg="customvalues items required both 'key' and 'value fields.")
            # If the key is missing from the gathered facts or its value differs, add it to the spec
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
option = vim.option.OptionValue()
option.key = kv['key']
option.value = kv['value']
vm_custom_spec.extraConfig.append(option)
changed = True
if changed:
self.change_detected = True
def customize_vm(self, vm_obj):
# User specified customization specification
custom_spec_name = self.params.get('customization_spec')
if custom_spec_name:
cc_mgr = self.content.customizationSpecManager
if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
self.customspec = temp_spec.spec
return
else:
self.module.fail_json(msg="Unable to find customization specification"
" '%s' in given configuration." % custom_spec_name)
# Network settings
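        # Per-NIC customization is built from the same 'networks' entries used for
        # device configuration: a static entry with ip/netmask becomes a FixedIp
        # adapter mapping below, while entries of type 'dhcp' get a DhcpIpGenerator.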
adaptermaps = []
for network in self.params['networks']:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
if 'ip' in network and 'netmask' in network:
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
elif 'type' in network and network['type'] == 'dhcp':
guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif 'domain' in self.params['customization']:
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif 'dns_servers' in self.params['customization']:
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization']['dns_servers']
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization']:
dns_suffix = self.params['customization']['dns_suffix']
if isinstance(dns_suffix, list):
globalip.dnsSuffixList = " ".join(dns_suffix)
else:
globalip.dnsSuffixList = dns_suffix
elif 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['domain']
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
# For windows guest OS, use SysPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
            # Setting hostName, orgName and fullName is mandatory, so we set defaults when they are missing
ident.userData.computerName = vim.vm.customization.FixedName()
# computer name will be truncated to 15 characters if using VM name
            # str.translate(None, ...) only works on Python 2; strip punctuation portably instead
            default_name = re.sub(r"[%s]" % re.escape(string.punctuation), '', self.params['name'])
ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15]))
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
if 'productid' in self.params['customization']:
ident.userData.productId = str(self.params['customization']['productid'])
ident.guiUnattended = vim.vm.customization.GuiUnattended()
if 'autologon' in self.params['customization']:
ident.guiUnattended.autoLogon = self.params['customization']['autologon']
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
if 'timezone' in self.params['customization']:
                # Check if timezone value is an int before proceeding.
ident.guiUnattended.timeZone = self.device_helper.integer_value(
self.params['customization']['timezone'],
'customization.timezone')
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
ident.identification.joinDomain = str(self.params['customization']['joindomain'])
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
# For Linux guest OS, use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization']['domain'])
ident.hostName = vim.vm.customization.FixedName()
hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
# Remove all characters except alphanumeric and minus which is allowed by RFC 952
valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
ident.hostName.name = valid_hostname
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
def get_vm_scsi_controller(self, vm_obj):
# If vm_obj doesn't exist there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
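            # e.g. size: "40gb" and size_gb: 40 both resolve to 40 * 1024 ** 2,
            # since the value returned by this method is always expressed in KB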
if 'size' in expected_disk_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(expected_disk_spec['size'])
try:
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
if not expected or not unit:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="Failed to parse disk size please review value"
" provided using documentation.")
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
if unit in disk_units:
unit = unit.lower()
return expected * (1024 ** disk_units[unit])
else:
self.module.fail_json(msg="%s is not a supported unit for disk size."
" Supported units are ['%s']." % (unit,
"', '".join(disk_units.keys())))
        # A disk entry was given without any size attribute, fail
        self.module.fail_json(
            msg="No size, size_kb, size_mb, size_gb or size_tb attribute found in disk configuration")
def find_vmdk(self, vmdk_path):
"""
Takes a vsphere datastore path in the format
[datastore_name] path/to/file.vmdk
Returns vsphere file object or raises RuntimeError
"""
datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder = self.vmdk_disk_path_split(vmdk_path)
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
if datastore is None:
self.module.fail_json(msg="Failed to find the datastore %s" % datastore_name)
return self.find_vmdk_file(datastore, vmdk_fullpath, vmdk_filename, vmdk_folder)
def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
"""
Adds vmdk file described by expected_disk_spec['filename'], retrieves the file
information and adds the correct spec to self.configspec.deviceChange.
"""
filename = expected_disk_spec['filename']
# if this is a new disk, or the disk file names are different
if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None:
vmdk_file = self.find_vmdk(expected_disk_spec['filename'])
diskspec.device.backing.fileName = expected_disk_spec['filename']
diskspec.device.capacityInKB = VmomiSupport.vmodlTypes['long'](vmdk_file.fileSize / 1024)
diskspec.device.key = -1
self.change_detected = True
self.configspec.deviceChange.append(diskspec)
def configure_disks(self, vm_obj):
        # Ignore an empty disk list; this permits keeping the existing disks when deploying a template/cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
        # Create a SCSI controller when deploying a new VM or when the existing VM/template has none
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
            # If we are manipulating an existing object which has disks and disk_index is within its disks
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
# increment index for next disk search
disk_index += 1
            # index 7 is reserved for the SCSI controller
if disk_index == 7:
disk_index += 1
if 'disk_mode' in expected_disk_spec:
disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
if disk_mode not in valid_disk_mode:
self.module.fail_json(msg="disk_mode specified is not valid."
" Should be one of ['%s']" % "', '".join(valid_disk_mode))
if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
diskspec.device.backing.diskMode = disk_mode
disk_modified = True
else:
diskspec.device.backing.diskMode = "persistent"
# is it thin?
if 'type' in expected_disk_spec:
disk_type = expected_disk_spec.get('type', '').lower()
if disk_type == 'thin':
diskspec.device.backing.thinProvisioned = True
elif disk_type == 'eagerzeroedthick':
diskspec.device.backing.eagerlyScrub = True
if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None:
self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl)
continue
elif vm_obj is None or self.params['template']:
# We are creating new VM or from Template
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
# which datastore?
if expected_disk_spec.get('datastore'):
# TODO: This is already handled by the relocation spec,
# but it needs to eventually be handled for all the
# other disks defined
pass
kb = self.get_configured_disk_size(expected_disk_spec)
            # VMware doesn't allow reducing disk sizes
if kb < diskspec.device.capacityInKB:
self.module.fail_json(
msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." %
(kb, diskspec.device.capacityInKB))
if kb != diskspec.device.capacityInKB or disk_modified:
diskspec.device.capacityInKB = kb
self.configspec.deviceChange.append(diskspec)
self.change_detected = True
def select_host(self):
hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
if not hostsystem:
self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)
if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode:
            self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in an invalid state or in maintenance mode.' % self.params)
return hostsystem
def autoselect_datastore(self):
datastore = None
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if ds.summary.freeSpace > datastore_freespace:
datastore = ds
datastore_freespace = ds.summary.freeSpace
return datastore
def get_recommended_datastore(self, datastore_cluster_obj=None):
"""
Function to return Storage DRS recommended datastore from datastore cluster
Args:
datastore_cluster_obj: datastore cluster managed object
Returns: Name of recommended datastore from the given datastore cluster
"""
if datastore_cluster_obj is None:
return None
# Check if Datastore Cluster provided by user is SDRS ready
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
if sdrs_status:
            # We can get a storage recommendation only if SDRS is enabled on the given datastore cluster
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
pod_sel_spec.storagePod = datastore_cluster_obj
storage_spec = vim.storageDrs.StoragePlacementSpec()
storage_spec.podSelectionSpec = pod_sel_spec
storage_spec.type = 'create'
try:
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
rec_action = rec.recommendations[0].action[0]
return rec_action.destination.name
except Exception:
# There is some error so we fall back to general workflow
pass
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
                # Fall back to the datastore with the most free space in the cluster
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
return datastore.name
return None
def select_datastore(self, vm_obj=None):
datastore = None
datastore_name = None
if len(self.params['disk']) != 0:
# TODO: really use the datastore for newly created disks
if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
# If datastore field is provided, filter destination datastores
if 'datastore' in self.params['disk'][0] and \
isinstance(self.params['disk'][0]['datastore'], str) and \
ds.name.find(self.params['disk'][0]['datastore']) < 0:
continue
datastore = ds
datastore_name = datastore.name
datastore_freespace = ds.summary.freeSpace
elif 'datastore' in self.params['disk'][0]:
datastore_name = self.params['disk'][0]['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
                    # The user specified a datastore cluster, so get the recommended datastore from it
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
if not datastore and self.params['template']:
# use the template's existing DS
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
if disks:
datastore = disks[0].backing.datastore
datastore_name = datastore.name
# validation
if datastore:
dc = self.cache.get_parent_datacenter(datastore)
if dc.name != self.params['datacenter']:
datastore = self.autoselect_datastore()
datastore_name = datastore.name
if not datastore:
if len(self.params['disk']) != 0 or self.params['template'] is None:
self.module.fail_json(msg="Unable to find the datastore with given parameters."
" This could mean, %s is a non-existent virtual machine and module tried to"
" deploy it as new virtual machine with no disk. Please specify disks parameter"
" or specify template to clone from." % self.params['name'])
self.module.fail_json(msg="Failed to find a matching datastore")
return datastore, datastore_name
def obj_has_parent(self, obj, parent):
if obj is None and parent is None:
raise AssertionError()
current_parent = obj
while True:
if current_parent.name == parent.name:
return True
# Check if we have reached till root folder
moid = current_parent._moId
if moid in ['group-d1', 'ha-folder-root']:
return False
current_parent = current_parent.parent
if current_parent is None:
return False
def get_scsi_type(self):
disk_controller_type = "paravirtual"
        # read the SCSI controller type from the hardware params, defaulting to paravirtual
if 'hardware' in self.params:
if 'scsi' in self.params['hardware']:
if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
disk_controller_type = self.params['hardware']['scsi']
else:
self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
return disk_controller_type
def find_folder(self, searchpath):
""" Walk inventory objects one position of the searchpath at a time """
# split the searchpath so we can iterate through it
paths = [x.replace('/', '') for x in searchpath.split('/')]
paths_total = len(paths) - 1
position = 0
# recursive walk while looking for next element in searchpath
root = self.content.rootFolder
while root and position <= paths_total:
change = False
if hasattr(root, 'childEntity'):
for child in root.childEntity:
if child.name == paths[position]:
root = child
position += 1
change = True
break
elif isinstance(root, vim.Datacenter):
if hasattr(root, 'vmFolder'):
if root.vmFolder.name == paths[position]:
root = root.vmFolder
position += 1
change = True
else:
root = None
if not change:
root = None
return root
def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
""" Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """
cluster_name = cluster or self.params.get('cluster', None)
host_name = host or self.params.get('esxi_hostname', None)
resource_pool_name = resource_pool or self.params.get('resource_pool', None)
# get the datacenter object
datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if not datacenter:
self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])
# if cluster is given, get the cluster object
if cluster_name:
cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
if not cluster:
self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
# if host is given, get the cluster object using the host
elif host_name:
host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
if not host:
self.module.fail_json(msg='Unable to find host "%s"' % host_name)
cluster = host.parent
else:
cluster = None
# get resource pools limiting search to cluster or datacenter
resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter)
if not resource_pool:
if resource_pool_name:
self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
else:
self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
return resource_pool
def deploy_vm(self):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# FIXME:
# - static IPs
self.folder = self.params.get('folder', None)
if self.folder is None:
self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")
# Prepend / if it was missing from the folder path, also strip trailing slashes
if not self.folder.startswith('/'):
self.folder = '/%(folder)s' % self.params
self.folder = self.folder.rstrip('/')
datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if datacenter is None:
self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
dcpath = compile_folder_path_for_object(datacenter)
# Nested folder does not have trailing /
if not dcpath.endswith('/'):
dcpath += '/'
# Check for full path first in case it was already supplied
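        # The branches below normalise the user supplied folder into a full inventory
        # path before the lookup, e.g. (hypothetical) folder '/prod' in datacenter 'DC1'
        # under the root folder becomes '/DC1/vm/prod'.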
if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
fullpath = self.folder
elif self.folder.startswith('/vm/') or self.folder == '/vm':
fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
elif self.folder.startswith('/'):
fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
else:
fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)
f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)
# abort if no strategy was successful
if f_obj is None:
# Add some debugging values in failure.
details = {
'datacenter': datacenter.name,
'datacenter_path': dcpath,
'folder': self.folder,
'full_search_path': fullpath,
}
self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
details=details)
destfolder = f_obj
if self.params['template']:
vm_obj = self.get_vm_or_template(template_name=self.params['template'])
if vm_obj is None:
self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
else:
vm_obj = None
# always get a resource_pool
resource_pool = self.get_resource_pool()
# set the destination datastore for VM & disks
if self.params['datastore']:
# Give precedence to datastore value provided by user
# User may want to deploy VM to specific datastore.
datastore_name = self.params['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
                # The user specified a datastore cluster, so get the recommended datastore from it
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
(datastore, datastore_name) = self.select_datastore(vm_obj)
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
self.configure_hardware_params(vm_obj=vm_obj)
self.configure_resource_alloc_info(vm_obj=vm_obj)
self.configure_disks(vm_obj=vm_obj)
self.configure_network(vm_obj=vm_obj)
self.configure_cdrom(vm_obj=vm_obj)
# Find if we need network customizations (find keys in dictionary that requires customizations)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None:
self.customize_vm(vm_obj=vm_obj)
clonespec = None
clone_method = None
try:
if self.params['template']:
# create the relocation spec
relospec = vim.vm.RelocateSpec()
# Only select specific host when ESXi hostname is provided
if self.params['esxi_hostname']:
relospec.host = self.select_host()
relospec.datastore = datastore
                # Convert the disks present in the template if 'convert' is set
if self.params['convert']:
for device in vm_obj.config.hardware.device:
if hasattr(device.backing, 'fileName'):
disk_locator = vim.vm.RelocateSpec.DiskLocator()
disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if self.params['convert'] in ['thin']:
disk_locator.diskBackingInfo.thinProvisioned = True
if self.params['convert'] in ['eagerzeroedthick']:
disk_locator.diskBackingInfo.eagerlyScrub = True
if self.params['convert'] in ['thick']:
disk_locator.diskBackingInfo.diskMode = "persistent"
disk_locator.diskId = device.key
disk_locator.datastore = datastore
relospec.disk.append(disk_locator)
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# > pool: For a clone operation from a template to a virtual machine, this argument is required.
relospec.pool = resource_pool
linked_clone = self.params.get('linked_clone')
snapshot_src = self.params.get('snapshot_src', None)
if linked_clone:
if snapshot_src is not None:
relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
else:
self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
" required together for linked clone operation.")
clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=relospec)
if self.customspec:
clonespec.customization = self.customspec
if snapshot_src is not None:
if vm_obj.snapshot is None:
self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params)
snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
snapname=snapshot_src)
if len(snapshot) != 1:
self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
' snapshot named "%(snapshot_src)s"' % self.params)
clonespec.snapshot = snapshot[0].snapshot
clonespec.config = self.configspec
clone_method = 'Clone'
try:
task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
"due to permission issue: %s" % (self.params['name'],
destfolder,
to_native(e.msg)))
self.change_detected = True
else:
                # ConfigSpec requires a name for VM creation
self.configspec.name = self.params['name']
self.configspec.files = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[" + datastore_name + "]")
clone_method = 'CreateVM_Task'
try:
task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
except vmodl.fault.InvalidRequest as e:
self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
"parameter %s" % to_native(e.msg))
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to create virtual machine due to "
"product versioning restrictions: %s" % to_native(e.msg))
self.change_detected = True
self.wait_for_task(task)
except TypeError as e:
self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
# provide these to the user for debugging
clonespec_json = serialize_spec(clonespec)
configspec_json = serialize_spec(self.configspec)
kwargs = {
'changed': self.change_applied,
'failed': True,
'msg': task.info.error.msg,
'clonespec': clonespec_json,
'configspec': configspec_json,
'clone_method': clone_method
}
return kwargs
else:
# set annotation
vm = task.info.result
if self.params['annotation']:
annotation_spec = vim.vm.ConfigSpec()
annotation_spec.annotation = str(self.params['annotation'])
task = vm.ReconfigVM_Task(annotation_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'}
if self.params['customvalues']:
vm_custom_spec = vim.vm.ConfigSpec()
self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
task = vm.ReconfigVM_Task(vm_custom_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customvalues'}
if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']:
set_vm_power_state(self.content, vm, 'poweredon', force=False)
if self.params['wait_for_ip_address']:
self.wait_for_vm_ip(vm)
if self.params['wait_for_customization']:
is_customization_ok = self.wait_for_customization(vm)
if not is_customization_ok:
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'}
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def get_snapshots_by_name_recursively(self, snapshots, snapname):
snap_obj = []
for snapshot in snapshots:
if snapshot.name == snapname:
snap_obj.append(snapshot)
else:
snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
return snap_obj
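    # Illustrative sketch (not part of the original module): the recursive snapshot
    # lookup above is driven from the clone path in deploy_vm(), roughly like
    #
    #   snaps = self.get_snapshots_by_name_recursively(
    #       snapshots=vm_obj.snapshot.rootSnapshotList, snapname='pre-upgrade')
    #
    # where 'pre-upgrade' is a placeholder snapshot name; exactly one match is
    # required before it is attached to the clone spec as clonespec.snapshot.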
def reconfigure_vm(self):
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=self.current_vm_obj)
self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
self.configure_hardware_params(vm_obj=self.current_vm_obj)
self.configure_disks(vm_obj=self.current_vm_obj)
self.configure_network(vm_obj=self.current_vm_obj)
self.configure_cdrom(vm_obj=self.current_vm_obj)
self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
self.configure_vapp_properties(vm_obj=self.current_vm_obj)
if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
self.configspec.annotation = str(self.params['annotation'])
self.change_detected = True
relospec = vim.vm.RelocateSpec()
if self.params['resource_pool']:
relospec.pool = self.get_resource_pool()
if relospec.pool != self.current_vm_obj.resourcePool:
task = self.current_vm_obj.RelocateVM_Task(spec=relospec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'}
# Only send VMWare task if we see a modification
if self.change_detected:
task = None
try:
task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
" product versioning restrictions: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'}
# Rename VM
if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
task = self.current_vm_obj.Rename_Task(self.params['name'])
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'}
# Mark VM as Template
if self.params['is_template'] and not self.current_vm_obj.config.template:
try:
self.current_vm_obj.MarkAsTemplate()
self.change_applied = True
except vmodl.fault.NotSupported as e:
self.module.fail_json(msg="Failed to mark virtual machine [%s] "
"as template: %s" % (self.params['name'], e.msg))
# Mark Template as VM
elif not self.params['is_template'] and self.current_vm_obj.config.template:
resource_pool = self.get_resource_pool()
kwargs = dict(pool=resource_pool)
if self.params.get('esxi_hostname', None):
host_system_obj = self.select_host()
kwargs.update(host=host_system_obj)
try:
self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
self.change_applied = True
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(msg="Virtual machine is not marked"
" as template : %s" % to_native(invalid_state.msg))
except vim.fault.InvalidDatastore as invalid_ds:
self.module.fail_json(msg="Converting template to virtual machine"
" operation cannot be performed on the"
" target datastores: %s" % to_native(invalid_ds.msg))
except vim.fault.CannotAccessVmComponent as cannot_access:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" as operation unable access virtual machine"
" component: %s" % to_native(cannot_access.msg))
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to : %s" % to_native(invalid_argument.msg))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to generic error : %s" % to_native(generic_exc))
# Automatically update VMWare UUID when converting template to VM.
# This avoids an interactive prompt during VM startup.
uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
if not uuid_action:
uuid_action_opt = vim.option.OptionValue()
uuid_action_opt.key = "uuid.action"
uuid_action_opt.value = "create"
self.configspec.extraConfig.append(uuid_action_opt)
self.change_detected = True
        # Customize the existing VM after reconfiguration
if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']:
if self.current_vm_obj.config.template:
self.module.fail_json(msg="VM is template, not support guest OS customization.")
if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.")
cus_result = self.customize_exist_vm()
if cus_result['failed']:
return cus_result
vm_facts = self.gather_facts(self.current_vm_obj)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def customize_exist_vm(self):
task = None
        # Find if we need network customizations (find keys in dictionary that require customizations)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'):
self.customize_vm(vm_obj=self.current_vm_obj)
try:
task = self.current_vm_obj.CustomizeVM_Task(self.customspec)
        except vim.fault.CustomizationFault as e:
            self.module.fail_json(msg="Failed to customize virtual machine due to CustomizationFault: %s" % to_native(e.msg))
        except vim.fault.RuntimeFault as e:
            self.module.fail_json(msg="Failed to customize virtual machine due to RuntimeFault: %s" % to_native(e.msg))
        except Exception as e:
            self.module.fail_json(msg="Failed to customize virtual machine due to an unexpected fault: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'}
if self.params['wait_for_customization']:
set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False)
is_customization_ok = self.wait_for_customization(self.current_vm_obj)
if not is_customization_ok:
return {'changed': self.change_applied, 'failed': True, 'op': 'wait_for_customize_exist'}
return {'changed': self.change_applied, 'failed': False}
def wait_for_task(self, task, poll_interval=1):
"""
Wait for a VMware task to complete. Terminal states are 'error' and 'success'.
Inputs:
- task: the task to wait for
- poll_interval: polling interval to check the task, in seconds
Modifies:
- self.change_applied
"""
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
while task.info.state not in ['error', 'success']:
time.sleep(poll_interval)
self.change_applied = self.change_applied or task.info.state == 'success'
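    # Illustrative sketch (not part of the original module): the same polling idea
    # as wait_for_task(), extended with an overall timeout. The 'timeout' argument
    # is hypothetical and is not an option of this module.
    #
    #   def wait_for_task_with_timeout(task, poll_interval=1, timeout=3600):
    #       waited = 0
    #       while task.info.state not in ['error', 'success']:
    #           if waited >= timeout:
    #               raise RuntimeError('task did not finish within %d seconds' % timeout)
    #           time.sleep(poll_interval)
    #           waited += poll_interval
    #       return task.info.state == 'success'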
def wait_for_vm_ip(self, vm, poll=100, sleep=5):
ips = None
facts = {}
thispoll = 0
while not ips and thispoll <= poll:
newvm = self.get_vm()
facts = self.gather_facts(newvm)
if facts['ipv4'] or facts['ipv6']:
ips = True
else:
time.sleep(sleep)
thispoll += 1
return facts
def get_vm_events(self, vm, eventTypeIdList):
byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self")
filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList)
eventManager = self.content.eventManager
return eventManager.QueryEvent(filterSpec)
def wait_for_customization(self, vm, poll=10000, sleep=10):
thispoll = 0
while thispoll <= poll:
eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent'])
if len(eventStarted):
thispoll = 0
while thispoll <= poll:
eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed'])
if len(eventsFinishedResult):
if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded):
self.module.fail_json(msg='Customization failed with error {0}:\n{1}'.format(
eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage))
return False
break
else:
time.sleep(sleep)
thispoll += 1
return True
else:
time.sleep(sleep)
thispoll += 1
        self.module.fail_json(msg='Waiting for customization to start timed out.')
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
template=dict(type='str', aliases=['template_src']),
is_template=dict(type='bool', default=False),
annotation=dict(type='str', aliases=['notes']),
customvalues=dict(type='list', default=[]),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
use_instance_uuid=dict(type='bool', default=False),
folder=dict(type='str'),
guest_id=dict(type='str'),
disk=dict(type='list', default=[]),
cdrom=dict(type='dict', default={}),
hardware=dict(type='dict', default={}),
force=dict(type='bool', default=False),
datacenter=dict(type='str', default='ha-datacenter'),
esxi_hostname=dict(type='str'),
cluster=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
state_change_timeout=dict(type='int', default=0),
snapshot_src=dict(type='str'),
linked_clone=dict(type='bool', default=False),
networks=dict(type='list', default=[]),
resource_pool=dict(type='str'),
customization=dict(type='dict', default={}, no_log=True),
customization_spec=dict(type='str', default=None),
wait_for_customization=dict(type='bool', default=False),
vapp_properties=dict(type='list', default=[]),
datastore=dict(type='str'),
convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['cluster', 'esxi_hostname'],
],
required_one_of=[
['name', 'uuid'],
],
)
result = {'failed': False, 'changed': False}
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
# VM already exists
if vm:
if module.params['state'] == 'absent':
# destroy it
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='remove_vm',
)
module.exit_json(**result)
if module.params['force']:
# has to be poweredoff first
set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
result = pyv.remove_vm(vm)
elif module.params['state'] == 'present':
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
desired_operation='reconfigure_vm',
)
module.exit_json(**result)
result = pyv.reconfigure_vm()
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='set_vm_power_state',
)
module.exit_json(**result)
# set powerstate
tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
if tmp_result['changed']:
result["changed"] = True
if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']:
wait_result = wait_for_vm_ip(pyv.content, vm)
if not wait_result:
module.fail_json(msg='Waiting for IP address timed out')
tmp_result['instance'] = wait_result
if not tmp_result["failed"]:
result["failed"] = False
result['instance'] = tmp_result['instance']
else:
# This should not happen
raise AssertionError()
# VM doesn't exist
else:
if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
if module.check_mode:
result.update(
changed=True,
desired_operation='deploy_vm',
)
module.exit_json(**result)
result = pyv.deploy_vm()
if result['failed']:
module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg'])
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
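# Illustrative sketch (not part of the original module): a minimal playbook task
# invoking this module to clone a VM from a template. All hostnames, credentials
# and object names below are placeholders, not values taken from this file.
#
#   - name: Clone a VM from a template
#     vmware_guest:
#       hostname: vcenter.example.com
#       username: administrator@vsphere.local
#       password: "{{ vcenter_password }}"
#       validate_certs: no
#       datacenter: DC1
#       folder: /DC1/vm
#       name: new-vm-01
#       template: rhel7-template
#       state: poweredon
#       wait_for_ip_address: yes
#     delegate_to: localhost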
|
ujenmr/ansible
|
lib/ansible/modules/cloud/vmware/vmware_guest.py
|
Python
|
gpl-3.0
| 134,383
|
[
"VisIt"
] |
3b77c0b667edcf39841df0efa2a6d6b37eeab0accbddffcf14e9a62b8c347c30
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that encode the sequence of PSI module
calls for each of the *name* values of the energy(), optimize(),
response(), and frequency() function. *name* can be assumed lowercase by here.
"""
import os
import sys
import shutil
import subprocess
import warnings
import numpy as np
from qcelemental import constants
from psi4 import extras
from psi4 import core
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver import psifiles as psif
from psi4.driver.p4util.exceptions import ManagedMethodError, PastureRequiredError, ValidationError
#from psi4.driver.molutil import *
from psi4.driver.qcdb.basislist import corresponding_basis
# never import driver, wrappers, or aliases into this file
from .roa import run_roa
from . import proc_util
from . import empirical_dispersion
from . import dft
from . import mcscf
from . import response
from . import solvent
# ATTN NEW ADDITIONS!
# consult http://psicode.org/psi4manual/master/proc_py.html
def select_mp2(name, **kwargs):
"""Function selecting the algorithm for a MP2 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/dfmp2/detci/fnocc
# MP2_TYPE exists largely for py-side reasoning, so must manage it
# here rather than passing to c-side unprepared for validation
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module == 'FNOCC':
func = run_fnocc
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference in ['RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'DFMP2']:
func = run_dfmp2
if module == 'DETCI':
core.print_out("""\nDETCI is ill-advised for method MP2 as it is available inefficiently as a """
"""byproduct of a CISD computation.\n DETCI ROHF MP2 will produce non-standard results.\n""")
if func is None:
raise ManagedMethodError(['select_mp2', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
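# Illustrative sketch (not part of the original module): every select_* function
# below follows the same pattern -- read REFERENCE, the relevant *_TYPE option and
# QC_MODULE, pick a run_* function, then either probe for availability or dispatch.
# For example, forcing the DFMP2 code for a density-fitted RHF MP2 energy could
# look like this (option values as defined above):
#
#   core.set_global_option('MP2_TYPE', 'DF')
#   core.set_global_option('QC_MODULE', 'DFMP2')
#   select_mp2('mp2', probe=True)   # only checks that a route exists
#   select_mp2('mp2')               # dispatches to run_dfmp2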
def select_mp2_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP2 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ/dfmp2
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc_gradient
elif module in ['', 'DFMP2']:
func = run_dfmp2_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp2_gradient', name, 'MP2_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2_property(name, **kwargs):
"""Function selecting the algorithm for a MP2 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only dfmp2 for now
func = None
if reference == 'RHF':
if mtd_type == 'DF':
#if module == 'OCC':
# func = run_dfocc_property
if module in ['', 'DFMP2']:
func = run_dfmp2_property
#elif reference == 'UHF':
# if mtd_type == 'DF':
# if module in ['', 'OCC']:
# func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_mp2_property', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2(name, **kwargs):
"""Function selecting the algorithm for an OMP2 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp2', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP2 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp2_gradient', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2_property(name, **kwargs):
"""Function selecting the algorithm for an OMP2 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_omp2_property', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5_property(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_omp2p5_property', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3_property(name, **kwargs):
"""Function selecting the algorithm for an OMP3 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_omp3_property', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd_property(name, **kwargs):
"""Function selecting the algorithm for an OLCCD property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_olccd_property', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp3(name, **kwargs):
"""Function selecting the algorithm for a MP3 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/fnocc/detci
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module == 'FNOCC':
func = run_fnocc
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI': # no default for this case
func = run_detci
elif module in ['']:
core.print_out("""\nThis method is available inefficiently as a """
"""byproduct of a CISD computation.\n Add "set """
"""qc_module detci" to input to access this route.\n""")
if func is None:
raise ManagedMethodError(['select_mp3', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp3_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP3 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp3_gradient', name, 'MP_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3(name, **kwargs):
"""Function selecting the algorithm for an OMP3 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp3', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP3 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp3_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2p5(name, **kwargs):
"""Function selecting the algorithm for a MP2.5 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_mp2p5', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2p5_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP2.5 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp2p5_gradient', name, 'MP_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp2p5', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp2p5_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_lccd(name, **kwargs):
"""Function selecting the algorithm for a LCCD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'OCC':
func = run_occ
elif module in ['', 'FNOCC']:
func = run_cepa
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_lccd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_lccd_gradient(name, **kwargs):
"""Function selecting the algorithm for a LCCD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_lccd_gradient', name, 'CC_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd(name, **kwargs):
"""Function selecting the algorithm for an OLCCD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_olccd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd_gradient(name, **kwargs):
"""Function selecting the algorithm for an OLCCD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_olccd_gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_fnoccsd(name, **kwargs):
"""Function selecting the algorithm for a FNO-CCSD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'FNOCC']:
func = run_fnocc
elif mtd_type == 'DF':
if module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module in ['', 'FNOCC']:
func = run_fnodfcc
if func is None:
raise ManagedMethodError(['select_fnoccsd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd(name, **kwargs):
"""Function selecting the algorithm for a CCSD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy/detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'FNOCC':
func = run_fnocc
elif module == 'CCT3' and extras.addons("cct3"):
import cct3
func = cct3.run_cct3
elif module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'CCT3' and extras.addons("cct3"):
import cct3
func = cct3.run_cct3
elif module in ['', 'CCENERGY']:
func = run_ccenergy
if func is None:
raise ManagedMethodError(['select_ccsd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_gradient(name, **kwargs):
"""Function selecting the algorithm for a CCSD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
if func is None:
raise ManagedMethodError(['select_ccsd_gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_fnoccsd_t_(name, **kwargs):
"""Function selecting the algorithm for a FNO-CCSD(T) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'FNOCC']:
func = run_fnocc
elif mtd_type == 'DF':
if module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module in ['', 'FNOCC']:
func = run_fnodfcc
if func is None:
raise ManagedMethodError(['select_fnoccsd_t_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_t_(name, **kwargs):
"""Function selecting the algorithm for a CCSD(T) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'FNOCC':
func = run_fnocc
elif module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif reference in ['UHF', 'ROHF']:
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
if func is None:
raise ManagedMethodError(['select_ccsd_t_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_t__gradient(name, **kwargs):
"""Function selecting the algorithm for a CCSD(T) gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only ccenergy
func = None
if reference in ['RHF']:
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
if func is None:
raise ManagedMethodError(['select_ccsd_t__gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_at_(name, **kwargs):
"""Function selecting the algorithm for a CCSD(AT) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_ccsd_at_', name, 'CC_TYPE', mtd_type, reference, module])
if name.lower() == "a-ccsd(t)":
pass
elif name.lower() in ["ccsd(at)", "lambda-ccsd(t)"]:
core.print_out(f"""\nMethod "{name.lower()}" has been regularized to "a-ccsd(t)" for QCVariables.""")
name = "a-ccsd(t)"
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_cisd(name, **kwargs):
"""Function selecting the algorithm for a CISD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CI_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'FNOCC']:
func = run_cepa
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module in ['', 'DETCI']:
func = run_detci
if func is None:
raise ManagedMethodError(['select_cisd', name, 'CI_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp4(name, **kwargs):
"""Function selecting the algorithm for a MP4 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'FNOCC']:
func = run_fnocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI': # no default for this case
func = run_detci
elif module in ['']:
core.print_out("""\nThis method is available inefficiently as a """
"""byproduct of a CISDT computation.\n Add "set """
"""qc_module detci" to input to access this route.\n""")
if func is None:
raise ManagedMethodError(['select_mp4', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_adc2(name, **kwargs):
"""Function selecting the algorithm for ADC(2) excited state energy
call and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only adcc/adc
# TODO Actually one should do selection on a couple of other options here
# as well, e.g. adcc supports frozen-core and frozen-virtual,
# spin-specific states or spin-flip methods.
# But as far as I (mfherbst) know the BUILTIN ADC routine only supports
# singlet states and without freezing some core or some virtual orbitals.
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'ADCC' and extras.addons("adcc"):
func = run_adcc
elif module in ['', 'BUILTIN']:
func = run_adc
if reference == 'UHF':
if mtd_type == 'CONV':
if module in ['ADCC', ''] and extras.addons("adcc"):
func = run_adcc
    # Note: ROHF is theoretically available in adcc, but is not fully tested,
    # so it will be added later.
if func is None:
raise ManagedMethodError(['select_adc2', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def build_disp_functor(name, restricted, save_pairwise_disp=False, **kwargs):
if core.has_option_changed("SCF", "DFT_DISPERSION_PARAMETERS"):
modified_disp_params = core.get_option("SCF", "DFT_DISPERSION_PARAMETERS")
else:
modified_disp_params = None
# Figure out functional
superfunc, disp_type = dft.build_superfunctional(name, restricted)
if disp_type:
if isinstance(name, dict):
# user dft_functional={} spec - type for lookup, dict val for param defs,
# name & citation discarded so only param matches to existing defs will print labels
_disp_functor = empirical_dispersion.EmpiricalDispersion(name_hint='',
level_hint=disp_type["type"],
param_tweaks=disp_type["params"],
save_pairwise_disp=save_pairwise_disp,
engine=kwargs.get('engine', None))
else:
# dft/*functionals.py spec - name & type for lookup, option val for param tweaks
_disp_functor = empirical_dispersion.EmpiricalDispersion(name_hint=superfunc.name(),
level_hint=disp_type["type"],
param_tweaks=modified_disp_params,
save_pairwise_disp=save_pairwise_disp,
engine=kwargs.get('engine', None))
# [Aug 2018] there once was a breed of `disp_type` that quacked
# like a list rather than the more common dict handled above. if
# ever again sighted, make an issue so this code can accommodate.
_disp_functor.print_out()
return superfunc, _disp_functor
else:
return superfunc, None
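# Illustrative sketch (not part of the original module): typical use of
# build_disp_functor(). Whether a dispersion functor is returned depends on the
# functional looked up by dft.build_superfunctional; 'b3lyp-d3bj' is only an
# example name.
#
#   superfunc, disp = build_disp_functor('b3lyp-d3bj', restricted=True)
#   if disp is not None:
#       e_disp = disp.compute_energy(molecule)   # molecule: a core.Molecule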
def scf_wavefunction_factory(name, ref_wfn, reference, **kwargs):
"""Builds the correct (R/U/RO/CU HF/KS) wavefunction from the
provided information, sets relevant auxiliary basis sets on it,
and prepares any empirical dispersion.
"""
# Figure out functional and dispersion
superfunc, _disp_functor = build_disp_functor(name, restricted=(reference in ["RKS", "RHF"]), **kwargs)
# Build the wavefunction
core.prepare_options_for_module("SCF")
if reference in ["RHF", "RKS"]:
wfn = core.RHF(ref_wfn, superfunc)
elif reference == "ROHF":
wfn = core.ROHF(ref_wfn, superfunc)
elif reference in ["UHF", "UKS"]:
wfn = core.UHF(ref_wfn, superfunc)
elif reference == "CUHF":
wfn = core.CUHF(ref_wfn, superfunc)
else:
raise ValidationError("SCF: Unknown reference (%s) when building the Wavefunction." % reference)
if _disp_functor and _disp_functor.engine != 'nl':
wfn._disp_functor = _disp_functor
# Set the DF basis sets
if (("DF" in core.get_global_option("SCF_TYPE")) or
(core.get_option("SCF", "DF_SCF_GUESS") and (core.get_global_option("SCF_TYPE") == "DIRECT"))):
aux_basis = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=wfn.basisset().has_puream())
wfn.set_basisset("DF_BASIS_SCF", aux_basis)
else:
wfn.set_basisset("DF_BASIS_SCF", core.BasisSet.zero_ao_basis_set())
# Set the relativistic basis sets
if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
decon_basis = core.BasisSet.build(wfn.molecule(), "BASIS_RELATIVISTIC",
core.get_option("SCF", "BASIS_RELATIVISTIC"),
"DECON", core.get_global_option('BASIS'),
puream=wfn.basisset().has_puream())
wfn.set_basisset("BASIS_RELATIVISTIC", decon_basis)
# Set the multitude of SAD basis sets
if (core.get_option("SCF", "GUESS") in ["SAD", "SADNO", "HUCKEL"]):
sad_basis_list = core.BasisSet.build(wfn.molecule(), "ORBITAL",
core.get_global_option("BASIS"),
puream=wfn.basisset().has_puream(),
return_atomlist=True)
wfn.set_sad_basissets(sad_basis_list)
if ("DF" in core.get_option("SCF", "SAD_SCF_TYPE")):
# We need to force this to spherical regardless of any user or other demands.
optstash = p4util.OptionsState(['PUREAM'])
core.set_global_option('PUREAM', True)
sad_fitting_list = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SAD",
core.get_option("SCF", "DF_BASIS_SAD"),
puream=True,
return_atomlist=True)
wfn.set_sad_fitting_basissets(sad_fitting_list)
optstash.restore()
if hasattr(core, "EXTERN") and 'external_potentials' in kwargs:
core.print_out("\n Warning! Both an external potential EXTERN object and the external_potential" +
" keyword argument are specified. The external_potentials keyword argument will be ignored.\n")
# If EXTERN is set, then place that potential on the wfn
if hasattr(core, "EXTERN"):
wfn.set_potential_variable("C", core.EXTERN) # This is for the FSAPT procedure
wfn.set_external_potential(core.EXTERN)
elif 'external_potentials' in kwargs:
# For FSAPT, we can take a dictionary of external potentials, e.g.,
# external_potentials={'A': potA, 'B': potB, 'C': potC} (any optional)
# For the dimer SAPT calculation, we need to account for the external potential
# in all of the subsystems A, B, C. So we add them all in total_external_potential
# and set the external potential to the dimer wave function
total_external_potential = core.ExternalPotential()
for frag in kwargs['external_potentials']:
if frag.upper() in "ABC":
wfn.set_potential_variable(frag.upper(), kwargs['external_potentials'][frag].extern)
total_external_potential.appendCharges(kwargs['external_potentials'][frag].extern.getCharges())
else:
core.print_out("\n Warning! Unknown key for the external_potentials argument: %s" %frag)
wfn.set_external_potential(total_external_potential)
return wfn
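# Illustrative sketch (not part of the original module): scf_helper() below uses
# the factory in essentially this way -- build a bare Wavefunction for the active
# molecule and basis, then promote it to the requested reference type.
#
#   base_wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
#   scf_wfn = scf_wavefunction_factory('scf', base_wfn, core.get_option('SCF', 'REFERENCE'))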
def scf_helper(name, post_scf=True, **kwargs):
"""Function serving as helper to SCF, choosing whether to cast
up or just run SCF with a standard guess. This preserves
previous SCF options set by other procedures (e.g., SAPT
output file types for SCF).
"""
if post_scf:
name = "scf"
optstash = p4util.OptionsState(
['PUREAM'],
['BASIS'],
['QMEFP'],
['INTS_TOLERANCE'],
['DF_BASIS_SCF'],
['SCF', 'GUESS'],
['SCF', 'DF_INTS_IO'],
['SCF', 'ORBITALS_WRITE'],
['SCF_TYPE'], # Hack: scope gets changed internally with the Andy trick
)
optstash2 = p4util.OptionsState(
['BASIS'],
['DF_BASIS_SCF'],
['SCF_TYPE'],
['SCF', 'DF_INTS_IO'],
)
# Make sure we grab the correctly scoped integral threshold for SCF
core.set_global_option('INTS_TOLERANCE', core.get_option('SCF', 'INTS_TOLERANCE'))
# Grab a few kwargs
use_c1 = kwargs.get('use_c1', False)
scf_molecule = kwargs.get('molecule', core.get_active_molecule())
read_orbitals = core.get_option('SCF', 'GUESS') == "READ"
do_timer = kwargs.pop("do_timer", True)
ref_wfn = kwargs.pop('ref_wfn', None)
if ref_wfn is not None:
raise ValidationError("Cannot seed an SCF calculation with a reference wavefunction ('ref_wfn' kwarg).")
# decide if we keep the checkpoint file
_chkfile = kwargs.get('write_orbitals', True)
write_checkpoint_file = False
if isinstance(_chkfile, str):
write_checkpoint_file = True
filename = kwargs.get('write_orbitals')
core.set_local_option("SCF", "ORBITALS_WRITE", filename)
elif _chkfile is True:
write_checkpoint_file = True
# PCM needs to be run w/o symmetry
if core.get_option("SCF", "PCM"):
c1_molecule = scf_molecule.clone()
c1_molecule.reset_point_group('c1')
c1_molecule.update_geometry()
scf_molecule = c1_molecule
core.print_out(""" PCM does not make use of molecular symmetry: """
"""further calculations in C1 point group.\n""")
# PE needs to use exactly input orientation to correspond to potfile
if core.get_option("SCF", "PE"):
c1_molecule = scf_molecule.clone()
if getattr(scf_molecule, "_initial_cartesian", None) is not None:
c1_molecule._initial_cartesian = scf_molecule._initial_cartesian.clone()
c1_molecule.set_geometry(c1_molecule._initial_cartesian)
c1_molecule.reset_point_group("c1")
c1_molecule.fix_orientation(True)
c1_molecule.fix_com(True)
c1_molecule.update_geometry()
else:
raise ValidationError("Set no_com/no_reorient/symmetry c1 by hand for PE on non-Cartesian molecules.")
scf_molecule = c1_molecule
core.print_out(""" PE does not make use of molecular symmetry: """
"""further calculations in C1 point group.\n""")
core.print_out(""" PE geometry must align with POTFILE keyword: """
"""resetting coordinates with fixed origin and orientation.\n""")
# SCF Banner data
banner = kwargs.pop('banner', None)
bannername = name
# Did we pass in a DFT functional?
dft_func = kwargs.pop('dft_functional', None)
if dft_func is not None:
if name.lower() != "scf":
raise ValidationError("dft_functional was supplied to SCF, but method name was not SCF ('%s')" % name)
name = dft_func
bannername = name
if isinstance(name, dict):
bannername = name.get("name", "custom functional")
# Setup the timer
if do_timer:
core.tstart()
# Second-order SCF requires non-symmetric density matrix support
if core.get_option('SCF', 'SOSCF'):
proc_util.check_non_symmetric_jk_density("Second-order SCF")
# sort out cast_up settings. no need to stash these since only read, never reset
cast = False
if core.has_option_changed('SCF', 'BASIS_GUESS'):
cast = core.get_option('SCF', 'BASIS_GUESS')
if p4util.yes.match(str(cast)):
cast = True
elif p4util.no.match(str(cast)):
cast = False
if cast:
# A user can set "BASIS_GUESS" to True and we default to 3-21G
if cast is True:
guessbasis = corresponding_basis(core.get_global_option('BASIS'), 'GUESS')[0]
if guessbasis is None:
guessbasis = '3-21G' # guess of last resort
else:
guessbasis = cast
core.set_global_option('BASIS', guessbasis)
castdf = 'DF' in core.get_global_option('SCF_TYPE')
if core.has_option_changed('SCF', 'DF_BASIS_GUESS'):
castdf = core.get_option('SCF', 'DF_BASIS_GUESS')
if p4util.yes.match(str(castdf)):
castdf = True
elif p4util.no.match(str(castdf)):
castdf = False
if castdf:
core.set_global_option('SCF_TYPE', 'DF')
core.set_local_option('SCF', 'DF_INTS_IO', 'none')
# Figure out the fitting basis set
if castdf is True:
core.set_global_option('DF_BASIS_SCF', '')
elif isinstance(castdf, str):
core.set_global_option('DF_BASIS_SCF', castdf)
else:
raise ValidationError("Unexpected castdf option (%s)." % castdf)
# Switch to the guess namespace
namespace = core.IO.get_default_namespace()
guesspace = namespace + '.guess'
if namespace == '':
guesspace = 'guess'
core.IO.set_default_namespace(guesspace)
# Print some info about the guess
core.print_out('\n')
p4util.banner('Guess SCF, %s Basis' % (guessbasis))
core.print_out('\n')
# sort out broken_symmetry settings.
if 'brokensymmetry' in kwargs:
multp = scf_molecule.multiplicity()
if multp != 1:
raise ValidationError('Broken symmetry is only for singlets.')
if core.get_option('SCF', 'REFERENCE') not in ['UHF', 'UKS']:
raise ValidationError("""You must specify 'set reference uhf' to use broken symmetry.""")
do_broken = True
else:
do_broken = False
if cast and read_orbitals:
raise ValidationError("""Detected options to both cast and read orbitals""")
if cast and do_broken:
raise ValidationError("""Detected options to both cast and perform a broken symmetry computation""")
if (core.get_option('SCF', 'STABILITY_ANALYSIS') == 'FOLLOW') and (core.get_option('SCF', 'REFERENCE') != 'UHF'):
raise ValidationError("""Stability analysis root following is only available for UHF""")
# broken set-up
if do_broken:
raise ValidationError("""Broken symmetry computations are not currently enabled.""")
scf_molecule.set_multiplicity(3)
core.print_out('\n')
p4util.banner(' Computing high-spin triplet guess ')
core.print_out('\n')
# If GUESS is auto guess what it should be
if core.get_option('SCF', 'GUESS') == "AUTO":
if (scf_molecule.natom() > 1):
core.set_local_option('SCF', 'GUESS', 'SAD')
else:
core.set_local_option('SCF', 'GUESS', 'CORE')
if core.get_global_option('BASIS') in ['', '(AUTO)']:
if name in ['hf3c', 'hf-3c']:
core.set_global_option('BASIS', 'minix')
elif name in ['pbeh3c', 'pbeh-3c']:
core.set_global_option('BASIS', 'def2-msvp')
# the FIRST scf call
if cast or do_broken:
# Cast or broken are special cases
base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
core.print_out("\n ---------------------------------------------------------\n");
if banner:
core.print_out(" " + banner.center(58));
if cast:
core.print_out(" " + "SCF Castup computation".center(58));
ref_wfn = scf_wavefunction_factory(name, base_wfn, core.get_option('SCF', 'REFERENCE'), **kwargs)
core.set_legacy_wavefunction(ref_wfn)
# Compute additive correction: dftd3, mp2d, dftd4, etc.
if hasattr(ref_wfn, "_disp_functor"):
disp_energy = ref_wfn._disp_functor.compute_energy(ref_wfn.molecule())
ref_wfn.set_variable("-D Energy", disp_energy)
ref_wfn.compute_energy()
# broken clean-up
if do_broken:
raise ValidationError("Broken Symmetry computations are temporarily disabled.")
scf_molecule.set_multiplicity(1)
core.set_local_option('SCF', 'GUESS', 'READ')
core.print_out('\n')
p4util.banner(' Computing broken symmetry solution from high-spin triplet guess ')
core.print_out('\n')
# cast clean-up
if cast:
# Move files to proper namespace
core.IO.change_file_namespace(180, guesspace, namespace)
core.IO.set_default_namespace(namespace)
optstash2.restore()
# Print the banner for the standard operation
core.print_out('\n')
p4util.banner(bannername.upper())
core.print_out('\n')
# the SECOND scf call
base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
if banner:
core.print_out("\n ---------------------------------------------------------\n");
core.print_out(" " + banner.center(58));
scf_wfn = scf_wavefunction_factory(name, base_wfn, core.get_option('SCF', 'REFERENCE'), **kwargs)
core.set_legacy_wavefunction(scf_wfn)
# The wfn from_file routine adds the npy suffix if needed, but we add it here so that
# we can use os.path.isfile to query whether the file exists before attempting to read
read_filename = scf_wfn.get_scratch_filename(180) + '.npy'
if ((core.get_option('SCF', 'GUESS') == 'READ') and os.path.isfile(read_filename)):
old_wfn = core.Wavefunction.from_file(read_filename)
Ca_occ = old_wfn.Ca_subset("SO", "OCC")
Cb_occ = old_wfn.Cb_subset("SO", "OCC")
if old_wfn.molecule().schoenflies_symbol() != scf_molecule.schoenflies_symbol():
raise ValidationError("Cannot compute projection of different symmetries.")
if old_wfn.basisset().name() == scf_wfn.basisset().name():
core.print_out(f" Reading orbitals from file {read_filename}, no projection.\n\n")
scf_wfn.guess_Ca(Ca_occ)
scf_wfn.guess_Cb(Cb_occ)
else:
core.print_out(f" Reading orbitals from file {read_filename}, projecting to new basis.\n\n")
core.print_out(" Computing basis projection from %s to %s\n\n" % (old_wfn.basisset().name(), scf_wfn.basisset().name()))
pCa = scf_wfn.basis_projection(Ca_occ, old_wfn.nalphapi(), old_wfn.basisset(), scf_wfn.basisset())
pCb = scf_wfn.basis_projection(Cb_occ, old_wfn.nbetapi(), old_wfn.basisset(), scf_wfn.basisset())
scf_wfn.guess_Ca(pCa)
scf_wfn.guess_Cb(pCb)
# Strip off headers to only get R, RO, U, CU
old_ref = old_wfn.name().replace("KS", "").replace("HF", "")
new_ref = scf_wfn.name().replace("KS", "").replace("HF", "")
if old_ref != new_ref:
scf_wfn.reset_occ_ = True
elif (core.get_option('SCF', 'GUESS') == 'READ') and not os.path.isfile(read_filename):
core.print_out(f"\n !!! Unable to find file {read_filename}, defaulting to SAD guess. !!!\n\n")
core.set_local_option('SCF', 'GUESS', 'SAD')
sad_basis_list = core.BasisSet.build(scf_wfn.molecule(), "ORBITAL",
core.get_global_option("BASIS"),
puream=scf_wfn.basisset().has_puream(),
return_atomlist=True)
scf_wfn.set_sad_basissets(sad_basis_list)
if ("DF" in core.get_option("SCF", "SAD_SCF_TYPE")):
sad_fitting_list = core.BasisSet.build(scf_wfn.molecule(), "DF_BASIS_SAD",
core.get_option("SCF", "DF_BASIS_SAD"),
puream=scf_wfn.basisset().has_puream(),
return_atomlist=True)
scf_wfn.set_sad_fitting_basissets(sad_fitting_list)
if cast:
core.print_out("\n Computing basis projection from %s to %s\n\n" % (ref_wfn.basisset().name(), base_wfn.basisset().name()))
if ref_wfn.basisset().n_ecp_core() != base_wfn.basisset().n_ecp_core():
raise ValidationError("Projecting from basis ({}) with ({}) ECP electrons to basis ({}) with ({}) ECP electrons will be a disaster. Select a compatible cast-up basis with `set guess_basis YOUR_BASIS_HERE`.".format(
ref_wfn.basisset().name(), ref_wfn.basisset().n_ecp_core(), base_wfn.basisset().name(), base_wfn.basisset().n_ecp_core()))
pCa = ref_wfn.basis_projection(ref_wfn.Ca(), ref_wfn.nalphapi(), ref_wfn.basisset(), scf_wfn.basisset())
pCb = ref_wfn.basis_projection(ref_wfn.Cb(), ref_wfn.nbetapi(), ref_wfn.basisset(), scf_wfn.basisset())
scf_wfn.guess_Ca(pCa)
scf_wfn.guess_Cb(pCb)
# Print basis set info
if core.get_option("SCF", "PRINT_BASIS"):
scf_wfn.basisset().print_detail_out()
# Compute additive correction: dftd3, mp2d, dftd4, etc.
if hasattr(scf_wfn, "_disp_functor"):
disp_energy = scf_wfn._disp_functor.compute_energy(scf_wfn.molecule(), scf_wfn)
scf_wfn.set_variable("-D Energy", disp_energy)
# PCM preparation
if core.get_option('SCF', 'PCM'):
if core.get_option('SCF', 'PE'):
raise ValidationError("""Error: 3-layer QM/MM/PCM not implemented.\n""")
pcmsolver_parsed_fname = core.get_local_option('PCM', 'PCMSOLVER_PARSED_FNAME')
pcm_print_level = core.get_option('SCF', "PRINT")
scf_wfn.set_PCM(core.PCM(pcmsolver_parsed_fname, pcm_print_level, scf_wfn.basisset()))
# PE preparation
if core.get_option('SCF', 'PE'):
if not solvent._have_pe:
raise ModuleNotFoundError('Python module cppe not found. Solve by installing it: `conda install -c psi4 pycppe`')
# PE needs information about molecule and basis set
pol_embed_options = solvent.pol_embed.get_pe_options()
core.print_out(f""" Using potential file
{pol_embed_options["potfile"]}
for Polarizable Embedding calculation.\n""")
scf_wfn.pe_state = solvent.pol_embed.CppeInterface(
molecule=scf_molecule, options=pol_embed_options,
basisset=scf_wfn.basisset()
)
e_scf = scf_wfn.compute_energy()
for obj in [core, scf_wfn]:
# set_variable("SCF TOTAL ENERGY") # P::e SCF
for pv in ["SCF TOTAL ENERGY", "CURRENT ENERGY", "CURRENT REFERENCE ENERGY"]:
obj.set_variable(pv, e_scf)
# We always would like to print a little property information
if kwargs.get('scf_do_properties', True):
oeprop = core.OEProp(scf_wfn)
oeprop.set_title("SCF")
# Figure our properties, if empty do dipole
props = [x.upper() for x in core.get_option("SCF", "SCF_PROPERTIES")]
if "DIPOLE" not in props:
props.append("DIPOLE")
proc_util.oeprop_validator(props)
for x in props:
oeprop.add(x)
# Compute properties
oeprop.compute()
for obj in [core, scf_wfn]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# component qcvars can be retired at v1.5
for xyz in 'XYZ':
obj.set_variable('CURRENT DIPOLE ' + xyz, obj.variable('SCF DIPOLE ' + xyz))
obj.set_variable("CURRENT DIPOLE", obj.variable("SCF DIPOLE")) # P::e SCF
# Write out MO's
if core.get_option("SCF", "PRINT_MOS"):
mowriter = core.MOWriter(scf_wfn)
mowriter.write()
# Write out a molden file
if core.get_option("SCF", "MOLDEN_WRITE"):
filename = core.get_writer_file_prefix(scf_molecule.name()) + ".molden"
dovirt = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))
occa = scf_wfn.occupation_a()
        occb = scf_wfn.occupation_b()
mw = core.MoldenWriter(scf_wfn)
mw.write(filename, scf_wfn.Ca(), scf_wfn.Cb(), scf_wfn.epsilon_a(),
scf_wfn.epsilon_b(), scf_wfn.occupation_a(),
scf_wfn.occupation_b(), dovirt)
# Write checkpoint file (orbitals and basis); Can be disabled, e.g., for findif displacements
if write_checkpoint_file and isinstance(_chkfile, str):
filename = kwargs['write_orbitals']
scf_wfn.to_file(filename)
# core.set_local_option("SCF", "ORBITALS_WRITE", filename)
elif write_checkpoint_file:
filename = scf_wfn.get_scratch_filename(180)
scf_wfn.to_file(filename)
extras.register_numpy_file(filename) # retain with -m (messy) option
if do_timer:
core.tstop()
optstash.restore()
if (not use_c1) or (scf_molecule.schoenflies_symbol() == 'c1'):
return scf_wfn
else:
# C1 copy quietly
c1_optstash = p4util.OptionsState(['PRINT'])
core.set_global_option("PRINT", 0)
# If we force c1 copy the active molecule
scf_molecule.update_geometry()
core.print_out("""\n A requested method does not make use of molecular symmetry: """
"""further calculations in C1 point group.\n\n""")
c1_molecule = scf_molecule.clone()
c1_molecule.reset_point_group('c1')
c1_molecule.fix_orientation(True)
c1_molecule.fix_com(True)
c1_molecule.update_geometry()
c1_basis = core.BasisSet.build(c1_molecule, "ORBITAL", core.get_global_option('BASIS'), quiet=True)
tmp = scf_wfn.c1_deep_copy(c1_basis)
c1_jkbasis = core.BasisSet.build(c1_molecule, "DF_BASIS_SCF",
core.get_global_option("DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'), quiet=True)
tmp.set_basisset("DF_BASIS_SCF", c1_jkbasis)
c1_optstash.restore()
return tmp
def run_dct(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density cumulant theory calculation.
"""
if (core.get_global_option('FREEZE_CORE') == 'TRUE'):
raise ValidationError('Frozen core is not available for DCT.')
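    # Illustrative entry point (assuming the usual dispatch through the driver's
    # procedures table): psi4.energy('dct') routes here; a converged reference
    # may be supplied through the ref_wfn kwarg to skip the SCF step below.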
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
if (core.get_global_option("DCT_TYPE") == "DF"):
core.print_out(" Constructing Basis Sets for DCT...\n\n")
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_DCT",
core.get_global_option("DF_BASIS_DCT"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_DCT", aux_basis)
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
dct_wfn = core.dct(ref_wfn)
else:
# Ensure IWL files have been written for non DF-DCT
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
dct_wfn = core.dct(ref_wfn)
for k, v in dct_wfn.variables().items():
core.set_variable(k, v)
return dct_wfn
def run_dct_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
DCT gradient calculation.
"""
optstash = p4util.OptionsState(
['GLOBALS', 'DERTYPE'])
core.set_global_option('DERTYPE', 'FIRST')
dct_wfn = run_dct_property(name, **kwargs)
derivobj = core.Deriv(dct_wfn)
derivobj.set_tpdm_presorted(True)
if core.get_option('DCT', 'DCT_TYPE') == 'CONV':
grad = derivobj.compute()
else:
grad = derivobj.compute_df('DF_BASIS_SCF', 'DF_BASIS_DCT')
dct_wfn.set_gradient(grad)
optstash.restore()
return dct_wfn
def run_dct_property(name, **kwargs):
""" Function encoding sequence of PSI module calls for
DCT property calculation.
"""
optstash = p4util.OptionsState(
['DCT', 'OPDM'])
    core.set_local_option('DCT', 'OPDM', 'true')
dct_wfn = run_dct(name, **kwargs)
# Run OEProp
oe = core.OEProp(dct_wfn)
oe.set_title("DCT")
for prop in kwargs.get("properties", []):
prop = prop.upper()
if prop in core.OEProp.valid_methods or "MULTIPOLE(" in prop:
oe.add(prop)
oe.compute()
dct_wfn.oeprop = oe
for k, v in dct_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return dct_wfn
def run_dfocc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted or Cholesky-decomposed
(non-)orbital-optimized MPN or CC computation.
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'DO_SCS'],
['DFOCC', 'DO_SOS'],
['DFOCC', 'READ_SCF_3INDEX'],
['DFOCC', 'CHOLESKY'],
['DFOCC', 'CC_LAMBDA'])
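    # Illustrative entry points (assuming the usual dispatch through the driver's
    # procedures table), e.g.:
    #     psi4.energy('omp2')                                          # orbital-optimized DF-MP2
    #     psi4.set_options({'cc_type': 'df'}); psi4.energy('ccsd(t)')  # DF-CCSD(T)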
def set_cholesky_from(corl_type):
if corl_type == 'DF':
core.set_local_option('DFOCC', 'CHOLESKY', 'FALSE')
proc_util.check_disk_df(name.upper(), optstash)
elif corl_type == 'CD':
core.set_local_option('DFOCC', 'CHOLESKY', 'TRUE')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
optstash.add_option(['SCF_TYPE'])
core.set_global_option('SCF_TYPE', 'CD')
core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
if core.get_global_option('SCF_TYPE') != 'CD':
core.set_local_option('DFOCC', 'READ_SCF_3INDEX', 'FALSE')
else:
raise ValidationError(f"""Invalid type '{corl_type}' for DFOCC""")
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
corl_type = core.get_global_option('MP2_TYPE')
elif name in ['mp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
elif name in ['omp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
corl_type = core.get_global_option('MP_TYPE')
elif name in ['mp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
elif name in ['omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
corl_type = core.get_global_option('MP_TYPE')
elif name in ['lccd', 'olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'ccd':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCD')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'ccsd':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'ccsd(t)':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(T)')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'a-ccsd(t)':
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(AT)')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'dfocc':
pass
else:
raise ValidationError('Unidentified method %s' % (name))
set_cholesky_from(corl_type)
# conventional vs. optimized orbitals
if name in ['mp2', 'mp2.5', 'mp3', 'lccd',
'ccd', 'ccsd', 'ccsd(t)', 'a-ccsd(t)']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp2.5', 'omp3', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
if name in ["mp2.5", "mp3"] and not core.has_global_option_changed("MP_TYPE"):
core.print_out(f" Information: {name.upper()} default algorithm changed to DF in August 2020. Use `set mp_type conv` for previous behavior.\n")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
if not core.get_local_option("DFOCC", "CHOLESKY"):
core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
# Shove variables into global space
if name in ['mp2', 'omp2', 'mp2.5', 'mp3', 'lccd',]:
for k, v in dfocc_wfn.variables().items():
core.set_variable(k, v)
if name == "a-ccsd(t)":
# temporary until dfocc can be edited and qcvar name changed
core.set_variable("A-CCSD(T) TOTAL ENERGY", core.variables()["CCSD(AT) TOTAL ENERGY"])
core.set_variable("A-(T) CORRECTION ENERGY", core.variables()["(AT) CORRECTION ENERGY"])
core.del_variable("CCSD(AT) TOTAL ENERGY")
core.del_variable("(AT) CORRECTION ENERGY")
optstash.restore()
return dfocc_wfn
def run_dfocc_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted (non-)orbital-optimized MPN or CC computation.
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['REFERENCE'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'CC_LAMBDA'],
['GLOBALS', 'DERTYPE'])
proc_util.check_disk_df(name.upper(), optstash)
if core.get_global_option('SCF_TYPE') != 'DISK_DF':
raise ValidationError('DFOCC gradients need DF-SCF reference.')
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
corl_type = core.get_global_option('MP2_TYPE')
elif name in ['mp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
elif name in ["omp2.5"]:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
corl_type = core.get_global_option('MP_TYPE')
elif name in ['mp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
elif name in ['omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
corl_type = core.get_global_option('MP_TYPE')
elif name in ['lccd', 'olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
corl_type = core.get_global_option('CC_TYPE')
elif name in ['ccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCD')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
corl_type = core.get_global_option('CC_TYPE')
elif name in ['ccsd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
corl_type = core.get_global_option('CC_TYPE')
elif name in ['ccsd(t)']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(T)')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
corl_type = core.get_global_option('CC_TYPE')
else:
raise ValidationError('Unidentified method %s' % (name))
if name in ['mp2', 'mp2.5', 'mp3', 'lccd', 'ccd', 'ccsd', 'ccsd(t)']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp2.5', 'omp3', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
if corl_type not in ["DF", "CD"]:
raise ValidationError(f"""Invalid type '{corl_type}' for DFOCC""")
core.set_global_option('DERTYPE', 'FIRST')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
if name in ["mp2.5", "mp3"] and not core.has_global_option_changed("MP_TYPE"):
core.print_out(f" Information: {name.upper()} default algorithm changed to DF in August 2020. Use `set mp_type conv` for previous behavior.\n")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
derivobj = core.Deriv(dfocc_wfn)
derivobj.compute_df("DF_BASIS_SCF", "DF_BASIS_CC")
dfocc_wfn.set_variable(f"{name.upper()} TOTAL GRADIENT", dfocc_wfn.gradient())
# Shove variables into global space
if name in ['mp2', 'mp2.5', 'mp3', 'lccd', 'ccsd', 'omp2']:
for k, v in dfocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return dfocc_wfn
def run_dfocc_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted (non-)orbital-optimized MPN or CC computation.
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'OEPROP'])
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
elif name in ['omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
elif name in ['omp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
elif name in ['olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
else:
        raise ValidationError('Unidentified method %s' % (name))
proc_util.check_disk_df(name.upper(), optstash)
if name in ['mp2']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp3', 'omp2.5', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'OEPROP', 'TRUE')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
# Shove variables into global space
# TODO: Make other methods in DFOCC update all variables, then add them to the list. Adding now, risks setting outdated information.
if name in ['mp2', 'omp2']:
for k, v in dfocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return dfocc_wfn
def run_qchf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a density-fitted quadratically-convergent Hartree-Fock (QCHF) computation
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['DF_BASIS_SCF'],
['DIE_IF_NOT_CONVERGED'],
['MAXITER'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'QCHF'],
['DFOCC', 'E_CONVERGENCE'])
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'WFN_TYPE', 'QCHF')
core.set_local_option('DFOCC', 'QCHF', 'TRUE')
core.set_local_option('DFOCC', 'E_CONVERGENCE', 8)
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
core.set_local_option('SCF', 'DIE_IF_NOT_CONVERGED', 'FALSE')
core.set_local_option('SCF', 'MAXITER', 1)
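    # The preceding SCF settings deliberately allow only a single, non-fatal SCF
    # iteration: the SCF module just supplies a starting guess, and the actual
    # quadratically-convergent orbital optimization is performed inside DFOCC
    # (ORB_OPT/QCHF set above).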
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" QCHF does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
return dfocc_wfn
def run_occ(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a conventional integral (O)MPN computation
"""
# Stash these options so we can reload them at computation end.
optstash = p4util.OptionsState(
['OCC', 'SPIN_SCALE_TYPE'],
['OCC', 'ORB_OPT'],
['OCC', 'WFN_TYPE'])
if name == 'mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'scs(n)-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCSN')
elif name == 'scs-mp2-vdw':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCSVDW')
elif name == 'sos-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOS')
elif name == 'sos-pi-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOSPI')
elif name == 'custom-scs-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'sos-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOS')
elif name == 'custom-scs-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'custom-scs-mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'sos-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOS')
elif name == 'custom-scs-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
else:
raise ValidationError("""Invalid method %s""" % name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
occ_wfn = core.occ(ref_wfn)
# Shove variables into global space
keep_custom_spin_scaling = core.has_option_changed("OCC", "SS_SCALE") or core.has_option_changed("OCC", "OS_SCALE")
for k, v in occ_wfn.variables().items():
        # Custom spin component scaling variables are meaningless if custom scalings haven't been set. Delete them.
if k.startswith("CUSTOM SCS") and not keep_custom_spin_scaling:
occ_wfn.del_variable(k)
else:
core.set_variable(k, v)
optstash.restore()
return occ_wfn
def run_occ_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a conventional integral (O)MPN computation
"""
optstash = p4util.OptionsState(
['OCC', 'ORB_OPT'],
['OCC', 'WFN_TYPE'],
['OCC', 'DO_SCS'],
['OCC', 'DO_SOS'],
['GLOBALS', 'DERTYPE'])
if core.get_global_option('SCF_TYPE') in ['CD', 'DF', 'MEM_DF', 'DISK_DF']:
raise ValidationError('OCC gradients need conventional SCF reference.')
if name == 'mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'conv-omp2']:
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
else:
raise ValidationError("""Invalid method %s""" % name)
core.set_global_option('DERTYPE', 'FIRST')
# locking out SCS through explicit keyword setting
# * so that current energy must match call
# * since grads not avail for scs
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
occ_wfn = core.occ(ref_wfn)
derivobj = core.Deriv(occ_wfn)
grad = derivobj.compute()
occ_wfn.set_gradient(grad)
occ_wfn.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
# Shove variables into global space
keep_custom_spin_scaling = core.has_option_changed("OCC", "SS_SCALE") or core.has_option_changed("OCC", "OS_SCALE")
for k, v in occ_wfn.variables().items():
        # Custom spin component scaling variables are meaningless if custom scalings haven't been set. Delete them.
if k.startswith("CUSTOM SCS") and not keep_custom_spin_scaling:
occ_wfn.del_variable(k)
else:
core.set_variable(k, v)
optstash.restore()
return occ_wfn
def run_scf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a self-consistent-field theory (HF & DFT) calculation.
"""
optstash_mp2 = p4util.OptionsState(
['DF_BASIS_MP2'],
['DFMP2', 'MP2_OS_SCALE'],
['DFMP2', 'MP2_SS_SCALE'])
dft_func = False
if "dft_functional" in kwargs:
dft_func = True
optstash_scf = proc_util.scf_set_reference_local(name, is_dft=dft_func)
# See if we're doing TDSCF after, keep JK if so
if sum(core.get_option("SCF", "TDSCF_STATES")) > 0:
core.set_local_option("SCF", "SAVE_JK", True)
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
scf_wfn = scf_helper(name, post_scf=False, **kwargs)
returnvalue = scf_wfn.energy()
ssuper = scf_wfn.functional()
if ssuper.is_c_hybrid():
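        # Double-hybrid functional: feed the converged Kohn-Sham orbitals into a
        # DF-MP2 calculation and add back the (possibly spin-component-scaled)
        # correlation energy, weighted by the functional's c_alpha coefficient.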
core.tstart()
aux_basis = core.BasisSet.build(scf_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'),
puream=-1)
scf_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
if ssuper.is_c_scs_hybrid():
core.set_local_option('DFMP2', 'MP2_OS_SCALE', ssuper.c_os_alpha())
core.set_local_option('DFMP2', 'MP2_SS_SCALE', ssuper.c_ss_alpha())
dfmp2_wfn = core.dfmp2(scf_wfn)
dfmp2_wfn.compute_energy()
vdh = dfmp2_wfn.variable('CUSTOM SCS-MP2 CORRELATION ENERGY')
else:
dfmp2_wfn = core.dfmp2(scf_wfn)
dfmp2_wfn.compute_energy()
vdh = ssuper.c_alpha() * dfmp2_wfn.variable('MP2 CORRELATION ENERGY')
# remove misleading MP2 psivars computed with DFT, not HF, reference
for var in dfmp2_wfn.variables():
if var.startswith('MP2 ') and ssuper.name() not in ['MP2D']:
scf_wfn.del_variable(var)
scf_wfn.set_variable("DOUBLE-HYBRID CORRECTION ENERGY", vdh) # P::e SCF
scf_wfn.set_variable("{} DOUBLE-HYBRID CORRECTION ENERGY".format(ssuper.name()), vdh)
returnvalue += vdh
scf_wfn.set_variable("DFT TOTAL ENERGY", returnvalue) # P::e SCF
for pv, pvv in scf_wfn.variables().items():
if pv.endswith('DISPERSION CORRECTION ENERGY') and pv.startswith(ssuper.name()):
fctl_plus_disp_name = pv.split()[0]
scf_wfn.set_variable(fctl_plus_disp_name + ' TOTAL ENERGY', returnvalue)
break
else:
scf_wfn.set_variable('{} TOTAL ENERGY'.format(ssuper.name()), returnvalue)
scf_wfn.set_variable('CURRENT ENERGY', returnvalue)
scf_wfn.set_energy(returnvalue)
core.print_out('\n\n')
core.print_out(' %s Energy Summary\n' % (name.upper()))
core.print_out(' ' + '-' * (15 + len(name)) + '\n')
core.print_out(' DFT Reference Energy = %22.16lf\n' % (returnvalue - vdh))
core.print_out(' Scaled MP2 Correlation = %22.16lf\n' % (vdh))
core.print_out(' @Final double-hybrid DFT total energy = %22.16lf\n\n' % (returnvalue))
core.tstop()
if ssuper.name() == 'MP2D':
for pv, pvv in dfmp2_wfn.variables().items():
scf_wfn.set_variable(pv, pvv)
# Conversely, remove DFT qcvars from MP2D
for var in scf_wfn.variables():
if 'DFT ' in var or 'DOUBLE-HYBRID ' in var:
scf_wfn.del_variable(var)
# DFT groups dispersion with SCF. Reshuffle so dispersion with MP2 for MP2D.
for pv in ['SCF TOTAL ENERGY', 'SCF ITERATION ENERGY', 'MP2 TOTAL ENERGY']:
scf_wfn.set_variable(pv, scf_wfn.variable(pv) - scf_wfn.variable('DISPERSION CORRECTION ENERGY'))
scf_wfn.set_variable('MP2D CORRELATION ENERGY', scf_wfn.variable('MP2 CORRELATION ENERGY') + scf_wfn.variable('DISPERSION CORRECTION ENERGY'))
scf_wfn.set_variable('MP2D TOTAL ENERGY', scf_wfn.variable('MP2D CORRELATION ENERGY') + scf_wfn.variable('HF TOTAL ENERGY'))
scf_wfn.set_variable('CURRENT ENERGY', scf_wfn.variable('MP2D TOTAL ENERGY'))
scf_wfn.set_variable('CURRENT CORRELATION ENERGY', scf_wfn.variable('MP2D CORRELATION ENERGY'))
scf_wfn.set_variable('CURRENT REFERENCE ENERGY', scf_wfn.variable('SCF TOTAL ENERGY'))
# Shove variables into global space
for k, v in scf_wfn.variables().items():
core.set_variable(k, v)
optstash_scf.restore()
optstash_mp2.restore()
return scf_wfn
def run_scf_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a SCF gradient calculation.
"""
dft_func = False
if "dft_functional" in kwargs:
dft_func = True
optstash = proc_util.scf_set_reference_local(name, is_dft=dft_func)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = run_scf(name, **kwargs)
if core.get_option('SCF', 'REFERENCE') in ['ROHF', 'CUHF']:
ref_wfn.semicanonicalize()
if hasattr(ref_wfn, "_disp_functor"):
disp_grad = ref_wfn._disp_functor.compute_gradient(ref_wfn.molecule(), ref_wfn)
ref_wfn.set_variable("-D Gradient", disp_grad)
grad = core.scfgrad(ref_wfn)
if ref_wfn.basisset().has_ECP():
core.print_out("\n\n ==> Adding ECP gradient terms (computed numerically) <==\n")
# Build a map of atom->ECP number
old_print = ref_wfn.get_print()
ref_wfn.set_print(0)
delta = 0.0001
natom = ref_wfn.molecule().natom()
mints = core.MintsHelper(ref_wfn)
ecpgradmat = core.Matrix("ECP Gradient", natom, 3)
ecpgradmat.zero()
ecpgrad = np.asarray(ecpgradmat)
Dmat = ref_wfn.Da_subset("AO")
Dmat.add(ref_wfn.Db_subset("AO"))
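        # The ECP contribution to the gradient is d/dx Tr[D V_ecp]. Analytic ECP
        # derivative integrals are not used here; each Cartesian component is
        # instead obtained by finite difference of Tr[D V_ecp] under small
        # displacements of the corresponding atom.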
def displaced_energy(atom, displacement):
mints.basisset().move_atom(atom, displacement)
E = Dmat.vector_dot(mints.ao_ecp())
mints.basisset().move_atom(atom, -1*displacement)
return E
for atom in range(natom):
for xyz in range(3):
transvec = core.Vector3(0.0)
transvec[xyz] += delta
# +1 displacement
Ep1 = displaced_energy(atom, 1*transvec)
# -1 displacement
Em1 = displaced_energy(atom, -1*transvec)
# +2 displacement
Ep2 = displaced_energy(atom, 2*transvec)
# -2 displacement
Em2 = displaced_energy(atom, -2*transvec)
                # Evaluate via a fourth-order central difference:
                # dE/dx ~= (E(-2d) + 8*E(+d) - 8*E(-d) - E(+2d)) / (12*d)
ecpgrad[atom, xyz] = (Em2 + 8*Ep1 - 8*Em1 - Ep2) / (12*delta)
ecpgradmat.symmetrize_gradient(ref_wfn.molecule())
ecpgradmat.print_atom_vector()
grad.add(ecpgradmat)
grad.print_atom_vector()
ref_wfn.set_print(old_print)
ref_wfn.set_gradient(grad)
ref_wfn.set_variable("SCF TOTAL GRADIENT", grad) # P::e SCF
if ref_wfn.functional().needs_xc():
ref_wfn.set_variable("DFT TOTAL GRADIENT", grad) # overwritten later for DH -- TODO when DH gradients # P::e SCF
else:
ref_wfn.set_variable("HF TOTAL GRADIENT", grad) # P::e SCF
# Shove variables into global space
for k, v in ref_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return ref_wfn
def run_scf_hessian(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an SCF hessian calculation.
"""
optstash = proc_util.scf_set_reference_local(name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = run_scf(name, **kwargs)
badref = core.get_option('SCF', 'REFERENCE') in ['ROHF', 'CUHF', 'UKS']
    badint = core.get_global_option('SCF_TYPE') in ['CD', 'OUT_OF_CORE']
if badref or badint:
        raise ValidationError("Analytic SCF Hessians are only implemented for RHF and UHF references; SCF_TYPE CD and OUT_OF_CORE are not supported.")
if hasattr(ref_wfn, "_disp_functor"):
disp_hess = ref_wfn._disp_functor.compute_hessian(ref_wfn.molecule(), ref_wfn)
ref_wfn.set_variable("-D Hessian", disp_hess)
H = core.scfhess(ref_wfn)
ref_wfn.set_hessian(H)
# Clearly, add some logic when the reach of this fn expands
ref_wfn.set_variable("HF TOTAL HESSIAN", H) # P::e SCF
ref_wfn.set_variable("SCF TOTAL HESSIAN", H) # P::e SCF
core.set_variable("SCF TOTAL HESSIAN", H) # P::e SCF
# Shove variables into global space
for k, v in ref_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return ref_wfn
def run_mcscf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a multiconfigurational self-consistent-field calculation.
"""
# Make sure the molecule the user provided is the active one
mcscf_molecule = kwargs.get('molecule', core.get_active_molecule())
mcscf_molecule.update_geometry()
if 'ref_wfn' in kwargs:
raise ValidationError("It is not possible to pass run_mcscf a reference wavefunction")
new_wfn = core.Wavefunction.build(mcscf_molecule, core.get_global_option('BASIS'))
return core.mcscf(new_wfn)
def run_dfmp2_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DFMP2 gradient calculation.
"""
optstash = p4util.OptionsState(
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
if "DF" not in core.get_global_option('SCF_TYPE'):
raise ValidationError('DF-MP2 gradients need DF-SCF reference.')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if ref_wfn.basisset().has_ECP():
raise ValidationError('DF-MP2 gradients with an ECP are not yet available. Use dertype=0 to select numerical gradients.')
core.tstart()
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
dfmp2_wfn = core.dfmp2(ref_wfn)
grad = dfmp2_wfn.compute_gradient()
dfmp2_wfn.set_gradient(grad)
# Shove variables into global space
dfmp2_wfn.set_variable("MP2 TOTAL GRADIENT", grad) # P::e DFMP2
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
core.tstop()
return dfmp2_wfn
def run_dfmp2d_gradient(name, **kwargs):
"""Encode MP2-D method."""
dfmp2_wfn = run_dfmp2_gradient('mp2', **kwargs)
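    # MP2-D: combine the analytic DF-MP2 gradient with the gradient of the
    # empirical -D dispersion correction; the energies are assembled analogously
    # below (MP2D correlation = MP2 correlation + dispersion correction).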
wfn_grad = dfmp2_wfn.gradient().clone()
_, _disp_functor = build_disp_functor('MP2D', restricted=True)
disp_grad = _disp_functor.compute_gradient(dfmp2_wfn.molecule(), dfmp2_wfn)
wfn_grad.add(disp_grad)
dfmp2_wfn.set_gradient(wfn_grad)
dfmp2_wfn.set_variable('MP2D CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY') + dfmp2_wfn.variable('DISPERSION CORRECTION ENERGY'))
dfmp2_wfn.set_variable('MP2D TOTAL ENERGY', dfmp2_wfn.variable('MP2D CORRELATION ENERGY') + dfmp2_wfn.variable('HF TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2D TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2D CORRELATION ENERGY'))
# Shove variables into global space
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
return dfmp2_wfn
def run_ccenergy(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a CCSD, CC2, and CC3 calculation.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'])
if name == 'ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD')
core.set_local_option('CCSORT', 'WFN', 'CCSD')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD')
core.set_local_option('CCENERGY', 'WFN', 'CCSD')
elif name == 'ccsd(t)':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD_T')
core.set_local_option('CCSORT', 'WFN', 'CCSD_T')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD_T')
core.set_local_option('CCENERGY', 'WFN', 'CCSD_T')
elif name == 'a-ccsd(t)':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD_AT')
core.set_local_option('CCSORT', 'WFN', 'CCSD_AT')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD_AT')
core.set_local_option('CCENERGY', 'WFN', 'CCSD_AT')
core.set_local_option('CCHBAR', 'WFN', 'CCSD_AT')
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD_AT')
elif name == 'cc2':
core.set_local_option('TRANSQT2', 'WFN', 'CC2')
core.set_local_option('CCSORT', 'WFN', 'CC2')
core.set_local_option('CCTRANSORT', 'WFN', 'CC2')
core.set_local_option('CCENERGY', 'WFN', 'CC2')
elif name == 'cc3':
core.set_local_option('TRANSQT2', 'WFN', 'CC3')
core.set_local_option('CCSORT', 'WFN', 'CC3')
core.set_local_option('CCTRANSORT', 'WFN', 'CC3')
core.set_local_option('CCENERGY', 'WFN', 'CC3')
elif name == 'eom-cc2':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC2')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCTRANSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC2')
elif name == 'eom-ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CCSD')
core.set_local_option('CCSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCTRANSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CCSD')
# Call a plain energy('ccenergy') and have full control over options, incl. wfn
elif name == 'ccenergy':
pass
# Bypass routine scf if user did something special to get it to converge
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if core.get_global_option("CC_TYPE") == "DF":
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
# Obtain semicanonical orbitals
if (core.get_option('SCF', 'REFERENCE') == 'ROHF') and \
((name in ['ccsd(t)', 'a-ccsd(t)', 'cc2', 'cc3', 'eom-cc2', 'eom-cc3']) or
core.get_option('CCTRANSORT', 'SEMICANONICAL')):
ref_wfn.semicanonicalize()
if core.get_global_option('RUN_CCTRANSORT'):
core.cctransort(ref_wfn)
else:
try:
from psi4.driver.pasture import addins
addins.ccsort_transqt2(ref_wfn)
except:
raise PastureRequiredError("RUN_CCTRANSORT")
ccwfn = core.ccenergy(ref_wfn)
if core.get_global_option('PE'):
ccwfn.pe_state = ref_wfn.pe_state
if name == 'a-ccsd(t)':
core.cchbar(ref_wfn)
lambdawfn = core.cclambda(ref_wfn)
for k, v in lambdawfn.variables().items():
ccwfn.set_variable(k, v)
optstash.restore()
return ccwfn
def run_ccenergy_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a CCSD and CCSD(T) gradient calculation.
"""
optstash = p4util.OptionsState(
['GLOBALS', 'DERTYPE'],
['CCLAMBDA', 'WFN'],
['CCDENSITY', 'WFN'])
core.set_global_option('DERTYPE', 'FIRST')
if core.get_global_option('FREEZE_CORE') not in ["FALSE", "0"]:
raise ValidationError('Frozen core is not available for the CC gradients.')
ccwfn = run_ccenergy(name, **kwargs)
if name == 'cc2':
core.set_local_option('CCHBAR', 'WFN', 'CC2')
core.set_local_option('CCLAMBDA', 'WFN', 'CC2')
core.set_local_option('CCDENSITY', 'WFN', 'CC2')
if name == 'ccsd':
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD')
core.set_local_option('CCDENSITY', 'WFN', 'CCSD')
elif name == 'ccsd(t)':
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD_T')
core.set_local_option('CCDENSITY', 'WFN', 'CCSD_T')
core.cchbar(ccwfn)
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
derivobj = core.Deriv(ccwfn)
grad = derivobj.compute()
del derivobj
ccwfn.set_gradient(grad)
ccwfn.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
core.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
core.set_variable("CURRENT GRADIENT", grad)
optstash.restore()
return ccwfn
def run_bccd(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a Brueckner CCD calculation.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'])
if name == 'bccd':
core.set_local_option('TRANSQT2', 'WFN', 'BCCD')
core.set_local_option('CCSORT', 'WFN', 'BCCD')
core.set_local_option('CCTRANSORT', 'WFN', 'BCCD')
core.set_local_option('CCENERGY', 'WFN', 'BCCD')
elif name == 'bccd(t)':
core.set_local_option('TRANSQT2', 'WFN', 'BCCD_T')
core.set_local_option('CCSORT', 'WFN', 'BCCD_T')
core.set_local_option('CCENERGY', 'WFN', 'BCCD_T')
core.set_local_option('CCTRANSORT', 'WFN', 'BCCD_T')
core.set_local_option('CCTRIPLES', 'WFN', 'BCCD_T')
else:
raise ValidationError("proc.py:run_bccd name %s not recognized" % name)
# Bypass routine scf if user did something special to get it to converge
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Needed for (T).
if (core.get_option('SCF', 'REFERENCE') == 'ROHF'):
ref_wfn.semicanonicalize()
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
core.set_local_option('CCTRANSORT', 'DELETE_TEI', 'false')
bcc_iter_cnt = 0
if (core.get_global_option("RUN_CCTRANSORT")):
sort_func = core.cctransort
else:
try:
from psi4.driver.pasture import addins
core.set_local_option('TRANSQT2', 'DELETE_TEI', 'false')
sort_func = addins.ccsort_transqt2
except:
raise PastureRequiredError("RUN_CCTRANSORT")
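    # Brueckner iterations: re-sort the integrals in the current (rotated) orbital
    # basis and rerun ccenergy until it reports convergence through the
    # 'BRUECKNER CONVERGED' variable (the Brueckner condition of vanishing singles
    # amplitudes), or until BCCD_MAXITER cycles have been performed.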
while True:
sort_func(ref_wfn)
ref_wfn = core.ccenergy(ref_wfn)
core.print_out('Brueckner convergence check: %s\n' % bool(core.variable('BRUECKNER CONVERGED')))
        if core.variable('BRUECKNER CONVERGED'):
break
if bcc_iter_cnt >= core.get_option('CCENERGY', 'BCCD_MAXITER'):
core.print_out("\n\nWarning! BCCD did not converge within the maximum number of iterations.")
core.print_out("You can increase the number of BCCD iterations by changing BCCD_MAXITER.\n\n")
break
bcc_iter_cnt += 1
if name == 'bccd(t)':
core.cctriples(ref_wfn)
optstash.restore()
return ref_wfn
def run_tdscf_excitations(wfn,**kwargs):
states = core.get_option("SCF","TDSCF_STATES")
# some sanity checks
if sum(states) == 0:
raise ValidationError("TDSCF: No states requested in TDSCF_STATES")
# unwrap 1-membered list of states, regardless of symmetry
# we will apportion states per irrep later on
if len(states) == 1:
states = states[0]
# Tie TDSCF_R_CONVERGENCE to D_CONVERGENCE in SCF reference
if core.has_option_changed('SCF', 'TDSCF_R_CONVERGENCE'):
r_convergence = core.get_option('SCF', 'TDSCF_R_CONVERGENCE')
else:
r_convergence = min(1.e-4, core.get_option('SCF', 'D_CONVERGENCE') * 1.e2)
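        # e.g. D_CONVERGENCE = 1.0e-8 gives min(1.0e-4, 1.0e-6) = 1.0e-6, while
        # looser SCF density thresholds are capped at 1.0e-4.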
# "anonymous" return value, as we stash observables in the passed Wavefunction object internally
_ = response.scf_response.tdscf_excitations(wfn,
states=states,
triplets=core.get_option("SCF", "TDSCF_TRIPLETS"),
tda=core.get_option("SCF", "TDSCF_TDA"),
r_convergence=r_convergence,
maxiter=core.get_option("SCF", "TDSCF_MAXITER"),
guess=core.get_option("SCF", "TDSCF_GUESS"),
verbose=core.get_option("SCF", "TDSCF_PRINT"),
coeff_cutoff=core.get_option("SCF", "TDSCF_COEFF_CUTOFF"),
tdm_print=core.get_option("SCF", "TDSCF_TDM_PRINT"))
# Shove variables into global space
for k, v in wfn.variables().items():
core.set_variable(k, v)
return wfn
def run_tdscf_energy(name, **kwargs):
# Get a wfn in case we aren't given one
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
if name is None:
raise ValidationError("TDSCF: No reference wave function!")
else:
            # Note: name.strip('td-') would strip any leading/trailing 't', 'd', or '-'
            # characters (not just the 'td-' prefix), so remove the prefix explicitly.
            ref_wfn = run_scf(name[3:] if name.startswith('td-') else name, **kwargs)
return run_tdscf_excitations(ref_wfn, **kwargs)
def run_scf_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
SCF calculations. This is a simple alias to :py:func:`~proc.run_scf`
    since SCF properties are all handled through oeprop.
"""
core.tstart()
optstash = proc_util.scf_set_reference_local(name)
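    # Illustrative call (assuming the usual driver dispatch), e.g.:
    #     psi4.properties('scf', properties=['dipole', 'quadrupole'])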
properties = kwargs.pop('properties')
# What response do we need?
response_list_vals = list(response.scf_response.property_dicts)
oeprop_list_vals = core.OEProp.valid_methods
oe_properties = []
linear_response = []
unknown_property = []
for prop in properties:
prop = prop.upper()
if prop in response_list_vals:
linear_response.append(prop)
elif (prop in oeprop_list_vals) or ("MULTIPOLE(" in prop):
oe_properties.append(prop)
else:
unknown_property.append(prop)
if "DIPOLE" not in oe_properties:
oe_properties.append("DIPOLE")
# Throw if we dont know what something is
if len(unknown_property):
complete_options = oeprop_list_vals + response_list_vals
alt_method_name = p4util.text.find_approximate_string_matches(unknown_property[0],
complete_options, 2)
alternatives = ""
if len(alt_method_name) > 0:
alternatives = " Did you mean? %s" % (" ".join(alt_method_name))
raise ValidationError("SCF Property: Feature '%s' is not recognized. %s" % (unknown_property[0], alternatives))
# Validate OEProp
if len(oe_properties):
proc_util.oeprop_validator(oe_properties)
if len(linear_response):
optstash_jk = p4util.OptionsState(["SAVE_JK"])
core.set_global_option("SAVE_JK", True)
# Compute the Wavefunction
scf_wfn = run_scf(name, scf_do_properties=False, do_timer=False, **kwargs)
# Run OEProp
oe = core.OEProp(scf_wfn)
oe.set_title(name.upper())
for prop in oe_properties:
oe.add(prop.upper())
oe.compute()
scf_wfn.oeprop = oe
# Always must set SCF dipole (retire components at v1.5)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for cart in ["X", "Y", "Z"]:
core.set_variable("SCF DIPOLE " + cart, core.variable(name + " DIPOLE " + cart))
core.set_variable("SCF DIPOLE", core.variable(name + " DIPOLE")) # P::e SCF
    # Run Linear Response
if len(linear_response):
core.prepare_options_for_module("SCF")
ret = response.scf_response.cpscf_linear_response(scf_wfn, *linear_response,
conv_tol = core.get_global_option("SOLVER_CONVERGENCE"),
max_iter = core.get_global_option("SOLVER_MAXITER"),
print_lvl = (core.get_global_option("PRINT") + 1))
optstash_jk.restore()
core.tstop()
optstash.restore()
return scf_wfn
def run_cc_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
all CC property calculations.
"""
optstash = p4util.OptionsState(
['WFN'],
['DERTYPE'],
['ONEPDM'],
['PROPERTY'],
['CCLAMBDA', 'R_CONVERGENCE'],
['CCEOM', 'R_CONVERGENCE'],
['CCEOM', 'E_CONVERGENCE']) # yapf:disable
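    # Illustrative call (assuming the usual driver dispatch), e.g.:
    #     psi4.properties('ccsd', properties=['dipole', 'polarizability'])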
oneel_properties = core.OEProp.valid_methods
twoel_properties = []
response_properties = ['POLARIZABILITY', 'ROTATION', 'ROA', 'ROA_TENSOR']
excited_properties = ['OSCILLATOR_STRENGTH', 'ROTATIONAL_STRENGTH']
one = []
two = []
response = []
excited = []
invalid = []
if 'properties' in kwargs:
properties = kwargs['properties']
for prop in properties:
prop = prop.upper()
if prop in oneel_properties:
one.append(prop)
elif prop in twoel_properties:
two.append(prop)
elif prop in response_properties:
response.append(prop)
elif prop in excited_properties:
excited.append(prop)
else:
invalid.append(prop)
else:
raise ValidationError("""The "properties" keyword is required with the property() function.""")
# People are used to requesting dipole/quadrupole and getting dipole,quadrupole,mulliken_charges and NO_occupations
if ('DIPOLE' in one) or ('QUADRUPOLE' in one):
one = list(set(one + ['DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'NO_OCCUPATIONS']))
n_one = len(one)
n_two = len(two)
n_response = len(response)
n_excited = len(excited)
n_invalid = len(invalid)
if n_invalid > 0:
print("""The following properties are not currently supported: %s""" % invalid)
if n_excited > 0 and (name not in ['eom-ccsd', 'eom-cc2']):
raise ValidationError("""Excited state CC properties require EOM-CC2 or EOM-CCSD.""")
if (name in ['eom-ccsd', 'eom-cc2']) and n_response > 0:
raise ValidationError("""Cannot (yet) compute response properties for excited states.""")
    if 'ROA' in response:
# Perform distributed roa job
run_roa(name, **kwargs)
return # Don't do anything further
if (n_one > 0 or n_two > 0) and (n_response > 0):
print("""Computing both density- and response-based properties.""")
if name in ['ccsd', 'cc2', 'eom-ccsd', 'eom-cc2']:
this_name = name.upper().replace('-', '_')
core.set_global_option('WFN', this_name)
ccwfn = run_ccenergy(name, **kwargs)
core.set_global_option('WFN', this_name)
else:
raise ValidationError("""CC property name %s not recognized""" % name.upper())
# Need cchbar for everything
core.cchbar(ccwfn)
# Need ccdensity at this point only for density-based props
if n_one > 0 or n_two > 0:
if name == 'eom-ccsd':
core.set_global_option('WFN', 'EOM_CCSD')
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cceom(ccwfn)
elif name == 'eom-cc2':
core.set_global_option('WFN', 'EOM_CC2')
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cceom(ccwfn)
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
# Need ccresponse only for response-type props
if n_response > 0:
core.set_global_option('DERTYPE', 'RESPONSE')
core.cclambda(ccwfn)
for prop in response:
core.set_global_option('PROPERTY', prop)
core.ccresponse(ccwfn)
# Excited-state transition properties
if n_excited > 0:
if name == 'eom-ccsd':
core.set_global_option('WFN', 'EOM_CCSD')
elif name == 'eom-cc2':
core.set_global_option('WFN', 'EOM_CC2')
else:
raise ValidationError("""Unknown excited-state CC wave function.""")
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
# Tight convergence unnecessary for transition properties
core.set_local_option('CCLAMBDA', 'R_CONVERGENCE', 1e-4)
core.set_local_option('CCEOM', 'R_CONVERGENCE', 1e-4)
core.set_local_option('CCEOM', 'E_CONVERGENCE', 1e-5)
core.cceom(ccwfn)
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
if n_one > 0:
# call oe prop for GS density
oe = core.OEProp(ccwfn)
oe.set_title(name.upper())
for oe_name in one:
oe.add(oe_name.upper())
oe.compute()
# call oe prop for each ES density
if name.startswith('eom'):
# copy GS CC DIP/QUAD ... to CC ROOT 0 DIP/QUAD ... if we are doing multiple roots
# retire components at v1.5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if 'dipole' in one:
core.set_variable("CC ROOT 0 DIPOLE X", core.variable("CC DIPOLE X"))
core.set_variable("CC ROOT 0 DIPOLE Y", core.variable("CC DIPOLE Y"))
core.set_variable("CC ROOT 0 DIPOLE Z", core.variable("CC DIPOLE Z"))
if 'quadrupole' in one:
core.set_variable("CC ROOT 0 QUADRUPOLE XX", core.variable("CC QUADRUPOLE XX"))
core.set_variable("CC ROOT 0 QUADRUPOLE XY", core.variable("CC QUADRUPOLE XY"))
core.set_variable("CC ROOT 0 QUADRUPOLE XZ", core.variable("CC QUADRUPOLE XZ"))
core.set_variable("CC ROOT 0 QUADRUPOLE YY", core.variable("CC QUADRUPOLE YY"))
core.set_variable("CC ROOT 0 QUADRUPOLE YZ", core.variable("CC QUADRUPOLE YZ"))
core.set_variable("CC ROOT 0 QUADRUPOLE ZZ", core.variable("CC QUADRUPOLE ZZ"))
if 'dipole' in one:
core.set_variable("CC ROOT 0 DIPOLE", core.variable("CC DIPOLE"))
# core.set_variable("CC ROOT n DIPOLE", core.variable("CC DIPOLE")) # P::e CCENERGY
if 'quadrupole' in one:
core.set_variable("CC ROOT 0 QUADRUPOLE", core.variable("CC QUADRUPOLE"))
# core.set_variable("CC ROOT n QUADRUPOLE", core.variable("CC QUADRUPOLE")) # P::e CCENERGY
n_root = sum(core.get_global_option("ROOTS_PER_IRREP"))
for rn in range(n_root):
oe.set_title("CC ROOT {}".format(rn + 1))
Da = ccwfn.variable("CC ROOT {} Da".format(rn + 1))
oe.set_Da_so(Da)
if core.get_global_option("REFERENCE") == "UHF":
Db = ccwfn.variable("CC ROOT {} Db".format(rn + 1))
oe.set_Db_so(Db)
oe.compute()
core.set_global_option('WFN', 'SCF')
core.revoke_global_option_changed('WFN')
core.set_global_option('DERTYPE', 'NONE')
core.revoke_global_option_changed('DERTYPE')
optstash.restore()
return ccwfn
def run_dfmp2_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DFMP2 property calculation.
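A typical route here is the public driver; minimal sketch (the
properties() call and the 'dipole' keyword are illustrative):
>>> properties('mp2', properties=['dipole'])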
"""
optstash = p4util.OptionsState(
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['ONEPDM'],
['OPDM_RELAX'],
['SCF_TYPE'])
core.set_global_option('ONEPDM', 'TRUE')
core.set_global_option('OPDM_RELAX', 'TRUE')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF') # local set insufficient b/c SCF option read in DFMP2
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
if 'DF' not in core.get_global_option('SCF_TYPE'):
raise ValidationError('DF-MP2 properties need DF-SCF reference.')
properties = kwargs.pop('properties')
proc_util.oeprop_validator(properties)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, scf_do_properties=False, use_c1=True, **kwargs) # C1 certified
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
core.tstart()
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
dfmp2_wfn = core.dfmp2(ref_wfn)
grad = dfmp2_wfn.compute_gradient()
if name == 'scs-mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('SCS-MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('SCS-MP2 CORRELATION ENERGY'))
elif name == 'mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))
# Run OEProp
oe = core.OEProp(dfmp2_wfn)
oe.set_title(name.upper())
for prop in properties:
oe.add(prop.upper())
oe.compute()
dfmp2_wfn.oeprop = oe
# Shove variables into global space
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
core.tstop()
return dfmp2_wfn
def _clean_detci(keep: bool=True):
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
cifl = core.get_option("DETCI", "CI_FILE_START")
for fl in range(cifl, cifl + 4):
if psio.open_check(fl):
psio.close(fl, keep)
def run_detci_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a configuration interaction calculation, namely FCI,
CIn, MPn, and ZAPTn, computing properties.
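Reached via the properties driver; a sketch with example keywords:
>>> properties('fci', properties=['dipole', 'transition_dipole'])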
"""
optstash = p4util.OptionsState(
['OPDM'],
['TDM'])
# Find valid properties
valid_transition = ['TRANSITION_DIPOLE', 'TRANSITION_QUADRUPOLE']
ci_prop = []
ci_trans = []
properties = kwargs.pop('properties')
for prop in properties:
if prop.upper() in valid_transition:
ci_trans.append(prop)
else:
ci_prop.append(prop)
proc_util.oeprop_validator(ci_prop)
core.set_global_option('OPDM', 'TRUE')
if len(ci_trans):
core.set_global_option('TDM', 'TRUE')
# Compute
if name in ['mcscf', 'rasscf', 'casscf']:
ciwfn = run_detcas(name, **kwargs)
else:
ciwfn = run_detci(name, **kwargs)
# All property names are just CI
if 'CI' in name.upper():
name = 'CI'
states = core.get_global_option('avg_states')
nroots = core.get_global_option('num_roots')
if len(states) != nroots:
states = range(nroots)
# Run OEProp
oe = core.OEProp(ciwfn)
oe.set_title(name.upper())
for prop in ci_prop:
oe.add(prop.upper())
# Compute "the" CI density
oe.compute()
ciwfn.oeprop = oe
# If we have more than one root, compute all data
if nroots > 1:
core.print_out("\n ===> %s properties for all CI roots <=== \n\n" % name.upper())
for root in states:
oe.set_title("%s ROOT %d" % (name.upper(), root))
if ciwfn.same_a_b_dens():
oe.set_Da_mo(ciwfn.get_opdm(root, root, "A", True))
else:
oe.set_Da_mo(ciwfn.get_opdm(root, root, "A", True))
oe.set_Db_mo(ciwfn.get_opdm(root, root, "B", True))
oe.compute()
# Transition density matrices
if (nroots > 1) and len(ci_trans):
oe.clear()
for tprop in ci_trans:
oe.add(tprop.upper())
core.print_out("\n ===> %s properties for all CI transition density matrices <=== \n\n" % name.upper())
for root in states[1:]:
oe.set_title("%s ROOT %d -> ROOT %d" % (name.upper(), 0, root))
if ciwfn.same_a_b_dens():
oe.set_Da_mo(ciwfn.get_opdm(0, root, "A", True))
else:
oe.set_Da_mo(ciwfn.get_opdm(0, root, "A", True))
oe.set_Db_mo(ciwfn.get_opdm(0, root, "B", True))
oe.compute()
_clean_detci()
optstash.restore()
return ciwfn
def run_eom_cc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an EOM-CC calculation, namely EOM-CC2, EOM-CCSD, and EOM-CC3.
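Usage sketch through the energy driver (root count is only an example):
>>> set roots_per_irrep [2]
>>> energy('eom-ccsd')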
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'],
['CCHBAR', 'WFN'],
['CCEOM', 'WFN'])
if name == 'eom-ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CCSD')
core.set_local_option('CCSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CCSD')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CCSD')
core.set_local_option('CCEOM', 'WFN', 'EOM_CCSD')
ref_wfn = run_ccenergy('ccsd', **kwargs)
elif name == 'eom-cc2':
user_ref = core.get_option('CCENERGY', 'REFERENCE')
if (user_ref != 'RHF') and (user_ref != 'UHF'):
raise ValidationError('Reference %s for EOM-CC2 is not available.' % user_ref)
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC2')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC2')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CC2')
core.set_local_option('CCEOM', 'WFN', 'EOM_CC2')
ref_wfn = run_ccenergy('cc2', **kwargs)
elif name == 'eom-cc3':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC3')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC3')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC3')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CC3')
core.set_local_option('CCEOM', 'WFN', 'EOM_CC3')
ref_wfn = run_ccenergy('cc3', **kwargs)
core.cchbar(ref_wfn)
core.cceom(ref_wfn)
optstash.restore()
return ref_wfn
# TODO ask whether all these cc modules really leave the wfn unchanged
def run_eom_cc_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an EOM-CCSD gradient calculation.
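Usage sketch through the gradient driver:
>>> gradient('eom-ccsd')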
"""
optstash = p4util.OptionsState(
['CCDENSITY', 'XI'],
['CCDENSITY', 'ZETA'],
['CCLAMBDA', 'ZETA'],
['DERTYPE'],
['CCDENSITY', 'WFN'],
['CCLAMBDA', 'WFN'])
core.set_global_option('DERTYPE', 'FIRST')
if name == 'eom-ccsd':
core.set_local_option('CCLAMBDA', 'WFN', 'EOM_CCSD')
core.set_local_option('CCDENSITY', 'WFN', 'EOM_CCSD')
ref_wfn = run_eom_cc(name, **kwargs)
else:
core.print_out('DGAS: proc.py:1599 hitting an undefined sequence')
core.clean()
raise ValueError('Hit a wall in proc.py:1599')
core.set_local_option('CCLAMBDA', 'ZETA', 'FALSE')
core.set_local_option('CCDENSITY', 'ZETA', 'FALSE')
core.set_local_option('CCDENSITY', 'XI', 'TRUE')
core.cclambda(ref_wfn)
core.ccdensity(ref_wfn)
core.set_local_option('CCLAMBDA', 'ZETA', 'TRUE')
core.set_local_option('CCDENSITY', 'ZETA', 'TRUE')
core.set_local_option('CCDENSITY', 'XI', 'FALSE')
core.cclambda(ref_wfn)
core.ccdensity(ref_wfn)
derivobj = core.Deriv(ref_wfn)
grad = derivobj.compute()
ref_wfn.set_gradient(grad)
optstash.restore()
return ref_wfn
def run_adc_deprecated(*args, **kwargs):
warnings.warn("The method 'adc' has been deprecated, please use 'adc2' instead. "
"The method key 'adc' will be removed in Psi4 1.6.", DeprecationWarning)
return select_adc2(*args, **kwargs)
def run_adc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an algebraic diagrammatic construction calculation.
.. caution:: Get rid of active molecule lines; should be handled in energy().
"""
if core.get_option('ADC', 'REFERENCE') != 'RHF':
raise ValidationError('ADC requires reference RHF')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
return core.adc(ref_wfn)
def run_adcc(name, **kwargs):
"""Prepare and run an ADC calculation in adcc, interpret the result and return
as a wavefunction.
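Sketch of a driver-level call (requires the optional adcc package; root count is an example):
>>> set roots_per_irrep [3]
>>> energy('adc(2)')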
"""
# TODO Maybe it would improve readability if this function were split
# up and the whole thing went to a separate file (like for sapt,
# interface_cfour.py, ...)
try:
import adcc
from adcc.backends import InvalidReference
except ModuleNotFoundError:
raise ValidationError("adcc extras qc_module not available. Try installing "
"via 'pip install adcc' or 'conda install -c adcc adcc'.")
if core.get_option('ADC', 'REFERENCE') not in ["RHF", "UHF"]:
raise ValidationError('adcc requires reference RHF or UHF')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.pop('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs)
# Start timer
do_timer = kwargs.pop("do_timer", True)
if do_timer:
core.tstart()
#
# Build kwargs for adcc
#
kwargs.pop("molecule", None)
if ref_wfn.frzcpi()[0] > 0:
kwargs["frozen_core"] = ref_wfn.frzcpi()[0]
if ref_wfn.frzvpi()[0] > 0:
kwargs["frozen_virtual"] = ref_wfn.frzvpi()[0]
if core.get_option("ADC", "NUM_CORE_ORBITALS"):
kwargs["core_orbitals"] = core.get_option("ADC", "NUM_CORE_ORBITALS")
scf_accuracy = max(core.get_option("SCF", "E_CONVERGENCE"),
core.get_option("SCF", "D_CONVERGENCE"))
if core.get_option("ADC", "R_CONVERGENCE") < 0:
kwargs["conv_tol"] = max(100 * scf_accuracy, 1e-6)
else:
kwargs["conv_tol"] = core.get_option("ADC", "R_CONVERGENCE")
n_roots = core.get_option('ADC', 'ROOTS_PER_IRREP')
if len(n_roots) > 1:
raise ValidationError("adcc can only deal with a single irrep.")
kwargs["n_states"] = n_roots[0]
if core.get_option("ADC", "NUM_GUESSES") > 0:
kwargs["n_guesses"] = core.get_option("ADC", "NUM_GUESSES")
if core.get_option("ADC", "MAX_NUM_VECS") > 0:
kwargs["max_subspace"] = core.get_option("ADC", "MAX_NUM_VECS")
kind = core.get_option("ADC", "KIND").lower()
if isinstance(ref_wfn, core.UHF):
if not core.has_option_changed("ADC", "KIND"):
kind = "any"
elif kind not in ["any", "spin_flip"]:
raise ValidationError("For UHF references the only valid values for 'KIND' are "
"'SPIN_FLIP' or 'ANY' and not '{}'.".format(kind.upper()))
elif kind not in ["singlet", "triplet", "any"]:
raise ValidationError("For RHF references the value '{}' for 'KIND' is "
"not supported.".format(kind.upper()))
kwargs["kind"] = kind
kwargs["max_iter"] = core.get_option("ADC", "MAXITER")
#
# Determine ADC function method from adcc to run ADC
#
adcrunner = {
"cvs-adc(1)": adcc.cvs_adc1, "cvs-adc(2)": adcc.cvs_adc2,
"cvs-adc(2)-x": adcc.cvs_adc2x, "cvs-adc(3)": adcc.cvs_adc3,
"adc(1)": adcc.adc1, "adc(2)": adcc.adc2,
"adc(2)-x": adcc.adc2x, "adc(3)": adcc.adc3,
}
if name not in adcrunner:
raise ValidationError(f"Unsupported ADC method: {name}")
if "cvs" in name and "core_orbitals" not in kwargs:
raise ValidationError("If a CVS-ADC method is requested, the NUM_CORE_ORBITALS option "
"needs to be set.")
if "core_orbitals" in kwargs and "cvs" not in name:
raise ValidationError("The NUM_CORE_ORBITALS option needs to be set to '0' or absent "
"unless a CVS ADC method is requested.")
if "cvs" in name and kwargs["kind"] in ["spin_flip"]:
raise ValidationError("Spin-flip for CVS-ADC variants is not available.")
#
# Check for unsupported options
#
for option in ["PR", "NORM_TOLERANCE", "POLE_MAXITER", "SEM_MAXITER",
"NEWTON_CONVERGENCE", "MEMORY", "CACHELEVEL", "NUM_AMPS_PRINT"]:
if core.has_option_changed("ADC", option):
raise ValidationError(f"ADC backend adcc does not support option '{option}'")
#
# Launch the rocket
#
# Copy thread setup from psi4
try:
adcc.set_n_threads(core.get_num_threads())
except AttributeError:
# Before adcc 0.13.3:
adcc.thread_pool.reinit(core.get_num_threads(), core.get_num_threads())
# Hack to direct the stream-like interface adcc expects to the string interface of Psi4 core
class CoreStream:
def write(self, text):
core.print_out(text)
core.print_out("\n" + adcc.banner(colour=False) + "\n")
try:
state = adcrunner[name](ref_wfn, **kwargs, output=CoreStream())
except InvalidReference as ex:
raise ValidationError("Cannot run adcc because the passed reference wavefunction is "
"not supported in adcc. Check Psi4 SCF parameters. adcc reports: "
"{}".format(str(ex)))
core.print_out("\n")
# TODO Should a non-converged calculation throw?
#
# Interpret results
#
# Note: This wavefunction is not consistent ... the density
# is e.g. not the proper one (i.e. not the MP(n) one)
adc_wfn = core.Wavefunction(ref_wfn.molecule(), ref_wfn.basisset())
adc_wfn.shallow_copy(ref_wfn)
adc_wfn.set_reference_wavefunction(ref_wfn)
adc_wfn.set_name(name)
adc_wfn.set_module("adcc")
# MP(3) energy for CVS-ADC(3) calculations is still a missing feature in adcc
# ... we store this variant here to be able to fall back to MP(2) energies.
is_cvs_adc3 = state.method.level >= 3 and state.ground_state.has_core_occupied_space
# Ground-state energies
mp = state.ground_state
mp_energy = mp.energy(state.method.level if not is_cvs_adc3 else 2)
mp_corr = 0.0
if state.method.level > 1:
core.print_out("Ground state energy breakdown:\n")
core.print_out(" Energy SCF {0:15.8g} [Eh]\n".format(ref_wfn.energy()))
for level in range(2, state.method.level + 1):
if level >= 3 and is_cvs_adc3:
continue
energy = mp.energy_correction(level)
mp_corr += energy
adc_wfn.set_variable(f"MP{level} CORRELATION ENERGY", energy)
adc_wfn.set_variable(f"MP{level} TOTAL ENERGY", mp.energy(level))
core.print_out(f" Energy correlation MP{level} {energy:15.8g} [Eh]\n")
core.print_out(" Energy total {0:15.8g} [Eh]\n".format(mp_energy))
adc_wfn.set_variable("CURRENT CORRELATION ENERGY", mp_corr) # P::e ADC
adc_wfn.set_variable("CURRENT ENERGY", mp_energy) # P::e ADC
# Set results of excited-states computation
# TODO Does not work: Can't use strings
# adc_wfn.set_variable("excitation kind", state.kind)
adc_wfn.set_variable("ADC ITERATIONS", state.n_iter) # P::e ADC
adc_wfn.set_variable(name + " excitation energies",
core.Matrix.from_array(state.excitation_energy.reshape(-1, 1)))
adc_wfn.set_variable("number of excited states", len(state.excitation_energy))
core.print_out("\n\n ==> Excited states summary <== \n")
core.print_out("\n" + state.describe(oscillator_strengths=False) + "\n")
# TODO Setting the excitation amplitude elements inside the wavefunction is a little
# challenging, since for each excitation vector one needs to extract the elements
# and map the indices from the adcc to the Psi4 convention. For this reason it
# is not yet done.
core.print_out("\n ==> Dominant amplitudes per state <== \n\n")
tol_ampl = core.get_option("ADC", "CUTOFF_AMPS_PRINT")
core.print_out(state.describe_amplitudes(tolerance=tol_ampl) + "\n\n")
# Shove variables into global space
for k, v in adc_wfn.variables().items():
core.set_variable(k, v)
if do_timer:
core.tstop()
adc_wfn.adcc_state = state
return adc_wfn
def run_adcc_property(name, **kwargs):
"""Run an ADC excited-states property calculation in adcc
and return the resulting properties.
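Sketch of a driver-level property call (requires adcc; keywords are examples):
>>> properties('adc(2)', properties=['oscillator_strength', 'transition_dipole'])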
"""
# TODO Things available in ADCC, but not yet implemented here:
# Export of difference and transition density matrices for all states
properties = [prop.upper() for prop in kwargs.pop('properties')]
valid_properties = ['DIPOLE', 'OSCILLATOR_STRENGTH', 'TRANSITION_DIPOLE',
'ROTATIONAL_STRENGTH']
unknown_properties = [prop for prop in properties if prop not in valid_properties]
if unknown_properties:
alternatives = ""
alt_method_name = p4util.text.find_approximate_string_matches(unknown_properties[0],
valid_properties, 2)
if alt_method_name:
alternatives = " Did you mean? " + " ".join(alt_method_name)
raise ValidationError("ADC property: Feature '{}' is not recognized. {}"
"".format(unknown_properties[0], alternatives))
# Start timer
do_timer = kwargs.pop("do_timer", True)
if do_timer:
core.tstart()
adc_wfn = run_adcc(name, do_timer=False, **kwargs)
state = adc_wfn.adcc_state
hf = state.reference_state
mp = state.ground_state
# Formats and indention
ind = " "
def format_vector(label, data):
assert data.ndim == 1
return f"{label:<40s} " + " ".join(f"{d:12.6g}" for d in data)
if "DIPOLE" in properties:
lines = ["\nGround state properties"]
lines += [ind + "Hartree-Fock (HF)"]
lines += [ind + ind + format_vector("Dipole moment (in a.u.)", hf.dipole_moment)]
if state.method.level > 1:
lines += [ind + "Møller Plesset 2nd order (MP2)"]
lines += [ind + ind + format_vector("Dipole moment (in a.u.)", mp.dipole_moment(2))]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i, cart in enumerate(["X", "Y", "Z"]):
# retire components at v1.5
adc_wfn.set_variable("MP2 dipole " + cart, mp.dipole_moment(2)[i])
adc_wfn.set_variable("current dipole " + cart, mp.dipole_moment(2)[i])
adc_wfn.set_variable("MP2 dipole", mp.dipole_moment(2))
adc_wfn.set_variable("current dipole", mp.dipole_moment(2))
lines += [""]
core.print_out("\n".join(lines) + "\n")
gauge = core.get_option("ADC", "GAUGE").lower()
if gauge == "velocity":
gauge_short = "VEL"
elif gauge == "length":
gauge_short = "LEN"
else:
raise ValidationError(f"Gauge {gauge} not recognised for ADC calculations.")
computed = {}
if any(prop in properties for prop in ("TRANSITION_DIPOLE", "OSCILLATOR_STRENGTH")):
data = state.transition_dipole_moment
computed["Transition dipole moment (in a.u.)"] = data
adc_wfn.set_variable(f"{name} transition dipoles", core.Matrix.from_array(data))
if "OSCILLATOR_STRENGTH" in properties:
if gauge == "velocity":
data = state.oscillator_strength_velocity.reshape(-1, 1)
else:
data = state.oscillator_strength.reshape(-1, 1)
computed[f"Oscillator strength ({gauge} gauge)"] = data
adc_wfn.set_variable(f"{name} oscillator strengths ({gauge_short})",
core.Matrix.from_array(data))
if "ROTATIONAL_STRENGTH" in properties:
data = state.rotatory_strength.reshape(-1, 1)
computed["Rotational strength (velocity gauge)"] = data
adc_wfn.set_variable(f"{name} rotational strengths (VEL)",
core.Matrix.from_array(data))
if "DIPOLE" in properties:
data = state.state_dipole_moment
computed["State dipole moment (in a.u.)"] = data
adc_wfn.set_variable(f"{name} state dipoles", core.Matrix.from_array(data))
core.print_out("\nExcited state properties:\n")
n_states = adc_wfn.variable("number of excited states")
for i in range(int(n_states)):
lines = [ind + f"Excited state {i}"]
for prop, data in sorted(computed.items()):
lines += [ind + ind + format_vector(prop, data[i])]
core.print_out("\n".join(lines) + "\n")
# Shove variables into global space
for k, v in adc_wfn.variables().items():
core.set_variable(k, v)
if do_timer:
core.tstop()
return adc_wfn
def run_detci(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a configuration interaction calculation, namely FCI,
CIn, MPn, and ZAPTn.
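Usage sketch:
>>> energy('fci')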
"""
optstash = p4util.OptionsState(
['DETCI', 'WFN'],
['DETCI', 'MAX_NUM_VECS'],
['DETCI', 'MPN_ORDER_SAVE'],
['DETCI', 'MPN'],
['DETCI', 'FCI'],
['DETCI', 'EX_LEVEL'])
if core.get_option('DETCI', 'REFERENCE') not in ['RHF', 'ROHF']:
raise ValidationError('Reference %s for DETCI is not available.' %
core.get_option('DETCI', 'REFERENCE'))
if name == 'zapt':
core.set_local_option('DETCI', 'WFN', 'ZAPTN')
level = kwargs['level']
maxnvect = int((level + 1) / 2) + (level + 1) % 2
core.set_local_option('DETCI', 'MAX_NUM_VECS', maxnvect)
if (level + 1) % 2:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 2)
else:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 1)
elif name in ['mp', 'mp2', 'mp3', 'mp4']:
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'MPN', 'TRUE')
if name == 'mp2':
level = 2
elif name == 'mp3':
level = 3
elif name == 'mp4':
level = 4
else:
level = kwargs['level']
maxnvect = int((level + 1) / 2) + (level + 1) % 2
core.set_local_option('DETCI', 'MAX_NUM_VECS', maxnvect)
if (level + 1) % 2:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 2)
else:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 1)
elif name == 'ccsd':
# untested
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'CC', 'TRUE')
core.set_local_option('DETCI', 'CC_EX_LEVEL', 2)
elif name == 'fci':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'FCI', 'TRUE')
elif name == 'cisd':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 2)
elif name == 'cisdt':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 3)
elif name == 'cisdtq':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 4)
elif name == 'ci':
core.set_local_option('DETCI', 'WFN', 'DETCI')
level = kwargs['level']
core.set_local_option('DETCI', 'EX_LEVEL', level)
elif name == 'detci':
pass
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
ciwfn = core.detci(ref_wfn)
# Shove variables into global space
for k, v in ciwfn.variables().items():
core.set_variable(k, v)
print_nos = False
if core.get_option("DETCI", "NAT_ORBS"):
ciwfn.ci_nat_orbs()
print_nos = True
proc_util.print_ci_results(ciwfn, name.upper(), ciwfn.variable("HF TOTAL ENERGY"), ciwfn.variable("CURRENT ENERGY"), print_nos)
core.print_out("\t\t \"A good bug is a dead bug\" \n\n");
core.print_out("\t\t\t - Starship Troopers\n\n");
core.print_out("\t\t \"I didn't write FORTRAN. That's the problem.\"\n\n");
core.print_out("\t\t\t - Edward Valeev\n");
if core.get_global_option("DIPMOM") and ("mp" not in name.lower()):
# We always would like to print a little dipole information
oeprop = core.OEProp(ciwfn)
oeprop.set_title(name.upper())
oeprop.add("DIPOLE")
oeprop.compute()
ciwfn.oeprop = oeprop
# retire components in v1.5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
core.set_variable("CURRENT DIPOLE X", core.variable(name.upper() + " DIPOLE X"))
core.set_variable("CURRENT DIPOLE Y", core.variable(name.upper() + " DIPOLE Y"))
core.set_variable("CURRENT DIPOLE Z", core.variable(name.upper() + " DIPOLE Z"))
core.set_variable("CURRENT DIPOLE", core.variable(name.upper() + " DIPOLE"))
ciwfn.cleanup_ci()
ciwfn.cleanup_dpd()
_clean_detci()
optstash.restore()
return ciwfn
def run_dfmp2(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted MP2 calculation.
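Usage sketch (assuming the default DF routing for MP2):
>>> energy('mp2')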
"""
optstash = p4util.OptionsState(
['DF_BASIS_MP2'],
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
core.tstart()
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
if core.get_global_option('REFERENCE') == "ROHF":
ref_wfn.semicanonicalize()
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
dfmp2_wfn = core.dfmp2(ref_wfn)
dfmp2_wfn.compute_energy()
if name == 'scs-mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('SCS-MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('SCS-MP2 CORRELATION ENERGY'))
elif name == 'mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))
# Shove variables into global space
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
core.tstop()
return dfmp2_wfn
def run_dfep2(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted EP2 (second-order electron propagator) calculation.
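Usage sketch (assuming the method key 'ep2'):
>>> energy('ep2')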
"""
core.tstart()
optstash = p4util.OptionsState(
['DF_BASIS_MP2'],
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if core.get_global_option('REFERENCE') != "RHF":
raise ValidationError("DF-EP2 is not available for %s references." %
core.get_global_option('REFERENCE'))
# Build the wavefunction
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_EP2",
core.get_option("DFEP2", "DF_BASIS_EP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_EP2", aux_basis)
dfep2_wfn = core.DFEP2Wavefunction(ref_wfn)
# Figure out what we're doing
if core.has_option_changed('DFEP2', 'EP2_ORBITALS'):
ep2_input = core.get_global_option("EP2_ORBITALS")
else:
n_ip = core.get_global_option("EP2_NUM_IP")
n_ea = core.get_global_option("EP2_NUM_EA")
eps = np.hstack(dfep2_wfn.epsilon_a().nph)
irrep_map = np.hstack([np.ones_like(dfep2_wfn.epsilon_a().nph[x]) * x for x in range(dfep2_wfn.nirrep())])
sort = np.argsort(eps)
ip_map = sort[dfep2_wfn.nalpha() - n_ip:dfep2_wfn.nalpha()]
ea_map = sort[dfep2_wfn.nalpha():dfep2_wfn.nalpha() + n_ea]
ep2_input = [[] for x in range(dfep2_wfn.nirrep())]
nalphapi = tuple(dfep2_wfn.nalphapi())
# Add IP info
ip_info = np.unique(irrep_map[ip_map], return_counts=True)
for irrep, cnt in zip(*ip_info):
irrep = int(irrep)
ep2_input[irrep].extend(range(nalphapi[irrep] - cnt, nalphapi[irrep]))
# Add EA info
ea_info = np.unique(irrep_map[ea_map], return_counts=True)
for irrep, cnt in zip(*ea_info):
irrep = int(irrep)
ep2_input[irrep].extend(range(nalphapi[irrep], nalphapi[irrep] + cnt))
# Compute
ret = dfep2_wfn.compute(ep2_input)
# Resort it...
ret_eps = []
for h in range(dfep2_wfn.nirrep()):
ep2_data = ret[h]
inp_data = ep2_input[h]
for i in range(len(ep2_data)):
tmp = [h, ep2_data[i][0], ep2_data[i][1], dfep2_wfn.epsilon_a().get(h, inp_data[i]), inp_data[i]]
ret_eps.append(tmp)
ret_eps.sort(key=lambda x: x[3])
h2ev = constants.hartree2ev
irrep_labels = dfep2_wfn.molecule().irrep_labels()
core.print_out(" ==> Results <==\n\n")
core.print_out(" %8s %12s %12s %8s\n" % ("Orbital", "Koopmans (eV)", "EP2 (eV)", "EP2 PS"))
core.print_out(" ----------------------------------------------\n")
for irrep, ep2, ep2_ps, kt, pos in ret_eps:
label = str(pos + 1) + irrep_labels[irrep]
core.print_out(" %8s % 12.3f % 12.3f % 6.3f\n" % (label, (kt * h2ev), (ep2 * h2ev), ep2_ps))
core.set_variable("EP2 " + label.upper() + " ENERGY", ep2)
core.print_out(" ----------------------------------------------\n\n")
# Figure out the IP and EA
sorted_vals = np.array([x[1] for x in ret_eps])
ip_vals = sorted_vals[sorted_vals < 0]
ea_vals = sorted_vals[sorted_vals > 0]
ip_value = None
ea_value = None
if len(ip_vals):
core.set_variable("EP2 IONIZATION POTENTIAL", ip_vals[-1])
core.set_variable("CURRENT ENERGY", ip_vals[-1])
if len(ea_vals):
core.set_variable("EP2 ELECTRON AFFINITY", ea_vals[0])
if core.variable("EP2 IONIZATION POTENTIAL") == 0.0:
core.set_variable("CURRENT ENERGY", ea_vals[0])
core.print_out(" EP2 has completed successfully!\n\n")
core.tstop()
return dfep2_wfn
def run_dlpnomp2(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DLPNO-MP2 calculation.
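Usage sketch:
>>> energy('dlpno-mp2')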
"""
optstash = p4util.OptionsState(
['DF_BASIS_MP2'],
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
# DLPNO-MP2 is only DF
if core.get_global_option('MP2_TYPE') != "DF":
raise ValidationError(""" DLPNO-MP2 is only implemented with density fitting.\n"""
""" 'mp2_type' must be set to 'DF'.\n""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
elif ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DLPNO-MP2 does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
if core.get_global_option('REFERENCE') != "RHF":
raise ValidationError("DLPNO-MP2 is not available for %s references." %
core.get_global_option('REFERENCE'))
core.tstart()
core.print_out('\n')
p4util.banner('DLPNO-MP2')
core.print_out('\n')
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DLPNO", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
dlpnomp2_wfn = core.dlpno(ref_wfn)
dlpnomp2_wfn.compute_energy()
if name == 'scs-dlpno-mp2':
dlpnomp2_wfn.set_variable('CURRENT ENERGY', dlpnomp2_wfn.variable('SCS-MP2 TOTAL ENERGY'))
dlpnomp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dlpnomp2_wfn.variable('SCS-MP2 CORRELATION ENERGY'))
elif name == 'dlpno-mp2':
dlpnomp2_wfn.set_variable('CURRENT ENERGY', dlpnomp2_wfn.variable('MP2 TOTAL ENERGY'))
dlpnomp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dlpnomp2_wfn.variable('MP2 CORRELATION ENERGY'))
# Shove variables into global space
for k, v in dlpnomp2_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
core.tstop()
return dlpnomp2_wfn
def run_dmrgscf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DMRG-SCF calculation.
"""
optstash = p4util.OptionsState(
['SCF_TYPE'],
['DMRG', 'DMRG_CASPT2_CALC'])
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
if 'CASPT2' in name.upper():
core.set_local_option("DMRG", "DMRG_CASPT2_CALC", True)
dmrg_wfn = core.dmrg(ref_wfn)
optstash.restore()
# Shove variables into global space
for k, v in dmrg_wfn.variables().items():
core.set_variable(k, v)
return dmrg_wfn
def run_dmrgci(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DMRG-CI calculation.
"""
optstash = p4util.OptionsState(
['SCF_TYPE'],
['DMRG', 'DMRG_SCF_MAX_ITER'])
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
core.set_local_option('DMRG', 'DMRG_SCF_MAX_ITER', 1)
dmrg_wfn = core.dmrg(ref_wfn)
optstash.restore()
# Shove variables into global space
for k, v in dmrg_wfn.variables().items():
core.set_variable(k, v)
return dmrg_wfn
def run_psimrcc(name, **kwargs):
"""Function encoding sequence of PSI module calls for a PSIMRCC computation
using a reference from the MCSCF module
"""
mcscf_wfn = run_mcscf(name, **kwargs)
psimrcc_wfn = core.psimrcc(mcscf_wfn)
# Shove variables into global space
for k, v in psimrcc_wfn.variables().items():
core.set_variable(k, v)
return psimrcc_wfn
def run_psimrcc_scf(name, **kwargs):
"""Function encoding sequence of PSI module calls for a PSIMRCC computation
using a reference from the SCF module
"""
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
psimrcc_wfn = core.psimrcc(ref_wfn)
# Shove variables into global space
for k, v in psimrcc_wfn.variables().items():
core.set_variable(k, v)
return psimrcc_wfn
def run_sapt(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a SAPT calculation of any level.
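Usage sketch (the active molecule is expected to define two monomer fragments):
>>> energy('sapt0')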
"""
optstash = p4util.OptionsState(['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_basis = kwargs.pop('sapt_basis', 'dimer')
sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, sapt_basis)
# Need to ensure consistent orbital freezing
# between monomer and dimer computations
monomerA_basis = core.BasisSet.build(monomerA, "BASIS", core.get_global_option("BASIS"))
monomerB_basis = core.BasisSet.build(monomerB, "BASIS", core.get_global_option("BASIS"))
nfc_ab = monomerA_basis.n_frozen_core() + monomerB_basis.n_frozen_core()
if (core.get_option('SCF', 'REFERENCE') != 'RHF') and (name.upper() != "SAPT0"):
raise ValidationError('Only SAPT0 supports a reference different from \"reference rhf\".')
do_delta_mp2 = name.endswith('dmp2')
do_empirical_disp = '-d' in name.lower()
if do_empirical_disp:
# Make sure we are turning SAPT0 dispersion off
core.set_local_option('SAPT', 'SAPT0_E10', True)
core.set_local_option('SAPT', 'SAPT0_E20IND', True)
core.set_local_option('SAPT', 'SAPT0_E20Disp', False)
ri = core.get_global_option('SCF_TYPE')
df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
# inquire if above at all applies to dfmp2
core.IO.set_default_namespace('dimer')
core.print_out('\n')
p4util.banner('Dimer HF')
core.print_out('\n')
# Compute dimer wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.set_global_option('DF_INTS_IO', 'SAVE')
optstash2 = p4util.OptionsState(['NUM_FROZEN_DOCC'])
core.set_global_option("NUM_FROZEN_DOCC", nfc_ab)
core.timer_on("SAPT: Dimer SCF")
dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
core.timer_off("SAPT: Dimer SCF")
if do_delta_mp2:
select_mp2(name, ref_wfn=dimer_wfn, **kwargs)
mp2_corl_interaction_e = core.variable('MP2 CORRELATION ENERGY')
optstash2.restore()
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.set_global_option('DF_INTS_IO', 'LOAD')
# Compute Monomer A wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.IO.change_file_namespace(97, 'dimer', 'monomerA')
core.IO.set_default_namespace('monomerA')
core.print_out('\n')
p4util.banner('Monomer A HF')
core.print_out('\n')
core.timer_on("SAPT: Monomer A SCF")
monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)
core.timer_off("SAPT: Monomer A SCF")
if do_delta_mp2:
select_mp2(name, ref_wfn=monomerA_wfn, **kwargs)
mp2_corl_interaction_e -= core.variable('MP2 CORRELATION ENERGY')
# Compute Monomer B wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
core.IO.set_default_namespace('monomerB')
core.print_out('\n')
p4util.banner('Monomer B HF')
core.print_out('\n')
core.timer_on("SAPT: Monomer B SCF")
monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
core.timer_off("SAPT: Monomer B SCF")
# Delta MP2
if do_delta_mp2:
select_mp2(name, ref_wfn=monomerB_wfn, **kwargs)
mp2_corl_interaction_e -= core.variable('MP2 CORRELATION ENERGY')
core.set_variable("SAPT MP2 CORRELATION ENERGY", mp2_corl_interaction_e) # P::e SAPT
core.set_global_option('DF_INTS_IO', df_ints_io)
if core.get_option('SCF', 'REFERENCE') == 'RHF':
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
core.IO.set_default_namespace('dimer')
core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)
if name in ['sapt0', 'ssapt0']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
elif name == 'sapt2':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
elif name in ['sapt2+', 'sapt2+dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+(3)', 'sapt2+(3)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+3', 'sapt2+3dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+(ccd)', 'sapt2+(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name in ['sapt2+(3)(ccd)', 'sapt2+(3)(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name in ['sapt2+3(ccd)', 'sapt2+3(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
# Make sure we are not going to run CPHF on ROHF, since its MO Hessian
# is not SPD
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
core.set_local_option('SAPT', 'COUPLED_INDUCTION', False)
core.print_out(' Coupled induction not available for ROHF.\n')
core.print_out(' Proceeding with uncoupled induction only.\n')
core.print_out(" Constructing Basis Sets for SAPT...\n\n")
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT", core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
if core.get_global_option("DF_BASIS_ELST") == "":
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
else:
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST", core.get_global_option("DF_BASIS_ELST"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
core.print_out('\n')
p4util.banner(name.upper())
core.print_out('\n')
e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
dimer_wfn.set_module("sapt")
from psi4.driver.qcdb.psivardefs import sapt_psivars
p4util.expand_psivars(sapt_psivars())
optstash.restore()
# Get the SAPT name right if doing empirical dispersion
if do_empirical_disp:
sapt_name = "sapt0"
else:
sapt_name = name
# Make sure we got induction, otherwise replace it with uncoupled induction
which_ind = 'IND'
target_ind = 'IND'
if not core.has_variable(' '.join((sapt_name.upper(), which_ind, 'ENERGY'))):
which_ind = 'IND,U'
for term in ['ELST', 'EXCH', 'DISP', 'TOTAL']:
core.set_variable(' '.join(['SAPT', term, 'ENERGY']),
core.variable(' '.join([sapt_name.upper(), term, 'ENERGY'])))
# Special induction case
core.set_variable(' '.join(['SAPT', target_ind, 'ENERGY']),
core.variable(' '.join([sapt_name.upper(), which_ind, 'ENERGY'])))
core.set_variable('CURRENT ENERGY', core.variable('SAPT TOTAL ENERGY'))
# Empirical dispersion
if do_empirical_disp:
proc_util.sapt_empirical_dispersion(name, dimer_wfn)
return dimer_wfn
def run_sapt_ct(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a charge-transfer SAPT calculation of any level.
"""
optstash = p4util.OptionsState(
['SCF_TYPE'])
if 'ref_wfn' in kwargs:
core.print_out('\nWarning! Argument ref_wfn is not valid for sapt computations\n')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")
monomerAm = sapt_dimer.extract_subsets(1)
monomerAm.set_name('monomerAm')
monomerBm = sapt_dimer.extract_subsets(2)
monomerBm.set_name('monomerBm')
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError('SAPT requires \"reference rhf\".')
ri = core.get_global_option('SCF_TYPE')
df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
# inquire if above at all applies to dfmp2
core.IO.set_default_namespace('dimer')
core.print_out('\n')
p4util.banner('Dimer HF')
core.print_out('\n')
core.set_global_option('DF_INTS_IO', 'SAVE')
dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
core.set_global_option('DF_INTS_IO', 'LOAD')
if (ri == 'DF'):
core.IO.change_file_namespace(97, 'dimer', 'monomerA')
core.IO.set_default_namespace('monomerA')
core.print_out('\n')
p4util.banner('Monomer A HF (Dimer Basis)')
core.print_out('\n')
monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)
if (ri == 'DF'):
core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
core.IO.set_default_namespace('monomerB')
core.print_out('\n')
p4util.banner('Monomer B HF (Dimer Basis)')
core.print_out('\n')
monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
core.set_global_option('DF_INTS_IO', df_ints_io)
core.IO.set_default_namespace('monomerAm')
core.print_out('\n')
p4util.banner('Monomer A HF (Monomer Basis)')
core.print_out('\n')
monomerAm_wfn = scf_helper('RHF', molecule=monomerAm, **kwargs)
core.IO.set_default_namespace('monomerBm')
core.print_out('\n')
p4util.banner('Monomer B HF (Monomer Basis)')
core.print_out('\n')
monomerBm_wfn = scf_helper('RHF', molecule=monomerBm, **kwargs)
core.IO.set_default_namespace('dimer')
core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)
if name == 'sapt0-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
elif name == 'sapt2-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
elif name == 'sapt2+-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
elif name == 'sapt2+(3)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
elif name == 'sapt2+3-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
elif name == 'sapt2+(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name == 'sapt2+(3)(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name == 'sapt2+3(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
core.print_out('\n')
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT",
core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
if core.get_global_option("DF_BASIS_ELST") == "":
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
else:
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST",
core.get_global_option("DF_BASIS_ELST"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
core.print_out('\n')
p4util.banner('SAPT Charge Transfer')
core.print_out('\n')
core.print_out('\n')
p4util.banner('Dimer Basis SAPT')
core.print_out('\n')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
CTd = core.variable('SAPT CT ENERGY')
dimer_wfn.set_module("sapt")
core.print_out('\n')
p4util.banner('Monomer Basis SAPT')
core.print_out('\n')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerAm', 'dimer')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerBm', 'dimer')
e_sapt = core.sapt(dimer_wfn, monomerAm_wfn, monomerBm_wfn)
CTm = core.variable('SAPT CT ENERGY')
CT = CTd - CTm
units = (1000.0, constants.hartree2kcalmol, constants.hartree2kJmol)
core.print_out('\n\n')
core.print_out(' SAPT Charge Transfer Analysis\n')
core.print_out(' ------------------------------------------------------------------------------------------------\n')
core.print_out(' SAPT Induction (Dimer Basis) %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
tuple(CTd * u for u in units))
core.print_out(' SAPT Induction (Monomer Basis)%12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
tuple(CTm * u for u in units))
core.print_out(' SAPT Charge Transfer %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n\n' %
tuple(CT * u for u in units))
core.set_variable("SAPT CT ENERGY", CT) # P::e SAPT
optstash.restore()
return dimer_wfn
def run_fisapt(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an F/ISAPT0 computation.
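Usage sketch:
>>> energy('fisapt0')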
"""
optstash = p4util.OptionsState(['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! FISAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_dimer.update_geometry() # make sure since mol from wfn, kwarg, or P::e
# Shifting to C1 so we need to copy the active molecule
if sapt_dimer.schoenflies_symbol() != 'c1':
core.print_out(' FISAPT does not make use of molecular symmetry, further calculations in C1 point group.\n')
sapt_dimer = sapt_dimer.clone()
sapt_dimer.reset_point_group('c1')
sapt_dimer.fix_orientation(True)
sapt_dimer.fix_com(True)
sapt_dimer.update_geometry()
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError('FISAPT requires \"reference rhf\".')
if ref_wfn is None:
core.timer_on("FISAPT: Dimer SCF")
ref_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
core.timer_off("FISAPT: Dimer SCF")
core.print_out(" Constructing Basis Sets for FISAPT...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(),
"DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT",
core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
sapt_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SAPT", core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"),
ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SAPT", sapt_basis)
minao = core.BasisSet.build(ref_wfn.molecule(), "BASIS", core.get_global_option("MINAO_BASIS"))
ref_wfn.set_basisset("MINAO", minao)
# Turn off dispersion for -d variants
if "-d" in name.lower():
core.set_local_option("FISAPT", "FISAPT_DO_FSAPT_DISP", False)
fisapt_wfn = core.FISAPT(ref_wfn)
from .sapt import fisapt_proc
fisapt_wfn.compute_energy(external_potentials=kwargs.get("external_potentials", None))
# Compute -D dispersion
if "-d" in name.lower():
proc_util.sapt_empirical_dispersion(name, ref_wfn)
optstash.restore()
return ref_wfn
def run_mrcc(name, **kwargs):
"""Function that prepares environment and input files
for a calculation calling Kallay's MRCC code.
"""
# Check to see if we really need to run the SCF code.
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
vscf = ref_wfn.variable('SCF TOTAL ENERGY')
# The parse_arbitrary_order method provides us the following information.
# We require that 'level' be provided; it is a dictionary
# of settings to be passed to core.mrcc.
if 'level' not in kwargs:
raise ValidationError('level parameter was not provided.')
level = kwargs['level']
# Fullname is the string we need to search for in iface
fullname = level['fullname']
# User can provide 'keep' to the method.
# When provided, do not delete the MRCC scratch directory.
keep = False
if 'keep' in kwargs:
keep = kwargs['keep']
# Save current directory location
current_directory = os.getcwd()
# Find environment by merging PSIPATH and PATH environment variables
lenv = {
'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
':' + os.environ.get('PATH'),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
}
# Filter out None values as subprocess will fault on them
lenv = {k: v for k, v in lenv.items() if v is not None}
# Need to move to the scratch directory, preferably into a separate directory in that location
psi_io = core.IOManager.shared_object()
os.chdir(psi_io.get_default_path())
# Make new directory specifically for mrcc
mrcc_tmpdir = 'mrcc_' + str(os.getpid())
if 'path' in kwargs:
mrcc_tmpdir = kwargs['path']
# Check to see if directory already exists, if not, create.
if os.path.exists(mrcc_tmpdir) is False:
os.mkdir(mrcc_tmpdir)
# Move into the new directory
os.chdir(mrcc_tmpdir)
# Generate integrals and input file (dumps files to the current directory)
core.mrcc_generate_input(ref_wfn, level)
# Load the fort.56 file
# and dump a copy into the outfile
core.print_out('\n===== Begin fort.56 input for MRCC ======\n')
core.print_out(open('fort.56', 'r').read())
core.print_out('===== End fort.56 input for MRCC ======\n')
# Modify the environment:
# PGI Fortran prints a warning to the screen if STOP is used
lenv['NO_STOP_MESSAGE'] = '1'
# Obtain the number of threads MRCC should use
lenv['OMP_NUM_THREADS'] = str(core.get_num_threads())
# If the user provided MRCC_OMP_NUM_THREADS set the environ to it
if core.has_option_changed('MRCC', 'MRCC_OMP_NUM_THREADS'):
lenv['OMP_NUM_THREADS'] = str(core.get_option('MRCC', 'MRCC_OMP_NUM_THREADS'))
# Call dmrcc, directing all screen output to the output file
external_exe = 'dmrcc'
try:
retcode = subprocess.Popen([external_exe], bufsize=0, stdout=subprocess.PIPE, env=lenv)
except OSError as e:
sys.stderr.write('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
core.print_out('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
message = ("Program %s not found in path or execution failed: %s\n" % (external_exe, e.strerror))
raise ValidationError(message)
c4out = ''
while True:
data = retcode.stdout.readline()
if not data:
break
core.print_out(data.decode('utf-8'))
c4out += data.decode('utf-8')
# Scan the iface file and grab the energies.
ene = 0.0
for line in open('iface'):
fields = line.split()
m = fields[1]
try:
ene = float(fields[5])
if m == "MP(2)":
m = "MP2"
core.set_variable(m + ' TOTAL ENERGY', ene)
core.set_variable(m + ' CORRELATION ENERGY', ene - vscf)
except ValueError:
continue
# The last 'ene' in iface is the one the user requested.
core.set_variable('CURRENT ENERGY', ene)
core.set_variable('CURRENT CORRELATION ENERGY', ene - vscf)
# Load the iface file
iface = open('iface', 'r')
iface_contents = iface.read()
# Delete mrcc tempdir
os.chdir('..')
try:
# Delete unless we're told not to
if keep is False and 'path' not in kwargs:
shutil.rmtree(mrcc_tmpdir)
except OSError as e:
print('Unable to remove MRCC temporary directory %s' % e, file=sys.stderr)
exit(1)
# Return to submission directory
os.chdir(current_directory)
# If we're told to keep the files or the user provided a path, report where they are.
if (keep != False or ('path' in kwargs)):
core.print_out('\nMRCC scratch files have been kept.\n')
core.print_out('They can be found in ' + mrcc_tmpdir)
# Dump iface contents to output
core.print_out('\n')
p4util.banner('Full results from MRCC')
core.print_out('\n')
core.print_out(iface_contents)
return ref_wfn
def run_fnodfcc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DF-CCSD(T) computation.
>>> set cc_type df
>>> energy('fno-ccsd(t)')
"""
kwargs = p4util.kwargs_lower(kwargs)
# stash user options
optstash = p4util.OptionsState(
['FNOCC', 'COMPUTE_TRIPLES'],
['FNOCC', 'DFCC'],
['FNOCC', 'NAT_ORBS'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'DF_BASIS_CC'],
['SCF', 'DF_BASIS_SCF'],
['SCF', 'DF_INTS_IO'])
core.set_local_option('FNOCC', 'DFCC', True)
core.set_local_option('FNOCC', 'RUN_CEPA', False)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError(f"""Error: {name} requires 'reference rhf'.""")
def set_cholesky_from(mtd_type):
type_val = core.get_global_option(mtd_type)
if type_val == 'CD':
core.set_local_option('FNOCC', 'DF_BASIS_CC', 'CHOLESKY')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
optstash.add_option(['SCF_TYPE'])
core.set_global_option('SCF_TYPE', 'CD')
core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
elif type_val in ['DISK_DF', 'DF']:
if core.get_option('FNOCC', 'DF_BASIS_CC') == 'CHOLESKY':
core.set_local_option('FNOCC', 'DF_BASIS_CC', '')
proc_util.check_disk_df(name.upper(), optstash)
else:
raise ValidationError("""Invalid type '%s' for DFCC""" % type_val)
# triples?
if name == 'ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
set_cholesky_from('CC_TYPE')
elif name == 'ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
set_cholesky_from('CC_TYPE')
elif name == 'fno-ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
set_cholesky_from('CC_TYPE')
elif name == 'fno-ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
set_cholesky_from('CC_TYPE')
if core.get_global_option('SCF_TYPE') not in ['CD', 'DISK_DF']:
raise ValidationError("""Invalid scf_type for DFCC.""")
# save DF or CD ints generated by SCF for use in CC
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" FNOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
core.print_out(" Constructing Basis Sets for FNOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
rel_bas = core.BasisSet.build(ref_wfn.molecule(), "BASIS_RELATIVISTIC",
core.get_option("SCF", "BASIS_RELATIVISTIC"),
"DECON", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset('BASIS_RELATIVISTIC', rel_bas)
fnocc_wfn = core.fnocc(ref_wfn)
# Shove variables into global space
for k, v in fnocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return fnocc_wfn
def run_fnocc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a QCISD(T), CCSD(T), MP2.5, MP3, and MP4 computation.
>>> energy('fno-ccsd(t)')
"""
kwargs = p4util.kwargs_lower(kwargs)
level = kwargs.get('level', 0)
# stash user options:
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['FNOCC', 'RUN_MP2'],
['FNOCC', 'RUN_MP3'],
['FNOCC', 'RUN_MP4'],
['FNOCC', 'RUN_CCSD'],
['FNOCC', 'COMPUTE_TRIPLES'],
['FNOCC', 'COMPUTE_MP4_TRIPLES'],
['FNOCC', 'DFCC'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'USE_DF_INTS'],
['FNOCC', 'NAT_ORBS'])
core.set_local_option('FNOCC', 'DFCC', False)
core.set_local_option('FNOCC', 'RUN_CEPA', False)
core.set_local_option('FNOCC', 'USE_DF_INTS', False)
# which method?
if name == 'ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
elif name == 'ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
elif name == 'fno-ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'qcisd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'qcisd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'fno-qcisd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-qcisd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'mp2':
core.set_local_option('FNOCC', 'RUN_MP2', True)
elif name == 'fno-mp3':
core.set_local_option('FNOCC', 'RUN_MP3', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-mp4':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', True)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'mp4(sdq)':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', False)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
elif name == 'fno-mp4(sdq)':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', False)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'mp3':
core.set_local_option('FNOCC', 'RUN_MP3', True)
elif name == 'mp4':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', True)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError(f"""Error: {name} requires 'reference rhf'.""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if not core.get_option('FNOCC', 'USE_DF_INTS'):
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
else:
core.print_out(" Constructing Basis Sets for FNOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
rel_bas = core.BasisSet.build(ref_wfn.molecule(), "BASIS_RELATIVISTIC",
core.get_option("SCF", "BASIS_RELATIVISTIC"),
"DECON", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset('BASIS_RELATIVISTIC', rel_bas)
fnocc_wfn = core.fnocc(ref_wfn)
# set current correlation energy and total energy. only need to treat mpn here.
if name in ["mp3", "fno-mp3"]:
fnocc_wfn.set_variable("CURRENT ENERGY", fnocc_wfn.variable("MP3 TOTAL ENERGY"))
fnocc_wfn.set_variable("CURRENT CORRELATION ENERGY", fnocc_wfn.variable("MP3 CORRELATION ENERGY"))
elif name in ["mp4(sdq)", "fno-mp4(sdq)"]:
fnocc_wfn.set_variable("CURRENT ENERGY", fnocc_wfn.variable("MP4(SDQ) TOTAL ENERGY"))
fnocc_wfn.set_variable("CURRENT CORRELATION ENERGY", fnocc_wfn.variable("MP4(SDQ) CORRELATION ENERGY"))
elif name in ["mp4", "fno-mp4"]:
fnocc_wfn.set_variable("CURRENT ENERGY", fnocc_wfn.variable("MP4 TOTAL ENERGY"))
fnocc_wfn.set_variable("CURRENT CORRELATION ENERGY", fnocc_wfn.variable("MP4 CORRELATION ENERGY"))
# Shove variables into global space
for k, v in fnocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return fnocc_wfn
def run_cepa(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a cepa-like calculation.
>>> energy('cepa(1)')
"""
kwargs = p4util.kwargs_lower(kwargs)
# save user options
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['FNOCC', 'NAT_ORBS'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'USE_DF_INTS'],
['FNOCC', 'CEPA_NO_SINGLES'])
core.set_local_option('FNOCC', 'RUN_CEPA', True)
core.set_local_option('FNOCC', 'USE_DF_INTS', False)
# what type of cepa?
if name in ['lccd', 'fno-lccd']:
cepa_level = 'cepa(0)'
core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', True)
elif name in ['cepa(0)', 'fno-cepa(0)', 'lccsd', 'fno-lccsd']:
cepa_level = 'cepa(0)'
core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', False)
elif name in ['cepa(1)', 'fno-cepa(1)']:
cepa_level = 'cepa(1)'
elif name in ['cepa(3)', 'fno-cepa(3)']:
cepa_level = 'cepa(3)'
elif name in ['acpf', 'fno-acpf']:
cepa_level = 'acpf'
elif name in ['aqcc', 'fno-aqcc']:
cepa_level = 'aqcc'
elif name in ['cisd', 'fno-cisd']:
cepa_level = 'cisd'
else:
raise ValidationError("""Error: %s not implemented\n""" % name)
core.set_local_option('FNOCC', 'CEPA_LEVEL', cepa_level.upper())
if name in ['fno-lccd', 'fno-lccsd', 'fno-cepa(0)', 'fno-cepa(1)', 'fno-cepa(3)',
'fno-acpf', 'fno-aqcc', 'fno-cisd']:
core.set_local_option('FNOCC', 'NAT_ORBS', True)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError("""Error: %s requires 'reference rhf'.""" % name)
reference = core.get_option('SCF', 'REFERENCE')
if core.get_global_option('CC_TYPE') != "CONV":
raise ValidationError("""CEPA methods from FNOCC module require 'cc_type conv'.""")
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if not core.get_option('FNOCC', 'USE_DF_INTS'):
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
else:
core.print_out("  Constructing Basis Sets for FNOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
fnocc_wfn = core.fnocc(ref_wfn)
# one-electron properties
if core.get_option('FNOCC', 'DIPMOM'):
if cepa_level in ['cepa(1)', 'cepa(3)']:
core.print_out("""\n Error: one-electron properties not implemented for %s\n\n""" % name)
elif core.get_option('FNOCC', 'NAT_ORBS'):
core.print_out("""\n Error: one-electron properties not implemented for %s\n\n""" % name)
else:
p4util.oeprop(fnocc_wfn, 'DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'NO_OCCUPATIONS', title=cepa_level.upper())
# Shove variables into global space
for k, v in fnocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return fnocc_wfn
def run_detcas(name, **kwargs):
"""Function encoding sequence of PSI module calls for
determinant-based multireference wavefunctions,
namely CASSCF and RASSCF.
"""
optstash = p4util.OptionsState(
['DETCI', 'WFN'],
['SCF_TYPE'],
['ONEPDM'],
['OPDM_RELAX']
)
user_ref = core.get_option('DETCI', 'REFERENCE')
if user_ref not in ['RHF', 'ROHF']:
raise ValidationError('Reference %s for DETCI is not available.' % user_ref)
if name == 'rasscf':
core.set_local_option('DETCI', 'WFN', 'RASSCF')
elif name == 'casscf':
core.set_local_option('DETCI', 'WFN', 'CASSCF')
else:
raise ValidationError("Run DETCAS: Name %s not understood" % name)
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_optstash = p4util.OptionsState(
['SCF_TYPE'],
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['ONEPDM'],
['OPDM_RELAX']
)
# No real reason to do a conventional guess
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# If RHF get MP2 NO's
# Why doesn't this work for conv?
if (('DF' in core.get_global_option('SCF_TYPE')) and (user_ref == 'RHF') and
(core.get_option('DETCI', 'MCSCF_TYPE') in ['DF', 'AO']) and
(core.get_option("DETCI", "MCSCF_GUESS") == "MP2")):
core.set_global_option('ONEPDM', True)
core.set_global_option('OPDM_RELAX', False)
ref_wfn = run_dfmp2_gradient(name, **kwargs)
else:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
if (core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV'):
mints = core.MintsHelper(ref_wfn.basisset())
mints.set_print(1)
mints.integrals()
ref_optstash.restore()
# The DF case
if core.get_option('DETCI', 'MCSCF_TYPE') == 'DF':
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(" Constructing Basis Sets for MCSCF...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
# The AO case
elif core.get_option('DETCI', 'MCSCF_TYPE') == 'AO':
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DIRECT')
# The conventional case
elif core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV':
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'PK')
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
else:
raise ValidationError("Run DETCAS: MCSCF_TYPE %s not understood." % str(core.get_option('DETCI', 'MCSCF_TYPE')))
# Second-order SCF requires non-symmetric density matrix support
if core.get_option('DETCI', 'MCSCF_ALGORITHM') in ['AH', 'OS']:
proc_util.check_non_symmetric_jk_density("Second-order MCSCF")
ciwfn = mcscf.mcscf_solver(ref_wfn)
# We always would like to print a little dipole information
oeprop = core.OEProp(ciwfn)
oeprop.set_title(name.upper())
oeprop.add("DIPOLE")
oeprop.compute()
ciwfn.oeprop = oeprop
# retire components by v1.5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
core.set_variable("CURRENT DIPOLE X", core.variable(name.upper() + " DIPOLE X"))
core.set_variable("CURRENT DIPOLE Y", core.variable(name.upper() + " DIPOLE Y"))
core.set_variable("CURRENT DIPOLE Z", core.variable(name.upper() + " DIPOLE Z"))
core.set_variable("CURRENT DIPOLE", core.variable(name.upper() + " DIPOLE"))
# Shove variables into global space
for k, v in ciwfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return ciwfn
def run_efp(name, **kwargs):
"""Function encoding sequence of module calls for a pure EFP
computation (ignore any QM atoms).
"""
efp_molecule = kwargs.get('molecule', core.get_active_molecule())
try:
efpobj = efp_molecule.EFP
except AttributeError:
raise ValidationError("""Method 'efp' not available without EFP fragments in molecule""")
# print efp geom in [A]
core.print_out(efpobj.banner())
core.print_out(efpobj.geometry_summary(units_to_bohr=constants.bohr2angstroms))
# set options
# * 'chtr', 'qm_exch', 'qm_disp', 'qm_chtr' may be enabled in a future libefp release
efpopts = {}
for opt in ['elst', 'exch', 'ind', 'disp',
'elst_damping', 'ind_damping', 'disp_damping']:
psiopt = 'EFP_' + opt.upper()
if core.has_option_changed('EFP', psiopt):
efpopts[opt] = core.get_option('EFP', psiopt)
efpopts['qm_elst'] = False
efpopts['qm_ind'] = False
efpobj.set_opts(efpopts, label='psi', append='psi')
do_gradient = core.get_option('EFP', 'DERTYPE') == 'FIRST'
# compute and report
efpobj.compute(do_gradient=do_gradient)
core.print_out(efpobj.energy_summary(label='psi'))
ene = efpobj.get_energy(label='psi')
core.set_variable('EFP ELST ENERGY', ene['electrostatic'] + ene['charge_penetration'] + ene['electrostatic_point_charges'])
core.set_variable('EFP IND ENERGY', ene['polarization'])
core.set_variable('EFP DISP ENERGY', ene['dispersion'])
core.set_variable('EFP EXCH ENERGY', ene['exchange_repulsion'])
core.set_variable('EFP TOTAL ENERGY', ene['total'])
core.set_variable('CURRENT ENERGY', ene['total'])
if do_gradient:
core.print_out(efpobj.gradient_summary())
torq = efpobj.get_gradient()
torq = core.Matrix.from_array(np.asarray(torq).reshape(-1, 6))
core.set_variable("EFP TORQUE", torq) # P::e EFP
return ene['total']
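# --- Hedged usage sketch (added for illustration; not part of the original driver) ---
# The run_* handlers above are normally reached through the public driver
# functions rather than called directly.  A typical PsiAPI invocation that
# ends up in run_fnocc, assuming a molecule has been defined, looks roughly
# like this:
#
#   import psi4
#   psi4.geometry("""
#   O
#   H 1 0.96
#   H 1 0.96 2 104.5
#   """)
#   psi4.set_options({'basis': 'cc-pvdz', 'freeze_core': True})
#   e = psi4.energy('fno-ccsd(t)')   # dispatched to run_fnocc via the procedures table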
| jturney/psi4 | psi4/driver/procrouting/proc.py | Python | lgpl-3.0 | 202,348 | ["Psi4"] | 295a010c8dbc531cb405431fe72b5b0aa5a6f796d761a8eeabdc7c9a9b925fee |
from log import wl_log, add_log_file_handler, add_symlink
from random import choice
from selenium.common.exceptions import TimeoutException
from torutils import TorController
from visit import Visit
import common as cm
import os
import time
import utils as ut
class Crawler(object):
"""Provides methods to collect traffic traces."""
def __init__(self, torrc_dict, url_list, tbb_version,
experiment=cm.EXP_TYPE_WANG_AND_GOLDBERG, xvfb=False,
capture_screen=True):
# Create instance of Tor controller and sniffer used for the crawler
self.crawl_dir = None
self.crawl_logs_dir = None
self.visit = None
self.urls = url_list # keep list of urls we'll visit
self.init_crawl_dirs() # initializes crawl_dir
self.tor_log = os.path.join(self.crawl_logs_dir, "tor.log")
linkname = os.path.join(cm.RESULTS_DIR, 'latest_tor_log')
add_symlink(linkname, self.tor_log)
self.tbb_version = tbb_version
self.experiment = experiment
self.tor_controller = TorController(torrc_dict, tbb_version,
self.tor_log)
self.tor_process = None
self.tb_driver = None
self.capture_screen = capture_screen
self.xvfb = xvfb
add_log_file_handler(wl_log, self.log_file)
linkname = os.path.join(cm.RESULTS_DIR, 'latest_crawl_log')
add_symlink(linkname, self.log_file) # add a symbolic link
def crawl(self, num_batches=cm.NUM_BATCHES,
num_instances=cm.NUM_INSTANCES, start_line=0):
wl_log.info("Crawl configuration: batches: %s, instances: %s,"
" tbb_version: %s, experiment: %s, no of URLs: %s, "
"crawl dir: %s, XVFB: %s, screenshot: %s"
% (num_batches, num_instances, self.tbb_version,
self.experiment, len(self.urls), self.crawl_dir,
self.xvfb, self.capture_screen))
# for each batch
for batch_num in xrange(num_batches):
wl_log.info("********** Starting batch %s **********" % batch_num)
site_num = start_line
bg_site = None
batch_dir = ut.create_dir(os.path.join(self.crawl_dir,
str(batch_num)))
# init/reset tor process to have a different circuit.
# make sure that we're not using the same guard node again
wl_log.info("********** Restarting Tor Before Batch **********")
self.tor_controller.restart_tor()
sites_crawled_with_same_proc = 0
# for each site
for page_url in self.urls:
sites_crawled_with_same_proc += 1
if sites_crawled_with_same_proc > cm.MAX_SITES_PER_TOR_PROCESS:
wl_log.info("********** Restarting Tor Process **********")
self.tor_controller.restart_tor()
sites_crawled_with_same_proc = 0
wl_log.info("********** Crawling %s **********" % page_url)
page_url = page_url[:cm.MAX_FNAME_LENGTH]
site_dir = ut.create_dir(os.path.join(
batch_dir, ut.get_filename_from_url(page_url, site_num)))
if self.experiment == cm.EXP_TYPE_MULTITAB_ALEXA:
bg_site = choice(self.urls)
# for each visit
for instance_num in range(num_instances):
wl_log.info("********** Visit #%s to %s **********" %
(instance_num, page_url))
self.visit = None
try:
self.visit = Visit(batch_num, site_num,
instance_num, page_url,
site_dir, self.tbb_version,
self.tor_controller, bg_site,
self.experiment, self.xvfb,
self.capture_screen)
self.visit.get()
except KeyboardInterrupt: # CTRL + C
raise KeyboardInterrupt
except (ut.TimeExceededError, TimeoutException) as exc:
wl_log.critical("Visit to %s timed out! %s %s" %
(page_url, exc, type(exc)))
if self.visit:
self.visit.cleanup_visit()
except Exception:
wl_log.critical("Exception crawling %s" % page_url,
exc_info=True)
if self.visit:
self.visit.cleanup_visit()
# END - for each visit
site_num += 1
time.sleep(cm.PAUSE_BETWEEN_SITES)
def init_crawl_dirs(self):
"""Creates results and logs directories for this crawl."""
self.crawl_dir, self.crawl_logs_dir = self.create_crawl_dir()
sym_link = os.path.join(cm.RESULTS_DIR, 'latest')
add_symlink(sym_link, self.crawl_dir) # add a symbolic link
# Create crawl log
self.log_file = os.path.join(self.crawl_logs_dir, "crawl.log")
def init_logger(self):
"""Configure logging for crawler."""
add_log_file_handler(wl_log, self.log_file)
def stop_crawl(self, pack_results=True):
""" Cleans up crawl and kills tor process in case it's running."""
wl_log.info("Stopping crawl...")
if self.visit:
self.visit.cleanup_visit()
self.tor_controller.kill_tor_proc()
if pack_results:
ut.pack_crawl_data(self.crawl_dir)
def create_crawl_dir(self):
"""Create a timestamped crawl."""
ut.create_dir(cm.RESULTS_DIR) # ensure that we've a results dir
crawl_dir_wo_ts = os.path.join(cm.RESULTS_DIR, 'crawl')
crawl_dir = ut.create_dir(ut.append_timestamp(crawl_dir_wo_ts))
crawl_logs_dir = os.path.join(crawl_dir, 'logs')
ut.create_dir(crawl_logs_dir)
return crawl_dir, crawl_logs_dir
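# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal driver for this class might look like the following; the torrc
# dictionary, the URL and the TBB version string are placeholders, and the
# experiment constant is the module default from common.py.
#
#   if __name__ == '__main__':
#       urls = ['check.torproject.org']
#       torrc = {'ControlPort': '9051'}
#       crawler = Crawler(torrc, urls, tbb_version='4.0.8',
#                         experiment=cm.EXP_TYPE_WANG_AND_GOLDBERG)
#       try:
#           crawler.crawl(num_batches=1, num_instances=1)
#       finally:
#           crawler.stop_crawl()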
| pankajb64/webfp-crawler-phantomjs | tor-browser-crawler-webfp-paper/datacollection/crawler.py | Python | gpl-2.0 | 6,244 | ["VisIt"] | 760046b35649b420c0cff597b473d6388e4eb9bd96e2c3170ec997cad18e0fb3 |
#!/usr/bin/env python
"""HDSS fieldwork simulation, using openHDS"""
__email__ = "nicolas.maire@unibas.ch"
__status__ = "Alpha"
import json
import datetime
import os
import MySQLdb.cursors
import random
import numpy as np
import time
from matplotlib.path import Path
import argparse
import submission
import util
import pickle
import pprint
import logging
conf_dir = 'conf'
config = None
site = None
aggregate_url = ''
open_hds_connection = None
odk_connection = None
m_first_names = []
f_first_names = []
last_names = []
area_polygon = None
area_extent = None
locations_per_social_group = None
individuals_per_social_group = None
pop_size_baseline = 0
min_age_head_of_social_group = 0
min_age_marriage = 0
proportion_females = 0.5
birth_rate = 0
death_rate = 0
inmigration_rate = 0
outmigration_rate = 0
internal_migration_rate = 0
t = 0
hdss = {'field_workers': [], 'social_groups': []}
def init(site_config):
"""Initialization"""
global config, m_first_names, f_first_names, last_names, aggregate_url, open_hds_connection, odk_connection
global area_polygon, area_extent, locations_per_social_group, individuals_per_social_group
global pop_size_baseline, site, min_age_head_of_social_group, proportion_females, birth_rate, death_rate
global inmigration_rate, outmigration_rate, internal_migration_rate
global min_age_marriage, hdss
with open(os.path.join(conf_dir, 'config.json')) as config_file:
config = json.load(config_file)
with open(os.path.join(conf_dir, site_config + '.json')) as site_file:
site = json.load(site_file)
open_hds_connection = MySQLdb.connect(host=config['open_hds_server']['db_host'],
user=config['open_hds_server']['db_user'],
passwd=config['open_hds_server']['db_password'],
db=config['open_hds_server']['db_name'],
cursorclass=MySQLdb.cursors.DictCursor)
open_hds_connection.autocommit(True)
odk_connection = MySQLdb.connect(host=config['odk_server']['db_host'],
user=config['odk_server']['db_user'],
passwd=config['odk_server']['db_password'],
db=config['odk_server']['db_name'],
cursorclass=MySQLdb.cursors.DictCursor)
odk_connection.autocommit(True)
aggregate_url = config['odk_server']['aggregate_url']
with open(os.path.join(conf_dir, 'firstnames.csv')) as f:
first_names = list(f.read().splitlines(False))
for first_name in first_names:
fn = first_name.split(';')
if fn[0] == 'M':
m_first_names.append(fn[1])
else:
f_first_names.append(fn[1])
with open(os.path.join(conf_dir, 'lastnames.csv')) as f:
last_names = list(f.read().splitlines(False))
area_outline_vertices = []
for point in site['general']['area_polygon']:
area_outline_vertices.append(point)
area_polygon = Path(area_outline_vertices)
area_extent = area_polygon.get_extents().get_points()
pop_size_baseline = site['general']['pop_size_baseline']
locations_per_social_group = site['socialgroup']['locations_per_social_group']
individuals_per_social_group = site['socialgroup']['individuals_per_social_group']
min_age_head_of_social_group = site['socialgroup']['min_age_head']
min_age_marriage = site['relationship']['min_age_marriage']
proportion_females = 1.0 / (1 + site['general']['sex_ratio'])
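# Added note (worked example, values assumed): with a configured sex_ratio of
# e.g. 1.05 males per female this gives proportion_females = 1.0 / (1 + 1.05) ≈ 0.49.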
birth_rate = site['general']['birth_rate']
death_rate = site['general']['death_rate']
inmigration_rate = site['general']['inmigration_rate']
outmigration_rate = site['general']['outmigration_rate']
internal_migration_rate = site['general']['internal_migration_rate']
#either load population from pickle, or re-init db and create hdss dictionary
if 'pickle_in' in site['general']:
clean_odk_db()
with open(os.path.join(conf_dir, site['general']['pickle_in'])) as f:
hdss = pickle.load(f)
else:
clean_dbs()
create_fws(site['fieldworker'])
create_location_hierarchy(site['locationhierarchy'])
def clean_dbs():
"""Remove any data from openhds that is not in 'openhds-required-data'"""
cursor = open_hds_connection.cursor()
cursor.execute("SET FOREIGN_KEY_CHECKS=0")
cursor.execute("DELETE FROM locationhierarchy where uuid != 'hierarchy_root'")
cursor.execute("DELETE FROM fieldworker where uuid != 'UnknownFieldWorker'")
cursor.execute("DELETE FROM individual where uuid != 'Unknown Individual'")
for table in config['open_hds_server']['tables_to_truncate']:
cursor.execute("TRUNCATE " + table)
cursor.execute("SET FOREIGN_KEY_CHECKS=1")
cursor.close()
open_hds_connection.commit()
clean_odk_db()
def clean_odk_db():
cursor = odk_connection.cursor()
for form in config['odk_server']['forms']:
cursor.execute("TRUNCATE " + form)
cursor.execute("CREATE TABLE IF NOT EXISTS `SCENARIO` "
"(`FLG_SCENARIO` int(11) NOT NULL DEFAULT '0') "
"ENGINE=InnoDB DEFAULT CHARSET=latin1;")
cursor.close()
odk_connection.commit()
def sample_coordinates(constraint=None):
"""Sample coordinates from area_polygon, possible constrained further by a constraint rectangle"""
#The northern, southern, western, and eastern bounds of the area.
nb = area_extent[1][0]
wb = area_extent[0][1]
sb = area_extent[0][0]
eb = area_extent[1][1]
if constraint is not None:
#TODO: further restrict area (e.g. fw-specific)
pass
while True:
lat = random.uniform(sb, nb)
lon = random.uniform(wb, eb)
if area_polygon.contains_point([lat, lon]):
return str(lat) + ' ' + str(lon) + ' 0 0'
def create_first_name(sex):
if sex == 'M':
return random.choice(m_first_names)
else:
return random.choice(f_first_names)
def create_last_name():
return random.choice(last_names)
def sample_age(min_age=None):
if min_age is None:
min_age = 0
MAX_AGE = 100
while True:
age = random.expovariate(death_rate)
if min_age <= age <= MAX_AGE:
return age
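# Added note (hedged): random.expovariate(death_rate) has mean 1/death_rate, so
# before the [min_age, MAX_AGE] rejection step the sampled ages average roughly
# 1/death_rate years; the loop above only trims values outside that window.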
def get_age_in_years(dob, date_of_visit):
try:
birthday = dob.replace(year=date_of_visit.year)
except ValueError:
birthday = dob.replace(year=date_of_visit.year, day=dob.day-1)
if birthday > date_of_visit:
return date_of_visit.year - dob.year - 1
else:
return date_of_visit.year - dob.year
def sample_gender():
if random.random() < proportion_females:
return 'F'
else:
return 'M'
def makes_mistake(event):
"""Does FW make event-specific mistake?"""
if random.random() < site['fieldworker']['accuracy'][event]['rate']:
if site['fieldworker']['accuracy'][event]['max'] > 0:
site['fieldworker']['accuracy'][event]['max'] -= 1
return True
return False
def create_date(event_age, survey_date=None):
"""Return the date of an event that happen event_age (in years) before survey_date"""
if survey_date is None:
survey_date = datetime.date.today()
try:
return survey_date - datetime.timedelta(days=int(event_age * 365))
except:
# Must be 2/29!
assert survey_date.month == 2 and survey_date.day == 29
return survey_date.replace(month=2, day=28, year=survey_date.year - int(event_age))
def create_date_from_interval(start, end):
start_date = datetime.datetime.strptime(start, "%Y-%m-%d").date()
end_date = datetime.datetime.strptime(end, "%Y-%m-%d").date()
return start_date + datetime.timedelta(days=random.randint(0, (end_date - start_date).days))
def create_start_end_time(date_of_visit):
start_datetime = datetime.datetime.combine(date_of_visit, datetime.datetime.min.time())
#TODO: move hard coded values for times in seconds to config
#start visit between 8am and 4pm
start_datetime = start_datetime + datetime.timedelta(seconds=random.randint(8 * 3600, 16 * 3600))
#end visit within 60 minutes
end_datetime = start_datetime + datetime.timedelta(seconds=random.randint(1, 3600))
return start_datetime.strftime('%Y-%m-%dT%H:%M:%S.000+03'), end_datetime.strftime('%Y-%m-%dT%H:%M:%S.000+03')
def create_fws(fieldworker):
"""Create fieldworkers in openhds"""
cursor = open_hds_connection.cursor()
#first add a default fieldworker named Data Data, username data, for use in the standard tablet emulator
cursor.execute("INSERT INTO fieldworker (uuid, extid, firstname, lastname, deleted, passwordHash) VALUES "
"('{uu_id}','data', 'Data', 'Data', false,"
" '$2a$08$83Vl7c/z85s9vdmWLcQYOuflMxgVwdNnMQmDA77L5FvX7ao65vt0W')".format(uu_id=util.create_uuid()))
cursor.execute("INSERT INTO fieldworker (uuid, extid, firstname, lastname, deleted, passwordHash) VALUES "
"('{uu_id}','data2', 'Data2', 'Data2', false,"
" '$2a$08$83Vl7c/z85s9vdmWLcQYOuflMxgVwdNnMQmDA77L5FvX7ao65vt0W')".format(uu_id=util.create_uuid()))
number = fieldworker['number']
for i in range(1, number + 1):
first_name = create_first_name(sample_gender())
last_name = create_last_name()
#TODO: i is not what should be used according to the naming convention
ext_id = 'FW' + first_name[0] + last_name[0] + str(i)
cursor.execute("INSERT INTO fieldworker (uuid, extid, firstname, lastname, deleted, passwordHash) VALUES "
"('{uu_id}','{ext_id}', '{first_name}', '{last_name}', false,"
" '$2a$08$83Vl7c/z85s9vdmWLcQYOuflMxgVwdNnMQmDA77L5FvX7ao65vt0W')"
.format(uu_id=util.create_uuid(), ext_id=ext_id, first_name=first_name, last_name=last_name))
hdss['field_workers'].append({'ext_id': ext_id, 'center': sample_coordinates()})
cursor.close()
open_hds_connection.commit()
def create_location_hierarchy(location_hierarchy):
"""Create the location hierarchy"""
cursor = open_hds_connection.cursor()
for level in location_hierarchy['levels']:
cursor.execute("INSERT INTO locationhierarchy VALUES ('{uuid}','{extId}','{name}','{level_uuid}',"
"'{parent_uuid}')".format(**level))
cursor.close()
open_hds_connection.commit()
def create_social_group(social_group_size, round_number, start_date, end_date, id_offset):
date_of_visit = create_date_from_interval(start_date, end_date)
field_worker = random.choice(hdss['field_workers'])
cursor = open_hds_connection.cursor()
#sample location on lowest level of location hierarchy
area = util.query_db_one(cursor, "SELECT extId FROM locationhierarchy "
"WHERE level_uuid = 'hierarchyLevelId5' ORDER BY RAND() LIMIT 1")['extId']
#for now assume one location per social group
location_index = len(hdss['social_groups']) + 1 + id_offset
location_id = area + str(location_index).zfill(6)
coordinates = sample_coordinates()
visit_id = location_id + round_number.zfill(3)
sg_id = location_id + '00'
#first create the social group head
id_of_head = location_id + '1'.zfill(3)
last_name = create_last_name()
gender_of_head = sample_gender()
first_name = create_first_name(gender_of_head)
middle_name = create_first_name(gender_of_head)
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_location_registration(start_time, area, field_worker['ext_id'], location_id, last_name,
'Ten cell leader', 'RUR', coordinates,
end_time, aggregate_url)
start_time, end_time = create_start_end_time(date_of_visit)
#migration date only for in_migrations. assume inmigration during this update round for now.
date_of_migration = create_date_from_interval(start_date, str(date_of_visit))
if round_number == '0':
submission.submit_baseline_individual(start_time, end_time, location_id, visit_id, field_worker['ext_id'],
id_of_head, 'UNK', 'UNK', first_name, middle_name, last_name,
gender_of_head, str(create_date(sample_age(min_age_head_of_social_group),
date_of_visit)),
'1', str(date_of_visit), aggregate_url)
else:
submission.submit_in_migration(start_time, end_time, 'EXTERNAL_INMIGRATION', location_id, visit_id,
field_worker['ext_id'],
id_of_head, 'UNK', 'UNK', first_name, middle_name, last_name, gender_of_head,
str(create_date(sample_age(min_age_head_of_social_group), date_of_migration)),
'1', str(date_of_migration), aggregate_url)
#create a social group
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_social_group_registration(start_time, sg_id, id_of_head, field_worker['ext_id'], last_name, "FAM",
end_time, aggregate_url)
social_group = {'sg_id': sg_id, 'individuals': [], 'locations': []}
social_group['locations'].append({'location_id': location_id, 'coordinates': coordinates})
social_group['individuals'].append({'ind_id': id_of_head, 'gender': gender_of_head, 'last_seen': date_of_visit,
'status': 'present'})
#and make the head a member
start_time, end_time = create_start_end_time(date_of_visit)
if round_number == '0':
submission.submit_membership(start_time, id_of_head, sg_id, field_worker['ext_id'], '1', str(date_of_visit),
end_time, aggregate_url)
else:
submission.submit_membership(start_time, id_of_head, sg_id, field_worker['ext_id'], '1', str(date_of_migration),
end_time, aggregate_url)
for i in range(2, social_group_size):
ind_id = location_id + str(i).zfill(3)
gender = sample_gender()
first_name = create_first_name(gender)
middle_name = create_first_name(gender)
age = sample_age()
start_time, end_time = create_start_end_time(date_of_visit)
if round_number == '0':
submission.submit_baseline_individual(start_time, end_time, location_id, visit_id, field_worker['ext_id'],
ind_id, 'UNK', 'UNK', first_name, middle_name, last_name, gender,
str(create_date(age, date_of_visit)),
'1', str(date_of_visit), aggregate_url)
if makes_mistake('baseline'):
submission.submit_baseline_individual(start_time, end_time, location_id, visit_id,
field_worker['ext_id'], ind_id, 'UNK', 'UNK', first_name,
middle_name, last_name, gender,
str(create_date(age, date_of_visit)), '1', str(date_of_visit),
aggregate_url)
else:
submission.submit_in_migration(start_time, end_time, 'EXTERNAL_INMIGRATION', location_id, visit_id,
field_worker['ext_id'],
ind_id, 'UNK', 'UNK', first_name, middle_name, last_name, gender,
str(create_date(age, date_of_migration)),
'1', str(date_of_migration), aggregate_url)
#create memberships here, 2-9 for relationship
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_membership(start_time, ind_id, sg_id, field_worker['ext_id'], str(random.randint(2, 9)),
str(date_of_visit), end_time, aggregate_url)
social_group['individuals'].append({'ind_id': ind_id, 'gender': gender, 'last_seen': date_of_visit,
'status': 'present'})
#then another loop for relationship, use code 2 for marriages.
#submission.submit_relationship()
#TODO: for now, just take individual 2 and marry it to the household head (if opposite sexes and old enough)
if i == 2 and gender != gender_of_head and age > min_age_marriage:
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_relationship(start_time, id_of_head, ind_id, field_worker['ext_id'], '2',
str(date_of_visit), end_time, aggregate_url)
return social_group
def simulate_baseline(round):
"""Simulate a census. Use the population size at start. Sample random locations.
Use inmigration for all individuals.
Don't fill a visit form."""
global individuals_per_social_group
popsize = 0
while popsize < pop_size_baseline:
social_group_size = np.random.poisson(individuals_per_social_group)
social_group = create_social_group(social_group_size, str(round['roundNumber']), round['startDate'],
round['endDate'], 0)
hdss['social_groups'].append(social_group)
popsize += social_group_size
def visit_social_group(social_group, round_number, start_date, end_date):
date_of_visit = create_date_from_interval(start_date, end_date)
field_worker = random.choice(hdss['field_workers'])
#TODO: only one location per social group for now
location_id = social_group['locations'][0]['location_id']
visit_id = location_id + round_number.zfill(3)
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_visit_registration(start_time, visit_id, field_worker['ext_id'], location_id, round_number,
str(date_of_visit), social_group['individuals'][0]['ind_id'], '1', '0',
social_group['locations'][0]['coordinates'],
end_time, aggregate_url)
newly_inmigrated = []
for individual in social_group['individuals']:
#TODO: for now define death rate as per visit rate
if individual['status'] == 'present' and random.random() < death_rate:
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_death_registration(start_time, individual['ind_id'], 'first', 'last',
field_worker['ext_id'], visit_id, str(date_of_visit), 'OTHER',
'OTHERPLACE', end_time, aggregate_url)
individual['status'] = 'dead'
#TODO: dummy condition
if "isheadofhousehold" == "True":
submission.submit_death_of_hoh_registration(start_time, end_time, individual['ind_id'],
social_group['sg_id'], "TODO_NEW_HOH",
field_worker['ext_id'], individual['gender'], '1',
'VILLAGE', '1', visit_id, 'CAUSE_OF_DEATH',
str(date_of_visit), 'OTHER', 'OTHERPLACE', aggregate_url)
#TODO: for now define outmigration rate as per visit rate
if individual['status'] == 'present' and random.random() < outmigration_rate:
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_out_migration_registration(start_time, end_time, individual['ind_id'],
field_worker['ext_id'], visit_id, 'notknown', 'notknown',
str(date_of_visit), 'DESTINATION', 'MARITAL_CHANGE',
'REC', aggregate_url)
print(individual)
individual['status'] = 'outside_hdss'
#half of the external inmigration events happen into social groups
#TODO: for now assume all inmigrants are previously unknown
if random.random() < inmigration_rate/2:
next_id = int(social_group['individuals'][-1]['ind_id'][-3:]) + 1 + len(newly_inmigrated)
ind_id = location_id + str(next_id).zfill(3)
gender = sample_gender()
first_name = create_first_name(gender)
middle_name = create_first_name(gender)
last_name = create_last_name()
age = sample_age()
date_of_migration = create_date_from_interval(start_date, str(date_of_visit))
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_in_migration(start_time, end_time, 'EXTERNAL_INMIGRATION', location_id, visit_id,
field_worker['ext_id'], ind_id, 'UNK', 'UNK', first_name, middle_name,
last_name, gender, str(create_date(age, date_of_visit)),
'1', str(date_of_migration), aggregate_url)
start_time, end_time = create_start_end_time(date_of_visit)
submission.submit_membership(start_time, ind_id, social_group['sg_id'], field_worker['ext_id'],
str(random.randint(2, 9)), str(date_of_migration), end_time, aggregate_url)
newly_inmigrated.append({'ind_id': ind_id, 'gender': gender, 'last_seen': date_of_visit,
'status': 'present'})
social_group['individuals'].extend(newly_inmigrated)
def simulate_update(round):
"""Simulate an update round"""
global individuals_per_social_group
newly_inmigrated = []
for social_group in hdss['social_groups']:
if 'no_update' not in social_group:
visit_social_group(social_group, str(round['roundNumber']), round['startDate'], round['endDate'])
if random.random() < inmigration_rate/2:
social_group_size = np.random.poisson(individuals_per_social_group)
#social_group = create_social_group(social_group_size, str(round['roundNumber']), round['startDate'],
# round['endDate'], len(newly_inmigrated))
#newly_inmigrated.append(social_group)
#logging.debug(newly_inmigrated)
#hdss['social_groups'].extend(newly_inmigrated)
def submit_fixed_events(household):
household_id = household['householdId']
forms = household['forms']
for form in forms:
submission.submit_from_dict(form, aggregate_url)
social_group = next((item for item in hdss['social_groups'] if item['sg_id'] == household_id), None)
#social_group = (item for item in hdss['social_groups'] if item['sg_id'] == household_id).next()
if not social_group:
social_group = {'sg_id': household_id, 'individuals': [], 'locations': [], 'no_update': True}
hdss['social_groups'].append(social_group)
if form['id'] == 'location_registration':
location_id = form['fields'][1][1][2][1]
print("->location_id id: " + location_id)
location = {'location_id': location_id, 'coordinates': form['fields'][5][1]}
print(social_group)
social_group['locations'].append(location)
if form['id'] == 'membership':
individual_id = form['fields'][1][1][0][1]
print("->individual_id id: " + individual_id)
start_date = form['fields'][3][1]
print("->start_date: " + start_date)
#TODO: properly deal with individuals
social_group['individuals'].append({'ind_id': individual_id, 'gender': 'F', 'last_seen': start_date,
'status': 'present'})
if form['id'] == 'out_migration_registration' or form['id'] == 'death_registration':
individual_id = form['fields'][1][1][0][1]
individual = next((item for item in social_group['individuals'] if item['ind_id'] == individual_id), None)
if form['id'] == 'out_migration_registration':
individual['status'] = 'outside_hdss'
if form['id'] == 'death_registration':
individual['status'] = 'dead'
def simulate_round(round):
"""Simulate a baseline or update round. Discrete time simulation with daily time steps,
assumes number of events >> number of days per round"""
cursor = open_hds_connection.cursor()
cursor.execute("INSERT INTO round VALUES ('{uuid}','{endDate}','{remarks}','{roundNumber}',"
"'{startDate}')".format(uuid=util.create_uuid(), **round))
cursor = odk_connection.cursor()
if round['remarks'] == 'Baseline':
#enable mirth baseline channels
cursor.execute("UPDATE SCENARIO SET FLG_SCENARIO=0")
print("set 0")
else:
#enable mirth update channels
cursor.execute("UPDATE SCENARIO SET FLG_SCENARIO=1")
print("set 1")
if 'fixedEvents' in round:
for household in round['fixedEvents']:
submit_fixed_events(household)
if round['remarks'] == 'Baseline':
simulate_baseline(round)
else:
simulate_update(round)
def simulate_inter_round(round):
#wait for mirth to finish transferring data to openhds
waiting_for_mirth = True
while waiting_for_mirth:
cursor = odk_connection.cursor()
number_unprocessed = 0
processed_flag = config['odk_server']['processed_by_mirth_flag']
for odk_form in config['odk_server']['forms']:
unprocessed = util.query_db_one(cursor, "SELECT COUNT(*) AS count FROM {odk_form} WHERE {processed} = 0"
.format(odk_form=odk_form, processed=processed_flag))['count']
if unprocessed > 0:
print(odk_form + " unprocessed: " + str(unprocessed))
number_unprocessed += unprocessed
if number_unprocessed == 0:
waiting_for_mirth = False
else:
print("Still waiting for Mirth...")
time.sleep(3)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--site', help='Json file with site description, located in conf dir', required=True)
parser.add_argument('-d', '--debug', help='Debug logging', action='store_true', default=False)
parser.set_defaults(truncate=False)
args = parser.parse_args()
init(args.site)
if args.debug:
logging.basicConfig(filename='sim.log', level=logging.DEBUG)
else:
logging.basicConfig(filename='sim.log', level=logging.WARN)
for round in site['round']:
pprint.pprint(round, width=1)
simulate_round(round)
simulate_inter_round(round)
open_hds_connection.close()
odk_connection.close()
if 'pickle_out' in site['general']:
with open(os.path.join(conf_dir, site['general']['pickle_out']), 'w') as site_file:
pickle.dump(hdss, site_file)
pprint.pprint(hdss, width=1)
print("Done")
| SwissTPH/openhds-sim | fieldwork_simulator.py | Python | gpl-2.0 | 27,757 | ["VisIt"] | 584cbd2af4c602aa97dc89be8504380925b9bc03694105d7ad264d79ade3dbf8 |
# -*- coding: utf-8 -*-
#
# hill_tononi_Vp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ! ===========================================
# ! NEST Topology Module: A Case-Based Tutorial
# ! ===========================================
# !
# ! :Author: Hans Ekkehard Plesser
# ! :Institution: Norwegian University of Life Sciences
# ! :Version: 0.4
# ! :Date: 21 November 2012
# ! :Copyright: The NEST Initiative (2004)
# ! :License: Creative Commons Attribution License
# !
# ! **NOTE:** The network generated by this script does generate
# ! dynamics in which the activity of the entire system, especially
# ! Rp and Vp, oscillates at approx. 5 Hz. This is different from
# ! the full model. Deviations are due to the different model type
# ! and the elimination of a number of connections, with no changes
# ! to the weights.
# !
# ! Introduction
# ! ============
# !
# ! This tutorial shows you how to implement a simplified version of the
# ! Hill-Tononi model of the early visual pathway using the NEST Topology
# ! module. The model is described in the paper
# !
# ! S. L. Hill and G. Tononi.
# ! Modeling Sleep and Wakefulness in the Thalamocortical System.
# ! J Neurophysiology **93**:1671-1698 (2005).
# ! Freely available via `doi 10.1152/jn.00915.2004
# ! <http://dx.doi.org/10.1152/jn.00915.2004>`_.
# !
# ! We simplify the model somewhat both to keep this tutorial a bit
# ! shorter, and because some details of the Hill-Tononi model are not
# ! currently supported by NEST. Simplifications include:
# !
# ! 1. We use the ``iaf_cond_alpha`` neuron model, which is
# ! simpler than the Hill-Tononi model.
# !
# ! #. As the ``iaf_cond_alpha`` neuron model only supports two
# ! synapses (labeled "ex" and "in"), we only include AMPA and
# ! GABA_A synapses.
# !
# ! #. We ignore the secondary pathway (Ts, Rs, Vs), since it adds just
# ! more of the same from a technical point of view.
# !
# ! #. Synaptic delays follow a Gaussian distribution in the HT
# !     model. This actually implies a Gaussian distribution clipped at
# ! some small, non-zero delay, since delays must be
# ! positive. Currently, there is a bug in the Topology module when
# ! using clipped Gaussian distribution. We therefore draw delays from a
# ! uniform distribution.
# !
# ! #. Some further adaptations are given at the appropriate locations in
# ! the script.
# !
# ! This tutorial is divided in the following sections:
# !
# ! Philosophy_
# ! Discusses the philosophy applied to model implementation in this
# ! tutorial
# !
# ! Preparations_
# !    Necessary steps to use NEST and the Topology Module
# !
# ! `Configurable Parameters`_
# ! Define adjustable network parameters
# !
# ! `Neuron Models`_
# ! Define the neuron models needed by the network model
# !
# ! Populations_
# ! Create Populations
# !
# ! `Synapse models`_
# ! Define the synapse models used in the network model
# !
# ! Connections_
# ! Create Connections
# !
# ! `Example simulation`_
# ! Perform a small simulation for illustration. This
# ! section also discusses the setup for recording.
# ! Philosophy
# ! ==========
# ! A network model has two essential components: *populations* and
# ! *projections*. We first use NEST's ``CopyModel()`` mechanism to
# ! create specific models for all populations and subpopulations in
# ! the network, and then create the populations using the Topology
# ! module's ``CreateLayer()`` function.
# !
# ! We use a two-stage process to create the connections, mainly
# ! because the same configurations are required for a number of
# ! projections: we first define dictionaries specifying the
# ! connections, then apply these dictionaries later.
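# ! As a minimal sketch of this two-stage pattern (added for illustration,
# ! not from the original tutorial): a connection dictionary is built first
# ! and only later handed to ``ConnectLayers``, e.g.
# !
# !   conn = {"connection_type": "divergent",
# !           "synapse_model": "AMPA",
# !           "weights": 1.0}
# !   topo.ConnectLayers(source_layer, target_layer, conn)
# !
# ! where ``source_layer`` and ``target_layer`` stand for any two of the
# ! layers created below.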
# !
# ! The way in which we declare the network model here is an
# ! example. You should not consider it the last word: we expect to see
# ! a significant development in strategies and tools for network
# ! descriptions in the future. The following contributions to CNS\*09
# ! seem particularly interesting
# !
# ! - Ralf Ansorg & Lars Schwabe. Declarative model description and
# ! code generation for hybrid individual- and population-based
# ! simulations of the early visual system (P57);
# ! - Sharon Crook, R. Angus Silver, & Padraig Gleeson. Describing
# ! and exchanging models of neurons and neuronal networks with
# ! NeuroML (F1);
# !
# ! as well as the following paper which will appear in PLoS
# ! Computational Biology shortly:
# !
# ! - Eilen Nordlie, Marc-Oliver Gewaltig, & Hans Ekkehard Plesser.
# ! Towards reproducible descriptions of neuronal network models.
# ! Preparations
# ! ============
# ! Please make sure that your ``PYTHONPATH`` is set correctly, so
# ! that Python can find the NEST Python module.
# ! **Note:** By default, the script does not show any graphics.
# ! Set ``SHOW_FIGURES`` to ``True`` to activate graphics.
# ! This example uses the function GetLeaves, which is deprecated. A
# ! deprecation warning is therefore issued. For details about deprecated
# ! functions, see documentation.
import pylab
SHOW_FIGURES = False
if not SHOW_FIGURES:
pylab_show = pylab.show
def nop(s=None):
pass
pylab.show = nop
else:
pylab.ion()
# ! Load pynest
import nest
# ! Load NEST Topology module (NEST 2.2)
import nest.topology as topo
# ! Make sure we start with a clean slate, even if we re-run the script
# ! in the same Python session.
nest.ResetKernel()
# ! Import math, we need Pi
import math
# ! Configurable Parameters
# ! =======================
# !
# ! Here we define those parameters that we take to be
# ! configurable. The choice of configurable parameters is obviously
# ! arbitrary, and in practice one would have far more configurable
# ! parameters. We restrict ourselves to:
# !
# ! - Network size in neurons ``N``, each layer is ``N x N``.
# ! - Network size in subtended visual angle ``visSize``, in degree.
# ! - Temporal frequency of drifting grating input ``f_dg``, in Hz.
# ! - Spatial wavelength and direction of drifting grating input,
# ! ``lambda_dg`` and ``phi_dg``, in degree/radian.
# ! - Background firing rate of retinal nodes and modulation amplitude,
# ! ``retDC`` and ``retAC``, in Hz.
# ! - Simulation duration ``simtime``; actual simulation is split into
# ! intervals of ``sim_interval`` length, so that the network state
# ! can be visualized in those intervals. Times are in ms.
Params = {'N': 40,
'visSize': 8.0,
'f_dg': 2.0,
'lambda_dg': 2.0,
'phi_dg': 0.0,
'retDC': 30.0,
'retAC': 30.0,
'simtime': 100.0,
'sim_interval': 5.0
}
# ! Neuron Models
# ! =============
# !
# ! We declare models in two steps:
# !
# ! 1. We define a dictionary specifying the NEST neuron model to use
# ! as well as the parameters for that model.
# ! #. We create three copies of this dictionary with parameters
# ! adjusted to the three model variants specified in Table~2 of
# ! Hill & Tononi (2005) (cortical excitatory, cortical inhibitory,
# ! thalamic)
# !
# ! In addition, we declare the models for the stimulation and
# ! recording devices.
# !
# ! The general neuron model
# ! ------------------------
# !
# ! We use the ``iaf_cond_alpha`` neuron, which is an
# ! integrate-and-fire neuron with two conductance-based synapses which
# ! have alpha-function time course. Any input with positive weights
# ! will automatically be directed to the synapse labeled ``_ex``, any
# ! with negative weights to the synapse labeled ``_in``. We define
# ! **all** parameters explicitly here, so that no information is
# ! hidden in the model definition in NEST. ``V_m`` is the membrane
# ! potential to which the model neurons will be initialized.
# ! The model equations and parameters for the Hill-Tononi neuron model
# ! are given on pp. 1677f and Tables 2 and 3 in that paper. Note some
# ! peculiarities and adjustments:
# !
# ! - Hill & Tononi specify their model in terms of the membrane time
# ! constant, while the ``iaf_cond_alpha`` model is based on the
# !   membrane capacitance. Interestingly, conductances are unitless in
# ! the H&T model. We thus can use the time constant directly as
# ! membrane capacitance.
# ! - The model includes sodium and potassium leak conductances. We
# ! combine these into a single one as follows:
# $ \begin{equation}-g_{NaL}(V-E_{Na}) - g_{KL}(V-E_K)
# $ = -(g_{NaL}+g_{KL})
# $ \left(V-\frac{g_{NaL}E_{Na}+g_{KL}E_K}{g_{NaL}+g_{KL}}\right)
# $ \end{equation}
# ! - We write the resulting expressions for g_L and E_L explicitly
# ! below, to avoid errors in copying from our pocket calculator.
# ! - The paper gives a range of 1.0-1.85 for g_{KL}, we choose 1.5
# ! here.
# ! - The Hill-Tononi model has no explicit reset or refractory
# ! time. We arbitrarily set V_reset and t_ref.
# ! - The paper uses double exponential time courses for the synaptic
# ! conductances, with separate time constants for the rising and
# !   falling flanks. Alpha functions have only a single time
# ! constant: we use twice the rising time constant given by Hill and
# ! Tononi.
# ! - In the general model below, we use the values for the cortical
# ! excitatory cells as defaults. Values will then be adapted below.
# !
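# ! As a quick numerical check (added, not part of the original text), the
# ! combined leak parameters used below evaluate to
# !
# !   g_L = g_NaL + g_KL = 0.2 + 1.5 = 1.7
# !   E_L = (g_NaL*E_Na + g_KL*E_K) / (g_NaL + g_KL)
# !       = (0.2*30.0 + 1.5*(-90.0)) / 1.7 ≈ -75.9 mV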
nest.CopyModel('iaf_cond_alpha', 'NeuronModel',
params={'C_m': 16.0,
'E_L': (0.2 * 30.0 + 1.5 * -90.0) / (0.2 + 1.5),
'g_L': 0.2 + 1.5,
'E_ex': 0.0,
'E_in': -70.0,
'V_reset': -60.0,
'V_th': -51.0,
't_ref': 2.0,
'tau_syn_ex': 1.0,
'tau_syn_in': 2.0,
'I_e': 0.0,
'V_m': -70.0})
# ! Adaptation of models for different populations
# ! ----------------------------------------------
# ! We must copy the `NeuronModel` dictionary explicitly, otherwise
# ! Python would just create a reference.
# ! Cortical excitatory cells
# ! .........................
# ! Parameters are the same as above, so we need not adapt anything
nest.CopyModel('NeuronModel', 'CtxExNeuron')
# ! Cortical inhibitory cells
# ! .........................
nest.CopyModel('NeuronModel', 'CtxInNeuron',
params={'C_m': 8.0,
'V_th': -53.0,
't_ref': 1.0})
# ! Thalamic cells
# ! ..............
nest.CopyModel('NeuronModel', 'ThalamicNeuron',
params={'C_m': 8.0,
'V_th': -53.0,
't_ref': 1.0,
'E_in': -80.0})
# ! Input generating nodes
# ! ----------------------
# ! Input is generated by sinusoidally modulated Poisson generators,
# ! organized in a square layer of retina nodes. These nodes require a
# ! slightly more complicated initialization than all other elements of
# ! the network:
# !
# ! - Average firing rate ``rate``, firing rate modulation depth ``amplitude``,
# ! and temporal modulation frequency ``frequency`` are the same for all
# ! retinal nodes and are set directly below.
# ! - The temporal phase ``phase`` of each node depends on its position in
# ! the grating and can only be assigned after the retinal layer has
# !   been created. We therefore specify a function for initializing the
# ! ``phase``. This function will be called for each node.
def phaseInit(pos, lam, alpha):
'''Initializer function for phase of drifting grating nodes.
pos : position (x,y) of node, in degree
lam : wavelength of grating, in degree
alpha: angle of grating in radian, zero is horizontal
Returns number to be used as phase of sinusoidal Poisson generator.
'''
return 360.0 / lam * (math.cos(alpha) * pos[0] + math.sin(alpha) * pos[1])
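# ! A quick worked example for this formula (added for illustration): with
# ! ``lambda_dg`` = 2.0 degree and ``phi_dg`` = 0, a node at x = 1.0 degree
# ! gets a phase of 360/2 * 1.0 = 180 degrees, i.e. half a grating period.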
nest.CopyModel('sinusoidal_poisson_generator', 'RetinaNode',
params={'amplitude': Params['retAC'],
'rate': Params['retDC'],
'frequency': Params['f_dg'],
'phase': 0.0,
'individual_spike_trains': False})
# ! Recording nodes
# ! ---------------
# ! We use the new ``multimeter`` device for recording from the model
# ! neurons. At present, ``iaf_cond_alpha`` is one of few models
# ! supporting ``multimeter`` recording. Support for more models will
# ! be added soon; until then, you need to use ``voltmeter`` to record
# ! from other models.
# !
# ! We configure the multimeter to record the membrane potential at
# ! certain intervals to memory only. We record the GID of
# ! the recorded neurons, but not the time.
nest.CopyModel('multimeter', 'RecordingNode',
params={'interval': Params['sim_interval'],
'record_from': ['V_m'],
'record_to': ['memory'],
'withgid': True,
'withtime': False})
# ! Populations
# ! ===========
# ! We now create the neuron populations in the model, again in the
# ! form of Python dictionaries. We define them in order from eye via
# ! thalamus to cortex.
# !
# ! We first define a dictionary defining common properties for all
# ! populations
layerProps = {'rows': Params['N'],
'columns': Params['N'],
'extent': [Params['visSize'], Params['visSize']],
'edge_wrap': True}
# ! This dictionary does not yet specify the elements to put into the
# ! layer, since they will differ from layer to layer. We will add them
# ! below by updating the ``'elements'`` dictionary entry for each
# ! population.
# ! Retina
# ! ------
layerProps.update({'elements': 'RetinaNode'})
retina = topo.CreateLayer(layerProps)
# retina_leaves is a work-around until NEST 3.0 is released
retina_leaves = nest.hl_api.GetLeaves(retina)[0]
# ! Now set phases of retinal oscillators; we use a list comprehension instead
# ! of a loop.
[nest.SetStatus([n], {"phase": phaseInit(topo.GetPosition([n])[0],
Params["lambda_dg"],
Params["phi_dg"])})
for n in retina_leaves]
# ! Thalamus
# ! --------
# ! We first introduce specific neuron models for the thalamic relay
# ! cells and interneurons. These have identical properties, but by
# ! treating them as different models, we can address them specifically
# ! when building connections.
# !
# ! We use a list comprehension to do the model copies.
[nest.CopyModel('ThalamicNeuron', SpecificModel) for SpecificModel in
('TpRelay', 'TpInter')]
# ! Now we can create the layer, with one relay cell and one
# ! interneuron per location:
layerProps.update({'elements': ['TpRelay', 'TpInter']})
Tp = topo.CreateLayer(layerProps)
# ! Reticular nucleus
# ! -----------------
# ! We follow the same approach as above, even though we have only a
# ! single neuron in each location.
[nest.CopyModel('ThalamicNeuron', SpecificModel) for SpecificModel in
('RpNeuron',)]
layerProps.update({'elements': 'RpNeuron'})
Rp = topo.CreateLayer(layerProps)
# ! Primary visual cortex
# ! ---------------------
# ! We follow again the same approach. We differentiate neuron types
# ! between layers and between pyramidal cells and interneurons. At
# ! each location, there are two pyramidal cells and one interneuron in
# ! each of layers 2-3, 4, and 5-6. Finally, we need to differentiate
# ! between vertically and horizontally tuned populations. When creating
# ! the populations, we create the vertically and the horizontally
# ! tuned populations as separate populations.
# ! We use list comprehensions to create all neuron types:
[nest.CopyModel('CtxExNeuron', layer + 'pyr')
for layer in ('L23', 'L4', 'L56')]
[nest.CopyModel('CtxInNeuron', layer + 'in')
for layer in ('L23', 'L4', 'L56')]
# ! Now we can create the populations, suffixes h and v indicate tuning
layerProps.update({'elements': ['L23pyr', 2, 'L23in', 1,
'L4pyr', 2, 'L4in', 1,
'L56pyr', 2, 'L56in', 1]})
Vp_h = topo.CreateLayer(layerProps)
Vp_v = topo.CreateLayer(layerProps)
# ! Collect all populations
# ! -----------------------
# ! For reference purposes, e.g., printing, we collect all populations
# ! in a tuple:
populations = (retina, Tp, Rp, Vp_h, Vp_v)
# ! Inspection
# ! ----------
# ! We can now look at the network using `PrintNetwork`:
nest.hl_api.PrintNetwork()
# ! We can also try to plot a single layer in a network. For
# ! simplicity, we use Rp, which has only a single neuron per position.
topo.PlotLayer(Rp)
pylab.title('Layer Rp')
pylab.show()
# ! Synapse models
# ! ==============
# ! Actual synapse dynamics, e.g., properties such as the synaptic time
# ! course, time constants, reversal potentials, are properties of
# ! neuron models in NEST and we set them in section `Neuron models`_
# ! above. When we refer to *synapse models* in NEST, we actually mean
# ! connectors which store information about connection weights and
# ! delays, as well as port numbers at the target neuron (``rport``)
# ! and implement synaptic plasticity. The latter two aspects are not
# ! relevant here.
# !
# ! We just use NEST's ``static_synapse`` connector but copy it to
# ! synapse models ``AMPA`` and ``GABA_A`` for the sake of
# ! explicitness. Weights and delays are set as needed in section
# ! `Connections`_ below, as they are different from projection to
# ! projection. De facto, the sign of the synaptic weight decides
# ! whether input via a connection is handled by the ``_ex`` or the
# ! ``_in`` synapse.
nest.CopyModel('static_synapse', 'AMPA')
nest.CopyModel('static_synapse', 'GABA_A')
# ! Connections
# ! ====================
# ! Building connections is the most complex part of network
# ! construction. Connections are specified in Table 1 in the
# ! Hill-Tononi paper. As pointed out above, we only consider AMPA and
# ! GABA_A synapses here. Adding other synapses is tedious work, but
# ! should pose no new principal challenges. We also use a uniform
# ! instead of a Gaussian distribution for the weights.
# !
# ! The model has two identical primary visual cortex populations,
# ! ``Vp_v`` and ``Vp_h``, tuned to vertical and horizontal gratings,
# ! respectively. The *only* difference in the connection patterns
# ! between the two populations is that the thalamocortical input to
# ! layers L4 and L5-6 comes from populations of 8x2 and 2x8 grid
# ! locations, respectively. Furthermore, inhibitory connections in
# ! cortex go to the opposing orientation population as well as to
# ! their own.
# !
# ! To save us a lot of code doubling, we thus define property
# ! dictionaries for all connections first and then use them to connect
# ! both populations. We follow the subdivision of connections as in
# ! the Hill & Tononi paper.
# !
# ! **Note:** Hill & Tononi state that their model spans 8 degrees of
# ! visual angle and stimuli are specified according to this. On the
# ! other hand, all connection patterns are defined in terms of cell
# ! grid positions. Since the NEST Topology Module defines connection
# ! patterns in terms of the extent given in degrees, we need to apply
# ! the following scaling factor to all lengths in connections:
dpc = Params['visSize'] / (Params['N'] - 1)
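# ! For illustration: assuming, e.g., visSize = 8.0 degrees and N = 40
# ! cells per row, dpc = 8.0 / 39, i.e. roughly 0.2 degrees between
# ! neighbouring grid positions; all mask sizes and kernel widths below
# ! are expressed as multiples of this value.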
# ! We will collect all same-orientation cortico-cortical connections in
ccConnections = []
# ! the cross-orientation cortico-cortical connections in
ccxConnections = []
# ! and all cortico-thalamic connections in
ctConnections = []
# ! Horizontal intralaminar
# ! -----------------------
# ! *Note:* "Horizontal" means "within the same cortical layer" in this
# ! case.
# !
# ! We first define a dictionary with the (most) common properties for
# ! horizontal intralaminar connection. We then create copies in which
# ! we adapt only those values that need adapting.
horIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 12.0 * dpc}},
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpc}},
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
# ! We use a loop to do the work for us. The loop runs over a list of
# ! dictionaries with all values that need updating
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23in"}},
{"sources": {"model": "L4pyr"}, "targets": {"model": "L4pyr"},
"mask": {"circular": {"radius": 7.0 * dpc}}},
{"sources": {"model": "L4pyr"}, "targets": {"model": "L4in"},
"mask": {"circular": {"radius": 7.0 * dpc}}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56pyr"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56in"}}]:
ndict = horIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
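# ! Copying horIntraBase before updating keeps the base dictionary
# ! untouched, so every iteration starts from the same template and
# ! only the entries listed in the loop are overridden.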
# ! Vertical intralaminar
# ! -----------------------
# ! *Note:* "Vertical" means "between cortical layers" in this
# ! case.
# !
# ! We proceed as above.
verIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 2.0 * dpc}},
"kernel": {"gaussian": {"p_center": 1.0, "sigma": 7.5 * dpc}},
"weights": 2.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L56pyr"},
"weights": 1.0},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23in"},
"weights": 1.0},
{"sources": {"model": "L4pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L4pyr"}, "targets": {"model": "L23in"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23in"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4pyr"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4in"}}]:
ndict = verIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
# ! Intracortical inhibitory
# ! ------------------------
# !
# ! We proceed as above, with the following difference: each connection
# ! is added to the same-orientation and the cross-orientation list of
# ! connections.
# !
# ! **Note:** Weights increased from -1.0 to -2.0, to make up for missing GabaB
# !
# ! Note that we have to specify the **weight with negative sign** to make
# ! the connections inhibitory.
intraInhBase = {"connection_type": "divergent",
"synapse_model": "GABA_A",
"mask": {"circular": {"radius": 7.0 * dpc}},
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpc}},
"weights": -2.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
# ! We use a loop to do the work for us. The loop runs over a list of
# ! dictionaries with all values that need updating
for conn in [{"sources": {"model": "L23in"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23in"}, "targets": {"model": "L23in"}},
{"sources": {"model": "L4in"}, "targets": {"model": "L4pyr"}},
{"sources": {"model": "L4in"}, "targets": {"model": "L4in"}},
{"sources": {"model": "L56in"}, "targets": {"model": "L56pyr"}},
{"sources": {"model": "L56in"}, "targets": {"model": "L56in"}}]:
ndict = intraInhBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
ccxConnections.append(ndict)
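# ! The same dictionary object is appended to both lists here; this is
# ! safe because the connection dictionaries are only read, never
# ! modified, when the layers are connected further below.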
# ! Corticothalamic
# ! ---------------
corThalBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 5.0 * dpc}},
"kernel": {"gaussian": {"p_center": 0.5, "sigma": 7.5 * dpc}},
"weights": 1.0,
"delays": {"uniform": {"min": 7.5, "max": 8.5}}}
# ! We use a loop to do the work for us. The loop runs over a list of
# ! dictionaries with all values that need updating
for conn in [{"sources": {"model": "L56pyr"},
"targets": {"model": "TpRelay"}},
{"sources": {"model": "L56pyr"},
"targets": {"model": "TpInter"}}]:
ndict = corThalBase.copy()  # start from the corticothalamic template defined above
ndict.update(conn)
ctConnections.append(ndict)
# ! Corticoreticular
# ! ----------------
# ! In this case, there is only a single connection, so we write the
# ! dictionary itself; it is very similar to the corThalBase, and to
# ! show that, we copy first, then update. We need no ``targets`` entry,
# ! since Rp has only one neuron per location.
corRet = corThalBase.copy()
corRet.update({"sources": {"model": "L56pyr"}, "weights": 2.5})
# ! Build all connections beginning in cortex
# ! -----------------------------------------
# ! Cortico-cortical, same orientation
print("Connecting: cortico-cortical, same orientation")
[topo.ConnectLayers(Vp_h, Vp_h, conn) for conn in ccConnections]
[topo.ConnectLayers(Vp_v, Vp_v, conn) for conn in ccConnections]
# ! Cortico-cortical, cross-orientation
print("Connecting: cortico-cortical, other orientation")
[topo.ConnectLayers(Vp_h, Vp_v, conn) for conn in ccxConnections]
[topo.ConnectLayers(Vp_v, Vp_h, conn) for conn in ccxConnections]
# ! Cortico-thalamic connections
print("Connecting: cortico-thalamic")
[topo.ConnectLayers(Vp_h, Tp, conn) for conn in ctConnections]
[topo.ConnectLayers(Vp_v, Tp, conn) for conn in ctConnections]
topo.ConnectLayers(Vp_h, Rp, corRet)
topo.ConnectLayers(Vp_v, Rp, corRet)
# ! Thalamo-cortical connections
# ! ----------------------------
# ! **Note:** According to the text on p. 1674, bottom right, of
# ! the Hill & Tononi paper, thalamocortical connections are
# ! created by selecting from the thalamic population for each
# ! L4 pyramidal cell, ie, are *convergent* connections.
# !
# ! We first handle the rectangular thalamocortical connections.
thalCorRect = {"connection_type": "convergent",
"sources": {"model": "TpRelay"},
"synapse_model": "AMPA",
"weights": 5.0,
"delays": {"uniform": {"min": 2.75, "max": 3.25}}}
print("Connecting: thalamo-cortical")
# ! Horizontally tuned
thalCorRect.update(
{"mask": {"rectangular": {"lower_left": [-4.0 * dpc, -1.0 * dpc],
"upper_right": [4.0 * dpc, 1.0 * dpc]}}})
for conn in [{"targets": {"model": "L4pyr"}, "kernel": 0.5},
{"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
thalCorRect.update(conn)
topo.ConnectLayers(Tp, Vp_h, thalCorRect)
# ! Vertically tuned
thalCorRect.update(
{"mask": {"rectangular": {"lower_left": [-1.0 * dpc, -4.0 * dpc],
"upper_right": [1.0 * dpc, 4.0 * dpc]}}})
for conn in [{"targets": {"model": "L4pyr"}, "kernel": 0.5},
{"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
thalCorRect.update(conn)
topo.ConnectLayers(Tp, Vp_v, thalCorRect)
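# ! The two rectangular masks above realise the 8x2 (horizontal) and
# ! 2x8 (vertical) thalamic input footprints mentioned at the start of
# ! the Connections section: each mask spans 8 grid spacings along the
# ! preferred orientation and 2 grid spacings across it.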
# ! Diffuse connections
thalCorDiff = {"connection_type": "convergent",
"sources": {"model": "TpRelay"},
"synapse_model": "AMPA",
"weights": 5.0,
"mask": {"circular": {"radius": 5.0 * dpc}},
"kernel": {"gaussian": {"p_center": 0.1, "sigma": 7.5 * dpc}},
"delays": {"uniform": {"min": 2.75, "max": 3.25}}}
for conn in [{"targets": {"model": "L4pyr"}},
{"targets": {"model": "L56pyr"}}]:
thalCorDiff.update(conn)
topo.ConnectLayers(Tp, Vp_h, thalCorDiff)
topo.ConnectLayers(Tp, Vp_v, thalCorDiff)
# ! Thalamic connections
# ! --------------------
# ! Connections inside thalamus, including Rp
# !
# ! *Note:* In Hill & Tononi, the inhibition between Rp cells is mediated by
# ! GABA_B receptors. We use GABA_A receptors here to provide some
# ! self-dampening of Rp.
# !
# ! **Note:** The following code had a serious bug in v. 0.1: During the first
# ! iteration of the loop, "synapse_model" and "weights" were set to "AMPA" and
# ! "0.1", respectively and remained unchanged, so that all connections were
# ! created as excitatory connections, even though they should have been
# ! inhibitory. We now specify synapse_model and weight explicitly for each
# ! connection to avoid this.
thalBase = {"connection_type": "divergent",
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
print("Connecting: intra-thalamic")
for src, tgt, conn in [(Tp, Rp, {"sources": {"model": "TpRelay"},
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 2.0 * dpc}},
"kernel": {"gaussian": {"p_center": 1.0,
"sigma": 7.5 * dpc}},
"weights": 2.0}),
(Tp, Tp, {"sources": {"model": "TpInter"},
"targets": {"model": "TpRelay"},
"synapse_model": "GABA_A",
"weights": -1.0,
"mask": {"circular": {"radius": 2.0 * dpc}},
"kernel": {"gaussian":
{"p_center": 0.25,
"sigma": 7.5 * dpc}}}),
(Tp, Tp, {"sources": {"model": "TpInter"},
"targets": {"model": "TpInter"},
"synapse_model": "GABA_A",
"weights": -1.0,
"mask": {"circular": {"radius": 2.0 * dpc}},
"kernel": {"gaussian":
{"p_center": 0.25,
"sigma": 7.5 * dpc}}}),
(Rp, Tp, {"targets": {"model": "TpRelay"},
"synapse_model": "GABA_A",
"weights": -1.0,
"mask": {"circular": {"radius": 12.0 * dpc}},
"kernel": {"gaussian":
{"p_center": 0.15,
"sigma": 7.5 * dpc}}}),
(Rp, Tp, {"targets": {"model": "TpInter"},
"synapse_model": "GABA_A",
"weights": -1.0,
"mask": {"circular": {"radius": 12.0 * dpc}},
"kernel": {"gaussian":
{"p_center": 0.15,
"sigma": 7.5 * dpc}}}),
(Rp, Rp, {"targets": {"model": "RpNeuron"},
"synapse_model": "GABA_A",
"weights": -1.0,
"mask": {"circular": {"radius": 12.0 * dpc}},
"kernel": {"gaussian":
{"p_center": 0.5,
"sigma": 7.5 * dpc}}})]:
thalBase.update(conn)
topo.ConnectLayers(src, tgt, thalBase)
# ! Thalamic input
# ! --------------
# ! Input to the thalamus from the retina.
# !
# ! **Note:** Hill & Tononi specify a delay of 0 ms for this connection.
# ! We use 1 ms here.
retThal = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 1.0 * dpc}},
"kernel": {"gaussian": {"p_center": 0.75, "sigma": 2.5 * dpc}},
"weights": 10.0,
"delays": 1.0}
print("Connecting: retino-thalamic")
for conn in [{"targets": {"model": "TpRelay"}},
{"targets": {"model": "TpInter"}}]:
retThal.update(conn)
topo.ConnectLayers(retina, Tp, retThal)
# ! Checks on connections
# ! ---------------------
# ! As a very simple check on the connections created, we inspect
# ! the connections from the central node of various layers.
# ! Connections from Retina to TpRelay
topo.PlotTargets(topo.FindCenterElement(retina), Tp, 'TpRelay', 'AMPA')
pylab.title('Connections Retina -> TpRelay')
pylab.show()
# ! Connections from TpRelay to L4pyr in Vp (horizontally tuned)
topo.PlotTargets(topo.FindCenterElement(Tp), Vp_h, 'L4pyr', 'AMPA')
pylab.title('Connections TpRelay -> Vp(h) L4pyr')
pylab.show()
# ! Connections from TpRelay to L4pyr in Vp (vertically tuned)
topo.PlotTargets(topo.FindCenterElement(Tp), Vp_v, 'L4pyr', 'AMPA')
pylab.title('Connections TpRelay -> Vp(v) L4pyr')
pylab.show()
# ! Recording devices
# ! =================
# ! This recording device setup is a bit makeshift. For each population
# ! we want to record from, we create one ``multimeter``, then select
# ! all nodes of the right model from the target population and
# ! connect. ``loc`` is the subplot location for the layer.
print("Connecting: Recording devices")
recorders = {}
for name, loc, population, model in [('TpRelay', 1, Tp, 'TpRelay'),
('Rp', 2, Rp, 'RpNeuron'),
('Vp_v L4pyr', 3, Vp_v, 'L4pyr'),
('Vp_h L4pyr', 4, Vp_h, 'L4pyr')]:
recorders[name] = (nest.Create('RecordingNode'), loc)
# population_leaves is a work-around until NEST 3.0 is released
population_leaves = nest.hl_api.GetLeaves(population)[0]
tgts = [nd for nd in population_leaves
if nest.GetStatus([nd], 'model')[0] == model]
nest.Connect(recorders[name][0], tgts) # one recorder to all targets
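# ! recorders now maps a population label to a tuple of
# ! (multimeter node, subplot index); the simulation loop below reads
# ! the recorded membrane potentials via the first element and uses
# ! the second to place the corresponding pseudocolor plot.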
# ! Example simulation
# ! ====================
# ! This simulation is set up to create a step-wise visualization of
# ! the membrane potential. To do so, we simulate ``sim_interval``
# ! milliseconds at a time, then read out data from the multimeters,
# ! clear data from the multimeters and plot the data as pseudocolor
# ! plots.
# ! show time during simulation
nest.SetStatus([0], {'print_time': True})
# ! lower and upper limits for color scale, for each of the four
# ! populations recorded.
vmn = [-80, -80, -80, -80]
vmx = [-50, -50, -50, -50]
nest.Simulate(Params['sim_interval'])
# ! loop over simulation intervals
for t in pylab.arange(Params['sim_interval'], Params['simtime'],
Params['sim_interval']):
# do the simulation
nest.Simulate(Params['sim_interval'])
# clear figure and choose colormap
pylab.clf()
pylab.jet()
# now plot data from each recorder in turn, assume four recorders
for name, r in recorders.items():
rec = r[0]
sp = r[1]
pylab.subplot(2, 2, sp)
d = nest.GetStatus(rec)[0]['events']['V_m']
if len(d) != Params['N'] ** 2:
# cortical layer with two neurons in each location, take average
d = 0.5 * (d[::2] + d[1::2])
# clear data from multimeter
nest.SetStatus(rec, {'n_events': 0})
pylab.imshow(pylab.reshape(d, (Params['N'], Params['N'])),
aspect='equal', interpolation='nearest',
extent=(0, Params['N'] + 1, 0, Params['N'] + 1),
vmin=vmn[sp - 1], vmax=vmx[sp - 1])
pylab.colorbar()
pylab.title(name + ', t = %6.1f ms' % nest.GetKernelStatus()['time'])
pylab.draw() # force drawing inside loop
pylab.show() # required by ``pyreport``
# ! just for some information at the end
print(nest.GetKernelStatus())
|
terhorstd/nest-simulator
|
topology/examples/hill_tononi_Vp.py
|
Python
|
gpl-2.0
| 36,473
|
[
"Gaussian",
"NEURON"
] |
5647b510648c075d83c41b95e31bef2c9d4ebdeb8a3cf63706d5d84fb2026fab
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
from setuptools import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png",
"*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
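# For example, for a hypothetical directory luigi/static/visualiser holding HTML
# and JS assets, get_static_files("luigi/static") contributes glob patterns such
# as "static/visualiser/*.html" and "static/visualiser/*.js"; setuptools expands
# these patterns relative to the 'luigi' package via package_data below.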
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'tornado>=4.0,<5',
'python-daemon<3.0',
]
if os.environ.get('READTHEDOCS', None) == 'True':
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
install_requires.append('sqlalchemy')
# readthedocs don't like python-daemon, see #1342
install_requires.remove('python-daemon<3.0')
setup(
name='luigi',
version='2.2.0',
description='Workflow mgmt + task scheduling + dependency resolution',
long_description=long_description,
author='Erik Bernhardsson',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
'luigi-deps-tree = luigi.tools.deps_tree:main',
'luigi-migrate = luigi.tools.migrate:main'
]
},
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
)
|
samepage-labs/luigi
|
setup.py
|
Python
|
apache-2.0
| 3,014
|
[
"VisIt"
] |
1a3afc2d08e3be2e840819d4cb4fe159d1912a155469324a7b724292bf0693d0
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
import testtools
from neutron.agent.common import utils # noqa
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'5: foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'6: foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'7: foo:foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'8: foo@foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'9: bar.9@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 9 <REORDER_HDR>',
'10: bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 10 <REORDER_HDR>',
'11: bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 11 <REORDER_HDR>',
'12: bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 12 <REORDER_HDR>',
'13: bar:bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 13 <REORDER_HDR>',
'14: bar@bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 14 <REORDER_HDR>']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
GATEWAY_SAMPLE5 = ("""
default via 192.168.99.1 proto static
""")
GATEWAY_SAMPLE6 = ("""
default via 192.168.99.1 proto static metric 100
""")
GATEWAY_SAMPLE7 = ("""
default dev qg-31cd36 metric 1
""")
IPv6_GATEWAY_SAMPLE1 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE2 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
""")
IPv6_GATEWAY_SAMPLE3 = ("""
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE4 = ("""
default via fe80::dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE5 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
RULE_V4_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
101: from 192.168.45.100 lookup 2
""")
RULE_V6_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
201: from 2001:db8::1 lookup 3
""")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute(['o'], 'link', ('list',),
run_as_root=True)
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase()
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
@mock.patch('os.path.islink')
@mock.patch('os.listdir', return_value=['lo'])
def test_get_devices(self, mocked_listdir, mocked_islink):
retval = ip_lib.IPWrapper().get_devices()
mocked_islink.assert_called_once_with('/sys/class/net/lo')
self.assertEqual(retval, [ip_lib.IPDevice('lo')])
@mock.patch('neutron.agent.common.utils.execute')
def test_get_devices_namespaces(self, mocked_execute):
fake_str = mock.Mock()
fake_str.split.return_value = ['lo']
mocked_execute.return_value = fake_str
retval = ip_lib.IPWrapper(namespace='foo').get_devices()
mocked_execute.assert_called_once_with(
['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net',
'-maxdepth', '1', '-type', 'l', '-printf', '%f '],
run_as_root=True, log_fail_as_error=True)
self.assertTrue(fake_str.split.called)
self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')])
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces()
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with([], 'netns', ('list',))
def test_add_tuntap(self):
ip_lib.IPWrapper().add_tuntap('tap0')
self.execute.assert_called_once_with([], 'tuntap',
('add', 'tap0', 'mode', 'tap'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth(self):
ip_lib.IPWrapper().add_veth('tap0', 'tap1')
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_del_veth(self):
ip_lib.IPWrapper().del_veth('fpr-1234')
self.execute.assert_called_once_with([], 'link',
('del', 'fpr-1234'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_dummy(self):
ip_lib.IPWrapper().add_dummy('dummy0')
self.execute.assert_called_once_with([], 'link',
('add', 'dummy0',
'type', 'dummy'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_get_device(self):
dev = ip_lib.IPWrapper(namespace='ns').device('eth0')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper()
with mock.patch.object(ip.netns, 'exists') as ns_exists:
with mock.patch('neutron.agent.common.utils.execute'):
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'),
run_as_root=True, namespace=None,
log_fail_as_error=True)])
ip_dev.assert_has_calls([mock.call('lo', namespace='ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper().ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_vxlan_valid_port_length(self):
retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0',
group='group0',
dev='dev0', ttl='ttl0',
tos='tos0',
local='local0', proxy=True,
port=('1', '2'))
self.assertIsInstance(retval, ip_lib.IPDevice)
self.assertEqual(retval.name, 'vxlan0')
self.execute.assert_called_once_with([], 'link',
['add', 'vxlan0', 'type',
'vxlan', 'id', 'vni0', 'group',
'group0', 'dev', 'dev0',
'ttl', 'ttl0', 'tos', 'tos0',
'local', 'local0', 'proxy',
'port', '1', '2'],
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_vxlan_invalid_port_length(self):
wrapper = ip_lib.IPWrapper()
self.assertRaises(exceptions.NetworkVxlanPortRangeError,
wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0',
dev='dev0', ttl='ttl0', tos='tos0',
local='local0', proxy=True,
port=('1', '2', '3'))
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper().add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
dev2 = ip_lib.IPDevice('tap0', namespace='ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
self.assertIsNotNone(dev1)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run([], ('link', 'show'))
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run(['o'], ('link'))
self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))])
def test_as_root_namespace_false(self):
self.ip_cmd._as_root([], ('link'))
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=False)])
def test_as_root_namespace_true(self):
self.ip_cmd._as_root([], ('link'), use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=True)])
def test_as_root_namespace_true_with_options(self):
self.ip_cmd._as_root('o', 'link', use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root('o',
'foo',
('link'),
use_root_namespace=True)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
def _assert_sudo(self, options, args, use_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
use_root_namespace=use_root_namespace)])
class TestIpRuleCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRuleCommand, self).setUp()
self.parent._as_root.return_value = ''
self.command = 'rule'
self.rule_cmd = ip_lib.IpRuleCommand(self.parent)
def _test_add_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table=table, priority=priority)
self._assert_sudo([ip_version], (['show']))
self._assert_sudo([ip_version], ('add', 'from', ip,
'priority', priority, 'table', table))
def _test_add_rule_exists(self, ip, table, priority, output):
self.parent._as_root.return_value = output
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table=table, priority=priority)
self._assert_sudo([ip_version], (['show']))
def _test_delete_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.delete(ip, table=table, priority=priority)
self._assert_sudo([ip_version],
('del', 'priority', priority,
'table', table))
def test__parse_line(self):
def test(ip_version, line, expected):
actual = self.rule_cmd._parse_line(ip_version, line)
self.assertEqual(expected, actual)
test(4, "4030201:\tfrom 1.2.3.4/24 lookup 10203040",
{'from': '1.2.3.4/24',
'table': '10203040',
'priority': '4030201'})
test(6, "1024: from all iif qg-c43b1928-48 lookup noscope",
{'priority': '1024',
'from': '::/0',
'iif': 'qg-c43b1928-48',
'table': 'noscope'})
def test_add_rule_v4(self):
self._test_add_rule('192.168.45.100', 2, 100)
def test_add_rule_v4_exists(self):
self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE)
def test_add_rule_v6(self):
self._test_add_rule('2001:db8::1', 3, 200)
def test_add_rule_v6_exists(self):
self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE)
def test_delete_rule_v4(self):
self._test_delete_rule('192.168.45.100', 2, 100)
def test_delete_rule_v6(self):
self._test_delete_rule('2001:db8::1', 3, 200)
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
observed = self.link_cmd.set_up()
self.assertEqual(self.parent._as_root.return_value, observed)
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
observed = self.link_cmd.set_down()
self.assertEqual(self.parent._as_root.return_value, observed)
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call(['o'], ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add('192.168.45.100/24')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'global',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_add_address_scoped(self):
self.addr_cmd.add('192.168.45.100/24', scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'link',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_del_address(self):
self.addr_cmd.delete('192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush(6)
self._assert_sudo([6], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(scope='global', dadfailed=False, tentative=False,
dynamic=False, cidr='172.16.77.240/24'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'),
dict(scope='link', dadfailed=False, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22ae/64'),
dict(scope='link', dadfailed=True, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22af/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'),
dict(scope='link', dadfailed=False, tentative=False,
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(expected, self.addr_cmd.list())
self._assert_call([], ('show', 'tap0'))
def test_wait_until_address_ready(self):
self.parent._run.return_value = ADDR_SAMPLE
# this address is not tentative or failed so it should return
self.assertIsNone(self.addr_cmd.wait_until_address_ready(
'2001:470:9:1224:fd91:272:581e:3a32'))
def test_wait_until_address_ready_non_existent_address(self):
self.addr_cmd.list = mock.Mock(return_value=[])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready('abcd::1234')
def test_wait_until_address_ready_timeout(self):
tentative_address = 'fe80::3023:39ff:febc:22ae'
self.addr_cmd.list = mock.Mock(return_value=[
dict(scope='link', dadfailed=False, tentative=True, dynamic=False,
cidr=tentative_address + '/64')])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready(tentative_address,
wait_time=1)
def test_list_filtered(self):
expected = [
dict(scope='global', tentative=False, dadfailed=False,
dynamic=False, cidr='172.16.77.240/24')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
self.ip_version = 4
self.table = 14
self.metric = 100
self.cidr = '192.168.45.100/24'
self.ip = '10.0.0.1'
self.gateway = '192.168.45.100'
self.test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}},
{'sample': GATEWAY_SAMPLE5,
'expected': {'gateway': '192.168.99.1'}},
{'sample': GATEWAY_SAMPLE6,
'expected': {'gateway': '192.168.99.1',
'metric': 100}},
{'sample': GATEWAY_SAMPLE7,
'expected': {'metric': 1}}]
def test_add_gateway(self):
self.route_cmd.add_gateway(self.gateway, self.metric, self.table)
self._assert_sudo([self.ip_version],
('replace', 'default',
'via', self.gateway,
'metric', self.metric,
'dev', self.parent.name,
'table', self.table))
def test_add_gateway_subtable(self):
self.route_cmd.table(self.table).add_gateway(self.gateway, self.metric)
self._assert_sudo([self.ip_version],
('replace', 'default',
'via', self.gateway,
'metric', self.metric,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway_success(self):
self.route_cmd.delete_gateway(self.gateway, table=self.table)
self._assert_sudo([self.ip_version],
('del', 'default',
'via', self.gateway,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway_success_subtable(self):
self.route_cmd.table(table=self.table).delete_gateway(self.gateway)
self._assert_sudo([self.ip_version],
('del', 'default',
'via', self.gateway,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway_cannot_find_device(self):
self.parent._as_root.side_effect = RuntimeError("Cannot find device")
exc = self.assertRaises(exceptions.DeviceNotFoundError,
self.route_cmd.delete_gateway,
self.gateway, table=self.table)
self.assertIn(self.parent.name, str(exc))
def test_del_gateway_other_error(self):
self.parent._as_root.side_effect = RuntimeError()
self.assertRaises(RuntimeError, self.route_cmd.delete_gateway,
self.gateway, table=self.table)
def test_get_gateway(self):
for test_case in self.test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
# NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
self._assert_sudo([4], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([4], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
# NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
def test_add_route(self):
self.route_cmd.add_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
def test_delete_route(self):
self.route_cmd.delete_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
def test_list_onlink_routes_subtable(self):
self.parent._run.return_value = (
"10.0.0.0/22\n"
"172.24.4.0/24 proto kernel src 172.24.4.2\n")
routes = self.route_cmd.table(self.table).list_onlink_routes(
self.ip_version)
self.assertEqual(['10.0.0.0/22'], routes)
self._assert_call([self.ip_version],
('list', 'dev', self.parent.name, 'scope', 'link',
'table', self.table))
def test_add_onlink_route_subtable(self):
self.route_cmd.table(self.table).add_onlink_route(self.cidr)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'dev', self.parent.name,
'scope', 'link',
'table', self.table))
def test_delete_onlink_route_subtable(self):
self.route_cmd.table(self.table).delete_onlink_route(self.cidr)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'dev', self.parent.name,
'scope', 'link',
'table', self.table))
class TestIPv6IpRouteCommand(TestIpRouteCommand):
def setUp(self):
super(TestIPv6IpRouteCommand, self).setUp()
self.ip_version = 6
self.cidr = '2001:db8::/64'
self.ip = '2001:db8::100'
self.gateway = '2001:db8::1'
self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE2,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE3,
'expected': None},
{'sample': IPv6_GATEWAY_SAMPLE4,
'expected':
{'gateway': 'fe80::dfcc:aaff:feb9:76ce'}},
{'sample': IPv6_GATEWAY_SAMPLE5,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 1024}}]
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), use_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns',
'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_delete_namespace(self):
with mock.patch('neutron.agent.common.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True)
def test_namespace_exists_use_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=True)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertTrue(
netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_namespace_doest_not_exist_no_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=False)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertFalse(
netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
run_as_root=True,
check_exit_code=True,
extra_ok_codes=None)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns', 'env'] +
['%s=%s' % (k, v) for k, v in env.items()] +
['ip', 'link', 'list'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_execute_nosudo_with_no_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.parent.namespace = None
self.netns_cmd.execute(['test'])
execute.assert_called_once_with(['test'],
check_exit_code=True,
extra_ok_codes=None,
run_as_root=False)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'),
log_fail_as_error=False)
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
def test_ensure_device_is_ready(self):
ip_lib_mock = mock.Mock()
with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
self.assertTrue(ip_lib.ensure_device_is_ready("eth0"))
self.assertTrue(ip_lib_mock.link.set_up.called)
ip_lib_mock.reset_mock()
# device doesn't exist
ip_lib_mock.link.set_up.side_effect = RuntimeError
self.assertFalse(ip_lib.ensure_device_is_ready("eth0"))
class TestIpNeighCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNeighCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'neigh'
self.neigh_cmd = ip_lib.IpNeighCommand(self.parent)
def test_add_entry(self):
self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('replace', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'nud', 'permanent',
'dev', 'tap0'))
def test_delete_entry(self):
self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('del', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'dev', 'tap0'))
def test_flush(self):
self.neigh_cmd.flush(4, '192.168.0.1')
self._assert_sudo([4], ('flush', 'to', '192.168.0.1'))
class TestArpPing(TestIPCmdBase):
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch('eventlet.spawn_n')
def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper):
spawn_n.side_effect = lambda f: f()
ARPING_COUNT = 3
address = '20.0.0.1'
config = mock.Mock()
config.send_arp_for_ha = ARPING_COUNT
ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
mock.sentinel.iface_name,
address,
config)
self.assertTrue(spawn_n.called)
mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name)
ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
# Just test that arping is called with the right arguments
arping_cmd = ['arping', '-A',
'-I', mock.sentinel.iface_name,
'-c', ARPING_COUNT,
'-w', mock.ANY,
address]
ip_wrapper.netns.execute.assert_any_call(arping_cmd,
check_exit_code=True)
@mock.patch('eventlet.spawn_n')
def test_no_ipv6_addr_notif(self, spawn_n):
ipv6_addr = 'fd00::1'
config = mock.Mock()
config.send_arp_for_ha = 3
ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
mock.sentinel.iface_name,
ipv6_addr,
config)
self.assertFalse(spawn_n.called)
class TestAddNamespaceToCmd(base.BaseTestCase):
def test_add_namespace_to_cmd_with_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd,
ip_lib.add_namespace_to_cmd(cmd, 'tmp'))
def test_add_namespace_to_cmd_without_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None))
|
barnsnake351/neutron
|
neutron/tests/unit/agent/linux/test_ip_lib.py
|
Python
|
apache-2.0
| 49,793
|
[
"Brian"
] |
c6745bf1aff61170081a6126d7a2e6f02bcaf83ddc60895ac676b66d5a14cf9a
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
import numpy as np
# This test generates a synthetic GLM dataset. A GLM model should perform well on
# this dataset, since the assumptions associated with GLM are used to generate it. However, pay
# attention to the data types: you may have to cast enum columns to factors manually, since column
# type information may be lost during the save.
#
# Apart from saving the dataset using h2o.download_csv, remember to save the column types as well, e.g.
# np.save('my_file.npy', varDict)
#
# when you want to load the dataset, remember to load the types dictionary as
# types_dict = np.load('my_file.npy',allow_pickle='TRUE').item()
#
# then load your synthetic dataset specifying the column type as
# train = h2o.import_file("mydata.csv", col_types=types_dict)
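# If the reloaded frame still treats an enum column as numeric or string, it can
# be cast back manually, e.g. for a hypothetical categorical column 'C1':
# train['C1'] = train['C1'].asfactor()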
def test_define_dataset():
family = 'gaussian' # can be any valid GLM families
nrow = 10000
ncol = 10
realFrac = 0.5
intFrac = 0
enumFrac = 0.5
missing_fraction = 0
factorRange= 5
numericRange = 10
targetFactor = 4
glmDataSet = generate_dataset(family, nrow, ncol, realFrac, intFrac, enumFrac, missing_fraction, factorRange,
numericRange, targetFactor)
#h2o.download_csv(glmDataSet, "/Users/wendycwong/temp/dataset.csv") # save dataset
#np.save('/Users/wendycwong/temp/datasetTypes.npy', glmDataSet.types)
assert glmDataSet.nrow == nrow, \
"Dataset number of rows: {0}, expected number of rows: {1}".format(glmDataSet.nrow, nrow)
assert glmDataSet.ncol == (1+ncol), \
"Dataset number of columns: {0}, expected number of columns: {1}".format(glmDataSet.ncol, (1+ncol))
def generate_dataset(family, nrow, ncol, realFrac, intFrac, enumFrac, missingFrac, factorRange, numericRange,
targetFactor):
if family=="binomial":
responseFactor = 2
elif family == 'multinomial' or family == 'ordinal':
responseFactor = targetFactor
else :
responseFactor = 1
trainData = random_dataset(nrow, ncol, realFrac=realFrac, intFrac=intFrac, enumFrac=enumFrac, factorR=factorRange,
integerR=numericRange, responseFactor=responseFactor, misFrac=missingFrac)
if family=='poisson':
trainData['response'] = trainData['response']+numericRange
myX = trainData.names
myY = 'response'
myX.remove(myY)
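    # Fit a throwaway GLM for a single iteration just to obtain the coefficient names,
    # replace those coefficients with random values via makeGLMModel, and use that
    # model's predictions as the response column of the synthetic frame.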
    m = glm(family=family, max_iterations=1, interactions=["C1", "C2"], tweedie_link_power=2, tweedie_variance_power=0.4)
m.train(training_frame=trainData,x=myX,y= myY)
r = glm.getGLMRegularizationPath(m)
coeffDict = r['coefficients'][0]
coeffLen = len(coeffDict)
randCoeffVals = np.random.uniform(low=-3, high=3, size=coeffLen).tolist()
keyset = coeffDict.keys()
count = 0
for key in keyset:
coeffDict[key] = randCoeffVals[count]
count = count+1
m2 = glm.makeGLMModel(model=m,coefs=coeffDict) # model generated from setting coefficients to model
f2 = m2.predict(trainData)
finalDataset = trainData[myX]
finalDataset = finalDataset.cbind(f2[0])
finalDataset.set_name(col=finalDataset.ncols-1, name='response')
return finalDataset
def random_dataset(nrow, ncol, realFrac = 0.4, intFrac = 0.3, enumFrac = 0.3, factorR = 10, integerR=100,
responseFactor = 1, misFrac=0.01, randSeed=None):
fractions = dict()
fractions["real_fraction"] = realFrac # Right now we are dropping string columns, so no point in having them.
fractions["categorical_fraction"] = enumFrac
fractions["integer_fraction"] = intFrac
fractions["time_fraction"] = 0
fractions["string_fraction"] = 0 # Right now we are dropping string columns, so no point in having them.
fractions["binary_fraction"] = 0
df = h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=True,
response_factors=responseFactor, factors = factorR, integer_range=integerR,
real_range=integerR, seed=randSeed, **fractions)
print(df.types)
return df
if __name__ == "__main__":
pyunit_utils.standalone_test(test_define_dataset)
else:
test_define_dataset()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_generate_synthetic_GLM_data.py
|
Python
|
apache-2.0
| 4,559
|
[
"Gaussian"
] |
e4b6ffe4298bd008dfc21e10b0a786afe28988e058424cbdeef9d03c4d403214
|
#!/usr/bin/env python
"""
Packages for descriptors.
Usage:
desc.py [options]
Options:
-h, --help Show this message and exit.
-i IFNAME Input file name. [default: in.params.desc]
-o OFNAME Output file name. [default: in.params.desc]
"""
from __future__ import print_function
from docopt import docopt
__author__ = "RYO KOBAYASHI"
__version__ = "190520"
def read_desc(fname='in.params.desc'):
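    # File layout: optional '# inner_cutoff: <species> <rin> <rout>' comment lines, a
    # header line '<nsp> <nsf>', then one symmetry function per line whose leading
    # integer selects the type (1 = Gaussian, 2 = cosine, 101-104 = angular variants).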
with open(fname,'r') as f:
lines = f.readlines()
r_inner = {}
for line in lines:
data = line.split()
if data[0] == '!' or data[0] == '#':
if len(data) == 1:
continue
if len(data) == 5 and data[1] == 'inner_cutoff:':
csp = data[2]
rin = float(data[3])
rout = float(data[4])
r_inner[csp] = (rin,rout)
elif len(data) == 2:
nsp = int(data[0])
nsf = int(data[1])
descs = []
elif len(data) > 4:
itype = int(data[0])
# isp = int(data[1])
# jsp = int(data[2])
cspi = data[1]
cspj = data[2]
if itype == 1: # Gaussian
rc = float(data[3])
eta = float(data[4])
rs = float(data[5])
descs.append(('gauss',cspi,cspj,rc,eta,rs))
elif itype == 2: # cosine
rc = float(data[3])
xi = float(data[4])
descs.append(('cosine',cspi,cspj,rc,xi))
elif itype == 101: # angular
cspk = data[3]
rc = float(data[4])
almbd = float(data[5])
descs.append(('angular',cspi,cspj,cspk,rc,almbd))
elif itype == 102: # angular2
cspk = data[3]
rc = float(data[4])
almbd = float(data[5])
descs.append(('angular2',cspi,cspj,cspk,rc,almbd))
elif itype == 103: # angular3
cspk = data[3]
rc = float(data[4])
a1 = float(data[5])
descs.append(('angular3',cspi,cspj,cspk,rc,a1))
elif itype == 104: # angular4
cspk = data[3]
rc = float(data[4])
a1 = float(data[5])
descs.append(('angular4',cspi,cspj,cspk,rc,a1))
return nsp,nsf,descs,r_inner
def write_desc(nsp,nsf,descs,r_inner,fname='in.params.desc'):
with open(fname,'w') as f:
if len(r_inner) != 0:
for k,v in r_inner.items():
cspi = k
rin = v[0]
rout = v[1]
f.write('# inner_cutoff: {0:s} {1:6.2f} {2:6.2f}\n'.format(cspi,
rin,
rout))
f.write(' {0:3d} {1:5d}\n'.format(nsp,nsf))
for d in descs:
sftype = d[0]
cspi = d[1]
cspj = d[2]
if sftype == 'gauss':
rc = d[3]
eta = d[4]
rs = d[5]
f.write(' 1 {0:<3s} {1:<3s} '.format(cspi,cspj))
f.write(' {0:6.2f} {1:9.4f} {2:8.4f}\n'.format(rc,eta,rs))
elif sftype == 'cosine':
rc = d[3]
xi = d[4]
f.write(' 2 {0:<3s} {1:<3s} '.format(cspi,cspj))
f.write(' {0:6.2f} {1:9.4f}\n'.format(rc,xi))
elif sftype == 'angular':
cspk = d[3]
rc = d[4]
almbd = d[5]
f.write(' 101 {0:<3s} {1:<3s} {2:<3s}'.format(cspi,cspj,cspk))
f.write(' {0:6.2f} {1:9.4f}\n'.format(rc,almbd))
return None
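# Hedged example (not in the original module): the in-memory layout that read_desc()
# returns and write_desc() expects, for a hypothetical Si-O system with one Gaussian
# and one cosine symmetry function. All numeric values are placeholders.
def example_write(fname='example.params.desc'):
    descs = [('gauss', 'Si', 'Si', 5.0, 2.0, 0.0),   # itype 1: rc, eta, rs
             ('cosine', 'Si', 'O', 5.0, 1.0)]        # itype 2: rc, xi
    r_inner = {'Si': (0.5, 1.0)}                     # optional inner cutoff per species
    write_desc(2, len(descs), descs, r_inner, fname=fname)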
if __name__ == "__main__":
args = docopt(__doc__)
infname = args['-i']
outfname = args['-o']
    nsp,nsf,descs,r_inner = read_desc(infname)
    write_desc(nsp,nsf,descs,r_inner,fname=outfname)
|
ryokbys/nap
|
nappy/nn/desc.py
|
Python
|
mit
| 3,976
|
[
"Gaussian"
] |
e272781b3e908805b180aa4fd0b2e3fe2fc7b49363cc56ffed1b3aa057b12309
|
__author__ = 'Timotheus Kampik'
import sys
import random
# import and init pygame
import pygame
pygame.init()
window = pygame.display.set_mode((640, 480))
def draw():
    # get a random number from a Gaussian distribution with mean=window_width/2 and standard deviation=60
num = random.gauss(window.get_width()/2, 60)
pygame.draw.circle(window, (255, 255, 255), (int(num), int(window.get_height()/2)), 10, 0)
pygame.display.flip()
#draw continuously:
while True:
draw()
#handle quit event
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
|
TimKam/TheNatureOfCodePython
|
Introduction/Example_4_Gaussian/gaussian.py
|
Python
|
mit
| 599
|
[
"Gaussian"
] |
b2a142d2d1894b36ec7998a6bab2ef3c150341790d6e2bbc05ff6630697b23ac
|
#coding: utf8
from datetime import datetime, timedelta
from itertools import chain
import django
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response, redirect
from common_helpers import nested_commit_on_success
from user_profile.decorators import login_required
from forms import PatientForm, VisitForm, DiagnosisForm, SearchForm
from forms import DiagnosisFormset, DiagnosisModelFormset, VisitFirstForm
from models import Patient, Diagnosis, Visit
DIAGNOSIS_PREFIX = 'diagnosis'
VISIT_PREFIX = 'visit'
VISIT_FIRST_PREFIX = 'visit-first'
NIGHT_TIME = timedelta(hours=12)
TIME_HISTORY_IGNORE = timedelta(seconds=3)
def save_formset(formset, patient):
for form in formset.forms:
if len(form.cleaned_data) == 0:
continue
elif form.cleaned_data.get('DELETE', False) and \
isinstance(form.cleaned_data.get('id'), Diagnosis):
item = form.cleaned_data.get('id')
item.delete()
else:
item = form.save(commit=False)
item.patient = patient
item.save()
def get_diagnosis_text(patient):
d_txt = []
template = "%s (%s)"
for diagnosis in patient.diagnosis_set.all():
text = template % (diagnosis.name, diagnosis.code)
d_txt.append(text)
return "\n".join(d_txt)
def get_diagnosis_code(patient):
return "\n".join(patient.diagnosis_set.values_list('code', flat=True))
def clear_ids(request):
return dict([(k, v) for k, v in request.POST.iteritems() \
if (len(v) > 0 and v != u"\r\n")])
@login_required
@nested_commit_on_success
def edit(request, patient_id):
""" Просмотр и изменение информации о пациенте """
patient = get_object_or_404(Patient, pk=patient_id)
diagnosis_qs = patient.diagnosis_set.all()
avalible_error = False
try:
period_visit = datetime.now() - patient.visit_set.latest().date_created
except Visit.DoesNotExist:
period_visit = datetime.now() - datetime(1970, 1, 1)
is_need_save_visit = period_visit > timedelta(hours=12)
if request.method == "POST":
patient_form = PatientForm(request.POST,
instance=patient)
if patient_form.is_valid():
patient = patient_form.save(commit=False)
else:
avalible_error = True
visit_form = VisitForm(request.POST, prefix=VISIT_PREFIX)
if not visit_form.is_valid():
if period_visit > NIGHT_TIME:
avalible_error = True
else:
is_need_save_visit = False
visit_form = VisitForm(prefix=VISIT_PREFIX)
else:
is_need_save_visit = True
diagnosis_formset = DiagnosisModelFormset(clear_ids(request),
prefix=DIAGNOSIS_PREFIX,
queryset=diagnosis_qs)
if not diagnosis_formset.is_valid():
avalible_error = True
if not avalible_error:
save_formset(diagnosis_formset, patient)
patient.diagnosis_text = get_diagnosis_text(patient)
patient.diagnosis_text_code = get_diagnosis_code(patient)
patient.save()
messages.add_message(request,
messages.INFO,
u'Информация о пациенте изменена')
if visit_form.cleaned_data.get('is_visit', False):
visit = visit_form.save(commit=False)
#visit.mo = request.user.mo
visit.patient = patient
visit.save()
visit_form = VisitForm(prefix=VISIT_PREFIX,
initial={'mo': request.user.mo.pk})
            # if everything saved successfully, render correctly where the "delete" checkboxes go and where plain text goes
diagnosis_formset = DiagnosisModelFormset(
prefix=DIAGNOSIS_PREFIX,
queryset=patient.diagnosis_set.all()
)
else:
patient_form = PatientForm(instance=patient)
diagnosis_formset = DiagnosisModelFormset(prefix=DIAGNOSIS_PREFIX,
queryset=diagnosis_qs)
visit_form = VisitForm(prefix=VISIT_PREFIX,
initial={'mo': request.user.mo.pk})
response = {'patient_form': patient_form,
'diagnosis_formset': diagnosis_formset,
'visit_form': visit_form,
'visits_qs': patient.visit_set.all(),
'patient': patient}
return render_to_response('patient_edit.html',
response,
context_instance=RequestContext(request))
@login_required
@nested_commit_on_success
def add(request):
""" Создание информации о пациенте """
avalible_error = False
error_texts = []
if request.method == "POST":
patient_form = PatientForm(request.POST)
if patient_form.is_valid():
patient = patient_form.save(commit=False)
else:
patient = None
avalible_error = True
visit_first_form = VisitFirstForm(request.POST,
prefix=VISIT_FIRST_PREFIX)
if not visit_first_form.is_valid():
avalible_error = True
visit_form = VisitForm(request.POST, prefix=VISIT_PREFIX)
if not visit_form.is_valid() and not avalible_error:
avalible_error = True
diagnosis_formset = DiagnosisFormset(request.POST,
prefix=DIAGNOSIS_PREFIX)
if not diagnosis_formset.is_valid():
avalible_error = True
avalible_data = [v for f in diagnosis_formset.forms for v in f.cleaned_data]
if len(avalible_data) < 1:
avalible_error = True
error_texts.append(u'Нужно записать хотя бы 1 диагноз')
if not avalible_error:
            p = patient
            ps = Patient.objects.filter(last_name=p.last_name, first_name=p.first_name, patronymic=p.patronymic, birthday=p.birthday, type=p.type)
            if len(ps) > 0:
                e = u'Данный тип пациента с таким ФИО и датой рождения <a href="%s" target="_blank">уже есть в реестре</a>' % reverse('patient_edit', kwargs={'patient_id': ps[0].pk})
                error_texts.append(e)
else:
visit_first = visit_first_form.save(commit=False)
visit_first.is_add = True
patient.date_registration = visit_first.date_created
patient.save()
save_formset(diagnosis_formset, patient)
visit_first.patient = patient
visit_first.save()
if visit_form.cleaned_data.get('is_visit', False):
visit = visit_form.save(commit=False)
#visit.mo = request.user.mo
visit.patient = patient
visit.save()
Patient.objects.filter(pk=patient.pk) \
.update(diagnosis_text = get_diagnosis_text(patient),
diagnosis_text_code = get_diagnosis_code(patient))
messages.add_message(request, messages.INFO, u'Пациент "%s" внесен в реестр' % patient.get_full_name())
redirect_to = request.POST.get('__redirect_to')
if redirect_to == 'edit':
url = reverse('patient_edit', kwargs={'patient_id': patient.pk})
elif redirect_to == 'add':
url = reverse('patient_add')
else:
url = reverse('patient_search')
return redirect(url)
else:
patient_form = PatientForm()
diagnosis_formset = DiagnosisFormset(prefix=DIAGNOSIS_PREFIX)
visit_form = VisitForm(prefix=VISIT_PREFIX, initial={'mo': request.user.mo.pk})
visit_first_form = VisitFirstForm(prefix=VISIT_FIRST_PREFIX,
initial={'mo': request.user.mo.pk})
response = {'patient_form': patient_form,
'diagnosis_formset': diagnosis_formset,
'visit_form': visit_form,
'visit_first_form': visit_first_form,
'error_texts': error_texts}
return render_to_response('patient_add.html',
response,
context_instance=RequestContext(request))
HEADER_SEARCH = {
unicode(Patient.NEED_CURE): u'Нуждающиеся в спец. лечении',
unicode(Patient.NOT_NEED_CURE): u'Не нуждающиеся в спец. лечении',
unicode(Patient.GET_CURE): u'Получающие спец. лечение',
unicode(Patient.RAISE_CURE): u'Снятые со спец. лечения'}
@login_required
def search(request):
""" Поиск пациентов """
patients_qs = Patient.objects.all()
form = SearchForm(request.GET)
special_cure_text = ''
header = u'Все'
if len(request.GET) == 0:
        # If no search has been run, there is no need to show all patients
return render_to_response('search.html',
{'form': form, 'have_search_result': False},
context_instance=RequestContext(request))
if form.is_valid():
full_name = form.cleaned_data.get('full_name')
if full_name:
patients_qs = patients_qs.filter(all_full_names__icontains=full_name)
type_residence = form.cleaned_data.get('type_residence')
if type_residence:
patients_qs = patients_qs.filter(type_residence=type_residence)
social_status = form.cleaned_data.get('social_status')
if social_status:
patients_qs = patients_qs.filter(social_status=social_status)
birthday = form.cleaned_data.get('birthday')
if birthday:
patients_qs = patients_qs.filter(birthday=birthday)
death = form.cleaned_data.get('death')
if death:
patients_qs = patients_qs.filter(death=death)
mo_added = form.cleaned_data.get('mo_added')
if mo_added:
patients_qs = patients_qs.filter(visit__is_add=True,
visit__mo=mo_added)
special_cure = form.cleaned_data.get('special_cure')
if special_cure:
if special_cure in HEADER_SEARCH:
header = HEADER_SEARCH[special_cure]
patients_qs = patients_qs.filter(special_cure=special_cure)
diagnosis = form.cleaned_data.get('diagnosis')
if diagnosis:
q_st = Q(diagnosis__code__contains=diagnosis) | \
Q(diagnosis__name__contains=diagnosis)
            with_diagnosis = patients_qs.filter(q_st)
patients_qs = patients_qs.filter(pk__in=with_diagnosis)
patients_qs = patients_qs.values('pk', 'all_full_names',
'birthday', 'diagnosis_text_code',
'name_allocate_mo', 'gender')
response = {'patients': patients_qs,
'count': patients_qs.count(),
'special_cure_text': special_cure_text,
'form': form,
'header': header,
'have_search_result': True}
return render_to_response('search.html',
response,
context_instance=RequestContext(request))
def update_history(history, h_date, username, full_name, mo_name):
history.append(((h_date, h_date + TIME_HISTORY_IGNORE),
username,
full_name,
mo_name))
return history
@login_required
def history(request, patient_id):
""" Список кто изменял в информацию о пациенте """
patient = get_object_or_404(Patient, pk=patient_id)
histories_qs = patient.history.filter(history_type='~') \
.values_list('history_date',
'history_user__username',
'history_user__full_name',
'history_user__mo_name')
histories = [((d, d + TIME_HISTORY_IGNORE), u, n, m) for d, u, n, m in histories_qs]
visits = patient.visit_set.all()
diagnosis = patient.diagnosis_set.all()
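    # Merge the history of every visit and diagnosis into the patient's own history,
    # skipping entries made by the same user within TIME_HISTORY_IGNORE of an already
    # listed change, since each related record keeps a separate history row.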
for element in chain(visits, diagnosis):
element_h_qs = element.history.values_list('history_date',
'history_user__username',
'history_user__full_name',
'history_user__mo_name')
for element_date, username, full_name, mo_name in element_h_qs:
            is_double_hist = False # each diagnosis and visit keeps its own history record
for p_date, p_user, p_name, p_mo in histories:
if p_date[0] <= element_date <= p_date[1] and \
p_user == username:
is_double_hist = True
if not is_double_hist:
histories = update_history(histories,
element_date,
username,
full_name,
mo_name)
histories.sort(lambda x, y: cmp(x[0][0], y[0][0]))
response = {'histories': histories,
'patient': patient}
return render_to_response('patient_history.html',
response,
context_instance=RequestContext(request))
|
dicos/geneticist
|
apps/patient/views.py
|
Python
|
gpl-3.0
| 14,516
|
[
"VisIt"
] |
832255876be70902d7c9e191bae5aeeed51393bb6d138047bfeba292c3682c80
|
"""
simple, elegant templating
(part of web.py)
Template design:
Template string is split into tokens and the tokens are combined into nodes.
Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes and
for-loop, if-loop etc are block nodes, which contain multiple child nodes.
Each node can emit some python string. python string emitted by the
root node is validated for safeeval and executed using python in the given environment.
Enough care is taken to make sure the generated code and the template has line to line match,
so that the error messages can point to exact line number in template. (It doesn't work in some cases still.)
Grammar:
template -> defwith sections
defwith -> '$def with (' arguments ')' | ''
sections -> section*
section -> block | assignment | line
assignment -> '$ ' <assignment expression>
line -> (text|expr)*
text -> <any characters other than $>
expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
pyexpr -> <python expression>
"""
__all__ = [
"Template",
"Render", "render", "frender",
"ParseError", "SecurityError",
"test"
]
import tokenize
import os
import sys
import glob
import re
from UserDict import DictMixin
import warnings
from utils import storage, safeunicode, safestr, re_compile
from webapi import config
from net import websafe
def splitline(text):
r"""
Splits the given text at newline.
>>> splitline('foo\nbar')
('foo\n', 'bar')
>>> splitline('foo')
('foo', '')
>>> splitline('')
('', '')
"""
index = text.find('\n') + 1
if index:
return text[:index], text[index:]
else:
return text, ''
class Parser:
"""Parser Base.
"""
def __init__(self):
self.statement_nodes = STATEMENT_NODES
self.keywords = KEYWORDS
def parse(self, text, name="<template>"):
self.text = text
self.name = name
defwith, text = self.read_defwith(text)
suite = self.read_suite(text)
return DefwithNode(defwith, suite)
def read_defwith(self, text):
if text.startswith('$def with'):
defwith, text = splitline(text)
defwith = defwith[1:].strip() # strip $ and spaces
return defwith, text
else:
return '', text
def read_section(self, text):
r"""Reads one section from the given text.
section -> block | assignment | line
>>> read_section = Parser().read_section
>>> read_section('foo\nbar\n')
(<line: [t'foo\n']>, 'bar\n')
>>> read_section('$ a = b + 1\nfoo\n')
(<assignment: 'a = b + 1'>, 'foo\n')
        read_section('$for i in range(10):\n    hello $i\nfoo\n')
"""
if text.lstrip(' ').startswith('$'):
index = text.index('$')
begin_indent, text2 = text[:index], text[index+1:]
ahead = self.python_lookahead(text2)
if ahead == 'var':
return self.read_var(text2)
elif ahead in self.statement_nodes:
return self.read_block_section(text2, begin_indent)
elif ahead in self.keywords:
return self.read_keyword(text2)
elif ahead.strip() == '':
# assignments starts with a space after $
# ex: $ a = b + 2
return self.read_assignment(text2)
return self.readline(text)
def read_var(self, text):
r"""Reads a var statement.
>>> read_var = Parser().read_var
>>> read_var('var x=10\nfoo')
(<var: x = 10>, 'foo')
>>> read_var('var x: hello $name\nfoo')
(<var: x = join_(u'hello ', escape_(name, True))>, 'foo')
"""
line, text = splitline(text)
tokens = self.python_tokens(line)
if len(tokens) < 4:
raise SyntaxError('Invalid var statement')
name = tokens[1]
sep = tokens[2]
value = line.split(sep, 1)[1].strip()
if sep == '=':
pass # no need to process value
elif sep == ':':
            #@@ Hack for backward-compatibility
if tokens[3] == '\n': # multi-line var statement
block, text = self.read_indented_block(text, ' ')
lines = [self.readline(x)[0] for x in block.splitlines()]
nodes = []
for x in lines:
nodes.extend(x.nodes)
nodes.append(TextNode('\n'))
else: # single-line var statement
linenode, _ = self.readline(value)
nodes = linenode.nodes
parts = [node.emit('') for node in nodes]
value = "join_(%s)" % ", ".join(parts)
else:
raise SyntaxError('Invalid var statement')
return VarNode(name, value), text
def read_suite(self, text):
r"""Reads section by section till end of text.
>>> read_suite = Parser().read_suite
>>> read_suite('hello $name\nfoo\n')
[<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
"""
sections = []
while text:
section, text = self.read_section(text)
sections.append(section)
return SuiteNode(sections)
def readline(self, text):
r"""Reads one line from the text. Newline is supressed if the line ends with \.
>>> readline = Parser().readline
>>> readline('hello $name!\nbye!')
(<line: [t'hello ', $name, t'!\n']>, 'bye!')
>>> readline('hello $name!\\\nbye!')
(<line: [t'hello ', $name, t'!']>, 'bye!')
>>> readline('$f()\n\n')
(<line: [$f(), t'\n']>, '\n')
"""
line, text = splitline(text)
        # suppress new line if line ends with \
if line.endswith('\\\n'):
line = line[:-2]
nodes = []
while line:
node, line = self.read_node(line)
nodes.append(node)
return LineNode(nodes), text
def read_node(self, text):
r"""Reads a node from the given text and returns the node and remaining text.
>>> read_node = Parser().read_node
>>> read_node('hello $name')
(t'hello ', '$name')
>>> read_node('$name')
($name, '')
"""
if text.startswith('$$'):
return TextNode('$'), text[2:]
elif text.startswith('$#'): # comment
line, text = splitline(text)
return TextNode('\n'), text
elif text.startswith('$'):
text = text[1:] # strip $
if text.startswith(':'):
escape = False
text = text[1:] # strip :
else:
escape = True
return self.read_expr(text, escape=escape)
else:
return self.read_text(text)
def read_text(self, text):
r"""Reads a text node from the given text.
>>> read_text = Parser().read_text
>>> read_text('hello $name')
(t'hello ', '$name')
"""
index = text.find('$')
if index < 0:
return TextNode(text), ''
else:
return TextNode(text[:index]), text[index:]
def read_keyword(self, text):
line, text = splitline(text)
return StatementNode(line.strip() + "\n"), text
def read_expr(self, text, escape=True):
"""Reads a python expression from the text and returns the expression and remaining text.
expr -> simple_expr | paren_expr
simple_expr -> id extended_expr
extended_expr -> attr_access | paren_expr extended_expr | ''
attr_access -> dot id extended_expr
paren_expr -> [ tokens ] | ( tokens ) | { tokens }
>>> read_expr = Parser().read_expr
>>> read_expr("name")
($name, '')
>>> read_expr("a.b and c")
($a.b, ' and c')
>>> read_expr("a. b")
($a, '. b')
>>> read_expr("name</h1>")
($name, '</h1>')
>>> read_expr("(limit)ing")
($(limit), 'ing')
>>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
"""
def simple_expr():
identifier()
extended_expr()
def identifier():
tokens.next()
def extended_expr():
lookahead = tokens.lookahead()
if lookahead is None:
return
elif lookahead.value == '.':
attr_access()
elif lookahead.value in parens:
paren_expr()
extended_expr()
else:
return
def attr_access():
from token import NAME # python token constants
dot = tokens.lookahead()
if tokens.lookahead2().type == NAME:
tokens.next() # consume dot
identifier()
extended_expr()
def paren_expr():
begin = tokens.next().value
end = parens[begin]
while True:
if tokens.lookahead().value in parens:
paren_expr()
else:
t = tokens.next()
if t.value == end:
break
return
parens = {
"(": ")",
"[": "]",
"{": "}"
}
def get_tokens(text):
"""tokenize text using python tokenizer.
Python tokenizer ignores spaces, but they might be important in some cases.
This function introduces dummy space tokens when it identifies any ignored space.
Each token is a storage object containing type, value, begin and end.
"""
readline = iter([text]).next
end = None
for t in tokenize.generate_tokens(readline):
t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
if end is not None and end != t.begin:
_, x1 = end
_, x2 = t.begin
yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
end = t.end
yield t
class BetterIter:
"""Iterator like object with 2 support for 2 look aheads."""
def __init__(self, items):
self.iteritems = iter(items)
self.items = []
self.position = 0
self.current_item = None
def lookahead(self):
if len(self.items) <= self.position:
self.items.append(self._next())
return self.items[self.position]
def _next(self):
try:
return self.iteritems.next()
except StopIteration:
return None
def lookahead2(self):
if len(self.items) <= self.position+1:
self.items.append(self._next())
return self.items[self.position+1]
def next(self):
self.current_item = self.lookahead()
self.position += 1
return self.current_item
tokens = BetterIter(get_tokens(text))
if tokens.lookahead().value in parens:
paren_expr()
else:
simple_expr()
row, col = tokens.current_item.end
return ExpressionNode(text[:col], escape=escape), text[col:]
def read_assignment(self, text):
r"""Reads assignment statement from text.
>>> read_assignment = Parser().read_assignment
>>> read_assignment('a = b + 1\nfoo')
(<assignment: 'a = b + 1'>, 'foo')
"""
line, text = splitline(text)
return AssignmentNode(line.strip()), text
def python_lookahead(self, text):
"""Returns the first python token from the given text.
>>> python_lookahead = Parser().python_lookahead
>>> python_lookahead('for i in range(10):')
'for'
>>> python_lookahead('else:')
'else'
>>> python_lookahead(' x = 1')
' '
"""
readline = iter([text]).next
tokens = tokenize.generate_tokens(readline)
return tokens.next()[1]
def python_tokens(self, text):
readline = iter([text]).next
tokens = tokenize.generate_tokens(readline)
return [t[1] for t in tokens]
def read_indented_block(self, text, indent):
r"""Read a block of text. A block is what typically follows a for or it statement.
It can be in the same line as that of the statement or an indented block.
>>> read_indented_block = Parser().read_indented_block
>>> read_indented_block(' a\n b\nc', ' ')
('a\nb\n', 'c')
>>> read_indented_block(' a\n b\n c\nd', ' ')
('a\n b\nc\n', 'd')
>>> read_indented_block(' a\n\n b\nc', ' ')
('a\n\n b\n', 'c')
"""
if indent == '':
return '', text
block = ""
while text:
line, text2 = splitline(text)
if line.strip() == "":
block += '\n'
elif line.startswith(indent):
block += line[len(indent):]
else:
break
text = text2
return block, text
def read_statement(self, text):
r"""Reads a python statement.
>>> read_statement = Parser().read_statement
>>> read_statement('for i in range(10): hello $name')
('for i in range(10):', ' hello $name')
"""
tok = PythonTokenizer(text)
tok.consume_till(':')
return text[:tok.index], text[tok.index:]
def read_block_section(self, text, begin_indent=''):
r"""
>>> read_block_section = Parser().read_block_section
>>> read_block_section('for i in range(10): hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
>>> read_block_section('for i in range(10):\n hello $i\n foo', begin_indent=' ')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, ' foo')
>>> read_block_section('for i in range(10):\n hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
"""
line, text = splitline(text)
stmt, line = self.read_statement(line)
keyword = self.python_lookahead(stmt)
        # if there is something left in the line
if line.strip():
block = line.lstrip()
else:
def find_indent(text):
rx = re_compile(' +')
match = rx.match(text)
first_indent = match and match.group(0)
return first_indent or ""
# find the indentation of the block by looking at the first line
first_indent = find_indent(text)[len(begin_indent):]
#TODO: fix this special case
if keyword == "code":
indent = begin_indent + first_indent
else:
indent = begin_indent + min(first_indent, INDENT)
block, text = self.read_indented_block(text, indent)
return self.create_block_node(keyword, stmt, block, begin_indent), text
def create_block_node(self, keyword, stmt, block, begin_indent):
if keyword in self.statement_nodes:
return self.statement_nodes[keyword](stmt, block, begin_indent)
else:
raise ParseError, 'Unknown statement: %s' % repr(keyword)
class PythonTokenizer:
"""Utility wrapper over python tokenizer."""
def __init__(self, text):
self.text = text
readline = iter([text]).next
self.tokens = tokenize.generate_tokens(readline)
self.index = 0
def consume_till(self, delim):
"""Consumes tokens till colon.
>>> tok = PythonTokenizer('for i in range(10): hello $i')
>>> tok.consume_till(':')
>>> tok.text[:tok.index]
'for i in range(10):'
>>> tok.text[tok.index:]
' hello $i'
"""
try:
while True:
t = self.next()
if t.value == delim:
break
elif t.value == '(':
self.consume_till(')')
elif t.value == '[':
self.consume_till(']')
elif t.value == '{':
self.consume_till('}')
# if end of line is found, it is an exception.
# Since there is no easy way to report the line number,
# leave the error reporting to the python parser later
#@@ This should be fixed.
if t.value == '\n':
break
except:
#raise ParseError, "Expected %s, found end of line." % repr(delim)
# raising ParseError doesn't show the line number.
# if this error is ignored, then it will be caught when compiling the python code.
return
def next(self):
type, t, begin, end, line = self.tokens.next()
row, col = end
self.index = col
return storage(type=type, value=t, begin=begin, end=end)
class DefwithNode:
def __init__(self, defwith, suite):
if defwith:
self.defwith = defwith.replace('with', '__template__') + ':'
# offset 4 lines. for encoding, __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -4"
else:
self.defwith = 'def __template__():'
# offset 4 lines for encoding, __template__, __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -5"
self.defwith += "\n loop = ForLoop()"
self.defwith += "\n self = TemplateResult(); extend_ = self.extend"
self.suite = suite
self.end = "\n return self"
def emit(self, indent):
encoding = "# coding: utf-8\n"
return encoding + self.defwith + self.suite.emit(indent + INDENT) + self.end
def __repr__(self):
return "<defwith: %s, %s>" % (self.defwith, self.suite)
class TextNode:
def __init__(self, value):
self.value = value
def emit(self, indent, begin_indent=''):
return repr(safeunicode(self.value))
def __repr__(self):
return 't' + repr(self.value)
class ExpressionNode:
def __init__(self, value, escape=True):
self.value = value.strip()
# convert ${...} to $(...)
if value.startswith('{') and value.endswith('}'):
self.value = '(' + self.value[1:-1] + ')'
self.escape = escape
def emit(self, indent, begin_indent=''):
return 'escape_(%s, %s)' % (self.value, bool(self.escape))
def __repr__(self):
if self.escape:
escape = ''
else:
escape = ':'
return "$%s%s" % (escape, self.value)
class AssignmentNode:
def __init__(self, code):
self.code = code
def emit(self, indent, begin_indent=''):
return indent + self.code + "\n"
def __repr__(self):
return "<assignment: %s>" % repr(self.code)
class LineNode:
def __init__(self, nodes):
self.nodes = nodes
def emit(self, indent, text_indent='', name=''):
text = [node.emit('') for node in self.nodes]
if text_indent:
text = [repr(text_indent)] + text
return indent + "extend_([%s])\n" % ", ".join(text)
def __repr__(self):
return "<line: %s>" % repr(self.nodes)
INDENT = ' ' # 4 spaces
class BlockNode:
def __init__(self, stmt, block, begin_indent=''):
self.stmt = stmt
self.suite = Parser().read_suite(block)
self.begin_indent = begin_indent
def emit(self, indent, text_indent=''):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return out
def __repr__(self):
return "<block: %s, %s>" % (repr(self.stmt), repr(self.suite))
class ForNode(BlockNode):
def __init__(self, stmt, block, begin_indent=''):
self.original_stmt = stmt
tok = PythonTokenizer(stmt)
tok.consume_till('in')
a = stmt[:tok.index] # for i in
b = stmt[tok.index:-1] # rest of for stmt excluding :
stmt = a + ' loop.setup(' + b.strip() + '):'
BlockNode.__init__(self, stmt, block, begin_indent)
def __repr__(self):
return "<block: %s, %s>" % (repr(self.original_stmt), repr(self.suite))
class CodeNode:
def __init__(self, stmt, block, begin_indent=''):
# compensate one line for $code:
self.code = "\n" + block
def emit(self, indent, text_indent=''):
import re
rx = re.compile('^', re.M)
return rx.sub(indent, self.code).rstrip(' ')
def __repr__(self):
return "<code: %s>" % repr(self.code)
class StatementNode:
def __init__(self, stmt):
self.stmt = stmt
def emit(self, indent, begin_indent=''):
return indent + self.stmt
def __repr__(self):
return "<stmt: %s>" % repr(self.stmt)
class IfNode(BlockNode):
pass
class ElseNode(BlockNode):
pass
class ElifNode(BlockNode):
pass
class DefNode(BlockNode):
def __init__(self, *a, **kw):
BlockNode.__init__(self, *a, **kw)
code = CodeNode("", "")
code.code = "self = TemplateResult(); extend_ = self.extend\n"
self.suite.sections.insert(0, code)
code = CodeNode("", "")
code.code = "return self\n"
self.suite.sections.append(code)
def emit(self, indent, text_indent=''):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return indent + "__lineoffset__ -= 3\n" + out
class VarNode:
def __init__(self, name, value):
self.name = name
self.value = value
def emit(self, indent, text_indent):
return indent + "self[%s] = %s\n" % (repr(self.name), self.value)
def __repr__(self):
return "<var: %s = %s>" % (self.name, self.value)
class SuiteNode:
"""Suite is a list of sections."""
def __init__(self, sections):
self.sections = sections
def emit(self, indent, text_indent=''):
return "\n" + "".join([s.emit(indent, text_indent) for s in self.sections])
def __repr__(self):
return repr(self.sections)
STATEMENT_NODES = {
'for': ForNode,
'while': BlockNode,
'if': IfNode,
'elif': ElifNode,
'else': ElseNode,
'def': DefNode,
'code': CodeNode
}
KEYWORDS = [
"pass",
"break",
"continue",
"return"
]
TEMPLATE_BUILTIN_NAMES = [
"dict", "enumerate", "float", "int", "bool", "list", "long", "reversed",
"set", "slice", "tuple", "xrange",
"abs", "all", "any", "callable", "chr", "cmp", "divmod", "filter", "hex",
"id", "isinstance", "iter", "len", "max", "min", "oct", "ord", "pow", "range",
"True", "False",
"None",
"__import__", # some c-libraries like datetime requires __import__ to present in the namespace
]
import __builtin__
TEMPLATE_BUILTINS = dict([(name, getattr(__builtin__, name)) for name in TEMPLATE_BUILTIN_NAMES if name in __builtin__.__dict__])
class ForLoop:
"""
    Wrapper for expression in for statement to support loop.xxx helpers.
>>> loop = ForLoop()
>>> for x in loop.setup(['a', 'b', 'c']):
... print loop.index, loop.revindex, loop.parity, x
...
1 3 odd a
2 2 even b
3 1 odd c
>>> loop.index
Traceback (most recent call last):
...
AttributeError: index
"""
def __init__(self):
self._ctx = None
def __getattr__(self, name):
if self._ctx is None:
raise AttributeError, name
else:
return getattr(self._ctx, name)
def setup(self, seq):
self._push()
return self._ctx.setup(seq)
def _push(self):
self._ctx = ForLoopContext(self, self._ctx)
def _pop(self):
self._ctx = self._ctx.parent
class ForLoopContext:
"""Stackable context for ForLoop to support nested for loops.
"""
def __init__(self, forloop, parent):
self._forloop = forloop
self.parent = parent
def setup(self, seq):
try:
self.length = len(seq)
except:
self.length = 0
self.index = 0
for a in seq:
self.index += 1
yield a
self._forloop._pop()
index0 = property(lambda self: self.index-1)
first = property(lambda self: self.index == 1)
last = property(lambda self: self.index == self.length)
odd = property(lambda self: self.index % 2 == 1)
even = property(lambda self: self.index % 2 == 0)
parity = property(lambda self: ['odd', 'even'][self.even])
revindex0 = property(lambda self: self.length - self.index)
revindex = property(lambda self: self.length - self.index + 1)
class BaseTemplate:
def __init__(self, code, filename, filter, globals, builtins):
self.filename = filename
self.filter = filter
self._globals = globals
self._builtins = builtins
if code:
self.t = self._compile(code)
else:
self.t = lambda: ''
def _compile(self, code):
env = self.make_env(self._globals or {}, self._builtins)
exec(code, env)
return env['__template__']
def __call__(self, *a, **kw):
__hidetraceback__ = True
return self.t(*a, **kw)
def make_env(self, globals, builtins):
return dict(globals,
__builtins__=builtins,
ForLoop=ForLoop,
TemplateResult=TemplateResult,
escape_=self._escape,
join_=self._join
)
def _join(self, *items):
return u"".join(items)
def _escape(self, value, escape=False):
if value is None:
value = ''
value = safeunicode(value)
if escape and self.filter:
value = self.filter(value)
return value
class Template(BaseTemplate):
CONTENT_TYPES = {
'.html' : 'text/html; charset=utf-8',
'.xhtml' : 'application/xhtml+xml; charset=utf-8',
'.txt' : 'text/plain',
}
FILTERS = {
'.html': websafe,
'.xhtml': websafe,
'.xml': websafe
}
globals = {}
def __init__(self, text, filename='<template>', filter=None, globals=None, builtins=None, extensions=None):
self.extensions = extensions or []
text = Template.normalize_text(text)
code = self.compile_template(text, filename)
_, ext = os.path.splitext(filename)
filter = filter or self.FILTERS.get(ext, None)
self.content_type = self.CONTENT_TYPES.get(ext, None)
if globals is None:
globals = self.globals
if builtins is None:
builtins = TEMPLATE_BUILTINS
BaseTemplate.__init__(self, code=code, filename=filename, filter=filter, globals=globals, builtins=builtins)
def normalize_text(text):
"""Normalizes template text by correcting \r\n, tabs and BOM chars."""
text = text.replace('\r\n', '\n').replace('\r', '\n').expandtabs()
if not text.endswith('\n'):
text += '\n'
        # ignore BOM chars at the beginning of template
BOM = '\xef\xbb\xbf'
if isinstance(text, str) and text.startswith(BOM):
text = text[len(BOM):]
        # support for \$ for backward-compatibility
text = text.replace(r'\$', '$$')
return text
normalize_text = staticmethod(normalize_text)
def __call__(self, *a, **kw):
__hidetraceback__ = True
import webapi as web
if 'headers' in web.ctx and self.content_type:
web.header('Content-Type', self.content_type, unique=True)
return BaseTemplate.__call__(self, *a, **kw)
def generate_code(text, filename, parser=None):
# parse the text
parser = parser or Parser()
rootnode = parser.parse(text, filename)
# generate python code from the parse tree
code = rootnode.emit(indent="").strip()
return safestr(code)
generate_code = staticmethod(generate_code)
def create_parser(self):
p = Parser()
for ext in self.extensions:
p = ext(p)
return p
def compile_template(self, template_string, filename):
code = Template.generate_code(template_string, filename, parser=self.create_parser())
def get_source_line(filename, lineno):
try:
lines = open(filename).read().splitlines()
return lines[lineno]
except:
return None
try:
# compile the code first to report the errors, if any, with the filename
compiled_code = compile(code, filename, 'exec')
except SyntaxError, e:
# display template line that caused the error along with the traceback.
try:
e.msg += '\n\nTemplate traceback:\n File %s, line %s\n %s' % \
(repr(e.filename), e.lineno, get_source_line(e.filename, e.lineno-1))
except:
pass
raise
# make sure code is safe - but not with jython, it doesn't have a working compiler module
if not sys.platform.startswith('java'):
try:
import compiler
ast = compiler.parse(code)
SafeVisitor().walk(ast, filename)
except ImportError:
warnings.warn("Unabled to import compiler module. Unable to check templates for safety.")
else:
warnings.warn("SECURITY ISSUE: You are using Jython, which does not support checking templates for safety. Your templates can execute arbitrary code.")
return compiled_code
class CompiledTemplate(Template):
def __init__(self, f, filename):
Template.__init__(self, '', filename)
self.t = f
def compile_template(self, *a):
return None
def _compile(self, *a):
return None
class Render:
"""The most preferred way of using templates.
render = web.template.render('templates')
print render.foo()
    The optional parameter `base` can be used to pass the output of
    every template through the base template.
render = web.template.render('templates', base='layout')
"""
def __init__(self, loc='templates', cache=None, base=None, **keywords):
self._loc = loc
self._keywords = keywords
if cache is None:
cache = not config.get('debug', False)
if cache:
self._cache = {}
else:
self._cache = None
if base and not hasattr(base, '__call__'):
# make base a function, so that it can be passed to sub-renders
self._base = lambda page: self._template(base)(page)
else:
self._base = base
def _add_global(self, obj, name=None):
"""Add a global to this rendering instance."""
if 'globals' not in self._keywords: self._keywords['globals'] = {}
if not name:
name = obj.__name__
self._keywords['globals'][name] = obj
def _lookup(self, name):
path = os.path.join(self._loc, name)
if os.path.isdir(path):
return 'dir', path
else:
path = self._findfile(path)
if path:
return 'file', path
else:
return 'none', None
def _load_template(self, name):
kind, path = self._lookup(name)
if kind == 'dir':
return Render(path, cache=self._cache is not None, base=self._base, **self._keywords)
elif kind == 'file':
return Template(open(path).read(), filename=path, **self._keywords)
else:
raise AttributeError, "No template named " + name
def _findfile(self, path_prefix):
p = [f for f in glob.glob(path_prefix + '.*') if not f.endswith('~')] # skip backup files
p.sort() # sort the matches for deterministic order
return p and p[0]
def _template(self, name):
if self._cache is not None:
if name not in self._cache:
self._cache[name] = self._load_template(name)
return self._cache[name]
else:
return self._load_template(name)
def __getattr__(self, name):
t = self._template(name)
if self._base and isinstance(t, Template):
def template(*a, **kw):
return self._base(t(*a, **kw))
return template
else:
return self._template(name)
class GAE_Render(Render):
# Render gets over-written. make a copy here.
super = Render
def __init__(self, loc, *a, **kw):
GAE_Render.super.__init__(self, loc, *a, **kw)
import types
if isinstance(loc, types.ModuleType):
self.mod = loc
else:
name = loc.rstrip('/').replace('/', '.')
self.mod = __import__(name, None, None, ['x'])
self.mod.__dict__.update(kw.get('builtins', TEMPLATE_BUILTINS))
self.mod.__dict__.update(Template.globals)
self.mod.__dict__.update(kw.get('globals', {}))
def _load_template(self, name):
t = getattr(self.mod, name)
import types
if isinstance(t, types.ModuleType):
return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
else:
return t
render = Render
# setup render for Google App Engine.
try:
from google import appengine
render = Render = GAE_Render
except ImportError:
pass
def frender(path, **keywords):
"""Creates a template from the given file path.
"""
return Template(open(path).read(), filename=path, **keywords)
def compile_templates(root):
"""Compiles templates to python code."""
re_start = re_compile('^', re.M)
for dirpath, dirnames, filenames in os.walk(root):
filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.startswith('__init__.py')]
for d in dirnames[:]:
if d.startswith('.'):
dirnames.remove(d) # don't visit this dir
out = open(os.path.join(dirpath, '__init__.py'), 'w')
# Wayne: change 'from web.template...' to 'from lib.web.template...' because we put web under the lib module
out.write('from lib.web.template import CompiledTemplate, ForLoop, TemplateResult\n\n')
if dirnames:
out.write("import " + ", ".join(dirnames))
out.write("\n")
for f in filenames:
path = os.path.join(dirpath, f)
if '.' in f:
name, _ = f.split('.', 1)
else:
name = f
text = open(path).read()
text = Template.normalize_text(text)
code = Template.generate_code(text, path)
code = code.replace("__template__", name, 1)
out.write(code)
out.write('\n\n')
out.write('%s = CompiledTemplate(%s, %s)\n' % (name, name, repr(path)))
out.write("join_ = %s._join; escape_ = %s._escape\n\n" % (name, name))
# create template to make sure it compiles
t = Template(open(path).read(), path)
out.close()
class ParseError(Exception):
pass
class SecurityError(Exception):
"""The template seems to be trying to do something naughty."""
pass
# Enumerate all the allowed AST nodes
ALLOWED_AST_NODES = [
"Add", "And",
# "AssAttr",
"AssList", "AssName", "AssTuple",
# "Assert",
"Assign", "AugAssign",
# "Backquote",
"Bitand", "Bitor", "Bitxor", "Break",
"CallFunc","Class", "Compare", "Const", "Continue",
"Decorators", "Dict", "Discard", "Div",
"Ellipsis", "EmptyNode",
# "Exec",
"Expression", "FloorDiv", "For",
# "From",
"Function",
"GenExpr", "GenExprFor", "GenExprIf", "GenExprInner",
"Getattr",
# "Global",
"If", "IfExp",
# "Import",
"Invert", "Keyword", "Lambda", "LeftShift",
"List", "ListComp", "ListCompFor", "ListCompIf", "Mod",
"Module",
"Mul", "Name", "Not", "Or", "Pass", "Power",
# "Print", "Printnl", "Raise",
"Return", "RightShift", "Slice", "Sliceobj",
"Stmt", "Sub", "Subscript",
# "TryExcept", "TryFinally",
"Tuple", "UnaryAdd", "UnarySub",
"While", "With", "Yield",
]
class SafeVisitor(object):
"""
Make sure code is safe by walking through the AST.
Code considered unsafe if:
* it has restricted AST nodes
    * it is trying to access restricted attributes
Adopted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
"""
def __init__(self):
"Initialize visitor by generating callbacks for all AST node types."
self.errors = []
def walk(self, ast, filename):
"Validate each node in AST and raise SecurityError if the code is not safe."
self.filename = filename
self.visit(ast)
if self.errors:
raise SecurityError, '\n'.join([str(err) for err in self.errors])
def visit(self, node, *args):
"Recursively validate node and all of its children."
def classname(obj):
return obj.__class__.__name__
nodename = classname(node)
fn = getattr(self, 'visit' + nodename, None)
if fn:
fn(node, *args)
else:
if nodename not in ALLOWED_AST_NODES:
self.fail(node, *args)
for child in node.getChildNodes():
self.visit(child, *args)
def visitName(self, node, *args):
"Disallow any attempts to access a restricted attr."
#self.assert_attr(node.getChildren()[0], node)
pass
def visitGetattr(self, node, *args):
"Disallow any attempts to access a restricted attribute."
self.assert_attr(node.attrname, node)
def assert_attr(self, attrname, node):
if self.is_unallowed_attr(attrname):
lineno = self.get_node_lineno(node)
e = SecurityError("%s:%d - access to attribute '%s' is denied" % (self.filename, lineno, attrname))
self.errors.append(e)
def is_unallowed_attr(self, name):
return name.startswith('_') \
or name.startswith('func_') \
or name.startswith('im_')
def get_node_lineno(self, node):
return (node.lineno) and node.lineno or 0
def fail(self, node, *args):
"Default callback for unallowed AST nodes."
lineno = self.get_node_lineno(node)
nodename = node.__class__.__name__
e = SecurityError("%s:%d - execution of '%s' statements is denied" % (self.filename, lineno, nodename))
self.errors.append(e)
class TemplateResult(object, DictMixin):
"""Dictionary like object for storing template output.
    The result of a template execution is usually a string, but sometimes it
    contains attributes set using $var. This class provides a simple
    dictionary-like interface for storing the output of the template and the
    attributes. The output is stored with a special key __body__. Converting
    the TemplateResult to string or unicode returns the value of __body__.
    When the template is in execution, the output is generated part by part
    and those parts are combined at the end. Parts are added to the
    TemplateResult by calling the `extend` method and the parts are combined
    seamlessly when __body__ is accessed.
>>> d = TemplateResult(__body__='hello, world', x='foo')
>>> d
<TemplateResult: {'__body__': 'hello, world', 'x': 'foo'}>
>>> print d
hello, world
>>> d.x
'foo'
>>> d = TemplateResult()
>>> d.extend([u'hello', u'world'])
>>> d
<TemplateResult: {'__body__': u'helloworld'}>
"""
def __init__(self, *a, **kw):
self.__dict__["_d"] = dict(*a, **kw)
self._d.setdefault("__body__", u'')
self.__dict__['_parts'] = []
self.__dict__["extend"] = self._parts.extend
self._d.setdefault("__body__", None)
def keys(self):
return self._d.keys()
def _prepare_body(self):
"""Prepare value of __body__ by joining parts.
"""
if self._parts:
value = u"".join(self._parts)
self._parts[:] = []
body = self._d.get('__body__')
if body:
self._d['__body__'] = body + value
else:
self._d['__body__'] = value
def __getitem__(self, name):
if name == "__body__":
self._prepare_body()
return self._d[name]
def __setitem__(self, name, value):
if name == "__body__":
self._prepare_body()
return self._d.__setitem__(name, value)
def __delitem__(self, name):
if name == "__body__":
self._prepare_body()
return self._d.__delitem__(name)
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __unicode__(self):
self._prepare_body()
return self["__body__"]
def __str__(self):
self._prepare_body()
return self["__body__"].encode('utf-8')
def __repr__(self):
self._prepare_body()
return "<TemplateResult: %s>" % self._d
def test():
r"""Doctest for testing template module.
Define a utility function to run template test.
>>> class TestResult:
... def __init__(self, t): self.t = t
... def __getattr__(self, name): return getattr(self.t, name)
... def __repr__(self): return repr(unicode(self))
...
>>> def t(code, **keywords):
... tmpl = Template(code, **keywords)
... return lambda *a, **kw: TestResult(tmpl(*a, **kw))
...
Simple tests.
>>> t('1')()
u'1\n'
>>> t('$def with ()\n1')()
u'1\n'
>>> t('$def with (a)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(a=1)
u'1\n'
Test complicated expressions.
>>> t('$def with (x)\n$x.upper()')('hello')
u'HELLO\n'
>>> t('$(2 * 3 + 4 * 5)')()
u'26\n'
>>> t('${2 * 3 + 4 * 5}')()
u'26\n'
>>> t('$def with (limit)\nkeep $(limit)ing.')('go')
u'keep going.\n'
>>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
u'1\n'
Test html escaping.
>>> t('$def with (x)\n$x', filename='a.html')('<html>')
        u'&lt;html&gt;\n'
>>> t('$def with (x)\n$x', filename='a.txt')('<html>')
u'<html>\n'
Test if, for and while.
>>> t('$if 1: 1')()
u'1\n'
>>> t('$if 1:\n 1')()
u'1\n'
>>> t('$if 1:\n 1\\')()
u'1'
>>> t('$if 0: 0\n$elif 1: 1')()
u'1\n'
>>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
u'1\n'
>>> t('$if 0 < 1 and 1 < 2: 1')()
u'1\n'
>>> t('$for x in [1, 2, 3]: $x')()
u'1\n2\n3\n'
>>> t('$def with (d)\n$for k, v in d.iteritems(): $k')({1: 1})
u'1\n'
>>> t('$for x in [1, 2, 3]:\n\t$x')()
u' 1\n 2\n 3\n'
>>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
u'1\n1\n1\n'
The space after : must be ignored.
>>> t('$if True: foo')()
u'foo\n'
Test loop.xxx.
>>> t("$for i in range(5):$loop.index, $loop.parity")()
u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
>>> t("$for i in range(2):\n $for j in range(2):$loop.parent.parity $loop.parity")()
u'odd odd\nodd even\neven odd\neven even\n'
Test assignment.
>>> t('$ a = 1\n$a')()
u'1\n'
>>> t('$ a = [1]\n$a[0]')()
u'1\n'
>>> t('$ a = {1: 1}\n$a.keys()[0]')()
u'1\n'
>>> t('$ a = []\n$if not a: 1')()
u'1\n'
>>> t('$ a = {}\n$if not a: 1')()
u'1\n'
>>> t('$ a = -1\n$a')()
u'-1\n'
>>> t('$ a = "1"\n$a')()
u'1\n'
Test comments.
>>> t('$# 0')()
u'\n'
>>> t('hello$#comment1\nhello$#comment2')()
u'hello\nhello\n'
>>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
u'\nhello\nhello\n'
Test unicode.
>>> t('$def with (a)\n$a')(u'\u203d')
u'\u203d\n'
>>> t('$def with (a)\n$a')(u'\u203d'.encode('utf-8'))
u'\u203d\n'
>>> t(u'$def with (a)\n$a $:a')(u'\u203d')
u'\u203d \u203d\n'
>>> t(u'$def with ()\nfoo')()
u'foo\n'
>>> def f(x): return x
...
>>> t(u'$def with (f)\n$:f("x")')(f)
u'x\n'
>>> t('$def with (f)\n$:f("x")')(f)
u'x\n'
Test dollar escaping.
>>> t("Stop, $$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
>>> t("Stop, \$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
Test space sensitivity.
>>> t('$def with (x)\n$x')(1)
u'1\n'
>>> t('$def with(x ,y)\n$x')(1, 1)
u'1\n'
>>> t('$(1 + 2*3 + 4)')()
u'11\n'
Make sure globals are working.
>>> t('$x')()
Traceback (most recent call last):
...
NameError: global name 'x' is not defined
>>> t('$x', globals={'x': 1})()
u'1\n'
Can't change globals.
>>> t('$ x = 2\n$x', globals={'x': 1})()
u'2\n'
>>> t('$ x = x + 1\n$x', globals={'x': 1})()
Traceback (most recent call last):
...
UnboundLocalError: local variable 'x' referenced before assignment
Make sure builtins are customizable.
>>> t('$min(1, 2)')()
u'1\n'
>>> t('$min(1, 2)', builtins={})()
Traceback (most recent call last):
...
NameError: global name 'min' is not defined
Test vars.
>>> x = t('$var x: 1')()
>>> x.x
u'1'
>>> x = t('$var x = 1')()
>>> x.x
1
>>> x = t('$var x: \n foo\n bar')()
>>> x.x
u'foo\nbar\n'
Test BOM chars.
>>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
u'foo\n'
Test for with weird cases.
>>> t('$for i in range(10)[1:5]:\n $i')()
u'1\n2\n3\n4\n'
>>> t("$for k, v in {'a': 1, 'b': 2}.items():\n $k $v")()
u'a 1\nb 2\n'
>>> t("$for k, v in ({'a': 1, 'b': 2}.items():\n $k $v")()
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Test datetime.
>>> import datetime
>>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
u'01 2009\n'
"""
pass
if __name__ == "__main__":
import sys
if '--compile' in sys.argv:
compile_templates(sys.argv[2])
else:
import doctest
doctest.testmod()
|
lastr2d2/lastchat
|
src/lib/web/template.py
|
Python
|
gpl-2.0
| 47,974
|
[
"VisIt"
] |
a3eabb0108ecaa8ff8b43d5c85162fe77e8978a94df5742fda176a2c964a45ec
|
#!/usr/bin/env python
#
# Engineering Calculator Actions
#
# An RPN calculator that supports numbers with SI scale factors and units.
# Imports {{{1
from __future__ import division
import operator
import math
import cmath
import random
from calculator import \
Command, Constant, UnaryOp, BinaryOp, BinaryIoOp, Number, \
SetFormat, Help, Store, Recall, SetUnits, Print, Dup, Category, \
Calculator
from engfmt import toNumber, toEngFmt
# Utilities {{{1
# choose
# This function replaces if/else expressions for earlier versions of python
def choose(true, cond, false):
if cond:
return true
else:
return false
# Actions {{{1
# Create actions here, they will be registered into availableActions
# automatically. That will be used to build the list of actions to make
# available to the user based on calculator personality later.
# Arithmetic Operators {{{2
arithmeticOperators = Category("Arithmetic Operators")
# addition {{{3
addition = BinaryOp(
'+'
, operator.add
, description="%(key)s: addition"
# keep units of x if they are the same as units of y
, units=lambda calc, units: choose(units[0], units[0] == units[1], '')
, synopsis='#{x}, #{y}, ... => #{x}+#{y}, ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the
stack and the sum is placed back on the stack into the #{x}
register.
"""
)
addition.addTest(
stimulus='1 1 +'
, result=1 + 1
, units=''
, text='2'
)
addition.addTest(
stimulus='100mV 25mV+'
, result=100e-3 + 25e-3
, units='V'
, text='125 mV'
)
addition.addTest(
stimulus='$100M $25M+'
, result=100e6 + 25e6
, units='$'
, text='$125M'
)
addition.addTest(
stimulus='200mV 100m+'
, result=0.2 + 0.1
, units=''
, text='300m'
)
addition.addTest(
stimulus='1 j +'
, result=1 + 1j
, units=''
, text='1 + j'
)
# subtraction {{{3
subtraction = BinaryOp(
'-'
, operator.sub
, description="%(key)s: subtraction"
# keep units of x if they are the same as units of y
, units=lambda calc, units: choose(units[0], units[0] == units[1], '')
, synopsis='#{x}, #{y}, ... => #{x}-#{y}, ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the
stack and the difference is placed back on the stack into the #{x}
register.
"""
)
subtraction.addTest(
stimulus='1 1 -'
, result=0
, units=''
, text='0'
)
subtraction.addTest(
stimulus='100mV 25mV-'
, result=100e-3 - 25e-3
, units='V'
, text='75 mV'
)
subtraction.addTest(
stimulus='$100M $25M-'
, result=100e6 - 25e6
, units='$'
, text='$75M'
)
subtraction.addTest(
stimulus='200mV 100m-'
, result=0.2 - 0.1
, units=''
, text='100m'
)
subtraction.addTest(
stimulus='1 j -'
, result=1 - 1j
, units=''
, text='1 - j'
)
# multiplication {{{3
multiplication = BinaryOp(
'*'
, operator.mul
, description="%(key)s: multiplication"
, synopsis='#{x}, #{y}, ... => #{x}*#{y}, ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the
stack and the product is placed back on the stack into the #{x}
register.
"""
)
multiplication.addTest(
stimulus='2 2 *'
, result=2 * 2
, units=''
, text='4'
)
multiplication.addTest(
stimulus='25MHz 2pi * "rads/s"'
, result=2 * math.pi * 25e6
, units='rads/s'
, text='157.08 Mrads/s'
)
multiplication.addTest(
stimulus='1 j *'
, result=1j
, units=''
, text='j'
)
multiplication.addTest(
stimulus='j j *'
, result=-1
, units=''
, text='-1'
)
# true division {{{3
trueDivision = BinaryOp(
'/'
, operator.truediv
, description="%(key)s: true division"
, synopsis='#{x}, #{y}, ... => #{y}/#{x}, ...'
, summary=r"""
The values in the #{x} and #{y} registers are popped from the stack and
the quotient is placed back on the stack into the #{x} register. Both
values are treated as real numbers and the result is a real number. So
\verb{
@{0}: 1 2/
@{500m}:
}
"""
)
trueDivision.addTest(
stimulus='1 2/'
, result=1/2
, units=''
, text='500m'
)
trueDivision.addTest(
stimulus='1 j /'
, result=-1j
, units=''
, text='-j'
)
# floor division {{{3
floorDivision = BinaryOp(
'//'
, operator.floordiv
, description="%(key)s: floor division"
, synopsis='#{x}, #{y}, ... => #{y}//#{x}, ...'
, summary=r"""
The values in the #{x} and #{y} registers are popped from the
stack, the quotient is computed and then converted to an integer using
        the floor operation (it is replaced by the largest integer that is not
        greater than the quotient), and that is placed back on the stack into
the #{x} register. So
\verb{
@{0}: 1 2//
@{0}:
}
"""
)
floorDivision.addTest(
stimulus='5 2//'
, result=5//2
, units=''
, text='2'
)
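# Note that, as with Python's // operator, the result is rounded toward
# negative infinity rather than toward zero; for example
#     -5 2//
# leaves -3 on the stack, not -2.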
# modulus {{{3
modulus = BinaryOp(
'%'
, operator.mod
, description="%(key)s: modulus"
, synopsis='#{x}, #{y}, ... => #{y}%#{x}, ...'
, summary=r"""
The values in the #{x} and #{y} registers are popped from the stack, the
quotient is computed and the remainder is placed back on the stack into
the #{x} register. So
\verb{
@{0}: 14 3%
@{2}:
}
In this case 2 is the remainder because 3 goes evenly into 14 three
times, which leaves a remainder of 2.
"""
)
modulus.addTest(
stimulus='5 2%'
, result=5%2
, units=''
, text='1'
)
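# Note that, as with Python's % operator, the remainder takes the sign of the
# divisor (the value in the x register); for example
#     -14 3%
# leaves 1 on the stack, because -14 == -5*3 + 1.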
# percent change {{{3
percentChange = BinaryOp(
'%chg'
, lambda y, x: 100*(x-y)/y
, description="%(key)s: percent change"
, synopsis='#{x}, #{y}, ... => 100*(#{x}-#{y})/#{y}, ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the stack and
the percent difference between #{x} and #{y} relative to #{y} is pushed
back into the #{x} register.
"""
)
percentChange.addTest(
stimulus='10 10.5 %chg'
, result=100*(10.5 - 10)/10
, units=''
, text='5'
)
# parallel combination {{{3
parallel = BinaryOp(
'||'
, lambda y, x: (x/(x+y))*y
# keep units of x if they are the same as units of y
, units=lambda calc, units: choose(units[0], units[0] == units[1], '')
, description="%(key)s: parallel combination"
, synopsis='#{x}, #{y}, ... => 1/(1/#{x}+1/#{y}), ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the stack and
replaced with the reciprocal of the sum of their reciprocals. If the
values in the #{x} and #{y} registers are both resistances, both
elastances, or both inductances, then the result is the resistance,
elastance or inductance of the two in parallel. If the values are
conductances, capacitances or susceptances, then the result is the
conductance, capacitance or susceptance of the two in series.
"""
)
parallel.addTest(
stimulus='100 100 ||'
, result=(100/(100+100))*100
, units=''
, text='50'
)
parallel.addTest(
stimulus='10kOhm 10kOhm ||'
, result=(1e4/(1e4+1e4))*1e4
, units='Ohm'
, text='5 KOhm'
)
parallel.addTest(
stimulus='50_Ohm 50 ||'
, result=(50/(50+50))*50
, units=''
, text='25'
)
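# The form (x/(x+y))*y used above is algebraically identical to the synopsis
# 1/(1/x + 1/y) but avoids forming the reciprocals. A quick sanity check: two
# 10 kOhm resistors in parallel give (10k/(10k+10k))*10k = 5 kOhm, which
# matches 1/(1/10k + 1/10k).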
# negation {{{3
negation = UnaryOp(
'chs'
, operator.neg
, units=lambda calc, units: units[0]
, description="%(key)s: change sign"
, synopsis='#{x}, ... => -#{x}, ...'
, summary="""
The value in the #{x} register is replaced with its negative.
"""
)
negation.addTest(
stimulus='-3 chs'
, result=3
, units=''
, text='3'
)
negation.addTest(
stimulus='330pF chs'
, result=-330e-12
, units='F'
, text='-330 pF'
)
# reciprocal {{{3
reciprocal = UnaryOp(
'recip'
, lambda x: 1/x
, description="%(key)s: reciprocal"
, synopsis='#{x}, ... => 1/#{x}, ...'
, summary="""
The value in the #{x} register is replaced with its reciprocal.
"""
)
reciprocal.addTest(
stimulus='4 recip'
, result=1/4
, units=''
, text='250m'
)
reciprocal.addTest(
stimulus='1kOhm recip'
, result=1/1000
, units=''
, text='1m'
)
reciprocal.addTest(
stimulus='0 recip'
, error='division by zero'
)
reciprocal.addTest(
stimulus='j recip'
, result=-1j
, units=''
, text='-j'
)
# ceiling {{{3
ceiling = UnaryOp(
'ceil'
, math.ceil
, units=lambda calc, units: units[0]
, description="%(key)s: round towards positive infinity"
, synopsis='#{x}, ... => ceil(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its value rounded
        towards positive infinity (replaced with the smallest integer that is
        not less than its value).
"""
)
ceiling.addTest(
stimulus='1.5 ceil'
, result=math.ceil(1.5)
, units=''
, text='2'
)
ceiling.addTest(
stimulus='-1.5 ceil'
, result=math.ceil(-1.5)
, units=''
, text='-1'
)
ceiling.addTest(
stimulus='7.5_Hz ceil'
, result=math.ceil(7.5)
, units='Hz'
, text='8 Hz'
)
ceiling.addTest(
stimulus='j ceil'
, error='Function does not support a complex argument.'
)
# floor {{{3
floor = UnaryOp(
'floor'
, math.floor
, units=lambda calc, units: units[0]
, description="%(key)s: round towards negative infinity"
, synopsis='#{x}, ... => floor(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its value rounded
        towards negative infinity (replaced with the largest integer that is
        not greater than its value).
"""
)
floor.addTest(
stimulus='1.5 floor'
, result=math.floor(1.5)
, units=''
, text='1'
)
floor.addTest(
stimulus='-1.5 floor'
, result=math.floor(-1.5)
, units=''
, text='-2'
)
floor.addTest(
stimulus='7.5_Hz floor'
, result=math.floor(7.5)
, units='Hz'
, text='7 Hz'
)
floor.addTest(
stimulus='j floor'
, error='Function does not support a complex argument.'
)
# factorial {{{3
try:
factorial = UnaryOp(
'!'
, math.factorial
, description="%(key)s: factorial"
, synopsis='#{x}, ... => #{x}!, ...'
, summary="""
The value in the #{x} register is replaced with its factorial.
"""
)
factorial.addTest(
stimulus='6!'
, result=math.factorial(6)
, units=''
, text='720'
)
except AttributeError:
factorial = None
# random number {{{3
randomNumber = Constant(
'rand'
, random.random
, description="%(key)s: random number between 0 and 1"
, synopsis='... => #{rand}, ...'
, summary="""
A number between 0 and 1 is chosen at random and its value is pushed on
the stack into #{x} register.
"""
)
randomNumber.addTest('rand', units='')
# Logs, Powers, and Exponentials {{{2
powersAndLogs = Category("Powers, Roots, Exponentials and Logarithms")
# power {{{3
power = BinaryOp(
'**'
, operator.pow
, description="%(key)s: raise y to the power of x"
, synopsis='#{x}, #{y}, ... => #{y}**#{x}, ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the
stack and replaced with the value of #{y} raised to the power of
#{x}.
"""
, aliases=['pow', 'ytox']
)
power.addTest(
stimulus='500 2**'
, result=500**2
, units=''
, text='250K'
)
power.addTest(
stimulus='8 1 3/ pow'
, result=2
, units=''
, text='2'
)
power.addTest(
stimulus='-8 1 3/ ytox'
, error='negative number cannot be raised to a fractional power'
)
# exponential {{{3
exponential = UnaryOp(
'exp'
# the following rather odd form implements the if/else construct, which is
# only available in python 2.5 and beyond
, lambda x: (math.exp, cmath.exp)[type(x) == complex](x)
, description="%(key)s: natural exponential"
, synopsis='#{x}, ... => exp(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its exponential.
Supports a complex argument.
"""
, aliases=['powe']
)
exponential.addTest(
stimulus='10 exp ln'
, result=10
, units=''
, text='10'
)
exponential.addTest(
stimulus='-10 powe ln'
, result=-10
, units=''
, text='-10'
)
exponential.addTest(
stimulus='j pi * exp'
, result=-1
, units=''
)
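# The tuple-indexing form used by exp (and similarly by ln and sqrt),
#     (math.exp, cmath.exp)[type(x) == complex](x)
# works because the boolean index selects element 0 (math.exp) when False and
# element 1 (cmath.exp) when True; with a conditional expression it would read
#     cmath.exp(x) if type(x) == complex else math.exp(x)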
# natural logarithm {{{3
naturalLog = UnaryOp(
'ln'
# the following rather odd form implements the if/else construct, which is
# only available in python 2.5 and beyond
, lambda x: (math.log, cmath.log)[type(x) == complex or x < 0](x)
, description="%(key)s: natural logarithm"
, synopsis='#{x}, ... => ln(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its natural logarithm.
Supports a complex argument.
"""
, aliases=['loge']
)
naturalLog.addTest(
stimulus='100 ln exp'
, result=100
, units=''
, text='100'
)
naturalLog.addTest(
stimulus='-100 loge'
, result=(4.60517018599+3.14159265359j)
, units=''
, text='4.6052 + j3.1416'
)
naturalLog.addTest(
stimulus='j ln'
, result=1.57079632679j
, units=''
, text='j1.5708'
)
# raise 10 to the power of x {{{3
tenPower = UnaryOp(
'pow10'
, lambda x: 10**x
, description="%(key)s: raise 10 to the power of x"
, synopsis='#{x}, ... => 10**#{x}, ...'
, summary="""
The value in the #{x} register is replaced with 10 raised to #{x}.
"""
, aliases=['10tox']
)
tenPower.addTest(
stimulus='10 pow10 log'
, result=10
, units=''
, text='10'
)
tenPower.addTest(
stimulus='-10 10tox log'
, result=-10
, units=''
, text='-10'
)
# common logarithm {{{3
commonLog = UnaryOp(
'log'
, math.log10
, description="%(key)s: base 10 logarithm"
, synopsis='#{x}, ... => log(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its common logarithm.
"""
, aliases=['log10', 'lg']
)
commonLog.addTest(
stimulus='100 log pow10'
, result=100
, units=''
, text='100'
)
# raise 2 to the power of x {{{3
twoPower = UnaryOp(
'pow2'
, lambda x: 2**x
, description="%(key)s: raise 2 to the power of x"
, synopsis='#{x}, ... => 2**#{x}, ...'
, summary="""
The value in the #{x} register is replaced with 2 raised to #{x}.
"""
, aliases=['2tox']
)
twoPower.addTest(
stimulus='16 pow2'
, result=65536
, units=''
, text='65.536K'
)
twoPower.addTest(
stimulus='-2 2tox'
, result=0.25
, units=''
, text='250m'
)
# binary logarithm {{{3
binaryLog = UnaryOp(
'log2'
, lambda x: math.log(x)/math.log(2)
, description="%(key)s: base 2 logarithm"
, synopsis='#{x}, ... => log2(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its base 2 logarithm.
"""
, aliases=['lb']
)
binaryLog.addTest(
stimulus='65536 log2'
, result=16
, units=''
, text='16'
)
binaryLog.addTest(
stimulus='0.25 lb'
, result=-2
, units=''
, text='-2'
)
# square {{{3
square = UnaryOp(
'sqr'
, lambda x: x*x
, description="%(key)s: square"
, synopsis='#{x}, ... => #{x}**2, ...'
, summary="""
The value in the #{x} register is replaced with its square.
"""
)
square.addTest(
stimulus='4 sqr'
, result=4*4
, units=''
, text='16'
)
square.addTest(
stimulus='j sqr'
, result=-1
, units=''
, text='-1'
)
# square root {{{3
squareRoot = UnaryOp(
'sqrt'
# the following rather odd form implements the if/else construct, which is
# only available in python 2.5 and beyond
, lambda x: (math.sqrt, cmath.sqrt)[type(x) == complex or x < 0](x)
, description="%(key)s: square root"
, synopsis='#{x}, ... => sqrt(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its square root.
"""
)
squareRoot.addTest(
stimulus='16 sqrt'
, result=4
, units=''
, text='4'
)
squareRoot.addTest(
stimulus='-4 sqrt'
, result=2j
, units=''
, text='j2'
)
squareRoot.addTest(
stimulus='4 j * sqrt'
, result=math.sqrt(2) + 1j*math.sqrt(2)
, units=''
, text='1.4142 + j1.4142'
)
# cube root {{{3
try:
from ctypes import util, cdll, c_double
libm = cdll.LoadLibrary(util.find_library('m'))
libm.cbrt.restype = c_double
libm.cbrt.argtypes = [c_double]
cubeRoot = UnaryOp(
'cbrt'
, lambda x: libm.cbrt(x)
, description="%(key)s: cube root"
, synopsis='#{x}, ... => cbrt(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its cube root.
"""
)
cubeRoot.addTest(
stimulus='64 cbrt'
, result=4
, units=''
, text='4'
)
cubeRoot.addTest(
stimulus='-8 cbrt'
, result=-2
, units=''
, text='-2'
)
except ImportError:
cubeRoot = None
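# libm's cbrt is used because pow() and ** reject fractional powers of
# negative numbers and older versions of the math module provide no cbrt.
# Were ctypes unavailable, a rough pure-python stand-in (not used here) could
# be written as
#     def cbrt(x):
#         return math.copysign(abs(x)**(1.0/3.0), x)
# which returns -2 for -8 to within floating point rounding.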
# Trig Functions {{{2
trigFunctions = Category("Trigonometric Functions")
# sine {{{3
sine = UnaryOp(
'sin'
, lambda x, calc: math.sin(calc.toRadians(x))
, description="%(key)s: trigonometric sine"
, needCalc=True
, synopsis='#{x}, ... => sin(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its sine.
"""
)
sine.addTest(
stimulus='90 sin'
, result=1
, units=''
, text='1'
)
sine.addTest(
stimulus='degs 270 sin'
, result=-1
, units=''
, text='-1'
)
sine.addTest(
stimulus='rads pi 2/ sin'
, result=1
, units=''
, text='1'
)
# cosine {{{3
cosine = UnaryOp(
'cos'
, lambda x, calc: math.cos(calc.toRadians(x))
, description="%(key)s: trigonometric cosine"
, needCalc=True
, synopsis='#{x}, ... => cos(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its cosine.
"""
)
cosine.addTest(
stimulus='180 cos'
, result=-1
, units=''
, text='-1'
)
cosine.addTest(
stimulus='rads pi cos'
, result=-1
, units=''
, text='-1'
)
cosine.addTest(
stimulus='degs 360 cos'
, result=1
, units=''
, text='1'
)
# tangent {{{3
tangent = UnaryOp(
'tan'
, lambda x, calc: math.tan(calc.toRadians(x))
, description="%(key)s: trigonometric tangent"
, needCalc=True
, synopsis='#{x}, ... => tan(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its tangent.
"""
)
tangent.addTest(
stimulus='45 tan'
, result=1
, units=''
, text='1'
)
tangent.addTest(
stimulus='rads pi 4/ tan'
, result=1
, units=''
, text='1'
)
tangent.addTest(
stimulus='degs -45 tan'
, result=-1
, units=''
, text='-1'
)
# arc sine {{{3
arcSine = UnaryOp(
'asin'
, lambda x, calc: calc.fromRadians(math.asin(x))
, description="%(key)s: trigonometric arc sine"
, needCalc=True
, units=lambda calc, units: calc.angleUnits()
, synopsis='#{x}, ... => asin(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its arc sine.
"""
)
arcSine.addTest(
stimulus='1 asin'
, result=90
, units='degs'
, text='90 degs'
)
arcSine.addTest(
stimulus='rads 1 sin asin'
, result=1
, units='rads'
, text='1 rads'
)
arcSine.addTest(
stimulus='degs -1 asin'
, result=-90
, units='degs'
, text='-90 degs'
)
arcSine.addTest(
stimulus='degs 2 asin'
, error='math domain error'
)
# arc cosine {{{3
arcCosine = UnaryOp(
'acos'
, lambda x, calc: calc.fromRadians(math.acos(x))
, description="%(key)s: trigonometric arc cosine"
, needCalc=True
, units=lambda calc, units: calc.angleUnits()
, synopsis='#{x}, ... => acos(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its arc cosine.
"""
)
arcCosine.addTest(
stimulus='0 acos'
, result=90
, units='degs'
, text='90 degs'
)
arcCosine.addTest(
stimulus='rads 1 acos'
, result=0
, units='rads'
, text='0 rads'
)
arcCosine.addTest(
stimulus='degs 45 cos acos'
, result=45
, units='degs'
, text='45 degs'
)
arcCosine.addTest(
stimulus='degs 2 acos'
, error='math domain error'
)
# arc tangent {{{3
arcTangent = UnaryOp(
'atan'
, lambda x, calc: calc.fromRadians(math.atan(x))
, description="%(key)s: trigonometric arc tangent"
, needCalc=True
, units=lambda calc, units: calc.angleUnits()
, synopsis='#{x}, ... => atan(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its arc tangent.
"""
)
arcTangent.addTest(
stimulus='0 atan'
, result=0
, units='degs'
, text='0 degs'
)
arcTangent.addTest(
stimulus='rads 0 atan'
, result=0
, units='rads'
, text='0 rads'
)
arcTangent.addTest(
stimulus='degs 45 tan atan'
, result=45
, units='degs'
, text='45 degs'
)
# radians {{{3
setRadiansMode = Command(
'rads'
, Calculator.useRadians
, description="%(key)s: use radians"
, summary="""
Switch the trigonometric mode to radians (functions such as #{sin},
#{cos}, #{tan}, and #{ptor} expect angles to be given in radians;
functions such as #{arg}, #{asin}, #{acos}, #{atan}, #{atan2}, and
#{rtop} should produce angles in radians).
"""
)
# degrees {{{3
setDegreesMode = Command(
'degs'
, Calculator.useDegrees
, description="%(key)s: use degrees"
, summary="""
Switch the trigonometric mode to degrees (functions such as #{sin},
#{cos}, #{tan}, and #{ptor} expect angles to be given in degrees;
functions such as #{arg}, #{asin}, #{acos}, #{atan}, #{atan2}, and
#{rtop} should produce angles in degrees).
"""
)
# Complex and Vector Functions {{{2
complexAndVectorFunctions = Category("Complex and Vector Functions")
# absolute value {{{3
# Absolute Value of a complex number.
# Also known as the magnitude, amplitude, or modulus
absoluteValue = Dup(
'abs'
, lambda x: abs(x)
, description="%(key)s: magnitude"
, units=lambda calc, units: units[0]
, synopsis='#{x}, ... => abs(#{x}), ...'
, summary="""
The absolute value of the number in the #{x} register is pushed onto the
stack if it is real. If the value is complex, the magnitude is pushed
onto the stack.
Unlike most other functions, this one does not replace the value of its
argument on the stack. Its value is simply pushed onto the stack without
first popping off the argument.
"""
, aliases=['mag']
)
absoluteValue.addTest(
stimulus='-1 abs'
, result=1
, units=''
, text='1'
)
absoluteValue.addTest(
stimulus='-1MHz abs'
, result=1e6
, units='Hz'
, text='1 MHz'
)
absoluteValue.addTest(
stimulus='j chs mag'
, result=1
, units=''
, text='1'
)
absoluteValue.addTest(
stimulus='1 j + "V" mag pop'
, result=1+1j
, units='V'
, text='1 V + j V'
)
# argument {{{3
# Argument of a complex number, also known as the phase or angle
argument = Dup(
'arg'
, lambda x, calc: choose(
calc.fromRadians(math.atan2(x.imag,x.real))
, type(x) == complex
, 0
)
, description="%(key)s: phase"
, needCalc=True
, units=lambda calc, units: calc.angleUnits()
, synopsis='#{x}, ... => arg(#{x}), ...'
, summary="""
The argument of the number in the #{x} register is pushed onto the
stack if it is complex. If the value is real, zero is pushed
onto the stack.
"""
, aliases=['ph']
)
argument.addTest(
stimulus='1 j + arg'
, result=45
, units='degs'
, text='45 degs'
)
argument.addTest(
stimulus='rads 1 j - ph'
, result=-math.pi/4
, units='rads'
, text='-785.4 mrads'
)
argument.addTest(
stimulus='1 j + "V" ph pop'
, result=1+1j
, units='V'
, text='1 V + j V'
)
argument.addTest(
stimulus='1 j + "m/s" arg pop'
, result=1+1j
, units='m/s'
, text='1 m/s + j m/s'
)
# hypotenuse {{{3
hypotenuse = BinaryOp(
'hypot'
, math.hypot
# keep units of x if they are the same as units of y
, units=lambda calc, units: choose(units[0], units[0] == units[1], '')
, description="%(key)s: hypotenuse"
, synopsis='#{x}, #{y}, ... => sqrt(#{x}**2+#{y}**2), ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the stack and
replaced with the length of the vector from the origin to the point
(#{x}, #{y}).
"""
, aliases=['len']
)
hypotenuse.addTest(
stimulus='3 4 hypot'
, result=5
, units=''
, text='5'
)
hypotenuse.addTest(
stimulus='3mm 4mm len'
, result=5e-3
, units='m'
, text='5 mm'
)
# arc tangent 2 {{{3
arcTangent2 = BinaryOp(
'atan2'
, lambda y, x, calc: calc.fromRadians(math.atan2(y, x))
, description="%(key)s: two-argument arc tangent"
, needCalc=True
, units=lambda calc, units: calc.angleUnits()
, synopsis='#{x}, #{y}, ... => atan2(#{y},#{x}), ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the stack and
        replaced with the angle of the vector from the origin to the point
        (#{x}, #{y}).
"""
, aliases=['angle']
)
arcTangent2.addTest(
stimulus='3 3 atan2'
, result=45
, units='degs'
, text='45 degs'
)
arcTangent2.addTest(
stimulus='rads -3 3 angle'
, result=-math.pi/4
, units='rads'
, text='-785.4 mrads'
)
arcTangent2.addTest(
stimulus='-3 -3 atan2'
, result=-135
, units='degs'
, text='-135 degs'
)
arcTangent2.addTest(
stimulus='3 -3 angle'
, result=135
, units='degs'
, text='135 degs'
)
arcTangent2.addTest(
stimulus='rads 0 0 atan2'
, result=0
, units='rads'
, text='0 rads'
)
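# atan2 is preferred over taking atan of the ratio y/x because it preserves
# the quadrant and tolerates x == 0; in the tests above
#     -3 -3 atan2
# correctly gives -135 degs, whereas atan((-3)/(-3)) = atan(1) would give 45.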
# rectangular to polar {{{3
rectangularToPolar = BinaryIoOp(
'rtop'
, lambda y, x, calc: (math.hypot(y, x), calc.fromRadians(math.atan2(y,x)))
# keep units of x if they are the same as units of y
, xUnits=lambda calc, units: choose(units[0], units[0] == units[1], '')
, yUnits=lambda calc, units: calc.angleUnits()
, description="%(key)s: convert rectangular to polar coordinates"
, needCalc=True
, synopsis='#{x}, #{y}, ... => sqrt(#{x}**2+#{y}**2), atan2(#{y},#{x}), ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the stack and
replaced with the length of the vector from the origin to the point
(#{x}, #{y}) and with the angle of the vector from the origin to the
point (#{x}, #{y}).
"""
)
rectangularToPolar.addTest(
stimulus='3 4 rtop'
, result=5
, units=''
, text='5'
)
rectangularToPolar.addTest(
stimulus='3kOhm -4kOhm rtop'
, result=5e3
, units='Ohm'
, text='5 KOhm'
)
rectangularToPolar.addTest(
stimulus='4MOhm 4MOhm rtop swap'
, result=45
, units='degs'
, text='45 degs'
)
rectangularToPolar.addTest(
stimulus='rads 4MOhm 4MOhm rtop swap'
, result=math.pi/4
, units='rads'
, text='785.4 mrads'
)
# polar to rectangular {{{3
polarToRectangular = BinaryIoOp(
'ptor'
, lambda ph, mag, calc: (
mag*math.cos(calc.toRadians(ph))
, mag*math.sin(calc.toRadians(ph))
)
, description="%(key)s: convert polar to rectangular coordinates"
, needCalc=True
, xUnits=lambda calc, units: units[0]
, yUnits=lambda calc, units: units[0]
, synopsis='#{x}, #{y}, ... => #{x}*cos(#{y}), #{x}*sin(#{y}), ...'
, summary="""
The values in the #{x} and #{y} registers are popped from the stack and
interpreted as the length and angle of a vector and are replaced with
the coordinates of the end-point of that vector.
"""
)
polarToRectangular.addTest(
stimulus='45 2 sqrt "V" ptor'
, result=1
, units='V'
, text='1 V'
)
polarToRectangular.addTest(
stimulus='45 2 sqrt "V" ptor swap'
, result=1
, units='V'
, text='1 V'
)
polarToRectangular.addTest(
stimulus='rads pi 4/ 2 sqrt "V" ptor'
, result=1
, units='V'
, text='1 V'
)
polarToRectangular.addTest(
stimulus='rads pi 4/ 2 sqrt "V" ptor swap'
, result=1
, units='V'
, text='1 V'
)
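# rtop and ptor are intended as inverses and share the same register
# convention: the magnitude sits in the x register and the angle (in the
# current trigonometric mode) in the y register, so converting a point to
# polar form and back recovers the original coordinates to within floating
# point rounding.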
# Hyperbolic Functions {{{2
hyperbolicFunctions = Category("Hyperbolic Functions")
# hyperbolic sine {{{3
hyperbolicSine = UnaryOp(
'sinh'
, math.sinh
, description="%(key)s: hyperbolic sine"
, synopsis='#{x}, ... => sinh(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its hyperbolic sine.
"""
)
hyperbolicSine.addTest(
stimulus='1 sinh'
, result=math.sinh(1)
, units=''
, text='1.1752'
)
# hyperbolic cosine {{{3
hyperbolicCosine = UnaryOp(
'cosh'
, math.cosh
, description="%(key)s: hyperbolic cosine"
, synopsis='#{x}, ... => cosh(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its hyperbolic cosine.
"""
)
hyperbolicCosine.addTest(
stimulus='1 cosh'
, result=math.cosh(1)
, units=''
, text='1.5431'
)
# hyperbolic tangent {{{3
hyperbolicTangent = UnaryOp(
'tanh'
, math.tanh
, description="%(key)s: hyperbolic tangent"
, synopsis='#{x}, ... => tanh(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its hyperbolic tangent.
"""
)
hyperbolicTangent.addTest(
stimulus='1 tanh'
, result=math.tanh(1)
, units=''
, text='761.59m'
)
# hyperbolic arc sine {{{3
try:
hyperbolicArcSine = UnaryOp(
'asinh'
, math.asinh
, description="%(key)s: hyperbolic arc sine"
, synopsis='#{x}, ... => asinh(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its hyperbolic arc sine.
"""
)
hyperbolicArcSine.addTest(
stimulus='1 sinh asinh'
, result=1
, units=''
, text='1'
)
except AttributeError:
hyperbolicArcSine = None
# hyperbolic arc cosine {{{3
try:
hyperbolicArcCosine = UnaryOp(
'acosh'
, math.acosh
, description="%(key)s: hyperbolic arc cosine"
, synopsis='#{x}, ... => acosh(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its hyperbolic arc
cosine.
"""
)
hyperbolicArcCosine.addTest(
stimulus='1 cosh acosh'
, result=1
, units=''
, text='1'
)
except AttributeError:
hyperbolicArcCosine = None
# hyperbolic arc tangent {{{3
try:
hyperbolicArcTangent = UnaryOp(
'atanh'
, math.atanh
, description="%(key)s: hyperbolic arc tangent"
, synopsis='#{x}, ... => atanh(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its hyperbolic arc
tangent.
"""
)
hyperbolicArcTangent.addTest(
stimulus='1 tanh atanh'
, result=1
, units=''
, text='1'
)
except AttributeError:
hyperbolicArcTangent = None
# Decibel Functions {{{2
decibelFunctions = Category("Decibel Functions")
# voltage or current to decibels {{{3
decibels20 = UnaryOp(
'db'
, lambda x: 20*math.log10(x)
, description="%(key)s: convert voltage or current to dB"
, synopsis='#{x}, ... => 20*log(#{x}), ...'
, summary="""
The value in the #{x} register is replaced with its value in
decibels. It is appropriate to apply this form when
converting voltage or current to decibels.
"""
, aliases=['db20', 'v2db', 'i2db']
)
decibels20.addTest(
stimulus='100 db'
, result=40
, units=''
, text='40'
)
decibels20.addTest(
stimulus='10m db20'
, result=-40
, units=''
, text='-40'
)
decibels20.addTest(
stimulus='1000 v2db'
, result=60
, units=''
, text='60'
)
decibels20.addTest(
stimulus='1m i2db'
, result=-60
, units=''
, text='-60'
)
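# The factor of 20 (rather than 10) reflects the fact that power is
# proportional to the square of a voltage or current: 10*log10(V**2) is the
# same as 20*log10(V). Use db/adb for amplitudes and db10/adb10 (below) for
# powers; mixing them up doubles or halves the answer in dB.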
# decibels to voltage or current {{{3
antiDecibels20 = UnaryOp(
'adb'
, lambda x: 10**(x/20)
, description="%(key)s: convert dB to voltage or current"
, synopsis='#{x}, ... => 10**(#{x}/20), ...'
, summary="""
The value in the #{x} register is converted from decibels and that value
is placed back into the #{x} register. It is appropriate to apply this
form when converting decibels to voltage or current.
"""
, aliases=['db2v', 'db2i']
)
antiDecibels20.addTest(
stimulus='40 adb'
, result=100
, units=''
, text='100'
)
antiDecibels20.addTest(
stimulus='40 db2v'
, result=100
, units=''
, text='100'
)
antiDecibels20.addTest(
stimulus='40 db2i'
, result=100
, units=''
, text='100'
)
# power to decibels {{{3
decibels10 = UnaryOp(
'db10'
, lambda x: 10*math.log10(x)
, description="%(key)s: convert power to dB"
, synopsis='#{x}, ... => 10*log(#{x}), ...'
, summary="""
        The value in the #{x} register is replaced with its value in decibels.
        It is appropriate to
apply this form when converting power to decibels.
"""
, aliases=['p2db']
)
decibels10.addTest(
stimulus='100 db10'
, result=20
, units=''
, text='20'
)
decibels10.addTest(
stimulus='100 p2db'
, result=20
, units=''
, text='20'
)
# decibels to power {{{3
antiDecibels10 = UnaryOp(
'adb10'
, lambda x: 10**(x/10)
, description="%(key)s: convert dB to power"
, synopsis='#{x}, ... => 10**(#{x}/10), ...'
, summary="""
The value in the #{x} register is converted from decibels and that value
is placed back into the #{x} register. It is appropriate to apply this
        form when converting decibels to power.
"""
, aliases=['db2p']
)
antiDecibels10.addTest(
stimulus='20 adb10'
, result=100
, units=''
, text='100'
)
antiDecibels10.addTest(
stimulus='20 db2p'
, result=100
, units=''
, text='100'
)
# voltage to dBm {{{3
voltageToDbm = UnaryOp(
'vdbm'
, lambda x, calc: 30+10*math.log10(x*x/calc.heap['Rref'][0]/2)
, description="%(key)s: convert peak voltage to dBm"
, needCalc=True
, synopsis='#{x}, ... => 30+10*log10((#{x}**2)/(2*#{Rref})), ...'
, summary="""
The value in the #{x} register is expected to be the peak voltage of a
sinusoid that is driving a load resistor equal to #{Rref} (a predefined
variable). It is replaced with the power delivered to the resistor in
decibels relative to 1 milliwatt.
"""
, aliases=['v2dbm']
)
voltageToDbm.addTest(
stimulus='1 vdbm'
, result=10
, units=''
, text='10'
)
voltageToDbm.addTest(
stimulus='0.1 v2dbm'
, result=-10
, units=''
, text='-10'
)
voltageToDbm.addTest(
stimulus='5 "Ohms" =Rref 1 vdbm'
, result=20
, units=''
, text='20'
)
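# Where the formula comes from: a sinusoid with peak voltage V delivers an
# average power of V**2/(2*Rref) to a resistance Rref, and expressing that
# power relative to 1 mW adds 30 dB, giving 30 + 10*log10(V**2/(2*Rref)).
# With the default Rref of 50 Ohms a 1 V peak sinusoid dissipates 10 mW, or
# 10 dBm, as in the first test above.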
# dBm to voltage {{{3
dbmToVoltage = UnaryOp(
'dbmv'
, lambda x, calc: math.sqrt(2*pow(10,(x - 30)/10)*calc.heap['Rref'][0])
, description="%(key)s: dBm to peak voltage"
, needCalc=True
, units='V'
    , synopsis='#{x}, ... => sqrt(2*10**((#{x}-30)/10)*#{Rref}), ...'
, summary="""
The value in the #{x} register is expected to be a power in decibels
relative to one milliwatt. It is replaced with the peak voltage of a
sinusoid that would be needed to deliver the same power to a load
resistor equal to #{Rref} (a predefined variable).
"""
, aliases=['dbm2v']
)
dbmToVoltage.addTest(
stimulus='10 dbmv'
, result=1
, units='V'
, text='1 V'
)
dbmToVoltage.addTest(
stimulus='-10 dbmv'
, result=0.1
, units='V'
, text='100 mV'
)
dbmToVoltage.addTest(
stimulus='5 "Ohms" =Rref 20 dbmv'
, result=1
, units='V'
, text='1 V'
)
# current to dBm {{{3
currentToDbm = UnaryOp(
'idbm'
, lambda x, calc: 30+10*math.log10(x*x*calc.heap['Rref'][0]/2)
, description="%(key)s: peak current to dBm"
, needCalc=True
    , synopsis='#{x}, ... => 30+10*log10((#{x}**2)*#{Rref}/2), ...'
, summary="""
The value in the #{x} register is expected to be the peak current of a
sinusoid that is driving a load resistor equal to #{Rref} (a predefined
variable). It is replaced with the power delivered to the resistor in
decibels relative to 1 milliwatt.
"""
, aliases=['i2dbm']
)
currentToDbm.addTest(
stimulus='2mA idbm'
, result=-10
, units=''
, text='-10'
)
currentToDbm.addTest(
stimulus='20uA i2dbm'
, result=-50
, units=''
, text='-50'
)
currentToDbm.addTest(
stimulus='5 "Ohms" =Rref 20uA idbm'
, result=-60
, units=''
, text='-60'
)
# dBm to current {{{3
dbmToCurrent = UnaryOp(
'dbmi'
, lambda x, calc: math.sqrt(2*pow(10,(x - 30)/10)/calc.heap['Rref'][0])
, description="%(key)s: dBm to peak current"
, needCalc=True
, units='A'
    , synopsis='#{x}, ... => sqrt(2*10**((#{x}-30)/10)/#{Rref}), ...'
, summary="""
The value in the #{x} register is expected to be a power in decibels
relative to one milliwatt. It is replaced with the peak current of a
sinusoid that would be needed to deliver the same power to a load
resistor equal to #{Rref} (a predefined variable).
"""
, aliases=['dbm2i']
)
dbmToCurrent.addTest(
stimulus='10 dbmi'
, result=20e-3
, units='A'
, text='20 mA'
)
dbmToCurrent.addTest(
stimulus='-10 dbmi'
, result=2e-3
, units='A'
, text='2 mA'
)
dbmToCurrent.addTest(
stimulus='5 "Ohms" =Rref -20 dbmi'
, result=2e-3
, units='A'
, text='2 mA'
)
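# The four dBm conversions are mutually consistent: with the default Rref of
# 50 Ohms, 1 V peak corresponds to 10 dBm (vdbm), and 10 dBm corresponds to a
# peak current of 20 mA (dbmi), which is exactly 1 V divided by 50 Ohms.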
# Constants {{{2
constants = Category("Constants")
# pi {{{3
pi = Constant(
'pi'
, lambda: math.pi
, description="%(key)s: the ratio of a circle's circumference to its diameter"
, units='rads'
, synopsis='... => #{pi}, ...'
, summary="""
The value of pi (3.141592...) is pushed on the stack into the #{x}
register.
"""
)
pi.addTest(
stimulus='pi'
, result=math.pi
, units='rads'
, text='3.1416 rads'
)
# 2 pi {{{3
twoPi = Constant(
'2pi'
, lambda: 2*math.pi
, description="%(key)s: the ratio of a circle's circumference to its radius"
, units='rads'
, synopsis='... => 2*#{pi}, ...'
, summary="""
Two times the value of pi (6.283185...) is pushed on the stack into the
#{x} register.
"""
)
twoPi.addTest(
stimulus='2pi'
, result=2*math.pi
, units='rads'
, text='6.2832 rads'
)
# sqrt 2 {{{3
squareRoot2 = Constant(
'rt2'
, lambda: math.sqrt(2)
, description="%(key)s: square root of two"
, synopsis='... => sqrt(2), ...'
, summary="""
The square root of two (1.4142...) is pushed on the stack into the #{x}
register.
"""
)
squareRoot2.addTest(
stimulus='rt2'
, result=math.sqrt(2)
, units=''
, text='1.4142'
)
# j {{{3
imaginaryUnit = Constant(
'j'
, lambda: 1j
, description="%(key)s: imaginary unit (square root of -1)"
, synopsis='... => #{j}, ...'
, summary="""
The imaginary unit (square root of -1) is pushed on the stack into
the #{x} register.
"""
)
imaginaryUnit.addTest(
stimulus='j'
, result=1j
, units=''
, text='j'
)
# j2pi {{{3
imaginaryTwoPi = Constant(
'j2pi'
, lambda: 2j*math.pi
, description="%(key)s: j*2*pi"
, units='rads'
, synopsis='... => #{j}*2*#{pi}, ...'
, summary="""
2 pi times the imaginary unit (j6.283185...) is pushed on the stack into
the #{x} register.
"""
)
imaginaryTwoPi.addTest(
stimulus='j2pi'
, result=2j*math.pi
, units='rads'
, text='j6.2832 rads'
)
# planck constant {{{3
planckConstant = Constant(
'h'
, lambda: 6.62606957e-34
, description="%(key)s: Planck constant"
, units='J-s'
, synopsis='... => #{h}, ...'
, summary="""
The Planck constant (6.62606957e-34 J-s) is pushed on the stack into
the #{x} register.
"""
)
planckConstant.addTest(
stimulus='h'
, result=6.62606957e-34
, units='J-s'
, text='662.61e-36 J-s'
)
# reduced plank constant {{{3
planckConstantReduced = Constant(
'hbar'
, lambda: 1.054571726e-34
, description="%(key)s: Reduced Planck constant"
, units='J-s'
, synopsis='... => #{h}/(2*#{pi}), ...'
, summary="""
The reduced Planck constant (1.054571726e-34 J-s) is pushed on the stack
into the #{x} register.
"""
)
planckConstantReduced.addTest(
stimulus='hbar'
, result=1.054571726e-34
, units='J-s'
, text='105.46e-36 J-s'
)
# planck length {{{3
planckLength = Constant(
'lP'
, lambda: 1.616199e-35
, description="%(key)s: Planck length"
, units='m'
, synopsis='... => #{lP}, ...'
, summary="""
The Planck length (sqrt(h*G/(2*pi*c**3)) or 1.616199e-35 m) is pushed on
the stack into the #{x} register.
"""
)
planckLength.addTest(
stimulus='lP'
, result=1.616199e-35
, units='m'
, text='16.162e-36 m'
)
# planck mass {{{3
planckMass = Constant(
'mP'
, lambda: 2.17651e-5
, description="%(key)s: Planck mass"
, units='g'
, synopsis='... => #{mP}, ...'
, summary="""
The Planck mass (sqrt(h*c/(2*pi*G)) or 2.17651e-5 g) is pushed on
the stack into the #{x} register.
"""
)
planckMass.addTest(
stimulus='mP'
, result=2.17651e-5
, units='g'
, text='21.765 ug'
)
# planck temperature {{{3
planckTemperature = Constant(
'TP'
, lambda: 1.416833e32
, description="%(key)s: Planck temperature"
, units='K'
, synopsis='... => #{TP}, ...'
, summary="""
The Planck temperature (mP*c**2/k or 1.416833e32 K) is pushed
on the stack into the #{x} register.
"""
)
planckTemperature.addTest(
stimulus='TP'
, result=1.416833e32
, units='K'
, text='141.68e30 K'
)
# planck time {{{3
planckTime = Constant(
'tP'
, lambda: 5.39106e-44
, description="%(key)s: Planck time"
, units='s'
, synopsis='... => #{tP}, ...'
, summary="""
The Planck time (sqrt(h*G/(2*pi*c**5)) or 5.39106e-44 s) is pushed on
the stack into the #{x} register.
"""
)
planckTime.addTest(
stimulus='tP'
, result=5.39106e-44
, units='s'
, text='53.911e-45 s'
)
# boltzmann constant {{{3
boltzmann = Constant(
'k'
, lambda: 1.3806488e-23
, description="%(key)s: Boltzmann constant"
, units='J/K'
, synopsis='... => #{k}, ...'
, summary="""
        The Boltzmann constant (R/NA, or 1.3806488e-23 J/K) is pushed on the
stack into the #{x} register.
"""
)
boltzmann.addTest(
stimulus='k'
, result=1.3806488e-23
, units='J/K'
, text='13.806e-24 J/K'
)
# elementary charge {{{3
elementaryCharge = Constant(
'q'
, lambda: 1.602176565e-19
, description="%(key)s: elementary charge (the charge of an electron)"
, units='C'
, synopsis='... => #{q}, ...'
, summary="""
The elementary charge (the charge of an electron or 1.602176565e-19 C)
is pushed on the stack into the #{x} register.
"""
)
elementaryCharge.addTest(
stimulus='q'
, result=1.602176565e-19
, units='C'
, text='160.22e-21 C'
)
# mass of electron {{{3
massOfElectron = Constant(
'me'
, lambda: 9.10938291e-28
, description="%(key)s: mass of an electron"
, units='g'
, synopsis='... => #{me}, ...'
, summary="""
The mass of an electron (9.10938291e-28 g) is pushed on the stack into
the #{x} register.
"""
)
massOfElectron.addTest(
stimulus='me'
, result=9.10938291e-28
, units='g'
, text='910.94e-30 g'
)
# mass of proton {{{3
massOfProton = Constant(
'mp'
, lambda: 1.672621777e-24
, description="%(key)s: mass of a proton"
, units='g'
, synopsis='... => #{mp}, ...'
, summary="""
The mass of a proton (1.672621777e-24 g) is pushed on the stack into
the #{x} register.
"""
)
massOfProton.addTest(
stimulus='mp'
, result=1.672621777e-24
, units='g'
, text='1.6726e-24 g'
)
# speed of light {{{3
speedOfLight = Constant(
'c'
, lambda: 2.99792458e8
, description="%(key)s: speed of light in a vacuum"
, units='m/s'
, synopsis='... => #{c}, ...'
, summary="""
The speed of light in a vacuum (2.99792458e8 m/s) is pushed on the stack
into the #{x} register.
"""
)
speedOfLight.addTest(
stimulus='c'
, result=2.99792458e8
, units='m/s'
, text='299.79 Mm/s'
)
# gravitational constant {{{3
gravitationalConstant = Constant(
'G'
, lambda: 6.6746e-11
, description="%(key)s: universal gravitational constant"
, units="m^3/(kg-s^2)"
, synopsis='... => #{G}, ...'
, summary="""
The universal gravitational constant (6.6746e-11 m^3/(kg-s^2)) is pushed
on the stack into the #{x} register.
"""
)
gravitationalConstant.addTest(
stimulus='G'
, result=6.6746e-11
, units='m^3/(kg-s^2)'
, text='66.746 pm^3/(kg-s^2)'
)
# acceleration of gravity {{{3
standardAccelerationOfGravity = Constant(
'g'
, lambda: 9.80665
, description="%(key)s: standard acceleration of gravity"
, units='m/s^2'
, synopsis='... => #{g}, ...'
, summary="""
        The standard acceleration of gravity on earth (9.80665 m/s^2) is pushed
on the stack into the #{x} register.
"""
)
standardAccelerationOfGravity.addTest(
stimulus='g'
, result=9.80665
, units='m/s^2'
, text='9.8066 m/s^2'
)
# avogadro constant {{{3
avogadroConstant = Constant(
'NA'
, lambda: 6.02214129e23
    , description="%(key)s: Avogadro constant"
, units='/mol'
, synopsis='... => #{NA}, ...'
, summary="""
Avogadro constant (6.02214129e23) is pushed on the stack into the #{x}
register.
"""
)
avogadroConstant.addTest(
stimulus='NA'
, result=6.02214129e23
, units='/mol'
, text='602.21e21 /mol'
)
# gas constant {{{3
molarGasConstant = Constant(
'R'
, lambda: 8.3144621
, description="%(key)s: molar gas constant"
, units='J/(mol-K)'
, synopsis='... => #{R}, ...'
, summary="""
The molar gas constant (8.3144621 J/(mol-K)) is pushed on the stack into
the #{x} register.
"""
)
molarGasConstant.addTest(
stimulus='R'
, result=8.3144621
, units='J/(mol-K)'
, text='8.3145 J/(mol-K)'
)
# zero celsius {{{3
zeroCelsius = Constant(
'0C'
, lambda: 273.15
, description="%(key)s: 0 Celsius in Kelvin"
, units='K'
, synopsis='... => #{0C}, ...'
, summary="""
Zero celsius in kelvin (273.15 K) is pushed on the stack into
the #{x} register.
"""
)
zeroCelsius.addTest(
stimulus='0C'
, result=273.15
, units='K'
, text='273.15 K'
)
# free space permittivity {{{3
freeSpacePermittivity = Constant(
'eps0'
, lambda: 8.854187817e-12
, description="%(key)s: permittivity of free space"
, units='F/m'
, synopsis='... => #{eps0}, ...'
, summary="""
The permittivity of free space (8.854187817e-12 F/m) is pushed on the
stack into the #{x} register.
"""
)
freeSpacePermittivity.addTest(
stimulus='eps0'
, result=8.854187817e-12
, units='F/m'
, text='8.8542 pF/m'
)
# free space permeability {{{3
freeSpacePermeability = Constant(
'mu0'
, lambda: 4e-7*math.pi
, description="%(key)s: permeability of free space"
, units='N/A^2'
, synopsis='... => #{mu0}, ...'
, summary="""
The permeability of free space (4e-7*pi N/A^2) is pushed on the
stack into the #{x} register.
"""
)
freeSpacePermeability.addTest(
stimulus='mu0'
, result=4e-7*math.pi
, units='N/A^2'
, text='1.2566 uN/A^2'
)
# free space characteristic impedance {{{3
freeSpaceCharacteristicImpedance = Constant(
'Z0'
, lambda: 376.730313461
, description="%(key)s: Characteristic impedance of free space"
, units='Ohms'
, synopsis='... => #{Z0}, ...'
, summary="""
The characteristic impedance of free space (376.730313461 Ohms) is
pushed on the stack into the #{x} register.
"""
)
freeSpaceCharacteristicImpedance.addTest(
stimulus='Z0'
, result=376.730313461
, units='Ohms'
, text='376.73 Ohms'
)
# Numbers {{{2
numbers = Category("Numbers")
# real number in engineering notation {{{3
# accepts numbers both with and without SI scale factors. If an SI scale factor
# is present, then attached trailing units can also be given. It is also
# possible to include commas in the number anywhere a digit can be given. It is
# a little crude in that it allows commas in the mantissa and adjacent to the
# decimal point, but other than that it works reasonably well.
engineeringNumber = Number(
pattern=r'\A(\$?([-+]?([0-9],?)*\.?(,?[,0-9])+)(([YZEPTGMKk_munpfazy])([a-zA-Z_]*))?)\Z'
, action=lambda matches: toNumber(matches[0].replace(',', ''))
, name='engnum'
, description="<#{N}[.#{M}][#{S}[#{U}]]>: a real number"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is the
integer portion of the mantissa and #{M} is an optional fractional part.
#{S} is a letter that represents an SI scale factor. #{U} the optional
units (must not contain special characters). For example, 10MHz
represents 1e7 Hz.
"""
)
engineeringNumber.addTest(
stimulus='1m'
, result=1e-3
, units=''
, text='1m'
)
engineeringNumber.addTest(
stimulus='+10.1n'
, result=10.1e-9
, units=''
, text='10.1n'
)
engineeringNumber.addTest(
stimulus='-1.1GHz'
, result=-1.1e9
, units='Hz'
, text='-1.1 GHz'
)
engineeringNumber.addTest(
stimulus='$100k'
, result=1e5
, units='$'
, text='$100K'
)
engineeringNumber.addTest(
stimulus='$-20M'
, result=-20e6
, units='$'
, text='$-20M'
)
engineeringNumber.addTest(
stimulus='.2MOhms'
, result=2e5
, units='Ohms'
, text='200 KOhms'
)
engineeringNumber.addTest(
stimulus='1000'
, result=1000.0
, units=''
, text='1K'
)
engineeringNumber.addTest(
stimulus='$1,000,000'
, result=1e6
, units='$'
, text='$1M'
)
engineeringNumber.addTest(
stimulus='$1,000K'
, result=1e6
, units='$'
, text='$1M'
)
engineeringNumber.addTest(
stimulus='$1,000,000.00'
, result=1e6
, units='$'
, text='$1M'
)
engineeringNumber.addTest(
stimulus='1,000.00K'
, result=1e6
, units=''
, text='1M'
)
# real number in scientific notation {{{3
scientificNumber = Number(
pattern=r'\A(\$?)([-+]?[0-9]*\.?[0-9]+[eE][-+]?[0-9]+)([a-zA-Z_]*)\Z'
, action=lambda matches: (float(matches[1]), choose(matches[2], matches[2], matches[0]))
, name='scinum'
, description="<#{N}[.#{M}]>e<#{E}[#{U}]>: a real number in scientific notation"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is the
integer portion of the mantissa and #{M} is an optional fractional part.
        #{E} is an integer exponent. #{U} is the optional units (must not contain
special characters). For example, 2.2e-8F represents 22nF.
"""
)
scientificNumber.addTest(
stimulus='20.0e12'
, result=20e12
, units=''
, text='20T'
)
scientificNumber.addTest(
stimulus='+2.0e+9'
, result=2e9
, units=''
, text='2G'
)
scientificNumber.addTest(
stimulus='-5.0e-9'
, result=-5e-9
, units=''
, text='-5n'
)
scientificNumber.addTest(
stimulus='.5e-12F'
, result=5e-13
, units='F'
, text='500 fF'
)
scientificNumber.addTest(
stimulus='$500e6'
, result=5e8
, units='$'
, text='$500M'
)
scientificNumber.addTest(
stimulus='$+20e+03'
, result=2e4
, units='$'
, text='$20K'
)
scientificNumber.addTest(
stimulus='$-2.0e-3'
, result=-2e-3
, units='$'
, text='$-2m'
)
# hexadecimal number {{{3
hexadecimalNumber = Number(
pattern=r"\A([-+]?)0[xX]([0-9a-fA-F]+)\Z"
, action=lambda matches: (int(matches[0]+matches[1], base=16), '')
, name='hexnum'
, description="0x<#{N}>: a hexadecimal number"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is an
integer in base 16 (use a-f to represent digits greater than 9). For
example, 0xFF represents the hexadecimal number FF or the decimal number
255.
"""
)
hexadecimalNumber.addTest(
stimulus='0x1f 0xAC + hex'
, result=203
, units=''
, text='0x00cb'
)
# octal number {{{3
# oct must be before eng if we use the 0NNN form (as opposed to the 0oNNN form)
octalNumber = Number(
pattern=r"\A([-+]?)0[oO]([0-7]+)\Z"
, action=lambda matches: (int(matches[0]+matches[1], base=8), '')
, name='octnum'
, description="0o<#{N}>: a number in octal"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is an
integer in base 8 (it must not contain the digits 8 or 9). For example,
0o77 represents the octal number 77 or the decimal number 63.
"""
)
octalNumber.addTest(
stimulus='0o77 0o33 + oct'
, result=90
, units=''
, text='0o0132'
)
# binary number {{{3
binaryNumber = Number(
pattern=r"\A([-+]?)0[bB]([01]+)\Z"
, action=lambda matches: (int(matches[0]+matches[1], base=2), '')
, name='binnum'
, description="0b<#{N}>: a number in binary"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is an
integer in base 2 (it may contain only the digits 0 or 1). For example,
        0b1111 represents the binary number 1111 or the decimal number 15.
"""
)
binaryNumber.addTest(
stimulus='0b1111 0b0001 +'
, result=16
, units=''
, text='16'
)
# hexadecimal number in verilog notation {{{3
# Verilog constants used to be incompatible with generalized units because the
# single quote in a Verilog constant conflicted with the single quotes that
# surrounded generalized units (ex: 6.28e6 'rads/s'). That is no longer a
# problem now that I have switched the quote characters to free up single quotes.
verilogHexadecimalNumber = Number(
pattern=r"\A([-+]?)'[hH]([0-9a-fA-F_]*[0-9a-fA-F])\Z"
, action=lambda matches: (int(matches[0]+matches[1].replace('_',''), base=16), '')
, name='vhexnum'
, description="'h<#{N}>: a number in Verilog hexadecimal notation"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is an
integer in base 16 (use a-f to represent digits greater than 9). For
example, 'hFF represents the hexadecimal number FF or the decimal number
255.
"""
)
verilogHexadecimalNumber.addTest(
stimulus="'h1f 'hAC + vhex"
, result=203
, units=''
, text="'h00cb"
)
# decimal number in verilog notation {{{3
verilogDecimalNumber = Number(
pattern=r"\A([-+]?)'[dD]([0-9_]*[0-9]+)\Z"
, action=lambda matches: (int(matches[0]+matches[1].replace('_',''), base=10), '')
, name='vdecnum'
, description="'d<#{N}>: a number in Verilog decimal"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is an
integer in base 10. For example, 'd99 represents the decimal number 99.
"""
)
verilogDecimalNumber.addTest(
stimulus="'d99 'd01 + vdec"
, result=100
, units=''
, text="'d0100"
)
# octal number in verilog notation {{{3
verilogOctalNumber = Number(
pattern=r"\A([-+]?)'[oO]([0-7_]*[0-7]+)\Z"
, action=lambda matches: (int(matches[0]+matches[1].replace('_',''), base=8), '')
, name='voctnum'
, description="'o<#{N}>: a number in Verilog octal"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is an
integer in base 8 (it must not contain the digits 8 or 9). For example,
'o77 represents the octal number 77 or the decimal number 63.
"""
)
verilogOctalNumber.addTest(
stimulus="'o77 'o33 + voct"
, result=90
, units=''
, text="'o0132"
)
# binary number in verilog notation {{{3
verilogBinaryNumber = Number(
pattern=r"\A([-+]?)'[bB]([01_]*[01]+)\Z"
, action=lambda matches: (int(matches[0]+matches[1].replace('_',''), base=2), '')
, name='vbinnum'
, description="'b<#{N}>: a number in Verilog binary"
, synopsis='... => #{num}, ...'
, summary="""
The number is pushed on the stack into the #{x} register. #{N} is an
integer in base 2 (it may contain only the digits 0 or 1). For example,
'b1111 represents the binary number 1111 or the decimal number 15.
"""
)
verilogBinaryNumber.addTest(
stimulus="'b1111 'b0001 +"
, result=16
, units=''
, text="16"
)
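# All four Verilog forms accept underscores as digit separators, which are
# stripped before conversion, so 'hdead_beef and 'hdeadbeef denote the same
# value.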
# Number Formats {{{2
numberFormats = Category("Number Formats")
# fixed format {{{3
setFixedFormat = SetFormat(
pattern=r'\Afix(\d{1,2})?\Z'
#, action=lambda num, digits: '{0:.{prec}f}'.format(num, prec=digits)
, action=lambda num, digits: '%0.*f' % (digits, num)
, name='fix'
, actionTakesUnits=False
, description="%(name)s[<#{N}>]: use fixed notation"
, summary="""
Numbers are displayed with a fixed number of digits to the right of the
decimal point. If an optional whole number #{N} immediately follows
#{fix}, the number of digits to the right of the decimal point is set to
#{N}.
"""
)
setFixedFormat.addTest(
stimulus="1e6 fix0"
, result=1e6
, units=''
, text="1000000"
)
setFixedFormat.addTest(
stimulus="pi fix"
, result=math.pi
, units='rads'
, text="3.1416 rads"
)
setFixedFormat.addTest(
stimulus="pi fix8"
, result=math.pi
, units='rads'
, text="3.14159265 rads"
)
setFixedFormat.addTest(
stimulus="$100 fix2"
, result=100
, units='$'
, text="$100.00"
)
# engineering format {{{3
setEngineeringFormat = SetFormat(
pattern=r'\Aeng(\d{1,2})?\Z'
, action=lambda num, units, digits: toEngFmt(num, units, prec=digits)
, name='eng'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use engineering notation"
, summary="""
Numbers are displayed with a fixed number of digits of precision and the
SI scale factors are used to convey the exponent when possible. If an
optional whole number #{N} immediately follows #{eng}, the precision is
set to #{N} digits.
"""
)
setEngineeringFormat.addTest(
stimulus="pi 1e3 * eng"
, result=1e3*math.pi
, units=''
, text="3.1416K"
)
setEngineeringFormat.addTest(
stimulus='pi 1e3 * "rads" eng8'
, result=1e3*math.pi
, units='rads'
, text="3.14159265 Krads"
)
# scientific format {{{3
setScientificFormat = SetFormat(
pattern=r'\Asci(\d{1,2})?\Z'
#, action=lambda num, digits: '{0:.{prec}e}'.format(num, prec=digits)
, action=lambda num, digits: '%.*e' % (digits, num)
, name='sci'
, actionTakesUnits=False
, description="%(name)s[<#{N}>]: use scientific notation"
, summary="""
Numbers are displayed with a fixed number of digits of precision and the
exponent is given explicitly as an integer. If an optional whole number
#{N} immediately follows #{sci}, the precision is set to #{N} digits.
"""
)
setScientificFormat.addTest(
stimulus="pi 1e3 * sci"
, result=1e3*math.pi
, units=''
, text="3.1416e+03"
)
setScientificFormat.addTest(
stimulus='pi 1e3 * "rads" sci8'
, result=1e3*math.pi
, units='rads'
, text="3.14159265e+03 rads"
)
setScientificFormat.addTest(
stimulus='1e-10 sci8'
, result=1e-10
, units=''
, text="1.00000000e-10"
)
setScientificFormat.addTest(
stimulus='$100 sci0'
, result=100
, units='$'
, text="$1e+02"
)
# hexadecimal format {{{3
setHexadecimalFormat = SetFormat(
pattern=r'\Ahex(\d{1,2})?\Z'
#, action=lambda num, units, digits: '{0:#0{width}x}'.format(int(round(num)), width=digits+2)
, action=lambda num, units, digits: '0x%0*x' % (digits, int(round(num)))
, name='hex'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use hexadecimal notation"
, summary="""
Numbers are displayed in base 16 (a-f are used to represent digits
greater than 9) with a fixed number of digits. If an optional whole
number #{N} immediately follows #{hex}, the number of digits displayed
is set to #{N}.
"""
)
setHexadecimalFormat.addTest(
stimulus="0xFF hex"
, result=0xFF
, units=''
, text="0x00ff"
)
setHexadecimalFormat.addTest(
stimulus="0xBEEF hex0"
, result=0xBEEF
, units=''
, text="0xbeef"
)
setHexadecimalFormat.addTest(
stimulus="0xDeadBeef hex8"
, result=0xDeadBeef
, units=''
, text="0xdeadbeef"
)
# octal format {{{3
setOctalFormat = SetFormat(
pattern=r'\Aoct(\d{1,2})?\Z'
#, action=lambda num, units, digits: '{0:#0{width}o}'.format(int(round(num)), width=digits+2)
, action=lambda num, units, digits: '0o%0*o' % (digits, int(round(num)))
, name='oct'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use octal notation"
, summary="""
Numbers are displayed in base 8 with a fixed number of digits. If an
optional whole number #{N} immediately follows #{oct}, the number of
digits displayed is set to #{N}.
"""
)
setOctalFormat.addTest(
stimulus="0o777 oct"
, result=0777
, units=''
, text="0o0777"
)
setOctalFormat.addTest(
stimulus="0o77 oct0"
, result=077
, units=''
, text="0o77"
)
setOctalFormat.addTest(
stimulus="0o76543210 oct8"
, result=076543210
, units=''
, text="0o76543210"
)
# binary format {{{3
try:
# must use format() because % operator does not support binary formats
# check to see if format() is supported by this version of python
junk = '{0:b}'.format(0)
setBinaryFormat = SetFormat(
pattern=r'\Abin(\d{1,2})?\Z'
, action=lambda num, units, digits: '{0:#0{width}b}'.format(int(round(num)), width=digits+2)
, name='bin'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use binary notation"
, summary="""
Numbers are displayed in base 2 with a fixed number of digits. If
an optional whole number #{N} immediately follows #{bin}, the
number of digits displayed is set to #{N}.
"""
)
setBinaryFormat.addTest(
stimulus="0b11 bin"
, result=3
, units=''
, text="0b0011"
)
setBinaryFormat.addTest(
stimulus="0b11 bin0"
, result=3
, units=''
, text="0b11"
)
setBinaryFormat.addTest(
stimulus="0b10011001 bin8"
, result=153
, units=''
, text="0b10011001"
)
except AttributeError:
setBinaryFormat = None
# verilog hexadecimal format {{{3
setVerilogHexadecimalFormat = SetFormat(
pattern=r'\Avhex(\d{1,2})?\Z'
#, action=lambda num, units, digits: "'h{0:0{width}x}".format(int(round(num)), width=digits)
, action=lambda num, units, digits: "'h%0*x" % (digits, int(round(num)))
, name='vhex'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use Verilog hexadecimal notation"
, summary="""
Numbers are displayed in base 16 in Verilog format (a-f are used to
represent digits greater than 9) with a fixed number of digits. If an
optional whole number #{N} immediately follows #{vhex}, the number of
digits displayed is set to #{N}.
"""
)
setVerilogHexadecimalFormat.addTest(
stimulus="'hFF vhex"
, result=0xFF
, units=''
, text="'h00ff"
)
setVerilogHexadecimalFormat.addTest(
stimulus="'hBEEF vhex0"
, result=0xBEEF
, units=''
, text="'hbeef"
)
setVerilogHexadecimalFormat.addTest(
stimulus="'hDeadBeef vhex8"
, result=0xDeadBeef
, units=''
, text="'hdeadbeef"
)
# verilog decimal format {{{3
setVerilogDecimalFormat = SetFormat(
pattern=r'\Avdec(\d{1,2})?\Z'
#, action=lambda num, units, digits: "'d{0:0{width}d}".format(int(round(num)), width=digits)
, action=lambda num, units, digits: "'d%0*d" % (digits, int(round(num)))
, name='vdec'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use Verilog decimal notation"
, summary="""
Numbers are displayed in base 10 in Verilog format with a fixed number
of digits. If an optional whole number #{N} immediately follows
#{vdec}, the number of digits displayed is set to #{N}.
"""
)
setVerilogDecimalFormat.addTest(
stimulus="'d99 vdec"
, result=99
, units=''
, text="'d0099"
)
setVerilogDecimalFormat.addTest(
stimulus="'d0 vdec0"
, result=0
, units=''
, text="'d0"
)
setVerilogDecimalFormat.addTest(
stimulus="'d9876543210 vdec10"
, result=9876543210
, units=''
, text="'d9876543210"
)
# verilog octal format {{{3
setVerilogOctalFormat = SetFormat(
pattern=r'\Avoct(\d{1,2})?\Z'
#, action=lambda num, units, digits: "'o{0:0{width}o}".format(int(round(num)), width=digits)
, action=lambda num, units, digits: "'o%0*o" % (digits, int(round(num)))
, name='voct'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use Verilog octal notation"
, summary="""
Numbers are displayed in base 8 in Verilog format with a fixed number of
digits. If an optional whole number #{N} immediately follows #{voct},
the number of digits displayed is set to #{N}.
"""
)
setVerilogOctalFormat.addTest(
stimulus="'o777 voct"
, result=0777
, units=''
, text="'o0777"
)
setVerilogOctalFormat.addTest(
stimulus="'o77 voct0"
, result=077
, units=''
, text="'o77"
)
setVerilogOctalFormat.addTest(
stimulus="'o76543210 voct8"
, result=076543210
, units=''
, text="'o76543210"
)
# verilog binary format {{{3
try:
# must use format() because % operator does not support binary formats
# check to see if format() is supported by this version of python
junk = '{0:b}'.format(0)
setVerilogBinaryFormat = SetFormat(
pattern=r'\Avbin(\d{1,2})?\Z'
, action=lambda num, units, digits: "'b{0:0{width}b}".format(int(round(num)), width=digits)
, name='vbin'
, actionTakesUnits=True
, description="%(name)s[<#{N}>]: use Verilog binary notation"
, summary="""
Numbers are displayed in base 2 in Verilog format with a fixed
number of digits. If an optional whole number #{N} immediately
follows #{vbin}, the number of digits displayed is set to #{N}.
"""
)
setVerilogBinaryFormat.addTest(
stimulus="'b11 vbin"
, result=3
, units=''
, text="'b0011"
)
setVerilogBinaryFormat.addTest(
stimulus="'b11 vbin0"
, result=3
, units=''
, text="'b11"
)
setVerilogBinaryFormat.addTest(
stimulus="'b10011001 vbin8"
, result=153
, units=''
, text="'b10011001"
)
except AttributeError:
setVerilogBinaryFormat = None
# Variables {{{2
variableCommands = Category("Variable Commands")
# store to variable {{{3
storeToVariable = Store(
'store'
, description='=<#{name}>: store value into a variable'
, synopsis='... => ...'
, summary="""
Store the value in the #{x} register into a variable with the given
name.
"""
)
storeToVariable.addTest(
stimulus='1MHz =freq 10us =time 2pi * * time freq *'
, result=10
, units=''
, text='10'
)
storeToVariable.addTest(
stimulus='1pF =c pop c'
, result=1e-12
, units='F'
, text='1 pF'
, warnings=['c: variable has overridden built-in.']
)
# recall from variable {{{3
recallFromVariable = Recall(
'recall'
, description='<#{name}>: recall value of a variable'
, synopsis='... => #{name}, ...'
, summary="""
Place the value of the variable with the given name into the #{x}
register.
"""
)
recallFromVariable.addTest(
stimulus='1MHz =freq 2pi * "rads" =omega 10us =time clstack freq'
, result=1e6
, units='Hz'
, text='1 MHz'
)
recallFromVariable.addTest(
stimulus='freq'
, result=0
, units=''
, text='0'
, error='freq: variable does not exist'
)
# list variables {{{3
listVariables = Command(
'vars'
, lambda calc: calc.heap.display()
, description="%(key)s: print variables"
, summary="""
List all defined variables and their values.
"""
)
listVariables.addTest(
stimulus='1MHz =freq 10us =time vars'
, result=10e-6
, units='s'
, text='10 us'
, messages=[
' Rref: 50 Ohms'
, ' freq: 1 MHz'
, ' time: 10 us'
]
)
# Stack {{{2
stackCommands = Category("Stack Commands")
# swap {{{3
swapXandY = Command(
'swap'
, Calculator.swap
, description='%(key)s: swap x and y'
, synopsis='#{x}, #{y}, ... => #{y}, #{x}, ...'
, summary="""
The values in the #{x} and #{y} registers are swapped.
"""
)
swapXandY.addTest(
stimulus='1MHz 10us swap'
, result=1e6
, units='Hz'
, text='1 MHz'
)
# dup {{{3
duplicateX = Dup(
'dup'
, None
, description="%(key)s: duplicate #{x}"
, synopsis='#{x}, ... => #{x}, #{x}, ...'
, summary="""
The value in the #{x} register is pushed onto the stack again.
"""
, aliases=['enter']
)
duplicateX.addTest(
stimulus='1MHz 10us dup'
, result=10e-6
, units='s'
, text='10 us'
)
duplicateX.addTest(
stimulus='1MHz 10us dup swap'
, result=10e-6
, units='s'
, text='10 us'
)
# pop {{{3
popX = Command(
'pop'
, Calculator.pop
, description='%(key)s: discard x'
, synopsis='#{x}, ... => ...'
, summary="""
The value in the #{x} register is pulled from the stack and discarded.
"""
, aliases=['clrx']
)
popX.addTest(
stimulus='1MHz 10us pop'
, result=1e6
, units='Hz'
, text='1 MHz'
)
popX.addTest(
stimulus='pi eps0 q pop pop pop pop'
, result=0
, units=''
, text='0'
)
# stack {{{3
listStack = Command(
'stack'
, lambda calc: calc.stack.display()
, description="%(key)s: print stack"
, summary="""
Print all the values stored on the stack.
"""
)
listStack.addTest(
stimulus='1MHz 10us q 36 stack'
, result=36
, units=''
, text='36'
, messages=[
' 1 MHz'
, ' 10 us'
, ' y: 160.22e-21 C'
, ' x: 36'
]
)
# clstack {{{3
clearStack = Command(
'clstack'
, lambda calc: calc.stack.clear()
, description="%(key)s: clear stack"
, synopsis='... =>'
, summary="""
Remove all values from the stack.
"""
)
listStack.addTest(
stimulus='1MHz 10us clstack stack'
, result=0
, units=''
, text='0'
)
# Miscellaneous {{{2
miscellaneousCommands = Category("Miscellaneous Commands")
printText = Print(
name='print'
, description='`<text>`: print text'
, summary="""\
Print "text" (the contents of the back-quotes) to the terminal.
Generally used in scripts to report and annotate results. Any instances
of $N or ${N} are replaced by the value of register N, where 0
represents the #{x} register, 1 represents the #{y} register, etc. Any
instances of $Var or ${Var} are replaced by the value of the variable
#{Var}.
"""
)
printText.addTest(
stimulus='2 1 0 `Hello world!`'
, result=0
, units=''
, text='0'
, messages=["Hello world!"]
)
printText.addTest(
stimulus='2 1 0 `$0`'
, result=0
, units=''
, text='0'
, messages=["0"]
)
printText.addTest(
stimulus='2 1 0 `$0 is x`'
, result=0
, units=''
, text='0'
, messages=["0 is x"]
)
printText.addTest(
stimulus='2 1 0 `x is $0`'
, result=0
, units=''
, text='0'
, messages=["x is 0"]
)
printText.addTest(
stimulus='2 1 0 `x is $0, y is $1`'
, result=0
, units=''
, text='0'
, messages=["x is 0, y is 1"]
)
printText.addTest(
stimulus='2 1 0 `x is ${0}, y is ${1}`'
, result=0
, units=''
, text='0'
, messages=["x is 0, y is 1"]
)
printText.addTest(
stimulus='2 1 0 `x is $0, y is $1, z = $2`'
, result=0
, units=''
, text='0'
, messages=["x is 0, y is 1, z = 2"]
)
printText.addTest(
stimulus='2 1 0 `x is $0, y is $1, z = $2, t is $3`'
, result=0
, units=''
, text='0'
, messages=["x is 0, y is 1, z = 2, t is $?3?"]
, warnings=["$3: unknown."]
)
printText.addTest(
stimulus='`I have $Rref, you have $$50`'
, result=0
, units=''
, text='0'
, messages= ["I have 50 Ohms, you have $50"]
)
printText.addTest(
stimulus='`I have ${Rref}, you have $$50`'
, result=0
, units=''
, text='0'
, messages= ["I have 50 Ohms, you have $50"]
)
printText.addTest(
stimulus='`I have $Q, you have $$50`'
, result=0
, units=''
, text='0'
, messages=["I have $?Q?, you have $50"]
, warnings=["$Q: unknown."]
)
printText.addTest(
stimulus='$100 ``'
, result=100
, units='$'
, text='$100'
, messages=["$100"]
)
setUnits = SetUnits(
name='units'
, description='"<units>": set the units of the x register'
, synopsis='x, ... => x "units", ...'
, summary="""\
The units given are applied to the value in the #{x} register.
The actual value is unchanged.
"""
)
setUnits.addTest(
stimulus='100M "V/s"'
, result=1e8
, units='V/s'
, text='100 MV/s'
)
printAbout = Command(
'about'
, Calculator.aboutMsg
, description="%(key)s: print information about this calculator"
)
printAbout.addTest(
stimulus='about'
, messages=True
)
terminate = Command(
'quit'
, Calculator.quit
, description="%(key)s: quit (:q or ^D also works)"
, aliases=[':q']
)
printHelp = Command(
'help'
, Calculator.displayHelp
, description="%(key)s: print a summary of the available features"
)
printHelp.addTest(
stimulus='help'
, messages=True
)
detailedHelp = Help(
name='?'
, description="%(name)s[<topic>]: detailed help on a particular topic"
, summary="""\
A topic, in the form of a symbol or name, may follow the question mark,
in which case a detailed description will be printed for that topic.
If no topic is given, a list of available topics is listed.
"""
)
detailedHelp.addTest(
stimulus='?'
, messages=True
)
detailedHelp.addTest(
stimulus='??'
, messages=True
)
detailedHelp.addTest(
stimulus='?XXXXXXXXXX'
, messages=True
, warnings=['XXXXXXXXXX: not found.\n']
)
# The detailed help command with arguments is tested in test.ec.py.
# Action Sublists {{{1
# Arithmetic Operators {{{2
arithmeticOperatorActions = [
arithmeticOperators,
addition,
subtraction,
multiplication,
trueDivision,
floorDivision,
modulus,
negation,
reciprocal,
ceiling,
floor,
factorial,
percentChange,
parallel,
]
# Logs, Powers, and Exponentials {{{2
logPowerExponentialActions = [
powersAndLogs,
power,
exponential,
naturalLog,
tenPower,
commonLog,
twoPower,
binaryLog,
square,
squareRoot,
cubeRoot,
]
# Trig Functions {{{2
trigFunctionActions = [
trigFunctions,
sine,
cosine,
tangent,
arcSine,
arcCosine,
arcTangent,
setRadiansMode,
setDegreesMode,
]
# Complex and Vector Functions {{{2
complexVectorFunctionActions = [
complexAndVectorFunctions,
absoluteValue,
argument,
hypotenuse,
arcTangent2,
rectangularToPolar,
polarToRectangular,
]
# Hyperbolic Functions {{{2
hyperbolicFunctionActions = [
hyperbolicFunctions,
hyperbolicSine,
hyperbolicCosine,
hyperbolicTangent,
hyperbolicArcSine,
hyperbolicArcCosine,
hyperbolicArcTangent,
]
# Decibel Functions {{{2
decibelFunctionActions = [
decibelFunctions,
decibels20,
antiDecibels20,
decibels10,
antiDecibels10,
voltageToDbm,
dbmToVoltage,
currentToDbm,
dbmToCurrent,
]
# Constants {{{2
commonConstantActions = [
constants,
pi,
twoPi,
squareRoot2,
zeroCelsius,
]
engineeringConstantActions = [
imaginaryUnit,
imaginaryTwoPi,
boltzmann,
planckConstant,
elementaryCharge,
speedOfLight,
freeSpacePermittivity,
freeSpacePermeability,
freeSpaceCharacteristicImpedance,
]
physicsConstantActions = [
planckConstant,
planckConstantReduced,
# planckLength,
# planckMass,
# planckTemperature,
# planckTime,
boltzmann,
elementaryCharge,
massOfElectron,
massOfProton,
speedOfLight,
gravitationalConstant,
standardAccelerationOfGravity,
freeSpacePermittivity,
freeSpacePermeability,
]
chemistryConstantActions = [
planckConstant,
planckConstantReduced,
boltzmann,
elementaryCharge,
massOfElectron,
massOfProton,
molarGasConstant,
avogadroConstant,
]
constantActions = (
commonConstantActions +
engineeringConstantActions +
physicsConstantActions +
chemistryConstantActions
)
# Numbers {{{2
numberActions = [
numbers,
engineeringNumber,
scientificNumber,
hexadecimalNumber,
octalNumber,
binaryNumber,
verilogHexadecimalNumber,
verilogDecimalNumber,
verilogOctalNumber,
verilogBinaryNumber,
]
realNumberActions = [
numbers,
engineeringNumber,
scientificNumber,
]
# Number Formats {{{2
numberFormatActions = [
numberFormats,
setEngineeringFormat,
setScientificFormat,
setFixedFormat,
setHexadecimalFormat,
setOctalFormat,
setBinaryFormat,
setVerilogHexadecimalFormat,
setVerilogDecimalFormat,
setVerilogOctalFormat,
setVerilogBinaryFormat,
]
realNumberFormatActions = [
numberFormats,
setEngineeringFormat,
setScientificFormat,
setFixedFormat,
]
# Variables {{{2
variableActions = [
variableCommands,
storeToVariable,
recallFromVariable,
listVariables,
]
# Stack {{{2
stackActions = [
stackCommands,
swapXandY,
duplicateX,
popX,
listStack,
clearStack,
]
# Miscellaneous {{{2
miscellaneousActions = [
miscellaneousCommands,
randomNumber,
printText,
setUnits,
printAbout,
terminate,
printHelp,
detailedHelp,
]
# Action Lists {{{1
# All actions {{{2
allActions = (
arithmeticOperatorActions +
logPowerExponentialActions +
trigFunctionActions +
complexVectorFunctionActions +
hyperbolicFunctionActions +
decibelFunctionActions +
constantActions +
numberActions +
numberFormatActions +
variableActions +
stackActions +
miscellaneousActions
)
# Engineering actions {{{2
engineeringActions = (
arithmeticOperatorActions +
logPowerExponentialActions +
trigFunctionActions +
complexVectorFunctionActions +
hyperbolicFunctionActions +
decibelFunctionActions +
commonConstantActions +
engineeringConstantActions +
numberActions +
numberFormatActions +
variableActions +
stackActions +
miscellaneousActions
)
# Physics actions {{{2
physicsActions = (
arithmeticOperatorActions +
logPowerExponentialActions +
trigFunctionActions +
complexVectorFunctionActions +
hyperbolicFunctionActions +
commonConstantActions +
physicsConstantActions +
realNumberActions +
realNumberFormatActions +
variableActions +
stackActions +
miscellaneousActions
)
# Chemistry actions {{{2
chemistryActions = (
arithmeticOperatorActions +
logPowerExponentialActions +
trigFunctionActions +
commonConstantActions +
chemistryConstantActions +
realNumberActions +
realNumberFormatActions +
variableActions +
stackActions +
miscellaneousActions
)
# Configure Calculator {{{1
# To modify the personality of the calculator, choose the set of actions to use
# and any predefined variables needed here. You can also adjust the list of
# actions by commenting out undesired ones in the lists above.
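# For example (illustrative only, not the shipped default), a physics-oriented
# calculator could be selected by replacing the assignment below with:
#     actionsToUse = physicsActions
# where physicsActions is one of the action lists assembled above.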
actionsToUse = allActions
if (
voltageToDbm in actionsToUse or
dbmToVoltage in actionsToUse or
currentToDbm in actionsToUse or
dbmToCurrent in actionsToUse
):
predefinedVariables = {'Rref': (50, 'Ohms')}
else:
predefinedVariables = {}
# The following variables are imported into the calculator and affect its
# default behavior.
defaultFormat = setEngineeringFormat
defaultDigits = 4
# The following variables control the generation of the documentation
# (the man page).
documentComplexNumbers = (
imaginaryUnit in actionsToUse or
imaginaryTwoPi in actionsToUse
)
documentVerilogIntegers = (
verilogHexadecimalNumber in actionsToUse or
verilogDecimalNumber in actionsToUse or
verilogOctalNumber in actionsToUse or
verilogBinaryNumber in actionsToUse or
setVerilogHexadecimalFormat in actionsToUse or
setVerilogDecimalFormat in actionsToUse or
setVerilogOctalFormat in actionsToUse or
setVerilogBinaryFormat in actionsToUse
)
documentIntegers = (
    documentVerilogIntegers or
hexadecimalNumber in actionsToUse or
octalNumber in actionsToUse or
binaryNumber in actionsToUse or
setHexadecimalFormat in actionsToUse or
setOctalFormat in actionsToUse or
setBinaryFormat in actionsToUse
)
|
KenKundert/ec0
|
actions.py
|
Python
|
gpl-3.0
| 79,995
|
[
"Avogadro"
] |
3c688a1d2eca5cdaa78f84a294dd77cdd0e31deb6af5e19951a9ca40dbe9fde9
|
# This demo illustrates how mtSet can be used for multi and single-trait set tests
# We consider a dataset of 192 samples and 3 flowering time phenotypes in A.thaliana from Atwell et al 2010 (Nature)
# Phenotypes were quantile-normalized to a gaussian distribution beforehand
# Here we consider 3 different models
# - mtSet: multi-trait analysis where relatedness is accounted for as a random effect
# - stSet: single-trait analysis where relatedness is accounted for as a random effect
# - mtSetPC: multi-trait analysis where relatedness is accounted for with the first 5 principal components of the kinship as fixed effects
import ipdb
import sys
sys.path.append('./..')
import mtSet.pycore.modules.splitter as SPLIT
import mtSet.pycore.modules.multiTraitSetTest as MTST
import mtSet.pycore.modules.chi2mixture as C2M
from mtSet.pycore.utils.utils import smartAppend
from mtSet.pycore.utils.utils import smartDumpDictHdf5
import scipy as SP
import h5py
import pylab as PL
import copy
import os
import cPickle
import time as TIME
import urllib
# data and cache files
files = {}
files['data_dir'] = 'data'
files['data'] = 'data/arab107_preprocessed.hdf5'
files['data_url'] = 'http://www.ebi.ac.uk/~casale/arab107_preprocessed.hdf5'
files['out_file'] = 'data/results.hdf5'
files['split_cache'] = 'windows_split.hdf5'
files['mtSet_null_cache'] = 'mtSet_null_cache.hdf5'
files['stSet_null_cache'] = 'stSet_null_cache.hdf5'
files['mtSetPC_null_cache'] = 'mtSetPC_null_cache.hdf5'
# settings for splitting the genome in different regions and permutations
settings = {}
settings['window_size'] = 1e4
settings['minNumberSnps'] = 4 # considers only windows with at least 4 SNPs
settings['n_windows'] = 10
settings['n_permutations'] = 10
def download_data():
if not os.path.exists(files['data']):
if not os.path.exists(files['data_dir']):
os.makedirs(files['data_dir'])
print "file not found, downloading from %s" % files['data_url']
testfile=urllib.URLopener()
testfile.retrieve(files['data_url'],files['data'])
if __name__ == "__main__":
# N = number of samples
# P = number of phenotypes
# V = number of variants
# K = number of covariates
download_data()
# import data
f = h5py.File(files['data'],'r')
phenotype = f['phenotype'][:] # phenotype matrix (NxP)
phenotypeID = f['phenotypeID'][:] # phenotype ids (P-vector)
genotype = f['genotype'] # genotype matrix (NxV)
relatedness = f['relatedness'][:] # relatedness matrix (NxN)
geno_pos = f['geno_pos'][:] # genotype positions (V-vector)
    geno_chrom = f['geno_chrom'][:] # genotype chromosomes (V-vector)
covariates = f['covariates'][:] # covariate matrix (NxK)
# here we consider no covariates for mtSet and stSet
# while we consider 6 covariates for mtSetPC
# (intercept term and first 5 pcs of the relatedness matrix)
# multi trait set test class
mtSet = MTST.MultiTraitSetTest(phenotype,relatedness)
mtSetPC = MTST.MultiTraitSetTest(phenotype,F=covariates)
print '.. fit null models'
mtSet_null_info = mtSet.fitNull(cache=True,fname=files['mtSet_null_cache'],rewrite=True)
stSet_null_info = mtSet.fitNullTraitByTrait(cache=True,fname=files['stSet_null_cache'],rewrite=True)
    mtSetPC_null_info = mtSetPC.fitNull(cache=True,fname=files['mtSetPC_null_cache'],rewrite=True)
print '.. precompute genotype windows'
split = SPLIT.Splitter(pos=geno_pos,chrom=geno_chrom)
split.splitGeno(size=settings['window_size'],minSnps=settings['minNumberSnps'],cache=True,fname=files['split_cache'])
nWindows = split.get_nWindows()
RV = {}
print '.. set test scan'
for window_idx in range(settings['n_windows']):
print '\t.. window %d'%window_idx
# consider genetic region
Iregion, rv_windows = split.getWindow(window_idx)
region = genotype[:,Iregion]
# fit models
rv_mtSet = mtSet.optimize(region)
rv_stSet = mtSet.optimizeTraitByTrait(region)
rv_mtSetPC = mtSetPC.optimize(region)
# store LLR (log likelihood ratios) and window positions
smartAppend(RV,'window_chromosome',rv_windows['chrom'][0])
smartAppend(RV,'window_start',rv_windows['start'][0])
smartAppend(RV,'window_end',rv_windows['end'][0])
smartAppend(RV,'llr_mtSet',rv_mtSet['LLR'][0])
smartAppend(RV,'llr_stSet',SP.concatenate([rv_stSet[key]['LLR'] for key in rv_stSet.keys()]))
smartAppend(RV,'llr_mtSetPC',rv_mtSetPC['LLR'][0])
# consider permutations
for permutation_i in range(settings['n_permutations']):
print '.. permutation %d' % permutation_i
# set seed and generate sample permutation
SP.random.seed(permutation_i)
permutation = SP.random.permutation(phenotype.shape[0])
for window_idx in range(settings['n_windows']):
print '\t.. window %d'%window_idx
# consider genetic region and permute
Iregion, rv_windows = split.getWindow(window_idx)
region = genotype[:,Iregion]
permuted_region = region[permutation,:]
# fit models
rv_mtSet = mtSet.optimize(permuted_region)
rv_stSet = mtSet.optimizeTraitByTrait(permuted_region)
rv_mtSetPC = mtSetPC.optimize(permuted_region)
# store permutation LLRs
smartAppend(RV,'permutation_llr_mtSet',rv_mtSet['LLR'][0])
smartAppend(RV,'permutation_llr_stSet',SP.concatenate([rv_stSet[key]['LLR'] for key in rv_stSet.keys()]))
smartAppend(RV,'permutation_llr_mtSetPC',rv_mtSetPC['LLR'][0])
# vectorize outputs
for key in RV.keys(): RV[key] = SP.array(RV[key])
    # ipdb.set_trace()  # debugger breakpoint disabled so the demo can run to completion unattended
print '.. calculate p-values'
    print '(for an accurate estimate of p-values either the number of windows or the number of permutations should be increased)'
c2m = C2M.Chi2mixture(tol=4e-3)
# obtain p-values for mtSet
c2m.estimate_chi2mixture(RV['permutation_llr_mtSet'])
RV['pv_mtSet'] = c2m.sf(RV['llr_mtSet'])
RV['permutation_pv_mtSet'] = c2m.sf(RV['permutation_llr_mtSet'])
# obtain p-values for stSet
RV['pv_stSet'] = SP.zeros_like(RV['llr_stSet'])
RV['permutation_pv_stSet'] = SP.zeros_like(RV['permutation_llr_stSet'])
for p in range(phenotype.shape[1]):
c2m.estimate_chi2mixture(RV['permutation_llr_stSet'][:,p])
RV['pv_stSet'][:,p] = c2m.sf(RV['llr_stSet'][:,p])
RV['permutation_pv_stSet'][:,p] = c2m.sf(RV['permutation_llr_stSet'][:,p])
# obtain p-values for mtSetPC
c2m.estimate_chi2mixture(RV['permutation_llr_mtSetPC'])
RV['pv_mtSetPC'] = c2m.sf(RV['llr_mtSetPC'])
RV['permutation_pv_mtSetPC'] = c2m.sf(RV['permutation_llr_mtSetPC'])
print '.. export results in %s'%files['out_file']
fout = h5py.File(files['out_file'],'w')
smartDumpDictHdf5(RV,fout)
fout.close()
|
PMBio/mtSet
|
notebooks/arab_demo.py
|
Python
|
apache-2.0
| 6,963
|
[
"Gaussian"
] |
c2aa72bcf9fde89b478af2811a9fc889f0c9cffe3aefcf60bb54c8d6ff07e878
|
"""
State Space Analysis using the Kalman Filter
References
-----------
Durbin., J and Koopman, S.J. `Time Series Analysis by State Space Methods`.
Oxford, 2001.
Hamilton, J.D. `Time Series Analysis`. Princeton, 1994.
Notes
-----
This file follows Hamilton's notation pretty closely.
"""
from scipy import optimize
import numpy as np
from var import chain_dot #TODO: move this to tools
#TODO: See Koopman and Durbin (2000)
#Fast filtering and smoothing for multivariate state space models
# and The Riksbank -- Strid and Walentin (2008)
# Block Kalman filtering for large-scale DSGE models
# but this is obviously macro model specific
def kalmansmooth(F, A, H, Q, R, y, X, xi10):
pass
def kalmanfilter(F, A, H, Q, R, y, X, xi10, ntrain, history=False):
"""
Returns the negative log-likelihood of y conditional on the information set
Assumes that the initial state and all innovations are multivariate
Gaussian.
Parameters
-----------
F : array-like
The (r x r) array holding the transition matrix for the hidden state.
A : array-like
The (nobs x k) array relating the predetermined variables to the
observed data.
H : array-like
The (nobs x r) array relating the hidden state vector to the
observed data.
Q : array-like
(r x r) variance/covariance matrix on the error term in the hidden
state transition.
R : array-like
(nobs x nobs) variance/covariance of the noise in the observation
equation.
y : array-like
The (nobs x 1) array holding the observed data.
X : array-like
The (nobs x k) array holding the predetermined variables data.
xi10 : array-like
Is the (r x 1) initial prior on the initial state vector.
ntrain : int
The number of training periods for the filter. This is the number of
observations that do not affect the likelihood.
Returns
-------
likelihood
The negative of the log likelihood
    history of priors : array
        The history of prior state estimates, returned only if `history` is True.
Notes
-----
No input checking is done.
"""
# uses log of Hamilton 13.4.1
F = np.asarray(F)
H = np.asarray(H)
n = H.shape[1] # remember that H gets transposed
y = np.asarray(y)
A = np.asarray(A)
if y.ndim == 1: # note that Y is in rows for now
y = y[:,None]
nobs = y.shape[0]
xi10 = np.asarray(xi10)
    if xi10.ndim == 1:
        xi10 = xi10[:,None]
if history:
state_vector = [xi10]
Q = np.asarray(Q)
r = xi10.shape[0]
# Eq. 12.2.21, other version says P0 = Q
# p10 = np.dot(np.linalg.inv(np.eye(r**2)-np.kron(F,F)),Q.ravel('F'))
# p10 = np.reshape(P0, (r,r), order='F')
# Assume a fixed, known intial point and set P0 = Q
p10 = Q
loglikelihood = 0
for i in range(nobs):
HTPHR = chain_dot(H.T,p10,H)+R
if HTPHR.ndim == 1:
HTPHRinv = 1./HTPHR
else:
HTPHRinv = np.linalg.inv(HTPHR) # correct
part1 = y[i] - np.dot(A.T,X) - np.dot(H.T,xi10) # correct
if i >= ntrain: # zero-index, but ntrain isn't
HTPHRdet = np.linalg.det(HTPHR) # correct
part2 = -.5*chain_dot(part1.T,HTPHRinv,part1) # correct
#TODO: Need to test with ill-conditioned problem.
loglike_interm = (-n/2.) * np.log(2*np.pi) - .5*\
np.log(HTPHRdet) + part2
loglikelihood += loglike_interm
# 13.2.15 Update current state xi_t based on y
xi11 = xi10 + chain_dot(p10, H, HTPHRinv, part1)
# 13.2.16 MSE of that state
p11 = p10 - chain_dot(p10, H, HTPHRinv, H.T, p10)
# 13.2.17 Update forecast about xi_{t+1} based on our F
xi10 = np.dot(F,xi11)
if history:
state_vector.append(xi10)
# 13.2.21 Update the MSE of the forecast
p10 = chain_dot(F,p11,F.T) + Q
if not history:
return -loglikelihood
else:
return -loglikelihood, np.asarray(state_vector[:-1])
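# Illustrative usage sketch (hypothetical values, not part of the original
# module). With a scalar AR(1) state and no predetermined variables, the call
# mirrors the pattern used by updatematrices() further below:
#
#     F = np.array([[0.9]])      # state transition
#     H = np.array([[1.0]])      # observation loading (transposed internally)
#     Q = np.array([[1.0]])      # state innovation variance
#     xi10 = np.array([[0.0]])   # prior mean of the initial state
#     nll = kalmanfilter(F, 0, H, Q, 0.25, y, 0, xi10, ntrain=1)
#
# Here y is a 1-d array of observations, A and X are passed as 0 because there
# are no exogenous regressors, 0.25 plays the role of R (observation noise
# variance), and the return value is the negative Gaussian log-likelihood.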
class StateSpaceModel(object):
def __init__(self, endog, exog=None, ARMA=(0,0)):
"""
Parameters
----------
endog : array-like
A (nobs x n) array of observations.
exog : array-like, optional
A (nobs x k) array of covariates.
Notes
-----
exog are not handled right now.
Created with a (V)ARMA in mind, but not really general yet.
"""
endog = np.asarray(endog)
if endog.ndim == 1:
endog = endog[:,None]
self.endog = endog
n = endog.shape[1]
self.n = n
self.nobs = endog.shape[0]
self.exog = exog
# xi10 = np.ararray(xi10)
# if xi10.ndim == 1:
# xi10 = xi10[:,None]
# self.xi10 = xi10
# self.ntrain = ntrain
# self.p = ARMA[0]
# self.q = ARMA[1]
# self.pq = max(ARMA)
# self.r = xi10.shape[1]
# self.A = A
# self.Q = Q
# self.F = F
# self.Hmat =
# if n == 1:
# F =
def _updateloglike(self, params, ntrain, penalty, upperbounds, lowerbounds,
F,A,H,Q,R, history):
"""
"""
paramsorig = params
# are the bounds binding?
params = np.min((np.max((lowerbounds, params), axis=0),upperbounds),
axis=0)
#TODO: does it make sense for all of these to be allowed to be None?
        if F is not None and callable(F):
            F = F(params)
        elif F is None:
            F = 0
        if A is not None and callable(A):
            A = A(params)
        elif A is None:
            A = 0
        if H is not None and callable(H):
            H = H(params)
        elif H is None:
            H = 0
        if Q is not None and callable(Q):
            Q = Q(params)
        elif Q is None:
            Q = 0
        if R is not None and callable(R):
            R = R(params)
        elif R is None:
            R = 0
        X = self.exog
        if X is None:
            X = 0
y = self.endog
loglike = kalmanfilter(F,A,H,Q,R,y,X, xi10, ntrain, history)
# use a quadratic penalty function to move away from bounds
loglike += penalty * np.sum((paramsorig-params)**2)
return loglike
# r = self.r
# n = self.n
# F = np.diagonal(np.ones(r-1), k=-1) # think this will be wrong for VAR
# cf. 13.1.22 but think VAR
# F[0] = params[:p] # assumes first p start_params are coeffs
# of obs. vector, needs to be nxp for VAR?
# self.F = F
# cholQ = np.diag(start_params[p:]) # fails for bivariate
# MA(1) section
# 13.4.2
# Q = np.dot(cholQ,cholQ.T)
# self.Q = Q
# HT = np.zeros((n,r))
# xi10 = self.xi10
# y = self.endog
# ntrain = self.ntrain
# loglike = kalmanfilter(F,H,y,xi10,Q,ntrain)
def fit_kalman(self, start_params, ntrain=1, F=None, A=None, H=None, Q=None,
R=None, method="bfgs", penalty=True, upperbounds=None,
lowerbounds=None):
"""
Parameters
----------
method : str
Only "bfgs" is currently accepted.
start_params : array-like
The first guess on all parameters to be estimated. This can
be in any order as long as the F,A,H,Q, and R functions handle
the parameters appropriately.
        xi10 : array-like
The (r x 1) vector of initial states. See notes.
F,A,H,Q,R : functions or array-like, optional
If functions, they should take start_params (or the current
value of params during iteration and return the F,A,H,Q,R matrices).
See notes. If they are constant then can be given as array-like
objects. If not included in the state-space representation then
            can be left as None. See the usage example in the __main__ block at the bottom of this module.
penalty : bool,
Whether or not to include a penalty for solutions that violate
the bounds given by `lowerbounds` and `upperbounds`.
lowerbounds : array-like
Lower bounds on the parameter solutions. Expected to be in the
same order as `start_params`.
upperbounds : array-like
Upper bounds on the parameter solutions. Expected to be in the
same order as `start_params`
"""
y = self.endog
ntrain = ntrain
_updateloglike = self._updateloglike
params = start_params
if method.lower() == 'bfgs':
(params, llf, score, cov_params, func_calls, grad_calls,
warnflag) = optimize.fmin_bfgs(_updateloglike, params,
args = (ntrain, penalty, upperbounds, lowerbounds,
F,A,H,Q,R, False), gtol= 1e-8, epsilon=1e-5,
full_output=1)
#TODO: provide more options to user for optimize
# Getting history would require one more call to _updatelikelihood
self.params = params
self.llf = llf
self.gradient = score
self.cov_params = cov_params # how to interpret this?
self.warnflag = warnflag
def updatematrices(params, y, xi10, ntrain, penalty, upperbound, lowerbound):
"""
TODO: change API, update names
This isn't general. Copy of Luca's matlab example.
"""
paramsorig = params
# are the bounds binding?
params = np.min((np.max((lowerbound,params),axis=0),upperbound), axis=0)
rho = params[0]
sigma1 = params[1]
sigma2 = params[2]
F = np.array([[rho, 0],[0,0]])
cholQ = np.array([[sigma1,0],[0,sigma2]])
H = np.ones((2,1))
q = np.dot(cholQ,cholQ.T)
loglike = kalmanfilter(F,0,H,q,0, y, 0, xi10, ntrain)
loglike = loglike + penalty*np.sum((paramsorig-params)**2)
return loglike
if __name__ == "__main__":
import numpy as np
# Make our observations as in 13.1.13
np.random.seed(54321)
nobs = 600
y = np.zeros(nobs)
rho = [.5, -.25, .35, .25]
    sigma = 2.0 # std dev. of noise
for i in range(4,nobs):
y[i] = np.dot(rho,y[i-4:i][::-1]) + np.random.normal(scale=sigma)
y = y[100:]
# make an MA(2) observation equation as in example 13.3
# y = mu + [1 theta][e_t e_t-1]'
mu = 2.
theta = .8
rho = np.array([1, theta])
    np.random.seed(54321)
e = np.random.randn(101)
y = mu + rho[0]*e[1:]+rho[1]*e[:-1]
# might need to add an axis
r = len(rho)
x = np.ones_like(y)
# For now, assume that F,Q,A,H, and R are known
F = np.array([[0,0],[1,0]])
Q = np.array([[1,0],[0,0]])
A = np.array([mu])
H = rho[:,None]
R = 0
# remember that the goal is to solve recursively for the
# state vector, xi, given the data, y (in this case)
# we can also get a MSE matrix, P, associated with *each* observation
# given that our errors are ~ NID(0,variance)
# the starting E[e(1),e(0)] = [0,0]
xi0 = np.array([[0],[0]])
# with variance = 1 we know that
# P0 = np.eye(2) # really P_{1|0}
# Using the note below
P0 = np.dot(np.linalg.inv(np.eye(r**2)-np.kron(F,F)),Q.ravel('F'))
P0 = np.reshape(P0, (r,r), order='F')
# more generally, if the eigenvalues for F are in the unit circle
# (watch out for rounding error in LAPACK!) then
# the DGP of the state vector is var/cov stationary, we know that
# xi0 = 0
# Furthermore, we could start with
# vec(P0) = np.dot(np.linalg.inv(np.eye(r**2) - np.kron(F,F)),vec(Q))
# where vec(X) = np.ravel(X, order='F') with a possible [:,np.newaxis]
# if you really want a "2-d" array
    # a Fortran (column-major) ordered raveled array
# If instead, some eigenvalues are on or outside the unit circle
# xi0 can be replaced with a best guess and then
# P0 is a positive definite matrix repr the confidence in the guess
# larger diagonal elements signify less confidence
# we also know that y1 = mu
# and MSE(y1) = variance*(1+theta**2) = np.dot(np.dot(H.T,P0),H)
state_vector = [xi0]
forecast_vector = [mu]
MSE_state = [P0] # will be a list of matrices
MSE_forecast = []
# must be numerical shortcuts for some of this...
# this should be general enough to be reused
for i in range(len(y)-1):
# update the state vector
sv = state_vector[i]
P = MSE_state[i]
HTPHR = np.dot(np.dot(H.T,P),H)+R
if np.ndim(HTPHR) < 2: # we have a scalar
HTPHRinv = 1./HTPHR
else:
HTPHRinv = np.linalg.inv(HTPHR)
FPH = np.dot(np.dot(F,P),H)
gain_matrix = np.dot(FPH,HTPHRinv) # correct
new_sv = np.dot(F,sv)
new_sv += np.dot(gain_matrix,y[i] - np.dot(A.T,x[i]) -
np.dot(H.T,sv))
state_vector.append(new_sv)
# update the MSE of the state vector forecast using 13.2.28
new_MSEf = np.dot(np.dot(F - np.dot(gain_matrix,H.T),P),F.T - np.dot(H,
gain_matrix.T)) + np.dot(np.dot(gain_matrix,R),gain_matrix.T) + Q
MSE_state.append(new_MSEf)
# update the in sample forecast of y
forecast_vector.append(np.dot(A.T,x[i+1]) + np.dot(H.T,new_sv))
# update the MSE of the forecast
MSE_forecast.append(np.dot(np.dot(H.T,new_MSEf),H) + R)
MSE_forecast = np.array(MSE_forecast).squeeze()
MSE_state = np.array(MSE_state)
forecast_vector = np.array(forecast_vector)
state_vector = np.array(state_vector).squeeze()
##########
# Luca's example
# choose parameters governing the signal extraction problem
rho = .9
sigma1 = 1
sigma2 = 1
nobs = 100
    # get the state space representation (Hamilton's notation)
F = np.array([[rho, 0],[0, 0]])
cholQ = np.array([[sigma1, 0],[0,sigma2]])
H = np.ones((2,1))
# generate random data
np.random.seed(12345)
xihistory = np.zeros((2,nobs))
for i in range(1,nobs):
xihistory[:,i] = np.dot(F,xihistory[:,i-1]) + \
np.dot(cholQ,np.random.randn(2,1)).squeeze()
# this makes an ARMA process?
# check notes, do the math
y = np.dot(H.T, xihistory)
y = y.T
params = np.array([rho, sigma1, sigma2])
penalty = 1e5
upperbounds = np.array([.999, 100, 100])
lowerbounds = np.array([-.999, .001, .001])
xi10 = xihistory[:,0]
ntrain = 1
bounds = zip(lowerbounds,upperbounds) # if you use fmin_l_bfgs_b
results = optimize.fmin_bfgs(updatematrices, params,
args=(y,xi10,ntrain,penalty,upperbounds,lowerbounds),
gtol = 1e-8, epsilon=1e-10)
# array([ 0.83111567, 1.2695249 , 0.61436685])
F = lambda x : np.array([[x[0],0],[0,0]])
def Q(x):
cholQ = np.array([[x[1],0],[0,x[2]]])
return np.dot(cholQ,cholQ.T)
H = np.ones((2,1))
ssm_model = StateSpaceModel(y) # need to pass in Xi10!
ssm_model.fit_kalman(start_params=params, F=F, Q=Q, H=H,
upperbounds=upperbounds, lowerbounds=lowerbounds)
# why does the above take 3 times as many iterations than direct max?
# compare directly to matlab output
from scipy import io
y_matlab = io.loadmat('./kalman_y.mat')['y'].reshape(-1,1)
ssm_model2 = StateSpaceModel(y_matlab)
ssm_model2.fit_kalman(start_params=params, F=F, Q=Q, H=H,
upperbounds=upperbounds, lowerbounds=lowerbounds)
# matlab output
thetaunc = np.array([0.7833, 1.1688, 0.5584])
np.testing.assert_almost_equal(ssm_model2.params, thetaunc, 4)
# WooHoo!
# maybe add a line search check to make sure we didn't get stuck in a local
# max for more complicated ssm?
|
matthew-brett/draft-statsmodels
|
scikits/statsmodels/sandbox/tsa/kalmanf.py
|
Python
|
bsd-3-clause
| 15,822
|
[
"Gaussian"
] |
fd2645187b15554aa635c485e7fda2013dc15a895dd3098ca6cc2fe33df7f154
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
"""
ltemplify.py
The "ltemplify.py" script can be used to convert existing LAMMPS
input script and data files into a single .lt file
(which includes both topology and force-field information
for a single molecule in your system).
Example:
ltemplify.py -name Mol file.in file.data > mol.lt
This creates a template for a new type of molecule (named "Mol"),
consisting of all the atoms in the lammps files you included,
and saves this data in a single ttree file ("mol.lt").
This file can be used with moltemplate (ttree) to
define large systems containing this molecule.
"""
import sys
try:
from .ttree_lex import *
from .lttree_styles import *
except (SystemError, ValueError):
# not installed as a package
from ttree_lex import *
from lttree_styles import *
g_program_name = __file__.split('/')[-1] # = 'ltemplify.py'
g_version_str = '0.53.1'
g_date_str = '2017-6-08'
def Intify(s):
if s.isdigit():
return int(s)
elif s[0:2] == 'id':
return int(s[2:])
elif s[0:4] == 'type':
return int(s[4:])
else:
return s
def IsNumber(s):
try:
float(s)
return True
except (ValueError, TypeError):
return False
def StringToInterval(sel_str, slice_delim='*'):
# Split a string into 1-3 tokens using the slice_delim and convert to int.
# What a mess. I should rewrite this function
i_slice = sel_str.find(slice_delim)
if i_slice == -1:
a = sel_str
b = sel_str
c = ''
else:
a = sel_str[:i_slice]
bc = sel_str[i_slice + len(slice_delim):]
b = ''
c = ''
i_slice = bc.find(slice_delim)
if i_slice == -1:
b = bc
c = ''
else:
b = bc[:i_slice]
c = bc[i_slice + len(slice_delim):]
if a == '':
a = None
elif a.isdigit():
a = int(a)
else:
raise InputError('Error: invalid selection string \"' +
sel_str + '\"\n')
if b == '':
b = None
elif b.isdigit():
b = int(b)
else:
raise InputError('Error: invalid selection string \"' +
sel_str + '\"\n')
if c == '':
c = None
elif c.isdigit():
c = int(c)
else:
raise InputError('Error: invalid selection string \"' +
sel_str + '\"\n')
if c == None:
return (a, b)
else:
return (a, b, c)
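# Illustrative examples (assuming the default '*' slice delimiter):
#     StringToInterval('6') -> (6, 6)
#     StringToInterval('9*12') -> (9, 12)
#     StringToInterval('50*70*10') -> (50, 70, 10)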
# Selections are simply lists of 2-tuples (pairs)
def LammpsSelectToIntervals(sel_str, slice_delim='*', or_delim=', '):
"""
This function converts a string such as "1*4 6 9*12 50*70*10" into
a list of tuples, for example: [(1,4), (6,6), (9,12), (50,50), (60,60), (70,70)]
    In general, the list of intervals has the form:
[(a1,b1), (a2,b2), (a3,b3), ... ]
An atom is considered to belong to this selection
if it happens to lie within the closed interval [a,b]
for any pair of a,b values in the list of intervals.
If for a given pair a,b, either a or b is "None", then that a or b
value is not used to disqualify membership in the interval.
(Similar to -infinity or +infinity. In other words if a is set to None,
then to belong to the interval it is enough to be less than b.)
"""
selection_list = []
# tokens = sel_str.split(or_delim) <-- Not what we want when
# len(or_delim)>1
tokens = LineLex.TextBlock2Lines(sel_str, or_delim, keep_delim=False)
for token in tokens:
token = token.strip()
interval = StringToInterval(token, slice_delim)
if len(interval) == 2:
# Normally, "interval" should be a tuple containing 2 entries
selection_list.append(interval)
else:
assert(len(interval) == 3)
            # Handle 1000*2000*10 notation
# (corresponding to 1000, 1010, 1020, 1030, ..., 1990, 2000)
a = interval[0]
b = interval[1]
incr = interval[2]
i = a
while i <= b:
selection_list.append((i, i))
i += incr
return selection_list
def IntervalListToMinMax(interval_list):
min_a = None
max_b = None
for (a, b) in interval_list:
if ((not (type(a) is int)) or (not (type(b) is int))):
return None, None # only integer min/max makes sense. otherwise skip
if (min_a == None) or (a < min_a):
min_a = a
if (max_b == None) or (b > max_b):
max_b = b
return min_a, max_b
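# Illustrative example: IntervalListToMinMax([(1, 4), (6, 6), (9, 12)]) returns
# (1, 12); if any endpoint is not an integer the function returns (None, None).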
def MergeIntervals(interval_list):
"""
A crude simple function that merges consecutive intervals in the list
whenever they overlap. (This function does not bother to compare
non-consecutive entries in the interval_list.)
"""
i = 1
while i < len(interval_list):
if ((interval_list[i - 1][1] == None) or
(interval_list[i - 1][1] + 1 >= interval_list[i][0])):
            interval_list[i - 1] = (interval_list[i - 1][0], interval_list[i][1])
del interval_list[i]
else:
i += 1
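# Illustrative example: MergeIntervals modifies its argument in place, so
#     intervals = [(1, 5), (4, 9), (20, 30)]
#     MergeIntervals(intervals)
# leaves intervals == [(1, 9), (20, 30)] (the first two entries overlap).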
def BelongsToSel(i, sel):
if (i == None) or (sel == None) or (len(sel) == 0):
# If the user has not specified a selection for this category,
# then by default all objects are accepted
return True
elif (type(i) is str):
if i.isdigit():
i = int(i)
else:
return True
belongs = False
for interval in sel:
assert(len(interval) == 2)
if interval[0]:
if i >= interval[0]:
if (interval[1] == None) or (i <= interval[1]):
belongs = True
break
elif interval[1]:
if i <= interval[1]:
belongs = True
break
else:
# In that case, the user entered something like "*"
# which covers all possible numbers
belongs = True
break
return belongs
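# Illustrative examples:
#     BelongsToSel(7, [(1, 4), (6, None)]) -> True   (7 lies in [6, +inf))
#     BelongsToSel(5, [(1, 4), (6, None)]) -> False  (5 misses both intervals)
#     BelongsToSel(5, [])                  -> True   (an empty selection accepts everything)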
def main():
try:
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + '\n')
non_empty_output = False
no_warnings = True
indent = 2
cindent = 0
atomid_selection = []
atomtype_selection = []
molid_selection = []
mol_name = ''
min_sel_atomid = None
min_sel_atomtype = None
min_sel_bondid = None
min_sel_bondtype = None
min_sel_angleid = None
min_sel_angletype = None
min_sel_dihedralid = None
min_sel_dihedraltype = None
min_sel_improperid = None
min_sel_impropertype = None
max_sel_atomid = None
max_sel_atomtype = None
max_sel_bondid = None
max_sel_bondtype = None
max_sel_angleid = None
max_sel_angletype = None
max_sel_dihedralid = None
max_sel_dihedraltype = None
max_sel_improperid = None
max_sel_impropertype = None
needed_atomids = set([])
needed_atomtypes = set([])
needed_molids = set([])
needed_bondids = set([])
needed_bondtypes = set([])
needed_angleids = set([])
needed_angletypes = set([])
needed_dihedralids = set([])
needed_dihedraltypes = set([])
needed_improperids = set([])
needed_impropertypes = set([])
min_needed_atomtype = None
max_needed_atomtype = None
min_needed_bondtype = None
max_needed_bondtype = None
min_needed_angletype = None
max_needed_angletype = None
min_needed_dihedraltype = None
max_needed_dihedraltype = None
min_needed_impropertype = None
max_needed_impropertype = None
min_needed_atomid = None
max_needed_atomid = None
min_needed_molid = None
max_needed_molid = None
min_needed_bondid = None
max_needed_bondid = None
min_needed_angleid = None
max_needed_angleid = None
min_needed_dihedralid = None
max_needed_dihedralid = None
min_needed_improperid = None
max_needed_improperid = None
# To process the selections, we need to know the atom style:
atom_style_undefined = True
i_atomid = None
i_atomtype = None
i_molid = None
i_x = None
i_y = None
i_z = None
l_in_init = []
l_in_settings = []
l_in_masses = []
l_in_pair_coeffs = []
l_in_bond_coeffs = []
l_in_angle_coeffs = []
l_in_dihedral_coeffs = []
l_in_improper_coeffs = []
l_in_group = []
l_in_set = []
l_in_set_static = []
l_in_fix_shake = []
l_in_fix_rigid = []
l_in_fix_poems = []
l_in_fix_qeq = []
l_in_fix_qmmm = []
l_data_masses = []
l_data_bond_coeffs = []
l_data_angle_coeffs = []
l_data_dihedral_coeffs = []
l_data_improper_coeffs = []
l_data_pair_coeffs = []
l_data_pairij_coeffs = []
l_data_atoms = []
l_data_velocities = []
l_data_bonds = []
l_data_angles = []
l_data_dihedrals = []
l_data_impropers = []
# class2 force fields
# l_in_bondbond_coeffs = [] <--not needed, included in l_in_angle_coeff
# l_in_bondangle_coeffs = [] <--not needed, included in l_in_angle_coeff
# l_in_middlebondtorsion_coeffs = [] not needed, included in l_in_dihedral_coeff
# l_in_endbondtorsion_coeffs = [] <--not needed, included in l_in_dihedral_coeff
# l_in_angletorsion_coeffs = [] <--not needed, included in l_in_dihedral_coeff
# l_in_angleangletorsion_coeffs = [] not needed, included in l_in_dihedral_coeff
# l_in_bondbond13_coeffs = [] <--not needed, included in l_in_dihedral_coeff
# l_in_angleangle_coeffs = [] <--not needed, included in
# l_in_improper_coeff
l_data_bondbond_coeffs = []
l_data_bondangle_coeffs = []
l_data_middlebondtorsion_coeffs = []
l_data_endbondtorsion_coeffs = []
l_data_angletorsion_coeffs = []
l_data_angleangletorsion_coeffs = []
l_data_bondbond13_coeffs = []
l_data_angleangle_coeffs = []
# non-point-like particles:
l_data_ellipsoids = []
l_data_lines = []
l_data_triangles = []
# automatic generation of bonded interactions by type:
l_data_angles_by_type = []
l_data_dihedrals_by_type = []
l_data_impropers_by_type = []
atoms_already_read = False
some_pair_coeffs_read = False
complained_atom_style_mismatch = False
infer_types_from_comments = False
remove_coeffs_from_data_file = True
argv = [arg for arg in sys.argv]
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-columns':
if i + 1 >= len(argv):
raise InputError('Error: the \"' + argv[i] + '\" argument should be followed by a quoted\n'
                                 ' string which contains a space-delimited list of the names\n'
' of columns in the \"Atoms\" section of the LAMMPS data file.\n'
' If the list contains the symbols:\n'
' \"atom-ID\" or \"atomid\", they are interpreted\n'
' as unique atom ID numbers, and columns named\n'
' \"atom-type\" or \"atomtype\" are interpreted\n'
' as atom types. Finally, columns named\n'
' \"molecule-ID\", \"molecule\", or \"mol-ID\", or \"mol\"\n'
' are interpreted as unique molecule id numbers.\n'
'Example:\n'
' ' +
argv[
i] + ' \'atom-ID atom-type q polarizability molecule-ID x y z\'\n'
' defines a custom atom_style containing the properties\n'
' atom-ID atom-type q polarizability molecule-ID x y z\n'
' Make sure you enclose the entire list in quotes.\n')
column_names = argv[i + 1].strip('\"\'').strip().split()
del argv[i:i + 2]
elif (argv[i] == '-ignore-comments'):
infer_types_from_comments = False
del argv[i:i + 1]
elif (argv[i] == '-infer-comments'):
infer_types_from_comments = True
del argv[i:i + 1]
elif ((argv[i] == '-name') or
(argv[i] == '-molname') or
(argv[i] == '-molecule-name') or
(argv[i] == '-molecule_name')):
if i + 1 >= len(argv):
raise InputError(
                        'Error: ' + argv[i] + ' flag should be followed by a molecule type name.\n')
cindent = 2
indent += cindent
mol_name = argv[i + 1]
del argv[i:i + 2]
elif ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom_style') or
(argv[i].lower() == '-atom-style')):
if i + 1 >= len(argv):
                    raise InputError('Error: ' + argv[i] + ' flag should be followed by an atom_style name.\n'
' (or single quoted string which includes a space-separated\n'
' list of column names).\n')
atom_style_undefined = False
column_names = AtomStyle2ColNames(argv[i + 1])
if (argv[i + 1].strip().split()[0] in g_style_map):
l_in_init.append((' ' * indent) +
'atom_style ' + argv[i + 1] + '\n')
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' ' + (' '.join(column_names)) + '\n')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(
column_names)
# Which columns contain the coordinates?
ii_coords = ColNames2Coords(column_names)
assert(len(ii_coords) == 1)
i_x = ii_coords[0][0]
i_y = ii_coords[0][1]
i_z = ii_coords[0][2]
if i_molid:
sys.stderr.write(' (i_atomid=' + str(i_atomid + 1) + ', i_atomtype=' + str(
i_atomtype + 1) + ', i_molid=' + str(i_molid + 1) + ')\n\n')
else:
sys.stderr.write(' (i_atomid=' + str(i_atomid + 1) +
', i_atomtype=' + str(i_atomtype + 1) + ')\n')
del argv[i:i + 2]
elif ((argv[i].lower() == '-id') or
#(argv[i].lower() == '-a') or
#(argv[i].lower() == '-atoms') or
(argv[i].lower() == '-atomid') or
#(argv[i].lower() == '-atomids') or
(argv[i].lower() == '-atom-id')
#(argv[i].lower() == '-atom-ids') or
#(argv[i].lower() == '-$atom') or
#(argv[i].lower() == '-$atoms')
):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by a list of integers\n'
                                 ' (or strings). These identify the group of atoms you want\n'
' to include in the template you are creating.\n')
atomid_selection += LammpsSelectToIntervals(argv[i + 1])
min_sel_atomid, max_sel_atomid = IntervalListToMinMax(
atomid_selection)
del argv[i:i + 2]
elif ((argv[i].lower() == '-datacoeffs') or
(argv[i].lower() == '-datacoeff') or
(argv[i].lower() == '-Coeff') or
(argv[i].lower() == '-Coeffs')):
remove_coeffs_from_data_file = False
del argv[i:i + 1]
elif ((argv[i].lower() == '-type') or
#(argv[i].lower() == '-t') or
(argv[i].lower() == '-atomtype') or
(argv[i].lower() == '-atom-type')
#(argv[i].lower() == '-atomtypes') or
#(argv[i].lower() == '-atom-types') or
#(argv[i].lower() == '-@atom') or
#(argv[i].lower() == '-@atoms') or
#(argv[i].lower() == '-@atomtype') or
#(argv[i].lower() == '-@atomtypes')
):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by a list of integers.\n'
                                 ' (or strings). These identify the group of atom types you want\n'
' to include in the template you are creating.\n')
atomtype_selection += LammpsSelectToIntervals(argv[i + 1])
min_sel_atomtype, max_sel_atomtype = IntervalListToMinMax(
atomtype_selection)
del argv[i:i + 2]
elif ((argv[i].lower() == '-mol') or
#(argv[i].lower() == '-m') or
(argv[i].lower() == '-molid') or
#(argv[i].lower() == '-molids') or
(argv[i].lower() == '-mol-id') or
#(argv[i].lower() == '-mol-ids') or
#(argv[i].lower() == '-molecule') or
(argv[i].lower() == '-moleculeid') or
(argv[i].lower() == '-molecule-id')
#(argv[i].lower() == '-molecules') or
#(argv[i].lower() == '-molecule-ids') or
#(argv[i].lower() == '-$mol') or
#(argv[i].lower() == '-$molecule')
):
if i + 1 >= len(argv):
sys.stderr.write('Error: ' + argv[i] + ' flag should be followed by a list of integers.\n'
' (or strings). These identify the group of molecules you want to\n'
' include in the template you are creating.\n')
molid_selection += LammpsSelectToIntervals(argv[i + 1])
del argv[i:i + 2]
else:
i += 1
# We might need to parse the simulation boundary-box.
# If so, use these variables. (None means uninitialized.)
boundary_xlo = None
boundary_xhi = None
boundary_ylo = None
boundary_yhi = None
boundary_zlo = None
boundary_zhi = None
boundary_xy = None
boundary_yz = None
boundary_xz = None
# atom type names
atomtypes_name2int = {}
atomtypes_int2name = {}
# atomids_name2int = {} not needed
atomids_int2name = {}
atomids_by_type = {}
if atom_style_undefined:
# The default atom_style is "full"
column_names = AtomStyle2ColNames('full')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
# Which columns contain the coordinates?
ii_coords = ColNames2Coords(column_names)
assert(len(ii_coords) == 1)
i_x = ii_coords[0][0]
i_y = ii_coords[0][1]
i_z = ii_coords[0][2]
#---------------------------------------------------------
#-- The remaining arguments are files that the user wants
#-- us to read and convert. It is typical to have
#-- multiple input files, because LAMMPS users often
#-- store their force field parameters in either the LAMMPS
#-- data files and input script files, or both.
#-- We want to search all of the LAMMPS input files in
#-- order to make sure we extracted all the force field
#-- parameters (coeff commands).
#---------------------------------------------------------
for i_arg in range(1, len(argv)):
fname = argv[i_arg]
try:
lammps_file = open(fname, 'r')
except IOError:
raise InputError('Error: unrecognized argument (\"' + fname + '\"),\n'
' OR unable to open file:\n'
'\n'
' \"' + fname + '\"\n'
' for reading.\n'
'\n'
' (If you were not trying to open a file with this name,\n'
' then there is a problem in your argument list.)\n')
sys.stderr.write('reading file \"' + fname + '\"\n')
atomid2type = {}
atomid2mol = {}
data_file_header_names = set(['LAMMPS Description',
'Atoms', 'Masses', 'Velocities', 'Bonds',
'Angles', 'Dihedrals', 'Impropers',
'Pair Coeffs',
'Bond Coeffs', 'Angle Coeffs',
'Dihedral Coeffs', 'Improper Coeffs',
# class2 force fields:
'BondBond Coeffs', 'BondAngle Coeffs',
'MiddleBondTorsion Coeffs', 'EndBondTorsion Coeffs',
'AngleTorsion Coeffs', 'AngleAngleTorsion Coeffs',
'BondBond13 Coeffs',
'AngleAngle Coeffs',
# non-point-like particles:
'Ellipsoids', 'Triangles', 'Lines',
# specifying bonded interactions by type:
'Angles By Type', 'Dihedrals By Type', 'Impropers By Type'
])
lex = LineLex(lammps_file, fname)
lex.source_triggers = set(['include', 'import'])
# set up lex to accept most characters in file names:
lex.wordterminators = '(){}' + lex.whitespace
# set up lex to understand the "include" statement:
lex.source = 'include'
lex.escape = '\\'
while lex:
infile = lex.infile
lineno = lex.lineno
line = lex.ReadLine()
if (lex.infile != infile):
infile = lex.infile
lineno = lex.lineno
#sys.stderr.write(' processing \"'+line.strip()+'\", (\"'+infile+'\":'+str(lineno)+')\n')
if line == '':
break
tokens = line.strip().split()
if (len(tokens) > 0):
if ((tokens[0] == 'atom_style') and
atom_style_undefined):
sys.stderr.write(
' Atom Style found. Processing: \"' + line.strip() + '\"\n')
if atoms_already_read:
raise InputError('Error: The file containing the \"atom_style\" command must\n'
' come before the data file in the argument list.\n'
' (The templify program needs to know the atom style before reading\n'
' the data file. Either change the order of arguments so that the\n'
' LAMMPS input script file is processed before the data file, or use\n'
' the \"-atom_style\" command line argument to specify the atom_style.)\n')
column_names = AtomStyle2ColNames(line.split()[1])
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(
column_names)
# Which columns contain the coordinates?
ii_coords = ColNames2Coords(column_names)
assert(len(ii_coords) == 1)
i_x = ii_coords[0][0]
i_y = ii_coords[0][1]
i_z = ii_coords[0][2]
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' ' + (' '.join(column_names)) + '\n')
if i_molid:
sys.stderr.write(' (i_atomid=' + str(i_atomid + 1) + ', i_atomtype=' + str(
i_atomtype + 1) + ', i_molid=' + str(i_molid + 1) + ')\n\n')
else:
sys.stderr.write(
' (i_atomid=' + str(i_atomid + 1) + ', i_atomtype=' + str(i_atomtype + 1) + ')\n\n')
l_in_init.append((' ' * indent) + line.lstrip())
elif (tokens[0] in set(['units',
'angle_style',
'bond_style',
'dihedral_style',
'improper_style',
'min_style',
'pair_style',
'pair_modify',
'special_bonds',
'kspace_style',
'kspace_modify'])):
l_in_init.append((' ' * indent) + line.lstrip())
# if (line.strip() == 'LAMMPS Description'):
# sys.stderr.write(' reading \"'+line.strip()+'\"\n')
# # skip over this section
# while lex:
# line = lex.ReadLine()
# if line.strip() in data_file_header_names:
# lex.push_raw_text(line) # <- Save line for later
# break
elif (line.strip() == 'Atoms'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
atoms_already_read = True
# Before attempting to read atomic coordinates, first find
# the lattice vectors of the simulation's boundary box:
# Why do we care about the Simulation Boundary?
# Some LAMMPS data files store atomic coordinates in a
# complex format with 6 numbers, 3 floats, and 3 integers.
# The 3 floats are x,y,z coordinates. Any additional numbers
# following these are integers which tell LAMMPS which cell
# the particle belongs to, (in case it has wandered out of
# the original periodic boundary box). In order to find
# the true location of the particle, we need to offset that
# particle's position with the unit-cell lattice vectors:
# avec, bvec, cvec (or multiples thereof)
# avec, bvec, cvec are the axis of the parallelepiped which
# define the simulation's boundary. They are described here:
# http://lammps.sandia.gov/doc/Section_howto.html#howto-12
if ((boundary_xlo == None) or (boundary_xhi == None) or
(boundary_ylo == None) or (boundary_yhi == None) or
(boundary_zlo == None) or (boundary_zhi == None)):
raise InputError('Error: Either DATA file lacks a boundary-box header, or it is in the wrong\n'
' place. At the beginning of the file, you need to specify the box size:\n'
' xlo xhi ylo yhi zlo zhi (and xy xz yz if triclinic)\n'
' These numbers should appear BEFORE the other sections in the data file\n'
' (such as the \"Atoms\", \"Masses\", \"Bonds\", \"Pair Coeffs\" sections)\n'
'\n'
' Use this format (example):\n'
                                 ' -100.0 100.0 xlo xhi\n'
                                 ' 0.0 200.0 ylo yhi\n'
                                 ' -25.0 50.0 zlo zhi\n'
'\n'
'For details, see http://lammps.sandia.gov/doc/read_data.html\n'
'\n'
' (NOTE: If the atom coordinates are NOT followed by integers, then\n'
' these numbers are all ignored, however you must still specify\n'
' xlo, xhi, ylo, yhi, zlo, zhi. You can set them all to 0.0.)\n')
if not (boundary_xy and boundary_yz and boundary_xz):
# Then use a simple rectangular boundary box:
avec = (boundary_xhi - boundary_xlo, 0.0, 0.0)
bvec = (0.0, boundary_yhi - boundary_ylo, 0.0)
cvec = (0.0, 0.0, boundary_zhi - boundary_zlo)
else:
# Triclinic geometry in LAMMPS is explained here:
# http://lammps.sandia.gov/doc/Section_howto.html#howto-12
# http://lammps.sandia.gov/doc/read_data.html
avec = (boundary_xhi - boundary_xlo, 0.0, 0.0)
bvec = (boundary_xy, boundary_yhi - boundary_ylo, 0.0)
cvec = (boundary_xz, boundary_yz,
boundary_zhi - boundary_zlo)
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if ((len(tokens) <= i_atomid) or
(len(tokens) <= i_atomtype) or
((i_molid != None) and
(len(tokens) <= i_molid))):
raise InputError('Error: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
elif ((len(tokens) != len(column_names)) and
(len(tokens) != len(column_names) + 3) and
(not complained_atom_style_mismatch)):
complained_atom_style_mismatch = True
sys.stderr.write('Warning: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
# this is not a very serious warning.
# no_warnings = False <--no need. commenting
# out
atomid = Intify(tokens[i_atomid])
atomtype = Intify(tokens[i_atomtype])
molid = None
if i_molid:
molid = Intify(tokens[i_molid])
atomid2type[atomid] = atomtype
if i_molid:
atomid2mol[atomid] = molid
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[i_atomid] = '$atom:id' + \
tokens[i_atomid]
#tokens[i_atomid] = '$atom:'+atomids_int2name[atomid]
# fill atomtype_int2str[] with a default name (change later):
#tokens[i_atomtype] = '@atom:type'+tokens[i_atomtype]
atomtype_name = 'type' + tokens[i_atomtype]
atomtypes_int2name[atomtype] = atomtype_name
tokens[i_atomtype] = '@atom:' + atomtype_name
# Interpreting unit-cell counters
# If present, then unit-cell "flags" must be
# added to the x,y,z coordinates.
#
# For more details on unit-cell "flags", see:
# http://lammps.sandia.gov/doc/read_data.html
# "In the data file, atom lines (all lines or
# none of them) can optionally list 3 trailing
# integer values (nx,ny,nz), which are used to
# initialize the atom’s image flags.
# If nx,ny,nz values are not listed in the
# data file, LAMMPS initializes them to 0.
# Note that the image flags are immediately
# updated if an atom’s coordinates need to
# wrapped back into the simulation box."
if (len(tokens) == len(column_names) + 3):
nx = int(tokens[-3])
ny = int(tokens[-2])
nz = int(tokens[-1])
x = float(
tokens[i_x]) + nx * avec[0] + ny * bvec[0] + nz * cvec[0]
y = float(
tokens[i_y]) + nx * avec[1] + ny * bvec[1] + nz * cvec[1]
z = float(
tokens[i_z]) + nx * avec[2] + ny * bvec[2] + nz * cvec[2]
tokens[i_x] = str(x)
tokens[i_y] = str(y)
tokens[i_z] = str(z)
# Now get rid of them:
del tokens[-3:]
# I can't use atomids_int2name or atomtypes_int2name yet
# because they probably have not been defined yet.
# (Instead assign these names in a later pass.)
if i_molid:
tokens[i_molid] = '$mol:id' + \
tokens[i_molid]
l_data_atoms.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
needed_atomids.add(atomid)
needed_atomtypes.add(atomtype)
# Not all atom_styles have molids.
# Check for this before adding.
if molid != None:
needed_molids.add(molid)
for atomtype in needed_atomtypes:
assert(type(atomtype) is int)
if ((min_needed_atomtype == None) or
(min_needed_atomtype > atomtype)):
min_needed_atomtype = atomtype
if ((max_needed_atomtype == None) or
(max_needed_atomtype < atomtype)):
max_needed_atomtype = atomtype
for atomid in needed_atomids:
assert(type(atomid) is int)
if ((min_needed_atomid == None) or
(min_needed_atomid > atomid)):
min_needed_atomid = atomid
if ((max_needed_atomid == None) or
(max_needed_atomid < atomid)):
max_needed_atomid = atomid
for molid in needed_molids:
assert(type(molid) is int)
if ((min_needed_molid == None) or
(min_needed_molid > molid)):
min_needed_molid = molid
if ((max_needed_molid == None) or
(max_needed_molid < molid)):
max_needed_molid = molid
elif (line.strip() == 'Masses'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
# Read the next line of text but don't skip comments
comment_char_backup = lex.commenters
lex.commenters = ''
line = lex.ReadLine()
lex.commenters = comment_char_backup
comment_text = ''
ic = line.find('#')
if ic != -1:
line = line[:ic]
comment_text = line[ic + 1:].strip()
line = line.rstrip()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomtype = Intify(tokens[0])
atomtype_name = str(atomtype)
if comment_text != '':
comment_tokens = comment_text.split()
# Assume the first word after the # is the atom
# type name
atomtype_name = comment_tokens[0]
if BelongsToSel(atomtype, atomtype_selection):
#tokens[0] = '@atom:type'+tokens[0]
l_data_masses.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
# infer atom type names from comment strings?
if infer_types_from_comments:
if atomtype_name in atomtypes_name2int:
raise InputError('Error: duplicate atom type names in mass section: \"' + atomtype_name + '\"\n'
' (By default ' + g_program_name +
' attempts to infer atom type names from\n'
' comments which appear in the \"Masses\" section of your data file.)\n'
' You can avoid this error by adding the \"-ignore-comments\" argument.\n')
atomtypes_name2int[
atomtype_name] = atomtype
atomtypes_int2name[
atomtype] = atomtype_name
else:
atomtypes_int2name[
atomtype] = 'type' + str(atomtype)
elif (line.strip() == 'Velocities'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id' + tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
# NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_velocities.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
# non-point-like-particles:
elif (line.strip() == 'Ellipsoids'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id' + tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
# NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_ellipsoids.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Lines'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id' + tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
# NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_lines.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Triangles'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id' + tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
# NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_triangles.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Bonds'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 4):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical line in Bonds section:\n'
' \"' + line.strip() + '\"\n')
#tokens[0] = '$bond:id'+tokens[0]
#tokens[1] = '@bond:type'+tokens[1]
atomids = [None, None]
atomtypes = [None, None]
molids = [None, None]
in_selections = True
some_in_selection = False
for n in range(0, 2):
atomids[n] = Intify(tokens[2 + n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_bonds.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif some_in_selection:
sys.stderr.write(
'WARNING: SELECTION BREAKS BONDS\n')
sys.stderr.write(
' (between atom ids: ')
for n in range(0, 2):
sys.stderr.write(str(atomids[n]) + ' ')
sys.stderr.write(')\n'
' The atoms you selected are bonded\n'
' to other atoms you didn\'t select.\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Angles'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line == '':
break
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 5):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical line in Angles section:\n'
' \"' + line.strip() + '\"\n')
#tokens[0] = '$angle:id'+tokens[0]
#tokens[1] = '@angle:type'+tokens[1]
atomids = [None, None, None]
atomtypes = [None, None, None]
molids = [None, None, None]
in_selections = True
some_in_selection = False
for n in range(0, 3):
atomids[n] = Intify(tokens[2 + n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_angles.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif some_in_selection:
sys.stderr.write(
'WARNING: SELECTION BREAKS ANGLES\n')
sys.stderr.write(
' (between atom ids: ')
for n in range(0, 3):
sys.stderr.write(str(atomids[n]) + ' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 3-body \"Angle\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Dihedrals'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical line in Dihedrals section:\n'
' \"' + line.strip() + '\"\n')
#tokens[0] = '$dihedral:id'+tokens[0]
#tokens[1] = '@dihedral:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0, 4):
atomids[n] = Intify(tokens[2 + n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_dihedrals.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif some_in_selection:
sys.stderr.write(
'WARNING: SELECTION BREAKS DIHEDRALS\n')
sys.stderr.write(
' (between atom ids: ')
for n in range(0, 4):
sys.stderr.write(str(atomids[n]) + ' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Dihedral\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Impropers'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical line in Impropers section:\n'
' \"' + line.strip() + '\"\n')
#tokens[0] = '$improper:id'+tokens[0]
#tokens[1] = '@improper:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0, 4):
atomids[n] = Intify(tokens[2 + n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_impropers.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif some_in_selection:
sys.stderr.write(
'WARNING: SELECTION BREAKS IMPROPERS\n')
sys.stderr.write(
' (between atom ids: ')
for n in range(0, 4):
sys.stderr.write(str(atomids[n]) + ' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Improper\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Bond Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@bond:type'+tokens[0]
l_data_bond_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Angle Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_angle_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Dihedral Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_dihedral_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Improper Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@improper:type'+tokens[0]
l_data_improper_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Pair Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
some_pair_coeffs_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 2):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical line in Pair Coeffs section:\n'
' \"' + line.strip() + '\"\n')
atomtype_i_str = tokens[0]
if '*' in atomtype_i_str:
raise InputError('PROBLEM near or before ' + ErrorLeader(infile, lineno) + '\n'
                                             ' As of 2015-8, moltemplate forbids use of the \"*\" wildcard\n'
' character in the \"Pair Coeffs\" section.\n')
else:
i = int(atomtype_i_str)
if ((not i) or
BelongsToSel(i, atomtype_selection)):
i_str = '@atom:type' + str(i)
tokens[0] = i_str
l_data_pair_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'PairIJ Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
some_pair_coeffs_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 2):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
                                             ' Nonsensical line in PairIJ Coeffs section:\n'
' \"' + line.strip() + '\"\n')
atomtype_i_str = tokens[0]
atomtype_j_str = tokens[1]
if (('*' in atomtype_i_str) or ('*' in atomtype_j_str)):
raise InputError('PROBLEM near or before ' + ErrorLeader(infile, lineno) + '\n'
                                             ' As of 2015-8, moltemplate forbids use of the \"*\" wildcard\n'
' character in the \"PairIJ Coeffs\" section.\n')
else:
i = int(atomtype_i_str)
j = int(atomtype_j_str)
if (((not i) or BelongsToSel(i, atomtype_selection)) and
((not j) or BelongsToSel(j, atomtype_selection))):
i_str = '@atom:type' + str(i)
j_str = '@atom:type' + str(j)
tokens[0] = i_str
tokens[1] = j_str
                                l_data_pairij_coeffs.append(
                                    (' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[0] == 'pair_coeff'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical pair_coeff command:\n'
' \"' + line.strip() + '\"\n')
l_in_pair_coeffs.append(' ' * indent + line.strip())
elif (tokens[0] == 'mass'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical \"mass\" command:\n'
' \"' + line.strip() + '\"\n')
l_in_masses.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[0] == 'bond_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical bond_coeff command:\n'
' \"' + line.strip() + '\"\n')
#tokens[1] = '@bond:type'+tokens[1]
l_in_bond_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[0] == 'angle_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical angle_coeff command:\n'
' \"' + line.strip() + '\"\n')
#tokens[1] = '@angle:type'+tokens[1]
l_in_angle_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[0] == 'dihedral_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical dihedral_coeff command:\n'
' \"' + line.strip() + '\"\n')
#tokens[1] = '@dihedral:type'+tokens[1]
l_in_dihedral_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[0] == 'improper_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical improper_coeff command:\n'
' \"' + line.strip() + '\"\n')
#tokens[1] = '@improper:type'+tokens[1]
l_in_improper_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
# -- class2 force fields --
elif (line.strip() == 'BondBond Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_bondbond_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'BondAngle Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_bondangle_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'MiddleBondTorsion Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_middlebondtorsion_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'EndBondTorsion Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_endbondtorsion_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'AngleTorsion Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_angletorsion_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'AngleAngleTorsion Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_angleangletorsion_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'BondBond13 Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_bondbond13_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'AngleAngle Coeffs'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@improper:type'+tokens[0]
l_data_angleangle_coeffs.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Angles By Type'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type' + tokens[0]
l_data_angles_by_type.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Dihedrals By Type'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type' + tokens[0]
l_data_dihedrals_by_type.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (line.strip() == 'Impropers By Type'):
sys.stderr.write(' reading \"' + line.strip() + '\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@improper:type' + tokens[0]
l_data_impropers_by_type.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
# Figure out the size of the simulation box boundary:
elif ((len(tokens) == 4) and
(tokens[2] == 'xlo') and
(tokens[3] == 'xhi') and
IsNumber(tokens[0]) and
IsNumber(tokens[1])):
boundary_xlo = float(tokens[0])
boundary_xhi = float(tokens[1])
elif ((len(tokens) == 4) and
(tokens[2] == 'ylo') and
(tokens[3] == 'yhi') and
IsNumber(tokens[0]) and
IsNumber(tokens[1])):
boundary_ylo = float(tokens[0])
boundary_yhi = float(tokens[1])
elif ((len(tokens) == 4) and
(tokens[2] == 'zlo') and
(tokens[3] == 'zhi') and
IsNumber(tokens[0]) and
IsNumber(tokens[1])):
boundary_zlo = float(tokens[0])
boundary_zhi = float(tokens[1])
elif ((len(tokens) == 6) and
(tokens[3] == 'xy') and
(tokens[4] == 'xz') and
(tokens[5] == 'yz') and
IsNumber(tokens[0]) and
IsNumber(tokens[1]) and
IsNumber(tokens[2])):
boundary_xy = float(tokens[0])
boundary_xz = float(tokens[1])
boundary_yz = float(tokens[2])
elif (tokens[0] == 'group'):
if (len(tokens) < 3):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical group command:\n'
' \"' + line.strip() + '\"\n')
l_in_group.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[0] == 'set'):
if (len(tokens) < 3):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical set command:\n'
' \"' + line.strip() + '\"\n')
l_in_set.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif ((tokens[0] == 'fix') and (len(tokens) >= 4)):
if (tokens[3].find('rigid') == 0):
if (len(tokens) < 6):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical ' +
tokens[0] + ' ' +
tokens[3] + ' command:\n'
' \"' + line.strip() + '\"\n')
l_in_fix_rigid.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[3].find('shake') == 0):
if (len(tokens) < 7):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical ' +
tokens[0] + ' ' +
tokens[3] + ' command:\n'
' \"' + line.strip() + '\"\n')
l_in_fix_shake.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[3].find('poems') == 0):
if (len(tokens) < 4):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical ' +
tokens[0] + ' ' +
tokens[3] + ' command:\n'
' \"' + line.strip() + '\"\n')
l_in_fix_poems.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[3].find('qeq') == 0):
if (len(tokens) < 8):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical ' +
tokens[0] + ' ' +
tokens[3] + ' command:\n'
' \"' + line.strip() + '\"\n')
l_in_fix_qeq.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[3].find('qmmm') == 0):
if (len(tokens) < 8):
raise InputError('Error: near or before ' + ErrorLeader(infile, lineno) + '\n'
' Nonsensical ' +
tokens[0] + ' ' +
tokens[3] + ' command:\n'
' \"' + line.strip() + '\"\n')
l_in_fix_qmmm.append(
(' ' * indent) + (' '.join(tokens) + '\n'))
elif (tokens[3].find('restrain') == 0):
                    sys.stderr.write('WARNING: fix \"' + tokens[3] + '\" commands are NOT understood by ' + g_program_name + '.\n'
                                     ' If you need restraints, add them to your final .LT file (eg. \"system.lt\"),\n'
                                     ' (And be sure to use unique (full, long) moltemplate names for each $atom:.)\n'
                                     ' Ignoring line \"' + line.strip() + '\"\n')
else:
sys.stderr.write(' Ignoring line \"' +
line.strip() + '\"\n')
sys.stderr.write('\n\n')
sys.stderr.write(' processing \"Atoms\" section (')
# post-processing:
if len(l_data_masses) == 0:
infer_types_from_comments = False
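    # Overview of the three passes below:
    #   pass 1: give every atom id a name derived from its (inferred) atom
    #           type name plus a per-type counter, or simply "idN" when type
    #           names are not being inferred from comments;
    #   pass 2: if an atom type occurs only once, drop the counter and reuse
    #           the bare type name as that atom's name;
    #   pass 3: substitute the final "$atom:" and "@atom:" names back into
    #           the lines stored in l_data_atoms.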
# Pass 1 through l_data_atoms:
    # Now do a second pass through the "l_data_atoms" section, and
    # finish dealing with "infer_types_from_comments".
    # During this pass, replace the atomtype names and atomid names with
    # atom type names which were inferred from comments read earlier.
sys.stderr.write('pass1')
for i in range(0, len(l_data_atoms)):
tokens = l_data_atoms[i].split()
atomid = tokens[i_atomid]
if atomid.find('$atom:') == 0:
atomid = atomid[6:]
# convert to an integer
atomid = Intify(atomid)
if infer_types_from_comments:
atomtype = tokens[i_atomtype]
# remove the "@atom:" prefix (we will put it back later)
if atomtype.find('@atom:') == 0:
atomtype = atomtype[6:]
# convert to an integer
atomtype = Intify(atomtype)
atomtype_name = atomtypes_int2name[atomtype]
if atomtype in atomids_by_type:
l_atomids = atomids_by_type[atomtype]
prev_count = len(l_atomids)
# lookup the most recently added atom of this type:
#prev_atomid_name = l_atomids[-1]
#ic = prev_atomid_name.rfind('_')
#prev_count = int(prev_atomid_name[ic+1:])
atomid_name = atomtype_name + '_' + str(prev_count + 1)
atomids_by_type[atomtype].append(atomid)
else:
atomids_by_type[atomtype] = [atomid]
atomid_name = atomtype_name + '_1'
atomids_int2name[atomid] = atomid_name
#atomids_name2str[atomid_name] = atomid
else:
atomids_int2name[atomid] = 'id' + str(atomid)
sys.stderr.write(', pass2')
# Pass 2: If any atom types only appear once, simplify their atomid names.
for i in range(0, len(l_data_atoms)):
tokens = l_data_atoms[i].split()
# remove the "@atom:" prefix (we will put it back later)
atomtype = tokens[i_atomtype]
if atomtype.find('@atom:') == 0:
atomtype = atomtype[6:]
atomtype = Intify(atomtype)
if infer_types_from_comments:
if len(atomids_by_type[atomtype]) == 1:
atomid = tokens[i_atomid]
if atomid.find('$atom:') == 0:
atomid = atomid[6:]
atomid = Intify(atomid)
atomtype_name = atomtypes_int2name[atomtype]
atomids_int2name[atomid] = atomtype_name
sys.stderr.write(', pass3')
# Pass 3: substitute the atomid names and atom type names into l_data_atoms
for i in range(0, len(l_data_atoms)):
tokens = l_data_atoms[i].split()
atomid = tokens[i_atomid]
if atomid.find('$atom:') == 0:
atomid = atomid[6:]
# convert to an integer
atomid = Intify(atomid)
atomtype = tokens[i_atomtype]
if atomtype.find('@atom:') == 0:
atomtype = atomtype[6:]
atomtype = Intify(atomtype)
tokens = l_data_atoms[i].split()
tokens[i_atomid] = '$atom:' + atomids_int2name[atomid]
tokens[i_atomtype] = '@atom:' + atomtypes_int2name[atomtype]
l_data_atoms[i] = (' ' * indent) + (' '.join(tokens) + '\n')
sys.stderr.write(')\n')
if len(l_data_atoms) == 0:
        raise InputError('Error(' + g_program_name + '): You have no atoms in your selection!\n'
'\n'
' Either you have chosen a set of atoms, molecules, or atom types which\n'
' does not exist, or there is a problem with (the format of) your\n'
' arguments. Check the documentation and examples.\n')
# --- Now delete items that were not selected from the other lists ---
# --- MASSES ---
# delete masses for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_masses):
line = l_data_masses[i_line]
tokens = line.strip().split()
atomtype = Intify(tokens[0])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del l_data_masses[i_line]
else:
atomtype_name = atomtypes_int2name[atomtype]
tokens[0] = '@atom:' + atomtype_name
l_data_masses[i_line] = (' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- PAIR COEFFS ---
# delete data_pair_coeffs for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_pair_coeffs):
line = l_data_pair_coeffs[i_line]
tokens = line.strip().split()
assert(len(tokens) > 0)
split_colon = tokens[0].split(':')
assert(len(split_colon) == 2)
atomtype = Intify(split_colon[1])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del l_data_pair_coeffs[i_line]
else:
i_line += 1
# delete data_pairij_coeffs for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_pairij_coeffs):
line = l_data_pairij_coeffs[i_line]
tokens = line.strip().split()
assert(len(tokens) > 0)
split_colon_I = tokens[0].split(':')
assert(len(split_colon_I) == 2)
atomtype_I = Intify(split_colon_I[1])
split_colon_J = tokens[1].split(':')
assert(len(split_colon_J) == 2)
atomtype_J = Intify(split_colon_J[1])
if (((not (atomtype_I in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype_I, atomtype_selection))))
or
((not (atomtype_J in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype_J, atomtype_selection))))):
del l_data_pairij_coeffs[i_line]
else:
i_line += 1
# delete in_pair_coeffs for atom we don't care about anymore:
i_line = 0
while i_line < len(l_in_pair_coeffs):
line = l_in_pair_coeffs[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
atomtype_j_str = tokens[2]
# if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
# ' nor to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
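        # The two atom-type arguments of a pair_coeff command may contain "*"
        # wildcards (eg "2*5", "*7", or just "*").  Convert each argument into
        # an explicit [i_a, i_b] interval, falling back to the smallest/largest
        # selected or needed atom type when an endpoint is omitted.  Below, the
        # interval is clipped to the atom types we actually kept, and wildcard
        # commands are expanded into one pair_coeff line per surviving i,j pair.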
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b + 1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b + 1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
# if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
if ('*' in atomtype_j_str):
atomtype_j_tokens = atomtype_j_str.split('*')
if atomtype_j_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
j_a = min_sel_atomtype
else:
j_a = min_needed_atomtype
else:
j_a = Intify(atomtype_j_tokens[0])
if atomtype_j_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
j_b = max_sel_atomtype
else:
j_b = max_needed_atomtype
else:
j_b = Intify(atomtype_j_tokens[1])
else:
j_a = j_b = Intify(atomtype_j_str)
j_a_final = None
j_b_final = None
for j in range(j_a, j_b + 1):
if ((j in needed_atomtypes) or (min_sel_atomtype <= j)):
j_a_final = j
break
for j in reversed(range(j_a, j_b + 1)):
if ((j in needed_atomtypes) or (max_sel_atomtype >= j)):
j_b_final = j
break
# if j_a_final and j_b_final:
# if j_a_final == j_b_final:
# j_str = '@atom:type'+str(j_a_final)
# tokens[1] = j_str
# else:
# j_str = '@{atom:type'+str(j_a_final)+'}*@{atom:type'+str(j_b_final)+'}'
if not (i_a_final and i_b_final and j_a_final and j_b_final):
del l_in_pair_coeffs[i_line]
elif (('*' in atomtype_i_str) or ('*' in atomtype_j_str)):
del l_in_pair_coeffs[i_line]
for i in range(i_a_final, i_b_final + 1):
for j in range(j_a_final, j_b_final + 1):
if j >= i:
#tokens[1] = '@atom:type'+str(i)
#tokens[2] = '@atom:type'+str(j)
tokens[1] = '@atom:' + atomtypes_int2name[i]
tokens[2] = '@atom:' + atomtypes_int2name[j]
l_in_pair_coeffs.insert(i_line,
(' ' * indent) + (' '.join(tokens) + '\n'))
i_line += 1
else:
#tokens[1] = '@atom:type'+tokens[1]
#tokens[2] = '@atom:type'+tokens[2]
tokens[1] = '@atom:' + atomtypes_int2name[int(tokens[1])]
tokens[2] = '@atom:' + atomtypes_int2name[int(tokens[2])]
l_in_pair_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# delete mass commands for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_in_masses):
line = l_in_masses[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
# if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
# ' nor to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
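        # Apply the same "*" wildcard handling to the mass commands: work out
        # the [i_a, i_b] range of atom types the command refers to, then emit
        # one "mass" command per atom type that survives the selection, using
        # the "@atom:" names assigned earlier.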
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b + 1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b + 1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
# if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
        if not (i_a_final and i_b_final):
del l_in_masses[i_line]
elif ('*' in atomtype_i_str):
del l_in_masses[i_line]
for i in range(i_a_final, i_b_final + 1):
#tokens[1] = '@atom:type'+str(i)
tokens[1] = '@atom:' + atomtypes_int2name[i]
# CONTINUEHERE: CHECK THAT THIS IS WORKING
l_in_masses.insert(i_line, (' ' * indent) +
(' '.join(tokens) + '\n'))
i_line += 1
else:
assert(i_a == i_b)
#tokens[1] = '@atom:type'+str(i_a)
tokens[1] = '@atom:' + atomtypes_int2name[i_a]
# CONTINUEHERE: CHECK THAT THIS IS WORKING
l_in_masses[i_line] = (' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- BONDS AND BOND COEFFS ---
# delete lines from data_bonds if they involve atoms we don't care about
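    # (Bonds whose atoms fell outside the selection were already discarded
    # when the "Bonds" section was read, so the commented-out check below is
    # redundant.  Every remaining bond is rewritten using the atom names
    # generated above, and its id and type are recorded so that unused bond
    # coefficients can be pruned afterwards.)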
i_line = 0
while i_line < len(l_data_bonds):
line = l_data_bonds[i_line]
tokens = line.strip().split()
assert(len(tokens) == 4)
bondid = Intify(tokens[0])
bondtype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
# if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$bond:id' + str(bondid)
tokens[1] = '@bond:type' + str(bondtype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
tokens[2] = '$atom:' + atomids_int2name[atomid1]
tokens[3] = '$atom:' + atomids_int2name[atomid2]
needed_bondids.add(bondid)
needed_bondtypes.add(bondtype)
l_data_bonds[i_line] = (' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# else:
# del l_data_bonds[i_line]
# delete data_bond_coeffs for bondtypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bond_coeffs):
line = l_data_bond_coeffs[i_line]
tokens = line.strip().split()
bondtype = Intify(tokens[0])
if (not (bondtype in needed_bondtypes)):
del l_data_bond_coeffs[i_line]
else:
tokens[0] = '@bond:type' + str(bondtype)
l_data_bond_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# delete in_bond_coeffs for bondtypes we don't care about anymore:
for bondtype in needed_bondtypes:
assert(type(bondtype) is int)
if ((min_needed_bondtype == None) or
(min_needed_bondtype > bondtype)):
min_needed_bondtype = bondtype
if ((max_needed_bondtype == None) or
(max_needed_bondtype < bondtype)):
max_needed_bondtype = bondtype
for bondid in needed_bondids:
assert(type(bondid) is int)
if ((min_needed_bondid == None) or
(min_needed_bondid > bondid)):
min_needed_bondid = bondid
if ((max_needed_bondid == None) or
(max_needed_bondid < bondid)):
max_needed_bondid = bondid
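    # bond_coeff commands may specify a range of bond types using "*"
    # (eg "bond_coeff 2*4 ...").  Expand such a command into one command per
    # needed bond type within the (clipped) range; a command with an explicit
    # type is kept only if that type is still needed.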
i_line = 0
while i_line < len(l_in_bond_coeffs):
line = l_in_bond_coeffs[i_line]
tokens = line.strip().split()
bondtype_str = tokens[1]
if ('*' in bondtype_str):
bondtype_tokens = bondtype_str.split('*')
if bondtype_tokens[0] == '':
i_a = min_needed_bondtype
else:
i_a = Intify(bondtype_tokens[0])
if bondtype_tokens[1] == '':
i_b = max_needed_bondtype
else:
i_b = Intify(bondtype_tokens[1])
else:
i_a = Intify(bondtype_str)
i_b = i_a
if i_a < min_needed_bondtype:
i_a = min_needed_bondtype
if i_b > max_needed_bondtype:
i_b = max_needed_bondtype
# if i_a == i_b:
# i_str = '@bond:type'+str(i_a)
# tokens[1] = i_str
# else:
# i_str = '@{bond:type'+str(j_a)+'}*@{bond:type'+str(j_b)+'}'
if ('*' in bondtype_str):
del l_in_bond_coeffs[i_line]
for i in range(i_a, i_b + 1):
if (i in needed_bondtypes):
tokens[1] = '@bond:type' + str(i)
l_in_bond_coeffs.insert(i_line,
(' ' * indent) + (' '.join(tokens) + '\n'))
i_line += 1
else:
if i_a < i_b:
raise InputError('Error: number of bond types in data file is not consistent with the\n'
                                 ' number of bond types you have defined bond_coeffs for.\n')
if (i_a == i_b) and (i_a in needed_bondtypes):
tokens[1] = '@bond:type' + str(i_a)
l_in_bond_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
else:
del l_in_bond_coeffs[i_line]
# --- ANGLES AND ANGLE COEFFS ---
# delete lines from data_angles if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_angles):
line = l_data_angles[i_line]
tokens = line.strip().split()
assert(len(tokens) == 5)
angleid = Intify(tokens[0])
angletype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
# if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$angle:id' + str(angleid)
tokens[1] = '@angle:type' + str(angletype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
#tokens[4] = '$atom:id'+str(atomid3)
tokens[2] = '$atom:' + atomids_int2name[atomid1]
tokens[3] = '$atom:' + atomids_int2name[atomid2]
tokens[4] = '$atom:' + atomids_int2name[atomid3]
needed_angleids.add(angleid)
needed_angletypes.add(angletype)
l_data_angles[i_line] = (' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# else:
# del l_data_angles[i_line]
# delete data_angle_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_angle_coeffs):
line = l_data_angle_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del l_data_angle_coeffs[i_line]
else:
tokens[0] = '@angle:type' + str(angletype)
l_data_angle_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- class2specific ----
# Do the same for BondBond and BondAngle Coeffs:
    # NOTE: IN LAMMPS INPUT SCRIPTS, ALL CLASS2 COEFFS are represented by:
# angle_coeff, dihedral_coeff, and improper_coeff commands.
# THERE ARE NO bondbond_coeff commands, or bondangle_coeff commands,
    #       etc..., so we don't have to worry about l_in_bondbond_coeffs,...
# delete data_bondbond_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bondbond_coeffs):
line = l_data_bondbond_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del l_data_bondbond_coeffs[i_line]
else:
tokens[0] = '@angle:type' + str(angletype)
l_data_bondbond_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# delete data_bondangle_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bondangle_coeffs):
line = l_data_bondangle_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del l_data_bondangle_coeffs[i_line]
else:
tokens[0] = '@angle:type' + str(angletype)
l_data_bondangle_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- end of class2specific ----
# delete in_angle_coeffs for angletypes we don't care about anymore:
for angletype in needed_angletypes:
assert(type(angletype) is int)
if ((min_needed_angletype == None) or
(min_needed_angletype > angletype)):
min_needed_angletype = angletype
if ((max_needed_angletype == None) or
(max_needed_angletype < angletype)):
max_needed_angletype = angletype
for angleid in needed_angleids:
assert(type(angleid) is int)
if ((min_needed_angleid == None) or
(min_needed_angleid > angleid)):
min_needed_angleid = angleid
if ((max_needed_angleid == None) or
(max_needed_angleid < angleid)):
max_needed_angleid = angleid
i_line = 0
while i_line < len(l_in_angle_coeffs):
line = l_in_angle_coeffs[i_line]
tokens = line.strip().split()
angletype_str = tokens[1]
if ('*' in angletype_str):
angletype_tokens = angletype_str.split('*')
if angletype_tokens[0] == '':
i_a = min_needed_angletype
else:
i_a = Intify(angletype_tokens[0])
if angletype_tokens[1] == '':
i_b = max_needed_angletype
else:
i_b = Intify(angletype_tokens[1])
else:
i_a = i_b = Intify(angletype_str)
if i_a < min_needed_angletype:
i_a = min_needed_angletype
if i_b > max_needed_angletype:
i_b = max_needed_angletype
# if i_a == i_b:
# i_str = '@angle:type'+str(i_a)
# tokens[1] = i_str
# else:
# i_str = '@{angle:type'+str(j_a)+'}*@{angle:type'+str(j_b)+'}'
if ('*' in angletype_str):
del l_in_angle_coeffs[i_line]
for i in range(i_a, i_b + 1):
if (i in needed_angletypes):
tokens[1] = '@angle:type' + str(i)
l_in_angle_coeffs.insert(i_line,
(' ' * indent) + (' '.join(tokens) + '\n'))
i_line += 1
else:
if i_a < i_b:
raise InputError('Error: number of angle types in data file is not consistent with the\n'
                                 ' number of angle types you have defined angle_coeffs for.\n')
if (i_a == i_b) and (i_a in needed_angletypes):
tokens[1] = '@angle:type' + str(i_a)
l_in_angle_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
else:
del l_in_angle_coeffs[i_line]
# --- DIHEDRALS AND DIHEDRAL COEFFS ---
# delete lines from data_dihedrals if they involve atoms we don't care
# about
i_line = 0
while i_line < len(l_data_dihedrals):
line = l_data_dihedrals[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
dihedralid = Intify(tokens[0])
dihedraltype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
# if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$dihedral:id' + str(dihedralid)
tokens[1] = '@dihedral:type' + str(dihedraltype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
#tokens[4] = '$atom:id'+str(atomid3)
#tokens[5] = '$atom:id'+str(atomid4)
tokens[2] = '$atom:' + atomids_int2name[atomid1]
tokens[3] = '$atom:' + atomids_int2name[atomid2]
tokens[4] = '$atom:' + atomids_int2name[atomid3]
tokens[5] = '$atom:' + atomids_int2name[atomid4]
needed_dihedralids.add(dihedralid)
needed_dihedraltypes.add(dihedraltype)
l_data_dihedrals[i_line] = (' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# else:
# del l_data_dihedrals[i_line]
# delete data_dihedral_coeffs for dihedraltypes we don't care about
# anymore:
i_line = 0
while i_line < len(l_data_dihedral_coeffs):
line = l_data_dihedral_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_dihedral_coeffs[i_line]
else:
tokens[0] = '@dihedral:type' + str(dihedraltype)
l_data_dihedral_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- class2specific ----
# Do the same for MiddleBondTorsion, EndBondTorsion, AngleTorsion,
# AngleAngleTorsion, and BondBond13 Coeffs
    # NOTE: IN LAMMPS INPUT SCRIPTS, ALL CLASS2 COEFFS are represented by:
# angle_coeff, dihedral_coeff, and improper_coeff commands.
# THERE ARE NO "middlebondtorsion_coeff" commands, etc...so we don't
# have to worry about dealing with "l_in_middlebondtorsion_coeffs",...
# delete data_middlebondtorsion_coeffs for dihedraltypes we don't care
# about anymore:
i_line = 0
while i_line < len(l_data_middlebondtorsion_coeffs):
line = l_data_middlebondtorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_middlebondtorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type' + str(dihedraltype)
l_data_middlebondtorsion_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# delete data_endbondtorsion_coeffs for dihedraltypes we don't care about
# anymore:
i_line = 0
while i_line < len(l_data_endbondtorsion_coeffs):
line = l_data_endbondtorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_endbondtorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type' + str(dihedraltype)
l_data_endbondtorsion_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# delete data_angletorsion_coeffs for dihedraltypes we don't care about
# anymore:
i_line = 0
while i_line < len(l_data_angletorsion_coeffs):
line = l_data_angletorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_angletorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type' + str(dihedraltype)
l_data_angletorsion_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# delete data_angleangletorsion_coeffs for dihedraltypes we don't care
# about anymore:
i_line = 0
while i_line < len(l_data_angleangletorsion_coeffs):
line = l_data_angleangletorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_angleangletorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type' + str(dihedraltype)
l_data_angleangletorsion_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# delete data_bondbond13_coeffs for dihedraltypes we don't care about
# anymore:
i_line = 0
while i_line < len(l_data_bondbond13_coeffs):
line = l_data_bondbond13_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_bondbond13_coeffs[i_line]
else:
tokens[0] = '@dihedral:type' + str(dihedraltype)
l_data_bondbond13_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- end of class2specific ----
# delete in_dihedral_coeffs for dihedraltypes we don't care about anymore:
for dihedraltype in needed_dihedraltypes:
assert(type(dihedraltype) is int)
if ((min_needed_dihedraltype == None) or
(min_needed_dihedraltype > dihedraltype)):
min_needed_dihedraltype = dihedraltype
if ((max_needed_dihedraltype == None) or
(max_needed_dihedraltype < dihedraltype)):
max_needed_dihedraltype = dihedraltype
for dihedralid in needed_dihedralids:
assert(type(dihedralid) is int)
if ((min_needed_dihedralid == None) or
(min_needed_dihedralid > dihedralid)):
min_needed_dihedralid = dihedralid
if ((max_needed_dihedralid == None) or
(max_needed_dihedralid < dihedralid)):
max_needed_dihedralid = dihedralid
i_line = 0
while i_line < len(l_in_dihedral_coeffs):
line = l_in_dihedral_coeffs[i_line]
tokens = line.strip().split()
dihedraltype_str = tokens[1]
if ('*' in dihedraltype_str):
dihedraltype_tokens = dihedraltype_str.split('*')
if dihedraltype_tokens[0] == '':
i_a = min_needed_dihedraltype
else:
i_a = Intify(dihedraltype_tokens[0])
if dihedraltype_tokens[1] == '':
i_b = max_needed_dihedraltype
else:
i_b = Intify(dihedraltype_tokens[1])
else:
i_a = i_b = Intify(dihedraltype_str)
if i_a < min_needed_dihedraltype:
i_a = min_needed_dihedraltype
if i_b > max_needed_dihedraltype:
i_b = max_needed_dihedraltype
# if i_a == i_b:
# i_str = '@dihedral:type'+str(i_a)
# tokens[1] = i_str
# else:
# i_str = '@{dihedral:type'+str(j_a)+'}*@{dihedral:type'+str(j_b)+'}'
if ('*' in dihedraltype_str):
del l_in_dihedral_coeffs[i_line]
for i in range(i_a, i_b + 1):
if (i in needed_dihedraltypes):
tokens[1] = '@dihedral:type' + str(i)
l_in_dihedral_coeffs.insert(i_line,
(' ' * indent) + (' '.join(tokens) + '\n'))
i_line += 1
else:
if i_a < i_b:
raise InputError('Error: number of dihedral types in data file is not consistent with the\n'
                                 ' number of dihedral types you have defined dihedral_coeffs for.\n')
if (i_a == i_b) and (i_a in needed_dihedraltypes):
tokens[1] = '@dihedral:type' + str(i_a)
l_in_dihedral_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
else:
del l_in_dihedral_coeffs[i_line]
# --- IMPROPERS AND IMPROPER COEFFS ---
# delete lines from data_impropers if they involve atoms we don't care
# about
i_line = 0
while i_line < len(l_data_impropers):
line = l_data_impropers[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
improperid = Intify(tokens[0])
impropertype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
# if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$improper:id' + str(improperid)
tokens[1] = '@improper:type' + str(impropertype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
#tokens[4] = '$atom:id'+str(atomid3)
#tokens[5] = '$atom:id'+str(atomid4)
tokens[2] = '$atom:' + atomids_int2name[atomid1]
tokens[3] = '$atom:' + atomids_int2name[atomid2]
tokens[4] = '$atom:' + atomids_int2name[atomid3]
tokens[5] = '$atom:' + atomids_int2name[atomid4]
needed_improperids.add(improperid)
needed_impropertypes.add(impropertype)
l_data_impropers[i_line] = (' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# else:
# del l_data_impropers[i_line]
# delete data_improper_coeffs for impropertypes we don't care about
# anymore:
i_line = 0
while i_line < len(l_data_improper_coeffs):
line = l_data_improper_coeffs[i_line]
tokens = line.strip().split()
impropertype = Intify(tokens[0])
if (not (impropertype in needed_impropertypes)):
del l_data_improper_coeffs[i_line]
else:
tokens[0] = '@improper:type' + str(impropertype)
l_data_improper_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- class2specific ----
# Do the same for AngleAngle Coeffs
    # NOTE: IN LAMMPS INPUT SCRIPTS, ALL CLASS2 COEFFS are represented by:
# angle_coeff, dihedral_coeff, and improper_coeff commands.
# THERE ARE NO "angleangle_coeff" commands, etc...so we don't
# have to worry about dealing with "l_in_angleangle_coeffs",...
# delete data_middlebondtorsion_coeffs for dihedraltypes we don't care about anymore:
# delete data_angleangle_coeffs for impropertypes we don't care about
# anymore:
i_line = 0
while i_line < len(l_data_angleangle_coeffs):
line = l_data_angleangle_coeffs[i_line]
tokens = line.strip().split()
impropertype = Intify(tokens[0])
if (not (impropertype in needed_impropertypes)):
del l_data_angleangle_coeffs[i_line]
else:
tokens[0] = '@improper:type' + str(impropertype)
l_data_angleangle_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
# --- end of class2specific ----
# delete in_improper_coeffs for impropertypes we don't care about anymore:
for impropertype in needed_impropertypes:
assert(type(impropertype) is int)
if ((min_needed_impropertype == None) or
(min_needed_impropertype > impropertype)):
min_needed_impropertype = impropertype
if ((max_needed_impropertype == None) or
(max_needed_impropertype < impropertype)):
max_needed_impropertype = impropertype
for improperid in needed_improperids:
assert(type(improperid) is int)
if ((min_needed_improperid == None) or
(min_needed_improperid > improperid)):
min_needed_improperid = improperid
if ((max_needed_improperid == None) or
(max_needed_improperid < improperid)):
max_needed_improperid = improperid
i_line = 0
while i_line < len(l_in_improper_coeffs):
line = l_in_improper_coeffs[i_line]
tokens = line.strip().split()
impropertype_str = tokens[1]
if ('*' in impropertype_str):
impropertype_tokens = impropertype_str.split('*')
if impropertype_tokens[0] == '':
i_a = min_needed_impropertype
else:
i_a = Intify(impropertype_tokens[0])
if impropertype_tokens[1] == '':
i_b = max_needed_impropertype
else:
i_b = Intify(impropertype_tokens[1])
else:
i_a = i_b = Intify(impropertype_str)
if i_a < min_needed_impropertype:
i_a = min_needed_impropertype
if i_b > max_needed_impropertype:
i_b = max_needed_impropertype
# if i_a == i_b:
# i_str = '@improper:type'+str(i_a)
# tokens[1] = i_str
# else:
# i_str = '@{improper:type'+str(j_a)+'}*@{improper:type'+str(j_b)+'}'
if ('*' in impropertype_str):
del l_in_improper_coeffs[i_line]
for i in range(i_a, i_b + 1):
if (i in needed_impropertypes):
tokens[1] = '@improper:type' + str(i)
l_in_improper_coeffs.insert(i_line,
(' ' * indent) + (' '.join(tokens) + '\n'))
i_line += 1
else:
if i_a < i_b:
raise InputError('Error: number of improper types in data file is not consistent with the\n'
                                 ' number of improper types you have defined improper_coeffs for.\n')
if (i_a == i_b) and (i_a in needed_impropertypes):
tokens[1] = '@improper:type' + str(i_a)
l_in_improper_coeffs[i_line] = (
' ' * indent) + (' '.join(tokens) + '\n')
i_line += 1
else:
del l_in_improper_coeffs[i_line]
# --- GROUPS ---
# Now parse through all of the "group" commands and try and figure
# out if any of these groups contain any of the atoms we are keeping.
# If so, then save the group and write it out.
# (I hate trying to parse this kind of text.)
# if len(l_in_group) > 0:
# sys.stderr.write('\n'
# ' --groups-- Attempting to parse \"group\" commands.\n'
# ' This may cause '+g_program_name+' to crash.\n'
# ' If so, comment out all group commands in your input script(s), and\n'
# ' try again. (And please report the error. -Andrew 2014-10-30)\n')
i_line = 0
groups_needed = set(['all'])
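    # Strategy: for explicitly defined groups ("group NAME type/id/molecule ...")
    # intersect the selection with the atoms, atom types, or molecules we kept
    # and re-emit the command using moltemplate-style @/$ variables.  Groups
    # built from other groups ("union", "intersect", "subtract") are kept only
    # if the groups they refer to survived ("clear" and "delete" pass through
    # unchanged).  Anything unrecognized is dropped with a warning.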
while i_line < len(l_in_group):
line = l_in_group[i_line]
tokens = line.strip().split()
delete_this_command = False
explicit_definition = False
if len(tokens) < 3:
delete_this_command = True
group_name = tokens[1]
specifier_style = tokens[2]
str_logical = ''
str_selection = ''
if specifier_style[0:4] == 'type':
str_logical += specifier_style[4:]
explicit_definition = True
specifier_style = 'type'
        elif specifier_style[0:2] == 'id':
str_logical += specifier_style[2:]
explicit_definition = True
specifier_style = 'id'
        elif specifier_style[0:8] == 'molecule':
str_logical += specifier_style[8:]
specifier_style = 'molecule'
explicit_definition = True
if explicit_definition:
i_token_sel_min = 3
if len(tokens) <= i_token_sel_min:
sys.stderr.write('WARNING: possible syntax error on this line:\n'
+ ' ' + l_in_group[i_line] + '\n')
delete_this_command = True
if str_logical == '':
str_logical = tokens[i_token_sel_min]
if not str_logical[0].isdigit():
i_token_sel_min += 1
if len(tokens) <= i_token_sel_min:
tokens.append('')
else:
tokens.insert(i_token_sel_min, str_logical)
i_token_sel_max = len(tokens) - 1
for i in range(i_token_sel_min, len(tokens)):
if tokens[i].isdigit():
break
else:
i_token_sel_max = i
assert(len(tokens) > i_token_sel_min)
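            # Translate the comparison operator that LAMMPS allows here
            # (<=, >=, ==, !=, <>, <, >) into a list of (min, max) intervals,
            # where None marks an open end.  Plain selections such as
            # "1 2 5:10" are handled by LammpsSelectToIntervals() instead.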
if str_logical[0:2] in ('<=', '>=', '==', '!=', '<>'):
tokens[i_token_sel_min] = str_logical[
2:] + tokens[i_token_sel_min]
str_logical = str_logical[0:2]
if str_logical == '<=':
l_group_selection = [(None, int(tokens[i_token_sel_min]))]
elif str_logical == '>=':
l_group_selection = [(int(tokens[i_token_sel_min]), None)]
elif str_logical == '==':
l_group_selection = [(int(tokens[i_token_sel_min]),
int(tokens[i_token_sel_min]))]
elif str_logical == '!=':
l_group_selection = [(None, int(tokens[i_token_sel_min]) - 1),
(int(tokens[i_token_sel_min]) + 1, None)]
elif str_logical == '<>':
l_group_selection = [(int(tokens[i_token_sel_min]),
int(tokens[i_token_sel_max]))]
elif str_logical[0:1] in ('<', '>'):
tokens[i_token_sel_min] = str_logical[
1:] + tokens[i_token_sel_min]
str_logical = str_logical[0:1]
if str_logical == '<':
l_group_selection = [
(None, int(tokens[i_token_sel_min]) - 1)]
elif str_logical == '>':
l_group_selection = [
(int(tokens[i_token_sel_min]) + 1, None)]
else:
str_selection = ' '.join(
tokens[i_token_sel_min:i_token_sel_max + 1])
l_group_selection = LammpsSelectToIntervals(str_selection,
slice_delim=':',
or_delim=' ')
mn, mx = IntervalListToMinMax(l_group_selection)
if mn == None:
mn = 1
filtered_selection = []
if specifier_style == 'type':
if mx == None:
mx = max_needed_atomtype
for i in range(mn, mx + 1):
if (BelongsToSel(i, l_group_selection)
and (i in needed_atomtypes)):
filtered_selection.append((i, i))
elif specifier_style == 'id':
if mx == None:
mx = max_needed_atomid
for i in range(mn, mx + 1):
if (BelongsToSel(i, l_group_selection)
and (i in needed_atomids)):
filtered_selection.append((i, i))
elif specifier_style == 'molecule':
if mx == None:
mx = max_needed_molid
for i in range(mn, mx + 1):
if (BelongsToSel(i, l_group_selection)
and (i in needed_molids)):
filtered_selection.append((i, i))
MergeIntervals(filtered_selection)
if len(filtered_selection) > 0:
tokens = ['group', group_name, specifier_style]
for interval in filtered_selection:
a = interval[0]
b = interval[1]
if specifier_style == 'type':
if a == b:
tokens.append('@atom:type' + str(a))
else:
tokens.append('@{atom:type' + str(a) +
'}:@{atom:type' + str(b) + '}')
if specifier_style == 'id':
if a == b:
tokens.append('$atom:id' + str(a))
else:
tokens.append('${atom:id' + str(a)
+ '}:${atom:id' + str(b) + '}')
if specifier_style == 'molecule':
if a == b:
tokens.append('$mol:id' + str(a))
else:
tokens.append('${mol:id' + str(a) +
'}:${mol:id' + str(b) + '}')
# Commenting out next two lines. (This is handled later.)
#l_in_group[i_line] = ' '.join(tokens)
# groups_needed.add(group_name)
else:
delete_this_command = True
else:
if len(tokens) > 3:
if tokens[2] == 'union':
i_token = 3
while i_token < len(tokens):
if not (tokens[i_token] in groups_needed):
del tokens[i_token]
else:
i_token += 1
# if none of the groups contain atoms we need,
# then delete the entire command
if len(tokens) <= 3:
delete_this_command = True
elif tokens[2] == 'intersect':
i_token = 3
while i_token < len(tokens):
if not (tokens[i_token] in groups_needed):
# if any of the groups we need are empty
# then delete the command
delete_this_command = True
break
i_token += 1
elif (tokens[2] == 'subtract') and (len(tokens) >= 5):
if not (tokens[3] in groups_needed):
delete_this_command = True
i_token = 4
while i_token < len(tokens):
if not (tokens[i_token] in groups_needed):
del tokens[i_token]
else:
i_token += 1
else:
                        # Otherwise I don't recognize the syntax of this
# group command. In that case, I just delete it.
delete_this_command = True
elif tokens[2] == 'clear':
pass
elif tokens[2] == 'delete':
pass
else:
delete_this_command = True
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"' +
l_in_group[i_line].rstrip() + '\"\n')
del l_in_group[i_line]
else:
groups_needed.add(group_name)
l_in_group[i_line] = (' ' * indent) + ' '.join(tokens) + '\n'
i_line += 1
# --- fix rigid ---
i_line = 0
while i_line < len(l_in_fix_rigid):
line = l_in_fix_rigid[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('rigid') == 0)
if group_name in groups_needed:
delete_this_command = False
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"' +
l_in_fix_rigid[i_line].rstrip() + '\"\n')
del l_in_fix_rigid[i_line]
else:
l_in_fix_rigid[i_line] = (' ' * indent) + ' '.join(tokens) + '\n'
i_line += 1
# --- set ---
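        # A sketch of how a kept "set" command is translated (hypothetical values):
        #     set type 3 charge -0.5   -->   set type @atom:type3 charge -0.5
        # Commands that only touch types, atom-ids, or molecule-ids outside the
        # selection are discarded.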
i_line = 0
while i_line < len(l_in_set):
line = l_in_set[i_line]
tokens = line.strip().split()
l_new_set_commands = []
l_new_set_static_commands = []
if len(tokens) < 4:
break
if tokens[1] == 'type':
pattern = tokens[2].split('*')
if pattern[0] == '':
types_lo = min_needed_atomtype
else:
types_lo = types_hi = int(pattern[0])
if types_lo < min_needed_atomtype:
types_lo = min_needed_atomtype
if len(pattern) == 2:
if pattern[1] == '':
types_hi = max_needed_atomtype
else:
types_hi = min(int(pattern[1]), max_needed_atomtype)
for i in range(types_lo, types_hi+1):
if i in needed_atomtypes:
l_new_set_static_commands.append((' ' * indent) +
' '.join(tokens[0:2])+' '+
'@atom:type'+str(i) + ' ' +
' '.join(tokens[3:]))
elif tokens[1] == 'atom':
pattern = tokens[2].split('*')
if pattern[0] == '':
atomids_lo = min_needed_atomid
else:
atomids_lo = atomids_hi = int(pattern[0])
if atomids_lo < min_needed_atomid:
atomids_lo = min_needed_atomid
if len(pattern) == 2:
if pattern[1] == '':
atomids_hi = max_needed_atomid
else:
atomids_hi = min(int(pattern[1]), max_needed_atomid)
for i in range(atomids_lo, atomids_hi+1):
if i in needed_atomids:
                        l_new_set_commands.append((' ' * indent) +
                                                  ' '.join(tokens[0:2]) + ' ' +
                                                  str(i) + ' ' +
                                                  ' '.join(tokens[3:]))
elif tokens[1] == 'mol':
pattern = tokens[2].split('*')
if pattern[0] == '':
molids_lo = min_needed_molid
else:
molids_lo = molids_hi = int(pattern[0])
if molids_lo < min_needed_molid:
molids_lo = min_needed_molid
if len(pattern) == 2:
if pattern[1] == '':
molids_hi = max_needed_molid
else:
molids_hi = min(int(pattern[1]), max_needed_molid)
for i in range(molids_lo, molids_hi+1):
if i in needed_molids:
                        l_new_set_commands.append(' '.join(tokens[0:2]) + ' ' +
                                                  str(i) + ' ' +
                                                  ' '.join(tokens[3:]))
elif tokens[0] == 'group':
group_name = tokens[2]
if group_name in groups_needed:
l_new_set_static_commands = [l_in_set[i_line]]
if len(l_new_set_commands) > 0:
l_in_set[i_line:i_line+1] = l_new_set_commands
i_line += len(l_new_set_commands)
elif len(l_new_set_static_commands) > 0:
l_in_set_static += l_new_set_static_commands
del l_in_set[i_line]
else:
sys.stderr.write('WARNING: Ignoring line \n\"' +
l_in_set[i_line].rstrip() + '\"\n')
del l_in_set[i_line]
# --- fix shake ---
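        # A sketch of the kind of line handled below (hypothetical fix-ID and group):
        #     fix fSHAKE water shake 0.0001 20 10 b 1 a 1
        # Bond ("b"), angle ("a"), and atom ("t") types that are not part of the
        # selection are pruned from these lists; the survivors are rewritten with
        # @bond/@angle/@atom style counters.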
i_line = 0
while i_line < len(l_in_fix_shake):
line = l_in_fix_shake[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('shake') == 0)
# parse the list of angle types
#i_token = tokens.index('a')
for i_token in range(0, len(tokens)):
if tokens[i_token] == 'a':
break
if i_token != len(tokens):
i_token += 1
while (i_token < len(tokens)) and tokens[i_token].isdigit():
# delete angle types from the list which
# do not belong to the selection
btype = int(tokens[i_token])
if int(tokens[i_token]) in needed_angletypes:
tokens[i_token] = '@angle:type' + tokens[i_token]
i_token += 1
delete_this_command = False
else:
del tokens[i_token]
# parse the list of bond types
#i_token = tokens.index('b')
for i_token in range(0, len(tokens)):
if tokens[i_token] == 'b':
break
if i_token != len(tokens):
i_token += 1
while (i_token < len(tokens)) and tokens[i_token].isdigit():
# delete bond types from the list which
# do not belong to the selection
btype = int(tokens[i_token])
if int(tokens[i_token]) in needed_bondtypes:
tokens[i_token] = '@bond:type' + tokens[i_token]
i_token += 1
delete_this_command = False
else:
del tokens[i_token]
# parse the list of atom types
# i_token = tokens.index('t')
for i_token in range(0, len(tokens)):
if tokens[i_token] == 't':
break
if i_token != len(tokens):
i_token += 1
while (i_token < len(tokens)) and tokens[i_token].isdigit():
# delete atom types from the list which
# do not belong to the selection
btype = int(tokens[i_token])
if int(tokens[i_token]) in needed_atomtypes:
tokens[i_token] = '@atom:type' + tokens[i_token]
i_token += 1
delete_this_command = False
else:
del tokens[i_token]
# Selecting atoms by mass feature should still work, so we
# don't need to delete or ignore these kinds of commands.
# for i_token in range(0, len(tokens)):
# if tokens[i_token] == 'm':
# break
# if i_token != len(tokens):
# delete_this_command = True
if 'mol' in tokens:
delete_this_command = True
if not (group_name in groups_needed):
delete_this_command = True
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"' +
l_in_fix_shake[i_line].rstrip() + '\"\n')
del l_in_fix_shake[i_line]
else:
l_in_fix_shake[i_line] = (' ' * indent) + ' '.join(tokens) + '\n'
i_line += 1
# --- fix poems ---
i_line = 0
while i_line < len(l_in_fix_poems):
line = l_in_fix_poems[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('poems') == 0)
if group_name in groups_needed:
delete_this_command = False
if tokens[4] != 'molecule':
delete_this_command = True
sys.stderr.write('WARNING: ' + g_program_name + ' ONLY supports \"fix poems\" commands\n'
' which use the \"molecule\" keyword.\n')
if tokens[4] == 'file':
                    sys.stderr.write('          If you want to use external files with fix poems, then you will have to\n'
                                     '          generate the file yourself. You can ask moltemplate to generate\n'
                                     '          this file for you, by manually adding a section at the end of your\n'
                                     '          final .LT file (e.g. \"system.lt\") which resembles the following:\n\n'
'write(\"poems_file.txt\") {\n'
' 1 1 $atom:idname1a $atom:idname2a $atom:idname3a ...\n'
' 2 1 $atom:idname1b $atom:idname2b $atom:idname3b ...\n'
' 3 1 $atom:idname1c $atom:idname2c $atom:idname3c ...\n'
' : : etc...\n'
'}\n\n'
' ...where $atom:idname1a, $atom:idname2a, ... are moltemplate-compatible\n'
' unique (full,long) id-names for the atoms in each rigid body.\n'
                                     '          This will ensure the atom-id numbers in this file are correct.\n'
' See the documentation for fix poems for details.\n')
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"' +
l_in_fix_poems[i_line].rstrip() + '\"\n')
del l_in_fix_poems[i_line]
else:
l_in_fix_poems[i_line] = (' ' * indent) + ' '.join(tokens) + '\n'
i_line += 1
# --- fix qeq ---
i_line = 0
while i_line < len(l_in_fix_qeq):
line = l_in_fix_qeq[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('qeq') == 0)
if group_name in groups_needed:
delete_this_command = False
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"' +
l_in_fix_qeq[i_line].rstrip() + '\"\n')
del l_in_fix_qeq[i_line]
else:
l_in_fix_qeq[i_line] = (' ' * indent) + ' '.join(tokens) + '\n'
i_line += 1
# --- fix qmmm ---
i_line = 0
while i_line < len(l_in_fix_qmmm):
line = l_in_fix_qmmm[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('qmmm') == 0)
if group_name in groups_needed:
delete_this_command = False
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"' +
l_in_fix_qmmm[i_line].rstrip() + '\"\n')
del l_in_fix_qmmm[i_line]
else:
l_in_fix_qmmm[i_line] = (' ' * indent) + ' '.join(tokens) + '\n'
i_line += 1
########################################
### Now begin writing the template. ###
########################################
if not some_pair_coeffs_read:
sys.stderr.write('Warning: No \"pair coeffs\" set.\n'
' (No interactions between non-bonded atoms defined.)\n')
no_warnings = False
# sys.stderr.write('Writing ttree data to standard out.\n'
# ' You can redirect this to a file using:\n'+
# ' '+' '.join(sys.argv)+' > filename.ttree\n'
# ' ----------------------\n')
if mol_name != '':
sys.stdout.write(mol_name + ' {\n')
if len(l_in_init) > 0:
sys.stdout.write('\n ### LAMMPS commands for initialization\n'
' ### (These can be overridden later.)\n\n')
l_in_init.insert(0, (' ' * cindent) +
'write_once(\"' + in_init + '\") {\n')
l_in_init.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_init))
if len(l_in_settings) > 0:
sys.stdout.write('\n ### LAMMPS commands for settings\n'
' ### (These can be overridden later.)\n\n')
l_in_settings.insert(0, (' ' * cindent) +
'write_once(\"' + in_settings + '\") {\n')
l_in_settings.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_settings))
non_empty_output = True
if len(l_in_masses) > 0:
l_in_masses.insert(0, (' ' * cindent) +
'write_once(\"' + in_settings + '\") {\n')
l_in_masses.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_masses))
non_empty_output = True
if remove_coeffs_from_data_file:
if len(l_data_pair_coeffs) > 0:
for line in l_data_pair_coeffs:
tokens = line.strip().split()
atomtype_str = tokens[0]
l_in_pair_coeffs.append((' ' * cindent) + ' pair_coeff ' + atomtype_str +
' ' + atomtype_str + ' ' + ' '.join(tokens[1:]) + '\n')
l_data_pair_coeffs = []
if len(l_data_pairij_coeffs) > 0:
for line in l_data_pairij_coeffs:
l_in_pair_coeffs.append(
(' ' * cindent) + ' pair_coeff ' + line.strip() + '\n')
l_data_pairij_coeffs = []
if len(l_in_pair_coeffs) > 0:
l_in_pair_coeffs.insert(0, (' ' * cindent) +
'write_once(\"' + in_settings + '\") {\n')
l_in_pair_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_pair_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_bond_coeffs) > 0)):
for line in l_data_bond_coeffs:
l_in_bond_coeffs.append(
(' ' * cindent) + ' bond_coeff ' + line.strip() + '\n')
l_data_bond_coeffs = []
if len(l_in_bond_coeffs) > 0:
l_in_bond_coeffs.insert(0, (' ' * cindent) +
'write_once(\"' + in_settings + '\") {\n')
l_in_bond_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_bond_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_angle_coeffs) > 0)):
for line in l_data_angle_coeffs:
l_in_angle_coeffs.append(
(' ' * cindent) + ' angle_coeff ' + line.strip() + '\n')
l_data_angle_coeffs = []
for line in l_data_bondbond_coeffs:
tokens = line.strip().split()
l_in_angle_coeffs.append(
(' ' * cindent) + ' angle_coeff ' + tokens[0] + ' bb ' + ' '.join(tokens[1:]) + '\n')
l_data_bondbond_coeffs = []
for line in l_data_bondangle_coeffs:
tokens = line.strip().split()
l_in_angle_coeffs.append(
(' ' * cindent) + ' angle_coeff ' + tokens[0] + ' ba ' + ' '.join(tokens[1:]) + '\n')
l_data_bondangle_coeffs = []
if len(l_in_angle_coeffs) > 0:
l_in_angle_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + in_settings + '\") {\n')
l_in_angle_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_angle_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_dihedral_coeffs) > 0)):
for line in l_data_dihedral_coeffs:
l_in_dihedral_coeffs.append(
(' ' * cindent) + ' dihedral_coeff ' + line.strip() + '\n')
l_data_dihedral_coeffs = []
for line in l_data_middlebondtorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append(
(' ' * cindent) + ' dihedral_coeff ' + tokens[0] + ' mbt ' + ' '.join(tokens[1:]) + '\n')
l_data_middlebondtorsion_coeffs = []
for line in l_data_endbondtorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append(
(' ' * cindent) + ' dihedral_coeff ' + tokens[0] + ' ebt ' + ' '.join(tokens[1:]) + '\n')
l_data_endbondtorsion_coeffs = []
for line in l_data_angletorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append(
(' ' * cindent) + ' dihedral_coeff ' + tokens[0] + ' at ' + ' '.join(tokens[1:]) + '\n')
l_data_angletorsion_coeffs = []
for line in l_data_angleangletorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append(
(' ' * cindent) + ' dihedral_coeff ' + tokens[0] + ' aat ' + ' '.join(tokens[1:]) + '\n')
l_data_angleangletorsion_coeffs = []
for line in l_data_bondbond13_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append(
(' ' * cindent) + ' dihedral_coeff ' + tokens[0] + ' bb13 ' + ' '.join(tokens[1:]) + '\n')
l_data_bondbond13_coeffs = []
if len(l_in_dihedral_coeffs) > 0:
l_in_dihedral_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + in_settings + '\") {\n')
l_in_dihedral_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_dihedral_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_improper_coeffs) > 0)):
for line in l_data_improper_coeffs:
l_in_improper_coeffs.append(
(' ' * cindent) + ' improper_coeff ' + line.strip() + '\n')
l_data_improper_coeffs = []
for line in l_data_angleangle_coeffs:
tokens = line.strip().split()
l_in_improper_coeffs.append(
(' ' * cindent) + ' improper_coeff ' + tokens[0] + ' aa ' + ' '.join(tokens[1:]) + '\n')
l_data_angleangle_coeffs = []
if len(l_in_improper_coeffs) > 0:
l_in_improper_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + in_settings + '\") {\n')
l_in_improper_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_improper_coeffs))
non_empty_output = True
if non_empty_output:
sys.stdout.write('\n\n ### DATA sections\n\n')
if len(l_data_masses) > 0:
l_data_masses.insert(0, (' ' * cindent) +
'write_once(\"' + data_masses + '\") {\n')
l_data_masses.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_masses))
non_empty_output = True
if len(l_data_bond_coeffs) > 0:
l_data_bond_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_bond_coeffs + '\") {\n')
l_data_bond_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bond_coeffs))
non_empty_output = True
if len(l_data_angle_coeffs) > 0:
l_data_angle_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_angle_coeffs + '\") {\n')
l_data_angle_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angle_coeffs))
non_empty_output = True
if len(l_data_dihedral_coeffs) > 0:
l_data_dihedral_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_dihedral_coeffs + '\") {\n')
l_data_dihedral_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedral_coeffs))
non_empty_output = True
if len(l_data_improper_coeffs) > 0:
l_data_improper_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_improper_coeffs + '\") {\n')
l_data_improper_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_improper_coeffs))
non_empty_output = True
if len(l_data_pair_coeffs) > 0:
l_data_pair_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_pair_coeffs + '\") {\n')
l_data_pair_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_pair_coeffs))
non_empty_output = True
if len(l_data_pairij_coeffs) > 0:
l_data_pairij_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_pairij_coeffs + '\") {\n')
l_data_pairij_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_pairij_coeffs))
non_empty_output = True
# class2 force fields:
if len(l_data_bondbond_coeffs) > 0:
l_data_bondbond_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_bondbond_coeffs + '\") {\n')
l_data_bondbond_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond_coeffs))
non_empty_output = True
if len(l_data_bondangle_coeffs) > 0:
l_data_bondangle_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_bondangle_coeffs + '\") {\n')
l_data_bondangle_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondangle_coeffs))
non_empty_output = True
if len(l_data_middlebondtorsion_coeffs) > 0:
l_data_middlebondtorsion_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_middlebondtorsion_coeffs + '\") {\n')
l_data_middlebondtorsion_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_middlebondtorsion_coeffs))
non_empty_output = True
if len(l_data_endbondtorsion_coeffs) > 0:
l_data_endbondtorsion_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_endbondtorsion_coeffs + '\") {\n')
l_data_endbondtorsion_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_endbondtorsion_coeffs))
non_empty_output = True
if len(l_data_angletorsion_coeffs) > 0:
l_data_angletorsion_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_angletorsion_coeffs + '\") {\n')
l_data_angletorsion_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angletorsion_coeffs))
non_empty_output = True
if len(l_data_angleangletorsion_coeffs) > 0:
l_data_angleangletorsion_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_angleangletorsion_coeffs + '\") {\n')
l_data_angleangletorsion_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangletorsion_coeffs))
non_empty_output = True
if len(l_data_bondbond13_coeffs) > 0:
l_data_bondbond13_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_bondbond13_coeffs + '\") {\n')
l_data_bondbond13_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond13_coeffs))
non_empty_output = True
if len(l_data_angleangle_coeffs) > 0:
l_data_angleangle_coeffs.insert(
0, (' ' * cindent) + 'write_once(\"' + data_angleangle_coeffs + '\") {\n')
l_data_angleangle_coeffs.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangle_coeffs))
non_empty_output = True
# automatic generation of bonded interactions by type:
if len(l_data_angles_by_type) > 0:
l_data_angles_by_type.insert(
0, (' ' * cindent) + 'write_once(\"' + data_angles_by_type + '\") {\n')
l_data_angles_by_type.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles_by_type))
non_empty_output = True
if len(l_data_dihedrals_by_type) > 0:
l_data_dihedrals_by_type.insert(
0, (' ' * cindent) + 'write_once(\"' + data_dihedrals_by_type + '\") {\n')
l_data_dihedrals_by_type.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals_by_type))
non_empty_output = True
if len(l_data_impropers_by_type) > 0:
l_data_impropers_by_type.insert(
0, (' ' * cindent) + 'write_once(\"' + data_impropers_by_type + '\") {\n')
l_data_impropers_by_type.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers_by_type))
non_empty_output = True
if len(l_data_atoms) > 0:
l_data_atoms.insert(0, (' ' * cindent) +
'write(\"' + data_atoms + '\") {\n')
l_data_atoms.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_atoms))
non_empty_output = True
else:
sys.stderr.write('Warning: missing \"Atoms\" section.\n'
' (Did you include a LAMMPS data file in your argument list?)\n')
no_warnings = False
# non-point-like particles
if len(l_data_ellipsoids) > 0:
l_data_ellipsoids.insert(
0, (' ' * cindent) + 'write(\"' + data_ellipsoids + '\") {\n')
l_data_ellipsoids.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_ellipsoids))
if len(l_data_lines) > 0:
l_data_lines.insert(0, (' ' * cindent) +
'write(\"' + data_lines + '\") {\n')
l_data_lines.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_lines))
if len(l_data_triangles) > 0:
l_data_triangles.insert(0, (' ' * cindent) +
'write(\"' + data_triangles + '\") {\n')
l_data_triangles.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_triangles))
# DO NOT WRITE OUT VELOCITY DATA
# (Why: because it makes it difficult to combine this molecular template
# with molecule templates from other sources which lack velocity data.
# LAMMPS (and topotools) will crash if the number of entries in the
# Velocities section of a data file does not match the number of atoms.)
# COMMENTING OUT:
# if len(l_data_velocities) > 0:
# l_data_velocities.insert(0, (' '*cindent)+'write(\"'+data_velocities+'\") {\n')
# l_data_velocities.append((' '*cindent)+'}\n')
# sys.stdout.write('\n')
# sys.stdout.write(''.join(l_data_velocities))
if len(l_data_bonds) > 0:
l_data_bonds.insert(0, (' ' * cindent) +
'write(\"' + data_bonds + '\") {\n')
l_data_bonds.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bonds))
non_empty_output = True
if len(l_data_angles) > 0:
l_data_angles.insert(0, (' ' * cindent) +
'write(\"' + data_angles + '\") {\n')
l_data_angles.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles))
non_empty_output = True
if len(l_data_dihedrals) > 0:
l_data_dihedrals.insert(0, (' ' * cindent) +
'write(\"' + data_dihedrals + '\") {\n')
l_data_dihedrals.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals))
non_empty_output = True
if len(l_data_impropers) > 0:
l_data_impropers.insert(0, (' ' * cindent) +
'write(\"' + data_impropers + '\") {\n')
l_data_impropers.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers))
non_empty_output = True
if len(l_in_group) > 0:
no_warnings = False
l_in_group.insert(0, (' ' * cindent) +
'write(\"' + in_settings + '\") {\n')
l_in_group.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_group))
# sys.stderr.write('######################################################\n'
# 'WARNING: One or more \"group\" commands appear to refer to relevant atoms.\n'
# ' Please check to make sure that the group(s) generated by\n'
# ' '+g_program_name+' contain the correct atoms. (-Andrew 2014-10-30)\n'
# '######################################################\n')
assert(non_empty_output)
if len(l_in_set) > 0:
l_in_set.insert(0, ((' ' * cindent) +
'write(\"' + in_settings + '\") {'))
l_in_set.append((' ' * cindent) + '} # end of list of \"set\" commands\n')
sys.stdout.write('\n')
sys.stdout.write((' ' * cindent) + '# list of \"set\" commands:\n')
sys.stdout.write('\n'.join(l_in_set))
if len(l_in_set_static) > 0:
l_in_set_static.insert(0, ((' ' * cindent) +
'write_once(\"' + in_settings + '\") {'))
l_in_set_static.append((' ' * cindent) + '} # end of list of (static) \"set\" commands\n')
sys.stdout.write('\n')
sys.stdout.write((' ' * cindent) + '# list of (static) \"set\" commands:\n')
sys.stdout.write('\n'.join(l_in_set_static))
if len(l_in_fix_rigid) > 0:
no_warnings = False
l_in_fix_rigid.insert(0, (' ' * cindent) +
'write(\"' + in_settings + '\") {\n')
l_in_fix_rigid.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_rigid))
sys.stderr.write('WARNING: \"fix rigid\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_shake) > 0:
no_warnings = False
l_in_fix_shake.insert(0, (' ' * cindent) +
'write(\"' + in_settings + '\") {\n')
l_in_fix_shake.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_shake))
sys.stderr.write('WARNING: \"fix shake\" style command(s) applied to selected atoms.\n'
' Please check to make sure that the fix group(s) are defined correctly,\n'
' and also check that the atom, bond, and angle types are correct.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_poems) > 0:
no_warnings = False
l_in_fix_poems.insert(0, (' ' * cindent) +
'write(\"' + in_settings + '\") {\n')
l_in_fix_poems.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_poems))
sys.stderr.write('WARNING: \"fix poems\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_qeq) > 0:
no_warnings = False
l_in_fix_qeq.insert(0, (' ' * cindent) +
'write(\"' + in_settings + '\") {\n')
l_in_fix_qeq.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_qeq))
sys.stderr.write('WARNING: \"fix qeq\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_qmmm) > 0:
no_warnings = False
l_in_fix_qmmm.insert(0, (' ' * cindent) +
'write(\"' + in_settings + '\") {\n')
l_in_fix_qmmm.append((' ' * cindent) + '}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_qmmm))
sys.stderr.write('WARNING: \"fix qmmm\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if mol_name != '':
sys.stdout.write('\n} # end of \"' + mol_name + '\" type definition\n')
# if non_empty_output and no_warnings:
if non_empty_output:
sys.stderr.write('WARNING: The ' + g_program_name + ' script has not been rigorously tested.\n'
' Exotic (many-body) pair-styles and pair-styles with\n'
                         '            unusual syntax (such as hbond/dreiding) are not understood\n'
' by ' + g_program_name +
' (...although they are supported by moltemplate).\n'
' Please look over the resulting LT file and check for errors.\n'
' Convert any remaining atom, bond, angle, dihedral, or improper id\n'
' or type numbers to the corresponding $ or @-style counter variables.\n'
' Feel free to report any bugs you find. (-Andrew Jewett 2015-8-02)\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
|
ramisetti/lammps
|
tools/moltemplate/moltemplate/ltemplify.py
|
Python
|
gpl-2.0
| 180,315
|
[
"LAMMPS"
] |
20d283e1845fdb4db31a3690c2ee3a04d8344d1028b58050765fc2ae921a5169
|
from __future__ import division, print_function
import numpy as np
import os
from auxiliary import *
from scipy.integrate import odeint, ode
import matplotlib.pyplot as plt
from smatrix import compute_S_matrix, compute_S_matrix_fast, mean_poisson
from ttsolution import TTsolution
import sys
try:
xrange
except NameError:
xrange = range
def takagitaupin(scantype,scan,constant,hkl,crystal,thickness,bending = 'None'):
'''
1D TT-solver.
Input:
scantype = 'energy' or 'angle'
    scan = relative to the Bragg energy in meV (energy scan) OR relative to the Bragg angle in arcsec (angle scan)
constant = incidence angle in degrees (energy scan) OR photon energy in keV (angle scan)
hkl = [h,k,l] (Miller indices)
crystal = currently only 'si' is supported
thickness = crystal thickness in microns
bending = 'None' OR ('spherical',R_bend) OR ('cylindrical',R1,R2), where R_bend, R1, and R2 are in meters
'''
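    # A minimal usage sketch (hypothetical numbers; assumes the bundled Si chi-tables):
    #     scan = np.linspace(-150, 150, 301)                 # meV around the Bragg energy
    #     sol = takagitaupin('energy', scan, 88.0, [6, 6, 0],
    #                        'si', 300, ('spherical', 1.0))  # 300 um wafer bent to R = 1 m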
    if scantype == 'energy':
        is_escan = True
        scantype = 'energy'
    elif scantype == 'angle' or scantype == 'angular':
        is_escan = False
        scantype = 'angle'
    else:
        raise ValueError("scantype has to be either 'energy' or 'angle'")
#type conversions
scan=np.array(scan)
#Unit conversions
thickness_tuple = (thickness, 'microns')
thickness = thickness*1e-6 #wafer thickness in meters
#constants
    hc=1.23984193*0.001 #in meV*m
d=dspace(hkl,crystal)*1e-10 #in m
#Setup scan variables and constants
if is_escan:
escan=scan
th=np.radians(constant)
#Direction cosines
gamma0=np.sin(th)
gammah=-np.sin(th)
#Conversion of incident photon energy to wavelength
E0 = hc/(2*d*np.sin(th)) #in meV
wavelength = hc/(E0+escan) #in m
else:
E0 = constant*1e6 #in meV
wavelength = hc/E0 #in m
if not hc/(2*d*E0) > 1:
th = np.arcsin(hc/(2*d*E0))
else:
th = np.pi/2
ascan = scan*np.pi/648000 #from arcsec to rad
#Direction cosines
gamma0=np.sin(th+ascan)
gammah=-np.sin(th+ascan)
#construct the path for chitables
hklstring = str(hkl[0]) + '_' + str(hkl[1]) + '_' + str(hkl[2])
filename = 'chitable_' + crystal.lower() + '_' + hklstring + '.dat'
filestring = os.path.join(os.path.dirname(__file__),'chitables_300K',filename)
#load the chitable
try:
chi = np.loadtxt(filestring)
except:
print('Error loading chitable! Check that ' + filestring \
+ ' exists and is correctly formatted!')
raise Exception()
#conversion to meV
chienergy = chi[:,0]*1e6
print('Computing elastic line for ' + str(hkl) + ' reflection of ' \
+ crystal[0].upper() + crystal[1:].lower() + '-crystal')
if is_escan:
print('Scanning the incident energy')
else:
print('Scanning the incidence angle')
#Polarization (TODO: include pi-polarization)
    C = 1
print('Assuming sigma-polarization')
#Interpolation
if is_escan:
chi0 = np.interp(E0+escan, chienergy, chi[:,1]) + 1j*np.interp(E0+escan, chienergy, chi[:,2])
chih = np.interp(E0+escan, chienergy, chi[:,3]) + 1j*np.interp(E0+escan, chienergy, chi[:,4])
chihbar = np.interp(E0+escan, chienergy, chi[:,5]) + 1j*np.interp(E0+escan, chienergy, chi[:,6])
else:
chi0 = np.interp(E0, chienergy, chi[:,1]) + 1j*np.interp(E0, chienergy, chi[:,2])
chih = np.interp(E0, chienergy, chi[:,3]) + 1j*np.interp(E0, chienergy, chi[:,4])
chihbar = np.interp(E0, chienergy, chi[:,5]) + 1j*np.interp(E0, chienergy, chi[:,6])
#Deviation from backscattering
deltawavelength = wavelength-2*d
if is_escan:
th2 = th
else:
th2 = th+ascan
#if is_escan:
# deltath = th-np.pi/2
#else:
# deltath = th+ascan-np.pi/2
#Extinction length
L = wavelength * np.sqrt(gamma0*np.abs(gammah)) / (np.abs(C)*np.sqrt(chih*chihbar))
#Incidence parameter
eta = np.sqrt(gamma0/np.abs(gammah)) / (np.abs(C)*np.sqrt(chih*chihbar)) \
* (-wavelength/d*(wavelength/(2*d)-np.sin(th2)) - chi0*(gammah/gamma0-1)/2)
#normalization coefficient
normcoef = np.sqrt(chih*chihbar)/chihbar*np.sign(C)*np.sqrt(gamma0/np.abs(gammah))
#Calculate mean poisson's ratio
    nu = 0
    # rotational_parameter is only used when the crystal is bent; initialise it to
    # zeros so the unbent branch below can be called with the same argument list.
    rotational_parameter = np.zeros(np.shape(scan))
if not bending == 'None':
#TODO: different bendings have their own quirks, check for cylindrical
S_matrix, C_matrix = compute_S_matrix_fast(hkl,crystal)
#nu = mean_poisson(S_matrix)
#test
S=S_matrix
if bending[0] == 'cylindrical':
if bending[1] == 'inf':
invR1 = 0
else:
invR1 = 1/bending[1]
if bending[2] == 'inf':
invR2 = 0
else:
invR2 = 1/bending[2]
elif bending[0] == 'spherical':
if bending[1] == 'inf':
invR1 = 0
invR2 = 0
else:
invR1 = 1/bending[1]
invR2 = 1/bending[1]
#This takes into account the rotation of the diffractive planes by the bending deep in the crystal
rotational_parameter = np.sqrt(gamma0/np.abs(gammah)) / (np.abs(C)*np.sqrt(chih*chihbar)) \
*wavelength/d*np.cos(th2)**2/np.sin(th2)*invR1
#Parameter according to http://arxiv.org/abs/1502.03059
bending_parameter = S[2,0]*(S[0,1]*invR2-S[1,1]*invR1)+S[2,1]*(S[1,0]*invR1-S[0,0]*invR2)
bending_parameter = -0.5*bending_parameter/(S[0,1]*S[1,0]-S[0,0]*S[1,1])
print(bending_parameter)
#INTEGRATION
reflectivity=[]
#Define ODE and its Jacobian
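    # Sketch of the scalar Takagi-Taupin equation integrated below (flat-crystal case):
    #     d(ksi)/dz = (i*pi/L) * [ ksi**2 - 2*sign(gammah)*eta*ksi - sign(gammah) ]
    # For a bent crystal, eta is replaced by a depth-dependent expression that adds the
    # plane-rotation term (rot*z) and the strain term built from bending_parameter.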
def tt_equation(z,ksi,L,gamma0,gammah,eta,d,bending,thickness,nu,rot):
if bending == 'None':
return np.pi*1j/L*(ksi**2-2*(np.sign(gammah)*eta)*ksi-np.sign(gammah))
else:
return np.pi*1j/L*(ksi**2-2*(np.sign(gammah)*(eta+rot*z)+L*2*bending_parameter*(z-thickness/2)/d)*ksi-np.sign(gammah))
def tt_jacobian(z,ksi,L,gamma0,gammah,eta,d,bending,thickness,nu,rot):
if bending == 'None':
return np.pi*1j/L*(2*ksi-2*(np.sign(gammah)*eta))
else:
return np.pi*1j/L*(2*ksi-2*(np.sign(gammah)*(eta+rot*z)+L*2*bending_parameter*(z-thickness/2)/d))
#Solve the equation
sys.stdout.write('Solving...0%')
sys.stdout.flush()
for step in xrange(len(scan)):
def tt2solve(z,ksi):
if is_escan:
return tt_equation(z,ksi,L[step],gamma0,gammah,eta[step],d,bending,thickness,nu,rotational_parameter[step])
else:
return tt_equation(z,ksi,L[step],gamma0[step],gammah[step],eta[step],d,bending,thickness,nu,rotational_parameter[step])
def jac(z,ksi):
if is_escan:
return tt_jacobian(z,ksi,L[step],gamma0,gammah,eta[step],d,bending,thickness,nu,rotational_parameter[step])
else:
return tt_jacobian(z,ksi,L[step],gamma0[step],gammah[step],eta[step],d,bending,thickness,nu,rotational_parameter[step])
r=ode(tt2solve,jac).set_integrator('zvode',method='bdf',with_jacobian=True,min_step=1e-10,max_step=1e-4,nsteps=50000)
r.set_initial_value(0,thickness)
res=r.integrate(0)
reflectivity.append(np.abs(normcoef[step]*res[0])**2)
sys.stdout.write('\rSolving...%0.1f%%' % (100*(step+1)/len(scan),))
sys.stdout.flush()
sys.stdout.write('\r\nDone.\n')
sys.stdout.flush()
#solution class
if is_escan:
scan = (scan, 'meV')
constant = (constant,'deg')
else:
scan = (scan, 'arcsec')
constant = (constant,'keV')
#TODO: add also the type of bending to the ttsolution
if bending == 'None':
R_bend = 0
else:
R_bend = bending[1]
result = TTsolution(scan,reflectivity,scantype,crystal.lower(),hkl,(R_bend,'m'),thickness_tuple,constant)
return result
|
aripekka/pytakagitaupin
|
takagitaupin.py
|
Python
|
mit
| 8,086
|
[
"CRYSTAL"
] |
92650c5e1509173a0a2e7f3c6e99d1892dfba8c990194c8524874ec61fe307d8
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Module to support the loading of a NetCDF file into an Iris cube.
See also: `netCDF4 python <http://code.google.com/p/netcdf4-python/>`_.
Also refer to document 'NetCDF Climate and Forecast (CF) Metadata Conventions',
Version 1.4, 27 February 2009.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import collections
import os
import os.path
import string
import warnings
import biggus
import netCDF4
import numpy as np
import numpy.ma as ma
from pyke import knowledge_engine
import iris.analysis
from iris.aux_factory import HybridHeightFactory, HybridPressureFactory, \
OceanSigmaZFactory, OceanSigmaFactory, OceanSFactory, OceanSg1Factory, \
OceanSg2Factory
import iris.coord_systems
import iris.coords
import iris.cube
import iris.exceptions
import iris.fileformats.cf
import iris.fileformats._pyke_rules
import iris.io
import iris.util
# Show Pyke inference engine statistics.
DEBUG = False
# Pyke CF related file names.
_PYKE_RULE_BASE = 'fc_rules_cf'
_PYKE_FACT_BASE = 'facts_cf'
# Standard CML spatio-temporal axis names.
SPATIO_TEMPORAL_AXES = ['t', 'z', 'y', 'x']
# Pass through CF attributes:
# - comment
# - Conventions
# - flag_masks
# - flag_meanings
# - flag_values
# - history
# - institution
# - reference
# - source
# - title
# - positive
#
_CF_ATTRS = ['add_offset', 'ancillary_variables', 'axis', 'bounds', 'calendar',
'cell_measures', 'cell_methods', 'climatology', 'compress',
'coordinates', '_FillValue', 'formula_terms', 'grid_mapping',
'leap_month', 'leap_year', 'long_name', 'missing_value',
'month_lengths', 'scale_factor', 'standard_error_multiplier',
'standard_name', 'units', 'valid_max', 'valid_min', 'valid_range']
# CF attributes that should not be global.
_CF_DATA_ATTRS = ['flag_masks', 'flag_meanings', 'flag_values',
'instance_dimension', 'sample_dimension',
'standard_error_multiplier']
# CF attributes that should only be global.
_CF_GLOBAL_ATTRS = ['conventions', 'featureType', 'history', 'title']
# UKMO specific attributes that should not be global.
_UKMO_DATA_ATTRS = ['STASH', 'um_stash_source', 'ukmo__process_flags']
CF_CONVENTIONS_VERSION = 'CF-1.5'
_FactoryDefn = collections.namedtuple('_FactoryDefn', ('primary', 'std_name',
'formula_terms_format'))
_FACTORY_DEFNS = {
HybridHeightFactory: _FactoryDefn(
primary='delta',
std_name='atmosphere_hybrid_height_coordinate',
formula_terms_format='a: {delta} b: {sigma} orog: {orography}'),
HybridPressureFactory: _FactoryDefn(
primary='delta',
std_name='atmosphere_hybrid_sigma_pressure_coordinate',
formula_terms_format='ap: {delta} b: {sigma} '
'ps: {surface_air_pressure}'),
OceanSigmaZFactory: _FactoryDefn(
primary='zlev',
std_name='ocean_sigma_z_coordinate',
formula_terms_format='sigma: {sigma} eta: {eta} depth: {depth} '
'depth_c: {depth_c} nsigma: {nsigma} zlev: {zlev}'),
OceanSigmaFactory: _FactoryDefn(
primary='sigma',
std_name='ocean_sigma_coordinate',
formula_terms_format='sigma: {sigma} eta: {eta} depth: {depth}'),
OceanSFactory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate',
formula_terms_format='s: {s} eta: {eta} depth: {depth} a: {a} b: {b} '
'depth_c: {depth_c}'),
OceanSg1Factory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate_g1',
formula_terms_format='s: {s} c: {c} eta: {eta} depth: {depth} '
'depth_c: {depth_c}'),
OceanSg2Factory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate_g2',
formula_terms_format='s: {s} c: {c} eta: {eta} depth: {depth} '
'depth_c: {depth_c}')
}
class CFNameCoordMap(object):
"""Provide a simple CF name to CF coordinate mapping."""
_Map = collections.namedtuple('_Map', ['name', 'coord'])
def __init__(self):
self._map = []
def append(self, name, coord):
"""
Append the given name and coordinate pair to the mapping.
Args:
* name:
CF name of the associated coordinate.
* coord:
The coordinate of the associated CF name.
Returns:
None.
"""
self._map.append(CFNameCoordMap._Map(name, coord))
@property
def names(self):
"""Return all the CF names."""
return [pair.name for pair in self._map]
@property
def coords(self):
"""Return all the coordinates."""
return [pair.coord for pair in self._map]
def name(self, coord):
"""
        Return the CF name, given a coordinate.
Args:
* coord:
The coordinate of the associated CF name.
Returns:
            CF name.
"""
result = None
for pair in self._map:
if coord == pair.coord:
result = pair.name
break
if result is None:
msg = 'Coordinate is not mapped, {!r}'.format(coord)
raise KeyError(msg)
return result
def coord(self, name):
"""
Return the coordinate, given a CF name.
Args:
* name:
CF name of the associated coordinate.
Returns:
            Coordinate.
"""
result = None
for pair in self._map:
if name == pair.name:
result = pair.coord
break
if result is None:
msg = 'Name is not mapped, {!r}'.format(name)
raise KeyError(msg)
return result
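    # Usage sketch (hypothetical names):
    #     mapping = CFNameCoordMap()
    #     mapping.append('latitude', lat_coord)
    #     mapping.coord('latitude')   # -> lat_coord
    #     mapping.name(lat_coord)     # -> 'latitude'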
def _pyke_kb_engine():
"""Return the PyKE knowledge engine for CF->cube conversion."""
pyke_dir = os.path.join(os.path.dirname(__file__), '_pyke_rules')
compile_dir = os.path.join(pyke_dir, 'compiled_krb')
engine = None
if os.path.exists(compile_dir):
tmpvar = [os.path.getmtime(os.path.join(compile_dir, fname)) for
fname in os.listdir(compile_dir) if not
fname.startswith('_')]
if tmpvar:
oldest_pyke_compile_file = min(tmpvar)
rule_age = os.path.getmtime(
os.path.join(pyke_dir, _PYKE_RULE_BASE + '.krb'))
if oldest_pyke_compile_file >= rule_age:
# Initialise the pyke inference engine.
engine = knowledge_engine.engine(
(None, 'iris.fileformats._pyke_rules.compiled_krb'))
if engine is None:
engine = knowledge_engine.engine(iris.fileformats._pyke_rules)
return engine
class NetCDFDataProxy(object):
"""A reference to the data payload of a single NetCDF file variable."""
__slots__ = ('shape', 'dtype', 'path', 'variable_name', 'fill_value')
def __init__(self, shape, dtype, path, variable_name, fill_value):
self.shape = shape
self.dtype = dtype
self.path = path
self.variable_name = variable_name
self.fill_value = fill_value
@property
def ndim(self):
return len(self.shape)
def __getitem__(self, keys):
dataset = netCDF4.Dataset(self.path)
try:
variable = dataset.variables[self.variable_name]
# Get the NetCDF variable data and slice.
data = variable[keys]
finally:
dataset.close()
return data
def __repr__(self):
fmt = '<{self.__class__.__name__} shape={self.shape}' \
' dtype={self.dtype!r} path={self.path!r}' \
' variable_name={self.variable_name!r}>'
return fmt.format(self=self)
def __getstate__(self):
return {attr: getattr(self, attr) for attr in self.__slots__}
def __setstate__(self, state):
for key, value in six.iteritems(state):
setattr(self, key, value)
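    # Sketch of the deferred-read behaviour (hypothetical file and variable names):
    #     proxy = NetCDFDataProxy((12, 96, 73), np.dtype('f4'), 'tas.nc', 'tas', -1e20)
    #     block = proxy[0, :10, :10]   # opens tas.nc, reads only this slice, then closes it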
def _assert_case_specific_facts(engine, cf, cf_group):
# Initialise pyke engine "provides" hooks.
engine.provides['coordinates'] = []
# Assert facts for CF coordinates.
for cf_name in six.iterkeys(cf_group.coordinates):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'coordinate',
(cf_name,))
# Assert facts for CF auxiliary coordinates.
for cf_name in six.iterkeys(cf_group.auxiliary_coordinates):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'auxiliary_coordinate',
(cf_name,))
# Assert facts for CF cell measures.
for cf_name in six.iterkeys(cf_group.cell_measures):
engine.add_case_specific_fact(_PYKE_FACT_BASE,
'cell_measure', (cf_name,))
# Assert facts for CF grid_mappings.
for cf_name in six.iterkeys(cf_group.grid_mappings):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'grid_mapping',
(cf_name,))
# Assert facts for CF labels.
for cf_name in six.iterkeys(cf_group.labels):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'label',
(cf_name,))
# Assert facts for CF formula terms associated with the cf_group
# of the CF data variable.
formula_root = set()
for cf_var in six.itervalues(cf.cf_group.formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
# Only assert this fact if the formula root variable is
# defined in the CF group of the CF data variable.
if cf_root in cf_group:
formula_root.add(cf_root)
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_term',
(cf_var.cf_name, cf_root,
cf_term))
for cf_root in formula_root:
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_root',
(cf_root,))
def _pyke_stats(engine, cf_name):
if DEBUG:
print('-' * 80)
print('CF Data Variable: %r' % cf_name)
engine.print_stats()
print('Rules Triggered:')
for rule in sorted(list(engine.rule_triggered)):
print('\t%s' % rule)
print('Case Specific Facts:')
kb_facts = engine.get_kb(_PYKE_FACT_BASE)
for key in six.iterkeys(kb_facts.entity_lists):
for arg in kb_facts.entity_lists[key].case_specific_facts:
print('\t%s%s' % (key, arg))
def _set_attributes(attributes, key, value):
"""Set attributes dictionary, converting unicode strings appropriately."""
if isinstance(value, six.text_type):
try:
attributes[str(key)] = str(value)
except UnicodeEncodeError:
attributes[str(key)] = value
else:
attributes[str(key)] = value
def _load_cube(engine, cf, cf_var, filename):
"""Create the cube associated with the CF-netCDF data variable."""
# Figure out what the eventual data type will be after any scale/offset
# transforms.
dummy_data = np.zeros(1, dtype=cf_var.dtype)
if hasattr(cf_var, 'scale_factor'):
dummy_data = cf_var.scale_factor * dummy_data
if hasattr(cf_var, 'add_offset'):
dummy_data = cf_var.add_offset + dummy_data
# Create cube with deferred data, but no metadata
fill_value = getattr(cf_var.cf_data, '_FillValue',
netCDF4.default_fillvals[cf_var.dtype.str[1:]])
proxy = NetCDFDataProxy(cf_var.shape, dummy_data.dtype,
filename, cf_var.cf_name, fill_value)
data = biggus.OrthoArrayAdapter(proxy)
cube = iris.cube.Cube(data)
# Reset the pyke inference engine.
engine.reset()
# Initialise pyke engine rule processing hooks.
engine.cf_var = cf_var
engine.cube = cube
engine.provides = {}
engine.requires = {}
engine.rule_triggered = set()
engine.filename = filename
# Assert any case-specific facts.
_assert_case_specific_facts(engine, cf, cf_var.cf_group)
# Run pyke inference engine with forward chaining rules.
engine.activate(_PYKE_RULE_BASE)
# Populate coordinate attributes with the untouched attributes from the
# associated CF-netCDF variable.
coordinates = engine.provides.get('coordinates', [])
attribute_predicate = lambda item: item[0] not in _CF_ATTRS
for coord, cf_var_name in coordinates:
tmpvar = filter(attribute_predicate,
cf.cf_group[cf_var_name].cf_attrs_unused())
for attr_name, attr_value in tmpvar:
_set_attributes(coord.attributes, attr_name, attr_value)
tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused())
# Attach untouched attributes of the associated CF-netCDF data variable to
# the cube.
for attr_name, attr_value in tmpvar:
_set_attributes(cube.attributes, attr_name, attr_value)
# Show pyke session statistics.
_pyke_stats(engine, cf_var.cf_name)
return cube
def _load_aux_factory(engine, cube):
"""
Convert any CF-netCDF dimensionless coordinate to an AuxCoordFactory.
"""
formula_type = engine.requires.get('formula_type')
if formula_type in ['atmosphere_hybrid_height_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'ocean_sigma_z_coordinate', 'ocean_sigma_coordinate',
'ocean_s_coordinate', 'ocean_s_coordinate_g1',
'ocean_s_coordinate_g2']:
def coord_from_term(term):
# Convert term names to coordinates (via netCDF variable names).
name = engine.requires['formula_terms'][term]
for coord, cf_var_name in engine.provides['coordinates']:
if cf_var_name == name:
return coord
warnings.warn('Unable to find coordinate for variable '
'{!r}'.format(name))
if formula_type == 'atmosphere_hybrid_height_coordinate':
delta = coord_from_term('a')
sigma = coord_from_term('b')
orography = coord_from_term('orog')
factory = HybridHeightFactory(delta, sigma, orography)
elif formula_type == 'atmosphere_hybrid_sigma_pressure_coordinate':
# Hybrid pressure has two valid versions of its formula terms:
# "p0: var1 a: var2 b: var3 ps: var4" or
# "ap: var1 b: var2 ps: var3" where "ap = p0 * a"
try:
# Attempt to get the "ap" term.
delta = coord_from_term('ap')
except (KeyError, ValueError):
# The "ap" term is unavailable, so try getting terms "p0"
# and "a" terms in order to derive an "ap" equivalent term.
coord_p0 = coord_from_term('p0')
if coord_p0.shape != (1,):
msg = 'Expecting {!r} to be a scalar reference pressure ' \
'coordinate, got shape {!r}'.format(coord_p0.var_name,
coord_p0.shape)
raise ValueError(msg)
if coord_p0.has_bounds():
msg = 'Ignoring atmosphere hybrid sigma pressure scalar ' \
'coordinate {!r} bounds.'.format(coord_p0.name())
warnings.warn(msg)
coord_a = coord_from_term('a')
delta = coord_a * coord_p0.points[0]
delta.units = coord_a.units * coord_p0.units
delta.rename('vertical pressure')
delta.var_name = 'ap'
cube.add_aux_coord(delta, cube.coord_dims(coord_a))
sigma = coord_from_term('b')
surface_air_pressure = coord_from_term('ps')
factory = HybridPressureFactory(delta, sigma, surface_air_pressure)
elif formula_type == 'ocean_sigma_z_coordinate':
sigma = coord_from_term('sigma')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
nsigma = coord_from_term('nsigma')
zlev = coord_from_term('zlev')
factory = OceanSigmaZFactory(sigma, eta, depth,
depth_c, nsigma, zlev)
elif formula_type == 'ocean_sigma_coordinate':
sigma = coord_from_term('sigma')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
factory = OceanSigmaFactory(sigma, eta, depth)
elif formula_type == 'ocean_s_coordinate':
s = coord_from_term('s')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
a = coord_from_term('a')
depth_c = coord_from_term('depth_c')
b = coord_from_term('b')
factory = OceanSFactory(s, eta, depth, a, b, depth_c)
elif formula_type == 'ocean_s_coordinate_g1':
s = coord_from_term('s')
c = coord_from_term('c')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
factory = OceanSg1Factory(s, c, eta, depth,
depth_c)
elif formula_type == 'ocean_s_coordinate_g2':
s = coord_from_term('s')
c = coord_from_term('c')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
factory = OceanSg2Factory(s, c, eta, depth,
depth_c)
cube.add_aux_factory(factory)
def load_cubes(filenames, callback=None):
"""
Loads cubes from a list of NetCDF filenames/URLs.
Args:
* filenames (string/list):
One or more NetCDF filenames/DAP URLs to load from.
Kwargs:
* callback (callable function):
Function which can be passed on to :func:`iris.io.run_callback`.
Returns:
        Generator of loaded NetCDF :class:`iris.cube.Cube`.
"""
# Initialise the pyke inference engine.
engine = _pyke_kb_engine()
if isinstance(filenames, six.string_types):
filenames = [filenames]
for filename in filenames:
# Ingest the netCDF file.
cf = iris.fileformats.cf.CFReader(filename)
# Process each CF data variable.
data_variables = (list(cf.cf_group.data_variables.values()) +
list(cf.cf_group.promoted.values()))
for cf_var in data_variables:
cube = _load_cube(engine, cf, cf_var, filename)
# Process any associated formula terms and attach
# the corresponding AuxCoordFactory.
try:
_load_aux_factory(engine, cube)
except ValueError as e:
warnings.warn('{}'.format(e))
# Perform any user registered callback function.
cube = iris.io.run_callback(callback, cube, cf_var, filename)
# Callback mechanism may return None, which must not be yielded
if cube is None:
continue
yield cube
class Saver(object):
"""A manager for saving netcdf files."""
def __init__(self, filename, netcdf_format):
"""
A manager for saving netcdf files.
Args:
* filename (string):
Name of the netCDF file to save the cube.
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
Returns:
None.
For example::
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube)
"""
if netcdf_format not in ['NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
raise ValueError('Unknown netCDF file format, got %r' %
netcdf_format)
# All persistent variables
#: CF name mapping with iris coordinates
self._name_coord_map = CFNameCoordMap()
#: List of dimension coordinates added to the file
self._dim_coords = []
#: List of grid mappings added to the file
self._coord_systems = []
#: A dictionary, listing dimension names and corresponding length
self._existing_dim = {}
#: A dictionary, mapping formula terms to owner cf variable name
self._formula_terms_cache = {}
#: NetCDF dataset
try:
self._dataset = netCDF4.Dataset(filename, mode='w',
format=netcdf_format)
except RuntimeError:
dir_name = os.path.dirname(filename)
if not os.path.isdir(dir_name):
msg = 'No such file or directory: {}'.format(dir_name)
raise IOError(msg)
if not os.access(dir_name, os.R_OK | os.W_OK):
msg = 'Permission denied: {}'.format(filename)
raise IOError(msg)
else:
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Flush any buffered data to the CF-netCDF file before closing."""
self._dataset.sync()
self._dataset.close()
def write(self, cube, local_keys=None, unlimited_dimensions=None,
zlib=False, complevel=4, shuffle=True, fletcher32=False,
contiguous=False, chunksizes=None, endian='native',
least_significant_digit=None):
"""
Wrapper for saving cubes to a NetCDF file.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Kwargs:
* local_keys (iterable of strings):
            An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
Explicit list of coordinate names (or coordinate objects)
corresponding to coordinate dimensions of `cube` to save with the
NetCDF dimension variable length 'UNLIMITED'. By default, the
outermost (first) dimension for each cube is used. Only the
'NETCDF4' format supports multiple 'UNLIMITED' dimensions. To save
no unlimited dimensions, use `unlimited_dimensions=[]` (an empty
list).
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using
gzip compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before
compressing the data (default `True`). This significantly improves
compression. Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk.
Default `False`. Setting to `True` for a variable with an unlimited
dimension will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of
the variable. A detailed discussion of HDF chunking and I/O
performance is available here:
http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html. Basically,
you want the chunk size for each dimension to match as closely as
possible the size of the data block that users will read from the
file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read
on a computer with the opposite format as the one used to create
the file, there may be some performance advantage to be gained by
setting the endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this
produces 'lossy', but significantly more efficient compression. For
example, if `least_significant_digit=1`, data will be quantized
using `numpy.around(scale*data)/scale`, where `scale = 2**bits`,
and `bits` is determined so that a precision of 0.1 is retained (in
this case `bits=4`). From
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal
place in unpacked data that is a reliable value". Default is
`None`, or no quantization, or 'lossless' compression.
Returns:
None.
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF
3 files that do not use HDF5.
.. deprecated:: 1.8.0
NetCDF default saving behaviour currently assigns the outermost
dimension as unlimited. This behaviour is to be deprecated, in
favour of no automatic assignment. To switch to the new behaviour,
set `iris.FUTURE.netcdf_no_unlimited` to True.
"""
if unlimited_dimensions is None:
if iris.FUTURE.netcdf_no_unlimited:
unlimited_dimensions = []
else:
_no_unlim_dep_warning()
cf_profile_available = (iris.site_configuration.get('cf_profile') not
in [None, False])
if cf_profile_available:
# Perform a CF profile of the cube. This may result in an exception
# being raised if mandatory requirements are not satisfied.
profile = iris.site_configuration['cf_profile'](cube)
# Get suitable dimension names.
dimension_names = self._get_dim_names(cube)
# Create the CF-netCDF data dimensions.
self._create_cf_dimensions(cube, dimension_names, unlimited_dimensions)
# Create the associated cube CF-netCDF data variable.
cf_var_cube = self._create_cf_data_variable(
cube, dimension_names, local_keys, zlib=zlib, complevel=complevel,
shuffle=shuffle, fletcher32=fletcher32, contiguous=contiguous,
chunksizes=chunksizes, endian=endian,
least_significant_digit=least_significant_digit)
# Add coordinate variables.
self._add_dim_coords(cube, dimension_names)
# Add the auxiliary coordinate variable names and associate the data
# variable to them
self._add_aux_coords(cube, cf_var_cube, dimension_names)
# Add the cell_measures variable names and associate the data
# variable to them
self._add_cell_measures(cube, cf_var_cube, dimension_names)
# Add the formula terms to the appropriate cf variables for each
# aux factory in the cube.
self._add_aux_factories(cube, cf_var_cube, dimension_names)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add global attributes taking into account local_keys.
global_attributes = {k: v for k, v in six.iteritems(cube.attributes)
if (k not in local_keys and
k.lower() != 'conventions')}
self.update_global_attributes(global_attributes)
if cf_profile_available:
cf_patch = iris.site_configuration.get('cf_patch')
if cf_patch is not None:
# Perform a CF patch of the dataset.
cf_patch(profile, self._dataset, cf_var_cube)
else:
msg = 'cf_profile is available but no {} defined.'.format(
'cf_patch')
warnings.warn(msg)
def update_global_attributes(self, attributes=None, **kwargs):
"""
Update the CF global attributes based on the provided
iterable/dictionary and/or keyword arguments.
Args:
* attributes (dict or iterable of key, value pairs):
CF global attributes to be updated.
"""
if attributes is not None:
# Handle sequence e.g. [('fruit', 'apple'), ...].
if not hasattr(attributes, 'keys'):
attributes = dict(attributes)
for attr_name in sorted(attributes):
self._dataset.setncattr(attr_name, attributes[attr_name])
for attr_name in sorted(kwargs):
self._dataset.setncattr(attr_name, kwargs[attr_name])
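    # Illustrative calls (not in the original source), assuming ``saver`` is
    # an open Saver instance; the attribute names are arbitrary examples:
    #   saver.update_global_attributes({'institution': 'somewhere'})
    #   saver.update_global_attributes([('source', 'example')], comment='hi')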
def _create_cf_dimensions(self, cube, dimension_names,
unlimited_dimensions=None):
"""
Create the CF-netCDF data dimensions.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` in which to lookup coordinates.
Kwargs:
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
List of coordinates to make unlimited. By default, the
outermost dimension is made unlimited.
Returns:
None.
"""
unlimited_dim_names = []
if (unlimited_dimensions is None and
not iris.FUTURE.netcdf_no_unlimited):
if dimension_names:
unlimited_dim_names.append(dimension_names[0])
else:
for coord in unlimited_dimensions:
try:
coord = cube.coord(name_or_coord=coord, dim_coords=True)
except iris.exceptions.CoordinateNotFoundError:
# coordinate isn't used for this cube, but it might be
# used for a different one
pass
else:
dim_name = self._get_coord_variable_name(cube, coord)
unlimited_dim_names.append(dim_name)
for dim_name in dimension_names:
if dim_name not in self._dataset.dimensions:
if dim_name in unlimited_dim_names:
size = None
else:
size = self._existing_dim[dim_name]
self._dataset.createDimension(dim_name, size)
def _add_aux_coords(self, cube, cf_var_cube, dimension_names):
"""
        Add auxiliary coordinates to the dataset and associate them with the
        data variable.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
auxiliary_coordinate_names = []
# Add CF-netCDF variables for the associated auxiliary coordinates.
for coord in sorted(cube.aux_coords, key=lambda coord: coord.name()):
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord)
self._name_coord_map.append(cf_name, coord)
else:
cf_name = self._name_coord_map.name(coord)
if cf_name is not None:
auxiliary_coordinate_names.append(cf_name)
# Add CF-netCDF auxiliary coordinate variable references to the
# CF-netCDF data variable.
if auxiliary_coordinate_names:
cf_var_cube.coordinates = ' '.join(
sorted(auxiliary_coordinate_names))
def _add_cell_measures(self, cube, cf_var_cube, dim_names):
"""
        Add cell measures to the dataset and associate them with the data
        variable.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
        * dim_names (list):
            Names associated with the dimensions of the cube.
"""
cell_measure_names = []
# Add CF-netCDF variables for the associated cell measures.
for cm in sorted(cube.cell_measures(), key=lambda cm: cm.name()):
# Create the associated cell measure CF-netCDF variable.
if cm not in self._name_coord_map.coords:
cf_name = self._create_cf_cell_measure_variable(cube,
dim_names,
cm)
self._name_coord_map.append(cf_name, cm)
else:
cf_name = self._name_coord_map.name(cm)
if cf_name is not None:
cell_measure_names.append('{}: {}'.format(cm.measure, cf_name))
# Add CF-netCDF cell measure variable references to the
# CF-netCDF data variable.
if cell_measure_names:
cf_var_cube.cell_measures = ' '.join(
sorted(cell_measure_names))
def _add_dim_coords(self, cube, dimension_names):
"""
Add coordinate variables to NetCDF dataset.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
# Ensure we create the netCDF coordinate variables first.
for coord in cube.dim_coords:
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord)
self._name_coord_map.append(cf_name, coord)
def _add_aux_factories(self, cube, cf_var_cube, dimension_names):
"""
Modifies the variables of the NetCDF dataset to represent
the presence of dimensionless vertical coordinates based on
the aux factories of the cube (if any).
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
        * cf_var_cube (:class:`netcdf.netcdf_variable`):
CF variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
primaries = []
for factory in cube.aux_factories:
factory_defn = _FACTORY_DEFNS.get(type(factory), None)
if factory_defn is None:
msg = 'Unable to determine formula terms ' \
'for AuxFactory: {!r}'.format(factory)
warnings.warn(msg)
else:
# Override `standard_name`, `long_name`, and `axis` of the
                # primary coord that signals the presence of a dimensionless
# vertical coord, then set the `formula_terms` attribute.
primary_coord = factory.dependencies[factory_defn.primary]
if primary_coord in primaries:
msg = 'Cube {!r} has multiple aux factories that share ' \
'a common primary coordinate {!r}. Unable to save ' \
'to netCDF as having multiple formula terms on a ' \
'single coordinate is not supported.'
raise ValueError(msg.format(cube, primary_coord.name()))
primaries.append(primary_coord)
cf_name = self._name_coord_map.name(primary_coord)
cf_var = self._dataset.variables[cf_name]
names = {key: self._name_coord_map.name(coord) for
key, coord in six.iteritems(factory.dependencies)}
formula_terms = factory_defn.formula_terms_format.format(
**names)
std_name = factory_defn.std_name
if hasattr(cf_var, 'formula_terms'):
if cf_var.formula_terms != formula_terms or \
cf_var.standard_name != std_name:
# TODO: We need to resolve this corner-case where
# the dimensionless vertical coordinate containing the
# formula_terms is a dimension coordinate of the
# associated cube and a new alternatively named
# dimensionless vertical coordinate is required with
# new formula_terms and a renamed dimension.
if cf_name in dimension_names:
                            msg = 'Unable to create dimensionless vertical ' \
'coordinate.'
raise ValueError(msg)
key = (cf_name, std_name, formula_terms)
name = self._formula_terms_cache.get(key)
if name is None:
# Create a new variable
name = self._create_cf_variable(cube,
dimension_names,
primary_coord)
cf_var = self._dataset.variables[name]
cf_var.standard_name = std_name
cf_var.axis = 'Z'
# Update the formula terms.
ft = formula_terms.split()
ft = [name if t == cf_name else t for t in ft]
cf_var.formula_terms = ' '.join(ft)
# Update the cache.
self._formula_terms_cache[key] = name
# Update the associated cube variable.
coords = cf_var_cube.coordinates.split()
coords = [name if c == cf_name else c for c in coords]
cf_var_cube.coordinates = ' '.join(coords)
else:
cf_var.standard_name = std_name
cf_var.axis = 'Z'
cf_var.formula_terms = formula_terms
def _get_dim_names(self, cube):
"""
Determine suitable CF-netCDF data dimension names.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Returns:
            List of dimension names with length equal to the number of
            dimensions in the cube.
"""
dimension_names = []
for dim in range(cube.ndim):
coords = cube.coords(dimensions=dim, dim_coords=True)
if coords:
coord = coords[0]
dim_name = self._get_coord_variable_name(cube, coord)
# Add only dimensions that have not already been added.
if coord not in self._dim_coords:
# Determine unique dimension name
while (dim_name in self._existing_dim or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update names added, current cube dim names used and
# unique coordinates added.
self._existing_dim[dim_name] = coord.shape[0]
dimension_names.append(dim_name)
self._dim_coords.append(coord)
else:
# Return the dim_name associated with the existing
# coordinate.
dim_name = self._name_coord_map.name(coord)
dimension_names.append(dim_name)
else:
# No CF-netCDF coordinates describe this data dimension.
dim_name = 'dim%d' % dim
if dim_name in self._existing_dim:
# Increment name if conflicted with one already existing.
if self._existing_dim[dim_name] != cube.shape[dim]:
while (dim_name in self._existing_dim and
self._existing_dim[dim_name] !=
cube.shape[dim] or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
else:
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
dimension_names.append(dim_name)
return dimension_names
def _cf_coord_identity(self, coord):
"""
        Determine suitable units from a given coordinate.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
Returns:
The (standard_name, long_name, unit) of the given
:class:`iris.coords.Coord` instance.
"""
units = str(coord.units)
# Set the 'units' of 'latitude' and 'longitude' coordinates specified
# in 'degrees' to 'degrees_north' and 'degrees_east' respectively,
# as defined in the CF conventions for netCDF files: sections 4.1 and
# 4.2.
if ((isinstance(coord.coord_system, iris.coord_systems.GeogCS) or
coord.coord_system is None) and coord.units == 'degrees'):
if coord.standard_name == "latitude":
units = 'degrees_north'
elif coord.standard_name == "longitude":
units = 'degrees_east'
return coord.standard_name, coord.long_name, units
def _ensure_valid_dtype(self, values, src_name, src_object):
# NetCDF3 does not support int64 or unsigned ints, so we check
# if we can store them as int32 instead.
if ((np.issubdtype(values.dtype, np.int64) or
np.issubdtype(values.dtype, np.unsignedinteger)) and
self._dataset.file_format in ('NETCDF3_CLASSIC',
'NETCDF3_64BIT')):
# Cast to an integer type supported by netCDF3.
if not np.can_cast(values.max(), np.int32) or \
not np.can_cast(values.min(), np.int32):
msg = 'The data type of {} {!r} is not supported by {} and' \
' its values cannot be safely cast to a supported' \
' integer type.'
msg = msg.format(src_name, src_object,
self._dataset.file_format)
raise ValueError(msg)
values = values.astype(np.int32)
return values
def _create_cf_bounds(self, coord, cf_var, cf_name):
"""
Create the associated CF-netCDF bounds variable.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
* cf_var:
CF-netCDF variable
* cf_name (string):
name of the CF-NetCDF variable.
Returns:
None
"""
if coord.has_bounds():
# Get the values in a form which is valid for the file format.
bounds = self._ensure_valid_dtype(coord.bounds,
'the bounds of coordinate',
coord)
n_bounds = bounds.shape[-1]
if n_bounds == 2:
bounds_dimension_name = 'bnds'
else:
bounds_dimension_name = 'bnds_%s' % n_bounds
if bounds_dimension_name not in self._dataset.dimensions:
# Create the bounds dimension with the appropriate extent.
self._dataset.createDimension(bounds_dimension_name, n_bounds)
cf_var.bounds = cf_name + '_bnds'
cf_var_bounds = self._dataset.createVariable(
cf_var.bounds, bounds.dtype.newbyteorder('='),
cf_var.dimensions + (bounds_dimension_name,))
cf_var_bounds[:] = bounds
def _get_cube_variable_name(self, cube):
"""
Returns a CF-netCDF variable name for the given cube.
Args:
        * cube (:class:`iris.cube.Cube`):
An instance of a cube for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if cube.var_name is not None:
cf_name = cube.var_name
else:
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(cube.name().lower().split())
return cf_name
def _get_coord_variable_name(self, cube, coord):
"""
Returns a CF-netCDF variable name for the given coordinate.
Args:
* cube (:class:`iris.cube.Cube`):
The cube that contains the given coordinate.
* coord (:class:`iris.coords.Coord`):
An instance of a coordinate for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if coord.var_name is not None:
cf_name = coord.var_name
else:
name = coord.standard_name or coord.long_name
if not name or set(name).intersection(string.whitespace):
# Auto-generate name based on associated dimensions.
name = ''
for dim in cube.coord_dims(coord):
name += 'dim{}'.format(dim)
# Handle scalar coordinate (dims == ()).
if not name:
name = 'unknown_scalar'
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(name.lower().split())
return cf_name
def _create_cf_cell_measure_variable(self, cube, dimension_names,
cell_measure):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given cell_measure.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* cell_measure (:class:`iris.coords.CellMeasure`):
The cell measure to be saved to CF-netCDF file.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
cf_name = self._get_coord_variable_name(cube, cell_measure)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Derive the data dimension names for the coordinate.
cf_dimensions = [dimension_names[dim] for dim in
cube.cell_measure_dims(cell_measure)]
# Get the values in a form which is valid for the file format.
data = self._ensure_valid_dtype(cell_measure.data, 'coordinate',
cell_measure)
# Create the CF-netCDF variable.
cf_var = self._dataset.createVariable(
cf_name, data.dtype.newbyteorder('='), cf_dimensions)
# Add the data to the CF-netCDF variable.
cf_var[:] = data
if cell_measure.units != 'unknown':
cf_var.units = str(cell_measure.units)
if cell_measure.standard_name is not None:
cf_var.standard_name = cell_measure.standard_name
if cell_measure.long_name is not None:
cf_var.long_name = cell_measure.long_name
# Add any other custom coordinate attributes.
for name in sorted(cell_measure.attributes):
value = cell_measure.attributes[name]
# Don't clobber existing attributes.
if not hasattr(cf_var, name):
setattr(cf_var, name, value)
return cf_name
def _create_cf_variable(self, cube, dimension_names, coord):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given coordinate. If required, also create the CF-netCDF bounds
variable and associated dimension.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* coord (:class:`iris.coords.Coord`):
The coordinate to be saved to CF-netCDF file.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
cf_name = self._get_coord_variable_name(cube, coord)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Derive the data dimension names for the coordinate.
cf_dimensions = [dimension_names[dim] for dim in
cube.coord_dims(coord)]
if np.issubdtype(coord.points.dtype, np.str):
string_dimension_depth = coord.points.dtype.itemsize
if coord.points.dtype.kind == 'U':
string_dimension_depth //= 4
string_dimension_name = 'string%d' % string_dimension_depth
# Determine whether to create the string length dimension.
if string_dimension_name not in self._dataset.dimensions:
self._dataset.createDimension(string_dimension_name,
string_dimension_depth)
# Add the string length dimension to dimension names.
cf_dimensions.append(string_dimension_name)
# Create the label coordinate variable.
cf_var = self._dataset.createVariable(cf_name, '|S1',
cf_dimensions)
# Add the payload to the label coordinate variable.
if len(cf_dimensions) == 1:
cf_var[:] = list('%- *s' % (string_dimension_depth,
coord.points[0]))
else:
for index in np.ndindex(coord.points.shape):
index_slice = tuple(list(index) + [slice(None, None)])
cf_var[index_slice] = list('%- *s' %
(string_dimension_depth,
coord.points[index]))
else:
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
if coord in cf_coordinates:
# By definition of a CF-netCDF coordinate variable this
# coordinate must be 1-D and the name of the CF-netCDF variable
# must be the same as its dimension name.
cf_name = cf_dimensions[0]
# Get the values in a form which is valid for the file format.
points = self._ensure_valid_dtype(coord.points, 'coordinate',
coord)
# Create the CF-netCDF variable.
cf_var = self._dataset.createVariable(
cf_name, points.dtype.newbyteorder('='), cf_dimensions)
# Add the axis attribute for spatio-temporal CF-netCDF coordinates.
if coord in cf_coordinates:
axis = iris.util.guess_coord_axis(coord)
if axis is not None and axis.lower() in SPATIO_TEMPORAL_AXES:
cf_var.axis = axis.upper()
# Add the data to the CF-netCDF variable.
cf_var[:] = points
# Create the associated CF-netCDF bounds variable.
self._create_cf_bounds(coord, cf_var, cf_name)
# Deal with CF-netCDF units and standard name.
standard_name, long_name, units = self._cf_coord_identity(coord)
if units != 'unknown':
cf_var.units = units
if standard_name is not None:
cf_var.standard_name = standard_name
if long_name is not None:
cf_var.long_name = long_name
# Add the CF-netCDF calendar attribute.
if coord.units.calendar:
cf_var.calendar = coord.units.calendar
# Add any other custom coordinate attributes.
for name in sorted(coord.attributes):
value = coord.attributes[name]
if name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
name = 'um_stash_source'
value = str(value)
# Don't clobber existing attributes.
if not hasattr(cf_var, name):
setattr(cf_var, name, value)
return cf_name
def _create_cf_cell_methods(self, cube, dimension_names):
"""
Create CF-netCDF string representation of a cube cell methods.
Args:
        * cube (:class:`iris.cube.Cube`):
            A :class:`iris.cube.Cube` to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
Returns:
CF-netCDF string representation of a cube cell methods.
"""
cell_methods = []
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
for cm in cube.cell_methods:
names = ''
for name in cm.coord_names:
coord = cube.coords(name)
if coord:
coord = coord[0]
if coord in cf_coordinates:
name = dimension_names[cube.coord_dims(coord)[0]]
names += '%s: ' % name
interval = ' '.join(['interval: %s' % interval for interval in
cm.intervals or []])
comment = ' '.join(['comment: %s' % comment for comment in
cm.comments or []])
extra = ' '.join([interval, comment]).strip()
if extra:
extra = ' (%s)' % extra
cell_methods.append(names + cm.method + extra)
return ' '.join(cell_methods)
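    # Illustrative note (not in the original source): for a cube carrying a
    # single cell method ``mean`` over ``time`` with interval ``1 hour``, the
    # string built above is 'time: mean (interval: 1 hour)', matching the CF
    # cell_methods syntax.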
def _create_cf_grid_mapping(self, cube, cf_var_cube):
"""
Create CF-netCDF grid mapping variable and associated CF-netCDF
data variable grid mapping attribute.
Args:
        * cube (:class:`iris.cube.Cube`):
            A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
Returns:
None
"""
cs = cube.coord_system('CoordSystem')
if cs is not None:
# Grid var not yet created?
if cs not in self._coord_systems:
while cs.grid_mapping_name in self._dataset.variables:
cs.grid_mapping_name = (
self._increment_name(cs.grid_mapping_name))
cf_var_grid = self._dataset.createVariable(
cs.grid_mapping_name, np.int32)
cf_var_grid.grid_mapping_name = cs.grid_mapping_name
def add_ellipsoid(ellipsoid):
cf_var_grid.longitude_of_prime_meridian = (
ellipsoid.longitude_of_prime_meridian)
semi_major = ellipsoid.semi_major_axis
semi_minor = ellipsoid.semi_minor_axis
if semi_minor == semi_major:
cf_var_grid.earth_radius = semi_major
else:
cf_var_grid.semi_major_axis = semi_major
cf_var_grid.semi_minor_axis = semi_minor
# latlon
if isinstance(cs, iris.coord_systems.GeogCS):
add_ellipsoid(cs)
# rotated latlon
elif isinstance(cs, iris.coord_systems.RotatedGeogCS):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.grid_north_pole_latitude = (
cs.grid_north_pole_latitude)
cf_var_grid.grid_north_pole_longitude = (
cs.grid_north_pole_longitude)
cf_var_grid.north_pole_grid_longitude = (
cs.north_pole_grid_longitude)
# tmerc
elif isinstance(cs, iris.coord_systems.TransverseMercator):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_central_meridian = (
cs.longitude_of_central_meridian)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.scale_factor_at_central_meridian = (
cs.scale_factor_at_central_meridian)
# osgb (a specific tmerc)
elif isinstance(cs, iris.coord_systems.OSGB):
warnings.warn('OSGB coordinate system not yet handled')
# other
else:
warnings.warn('Unable to represent the horizontal '
'coordinate system. The coordinate system '
'type %r is not yet implemented.' % type(cs))
self._coord_systems.append(cs)
# Refer to grid var
cf_var_cube.grid_mapping = cs.grid_mapping_name
def _create_cf_data_variable(self, cube, dimension_names, local_keys=None,
**kwargs):
"""
Create CF-netCDF data variable for the cube and any associated grid
mapping.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
String names for each dimension of the cube.
Kwargs:
* local_keys (iterable of strings):
            An iterable of cube attribute keys. Any cube attributes
with matching keys will become attributes on the data variable.
All other keywords are passed through to the dataset's `createVariable`
method.
Returns:
The newly created CF-netCDF data variable.
"""
cf_name = self._get_cube_variable_name(cube)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# if netcdf3 avoid streaming due to dtype handling
if (not cube.has_lazy_data()
or self._dataset.file_format in ('NETCDF3_CLASSIC',
'NETCDF3_64BIT')):
# Determine whether there is a cube MDI value.
fill_value = None
if isinstance(cube.data, ma.core.MaskedArray):
fill_value = cube.data.fill_value
# Get the values in a form which is valid for the file format.
data = self._ensure_valid_dtype(cube.data, 'cube', cube)
# Create the cube CF-netCDF data variable with data payload.
cf_var = self._dataset.createVariable(
cf_name, data.dtype.newbyteorder('='), dimension_names,
fill_value=fill_value, **kwargs)
cf_var[:] = data
else:
# Create the cube CF-netCDF data variable.
# Explicitly assign the fill_value, which will be the type default
# in the case of an unmasked array.
cf_var = self._dataset.createVariable(
cf_name, cube.lazy_data().dtype.newbyteorder('='),
dimension_names, fill_value=cube.lazy_data().fill_value,
**kwargs)
# stream the data
biggus.save([cube.lazy_data()], [cf_var], masked=True)
if cube.standard_name:
cf_var.standard_name = cube.standard_name
if cube.long_name:
cf_var.long_name = cube.long_name
if cube.units != 'unknown':
cf_var.units = str(cube.units)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add any cube attributes whose keys are in local_keys as
# CF-netCDF data variable attributes.
attr_names = set(cube.attributes).intersection(local_keys)
for attr_name in sorted(attr_names):
# Do not output 'conventions' attribute.
if attr_name.lower() == 'conventions':
continue
value = cube.attributes[attr_name]
if attr_name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
attr_name = 'um_stash_source'
value = str(value)
if attr_name == "ukmo__process_flags":
value = " ".join([x.replace(" ", "_") for x in value])
if attr_name in _CF_GLOBAL_ATTRS:
msg = '{attr_name!r} is being added as CF data variable ' \
'attribute, but {attr_name!r} should only be a CF ' \
'global attribute.'.format(attr_name=attr_name)
warnings.warn(msg)
setattr(cf_var, attr_name, value)
# Create the CF-netCDF data variable cell method attribute.
cell_methods = self._create_cf_cell_methods(cube, dimension_names)
if cell_methods:
cf_var.cell_methods = cell_methods
# Create the CF-netCDF grid mapping.
self._create_cf_grid_mapping(cube, cf_var)
return cf_var
def _increment_name(self, varname):
"""
        Increment the numeric suffix of a name, or begin one if absent.
        Used to avoid conflicts between variable names by incrementing a
        name so that it can be distinguished from others already in use.
Args:
* varname (string):
Variable name to increment.
Returns:
Incremented varname.
"""
num = 0
try:
name, endnum = varname.rsplit('_', 1)
if endnum.isdigit():
num = int(endnum) + 1
varname = name
except ValueError:
pass
return '{}_{}'.format(varname, num)
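    # Illustrative behaviour (not in the original source) of the increment
    # rule implemented above:
    #   'air_temperature'   -> 'air_temperature_0'
    #   'air_temperature_0' -> 'air_temperature_1'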
def save(cube, filename, netcdf_format='NETCDF4', local_keys=None,
unlimited_dimensions=None, zlib=False, complevel=4, shuffle=True,
fletcher32=False, contiguous=False, chunksizes=None, endian='native',
least_significant_digit=None):
"""
Save cube(s) to a netCDF file, given the cube and the filename.
* Iris will write CF 1.5 compliant NetCDF files.
* The attributes dictionaries on each cube in the saved cube list
will be compared and common attributes saved as NetCDF global
attributes where appropriate.
* Keyword arguments specifying how to save the data are applied
to each cube. To use different settings for different cubes, use
the NetCDF Context manager (:class:`~Saver`) directly.
* The save process will stream the data payload to the file using biggus,
enabling large data payloads to be saved and maintaining the 'lazy'
status of the cube's data payload, unless the netcdf_format is explicitly
      specified to be 'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'.
Args:
* cube (:class:`iris.cube.Cube` or :class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or other
iterable of cubes to be saved to a netCDF file.
* filename (string):
Name of the netCDF file to save the cube(s).
Kwargs:
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
* local_keys (iterable of strings):
        An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
Explicit list of coordinate names (or coordinate objects) corresponding
to coordinate dimensions of `cube` to save with the NetCDF dimension
variable length 'UNLIMITED'. By default, the outermost (first)
dimension for each cube is used. Only the 'NETCDF4' format supports
multiple 'UNLIMITED' dimensions. To save no unlimited dimensions, use
`unlimited_dimensions=[]` (an empty list).
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using gzip
compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression desired
(default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before compressing
the data (default `True`). This significantly improves compression.
Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk. Default
`False`. Setting to `True` for a variable with an unlimited dimension
will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of the
variable. A detailed discussion of HDF chunking and I/O performance is
available here: http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html.
Basically, you want the chunk size for each dimension to match as
closely as possible the size of the data block that users will read
from the file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read on a
computer with the opposite format as the one used to create the file,
there may be some performance advantage to be gained by setting the
endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this produces
'lossy', but significantly more efficient compression. For example, if
`least_significant_digit=1`, data will be quantized using
`numpy.around(scale*data)/scale`, where `scale = 2**bits`, and `bits`
is determined so that a precision of 0.1 is retained (in this case
`bits=4`). From
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal place
in unpacked data that is a reliable value". Default is `None`, or no
quantization, or 'lossless' compression.
Returns:
None.
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF 3
files that do not use HDF5.
.. seealso::
NetCDF Context manager (:class:`~Saver`).
.. deprecated:: 1.8.0
NetCDF default saving behaviour currently assigns the outermost
dimensions to unlimited. This behaviour is to be deprecated, in
favour of no automatic assignment. To switch to the new behaviour,
set `iris.FUTURE.netcdf_no_unlimited` to True.
"""
if unlimited_dimensions is None:
if iris.FUTURE.netcdf_no_unlimited:
unlimited_dimensions = []
else:
_no_unlim_dep_warning()
if isinstance(cube, iris.cube.Cube):
cubes = iris.cube.CubeList()
cubes.append(cube)
else:
cubes = cube
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
# Determine the attribute keys that are common across all cubes and
# thereby extend the collection of local_keys for attributes
# that should be attributes on data variables.
attributes = cubes[0].attributes
common_keys = set(attributes)
for cube in cubes[1:]:
keys = set(cube.attributes)
local_keys.update(keys.symmetric_difference(common_keys))
common_keys.intersection_update(keys)
different_value_keys = []
for key in common_keys:
if np.any(attributes[key] != cube.attributes[key]):
different_value_keys.append(key)
common_keys.difference_update(different_value_keys)
local_keys.update(different_value_keys)
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube, local_keys, unlimited_dimensions, zlib, complevel,
shuffle, fletcher32, contiguous, chunksizes, endian,
least_significant_digit)
conventions = CF_CONVENTIONS_VERSION
# Perform a CF patch of the conventions attribute.
cf_profile_available = (iris.site_configuration.get('cf_profile') not
in [None, False])
if cf_profile_available:
conventions_patch = iris.site_configuration.get(
'cf_patch_conventions')
if conventions_patch is not None:
conventions = conventions_patch(conventions)
else:
msg = 'cf_profile is available but no {} defined.'.format(
'cf_patch_conventions')
warnings.warn(msg)
# Add conventions attribute.
sman.update_global_attributes(Conventions=conventions)
def _no_unlim_dep_warning():
msg = ('NetCDF default saving behaviour currently assigns the '
'outermost dimensions to unlimited. This behaviour is to be '
'deprecated, in favour of no automatic assignment. To switch '
'to the new behaviour, set iris.FUTURE.netcdf_no_unlimited to '
'True.')
warnings.warn(msg)
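# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# hedged example of driving the module-level ``save`` function and the
# ``Saver`` context manager defined above.  The cube contents and the output
# filenames are assumptions chosen purely for demonstration.
if __name__ == '__main__':
    import numpy as np
    import iris.coords
    import iris.cube
    # Build a small 2-D cube with a 'time' dimension coordinate.
    data = np.arange(12, dtype=np.float32).reshape(3, 4)
    time = iris.coords.DimCoord(np.arange(3, dtype=np.float64),
                                standard_name='time',
                                units='hours since 2000-01-01')
    cube = iris.cube.Cube(data, long_name='example_field', units='K',
                          dim_coords_and_dims=[(time, 0)])
    # One-shot save with gzip compression and an explicit unlimited dimension.
    save(cube, 'example.nc', zlib=True, complevel=4,
         unlimited_dimensions=['time'])
    # The same write driven through the Saver context manager directly.
    with Saver('example_saver.nc', 'NETCDF4') as sman:
        sman.write(cube, unlimited_dimensions=['time'])
        sman.update_global_attributes(Conventions=CF_CONVENTIONS_VERSION)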
|
davidnmurray/iris
|
lib/iris/fileformats/netcdf.py
|
Python
|
gpl-3.0
| 73,435
|
[
"NetCDF"
] |
9472aa1ac0a88d9d2cf03cc7ace52012c1bfcebfbac042f64877dd8f8a5995fc
|
import matplotlib.pyplot as plt
from gpaw import GPAW
calc = GPAW('ferro.gpw', txt=None)
ef = calc.get_fermi_level()
# Plot s, p, d projected LDOS:
for c in 'spd':
energies, ldos = calc.get_orbital_ldos(a=0, spin=0, angular=c, width=0.4)
plt.plot(energies - ef, ldos, label=c + '-up')
energies, ldos = calc.get_orbital_ldos(a=0, spin=1, angular=c, width=0.4)
plt.plot(energies - ef, ldos, label=c + '-down')
plt.legend()
plt.show()
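# A possible extension (not part of the original exercise script): label the
# axes and also write the figure to a file; 'pdos.png' is an arbitrary name
# chosen for illustration.  These calls would go before plt.show():
# plt.xlabel('Energy relative to the Fermi level (eV)')
# plt.ylabel('Projected LDOS')
# plt.savefig('pdos.png')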
|
robwarm/gpaw-symm
|
doc/exercises/dos/pdos.py
|
Python
|
gpl-3.0
| 459
|
[
"GPAW"
] |
fcdd5a9be960cc085f4ba9713fa63cb9efbec963f984421661a13c093eea4d6e
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Unit test for timeseries module
"""
import os.path
from itertools import (chain, product)
from unittest import mock
import pytest
import numpy
from numpy import testing as nptest
from scipy import signal
from astropy import units
from ...frequencyseries import (FrequencySeries, SpectralVariance)
from ...segments import (Segment, SegmentList, DataQualityFlag)
from ...signal import filter_design
from ...signal.window import planck
from ...spectrogram import Spectrogram
from ...table import EventTable
from ...testing import (mocks, utils)
from ...testing.errors import (
pytest_skip_cvmfs_read_error,
pytest_skip_network_error,
)
from ...types import Index
from ...time import LIGOTimeGPS
from ...utils.misc import null_context
from .. import (TimeSeries, TimeSeriesDict, TimeSeriesList, StateTimeSeries)
from ..io.gwf import get_default_gwf_api
from .test_core import (TestTimeSeriesBase as _TestTimeSeriesBase,
TestTimeSeriesBaseDict as _TestTimeSeriesBaseDict,
TestTimeSeriesBaseList as _TestTimeSeriesBaseList)
SKIP_CVMFS_GWOSC = pytest.mark.skipif(
not os.path.isdir('/cvmfs/gwosc.osgstorage.org/'),
reason="GWOSC CVMFS repository not available",
)
SKIP_FRAMECPP = utils.skip_missing_dependency('LDAStools.frameCPP')
SKIP_FRAMEL = utils.skip_missing_dependency('framel')
SKIP_LAL = utils.skip_missing_dependency('lal')
SKIP_LALFRAME = utils.skip_missing_dependency('lalframe')
SKIP_PYCBC_PSD = utils.skip_missing_dependency('pycbc.psd')
try:
get_default_gwf_api()
except ImportError:
HAVE_GWF_API = False
else:
HAVE_GWF_API = True
SKIP_GWF_API = pytest.mark.skipif(not HAVE_GWF_API, reason="no GWF API")
GWF_APIS = [
pytest.param(None, marks=SKIP_GWF_API),
pytest.param('lalframe', marks=SKIP_LALFRAME),
pytest.param('framecpp', marks=SKIP_FRAMECPP),
pytest.param('framel', marks=SKIP_FRAMEL),
]
LIVETIME = DataQualityFlag(
name='X1:TEST-FLAG:1',
active=SegmentList([
Segment(0, 32),
Segment(34, 34.5),
]),
known=SegmentList([Segment(0, 64)]),
isgood=True,
)
GWOSC_DATAFIND_SERVER = "datafind.gw-openscience.org"
GWOSC_GW150914_IFO = "L1"
GWOSC_GW150914_CHANNEL = "L1:GWOSC-16KHZ_R1_STRAIN"
NDS2_GW150914_CHANNEL = "L1:DCS-CALIB_STRAIN_C02"
GWOSC_GW150914_FRAMETYPE = "L1_LOSC_16_V1"
GWOSC_GW150914 = 1126259462
GWOSC_GW150914_SEGMENT = Segment(GWOSC_GW150914-2, GWOSC_GW150914+2)
GWOSC_GW150914_DQ_BITS = {
'hdf5': [
'data present',
'passes cbc CAT1 test',
'passes cbc CAT2 test',
'passes cbc CAT3 test',
'passes burst CAT1 test',
'passes burst CAT2 test',
'passes burst CAT3 test',
],
'gwf': [
'DATA',
'CBC_CAT1',
'CBC_CAT2',
'CBC_CAT3',
'BURST_CAT1',
'BURST_CAT2',
'BURST_CAT3',
],
}
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
def _gwosc_cvmfs(func):
"""Decorate ``func`` with all necessary CVMFS-related decorators
"""
for dec in (
pytest.mark.cvmfs,
pytest_skip_cvmfs_read_error,
SKIP_CVMFS_GWOSC,
SKIP_FRAMECPP,
):
func = dec(func)
return func
class TestTimeSeries(_TestTimeSeriesBase):
TEST_CLASS = TimeSeries
# -- fixtures -------------------------------
@pytest.fixture(scope='class')
@pytest_skip_network_error
def gw150914(self):
return self.TEST_CLASS.fetch_open_data(
GWOSC_GW150914_IFO,
*GWOSC_GW150914_SEGMENT,
)
@pytest.fixture(scope='class')
@pytest_skip_network_error
def gw150914_16384(self):
return self.TEST_CLASS.fetch_open_data(
GWOSC_GW150914_IFO,
*GWOSC_GW150914_SEGMENT,
sample_rate=16384,
)
# -- test class functionality ---------------
def test_ligotimegps(self):
# test that LIGOTimeGPS works
array = self.create(t0=LIGOTimeGPS(0))
assert array.t0.value == 0
array.t0 = LIGOTimeGPS(10)
assert array.t0.value == 10
array.x0 = LIGOTimeGPS(1000000000)
assert array.t0.value == 1000000000
# check epoch access
array.epoch = LIGOTimeGPS(10)
assert array.t0.value == 10
def test_epoch(self):
array = self.create()
assert array.epoch.gps == array.x0.value
# -- test I/O -------------------------------
@pytest.mark.parametrize('format', ['txt', 'csv'])
def test_read_write_ascii(self, array, format):
utils.test_read_write(
array, format,
assert_equal=utils.assert_quantity_sub_equal,
assert_kw={'exclude': ['name', 'channel', 'unit']})
def test_read_ascii_header(self, tmpdir):
"""Check that ASCII files with headers are read without extra options
[regression: https://github.com/gwpy/gwpy/issues/1473]
"""
txt = tmpdir / "text.txt"
txt.write_text(
"# time (s)\tdata (strain)\n0\t1\n1\t2\n2\t3",
encoding="utf-8",
)
data = self.TEST_CLASS.read(txt, format="txt")
utils.assert_array_equal(data.times, Index((0, 1, 2), unit="s"))
utils.assert_array_equal(data.value, (1, 2, 3))
@pytest.mark.parametrize('api', GWF_APIS)
def test_read_write_gwf(self, tmp_path, api):
array = self.create(name='TEST')
# map API to format name
if api is None:
fmt = 'gwf'
else:
fmt = 'gwf.%s' % api
# test basic write/read
try:
utils.test_read_write(
array, fmt, extension='gwf', read_args=[array.name],
assert_equal=utils.assert_quantity_sub_equal,
assert_kw={'exclude': ['channel']})
except ImportError as e: # pragma: no-cover
pytest.skip(str(e))
# test read keyword arguments
tmp = tmp_path / "test.gwf"
array.write(tmp, format=fmt)
def read_(**kwargs):
return type(array).read(tmp, array.name, format=fmt,
**kwargs)
# test start, end
start, end = array.span.contract(10)
t = read_(start=start, end=end)
utils.assert_quantity_sub_equal(t, array.crop(start, end),
exclude=['channel'])
assert t.span == (start, end)
t = read_(start=start)
utils.assert_quantity_sub_equal(t, array.crop(start=start),
exclude=['channel'])
t = read_(end=end)
utils.assert_quantity_sub_equal(t, array.crop(end=end),
exclude=['channel'])
@pytest.mark.parametrize('api', GWF_APIS)
def test_read_write_gwf_deprecated_kwargs(self, tmp_path, api):
fmt = "gwf" if api is None else "gwf." + api
array = self.create(name='TEST')
tmp = tmp_path / "test.gwf"
array.write(tmp, format=fmt)
# test dtype - DEPRECATED
with pytest.deprecated_call():
t = self.TEST_CLASS.read(
tmp,
array.name,
format=fmt,
dtype='float32',
)
assert t.dtype is numpy.dtype('float32')
with pytest.deprecated_call():
t = self.TEST_CLASS.read(
tmp,
array.name,
format=fmt,
dtype={array.name: 'float64'},
)
assert t.dtype is numpy.dtype('float64')
@pytest.mark.parametrize('api', GWF_APIS)
def test_read_write_gwf_gps_errors(self, tmp_path, api):
fmt = "gwf" if api is None else "gwf." + api
array = self.create(name='TEST')
tmp = tmp_path / "test.gwf"
array.write(tmp, format=fmt)
# check that reading past the end of the array fails
with pytest.raises((ValueError, RuntimeError)):
self.TEST_CLASS.read(
tmp,
array.name,
format=fmt,
start=array.span[1],
)
# check that reading before the start of the array also fails
with pytest.raises((ValueError, RuntimeError)):
self.TEST_CLASS.read(
tmp,
array.name,
format=fmt,
end=array.span[0]-1,
)
@pytest.mark.parametrize('api', GWF_APIS)
def test_read_write_gwf_multiple(self, tmp_path, api):
fmt = "gwf" if api is None else "gwf." + api
a1 = self.create(name='TEST')
a2 = self.create(name='TEST', t0=a1.span[1], dt=a1.dx)
tmp1 = tmp_path / "test1.gwf"
tmp2 = tmp_path / "test3.gwf"
a1.write(tmp1, format=fmt)
a2.write(tmp2, format=fmt)
cache = [tmp1, tmp2]
comb = self.TEST_CLASS.read(cache, 'TEST', format=fmt, nproc=2)
utils.assert_quantity_sub_equal(
comb, a1.append(a2, inplace=False),
exclude=['channel'])
@pytest.mark.parametrize('api', [
pytest.param('framecpp', marks=SKIP_FRAMECPP),
])
def test_read_write_gwf_error(self, tmp_path, api, gw150914):
tmp = tmp_path / "test.gwf"
gw150914.write(tmp, format="gwf.{}".format(api))
with pytest.raises(ValueError) as exc:
self.TEST_CLASS.read(tmp, "another channel",
format="gwf.{}".format(api))
assert str(exc.value) == (
"no Fr{Adc,Proc,Sim}Data structures with the "
"name another channel"
)
with pytest.raises(ValueError) as exc:
self.TEST_CLASS.read(
tmp,
gw150914.name,
start=gw150914.span[0]-1,
end=gw150914.span[0],
format="gwf.{}".format(api),
)
assert str(exc.value).startswith(
"Failed to read {0!r} from {1!r}".format(gw150914.name, str(tmp))
)
@SKIP_LALFRAME
def test_read_gwf_scaled_lalframe(self):
with pytest.warns(None) as record:
data = self.TEST_CLASS.read(
utils.TEST_GWF_FILE,
"L1:LDAS-STRAIN",
format="gwf.lalframe",
)
assert not record.list # no warning
with pytest.warns(UserWarning):
data2 = self.TEST_CLASS.read(
utils.TEST_GWF_FILE,
"L1:LDAS-STRAIN",
format="gwf.lalframe",
scaled=True,
)
utils.assert_quantity_sub_equal(data, data2)
@SKIP_FRAMECPP # we need framecpp to extract frdata types
@pytest.mark.parametrize("ctype", ("adc", "proc", "sim", None))
@pytest.mark.parametrize("api", GWF_APIS)
def test_write_gwf_type(self, gw150914, tmp_path, api, ctype):
from ...io.gwf import get_channel_type
# on debian, python=3, python-ldas-tools-framecpp < 2.6.9,
# the simdata test causes a segfault
import platform
import sys
if (
api == "framecpp"
and ctype == "sim"
and sys.version_info[0] >= 3
and "debian" in platform.platform()
):
pytest.xfail(
"reading Sim data with "
"python-ldas-tools-framecpp < 2.6.9 is broken"
)
fmt = "gwf" if api is None else "gwf." + api
expected_ctype = ctype if ctype else "proc"
tmp = tmp_path / "test.gwf"
gw150914.write(tmp, type=ctype, format=fmt)
assert get_channel_type(gw150914.name, tmp) == expected_ctype
try:
new = type(gw150914).read(tmp, gw150914.name, format=fmt)
except OverflowError:
# python-ldas-tools-framecpp < 2.6.9
if api == "framecpp" and ctype == "sim":
pytest.xfail(
"reading Sim data with "
"python-ldas-tools-framecpp < 2.6.9 is broken"
)
raise
# epoch seems to mismatch at O(1e-12), which is unfortunate
utils.assert_quantity_sub_equal(
gw150914,
new,
exclude=("channel", "x0"),
)
@pytest.mark.parametrize("api", GWF_APIS)
def test_write_gwf_channel_name(self, tmp_path, api):
"""Test that writing GWF when `channel` is set but `name` is not
uses the `channel` name
"""
array = self.create(channel="data")
assert not array.name
tmp = tmp_path / "test.gwf"
fmt = "gwf" if api is None else "gwf." + api
array.write(tmp, format=fmt)
array2 = type(array).read(tmp, str(array.channel), format="gwf")
assert array2.name == str(array.channel)
utils.assert_quantity_sub_equal(
array,
array2,
exclude=("name", "channel"),
)
@pytest.mark.parametrize('ext', ('hdf5', 'h5'))
@pytest.mark.parametrize('channel', [
None,
'test',
'X1:TEST-CHANNEL',
])
def test_read_write_hdf5(self, tmp_path, ext, channel):
array = self.create()
array.channel = channel
tmp = tmp_path / "test.{}".format(ext)
# check array with no name fails
with pytest.raises(ValueError) as exc:
array.write(tmp, overwrite=True)
assert str(exc.value).startswith('Cannot determine HDF5 path')
array.name = 'TEST'
# write array (with auto-identify)
array.write(tmp, overwrite=True)
# check reading gives the same data (with/without auto-identify)
ts = type(array).read(tmp, format='hdf5')
utils.assert_quantity_sub_equal(array, ts)
ts = type(array).read(tmp)
utils.assert_quantity_sub_equal(array, ts)
# check that we can't then write the same data again
with pytest.raises(IOError):
array.write(tmp)
with pytest.raises((IOError, OSError, RuntimeError, ValueError)):
array.write(tmp, append=True)
# check reading with start/end works
start, end = array.span.contract(25)
t = type(array).read(tmp, start=start, end=end)
utils.assert_quantity_sub_equal(t, array.crop(start, end))
def test_read_write_wav(self):
array = self.create(dtype='float32')
utils.test_read_write(
array, 'wav', read_kw={'mmap': True}, write_kw={'scale': 1},
assert_equal=utils.assert_quantity_sub_equal,
assert_kw={'exclude': ['unit', 'name', 'channel', 'x0']})
@pytest.mark.parametrize("pre, post", [
pytest.param(None, None, id="none"),
pytest.param(0, 0, id="zero"),
pytest.param(None, 1, id="right"),
pytest.param(1, None, id="left"),
pytest.param(1, 1, id="both"),
])
def test_read_pad(self, pre, post):
a = self.TEST_CLASS.read(
utils.TEST_HDF5_FILE,
"H1:LDAS-STRAIN",
)
start = None if pre is None else a.span[0] - pre
end = None if post is None else a.span[1] + post
b = self.TEST_CLASS.read(
utils.TEST_HDF5_FILE,
"H1:LDAS-STRAIN",
pad=0.,
start=start,
end=end,
)
pres = 0 if not pre else int(pre * a.sample_rate.value)
posts = 0 if not post else int(post * a.sample_rate.value)
utils.assert_quantity_sub_equal(
a.pad(
(pres, posts),
mode="constant",
constant_values=(0,),
),
b,
)
def test_read_pad_raise(self):
"""Check that `TimeSeries.read` with `gap='raise'` actually
raises appropriately.
[regression: https://github.com/gwpy/gwpy/issues/1211]
"""
from gwpy.io.cache import file_segment
span = file_segment(utils.TEST_HDF5_FILE)
with pytest.raises(ValueError):
self.TEST_CLASS.read(
utils.TEST_HDF5_FILE,
"H1:LDAS-STRAIN",
pad=0.,
start=span[0],
end=span[1]+1.,
gap="raise",
)
@utils.skip_missing_dependency('nds2')
def test_from_nds2_buffer_dynamic_scaled(self):
# build fake buffer for LIGO channel
nds_buffer = mocks.nds2_buffer(
'H1:TEST',
self.data,
1000000000,
self.data.shape[0],
'm',
name='test',
slope=2,
offset=1,
)
# check scaling defaults to off
utils.assert_array_equal(
self.TEST_CLASS.from_nds2_buffer(nds_buffer).value,
nds_buffer.data,
)
utils.assert_array_equal(
self.TEST_CLASS.from_nds2_buffer(nds_buffer, scaled=False).value,
nds_buffer.data,
)
utils.assert_array_equal(
self.TEST_CLASS.from_nds2_buffer(nds_buffer, scaled=True).value,
nds_buffer.data * 2 + 1,
)
# -- test remote data access ----------------
@pytest.mark.parametrize('format', [
'hdf5',
pytest.param('gwf', marks=SKIP_FRAMECPP),
])
@pytest_skip_network_error
def test_fetch_open_data(self, gw150914, format):
ts = self.TEST_CLASS.fetch_open_data(
GWOSC_GW150914_IFO,
*GWOSC_GW150914_SEGMENT,
format=format,
verbose=True,
)
utils.assert_quantity_sub_equal(ts, gw150914,
exclude=['name', 'unit', 'channel'])
# try again with 16384 Hz data
ts = self.TEST_CLASS.fetch_open_data(
GWOSC_GW150914_IFO,
*GWOSC_GW150914_SEGMENT,
format=format,
sample_rate=16384,
)
assert ts.sample_rate == 16384 * units.Hz
@pytest_skip_network_error
def test_fetch_open_data_error(self):
"""Test that TimeSeries.fetch_open_data raises errors it receives
from the `gwosc` module.
"""
with pytest.raises(ValueError):
self.TEST_CLASS.fetch_open_data(
GWOSC_GW150914_IFO,
0,
1,
)
@utils.skip_missing_dependency('nds2')
@pytest.mark.parametrize('protocol', (1, 2))
def test_fetch(self, protocol):
ts = self.create(name='L1:TEST', t0=1000000000, unit='m')
nds_buffer = mocks.nds2_buffer_from_timeseries(ts)
nds_connection = mocks.nds2_connection(buffers=[nds_buffer],
protocol=protocol)
with mock.patch('nds2.connection') as mock_connection, \
mock.patch('nds2.buffer', nds_buffer):
mock_connection.return_value = nds_connection
# use verbose=True to hit more lines
ts2 = self.TEST_CLASS.fetch('L1:TEST', *ts.span, verbose=True)
utils.assert_quantity_sub_equal(ts, ts2, exclude=['channel'])
# check open connection works
ts2 = self.TEST_CLASS.fetch('L1:TEST', *ts.span, verbose=True,
connection=nds_connection)
utils.assert_quantity_sub_equal(ts, ts2, exclude=['channel'])
# check padding works (with warning for nds2-server connections)
ctx = pytest.warns(UserWarning) if protocol > 1 else null_context()
with ctx:
ts2 = self.TEST_CLASS.fetch('L1:TEST', *ts.span.protract(10),
pad=-100., host='anything')
assert ts2.span == ts.span.protract(10)
assert ts2[0] == -100. * ts.unit
assert ts2[10] == ts[0]
assert ts2[-11] == ts[-1]
assert ts2[-1] == -100. * ts.unit
@utils.skip_missing_dependency('nds2')
def test_fetch_empty_iterate_error(self):
# test that the correct error is raised if nds2.connection.iterate
# yields no buffers (and no errors)
# mock connection with no data
nds_connection = mocks.nds2_connection()
def find_channels(name, *args, **kwargs):
return [mocks.nds2_channel(name, 128, '')]
nds_connection.find_channels = find_channels
# run fetch and assert error
with mock.patch('nds2.connection') as mock_connection:
mock_connection.return_value = nds_connection
with pytest.raises(RuntimeError) as exc:
self.TEST_CLASS.fetch('L1:TEST', 0, 1, host='nds.gwpy')
assert 'no data received' in str(exc.value)
@_gwosc_cvmfs
@mock.patch.dict(
"os.environ",
{"LIGO_DATAFIND_SERVER": GWOSC_DATAFIND_SERVER},
)
def test_find(self, gw150914_16384):
ts = self.TEST_CLASS.find(
GWOSC_GW150914_CHANNEL,
*GWOSC_GW150914_SEGMENT,
frametype=GWOSC_GW150914_FRAMETYPE,
)
utils.assert_quantity_sub_equal(ts, gw150914_16384,
exclude=['name', 'channel', 'unit'])
# test observatory
ts2 = self.TEST_CLASS.find(
GWOSC_GW150914_CHANNEL,
*GWOSC_GW150914_SEGMENT,
frametype=GWOSC_GW150914_FRAMETYPE,
observatory=GWOSC_GW150914_IFO[0],
)
utils.assert_quantity_sub_equal(ts, ts2)
with pytest.raises(RuntimeError):
self.TEST_CLASS.find(
GWOSC_GW150914_CHANNEL,
*GWOSC_GW150914_SEGMENT,
frametype=GWOSC_GW150914_FRAMETYPE,
observatory='X',
)
@_gwosc_cvmfs
@mock.patch.dict(
"os.environ",
{"LIGO_DATAFIND_SERVER": GWOSC_DATAFIND_SERVER},
)
def test_find_best_frametype_in_find(self, gw150914_16384):
ts = self.TEST_CLASS.find(
GWOSC_GW150914_CHANNEL,
*GWOSC_GW150914_SEGMENT,
)
utils.assert_quantity_sub_equal(
ts,
gw150914_16384,
exclude=['name', 'channel', 'unit'],
)
@_gwosc_cvmfs
@mock.patch.dict(
# force 'import nds2' to fail so that we are actually testing
# the gwdatafind API or nothing
"sys.modules",
{"nds2": None},
)
@mock.patch.dict(
"os.environ",
{"LIGO_DATAFIND_SERVER": GWOSC_DATAFIND_SERVER},
)
def test_get_datafind(self, gw150914_16384):
try:
ts = self.TEST_CLASS.get(
GWOSC_GW150914_CHANNEL,
*GWOSC_GW150914_SEGMENT,
frametype_match=r'V1\Z',
)
except (ImportError, RuntimeError) as e: # pragma: no-cover
pytest.skip(str(e))
utils.assert_quantity_sub_equal(
ts,
gw150914_16384,
exclude=['name', 'channel', 'unit'],
)
@utils.skip_missing_dependency('nds2')
@utils.skip_kerberos_credential
@mock.patch.dict(os.environ)
def test_get_nds2(self, gw150914_16384):
# get using NDS2 (if datafind could have been used to start with)
os.environ.pop('LIGO_DATAFIND_SERVER', None)
ts = self.TEST_CLASS.get(
NDS2_GW150914_CHANNEL,
*GWOSC_GW150914_SEGMENT,
)
utils.assert_quantity_sub_equal(
ts,
gw150914_16384,
exclude=['name', 'channel', 'unit'],
)
# -- signal processing methods --------------
def test_fft(self, gw150914):
fs = gw150914.fft()
assert isinstance(fs, FrequencySeries)
assert fs.size == gw150914.size // 2 + 1
assert fs.f0 == 0 * units.Hz
assert fs.df == 1 / gw150914.duration
assert fs.channel is gw150914.channel
nptest.assert_almost_equal(
fs.value.max(), 9.793003238789471e-20+3.5377863373683966e-21j)
# test with nfft arg
fs = gw150914.fft(nfft=256)
assert fs.size == 129
assert fs.dx == gw150914.sample_rate / 256
def test_average_fft(self, gw150914):
# test all defaults
fs = gw150914.average_fft()
utils.assert_quantity_sub_equal(fs, gw150914.detrend().fft())
# test fftlength
fs = gw150914.average_fft(fftlength=0.5)
assert fs.size == 0.5 * gw150914.sample_rate.value // 2 + 1
assert fs.df == 2 * units.Hertz
fs = gw150914.average_fft(fftlength=0.4, overlap=0.2)
def test_psd_default_overlap(self, gw150914):
utils.assert_quantity_sub_equal(
gw150914.psd(.5, method="median", window="hann"),
gw150914.psd(.5, .25, method="median", window="hann"),
)
@SKIP_LAL
def test_psd_lal_median_mean(self, gw150914):
# check that warnings and errors get raised in the right place
# for a median-mean PSD with the wrong data size or parameters
# single segment should raise error
with pytest.raises(ValueError), pytest.deprecated_call():
gw150914.psd(abs(gw150914.span), method='lal_median_mean')
# odd number of segments should warn
# pytest hides the second DeprecationWarning that should have been
# triggered here, for some reason
with pytest.warns(UserWarning):
gw150914.psd(1, .5, method='lal_median_mean')
@pytest.mark.parametrize('method', ('welch', 'bartlett', 'median'))
def test_psd(self, noisy_sinusoid, method):
fftlength = .5
overlap = .25
fs = noisy_sinusoid.psd(
fftlength=fftlength,
overlap=overlap,
method=method,
)
assert fs.unit == noisy_sinusoid.unit ** 2 / "Hz"
assert fs.max() == fs.value_at(500)
assert fs.size == fftlength * noisy_sinusoid.sample_rate.value // 2 + 1
assert fs.f0 == 0 * units.Hz
assert fs.df == units.Hz / fftlength
assert fs.name == noisy_sinusoid.name
assert fs.channel is noisy_sinusoid.channel
@pytest.mark.parametrize('library, method', chain(
product(['pycbc.psd'], ['welch', 'bartlett', 'median', 'median_mean']),
product(['lal'], ['welch', 'bartlett', 'median', 'median_mean']),
))
def test_psd_deprecated(self, noisy_sinusoid, library, method):
"""Test deprecated average methods for TimeSeries.psd
"""
pytest.importorskip(library)
fftlength = .5
overlap = .25
# remove final .25 seconds to stop median-mean complaining
# (means an even number of overlapping FFT segments)
if method == "median_mean":
end = noisy_sinusoid.span[1]
noisy_sinusoid = noisy_sinusoid.crop(end=end-overlap)
# get actual method name
library = library.split('.', 1)[0]
with pytest.deprecated_call():
psd = noisy_sinusoid.psd(fftlength=fftlength, overlap=overlap,
method="{0}-{1}".format(library, method))
assert isinstance(psd, FrequencySeries)
assert psd.unit == noisy_sinusoid.unit ** 2 / "Hz"
assert psd.max() == psd.value_at(500)
def test_asd(self, gw150914):
kw = {
"method": "median",
}
utils.assert_quantity_sub_equal(
gw150914.asd(1, **kw),
gw150914.psd(1, **kw) ** (1/2.),
)
def test_csd(self, noisy_sinusoid, corrupt_noisy_sinusoid):
# test that csd(self) is the same as psd()
fs = noisy_sinusoid.csd(noisy_sinusoid)
utils.assert_quantity_sub_equal(
fs,
noisy_sinusoid.psd(method="welch"),
exclude=['name'],
)
# test fftlength
fs = noisy_sinusoid.csd(corrupt_noisy_sinusoid, fftlength=0.5)
assert fs.size == 0.5 * noisy_sinusoid.sample_rate.value // 2 + 1
assert fs.df == 2 * units.Hertz
utils.assert_quantity_sub_equal(
fs,
noisy_sinusoid.csd(corrupt_noisy_sinusoid, fftlength=0.5,
overlap=0.25),
)
@staticmethod
def _window_helper(series, fftlength, window='hamming'):
nfft = int(series.sample_rate.value * fftlength)
return signal.get_window(window, nfft)
@pytest.mark.parametrize('method', [
'scipy-welch',
'scipy-bartlett',
'scipy-median',
pytest.param('lal-welch', marks=SKIP_LAL),
pytest.param('lal-bartlett', marks=SKIP_LAL),
pytest.param('lal-median', marks=SKIP_LAL),
pytest.param('pycbc-welch', marks=SKIP_PYCBC_PSD),
pytest.param('pycbc-bartlett', marks=SKIP_PYCBC_PSD),
pytest.param('pycbc-median', marks=SKIP_PYCBC_PSD),
])
@pytest.mark.parametrize(
'window', (None, 'hann', ('kaiser', 24), 'array'),
)
def test_spectrogram(self, gw150914, method, window):
# generate window for 'array'
win = self._window_helper(gw150914, 1) if window == 'array' else window
if method.startswith(("lal", "pycbc")):
ctx = pytest.deprecated_call
else:
ctx = null_context
# generate spectrogram
with ctx():
sg = gw150914.spectrogram(1, method=method, window=win)
# validate
assert isinstance(sg, Spectrogram)
assert sg.shape == (abs(gw150914.span),
gw150914.sample_rate.value // 2 + 1)
assert sg.f0 == 0 * units.Hz
assert sg.df == 1 * units.Hz
assert sg.channel is gw150914.channel
assert sg.unit == gw150914.unit ** 2 / units.Hz
assert sg.epoch == gw150914.epoch
assert sg.span == gw150914.span
# check the first time-bin is the same result as .psd()
n = int(gw150914.sample_rate.value)
if window == 'hann' and not method.endswith('bartlett'):
n *= 1.5 # default is 50% overlap
with ctx():
psd = gw150914[:int(n)].psd(fftlength=1, method=method, window=win)
# FIXME: epoch should not be excluded here (probably)
utils.assert_quantity_sub_equal(sg[0], psd, exclude=['epoch'],
almost_equal=True)
def test_spectrogram_fftlength(self, gw150914):
sg = gw150914.spectrogram(1, fftlength=0.5, method="median")
assert sg.shape == (abs(gw150914.span),
0.5 * gw150914.sample_rate.value // 2 + 1)
assert sg.df == 2 * units.Hertz
assert sg.dt == 1 * units.second
def test_spectrogram_overlap(self, gw150914):
kw = {
"fftlength": 0.5,
"window": "hann",
"method": "median",
}
sg = gw150914.spectrogram(1, **kw)
sg2 = gw150914.spectrogram(1, overlap=.25, **kw)
utils.assert_quantity_sub_equal(sg, sg2, almost_equal=True)
def test_spectrogram_multiprocessing(self, gw150914):
kw = {
"fftlength": 0.5,
"window": "hann",
"method": "median",
}
sg = gw150914.spectrogram(1, **kw)
sg2 = gw150914.spectrogram(1, nproc=2, **kw)
utils.assert_quantity_sub_equal(sg, sg2, almost_equal=True)
@pytest.mark.parametrize('library', [
pytest.param('lal', marks=SKIP_LAL),
pytest.param('pycbc', marks=SKIP_PYCBC_PSD),
])
def test_spectrogram_median_mean(self, gw150914, library):
method = '{0}-median-mean'.format(library)
        # median-mean warns on LAL if not given the correct data for an
# even number of FFTs.
# pytest only asserts a single warning, and UserWarning will take
# precedence apparently, so check that for lal
if library == 'lal':
warn_ctx = pytest.warns(UserWarning)
else:
warn_ctx = pytest.deprecated_call()
with warn_ctx:
sg = gw150914.spectrogram(
1.5,
fftlength=.5,
overlap=0,
method=method,
)
assert sg.dt == 1.5 * units.second
assert sg.df == 2 * units.Hertz
def test_spectrogram2(self, gw150914):
# test defaults
sg = gw150914.spectrogram2(1, overlap=0)
utils.assert_quantity_sub_equal(
sg,
gw150914.spectrogram(
1,
fftlength=1,
overlap=0,
method='scipy-welch',
window='hann',
),
)
# test fftlength
sg = gw150914.spectrogram2(0.5)
assert sg.shape == (16, 0.5 * gw150914.sample_rate.value // 2 + 1)
assert sg.df == 2 * units.Hertz
assert sg.dt == 0.25 * units.second
# test overlap
sg = gw150914.spectrogram2(fftlength=0.25, overlap=0.24)
assert sg.shape == (399, 0.25 * gw150914.sample_rate.value // 2 + 1)
assert sg.df == 4 * units.Hertz
# note: bizarre stride length because 4096/100 gets rounded
assert sg.dt == 0.010009765625 * units.second
def test_fftgram(self, gw150914):
fgram = gw150914.fftgram(1)
fs = int(gw150914.sample_rate.value)
f, t, sxx = signal.spectrogram(
gw150914, fs,
window='hann',
nperseg=fs,
mode='complex',
)
utils.assert_array_equal(gw150914.t0.value + t, fgram.xindex.value)
utils.assert_array_equal(f, fgram.yindex.value)
utils.assert_array_equal(sxx.T, fgram)
fgram = gw150914.fftgram(1, overlap=0.5)
f, t, sxx = signal.spectrogram(
gw150914, fs,
window='hann',
nperseg=fs,
noverlap=fs//2,
mode='complex',
)
utils.assert_array_equal(gw150914.t0.value + t, fgram.xindex.value)
utils.assert_array_equal(f, fgram.yindex.value)
utils.assert_array_equal(sxx.T, fgram)
def test_spectral_variance(self, gw150914):
variance = gw150914.spectral_variance(.5, method="median")
assert isinstance(variance, SpectralVariance)
assert variance.x0 == 0 * units.Hz
assert variance.dx == 2 * units.Hz
assert variance.max() == 8
def test_rayleigh_spectrum(self, gw150914):
# assert single FFT creates Rayleigh of 0
ray = gw150914.rayleigh_spectrum()
assert isinstance(ray, FrequencySeries)
assert ray.unit is units.Unit('')
assert ray.name == 'Rayleigh spectrum of %s' % gw150914.name
assert ray.epoch == gw150914.epoch
assert ray.channel is gw150914.channel
assert ray.f0 == 0 * units.Hz
assert ray.df == 1 / gw150914.duration
assert ray.sum().value == 0
# actually test properly
ray = gw150914.rayleigh_spectrum(.5) # no overlap
assert ray.df == 2 * units.Hz
nptest.assert_almost_equal(ray.max().value, 2.1239253590490157)
assert ray.frequencies[ray.argmax()] == 1322 * units.Hz
ray = gw150914.rayleigh_spectrum(.5, .25) # 50 % overlap
nptest.assert_almost_equal(ray.max().value, 1.8814775174483833)
assert ray.frequencies[ray.argmax()] == 136 * units.Hz
def test_csd_spectrogram(self, gw150914):
# test defaults
sg = gw150914.csd_spectrogram(gw150914, 1)
assert isinstance(sg, Spectrogram)
assert sg.shape == (4, gw150914.sample_rate.value // 2 + 1)
assert sg.f0 == 0 * units.Hz
assert sg.df == 1 * units.Hz
assert sg.channel is gw150914.channel
assert sg.unit == gw150914.unit ** 2 / units.Hertz
assert sg.epoch == gw150914.epoch
assert sg.span == gw150914.span
# check the same result as CSD
cropped = gw150914[:int(gw150914.sample_rate.value)]
csd = cropped.csd(cropped)
utils.assert_quantity_sub_equal(sg[0], csd, exclude=['name', 'epoch'])
# test fftlength
sg = gw150914.csd_spectrogram(gw150914, 1, fftlength=0.5)
assert sg.shape == (4, 0.5 * gw150914.sample_rate.value // 2 + 1)
assert sg.df == 2 * units.Hertz
assert sg.dt == 1 * units.second
# test overlap
sg = gw150914.csd_spectrogram(
gw150914,
0.5,
fftlength=0.25,
overlap=0.125,
)
assert sg.shape == (8, 0.25 * gw150914.sample_rate.value // 2 + 1)
assert sg.df == 4 * units.Hertz
assert sg.dt == 0.5 * units.second
# test multiprocessing
sg2 = gw150914.csd_spectrogram(
gw150914,
0.5,
fftlength=0.25,
overlap=0.125,
nproc=2,
)
utils.assert_quantity_sub_equal(sg, sg2)
def test_resample(self, gw150914):
"""Test :meth:`gwpy.timeseries.TimeSeries.resample`
"""
# test IIR decimation
l2 = gw150914.resample(1024, ftype='iir')
# FIXME: this test needs to be more robust
assert l2.sample_rate == 1024 * units.Hz
def test_resample_noop(self):
data = self.TEST_CLASS([1, 2, 3, 4, 5])
with pytest.warns(UserWarning):
new = data.resample(data.sample_rate)
assert data is new
def test_rms(self, gw150914):
rms = gw150914.rms(1.)
assert rms.sample_rate == 1 * units.Hz
@mock.patch('gwpy.segments.DataQualityFlag.query',
return_value=LIVETIME)
def test_mask(self, dqflag):
# craft a timeseries of ones that can be easily tested against
# a few interesting corner cases
data = TimeSeries(numpy.ones(8192), sample_rate=128)
masked = data.mask(flag='X1:TEST-FLAG:1')
# create objects to test against
window = planck(128, nleft=64, nright=64)
times = (data.t0 + numpy.arange(data.size) * data.dt).value
(live, ) = numpy.nonzero([t in LIVETIME.active for t in times])
(dead, ) = numpy.nonzero([t not in LIVETIME.active for t in times])
# verify the mask is correct
assert data.is_compatible(masked)
assert live.size + dead.size == data.size
assert numpy.all(numpy.isfinite(masked.value[live]))
assert numpy.all(numpy.isnan(masked.value[dead]))
utils.assert_allclose(masked.value[:4032], numpy.ones(4032))
utils.assert_allclose(masked.value[4032:4096], window[-64:])
utils.assert_allclose(masked.value[4352:4416],
window[:64] * window[-64:])
def test_demodulate(self):
# create a timeseries that is simply one loud sinusoidal oscillation
# at a particular frequency, then demodulate at that frequency and
# recover the amplitude and phase
amp, phase, f = 1., numpy.pi/4, 30
duration, sample_rate, stride = 600, 4096, 60
t = numpy.linspace(0, duration, duration*sample_rate)
data = TimeSeries(amp * numpy.cos(2*numpy.pi*f*t + phase),
unit='', times=t)
# test with exp=True
demod = data.demodulate(f, stride=stride, exp=True)
assert demod.unit == data.unit
assert demod.size == duration // stride
utils.assert_allclose(numpy.abs(demod.value), amp, rtol=1e-5)
utils.assert_allclose(numpy.angle(demod.value), phase, rtol=1e-5)
# test with exp=False, deg=True
mag, ph = data.demodulate(f, stride=stride)
assert mag.unit == data.unit
assert mag.size == ph.size
assert ph.unit == 'deg'
utils.assert_allclose(mag.value, amp, rtol=1e-5)
utils.assert_allclose(ph.value, numpy.rad2deg(phase), rtol=1e-5)
# test with exp=False, deg=False
mag, ph = data.demodulate(f, stride=stride, deg=False)
assert ph.unit == 'rad'
utils.assert_allclose(ph.value, phase, rtol=1e-5)
def test_heterodyne(self):
# create a timeseries that is simply one loud sinusoidal oscillation,
# with a frequency and frequency derivative, then heterodyne using the
# phase evolution to recover the amplitude and phase
amp, phase, f, fdot = 1., numpy.pi/4, 30, 1e-4
duration, sample_rate, stride = 600, 4096, 60
t = numpy.linspace(0, duration, duration*sample_rate)
phases = 2*numpy.pi*(f*t + 0.5*fdot*t**2)
data = TimeSeries(amp * numpy.cos(phases + phase),
unit='', times=t)
# test exceptions
with pytest.raises(TypeError):
data.heterodyne(1.0)
with pytest.raises(ValueError):
data.heterodyne(phases[0:len(phases) // 2])
# test with default settings
het = data.heterodyne(phases, stride=stride)
assert het.unit == data.unit
assert het.size == duration // stride
utils.assert_allclose(numpy.abs(het.value), 0.5*amp, rtol=1e-4)
utils.assert_allclose(numpy.angle(het.value), phase, rtol=2e-4)
# test with singlesided=True
het = data.heterodyne(
phases, stride=stride, singlesided=True
)
assert het.unit == data.unit
assert het.size == duration // stride
utils.assert_allclose(numpy.abs(het.value), amp, rtol=1e-4)
utils.assert_allclose(numpy.angle(het.value), phase, rtol=2e-4)
def test_taper(self):
        # create a cosine timeseries, then taper it
t = numpy.linspace(0, 1, 2048)
data = TimeSeries(numpy.cos(10*numpy.pi*t), times=t, unit='')
tapered = data.taper()
# check that the tapered timeseries goes to zero at its ends,
# and that the operation does not change the original data
assert tapered[0].value == 0
assert tapered[-1].value == 0
assert tapered.unit == data.unit
assert tapered.size == data.size
utils.assert_allclose(data.value, numpy.cos(10*numpy.pi*t))
# run the same tests for a user-specified taper duration
dtapered = data.taper(duration=0.1)
assert dtapered[0].value == 0
assert dtapered[-1].value == 0
assert dtapered.unit == data.unit
assert dtapered.size == data.size
utils.assert_allclose(data.value, numpy.cos(10*numpy.pi*t))
# run the same tests for a user-specified number of samples to taper
stapered = data.taper(nsamples=10)
assert stapered[0].value == 0
assert stapered[-1].value == 0
assert stapered.unit == data.unit
assert stapered.size == data.size
utils.assert_allclose(data.value, numpy.cos(10*numpy.pi*t))
def test_inject(self):
# create a timeseries out of an array of zeros
duration, sample_rate = 1, 4096
data = TimeSeries(numpy.zeros(duration*sample_rate), t0=0,
sample_rate=sample_rate, unit='')
# create a second timeseries to inject into the first
w_times = data.times.value[:2048]
waveform = TimeSeries(numpy.cos(2*numpy.pi*30*w_times), times=w_times)
# test that we recover this waveform when we add it to data,
# and that the operation does not change the original data
new_data = data.inject(waveform)
assert new_data.unit == data.unit
assert new_data.size == data.size
ind, = new_data.value.nonzero()
assert len(ind) == waveform.size
utils.assert_allclose(new_data.value[ind], waveform.value)
utils.assert_allclose(data.value, numpy.zeros(duration*sample_rate))
def test_gate(self):
# generate Gaussian noise with std = 0.5
noise = self.TEST_CLASS(numpy.random.normal(scale=0.5, size=16384*64),
sample_rate=16384, epoch=-32)
# generate a glitch with amplitude 20 at 1000 Hz
glitchtime = 0.0
glitch = signal.gausspulse(noise.times.value - glitchtime,
bw=100) * 20
data = noise + glitch
# check that the glitch is at glitchtime as expected
tmax = data.times.value[data.argmax()]
nptest.assert_almost_equal(tmax, glitchtime)
        # the gating method is called with whiten=False to decouple the
        # whitening step from the gating step
tzero = 1.0
tpad = 1.0
threshold = 10.0
gated = data.gate(tzero=tzero, tpad=tpad, threshold=threshold,
whiten=False)
# check that the maximum value is not within the region set to zero
tleft = glitchtime - tzero
tright = glitchtime + tzero
assert not tleft < gated.times.value[gated.argmax()] < tright
# check that there are no remaining values above the threshold
assert gated.max() < threshold
def test_whiten(self):
# create noise with a glitch in it at 1000 Hz
noise = self.TEST_CLASS(
numpy.random.normal(loc=1, scale=.5, size=16384 * 64),
sample_rate=16384, epoch=-32).zpk([], [0], 1)
glitchtime = 0.5
glitch = signal.gausspulse(noise.times.value - glitchtime,
bw=100) * 1e-4
data = noise + glitch
# when the input is stationary Gaussian noise, the output should have
# zero mean and unit variance
whitened = noise.whiten(detrend='linear', method="median")
assert whitened.size == noise.size
nptest.assert_almost_equal(whitened.mean().value, 0.0, decimal=2)
nptest.assert_almost_equal(whitened.std().value, 1.0, decimal=2)
# when a loud signal is present, the max amplitude should be recovered
# at the time of that signal
tmax = data.times[data.argmax()]
assert not numpy.isclose(tmax.value, glitchtime)
whitened = data.whiten(detrend='linear', method="median")
tmax = whitened.times[whitened.argmax()]
nptest.assert_almost_equal(tmax.value, glitchtime)
def test_convolve(self):
data = self.TEST_CLASS(
signal.hann(1024), sample_rate=512, epoch=-1
)
filt = numpy.array([1, 0])
# check that the 'valid' data are unchanged by this filter
convolved = data.convolve(filt)
assert convolved.size == data.size
utils.assert_allclose(convolved.value[1:-1], data.value[1:-1])
def test_correlate(self):
# create noise and a glitch template at 1000 Hz
noise = self.TEST_CLASS(
numpy.random.normal(size=16384 * 64),
sample_rate=16384,
epoch=-32,
).zpk([], [1], 1)
glitchtime = -16.5
glitch = self.TEST_CLASS(
signal.gausspulse(numpy.arange(-1, 1, 1./16384), bw=100),
sample_rate=16384,
epoch=glitchtime-1,
)
# check that, without a signal present, we only see background
snr = noise.correlate(glitch, whiten=True, method="median")
tmax = snr.times[snr.argmax()]
assert snr.size == noise.size
assert not numpy.isclose(tmax.value, glitchtime)
nptest.assert_almost_equal(snr.mean().value, 0.0, decimal=1)
nptest.assert_almost_equal(snr.std().value, 1.0, decimal=1)
# inject and recover the glitch
data = noise.inject(glitch * 1e-4)
snr = data.correlate(glitch, whiten=True, method="median")
tmax = snr.times[snr.argmax()]
nptest.assert_almost_equal(tmax.value, glitchtime)
def test_detrend(self, gw150914):
assert not numpy.isclose(gw150914.value.mean(), 0.0, atol=1e-21)
detrended = gw150914.detrend()
assert numpy.isclose(detrended.value.mean(), 0.0)
def test_filter(self, gw150914):
zpk = [], [], 1
fts = gw150914.filter(zpk, analog=True)
utils.assert_quantity_sub_equal(gw150914, fts)
# check SOS filters can be used directly
zpk = filter_design.highpass(50, sample_rate=gw150914.sample_rate)
sos = signal.zpk2sos(*zpk)
utils.assert_quantity_almost_equal(
gw150914.filter(zpk),
gw150914.filter(sos),
)
def test_zpk(self, gw150914):
zpk = [10, 10], [1, 1], 100
utils.assert_quantity_sub_equal(
gw150914.zpk(*zpk), gw150914.filter(*zpk, analog=True))
def test_notch(self, gw150914):
# test notch runs end-to-end
gw150914.notch(60)
# test breaks when you try and 'fir' notch
with pytest.raises(NotImplementedError):
gw150914.notch(10, type='fir')
def test_q_gram(self, gw150914):
# test simple q-transform
qgram = gw150914.q_gram()
assert isinstance(qgram, EventTable)
assert qgram.meta['q'] == 45.25483399593904
assert qgram['energy'].min() >= 5.5**2 / 2
nptest.assert_almost_equal(qgram['energy'].max(), 10559.25, decimal=2)
def test_q_transform(self, gw150914):
# test simple q-transform
qspecgram = gw150914.q_transform(method='scipy-welch', fftlength=2)
assert isinstance(qspecgram, Spectrogram)
assert qspecgram.shape == (1000, 2403)
assert qspecgram.q == 5.65685424949238
nptest.assert_almost_equal(qspecgram.value.max(), 155.93567, decimal=5)
# test whitening args
asd = gw150914.asd(2, 1, method='scipy-welch')
qsg2 = gw150914.q_transform(method='scipy-welch', whiten=asd)
utils.assert_quantity_sub_equal(qspecgram, qsg2, almost_equal=True)
asd = gw150914.asd(.5, .25, method='scipy-welch')
qsg2 = gw150914.q_transform(method='scipy-welch', whiten=asd)
qsg3 = gw150914.q_transform(
method='scipy-welch',
fftlength=.5,
overlap=.25,
)
utils.assert_quantity_sub_equal(qsg2, qsg3, almost_equal=True)
# make sure frequency too high presents warning
with pytest.warns(UserWarning):
qspecgram = gw150914.q_transform(
method='scipy-welch',
frange=(0, 10000),
)
nptest.assert_almost_equal(
qspecgram.yspan[1],
1291.5316,
decimal=4,
)
# test other normalisations work (or don't)
q2 = gw150914.q_transform(method='scipy-welch', norm='median')
utils.assert_quantity_sub_equal(qspecgram, q2, almost_equal=True)
gw150914.q_transform(method='scipy-welch', norm='mean')
gw150914.q_transform(method='scipy-welch', norm=False)
with pytest.raises(ValueError):
gw150914.q_transform(method='scipy-welch', norm='blah')
def test_q_transform_logf(self, gw150914):
# test q-transform with log frequency spacing
qspecgram = gw150914.q_transform(
method='scipy-welch',
fftlength=2,
logf=True,
)
assert isinstance(qspecgram, Spectrogram)
assert qspecgram.shape == (1000, 500)
assert qspecgram.q == 5.65685424949238
nptest.assert_almost_equal(qspecgram.value.max(), 155.93774, decimal=5)
def test_q_transform_nan(self):
data = TimeSeries(numpy.empty(256*10) * numpy.nan, sample_rate=256)
with pytest.raises(ValueError) as exc:
data.q_transform(method="median")
assert str(exc.value) == 'Input signal contains non-numerical values'
def test_boolean_statetimeseries(self, array):
comp = array >= 2 * array.unit
assert isinstance(comp, StateTimeSeries)
assert comp.unit is units.Unit('')
assert comp.name == '%s >= 2.0' % (array.name)
assert (array == array).name == '{0} == {0}'.format(array.name)
@pytest_skip_network_error
def test_transfer_function(self):
tsh = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
tsl = TimeSeries.fetch_open_data('L1', 1126259446, 1126259478)
tf = tsh.transfer_function(tsl, fftlength=1.0, overlap=0.5)
assert tf.df == 1 * units.Hz
assert tf.frequencies[abs(tf).argmax()] == 516 * units.Hz
@pytest_skip_network_error
def test_coherence(self):
tsh = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
tsl = TimeSeries.fetch_open_data('L1', 1126259446, 1126259478)
coh = tsh.coherence(tsl, fftlength=1.0)
assert coh.df == 1 * units.Hz
assert coh.frequencies[coh.argmax()] == 60 * units.Hz
@pytest_skip_network_error
def test_coherence_spectrogram(self):
tsh = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
tsl = TimeSeries.fetch_open_data('L1', 1126259446, 1126259478)
cohsg = tsh.coherence_spectrogram(tsl, 4, fftlength=1.0)
assert cohsg.t0 == tsh.t0
assert cohsg.dt == 4 * units.second
assert cohsg.df == 1 * units.Hz
tmax, fmax = numpy.unravel_index(cohsg.argmax(), cohsg.shape)
assert cohsg.frequencies[fmax] == 60 * units.Hz
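# Hedged illustration (added in this edit, not part of the original test
# module): the ASD == sqrt(PSD) relationship exercised by
# TestTimeSeries.test_asd above, reduced to a standalone sketch on synthetic
# data; the helper name is made up for illustration only.
def _asd_psd_relation_sketch():
    import numpy as np
    from gwpy.timeseries import TimeSeries
    data = TimeSeries(np.random.normal(size=4 * 4096), sample_rate=4096)
    psd = data.psd(1, method="median")
    asd = data.asd(1, method="median")
    # the amplitude spectral density is the square root of the power
    # spectral density, bin by bin
    return np.allclose(asd.value, psd.value ** 0.5)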
# -- TimeSeriesDict -----------------------------------------------------------
class TestTimeSeriesDict(_TestTimeSeriesBaseDict):
channels = ['H1:LDAS-STRAIN', 'L1:LDAS-STRAIN']
TEST_CLASS = TimeSeriesDict
ENTRY_CLASS = TimeSeries
@SKIP_FRAMEL
def test_read_write_gwf(self, instance, tmp_path):
tmp = tmp_path / "test.gwf"
instance.write(tmp)
new = self.TEST_CLASS.read(tmp, instance.keys())
for key in new:
utils.assert_quantity_sub_equal(new[key], instance[key],
exclude=['channel'])
def test_read_write_hdf5(self, instance, tmp_path):
tmp = tmp_path / "test.h5"
instance.write(tmp, overwrite=True)
new = self.TEST_CLASS.read(tmp, instance.keys())
for key in new:
utils.assert_quantity_sub_equal(new[key], instance[key])
# check auto-detection of names
new = self.TEST_CLASS.read(tmp)
for key in new:
utils.assert_quantity_sub_equal(new[key], instance[key])
# -- TimeSeriesList -----------------------------------------------------------
class TestTimeSeriesList(_TestTimeSeriesBaseList):
TEST_CLASS = TimeSeriesList
ENTRY_CLASS = TimeSeries
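# Hedged illustration (added in this edit, not part of the original module):
# the demodulation recipe from TestTimeSeries.test_demodulate in standalone
# form: build a pure 30 Hz cosine, demodulate at 30 Hz, and read back the
# amplitude and phase; the helper name is made up for illustration only.
def _demodulate_sketch():
    import numpy
    from gwpy.timeseries import TimeSeries
    t = numpy.linspace(0, 600, 600 * 4096)
    data = TimeSeries(numpy.cos(2 * numpy.pi * 30 * t + numpy.pi / 4),
                      unit='', times=t)
    demod = data.demodulate(30, stride=60, exp=True)
    # expected: amplitude ~= 1, phase ~= pi/4
    return numpy.abs(demod.value).mean(), numpy.angle(demod.value).mean()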
|
gwpy/gwpy
|
gwpy/timeseries/tests/test_timeseries.py
|
Python
|
gpl-3.0
| 54,741
|
[
"Gaussian"
] |
dd815a264b5bf0bc84d81d1a0914bc28e713b547312c96db92e7b2c94e5a9a28
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2010 Brian G. Matherly
# Copyright (C) 2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Class handling language-specific displaying of names.
Specific symbols for parts of a name are defined:
====== ===============================================================
Symbol Description
====== ===============================================================
't' title
'f' given (first names)
'l' full surname (lastname)
'c' callname
'x' nick name, call, or otherwise first first name (common name)
'i' initials of the first names
'm' primary surname (main)
'0m' primary surname prefix
'1m' primary surname surname
'2m' primary surname connector
'y' pa/matronymic surname (father/mother) - assumed unique
'0y' pa/matronymic prefix
'1y' pa/matronymic surname
'2y' pa/matronymic connector
'o' surnames without pa/matronymic and primary
'r' non primary surnames (rest)
'p' list of all prefixes
'q' surnames without prefixes and connectors
's' suffix
'n' nick name
'g' family nick name
====== ===============================================================
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
import logging
LOG = logging.getLogger(".gramps.gen")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..const import ARABIC_COMMA, ARABIC_SEMICOLON, GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..lib.name import Name
from ..lib.nameorigintype import NameOriginType
try:
from ..config import config
WITH_GRAMPS_CONFIG=True
except ImportError:
WITH_GRAMPS_CONFIG=False
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_FIRSTNAME = 4
_SURNAME_LIST = 5
_SUFFIX = 6
_TITLE = 7
_TYPE = 8
_GROUP = 9
_SORT = 10
_DISPLAY = 11
_CALL = 12
_NICK = 13
_FAMNICK = 14
_SURNAME_IN_LIST = 0
_PREFIX_IN_LIST = 1
_PRIMARY_IN_LIST = 2
_TYPE_IN_LIST = 3
_CONNECTOR_IN_LIST = 4
_ORIGINPATRO = NameOriginType.PATRONYMIC
_ORIGINMATRO = NameOriginType.MATRONYMIC
_ACT = True
_INA = False
_F_NAME = 0 # name of the format
_F_FMT = 1 # the format string
_F_ACT = 2 # if the format is active
_F_FN = 3 # name format function
_F_RAWFN = 4 # name format raw function
PAT_AS_SURN = False
#-------------------------------------------------------------------------
#
# Local functions
#
#-------------------------------------------------------------------------
# Because this is used inside an exec(), it could not be a lambda:
# we sort names longest first, then by last letter, so that translations of
# shorter terms which appear inside longer ones are not matched first, e.g.
# "namelast" must not be mistaken for "name", so "namelast" must be converted
# to its %-code before "name" is converted.
##def _make_cmp(a, b): return -cmp((len(a[1]),a[1]), (len(b[1]), b[1]))
def _make_cmp_key(a): return (len(a[1]),a[1]) # set reverse to True!!
#-------------------------------------------------------------------------
#
# NameDisplayError class
#
#-------------------------------------------------------------------------
class NameDisplayError(Exception):
"""
Error used to report that the name display format string is invalid.
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return self.value
#-------------------------------------------------------------------------
#
# Functions to extract data from raw lists (unserialized objects)
#
#-------------------------------------------------------------------------
def _raw_full_surname(raw_surn_data_list):
"""method for the 'l' symbol: full surnames"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s %s %s " % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_primary_surname(raw_surn_data_list):
"""method for the 'm' symbol: primary surname"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
#if there are multiple surnames, return the primary. If there
#is only one surname, then primary has little meaning, and we
#assume a pa/matronymic should not be given as primary as it
#normally is defined independently
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
result = "%s %s %s" % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_primary_surname_only(raw_surn_data_list):
"""method to obtain the raw primary surname data, so this returns a string
"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_SURNAME_IN_LIST]
return ''
def _raw_primary_prefix_only(raw_surn_data_list):
"""method to obtain the raw primary surname data"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_PREFIX_IN_LIST]
return ''
def _raw_primary_conn_only(raw_surn_data_list):
"""method to obtain the raw primary surname data"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_CONNECTOR_IN_LIST]
return ''
def _raw_patro_surname(raw_surn_data_list):
"""method for the 'y' symbol: patronymic surname"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s %s %s" % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_surname_only(raw_surn_data_list):
"""method for the '1y' symbol: patronymic surname only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_SURNAME_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_prefix_only(raw_surn_data_list):
"""method for the '0y' symbol: patronymic prefix only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_PREFIX_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_conn_only(raw_surn_data_list):
"""method for the '2y' symbol: patronymic conn only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_nonpatro_surname(raw_surn_data_list):
"""method for the 'o' symbol: full surnames without pa/matronymic or
primary
"""
result = ""
for raw_surn_data in raw_surn_data_list:
if ((not raw_surn_data[_PRIMARY_IN_LIST]) and
raw_surn_data[_TYPE_IN_LIST][0] != _ORIGINPATRO and
raw_surn_data[_TYPE_IN_LIST][0] != _ORIGINMATRO):
result += "%s %s %s " % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_nonprimary_surname(raw_surn_data_list):
"""method for the 'r' symbol: nonprimary surnames"""
result = ''
for raw_surn_data in raw_surn_data_list:
if not raw_surn_data[_PRIMARY_IN_LIST]:
result = "%s %s %s %s" % (result, raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
def _raw_prefix_surname(raw_surn_data_list):
"""method for the 'p' symbol: all prefixes"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s " % (raw_surn_data[_PREFIX_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_single_surname(raw_surn_data_list):
"""method for the 'q' symbol: surnames without prefix and connectors"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s " % (raw_surn_data[_SURNAME_IN_LIST])
return ' '.join(result.split()).strip()
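# Hedged illustration (added in this edit, not part of the original module):
# the _raw_* helpers above all walk a list of serialized surname tuples whose
# layout follows the *_IN_LIST constants defined earlier.  The tuple below is
# fabricated purely for illustration.
def _raw_helpers_sketch():
    # (surname, prefix, primary, origin type, connector)
    raw_surnames = [
        ("Smith", "van der", True, (NameOriginType.UNKNOWN, ""), ""),
    ]
    return (_raw_full_surname(raw_surnames),    # "van der Smith"
            _raw_prefix_surname(raw_surnames),  # "van der"
            _raw_single_surname(raw_surnames))  # "Smith"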
def cleanup_name(namestring):
"""Remove too long white space due to missing name parts,
so "a b" becomes "a b" and "a , b" becomes "a, b"
"""
parts = namestring.split()
if not parts:
return ""
result = parts[0]
for val in parts[1:]:
if len(val) == 1 and val in [',', ';', ':',
ARABIC_COMMA, ARABIC_SEMICOLON]:
result += val
else:
result += ' ' + val
return result
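# Hedged illustration (added in this edit, not part of the original module):
# cleanup_name() collapses doubled spaces and glues separating punctuation
# onto the preceding word; the helper name is made up for illustration only.
def _cleanup_name_sketch():
    assert cleanup_name("de  la  Cruz") == "de la Cruz"
    assert cleanup_name("Smith , Jr") == "Smith, Jr"
    return True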
#-------------------------------------------------------------------------
#
# NameDisplay class
#
#-------------------------------------------------------------------------
class NameDisplay:
"""
Base class for displaying of Name instances.
Property:
*default_format*
the default name format to use
    *pat_as_surn*
        if only one surname, whether a pa/matronymic should be considered as
        'the' surname.
"""
format_funcs = {}
raw_format_funcs = {}
def __init__(self, xlocale=glocale):
"""
Initialize the NameDisplay class.
If xlocale is passed in (a GrampsLocale), then
the translated script will be returned instead.
:param xlocale: allow selection of the displayer script
:type xlocale: a GrampsLocale instance
"""
global WITH_GRAMPS_CONFIG
global PAT_AS_SURN
# translators: needed for Arabic, ignore otherwise
COMMAGLYPH = xlocale.translation.gettext(',')
self.STANDARD_FORMATS = [
(Name.DEF, _("Default format (defined by Gramps preferences)"),
'', _ACT),
(Name.LNFN, _("Surname, Given Suffix"),
'%l' + COMMAGLYPH + ' %f %s', _ACT),
(Name.FN, _("Given"),
'%f', _ACT),
(Name.FNLN, _("Given Surname Suffix"),
'%f %l %s', _ACT),
            # primary name primconnector other, given pa/matronymic suffix, primprefix
# translators: long string, have a look at Preferences dialog
(Name.LNFNP, _("Main Surnames, Given Patronymic Suffix Prefix"),
'%1m %2m %o' + COMMAGLYPH + ' %f %1y %s %0m', _ACT),
# DEPRECATED FORMATS
(Name.PTFN, _("Patronymic, Given"),
'%y' + COMMAGLYPH + ' %s %f', _INA),
]
self.LNFN_STR = "%s" + COMMAGLYPH + " %s %s"
self.name_formats = {}
if WITH_GRAMPS_CONFIG:
self.default_format = config.get('preferences.name-format')
if self.default_format == 0:
self.default_format = Name.LNFN
config.set('preferences.name-format', self.default_format)
#if only one surname, see if pa/ma should be considered as
# 'the' surname.
PAT_AS_SURN = config.get('preferences.patronimic-surname')
config.connect('preferences.patronimic-surname', self.change_pa_sur)
else:
self.default_format = Name.LNFN
PAT_AS_SURN = False
#preinit the name formats, this should be updated with the data
#in the database once a database is loaded
self.set_name_format(self.STANDARD_FORMATS)
def change_pa_sur(self, *args):
""" How to handle single patronymic as surname is changed"""
global PAT_AS_SURN
PAT_AS_SURN = config.get('preferences.patronimic-surname')
def get_pat_as_surn(self):
global PAT_AS_SURN
return PAT_AS_SURN
def _format_fn(self, fmt_str):
return lambda x: self.format_str(x, fmt_str)
def _format_raw_fn(self, fmt_str):
return lambda x: self.format_str_raw(x, fmt_str)
def _raw_lnfn(self, raw_data):
result = self.LNFN_STR % (_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fnln(self, raw_data):
result = "%s %s %s" % (raw_data[_FIRSTNAME],
_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fn(self, raw_data):
result = raw_data[_FIRSTNAME]
return ' '.join(result.split())
def clear_custom_formats(self):
self.name_formats = {num: value
for num, value in self.name_formats.items()
if num >= 0}
def set_name_format(self, formats):
raw_func_dict = {
Name.LNFN : self._raw_lnfn,
Name.FNLN : self._raw_fnln,
Name.FN : self._raw_fn,
}
for (num, name, fmt_str, act) in formats:
func = self._format_fn(fmt_str)
func_raw = raw_func_dict.get(num, self._format_raw_fn(fmt_str))
self.name_formats[num] = (name, fmt_str, act, func, func_raw)
self.set_default_format(self.get_default_format())
def add_name_format(self, name, fmt_str):
for num in self.name_formats:
if fmt_str in self.name_formats.get(num):
return num
num = -1
while num in self.name_formats:
num -= 1
self.set_name_format([(num, name, fmt_str,_ACT)])
return num
def edit_name_format(self, num, name, fmt_str):
self.set_name_format([(num, name, fmt_str,_ACT)])
if self.default_format == num:
self.set_default_format(num)
def del_name_format(self, num):
try:
del self.name_formats[num]
except:
pass
def set_default_format(self, num):
if num not in self.name_formats:
num = Name.LNFN
# if user sets default format to the Gramps default format,
# then we select LNFN as format.
if num == Name.DEF:
num = Name.LNFN
self.default_format = num
self.name_formats[Name.DEF] = (self.name_formats[Name.DEF][_F_NAME],
self.name_formats[Name.DEF][_F_FMT],
self.name_formats[Name.DEF][_F_ACT],
self.name_formats[num][_F_FN],
self.name_formats[num][_F_RAWFN])
def get_default_format(self):
return self.default_format
def set_format_inactive(self, num):
try:
self.name_formats[num] = (self.name_formats[num][_F_NAME],
self.name_formats[num][_F_FMT],
_INA,
self.name_formats[num][_F_FN],
self.name_formats[num][_F_RAWFN])
except:
pass
def get_name_format(self, also_default=False,
only_custom=False,
only_active=True):
"""
Get a list of tuples (num, name,fmt_str,act)
"""
the_list = []
keys = sorted(self.name_formats, key=self.cmp_to_key(self._sort_name_format))
for num in keys:
if ((also_default or num) and
(not only_custom or (num < 0)) and
(not only_active or self.name_formats[num][_F_ACT])):
the_list.append((num,) + self.name_formats[num][_F_NAME:_F_FN])
return the_list
def cmp_to_key(self, mycmp):
"""
        Python 2 to 3 conversion helper, based on the Python recipe at
        http://code.activestate.com/recipes/576653/
        Convert a :func:`cmp` function into a :func:`key` function.
        We use this in Gramps because understanding the old compare function
        is not trivial; it should eventually be replaced by a proper key
        function.
"""
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
def _sort_name_format(self, x, y):
if x < 0:
if y < 0:
return x+y
else:
return -x+y
else:
if y < 0:
return -x+y
else:
return x-y
def _is_format_valid(self, num):
try:
if not self.name_formats[num][_F_ACT]:
num = 0
except:
num = 0
return num
#-------------------------------------------------------------------------
def _gen_raw_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form::
def fn(raw_data):
return "%s %s %s" % (raw_data[_TITLE],
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name, call, otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("raw_data[_TITLE]", "title",
_("Person|title")),
"f": ("raw_data[_FIRSTNAME]", "given",
_("given")),
"l": ("_raw_full_surname(raw_data[_SURNAME_LIST])", "surname",
_("surname")),
"s": ("raw_data[_SUFFIX]", "suffix",
_("suffix")),
"c": ("raw_data[_CALL]", "call",
_("Name|call")),
"x": ("(raw_data[_NICK] or raw_data[_CALL] or raw_data[_FIRSTNAME].split(' ')[0])",
"common",
_("Name|common")),
"i": ("''.join([word[0] +'.' for word in ('. ' +" +
" raw_data[_FIRSTNAME]).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_data[_SURNAME_LIST])",
"primary",
_("Name|primary")),
"0m": ("_raw_primary_prefix_only(raw_data[_SURNAME_LIST])",
"primary[pre]",
_("primary[pre]")),
"1m": ("_raw_primary_surname_only(raw_data[_SURNAME_LIST])",
"primary[sur]",
_("primary[sur]")),
"2m": ("_raw_primary_conn_only(raw_data[_SURNAME_LIST])",
"primary[con]",
_("primary[con]")),
"y": ("_raw_patro_surname(raw_data[_SURNAME_LIST])", "patronymic",
_("patronymic")),
"0y": ("_raw_patro_prefix_only(raw_data[_SURNAME_LIST])", "patronymic[pre]",
_("patronymic[pre]")),
"1y": ("_raw_patro_surname_only(raw_data[_SURNAME_LIST])", "patronymic[sur]",
_("patronymic[sur]")),
"2y": ("_raw_patro_conn_only(raw_data[_SURNAME_LIST])", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_data[_SURNAME_LIST])", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_data[_SURNAME_LIST])",
"rest",
_("Remaining names|rest")),
"p": ("_raw_prefix_surname(raw_data[_SURNAME_LIST])",
"prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_data[_SURNAME_LIST])",
"rawsurnames",
_("rawsurnames")),
"n": ("raw_data[_NICK]", "nickname",
_("nickname")),
"g": ("raw_data[_FAMNICK]", "familynick",
_("familynick")),
}
args = "raw_data"
return self._make_fn(format_str, d, args)
def _gen_cooked_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form::
def fn(first, raw_surname_list, suffix, title, call,):
return "%s %s" % (first,suffix)
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name, call, or otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("title", "title",
_("Person|title")),
"f": ("first", "given",
_("given")),
"l": ("_raw_full_surname(raw_surname_list)", "surname",
_("surname")),
"s": ("suffix", "suffix",
_("suffix")),
"c": ("call", "call",
_("Name|call")),
"x": ("(nick or call or first.split(' ')[0])", "common",
_("Name|common")),
"i": ("''.join([word[0] +'.' for word in ('. ' + first).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_surname_list)", "primary",
_("Name|primary")),
"0m":("_raw_primary_prefix_only(raw_surname_list)",
"primary[pre]", _("primary[pre]")),
"1m":("_raw_primary_surname_only(raw_surname_list)",
"primary[sur]",_("primary[sur]")),
"2m":("_raw_primary_conn_only(raw_surname_list)",
"primary[con]", _("primary[con]")),
"y": ("_raw_patro_surname(raw_surname_list)", "patronymic",
_("patronymic")),
"0y":("_raw_patro_prefix_only(raw_surname_list)", "patronymic[pre]",
_("patronymic[pre]")),
"1y":("_raw_patro_surname_only(raw_surname_list)", "patronymic[sur]",
_("patronymic[sur]")),
"2y":("_raw_patro_conn_only(raw_surname_list)", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_surname_list)", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_surname_list)", "rest",
_("Remaining names|rest")),
"p": ("_raw_prefix_surname(raw_surname_list)", "prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_surname_list)", "rawsurnames",
_("rawsurnames")),
"n": ("nick", "nickname",
_("nickname")),
"g": ("famnick", "familynick",
_("familynick")),
}
args = "first,raw_surname_list,suffix,title,call,nick,famnick"
return self._make_fn(format_str, d, args)
def format_str(self, name, format_str):
return self._format_str_base(name.first_name, name.surname_list,
name.suffix, name.title,
name.call, name.nick, name.famnick,
format_str)
def format_str_raw(self, raw_data, format_str):
"""
Format a name from the raw name list. To make this as fast as possible
this uses :func:`_gen_raw_func` to generate a new method for each new
format_string.
        It does not call :meth:`_format_str_base` because it would introduce an
extra method call and we need all the speed we can squeeze out of this.
"""
func = self.__class__.raw_format_funcs.get(format_str)
if func is None:
func = self._gen_raw_func(format_str)
self.__class__.raw_format_funcs[format_str] = func
return func(raw_data)
def _format_str_base(self, first, surname_list, suffix, title, call,
nick, famnick, format_str):
"""
Generates name from a format string.
The following substitutions are made:
'%t' : title
'%f' : given (first names)
'%l' : full surname (lastname)
'%c' : callname
'%x' : nick name, call, or otherwise first first name (common name)
'%i' : initials of the first names
'%m' : primary surname (main)
'%0m': prefix primary surname (main)
'%1m': surname primary surname (main)
'%2m': connector primary surname (main)
'%y' : pa/matronymic surname (father/mother) - assumed unique
'%0y': prefix "
'%1y': surname "
'%2y': connector "
'%o' : surnames without patronymic
'%r' : non-primary surnames (rest)
'%p' : list of all prefixes
'%q' : surnames without prefixes and connectors
'%s' : suffix
'%n' : nick name
'%g' : family nick name
The capital letters are substituted for capitalized name components.
The %% is substituted with the single % character.
All the other characters in the fmt_str are unaffected.
"""
func = self.__class__.format_funcs.get(format_str)
if func is None:
func = self._gen_cooked_func(format_str)
self.__class__.format_funcs[format_str] = func
try:
s = func(first, [surn.serialize() for surn in surname_list],
suffix, title, call, nick, famnick)
except (ValueError, TypeError,):
raise NameDisplayError("Incomplete format string")
return s
#-------------------------------------------------------------------------
def primary_surname(self, name):
global PAT_AS_SURN
nrsur = len(name.surname_list)
sur = name.get_primary_surname()
if not PAT_AS_SURN and nrsur <= 1 and \
(sur.get_origintype().value == _ORIGINPATRO
or sur.get_origintype().value == _ORIGINMATRO):
return ''
return sur.get_surname()
def sort_string(self, name):
return "%-25s%-30s%s" % (self.primary_surname(name),
name.first_name, name.suffix)
def sorted(self, person):
"""
Return a text string representing the :class:`~.person.Person`
instance's :class:`~.name.Name` in a manner that should be used for
displaying a sortedname.
:param person: :class:`~.person.Person` instance that contains the
:class:`~.name.Name` that is to be displayed. The
primary name is used for the display.
:type person: :class:`~.person.Person`
:returns: Returns the :class:`~.person.Person` instance's name
:rtype: str
"""
name = person.get_primary_name()
return self.sorted_name(name)
def sorted_name(self, name):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for sorting the name in a list.
:param name: :class:`~.name.Name` instance that is to be displayed.
:type name: :class:`~.name.Name`
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
num = self._is_format_valid(name.sort_as)
return self.name_formats[num][_F_FN](name)
def truncate(self, full_name, max_length=15, elipsis="..."):
name_out = ""
if len(full_name) <= max_length:
name_out = full_name
else:
last_space = full_name.rfind(" ", max_length)
if (last_space) > -1:
name_out = full_name[:last_space]
else:
name_out = full_name[:max_length]
name_out += " " + elipsis
return name_out
def raw_sorted_name(self, raw_data):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for sorting the name in a list.
:param name: raw unserialized data of name that is to be displayed.
:type name: tuple
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
num = self._is_format_valid(raw_data[_SORT])
return self.name_formats[num][_F_RAWFN](raw_data)
def display(self, person):
"""
Return a text string representing the :class:`~.person.Person`
instance's :class:`~.name.Name` in a manner that should be used for
normal displaying.
:param person: :class:`~.person.Person` instance that contains the
:class:`~.name.Name` that is to be displayed. The
primary name is used for the display.
:type person: :class:`~.person.Person`
:returns: Returns the :class:`~.person.Person` instance's name
:rtype: str
"""
name = person.get_primary_name()
return self.display_name(name)
def display_format(self, person, num):
"""
        Return a text string representing the :class:`~.person.Person`
        instance's :class:`~.name.Name` using the format numbered num.
        :param person: :class:`~.person.Person` instance that contains the
                       :class:`~.name.Name` that is to be displayed. The
                       primary name is used for the display.
        :type person: :class:`~.person.Person`
        :param num: number of the format to be used, as returned by
                    name_displayer.add_name_format('name', 'format')
        :type num: int
        :returns: Returns the :class:`~.person.Person` instance's name
        :rtype: str
"""
name = person.get_primary_name()
return self.name_formats[num][_F_FN](name)
def display_formal(self, person):
"""
Return a text string representing the :class:`~.person.Person`
instance's :class:`~.name.Name` in a manner that should be used for
formal displaying.
:param person: :class:`~.person.Person` instance that contains the
:class:`~.name.Name` that is to be displayed. The
primary name is used for the display.
:type person: :class:`~.person.Person`
:returns: Returns the :class:`~.person.Person` instance's name
:rtype: str
"""
# FIXME: At this time, this is just duplicating display() method
name = person.get_primary_name()
return self.display_name(name)
def display_name(self, name):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for normal displaying.
:param name: :class:`~.name.Name` instance that is to be displayed.
:type name: :class:`~.name.Name`
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
if name is None:
return ""
num = self._is_format_valid(name.display_as)
return self.name_formats[num][_F_FN](name)
def raw_display_name(self, raw_data):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for normal displaying.
:param name: raw unserialized data of name that is to be displayed.
:type name: tuple
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
num = self._is_format_valid(raw_data[_DISPLAY])
return self.name_formats[num][_F_RAWFN](raw_data)
def display_given(self, person):
return self.format_str(person.get_primary_name(),'%f')
def name_grouping(self, db, person):
"""
Return the name under which to group this person. This is defined as:
1. if group name is defined on primary name, use that
2. if group name is defined for the primary surname of the primary
name, use that
3. use primary surname of primary name otherwise
"""
return self.name_grouping_name(db, person.primary_name)
def name_grouping_name(self, db, pn):
"""
Return the name under which to group. This is defined as:
1. if group name is defined, use that
2. if group name is defined for the primary surname, use that
3. use primary surname itself otherwise
:param pn: :class:`~.name.Name` object
:type pn: :class:`~.name.Name` instance
:returns: Returns the groupname string representation
:rtype: str
"""
if pn.group_as:
return pn.group_as
return db.get_name_group_mapping(pn.get_primary_surname().get_surname())
def name_grouping_data(self, db, pn):
"""
Return the name under which to group. This is defined as:
1. if group name is defined, use that
2. if group name is defined for the primary surname, use that
3. use primary surname itself otherwise
:param pn: raw unserialized data of name
:type pn: tuple
:returns: Returns the groupname string representation
:rtype: str
"""
if pn[_GROUP]:
return pn[_GROUP]
return db.get_name_group_mapping(_raw_primary_surname_only(
pn[_SURNAME_LIST]))
def _make_fn(self, format_str, d, args):
"""
Create the name display function and handles dependent
punctuation.
"""
# d is a dict: dict[code] = (expr, word, translated word)
# First, go through and do internationalization-based
# key-word replacement. Just replace ikeywords with
# %codes (ie, replace "irstnamefay" with "%f", and
# "IRSTNAMEFAY" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[2]) for code, _tuple in d.items()]
d_keys.sort(key=_make_cmp_key, reverse=True) # reverse on length and by ikeyword
for (code, ikeyword) in d_keys:
exp, keyword, ikeyword = d[code]
format_str = format_str.replace(ikeyword, "%"+ code)
format_str = format_str.replace(ikeyword.title(), "%"+ code)
format_str = format_str.replace(ikeyword.upper(), "%"+ code.upper())
# Next, go through and do key-word replacement.
# Just replace keywords with
# %codes (ie, replace "firstname" with "%f", and
# "FIRSTNAME" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[1]) for code, _tuple in d.items()]
d_keys.sort(key=_make_cmp_key, reverse=True) # reverse sort on length and by keyword
# if in double quotes, just use % codes
for (code, keyword) in d_keys:
exp, keyword, ikeyword = d[code]
format_str = format_str.replace(keyword, "%"+ code)
format_str = format_str.replace(keyword.title(), "%"+ code)
format_str = format_str.replace(keyword.upper(), "%"+ code.upper())
# Get lower and upper versions of codes:
codes = list(d.keys()) + [c.upper() for c in d]
# Next, list out the matching patterns:
# If it starts with "!" however, treat the punctuation verbatim:
if len(format_str) > 0 and format_str[0] == "!":
patterns = ["%(" + ("|".join(codes)) + ")", # %s
]
format_str = format_str[1:]
else:
patterns = [
",\W*\"%(" + ("|".join(codes)) + ")\"", # ,\W*"%s"
",\W*\(%(" + ("|".join(codes)) + ")\)", # ,\W*(%s)
",\W*%(" + ("|".join(codes)) + ")", # ,\W*%s
"\"%(" + ("|".join(codes)) + ")\"", # "%s"
"_%(" + ("|".join(codes)) + ")_", # _%s_
"\(%(" + ("|".join(codes)) + ")\)", # (%s)
"%(" + ("|".join(codes)) + ")", # %s
]
new_fmt = format_str
# replace the specific format string flags with a
# flag that works in standard python format strings.
new_fmt = re.sub("|".join(patterns), "%s", new_fmt)
# replace special meaning codes we need to have verbatim in output
if (len(new_fmt) > 2 and new_fmt[0] == new_fmt[-1] == '"'):
new_fmt = new_fmt.replace('\\', r'\\')
new_fmt = new_fmt[1:-1].replace('"', r'\"')
else:
new_fmt = new_fmt.replace('\\', r'\\')
new_fmt = new_fmt.replace('"', '\\\"')
# find each format flag in the original format string
# for each one we find the variable name that is needed to
# replace it and add this to a list. This list will be used to
# generate the replacement tuple.
# This compiled pattern should match all of the format codes.
pat = re.compile("|".join(patterns))
param = ()
mat = pat.search(format_str)
while mat:
match_pattern = mat.group(0) # the matching pattern
# prefix, code, suffix:
p, code, s = re.split("%(.)", match_pattern)
if code in '0123456789':
code = code + s[0]
s = s[1:]
field = d[code.lower()][0]
if code.isupper():
field += ".upper()"
if p == '' and s == '':
param = param + (field,)
else:
param = param + ("ifNotEmpty(%s,'%s','%s')" % (field, p, s), )
mat = pat.search(format_str, mat.end())
s = """
def fn(%s):
def ifNotEmpty(str,p,s):
if str == '':
return ''
else:
return p + str + s
return cleanup_name("%s" %% (%s))""" % (args, new_fmt, ",".join(param))
try:
exec(s) in globals(), locals()
return locals()['fn']
except:
LOG.error("\n" + 'Wrong name format string %s' % new_fmt
+"\n" + ("ERROR, Edit Name format in Preferences->Display to correct")
+"\n" + _('Wrong name format string %s') % new_fmt
+"\n" + ("ERROR, Edit Name format in Preferences->Display to correct")
)
def errfn(*arg):
return _("ERROR, Edit Name format in Preferences")
return errfn
displayer = NameDisplay()
|
prculley/gramps
|
gramps/gen/display/name.py
|
Python
|
gpl-2.0
| 46,036
|
[
"Brian"
] |
6c20e1ba5fca9e4ebb02174252d099922520cd00520f16419ac39f6284c777e8
|
"""
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from copy import deepcopy
from cStringIO import StringIO
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
)
from django.contrib import messages
from django.db import transaction
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.models import User
from courseware.access import has_access
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, assign_role
from django_comment_common.utils import seed_permissions_roles
from edxmako.shortcuts import render_to_response
from lms.djangoapps.grades.new.course_grade_factory import CourseGradeFactory
from opaque_keys.edx.keys import CourseKey
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole
from student.models import CourseEnrollment
from xmodule.modulestore.django import SignalHandler
from lms.djangoapps.instructor.views.api import _split_input_list
from lms.djangoapps.instructor.views.gradebook_api import get_grade_book_page
from lms.djangoapps.instructor.enrollment import (
enroll_email,
get_email_params,
)
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import (
get_override_for_ccx,
override_field_for_ccx,
clear_ccx_field_info_from_ccx_map,
bulk_delete_ccx_override_fields,
)
from lms.djangoapps.ccx.utils import (
add_master_course_staff_to_ccx,
assign_staff_role_to_ccx,
ccx_course,
ccx_students_enrolling_center,
get_ccx_for_coach,
get_ccx_by_ccx_id,
get_ccx_creation_dict,
get_date,
parse_date,
prep_course_for_grading,
)
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def coach_dashboard(view):
"""
View decorator which enforces that the user have the CCX coach role on the
given course and goes ahead and translates the course_id from the Django
route into a course object.
"""
@functools.wraps(view)
def wrapper(request, course_id):
"""
Wraps the view function, performing access check, loading the course,
and modifying the view's call signature.
"""
course_key = CourseKey.from_string(course_id)
ccx = None
if isinstance(course_key, CCXLocator):
ccx_id = course_key.ccx
try:
ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
except CustomCourseForEdX.DoesNotExist:
raise Http404
if ccx:
course_key = ccx.course_id
course = get_course_by_id(course_key, depth=None)
if not course.enable_ccx:
raise Http404
else:
is_staff = has_access(request.user, 'staff', course)
is_instructor = has_access(request.user, 'instructor', course)
if is_staff or is_instructor:
# if user is staff or instructor then he can view ccx coach dashboard.
return view(request, course, ccx)
else:
# if there is a ccx, we must validate that it is the ccx for this coach
role = CourseCcxCoachRole(course_key)
if not role.has_user(request.user):
return HttpResponseForbidden(_('You must be a CCX Coach to access this view.'))
elif ccx is not None:
coach_ccx = get_ccx_by_ccx_id(course, request.user, ccx.id)
if coach_ccx is None:
return HttpResponseForbidden(
_('You must be the coach for this ccx to access this view')
)
return view(request, course, ccx)
return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
"""
Display the CCX Coach Dashboard.
"""
# right now, we can only have one ccx per user and course
    # so, if no ccx is passed in, we can safely redirect to that
if ccx is None:
ccx = get_ccx_for_coach(course, request.user)
if ccx:
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, unicode(ccx.id))}
)
return redirect(url)
context = {
'course': course,
'ccx': ccx,
}
context.update(get_ccx_creation_dict(course))
if ccx:
ccx_locator = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
# At this point we are done with verification that current user is ccx coach.
assign_staff_role_to_ccx(ccx_locator, request.user, course.id)
schedule = get_ccx_schedule(course, ccx)
grading_policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy)
context['schedule'] = json.dumps(schedule, indent=4)
context['save_url'] = reverse(
'save_ccx', kwargs={'course_id': ccx_locator})
context['ccx_members'] = CourseEnrollment.objects.filter(course_id=ccx_locator, is_active=True)
context['gradebook_url'] = reverse(
'ccx_gradebook', kwargs={'course_id': ccx_locator})
context['grades_csv_url'] = reverse(
'ccx_grades_csv', kwargs={'course_id': ccx_locator})
context['grading_policy'] = json.dumps(grading_policy, indent=4)
context['grading_policy_url'] = reverse(
'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
with ccx_course(ccx_locator) as course:
context['course'] = course
else:
context['create_ccx_url'] = reverse(
'create_ccx', kwargs={'course_id': course.id})
return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
"""
Create a new CCX
"""
name = request.POST.get('name')
if hasattr(course, 'ccx_connector') and course.ccx_connector:
# if ccx connector url is set in course settings then inform user that he can
# only create ccx by using ccx connector url.
context = get_ccx_creation_dict(course)
messages.error(request, context['use_ccx_con_error_message'])
return render_to_response('ccx/coach_dashboard.html', context)
# prevent CCX objects from being created for deprecated course ids.
if course.id.deprecated:
messages.error(request, _(
"You cannot create a CCX from a course using a deprecated id. "
"Please create a rerun of this course in the studio to allow "
"this action."))
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
return redirect(url)
ccx = CustomCourseForEdX(
course_id=course.id,
coach=request.user,
display_name=name)
ccx.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx, course, 'start', start)
override_field_for_ccx(ccx, course, 'due', None)
# Enforce a static limit for the maximum amount of students that can be enrolled
override_field_for_ccx(ccx, course, 'max_student_enrollments_allowed', settings.CCX_MAX_STUDENTS_ALLOWED)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
ccx_id = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
# Create forum roles
seed_permissions_roles(ccx_id)
# Assign administrator forum role to CCX coach
assign_role(ccx_id, request.user, FORUM_ROLE_ADMINISTRATOR)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})
# Enroll the coach in the course
email_params = get_email_params(course, auto_enroll=True, course_key=ccx_id, display_name=ccx.display_name)
enroll_email(
course_id=ccx_id,
student_email=request.user.email,
auto_enroll=True,
email_students=True,
email_params=email_params,
)
assign_staff_role_to_ccx(ccx_id, request.user, course.id)
add_master_course_staff_to_ccx(course, ccx_id, ccx.display_name)
# using CCX object as sender here.
responses = SignalHandler.course_published.send(
sender=ccx,
course_key=CCXLocator.from_course_locator(course.id, unicode(ccx.id))
)
for rec, response in responses:
log.info('Signal fired when course is published. Receiver: %s. Response: %s', rec, response)
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
"""
Save changes to CCX.
"""
if not ccx:
raise Http404
def override_fields(parent, data, graded, earliest=None, ccx_ids_to_delete=None):
"""
Recursively apply CCX schedule data to CCX by overriding the
`visible_to_staff_only`, `start` and `due` fields for units in the
course.
"""
if ccx_ids_to_delete is None:
ccx_ids_to_delete = []
blocks = {
str(child.location): child
for child in parent.get_children()}
for unit in data:
block = blocks[unit['location']]
override_field_for_ccx(
ccx, block, 'visible_to_staff_only', unit['hidden'])
start = parse_date(unit['start'])
if start:
if not earliest or start < earliest:
earliest = start
override_field_for_ccx(ccx, block, 'start', start)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'start_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'start')
# Only subsection (aka sequential) and unit (aka vertical) have due dates.
            if 'due' in unit: # checking that the key (due) exists in dict (unit).
due = parse_date(unit['due'])
if due:
override_field_for_ccx(ccx, block, 'due', due)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
else:
# In case of section aka chapter we do not have due date.
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
if not unit['hidden'] and block.graded:
graded[block.format] = graded.get(block.format, 0) + 1
children = unit.get('children', None)
# For a vertical, override start and due dates of all its problems.
if unit.get('category', None) == u'vertical':
for component in block.get_children():
# override start and due date of problem (Copy dates of vertical into problems)
if start:
override_field_for_ccx(ccx, component, 'start', start)
if due:
override_field_for_ccx(ccx, component, 'due', due)
if children:
override_fields(block, children, graded, earliest, ccx_ids_to_delete)
return earliest, ccx_ids_to_delete
graded = {}
earliest, ccx_ids_to_delete = override_fields(course, json.loads(request.body), graded, [])
bulk_delete_ccx_override_fields(ccx, ccx_ids_to_delete)
if earliest:
override_field_for_ccx(ccx, course, 'start', earliest)
# Attempt to automatically adjust grading policy
changed = False
policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy
)
policy = deepcopy(policy)
grader = policy['GRADER']
for section in grader:
count = graded.get(section.get('type'), 0)
if count < section.get('min_count', 0):
changed = True
section['min_count'] = count
if changed:
override_field_for_ccx(ccx, course, 'grading_policy', policy)
# using CCX object as sender here.
responses = SignalHandler.course_published.send(
sender=ccx,
course_key=CCXLocator.from_course_locator(course.id, unicode(ccx.id))
)
for rec, response in responses:
log.info('Signal fired when course is published. Receiver: %s. Response: %s', rec, response)
return HttpResponse(
json.dumps({
'schedule': get_ccx_schedule(course, ccx),
'grading_policy': json.dumps(policy, indent=4)}),
content_type='application/json',
)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
"""
Set grading policy for the CCX.
"""
if not ccx:
raise Http404
override_field_for_ccx(
ccx, course, 'grading_policy', json.loads(request.POST['policy']))
# using CCX object as sender here.
responses = SignalHandler.course_published.send(
sender=ccx,
course_key=CCXLocator.from_course_locator(course.id, unicode(ccx.id))
)
for rec, response in responses:
log.info('Signal fired when course is published. Receiver: %s. Response: %s', rec, response)
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, unicode(ccx.id))}
)
return redirect(url)
def get_ccx_schedule(course, ccx):
"""
Generate a JSON serializable CCX schedule.
"""
def visit(node, depth=1):
"""
Recursive generator function which yields CCX schedule nodes.
We convert dates to string to get them ready for use by the js date
widgets, which use text inputs.
        Visits student-visible nodes only; children of hidden nodes
        are skipped as well.
Dates:
        Only the start date is applicable to a section. If the ccx coach did not override the start date,
        it is taken from the master course.
        Both start and due dates are applicable to a subsection (aka sequential). If the ccx coach did not
        override these dates, they are taken from the corresponding subsection in the master course.
        A unit inherits its start and due dates from its subsection. If the ccx coach did not override these
        dates, they are taken from the corresponding subsection in the master course.
"""
for child in node.get_children():
# in case the children are visible to staff only, skip them
if child.visible_to_staff_only:
continue
hidden = get_override_for_ccx(
ccx, child, 'visible_to_staff_only',
child.visible_to_staff_only)
start = get_date(ccx, child, 'start')
if depth > 1:
                # Subsections have both start and due dates, and units inherit dates from their subsections
if depth == 2:
due = get_date(ccx, child, 'due')
elif depth == 3:
                    # Get start and due date of the subsection in case the unit has no overridden dates.
due = get_date(ccx, child, 'due', node)
start = get_date(ccx, child, 'start', node)
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'due': due,
'hidden': hidden,
}
else:
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'hidden': hidden,
}
if depth < 3:
children = tuple(visit(child, depth + 1))
if children:
visited['children'] = children
yield visited
else:
yield visited
with disable_overrides():
return tuple(visit(course))
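# Illustrative shape of a single schedule entry (values are hypothetical, not
# taken from a real course; the exact date string format comes from get_date):
#   {'location': 'block-v1:...', 'display_name': 'Week 1', 'category': 'chapter',
#    'start': '2017-01-01 00:00', 'hidden': False, 'children': (...)}
# Subsections (depth 2) and units (depth 3) additionally carry a 'due' key.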
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None): # pylint: disable=unused-argument
"""
get json representation of ccx schedule
"""
if not ccx:
raise Http404
schedule = get_ccx_schedule(course, ccx)
json_schedule = json.dumps(schedule, indent=4)
return HttpResponse(json_schedule, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
"""
Invite users to new ccx
"""
if not ccx:
raise Http404
action = request.POST.get('enrollment-button')
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
email_students = 'email-students' in request.POST
course_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
ccx_students_enrolling_center(action, identifiers, email_students, course_key, email_params, ccx.coach)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
"""
Manage the enrollment of individual students in a CCX
"""
if not ccx:
raise Http404
action = request.POST.get('student-action', None)
student_id = request.POST.get('student-id', '')
email_students = 'email-students' in request.POST
identifiers = [student_id]
course_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
errors = ccx_students_enrolling_center(action, identifiers, email_students, course_key, email_params, ccx.coach)
for error_message in errors:
messages.error(request, error_message)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
"""
Show the gradebook for this CCX.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
student_info, page = get_grade_book_page(request, course, course_key=ccx_key)
return render_to_response('courseware/gradebook.html', {
'page': page,
'page_url': reverse('ccx_gradebook', kwargs={'course_id': ccx_key}),
'students': student_info,
'course': course,
'course_id': course.id,
'staff_access': request.user.is_staff,
'ordered_grades': sorted(
course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
})
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
"""
Download grades as CSV.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
grades = CourseGradeFactory().iter(enrolled_students, course)
header = None
rows = []
for student, course_grade, __ in grades:
if course_grade:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in course_grade.summary[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in course_grade.summary[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
course_grade.percent] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
writer.writerow(row)
response = HttpResponse(buf.getvalue(), content_type='text/csv')
response['Content-Disposition'] = 'attachment'
return response
|
fintech-circle/edx-platform
|
lms/djangoapps/ccx/views.py
|
Python
|
agpl-3.0
| 22,571
|
[
"VisIt"
] |
8a0d14dbf165cb4e6f300d242c905e29a3573c0a495168c2434dd54fa3dce08b
|
#!/usr/bin/env python
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
"""
SPECSENS calculates the calibration curve given an observation, a standard star,
and the extinction curve for the site. The task assumes a 1-D spectrum that
has already been sensed from the original observations.
Author Version Date
-----------------------------------------------
S. M. Crawford (SAAO) 1.0 21 Mar 2011
TODO
----
LIMITATIONS
-----------
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import time
import numpy as np
import pyfits
from matplotlib.pyplot import *
from pyraf import iraf
import saltstat
import saltsafekey as saltkey
import saltsafeio as saltio
from saltsafelog import logging
import spectools as st
from spectools import SALTSpecError
from PySpectrograph.Spectra import Spectrum
from saltfit import interfit
from pylab import *
debug = True
# -----------------------------------------------------------
# core routine
def specsens(specfile, outfile, stdfile, extfile, airmass=None, exptime=None,
stdzp=3.68e-20, function='polynomial', order=3, thresh=3, niter=5,
fitter='gaussian', clobber=True, logfile='salt.log', verbose=True):
with logging(logfile, debug) as log:
# read in the specfile and create a spectrum object
obs_spectra = st.readspectrum(specfile.strip(), error=True, ftype='ascii')
# smooth the observed spectrum
# read in the std file and convert from magnitudes to fnu
# then convert it to fwave (ergs/s/cm2/A)
std_spectra = st.readspectrum(stdfile.strip(), error=False, ftype='ascii')
std_spectra.flux = Spectrum.magtoflux(std_spectra.flux, stdzp)
std_spectra.flux = Spectrum.fnutofwave(
std_spectra.wavelength, std_spectra.flux)
# Get the typical bandpass of the standard star,
std_bandpass = np.diff(std_spectra.wavelength).mean()
# Smooth the observed spectrum to that bandpass
obs_spectra.flux = st.boxcar_smooth(obs_spectra, std_bandpass)
# read in the extinction file (leave in magnitudes)
ext_spectra = st.readspectrum(extfile.strip(), error=False, ftype='ascii')
# determine the airmass if not specified
if saltio.checkfornone(airmass) is None:
message = 'Airmass was not supplied'
raise SALTSpecError(message)
# determine the exptime if not specified
if saltio.checkfornone(exptime) is None:
message = 'Exposure Time was not supplied'
raise SALTSpecError(message)
# calculate the calibrated spectra
log.message('Calculating the calibration curve for %s' % specfile)
cal_spectra = sensfunc(
obs_spectra, std_spectra, ext_spectra, airmass, exptime)
# plot(cal_spectra.wavelength, cal_spectra.flux * std_spectra.flux)
# fit the spectra--first take a first cut of the spectra
# using the median absolute deviation to throw away bad points
cmed = np.median(cal_spectra.flux)
cmad = saltstat.mad(cal_spectra.flux)
mask = (abs(cal_spectra.flux - cmed) < thresh * cmad)
mask = np.logical_and(mask, (cal_spectra.flux > 0))
# now fit the data
# Fit using a gaussian process.
if fitter=='gaussian':
from sklearn.gaussian_process import GaussianProcess
            # Instantiate a Gaussian Process model
dy = obs_spectra.var[mask] ** 0.5
dy /= obs_spectra.flux[mask] / cal_spectra.flux[mask]
y = cal_spectra.flux[mask]
gp = GaussianProcess(corr='squared_exponential', theta0=1e-2,
thetaL=1e-4, thetaU=0.1, nugget=(dy / y) ** 2.0)
X = np.atleast_2d(cal_spectra.wavelength[mask]).T
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
x = np.atleast_2d(cal_spectra.wavelength).T
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred = gp.predict(x)
cal_spectra.flux = y_pred
else:
fit=interfit(cal_spectra.wavelength[mask], cal_spectra.flux[mask], function=function, order=order, thresh=thresh, niter=niter)
fit.interfit()
cal_spectra.flux=fit(cal_spectra.wavelength)
# write the spectra out
st.writespectrum(cal_spectra, outfile, ftype='ascii')
def sensfunc(obs_spectra, std_spectra, ext_spectra, airmass, exptime):
"""Given an observe spectra, calculate the calibration curve for the
spectra. All data is interpolated to the binning of the obs_spectra.
The calibrated spectra is then calculated from
C = F_obs/ F_std / 10**(-0.4*A*E)/T/dW
where F_obs is the observed flux from the source, F_std is the
standard spectra, A is the airmass, E is the
extinction in mags, T is the exposure time and dW is the bandpass
Parameters
-----------
obs_spectra--spectrum of the observed star (counts/A)
    std_spectra--known spectrum of the standard star (ergs/s/cm2/A)
ext_spectra--spectrum of the extinction curve (in mags)
airmass--airmass of the observations
exptime--exposure time of the observations
function
"""
    # re-interpolate the std_spectra over the same wavelength
    std_spectra.interp(obs_spectra.wavelength)
    # re-interpolate the ext_spectra over the same wavelength
ext_spectra.interp(obs_spectra.wavelength)
# create the calibration spectra
cal_spectra = Spectrum.Spectrum(
obs_spectra.wavelength, obs_spectra.flux.copy(), stype='continuum')
# set up the bandpass
bandpass = np.diff(obs_spectra.wavelength).mean()
# correct for extinction
cal_spectra.flux = cal_spectra.flux / \
10 ** (-0.4 * airmass * ext_spectra.flux)
    # correct for the exposure time and calculate the sensitivity curve
cal_spectra.flux = cal_spectra.flux / exptime / bandpass / std_spectra.flux
return cal_spectra
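# Illustrative worked example for the formula above (numbers are hypothetical,
# not real SALT data): with F_obs = 1000 counts, airmass A = 1.5,
# extinction E = 0.2 mag, T = 300 s, dW = 2 A and F_std = 1e-14 ergs/s/cm2/A,
#   C = 1000 / 10**(-0.4 * 1.5 * 0.2) / 300 / 2 / 1e-14 ~ 2.2e14
# i.e. about 2.2e14 counts per unit of standard-star flux in that wavelength bin.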
# main code
parfile = iraf.osfn("saltspec$specsens.par")
t = iraf.IrafTaskFactory(
taskname="specsens", value=parfile, function=specsens, pkgname='saltspec')
|
saltastro/pysalt
|
saltspec/specsens.py
|
Python
|
bsd-3-clause
| 6,366
|
[
"Gaussian"
] |
aee4a75c73c0c818533358f22c1b18606aec571658233b0de0b9dd6dad3569f9
|
import logging
from edx_rest_api_client.client import EdxRestApiClient
from acceptance_tests import config
from acceptance_tests.pages import LMSLoginPage
log = logging.getLogger(__name__)
class LoginMixin:
"""Mixin used for log in through LMS login page."""
def setUp(self):
super().setUp()
self.lms_login_page = LMSLoginPage(self.browser)
def login_with_lms(self):
"""Visit LMS and login."""
email = config.LMS_EMAIL
password = config.LMS_PASSWORD
self.browser.get(self.lms_login_page.url) # pylint: disable=not-callable
self.lms_login_page.login(email, password)
class CredentialsApiMixin:
"""Mixin used for login on credentials."""
def setUp(self):
super().setUp()
self.data = None
@property
def credential_api_client(self):
try:
api_client = EdxRestApiClient(config.CREDENTIALS_API_URL, oauth_access_token=config.ACCESS_TOKEN)
except Exception: # pylint: disable=broad-except
log.exception("Failed to initialize the API client with url '%s'.", config.CREDENTIALS_API_URL)
return
return api_client
def create_credential(self):
"""Create user credential for a program."""
self.data = self.credential_api_client.credentials.post(
{"username": config.LMS_USERNAME, "credential": {"program_uuid": config.PROGRAM_UUID}, "attributes": []}
)
def change_credential_status(self, status):
"""Update the credential status to awarded or revoked."""
self.data["status"] = status
self.credential_api_client.credentials(self.data["uuid"]).patch(self.data)
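# Hypothetical usage sketch (not part of the original module): a test case that
# mixes in CredentialsApiMixin could exercise the helpers above like this:
#   self.create_credential()
#   self.change_credential_status('awarded')
#   self.change_credential_status('revoked')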
|
edx/credentials
|
acceptance_tests/e2e/mixins.py
|
Python
|
agpl-3.0
| 1,690
|
[
"VisIt"
] |
b1fb1c94e0e25254e81befe9f0c67f8ad34d6724f123b05cabcfa0be9baff452
|
# -*- coding: utf-8 -*-
# @Author: Zachary Priddy
# @Date: 2016-04-26 23:06:59
# @Last Modified by: Zachary Priddy
# @Last Modified time: 2016-10-12 23:14:47
import logging
from time import sleep
from core.models.app import App
from core.models.command import Command as ffCommand
class App(App):
METADATA = {
'title' : 'Firefly Advanced Lighting - LUX',
'type' : 'app',
'package' : 'ffAdvancedLighting',
'module' : 'lux',
'inputs' : {
'sensors' : {'type':'device', 'capability':'sensor', 'multi':True, 'help':'Sensors to trigger lights', 'required':True},
'lights' : {'type':'device', 'capability':'switch', 'multi':True, 'help':'Lights and Switches to be triggered by LUX changes', 'required':True},
'lux_level': {'type':'number', 'help':'The level that is trigger for level low/high', 'required':True},
'actions_lux_high' : {'type':'action', 'multi':True, 'help':'Non generic actions when lux goes high.'},
'actions_lux_low' : {'type':'action', 'multi':True, 'help':'Non generic actions when lux goes low.'}
},
'options' : {
'delay_time' : {'type':'number', 'help':'Delay time in minutes from last activity before off actions applied', 'required':True},
'run_modes' : {'type':'mode', 'multi':True, 'help':'Modes to run in.'},
'no_run_modes' : {'type':'mode', 'multi':True, 'help':'Modes to not run in.'},
      'run_dark' : {'type':'boolean', 'help':'Run after sunset'},
'run_light' : {'type': 'boolean', 'help':'Run before sunset'},
'run_conditions' : {'type':'device-states', 'multi':True, 'help':'Run only if devices in these states'},
'no_run_conditions' : {'type':'device-states', 'multi':True, 'help':'Dont run if devices in these states'},
      'allow_chain' : {'type':'boolean', 'default':False, 'help':'Allow the actions of this event to trigger other events listening to switched devices. This is not recommended because it can cause looping.'}
}
}
def __init__(self, config, args={}):
    # METADATA is set above so that we can pull it during install
#self.METADATA = METADATA
self.INPUTS = {
'sensors' : config.get('sensors'),
'lights' : config.get('lights'),
'actions_lux_high' : config.get('actions_lux_high'),
'actions_lux_low' : config.get('actions_lux_low')
}
self.OPTIONS = {
'lux_level' : config.get('lux_level'),
'delay_time' : config.get('delay_time'),
'run_modes' : config.get('run_modes'),
'no_run_modes' : config.get('no_run_modes'),
'run_dark' : config.get('run_dark'),
'run_light' : config.get('run_light'),
'run_conditions' : config.get('run_conditions'),
      'no_run_conditions' : config.get('no_run_conditions')
}
self.EVENTS = {
'sensors' : self.sensorHandler
}
self.COMMANDS = {
'disable' : self.setDisable
}
self.REQUESTS = {
'disable' : self.getDisable
}
super(App, self).__init__(config, args)
self._disabled = False
self._send_event = True if config.get('allow_chain') is True else False
#########################################
# END OF SETUP
#########################################
def setDisable(self, value):
logging.critical('Setting Disabled to ' + str(value))
if self._disabled:
self._disabled = False
else:
self._disabled = True
def getDisable(self, args={}):
return self._disabled
def sensorHandler(self, event={}):
from core import ffScheduler
from core import ffLocation
logging.critical("ENTERNING LUX HANDELER")
lux = int(event.event.get('luminance')) if event.event.get('luminance') is not None else None
if lux is None or lux == '':
return -2
logging.critical('LUX: ' + str(lux))
change_value = self.lux_level
if self._disabled:
logging.critical('Lux Events Disabled')
return -2
if self.run_modes:
if ffLocation.mode not in self.run_modes:
logging.critical("Not in mode to run")
return -2
if self.no_run_modes:
if ffLocation.mode in self.no_run_modes:
logging.critical("In no run mode")
return -2
if self.run_dark is not None:
if not ffLocation.isDark:
logging.critical("Not running because is dark")
        return -2
if self.run_light is not None:
if not ffLocation.isLight:
logging.critical("Not running because is light")
return -2
if lux <= change_value:
if self.lights:
for light in self.lights:
ffCommand(light,"on", send_event=self._send_event)
if self.actions_lux_low:
for device, action in self.actions_lux_low.iteritems():
ffCommand(device, action, send_event=self._send_event)
ffScheduler.cancel(self._id)
if lux > change_value:
if self.delay_time is None:
self.TurnLightsOff()
else:
ffScheduler.runInM(self.delay_time, self.TurnLightsOff, replace=True, job_id=self._id)
def TurnLightsOff(self):
from core import ffScheduler
from core import ffLocation
if self._disabled:
logging.critical('LUX Events Disabled')
return -2
if self.run_modes:
if ffLocation.mode not in self.run_modes:
logging.critical("Not in mode to run")
return -2
if self.no_run_modes:
if ffLocation.mode in self.no_run_modes:
logging.critical("In no run mode")
return -2
if self.lights:
for light in self.lights:
ffCommand(light, "off", send_event=self._send_event)
if self.actions_lux_high:
for device, action in self.actions_lux_high.iteritems():
ffCommand(device, action, send_event=self._send_event)
if 'hue' in device:
sleep(0.5)
|
zpriddy/Firefly
|
Firefly/apps/ffAdvancedLighting/lux.py
|
Python
|
apache-2.0
| 5,780
|
[
"Firefly"
] |
1a069b2e63328acc25f2aa6d0a5d4a0490bfb2a559cb79fdb6c1fd530f62eedb
|
#!/usr/bin/env python
#
# $File: demoFunc.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
def demo(gen):
return [500 + gen*10, 1000 + gen*10]
pop = sim.Population(size=[500, 1000], infoFields='migrate_to')
pop.evolve(
initOps=sim.InitSex(),
preOps=sim.Migrator(rate=[[0.8, 0.2], [0.4, 0.6]]),
matingScheme=sim.RandomMating(subPopSize=demo),
postOps=[
sim.Stat(popSize=True),
sim.PyEval(r'"%s\n" % subPopSize')
],
gen = 3
)
|
BoPeng/simuPOP
|
docs/demoFunc.py
|
Python
|
gpl-2.0
| 1,483
|
[
"VisIt"
] |
cf36f61c4ac9279452c269d0c22f2ed1620309e5360dcc62f90350fba1d41a59
|
import argparse
import itertools
import logging
import os
import re
import simplejson as json
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import NeighborSearch
from config import PDB_RESIDUE_TYPES_BY_RESIDUE
LEVEL_MAP = {
'A': 'atom',
'R': 'residue',
'C': 'chain'
}
# MATCHES PDB ACCESSIONS
# E.G. 1XKK
PDB_REGEX = r'([0-9]{1}[a-zA-Z]{3})'
# MATCHES PDB-BIOMOLID
# E.G. 1XKK-1
PDB_BIOMOL_REGEX = PDB_REGEX + r'-([0-9]+)'
# MATCHES RSCB STYLE ASSEMBLIES
# E.G. 1XKK.pdb1
PDB_RCSB_ASM_REGEX = PDB_REGEX + r'\.pdb([0-9]+)'
PDB_REGEX = re.compile(PDB_REGEX)
PDB_BIOMOL_REGEX = re.compile(PDB_BIOMOL_REGEX)
PDB_RCSB_ASM_REGEX = re.compile(PDB_RCSB_ASM_REGEX)
def cap(string, char=','):
'''Pad the beginning and end of string with a character.'''
return '{}{}{}'.format(char, string, char)
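# Illustrative examples (not from the original source): cap('A') returns ',A,'
# and cap("A,123`ALA") returns ',A,123`ALA,'; the padded commas keep the
# chain/residue/atom keys built below unambiguous.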
if __name__ == '__main__':
# ARGUMENT PARSING
parser = argparse.ArgumentParser(description='''
# Determine Interactions
Calculate all atom-atom, residue-residue, and chain-chain interactions
in PDB files.
This program assumes that the PDB file has already been cleaned and only takes
into account the first model.
''', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'inputfile', type=str, help='Path to the PDB file to be analysed.')
parser.add_argument('-i', '--interacting', type=float,
default=5.0, help='Distance cutoff for interactions.')
parser.add_argument('-o', '--outputs', type=str,
default='cr', help='Granularity of output: string '
'including letters "c" for chain level, "r" for '
'residue level, "a" for atom level.')
parser.add_argument('-tf', '--type-filter', type=str,
default='*', help='Filter which types of residue are '
'included in atom and residue level calculations. '
'Will consider all interactions made between residues '
'of that/those type(s), and other entities, e.g., '
'filtering by \'dna\' would include DNA-protein '
'interactions. \n'
'Options are: * for all, or: peptide, peptide_like, '
                        'dna, rna, saccharide, non_polymer, water. Separate '
'multiple residue types with ')
parser.add_argument('-v', '--verbose',
action='store_true', help='Be chatty.')
args = parser.parse_args()
# SET ARGS TO CONSTANTS
INPUT_FILE = args.inputfile
INPUT_FILE_SPLITEXT = os.path.splitext(INPUT_FILE)[0]
INPUT_FILENAME = os.path.split(INPUT_FILE)[1]
INTERACTION_THRESHOLD = args.interacting
TYPE_FILTER = args.type_filter
OUTPUTS = args.outputs.upper()
# LOGGING
if args.verbose:
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s//%(asctime)s.%(msecs).03d//%(message)s',
datefmt='%H:%M:%S')
else:
logging.basicConfig(
level=logging.WARN,
format='%(levelname)s//%(asctime)s.%(msecs).03d//%(message)s',
datefmt='%H:%M:%S')
logging.info('Program begin.')
# DETECT PDB ACCESSION FROM FILENAME IF POSSIBLE
PDB = ''
BIOMOL_ID = ''
pdb_match = re.search(PDB_REGEX, INPUT_FILENAME)
if pdb_match:
PDB = pdb_match.group(1)
pdb_biomol_match = re.search(PDB_BIOMOL_REGEX, INPUT_FILENAME)
if pdb_biomol_match:
PDB = pdb_biomol_match.group(1)
BIOMOL_ID = pdb_biomol_match.group(2)
pdb_rcsb_asm_match = re.search(PDB_RCSB_ASM_REGEX, INPUT_FILENAME)
if pdb_rcsb_asm_match:
PDB = pdb_rcsb_asm_match.group(1)
BIOMOL_ID = pdb_rcsb_asm_match.group(2)
# LOAD STRUCTURE
structure = PDBParser().get_structure('structure', INPUT_FILE)
structure_atoms = list(structure.get_atoms())
logging.info('Loaded PDB structure (BioPython).')
# CONSTRUCT KDTREE
neighborsearch = NeighborSearch(structure_atoms)
    logging.info('Constructed NeighborSearch.')
# GET INTERACTIONS
logging.info('Calculating interactions...')
for interaction_level in 'ARC':
if interaction_level in OUTPUTS:
logging.info('Calculating interactions for {}s...'.format(
LEVEL_MAP[interaction_level]))
pairs = neighborsearch.search_all(INTERACTION_THRESHOLD,
level=interaction_level)
logging.info('Search complete for {}s.'.format(
LEVEL_MAP[interaction_level]))
logging.info('Organising interactions for {}s...'.format(
LEVEL_MAP[interaction_level]))
interactions = {}
for entities in pairs:
entity1, entity2 = entities
# NO SELFIES
if (entity1 is entity2) or (entity1 == entity2):
continue
id1 = entity1.get_full_id()
id2 = entity2.get_full_id()
if interaction_level == 'A':
res1 = entity1.get_parent()
res2 = entity2.get_parent()
res1 = res1.resname.strip()
res2 = res2.resname.strip()
entity1 = cap(','.join(
[id1[2], str(id1[3][1]) + id1[3][2].strip() + '`' + res1, entity1.name]))
entity2 = cap(','.join(
[id2[2], str(id2[3][1]) + id2[3][2].strip() + '`' + res2, entity2.name]))
elif interaction_level == 'R':
entity1 = cap(','.join(
[id1[2], str(id1[3][1]) + id1[3][2].strip() + '`' + entity1.resname.strip()]))
entity2 = cap(','.join(
[id2[2], str(id2[3][1]) + id2[3][2].strip() + '`' + entity2.resname.strip()]))
elif interaction_level == 'C':
entity1 = cap(entity1.id)
entity2 = cap(entity2.id)
# ADD INTERACTING ENTITY TO LIST OF INTERACTORS
if entity1 not in interactions:
interactions[entity1] = []
if entity2 not in interactions:
interactions[entity2] = []
if entity2 not in interactions[entity1]:
interactions[entity1].append(entity2)
if entity1 not in interactions[entity2]:
interactions[entity2].append(entity1)
for entity in interactions:
interactions[entity] = sorted(interactions[entity])
logging.info('Organisation complete for {}s.'.format(
LEVEL_MAP[interaction_level]))
logging.info('Constructing JSON for {}s...'.format(
LEVEL_MAP[interaction_level]))
json_output = {
'input': INPUT_FILE,
'pdb': PDB,
'biomol_id': BIOMOL_ID,
'level': LEVEL_MAP[interaction_level],
'interactions': interactions
}
# TYPE RESIDUES IF POSSIBLE
if interaction_level in 'AR' and PDB_RESIDUE_TYPES_BY_RESIDUE:
logging.info('Typing residues for {} output...'.format(
LEVEL_MAP[interaction_level]))
json_output['residue_types'] = {}
for entity in json_output['interactions']:
resname = None
if interaction_level == 'A':
resname = entity.split(',')[-3].split('`')[1]
if interaction_level == 'R':
resname = entity.split(',')[-2].split('`')[1]
if resname:
restype = None
try:
restype = PDB_RESIDUE_TYPES_BY_RESIDUE[resname]
except:
logging.warn('Could not type residue: {}'.format(entity))
json_output['residue_types'][
entity] = restype
# TYPE FILTER
if TYPE_FILTER != '*' and not PDB_RESIDUE_TYPES_BY_RESIDUE:
logging.warn('Not applying type filtering, because PDB '
'residue typing data is not available. '
'See https://github.com/harryjubb/pdb_interactions#residue-typing for information.')
if TYPE_FILTER != '*' and interaction_level in 'AR' and PDB_RESIDUE_TYPES_BY_RESIDUE:
logging.info('Filtering interactions by residue type for {} output...'.format(
LEVEL_MAP[interaction_level]))
# REMOVE INTERACTIONS NOT IN TYPE FILTER
json_output['interactions'] = {
entity: interactors for entity, interactors in json_output['interactions'].iteritems() if
json_output['residue_types'][entity] in TYPE_FILTER
}
# REMOVE ANY ENTITIES NOT INTERACTING FROM THE RESIDUE TYPES DICTIONARY
remaining_interacting_entities = set(list(itertools.chain(
*([entity] + interactors for entity, interactors in json_output['interactions'].iteritems())
)))
json_output['residue_types'] = {
entity: etype for entity, etype in json_output['residue_types'].iteritems()
if entity in remaining_interacting_entities
}
# WRITE OUT JSON OUTPUT
logging.info('Writing JSON for {}s...'.format(
LEVEL_MAP[interaction_level]))
with open('.'.join([INPUT_FILE_SPLITEXT, LEVEL_MAP[interaction_level], 'interactions' if TYPE_FILTER == '*' else '_'.join(TYPE_FILTER.split()), 'json']), 'wb') as fo:
json.dump(json_output, fo)
logging.info('JSON output written for {}s.'.format(
LEVEL_MAP[interaction_level]))
# FINISH UP
logging.info('Program end.')
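# Illustrative key formats written to the JSON files above (values are
# hypothetical, not real output):
#   chain level:   ',A,'
#   residue level: ',A,123`ALA,'
#   atom level:    ',A,123`ALA,CA,'
# Each key maps to a sorted list of the keys of the entities it interacts with.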
|
harryjubb/pdb_interactions
|
determine_interactions.py
|
Python
|
mit
| 10,147
|
[
"Biopython"
] |
4070b8c7815dfc4c23e56c5a7a62c4caf507296cd5dbd962e4228e322f2642aa
|
#!/usr/bin/env python
#
# Copyright 2017 Phedorabot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
from phedorabot.api import PhedorabotAPIClient
from phedorabot import exceptions
__all__ = (
'PhedorabotOneTimeSchedulerAPIClient')
'''A Phedorabot One Time Task Scheduler API Client Library.
For full API documentation, visit https://www.phedorabot.com/api/docs/.
Typical usage:
client = phedorabot.PhedorabotClient(
api_key=..., api_secret=..., access_token=...)
onetime = PhedorabotOneTimeSchedulerAPIClient()
resp = client.request.send(onetime)
if resp.is_error():
        print resp.get_error()
else:
data = resp.get_raw_data()
'''
class PhedorabotOneTimeSchedulerAPIClient(PhedorabotAPIClient):
    ''' Schedule one time tasks on Phedorabot for execution '''
def __init__(self):
super(PhedorabotOneTimeSchedulerAPIClient, self).__init__()
self.parameters = None
self.set_starting_at(0)
self.set_paging_limit(100)
def set_subscription_id(self, sid):
''' Subscription ID
Set the one time subscription id as shown on your Phedorabot One Time
subscription page
'''
if sid is not None:
self.set_parameter('subscription_id', str(sid))
def set_job_id(self, jid):
''' Set the Job ID of the one time task '''
if jid is not None:
self.set_parameter('job_id', jid)
def set_task_name(self, task_name):
''' Task Name
        Set the one time task name to be used for identifying the task, not more than
64 characters
'''
if task_name is not None:
self.set_parameter('task_name', str(task_name))
def set_task_description(self, task_description):
''' Task Description
Set the one time task description to be used for explaining what the
        task is doing, not more than 160 characters
'''
if task_description is not None:
self.set_parameter('task_description', str(task_description))
def add_custom_property(self, prop_key, prop_value):
''' Add Custom Properties
This will be returned along with the Instant Execution Notification
payload that will be sent to you
'''
if prop_key is not None and prop_value is not None:
props = self.get_parameter('onetime_properties', {})
props[str(prop_key)] = prop_value
self.set_parameter('onetime_properties', props)
def add_custom_header(self, head_key, head_value):
''' Custom Headers
This will be added to the header of the Instant Execution Notification
payload that will be sent to your server
'''
if head_key is not None and head_value is not None:
headers = self.get_parameter('onetime_headers', {})
headers[str(head_key)] = head_value
            self.set_parameter('onetime_headers', headers)
def set_time_unit(self, time_unit):
''' Time Unit
        Time unit is part of what makes up the duration of the task or how long
        it will take for the task to be executed, valid time units are
(hour, day, week, month or year) for example if you want to execute a
task in 2 weeks time then the time unit will be 'week'
'''
valid_time_units = {'hour':1,'day':1,'week':1,'month':1,'year':1}
if not time_unit or not len(time_unit):
raise exceptions.PhedorabotAPIError(
'invalid_time_unit'
, 'Time unit '+time_unit+' is not valid')
if not valid_time_units.has_key(time_unit):
raise exceptions.PhedorabotAPIError(
'invalid_time_unit'
, 'Provided time unit '+time_unit+' is not a valid time unit')
self.set_parameter('time_unit', time_unit)
def set_period_length(self, period_length):
''' Period Length
Period length is part of what makes up the duration of the task or how
long it will take for the task to be executed, valid period length are
positive numbers. For example if you want a task to be executed 1 month
from today, then the period length is 1 and the time unit is month
'''
try:
period_length = int(period_length)
except Exception as e:
raise exceptions.PhedorabotAPIError(
'invalid_period_length'
, 'Period length is not valid {0}'.format(str(e)))
self.set_parameter('period_length', period_length)
def set_start_date(self, start_date):
''' Start Date
This is the contextual date that should be used for calculating when
this task will execute for the first time, acceptable dates is of the
format 'Year-Month-Day Hour:Minutes:seconds' for example
'2017-06-14 10:30:00' is a valid date, which means for example if you
set time_unit to be 'month' and period_length to be '1' then this task
will be executed on the 14th of July 2017 at 10:30 am
'''
parts = start_date.split(' ')
if len(parts) != 2:
raise exceptions.PhedorabotAPIError(
'invalid_start_date'
, 'Start date should consist of both the date and the time')
date_parts = parts[0].split('-')
time_parts = parts[1].split(':')
if len(date_parts) != 3:
raise exceptions.PhedorabotAPIError(
'invalid_start_date'
, 'The date portion of the start date is not valid')
if len(time_parts) != 3:
raise exceptions.PhedorabotAPIError(
'invalid_start_date'
, 'The time portion of the start date is not valid')
self.set_parameter('start_date', start_date)
def set_day_of_month(self, day):
''' Day of month
        Day of the month is a number between 1 and 31, this number means
that you want Phedorabot to use the day of the month as the contextual
date for calculating when the task should start executing.
For example, if today is February 15 and you set this day to be 14,
then the contextual date will be set to 14th of March. If you set the
day to be 18, then the contextual date will be set to February 18th;
if you set the day to be 15, then the contextual date will be set to
today, using this contextual date we can then compute when the
task should start executing.
If you want the contextual date to be at the end of the month
regardless of the month, then set the day to 31
'''
valid_ranges = range(1, 32)
try:
day = int(day)
if not day in valid_ranges:
raise Exception('Day of month {0} is not valid'.format(day))
except Exception as e:
raise exceptions.PhedorabotAPIError('invalid_day_of_month', str(e))
self.set_parameter('day_of_month', day)
def should_start_immediately(self):
''' Start Immediately
        If you do not want to set a start date or a day of the month you can
        call this method; it will tell Phedorabot to use the current date and
        time at which your request was received by the API to calculate when
the task will be executed
'''
        self.set_parameter('start_immediately', True)
def with_exclude_weekends(self):
        ''' Exclude Weekends (Saturdays and Sundays)
With the given contextual date from which to calculate when the task
will be executed, if this option is set, Phedorabot will calculate the
        normal task execution date from this, then it will scan through to remove
        all Saturdays and Sundays, the implication is that the execution date will
        be pushed forward by some days depending on how many weekends were
found
'''
        self.set_parameter('exclude_weekends', True)
def set_callback_uri(self, uri):
''' Callback uri
The callback uri is a fully qualified url for receiving
Instant Execution Notification (IEN)
An example listener is something like this:
http://website.com/task/execution_listener.php
this url will always receive a POST from Phedorabot
'''
if uri is not None:
if len(str(uri)) >= 2087:
raise exceptions.PhedorabotAPIError(
'invalid_callback_uri'
, 'Callback uri is not valid must be a valid uri and \
not more than 2087 characters in length')
self.set_parameter('callback_uri', str(uri))
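# Hypothetical usage sketch (identifiers and values are illustrative only);
# `client` is the PhedorabotClient from the module docstring above.
#
#   onetime = PhedorabotOneTimeSchedulerAPIClient()
#   onetime.set_subscription_id('...')
#   onetime.set_task_name('send-reminder')
#   onetime.set_time_unit('week')
#   onetime.set_period_length(2)
#   onetime.should_start_immediately()
#   onetime.set_callback_uri('https://example.com/task/listener')
#   resp = client.request.send(onetime)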
|
Phedorabot/phedorabot-python-sdk
|
phedorabot/onetime.py
|
Python
|
apache-2.0
| 9,192
|
[
"VisIt"
] |
af27226f477d8a84ffa6962c72e5d1c298da431b4bba3fac49061fb6ae481980
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.utils import gast
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.utils import index_in_list
from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import ForToWhileTransformer
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
__all__ = [
'RETURN_NO_VALUE_MAGIC_NUM', 'RETURN_NO_VALUE_VAR_NAME', 'ReturnTransformer'
]
# Constant for the name of the variable which stores the boolean state that we
# should return
RETURN_PREFIX = '__return'
# Constant for the name of the variable which stores the final return value
RETURN_VALUE_PREFIX = '__return_value'
# Constant for the name of variables to initialize the __return_value
RETURN_VALUE_INIT_NAME = '__return_value_init'
# Constant magic number representing returning no value. This constant aims to
# support returning various lengths of variables. A static graph must have a fixed
# size of fetched output while dygraph can have flexible lengths of output; to
# solve this in dy2stat, we put a float64 value with this magic number into the
# static graph as a placeholder, indicating that no value should be returned at
# that position.
RETURN_NO_VALUE_MAGIC_NUM = 1.77113e+279
RETURN_NO_VALUE_VAR_NAME = "__no_value_return_var"
def get_return_size(return_node):
assert isinstance(return_node, gast.Return), "Input is not gast.Return node"
return_length = 0
if return_node.value is not None:
if isinstance(return_node.value, gast.Tuple):
return_length = len(return_node.value.elts)
else:
return_length = 1
return return_length
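# Illustrative examples (derived from the logic above, not from the original
# source): `return` -> 0, `return x` -> 1, `return a, b, c` -> 3.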
class ReplaceReturnNoneTransformer(gast.NodeTransformer):
"""
    Replace 'return None' with 'return' because 'None' cannot be a valid input
    in control flow. In ReturnTransformer a bare 'return' will have a no-value
    placeholder appended.
"""
def __init__(self, root_node):
self.root = root_node
def transform(self):
self.visit(self.root)
def visit_Return(self, node):
if isinstance(node.value, gast.Name) and node.value.id == 'None':
node.value = None
return node
if isinstance(node.value, gast.Constant) and node.value.value == None:
node.value = None
return node
return node
class ReturnAnalysisVisitor(gast.NodeVisitor):
"""
    Visits the gast tree and analyzes the information about 'return'.
"""
def __init__(self, root_node):
self.root = root_node
# A list to store where the current function is.
self.function_def = []
# Mapping from gast.FunctionDef node to the number of return statements
# Python allows define function inside function so we have to handle it
self.count_return = {}
# Mapping from gast.FunctionDef node to the maximum number of variables
# returned by the function's return statement
self.max_return_length = {}
self.visit(self.root)
def visit_FunctionDef(self, node):
self.function_def.append(node)
self.count_return[node] = 0
self.max_return_length[node] = 0
self.generic_visit(node)
self.function_def.pop()
return node
def visit_Return(self, node):
assert len(
self.function_def) > 0, "Found 'return' statement out of function."
cur_func = self.function_def[-1]
if cur_func in self.count_return:
self.count_return[cur_func] += 1
else:
self.count_return[cur_func] = 1
return_length = get_return_size(node)
if cur_func in self.max_return_length:
self.max_return_length[cur_func] = max(
self.max_return_length[cur_func], return_length)
else:
self.max_return_length[cur_func] = return_length
self.generic_visit(node)
def get_func_return_count(self, func_node):
return self.count_return[func_node]
def get_func_max_return_length(self, func_node):
return self.max_return_length[func_node]
class ReturnTransformer(gast.NodeTransformer):
"""
    Transforms return statements into equivalent Python statements containing
    only one return statement at the end. The basic idea is to use a return value
    variable to store the values of early return statements, and boolean state
    variables with if-else blocks to skip the statements after the return.
"""
def __init__(self, wrapper_root):
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
pre_transformer = ReplaceReturnNoneTransformer(self.root)
pre_transformer.transform()
self.ancestor_nodes = []
# The name of the variable which stores the final return value
# Mapping from FunctionDef node to string
self.return_value_name = {}
        # The names of the variables which store the boolean state used to skip
        # statements. Mapping from FunctionDef node to list
self.return_name = {}
        # The names of the variables which are placeholders to handle variable-
        # length returns. Mapping from FunctionDef node to list
self.return_no_value_name = {}
# A list of FunctionDef to store where the current function is.
self.function_def = []
self.pre_analysis = None
def transform(self):
self.visit(self.root)
def generic_visit(self, node):
        # Because we change ancestor nodes during visit_Return, not the current
        # node, the original generic_visit of NodeTransformer may visit a node
        # which has been deleted. To prevent that node from being added to the
        # transformed AST, we write our own generic_visit and visit methods.
for field, value in gast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, gast.AST):
self.visit(item)
elif isinstance(value, gast.AST):
self.visit(value)
def visit(self, node):
"""
Self-defined visit for appending ancestor
"""
self.ancestor_nodes.append(node)
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
ret = visitor(node)
self.ancestor_nodes.pop()
return ret
def visit_FunctionDef(self, node):
self.function_def.append(node)
self.return_value_name[node] = None
self.return_name[node] = []
self.return_no_value_name[node] = []
self.pre_analysis = ReturnAnalysisVisitor(node)
max_return_length = self.pre_analysis.get_func_max_return_length(node)
while self.pre_analysis.get_func_return_count(node) > 1:
self.generic_visit(node)
self.pre_analysis = ReturnAnalysisVisitor(node)
if max_return_length == 0:
self.function_def.pop()
return node
# Prepend initialization of final return and append final return statement
value_name = self.return_value_name[node]
if value_name is not None:
node.body.append(
gast.Return(value=gast.Name(
id=value_name,
ctx=gast.Load(),
annotation=None,
type_comment=None)))
init_names = [
unique_name.generate(RETURN_VALUE_INIT_NAME)
for i in range(max_return_length)
]
assign_zero_nodes = [
create_fill_constant_node(iname, 0.0) for iname in init_names
]
if len(init_names) == 1:
return_value_nodes = gast.Name(
id=init_names[0],
ctx=gast.Load(),
annotation=None,
type_comment=None)
else:
            # We need to initialize the return value as a tuple because control
            # flow requires some inputs or outputs to have the same structure
return_value_nodes = gast.Tuple(
elts=[
gast.Name(
id=iname,
ctx=gast.Load(),
annotation=None,
type_comment=None) for iname in init_names
],
ctx=gast.Load())
assign_return_value_node = gast.Assign(
targets=[
gast.Name(
id=value_name,
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=return_value_nodes)
node.body.insert(0, assign_return_value_node)
node.body[:0] = assign_zero_nodes
# Prepend no value placeholders
for name in self.return_no_value_name[node]:
assign_no_value_node = create_fill_constant_node(
name, RETURN_NO_VALUE_MAGIC_NUM)
node.body.insert(0, assign_no_value_node)
self.function_def.pop()
return node
def visit_Return(self, node):
cur_func_node = self.function_def[-1]
return_name = unique_name.generate(RETURN_PREFIX)
self.return_name[cur_func_node].append(return_name)
max_return_length = self.pre_analysis.get_func_max_return_length(
cur_func_node)
parent_node_of_return = self.ancestor_nodes[-2]
for ancestor_index in reversed(range(len(self.ancestor_nodes) - 1)):
ancestor = self.ancestor_nodes[ancestor_index]
cur_node = self.ancestor_nodes[ancestor_index + 1]
if hasattr(ancestor,
"body") and index_in_list(ancestor.body, cur_node) != -1:
if cur_node == node:
self._replace_return_in_stmt_list(
ancestor.body, cur_node, return_name, max_return_length,
parent_node_of_return)
self._replace_after_node_to_if_in_stmt_list(
ancestor.body, cur_node, return_name, parent_node_of_return)
elif hasattr(ancestor, "orelse") and index_in_list(ancestor.orelse,
cur_node) != -1:
if cur_node == node:
self._replace_return_in_stmt_list(
ancestor.orelse, cur_node, return_name,
max_return_length, parent_node_of_return)
self._replace_after_node_to_if_in_stmt_list(
ancestor.orelse, cur_node, return_name,
parent_node_of_return)
            # If the return node is in a while loop, add `not return_name` to
            # gast.While.test
if isinstance(ancestor, gast.While):
cond_var_node = gast.UnaryOp(
op=gast.Not(),
operand=gast.Name(
id=return_name,
ctx=gast.Load(),
annotation=None,
type_comment=None))
ancestor.test = gast.BoolOp(
op=gast.And(), values=[ancestor.test, cond_var_node])
continue
            # If the return node is in a for loop, convert the for loop into a
            # while loop and add `not return_name` to the new while loop's test
if isinstance(ancestor, gast.For):
cond_var_node = gast.UnaryOp(
op=gast.Not(),
operand=gast.Name(
id=return_name,
ctx=gast.Load(),
annotation=None,
type_comment=None))
parent_node = self.ancestor_nodes[ancestor_index - 1]
for_to_while = ForToWhileTransformer(parent_node, ancestor,
cond_var_node)
new_stmts = for_to_while.transform()
while_node = new_stmts[-1]
self.ancestor_nodes[ancestor_index] = while_node
if ancestor == cur_func_node:
break
# return_node is replaced so we shouldn't return here
def _replace_return_in_stmt_list(self, stmt_list, return_node, return_name,
max_return_length, parent_node_of_return):
assert max_return_length >= 0, "Input illegal max_return_length"
i = index_in_list(stmt_list, return_node)
if i == -1:
return False
assign_nodes = []
# Here assume that the parent node of return is gast.If
if isinstance(parent_node_of_return, gast.If):
# Prepend control flow boolean nodes such as '__return@1 = True'
node_str = "{} = paddle.jit.dy2static.create_bool_as_type({}, True)".format(
return_name,
ast_to_source_code(parent_node_of_return.test).strip())
assign_true_node = gast.parse(node_str).body[0]
assign_nodes.append(assign_true_node)
cur_func_node = self.function_def[-1]
return_length = get_return_size(return_node)
if return_length < max_return_length:
# In this case we should append RETURN_NO_VALUE placeholder
#
            # max_return_length must be >= 1 here because return_length is at
            # least 0.
if self.return_value_name[cur_func_node] is None:
self.return_value_name[cur_func_node] = unique_name.generate(
RETURN_VALUE_PREFIX)
no_value_names = [
unique_name.generate(RETURN_NO_VALUE_VAR_NAME)
for j in range(max_return_length - return_length)
]
self.return_no_value_name[cur_func_node].extend(no_value_names)
# Handle tuple/non-tuple case
if max_return_length == 1:
assign_nodes.append(
gast.Assign(
targets=[
gast.Name(
id=self.return_value_name[cur_func_node],
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=gast.Name(
id=no_value_names[0],
ctx=gast.Load(),
annotation=None,
type_comment=None)))
else:
# max_return_length > 1 which means we should assign tuple
fill_tuple = [
gast.Name(
id=n,
ctx=gast.Load(),
annotation=None,
type_comment=None) for n in no_value_names
]
if return_node.value is not None:
if isinstance(return_node.value, gast.Tuple):
fill_tuple[:0] = return_node.value.elts
else:
fill_tuple.insert(0, return_node.value)
assign_nodes.append(
gast.Assign(
targets=[
gast.Name(
id=self.return_value_name[cur_func_node],
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=gast.Tuple(
elts=fill_tuple, ctx=gast.Load())))
else:
# In this case we should NOT append RETURN_NO_VALUE placeholder
if return_node.value is not None:
cur_func_node = self.function_def[-1]
if self.return_value_name[cur_func_node] is None:
self.return_value_name[
cur_func_node] = unique_name.generate(
RETURN_VALUE_PREFIX)
assign_nodes.append(
gast.Assign(
targets=[
gast.Name(
id=self.return_value_name[cur_func_node],
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=return_node.value))
stmt_list[i:] = assign_nodes
return True
def _replace_after_node_to_if_in_stmt_list(
self, stmt_list, node, return_name, parent_node_of_return):
i = index_in_list(stmt_list, node)
if i < 0 or i >= len(stmt_list):
return False
if i == len(stmt_list) - 1:
# No need to add, we consider this as added successfully
return True
if_stmt = gast.If(test=gast.UnaryOp(
op=gast.Not(),
operand=gast.Name(
id=return_name,
ctx=gast.Store(),
annotation=None,
type_comment=None)),
body=stmt_list[i + 1:],
orelse=[])
stmt_list[i + 1:] = [if_stmt]
# Here assume that the parent node of return is gast.If
if isinstance(parent_node_of_return, gast.If):
# Prepend control flow boolean nodes such as '__return@1 = False'
node_str = "{} = paddle.jit.dy2static.create_bool_as_type({}, False)".format(
return_name,
ast_to_source_code(parent_node_of_return.test).strip())
assign_false_node = gast.parse(node_str).body[0]
stmt_list[i:i] = [assign_false_node]
return True
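# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Conceptually, after this transformation a function such as
#
#     def foo(x):
#         if x > 0:
#             return 1
#         return 0
#
# no longer returns early: the value of each early `return` is stored in a
# generated return-value variable, a generated boolean flag records that the
# return fired, the statements after the return are wrapped in
# `if not <flag>: ...` (and loop tests gain `and not <flag>`), so that exactly
# one `return <return-value variable>` remains at the end of the function.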
|
PaddlePaddle/Paddle
|
python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py
|
Python
|
apache-2.0
| 18,556
|
[
"VisIt"
] |
9cb9c35cb019b5c42f237449da61dd733612e8fea33826ce0a54d75412838278
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Module to support the loading of a NetCDF file into an Iris cube.
See also: `netCDF4 python <http://code.google.com/p/netcdf4-python/>`_.
Also refer to document 'NetCDF Climate and Forecast (CF) Metadata Conventions',
Version 1.4, 27 February 2009.
"""
import collections
from itertools import repeat, zip_longest
import os
import os.path
import re
import string
import warnings
import dask.array as da
import netCDF4
import numpy as np
import numpy.ma as ma
from pyke import knowledge_engine
import iris.analysis
from iris.aux_factory import (
HybridHeightFactory,
HybridPressureFactory,
OceanSigmaZFactory,
OceanSigmaFactory,
OceanSFactory,
OceanSg1Factory,
OceanSg2Factory,
)
import iris.config
import iris.coord_systems
import iris.coords
import iris.cube
import iris.exceptions
import iris.fileformats.cf
import iris.fileformats._pyke_rules
import iris.io
import iris.util
from iris._lazy_data import as_lazy_data
# Show Pyke inference engine statistics.
DEBUG = False
# Pyke CF related file names.
_PYKE_RULE_BASE = "fc_rules_cf"
_PYKE_FACT_BASE = "facts_cf"
# Standard CML spatio-temporal axis names.
SPATIO_TEMPORAL_AXES = ["t", "z", "y", "x"]
# Pass through CF attributes:
# - comment
# - Conventions
# - flag_masks
# - flag_meanings
# - flag_values
# - history
# - institution
# - reference
# - source
# - title
# - positive
#
_CF_ATTRS = [
"add_offset",
"ancillary_variables",
"axis",
"bounds",
"calendar",
"cell_measures",
"cell_methods",
"climatology",
"compress",
"coordinates",
"_FillValue",
"formula_terms",
"grid_mapping",
"leap_month",
"leap_year",
"long_name",
"missing_value",
"month_lengths",
"scale_factor",
"standard_error_multiplier",
"standard_name",
"units",
]
# CF attributes that should not be global.
_CF_DATA_ATTRS = [
"flag_masks",
"flag_meanings",
"flag_values",
"instance_dimension",
"missing_value",
"sample_dimension",
"standard_error_multiplier",
]
# CF attributes that should only be global.
_CF_GLOBAL_ATTRS = ["conventions", "featureType", "history", "title"]
# UKMO specific attributes that should not be global.
_UKMO_DATA_ATTRS = ["STASH", "um_stash_source", "ukmo__process_flags"]
CF_CONVENTIONS_VERSION = "CF-1.7"
_FactoryDefn = collections.namedtuple(
"_FactoryDefn", ("primary", "std_name", "formula_terms_format")
)
_FACTORY_DEFNS = {
HybridHeightFactory: _FactoryDefn(
primary="delta",
std_name="atmosphere_hybrid_height_coordinate",
formula_terms_format="a: {delta} b: {sigma} orog: {orography}",
),
HybridPressureFactory: _FactoryDefn(
primary="delta",
std_name="atmosphere_hybrid_sigma_pressure_coordinate",
formula_terms_format="ap: {delta} b: {sigma} "
"ps: {surface_air_pressure}",
),
OceanSigmaZFactory: _FactoryDefn(
primary="zlev",
std_name="ocean_sigma_z_coordinate",
formula_terms_format="sigma: {sigma} eta: {eta} depth: {depth} "
"depth_c: {depth_c} nsigma: {nsigma} zlev: {zlev}",
),
OceanSigmaFactory: _FactoryDefn(
primary="sigma",
std_name="ocean_sigma_coordinate",
formula_terms_format="sigma: {sigma} eta: {eta} depth: {depth}",
),
OceanSFactory: _FactoryDefn(
primary="s",
std_name="ocean_s_coordinate",
formula_terms_format="s: {s} eta: {eta} depth: {depth} a: {a} b: {b} "
"depth_c: {depth_c}",
),
OceanSg1Factory: _FactoryDefn(
primary="s",
std_name="ocean_s_coordinate_g1",
formula_terms_format="s: {s} c: {c} eta: {eta} depth: {depth} "
"depth_c: {depth_c}",
),
OceanSg2Factory: _FactoryDefn(
primary="s",
std_name="ocean_s_coordinate_g2",
formula_terms_format="s: {s} c: {c} eta: {eta} depth: {depth} "
"depth_c: {depth_c}",
),
}
# Cell methods.
_CM_KNOWN_METHODS = [
"point",
"sum",
"mean",
"maximum",
"minimum",
"mid_range",
"standard_deviation",
"variance",
"mode",
"median",
]
_CM_COMMENT = "comment"
_CM_EXTRA = "extra"
_CM_INTERVAL = "interval"
_CM_METHOD = "method"
_CM_NAME = "name"
_CM_PARSE = re.compile(
r"""
(?P<name>([\w_]+\s*?:\s+)+)
(?P<method>[\w_\s]+(?![\w_]*\s*?:))\s*
(?:
\(\s*
(?P<extra>[^\)]+)
\)\s*
)?
""",
re.VERBOSE,
)
class UnknownCellMethodWarning(Warning):
pass
def parse_cell_methods(nc_cell_methods):
"""
Parse a CF cell_methods attribute string into a tuple of zero or
more CellMethod instances.
Args:
* nc_cell_methods (str):
The value of the cell methods attribute to be parsed.
Returns:
* cell_methods
An iterable of :class:`iris.coords.CellMethod`.
Multiple coordinates, intervals and comments are supported.
    If a method has a non-standard name, a warning is issued, but the results
    are not affected.
"""
cell_methods = []
if nc_cell_methods is not None:
for m in _CM_PARSE.finditer(nc_cell_methods):
d = m.groupdict()
method = d[_CM_METHOD]
method = method.strip()
# Check validity of method, allowing for multi-part methods
# e.g. mean over years.
method_words = method.split()
if method_words[0].lower() not in _CM_KNOWN_METHODS:
msg = "NetCDF variable contains unknown cell method {!r}"
warnings.warn(
msg.format("{}".format(method_words[0])),
UnknownCellMethodWarning,
)
d[_CM_METHOD] = method
name = d[_CM_NAME]
name = name.replace(" ", "")
name = name.rstrip(":")
d[_CM_NAME] = tuple([n for n in name.split(":")])
interval = []
comment = []
if d[_CM_EXTRA] is not None:
#
# tokenise the key words and field colon marker
#
d[_CM_EXTRA] = d[_CM_EXTRA].replace(
"comment:", "<<comment>><<:>>"
)
d[_CM_EXTRA] = d[_CM_EXTRA].replace(
"interval:", "<<interval>><<:>>"
)
d[_CM_EXTRA] = d[_CM_EXTRA].split("<<:>>")
if len(d[_CM_EXTRA]) == 1:
comment.extend(d[_CM_EXTRA])
else:
next_field_type = comment
for field in d[_CM_EXTRA]:
field_type = next_field_type
index = field.rfind("<<interval>>")
if index == 0:
next_field_type = interval
continue
elif index > 0:
next_field_type = interval
else:
index = field.rfind("<<comment>>")
if index == 0:
next_field_type = comment
continue
elif index > 0:
next_field_type = comment
if index != -1:
field = field[:index]
field_type.append(field.strip())
#
# cater for a shared interval over multiple axes
#
if len(interval):
if len(d[_CM_NAME]) != len(interval) and len(interval) == 1:
interval = interval * len(d[_CM_NAME])
#
# cater for a shared comment over multiple axes
#
if len(comment):
if len(d[_CM_NAME]) != len(comment) and len(comment) == 1:
comment = comment * len(d[_CM_NAME])
d[_CM_INTERVAL] = tuple(interval)
d[_CM_COMMENT] = tuple(comment)
cell_method = iris.coords.CellMethod(
d[_CM_METHOD],
coords=d[_CM_NAME],
intervals=d[_CM_INTERVAL],
comments=d[_CM_COMMENT],
)
cell_methods.append(cell_method)
return tuple(cell_methods)
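# A minimal usage sketch (added for illustration; the cell_methods string below
# is hypothetical rather than taken from a real file):
def _example_parse_cell_methods():  # pragma: no cover - illustrative only
    cell_methods = parse_cell_methods(
        "time: mean (interval: 1 hr comment: sampled hourly)"
    )
    for cell_method in cell_methods:
        # Each result is an iris.coords.CellMethod holding the parsed pieces.
        print(cell_method.method, cell_method.coord_names,
              cell_method.intervals, cell_method.comments)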
class CFNameCoordMap:
"""Provide a simple CF name to CF coordinate mapping."""
_Map = collections.namedtuple("_Map", ["name", "coord"])
def __init__(self):
self._map = []
def append(self, name, coord):
"""
Append the given name and coordinate pair to the mapping.
Args:
* name:
CF name of the associated coordinate.
* coord:
The coordinate of the associated CF name.
Returns:
None.
"""
self._map.append(CFNameCoordMap._Map(name, coord))
@property
def names(self):
"""Return all the CF names."""
return [pair.name for pair in self._map]
@property
def coords(self):
"""Return all the coordinates."""
return [pair.coord for pair in self._map]
def name(self, coord):
"""
Return the CF name, given a coordinate
Args:
* coord:
The coordinate of the associated CF name.
Returns:
            The CF name of the given coordinate.
"""
result = None
for pair in self._map:
if coord == pair.coord:
result = pair.name
break
if result is None:
msg = "Coordinate is not mapped, {!r}".format(coord)
raise KeyError(msg)
return result
def coord(self, name):
"""
Return the coordinate, given a CF name.
Args:
* name:
CF name of the associated coordinate.
Returns:
            The coordinate associated with the given CF name.
"""
result = None
for pair in self._map:
if name == pair.name:
result = pair.coord
break
if result is None:
msg = "Name is not mapped, {!r}".format(name)
raise KeyError(msg)
return result
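# A minimal usage sketch of CFNameCoordMap (added for illustration; the
# latitude coordinate below is hypothetical):
def _example_cf_name_coord_map():  # pragma: no cover - illustrative only
    latitude = iris.coords.DimCoord(
        np.array([0.0, 10.0, 20.0]), standard_name="latitude", units="degrees"
    )
    mapping = CFNameCoordMap()
    mapping.append("lat", latitude)
    # Look-ups work in both directions, raising KeyError for unknown entries.
    assert mapping.name(latitude) == "lat"
    assert mapping.coord("lat") is latitude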
def _pyke_kb_engine():
"""Return the PyKE knowledge engine for CF->cube conversion."""
pyke_dir = os.path.join(os.path.dirname(__file__), "_pyke_rules")
compile_dir = os.path.join(pyke_dir, "compiled_krb")
engine = None
if os.path.exists(compile_dir):
tmpvar = [
os.path.getmtime(os.path.join(compile_dir, fname))
for fname in os.listdir(compile_dir)
if not fname.startswith("_")
]
if tmpvar:
oldest_pyke_compile_file = min(tmpvar)
rule_age = os.path.getmtime(
os.path.join(pyke_dir, _PYKE_RULE_BASE + ".krb")
)
if oldest_pyke_compile_file >= rule_age:
# Initialise the pyke inference engine.
engine = knowledge_engine.engine(
(None, "iris.fileformats._pyke_rules.compiled_krb")
)
if engine is None:
engine = knowledge_engine.engine(iris.fileformats._pyke_rules)
return engine
class NetCDFDataProxy:
"""A reference to the data payload of a single NetCDF file variable."""
__slots__ = ("shape", "dtype", "path", "variable_name", "fill_value")
def __init__(self, shape, dtype, path, variable_name, fill_value):
self.shape = shape
self.dtype = dtype
self.path = path
self.variable_name = variable_name
self.fill_value = fill_value
@property
def ndim(self):
return len(self.shape)
def __getitem__(self, keys):
dataset = netCDF4.Dataset(self.path)
try:
variable = dataset.variables[self.variable_name]
# Get the NetCDF variable data and slice.
var = variable[keys]
finally:
dataset.close()
return np.asanyarray(var)
def __repr__(self):
fmt = (
"<{self.__class__.__name__} shape={self.shape}"
" dtype={self.dtype!r} path={self.path!r}"
" variable_name={self.variable_name!r}>"
)
return fmt.format(self=self)
def __getstate__(self):
return {attr: getattr(self, attr) for attr in self.__slots__}
def __setstate__(self, state):
for key, value in state.items():
setattr(self, key, value)
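# A minimal usage sketch of NetCDFDataProxy (added for illustration; the file
# path and variable name are hypothetical):
def _example_netcdf_data_proxy():  # pragma: no cover - illustrative only
    proxy = NetCDFDataProxy(
        shape=(10, 20),
        dtype=np.dtype("f4"),
        path="/tmp/example.nc",
        variable_name="air_temperature",
        fill_value=-999.0,
    )
    # The file is only opened when the proxy is indexed, so wrapping it with
    # as_lazy_data() gives a dask array that defers all reads.
    return as_lazy_data(proxy, chunks=None)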
def _assert_case_specific_facts(engine, cf, cf_group):
# Initialise pyke engine "provides" hooks.
engine.provides["coordinates"] = []
# Assert facts for CF coordinates.
for cf_name in cf_group.coordinates.keys():
engine.add_case_specific_fact(
_PYKE_FACT_BASE, "coordinate", (cf_name,)
)
# Assert facts for CF auxiliary coordinates.
for cf_name in cf_group.auxiliary_coordinates.keys():
engine.add_case_specific_fact(
_PYKE_FACT_BASE, "auxiliary_coordinate", (cf_name,)
)
# Assert facts for CF cell measures.
for cf_name in cf_group.cell_measures.keys():
engine.add_case_specific_fact(
_PYKE_FACT_BASE, "cell_measure", (cf_name,)
)
# Assert facts for CF grid_mappings.
for cf_name in cf_group.grid_mappings.keys():
engine.add_case_specific_fact(
_PYKE_FACT_BASE, "grid_mapping", (cf_name,)
)
# Assert facts for CF labels.
for cf_name in cf_group.labels.keys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, "label", (cf_name,))
# Assert facts for CF formula terms associated with the cf_group
# of the CF data variable.
formula_root = set()
for cf_var in cf.cf_group.formula_terms.values():
for cf_root, cf_term in cf_var.cf_terms_by_root.items():
# Only assert this fact if the formula root variable is
# defined in the CF group of the CF data variable.
if cf_root in cf_group:
formula_root.add(cf_root)
engine.add_case_specific_fact(
_PYKE_FACT_BASE,
"formula_term",
(cf_var.cf_name, cf_root, cf_term),
)
for cf_root in formula_root:
engine.add_case_specific_fact(
_PYKE_FACT_BASE, "formula_root", (cf_root,)
)
def _pyke_stats(engine, cf_name):
if DEBUG:
print("-" * 80)
print("CF Data Variable: %r" % cf_name)
engine.print_stats()
print("Rules Triggered:")
for rule in sorted(list(engine.rule_triggered)):
print("\t%s" % rule)
print("Case Specific Facts:")
kb_facts = engine.get_kb(_PYKE_FACT_BASE)
for key in kb_facts.entity_lists.keys():
for arg in kb_facts.entity_lists[key].case_specific_facts:
print("\t%s%s" % (key, arg))
def _set_attributes(attributes, key, value):
"""Set attributes dictionary, converting unicode strings appropriately."""
if isinstance(value, str):
try:
attributes[str(key)] = str(value)
except UnicodeEncodeError:
attributes[str(key)] = value
else:
attributes[str(key)] = value
def _get_actual_dtype(cf_var):
# Figure out what the eventual data type will be after any scale/offset
# transforms.
dummy_data = np.zeros(1, dtype=cf_var.dtype)
if hasattr(cf_var, "scale_factor"):
dummy_data = cf_var.scale_factor * dummy_data
if hasattr(cf_var, "add_offset"):
dummy_data = cf_var.add_offset + dummy_data
return dummy_data.dtype
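# A minimal sketch of the scale/offset dtype promotion performed by
# _get_actual_dtype (added for illustration; _FakeVar is a hypothetical
# stand-in for a CF variable):
def _example_get_actual_dtype():  # pragma: no cover - illustrative only
    class _FakeVar:
        dtype = np.dtype("i2")
        scale_factor = np.float32(0.01)

    # Packed int16 data with a float32 scale_factor unpacks to float32.
    return _get_actual_dtype(_FakeVar())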
def _get_cf_var_data(cf_var, filename):
# Get lazy chunked data out of a cf variable.
dtype = _get_actual_dtype(cf_var)
    # Create a proxy for deferred (lazy) access to the variable's data.
fill_value = getattr(
cf_var.cf_data,
"_FillValue",
netCDF4.default_fillvals[cf_var.dtype.str[1:]],
)
proxy = NetCDFDataProxy(
cf_var.shape, dtype, filename, cf_var.cf_name, fill_value
)
# Get the chunking specified for the variable : this is either a shape, or
# maybe the string "contiguous".
chunks = cf_var.cf_data.chunking()
# In the "contiguous" case, pass chunks=None to 'as_lazy_data'.
if chunks == "contiguous":
chunks = None
return as_lazy_data(proxy, chunks=chunks)
def _load_cube(engine, cf, cf_var, filename):
"""Create the cube associated with the CF-netCDF data variable."""
data = _get_cf_var_data(cf_var, filename)
cube = iris.cube.Cube(data)
# Reset the pyke inference engine.
engine.reset()
# Initialise pyke engine rule processing hooks.
engine.cf_var = cf_var
engine.cube = cube
engine.provides = {}
engine.requires = {}
engine.rule_triggered = set()
engine.filename = filename
# Assert any case-specific facts.
_assert_case_specific_facts(engine, cf, cf_var.cf_group)
# Run pyke inference engine with forward chaining rules.
engine.activate(_PYKE_RULE_BASE)
# Populate coordinate attributes with the untouched attributes from the
# associated CF-netCDF variable.
coordinates = engine.provides.get("coordinates", [])
def attribute_predicate(item):
return item[0] not in _CF_ATTRS
for coord, cf_var_name in coordinates:
tmpvar = filter(
attribute_predicate, cf.cf_group[cf_var_name].cf_attrs_unused()
)
for attr_name, attr_value in tmpvar:
_set_attributes(coord.attributes, attr_name, attr_value)
tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused())
# Attach untouched attributes of the associated CF-netCDF data variable to
# the cube.
for attr_name, attr_value in tmpvar:
_set_attributes(cube.attributes, attr_name, attr_value)
names = {
coord.var_name: coord.standard_name or coord.var_name or "unknown"
for coord in cube.coords()
}
cube.cell_methods = [
iris.coords.CellMethod(
method=method.method,
intervals=method.intervals,
comments=method.comments,
coords=[
names[coord_name] if coord_name in names else coord_name
for coord_name in method.coord_names
],
)
for method in cube.cell_methods
]
# Show pyke session statistics.
_pyke_stats(engine, cf_var.cf_name)
return cube
def _load_aux_factory(engine, cube):
"""
Convert any CF-netCDF dimensionless coordinate to an AuxCoordFactory.
"""
formula_type = engine.requires.get("formula_type")
if formula_type in [
"atmosphere_hybrid_height_coordinate",
"atmosphere_hybrid_sigma_pressure_coordinate",
"ocean_sigma_z_coordinate",
"ocean_sigma_coordinate",
"ocean_s_coordinate",
"ocean_s_coordinate_g1",
"ocean_s_coordinate_g2",
]:
def coord_from_term(term):
# Convert term names to coordinates (via netCDF variable names).
name = engine.requires["formula_terms"].get(term, None)
if name is not None:
for coord, cf_var_name in engine.provides["coordinates"]:
if cf_var_name == name:
return coord
warnings.warn(
"Unable to find coordinate for variable "
"{!r}".format(name)
)
if formula_type == "atmosphere_hybrid_height_coordinate":
delta = coord_from_term("a")
sigma = coord_from_term("b")
orography = coord_from_term("orog")
factory = HybridHeightFactory(delta, sigma, orography)
elif formula_type == "atmosphere_hybrid_sigma_pressure_coordinate":
# Hybrid pressure has two valid versions of its formula terms:
# "p0: var1 a: var2 b: var3 ps: var4" or
# "ap: var1 b: var2 ps: var3" where "ap = p0 * a"
# Attempt to get the "ap" term.
delta = coord_from_term("ap")
if delta is None:
# The "ap" term is unavailable, so try getting terms "p0"
# and "a" terms in order to derive an "ap" equivalent term.
coord_p0 = coord_from_term("p0")
if coord_p0 is not None:
if coord_p0.shape != (1,):
msg = (
"Expecting {!r} to be a scalar reference "
"pressure coordinate, got shape {!r}".format(
coord_p0.var_name, coord_p0.shape
)
)
raise ValueError(msg)
if coord_p0.has_bounds():
msg = (
"Ignoring atmosphere hybrid sigma pressure "
"scalar coordinate {!r} bounds.".format(
coord_p0.name()
)
)
warnings.warn(msg)
coord_a = coord_from_term("a")
if coord_a is not None:
delta = coord_a * coord_p0.points[0]
delta.units = coord_a.units * coord_p0.units
delta.rename("vertical pressure")
delta.var_name = "ap"
cube.add_aux_coord(delta, cube.coord_dims(coord_a))
sigma = coord_from_term("b")
surface_air_pressure = coord_from_term("ps")
factory = HybridPressureFactory(delta, sigma, surface_air_pressure)
elif formula_type == "ocean_sigma_z_coordinate":
sigma = coord_from_term("sigma")
eta = coord_from_term("eta")
depth = coord_from_term("depth")
depth_c = coord_from_term("depth_c")
nsigma = coord_from_term("nsigma")
zlev = coord_from_term("zlev")
factory = OceanSigmaZFactory(
sigma, eta, depth, depth_c, nsigma, zlev
)
elif formula_type == "ocean_sigma_coordinate":
sigma = coord_from_term("sigma")
eta = coord_from_term("eta")
depth = coord_from_term("depth")
factory = OceanSigmaFactory(sigma, eta, depth)
elif formula_type == "ocean_s_coordinate":
s = coord_from_term("s")
eta = coord_from_term("eta")
depth = coord_from_term("depth")
a = coord_from_term("a")
depth_c = coord_from_term("depth_c")
b = coord_from_term("b")
factory = OceanSFactory(s, eta, depth, a, b, depth_c)
elif formula_type == "ocean_s_coordinate_g1":
s = coord_from_term("s")
c = coord_from_term("c")
eta = coord_from_term("eta")
depth = coord_from_term("depth")
depth_c = coord_from_term("depth_c")
factory = OceanSg1Factory(s, c, eta, depth, depth_c)
elif formula_type == "ocean_s_coordinate_g2":
s = coord_from_term("s")
c = coord_from_term("c")
eta = coord_from_term("eta")
depth = coord_from_term("depth")
depth_c = coord_from_term("depth_c")
factory = OceanSg2Factory(s, c, eta, depth, depth_c)
cube.add_aux_factory(factory)
def load_cubes(filenames, callback=None):
"""
Loads cubes from a list of NetCDF filenames/URLs.
Args:
* filenames (string/list):
One or more NetCDF filenames/DAP URLs to load from.
Kwargs:
* callback (callable function):
Function which can be passed on to :func:`iris.io.run_callback`.
Returns:
        Generator of loaded NetCDF :class:`iris.cube.Cube`.
"""
# Initialise the pyke inference engine.
engine = _pyke_kb_engine()
if isinstance(filenames, str):
filenames = [filenames]
for filename in filenames:
# Ingest the netCDF file.
cf = iris.fileformats.cf.CFReader(filename)
# Process each CF data variable.
data_variables = list(cf.cf_group.data_variables.values()) + list(
cf.cf_group.promoted.values()
)
for cf_var in data_variables:
cube = _load_cube(engine, cf, cf_var, filename)
# Process any associated formula terms and attach
# the corresponding AuxCoordFactory.
try:
_load_aux_factory(engine, cube)
except ValueError as e:
warnings.warn("{}".format(e))
# Perform any user registered callback function.
cube = iris.io.run_callback(callback, cube, cf_var, filename)
# Callback mechanism may return None, which must not be yielded
if cube is None:
continue
yield cube
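# A minimal usage sketch for load_cubes (added for illustration; the file name
# is hypothetical and the callback just annotates each cube):
def _example_load_cubes():  # pragma: no cover - illustrative only
    def callback(cube, field, filename):
        # Callbacks may modify the cube in place; here we record the source.
        cube.attributes["source_file"] = filename

    return list(load_cubes("/tmp/example.nc", callback=callback))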
def _bytes_if_ascii(string):
"""
Convert the given string to a byte string (str in py2k, bytes in py3k)
    if it can be encoded as ASCII, else maintain the type of the input
    string.
Note: passing objects without an `encode` method (such as None) will
be returned by the function unchanged.
"""
if isinstance(string, str):
try:
return string.encode(encoding="ascii")
except (AttributeError, UnicodeEncodeError):
pass
return string
def _setncattr(variable, name, attribute):
"""
    Set the given attribute on the given netCDF4 variable, casting string
    attributes to bytes rather than unicode where possible.
"""
attribute = _bytes_if_ascii(attribute)
return variable.setncattr(name, attribute)
class _FillValueMaskCheckAndStoreTarget:
"""
To be used with da.store. Remembers whether any element was equal to a
given value and whether it was masked, before passing the chunk to the
given target.
"""
def __init__(self, target, fill_value=None):
self.target = target
self.fill_value = fill_value
self.contains_value = False
self.is_masked = False
def __setitem__(self, keys, arr):
if self.fill_value is not None:
self.contains_value = self.contains_value or self.fill_value in arr
self.is_masked = self.is_masked or ma.is_masked(arr)
self.target[keys] = arr
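# A minimal usage sketch of _FillValueMaskCheckAndStoreTarget (added for
# illustration; a plain numpy array stands in for the real netCDF variable):
def _example_fill_value_check():  # pragma: no cover - illustrative only
    data = da.from_array(np.array([1.0, 2.0, -999.0]), chunks=2)
    target = _FillValueMaskCheckAndStoreTarget(np.empty(3), fill_value=-999.0)
    da.store([data], [target])
    # The wrapper records whether the fill value appeared and whether any
    # element was masked, while still writing the chunks to the target.
    return target.contains_value, target.is_masked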
class Saver:
"""A manager for saving netcdf files."""
def __init__(self, filename, netcdf_format):
"""
A manager for saving netcdf files.
Args:
* filename (string):
Name of the netCDF file to save the cube.
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
Returns:
None.
For example::
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube)
"""
if netcdf_format not in [
"NETCDF4",
"NETCDF4_CLASSIC",
"NETCDF3_CLASSIC",
"NETCDF3_64BIT",
]:
raise ValueError(
"Unknown netCDF file format, got %r" % netcdf_format
)
# All persistent variables
#: CF name mapping with iris coordinates
self._name_coord_map = CFNameCoordMap()
#: List of dimension coordinates added to the file
self._dim_coords = []
#: List of grid mappings added to the file
self._coord_systems = []
#: A dictionary, listing dimension names and corresponding length
self._existing_dim = {}
#: A dictionary, mapping formula terms to owner cf variable name
self._formula_terms_cache = {}
#: NetCDF dataset
try:
self._dataset = netCDF4.Dataset(
filename, mode="w", format=netcdf_format
)
except RuntimeError:
dir_name = os.path.dirname(filename)
if not os.path.isdir(dir_name):
msg = "No such file or directory: {}".format(dir_name)
raise IOError(msg)
if not os.access(dir_name, os.R_OK | os.W_OK):
msg = "Permission denied: {}".format(filename)
raise IOError(msg)
else:
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Flush any buffered data to the CF-netCDF file before closing."""
self._dataset.sync()
self._dataset.close()
def write(
self,
cube,
local_keys=None,
unlimited_dimensions=None,
zlib=False,
complevel=4,
shuffle=True,
fletcher32=False,
contiguous=False,
chunksizes=None,
endian="native",
least_significant_digit=None,
packing=None,
fill_value=None,
):
"""
Wrapper for saving cubes to a NetCDF file.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Kwargs:
* local_keys (iterable of strings):
            An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
List of coordinate names (or coordinate objects)
corresponding to coordinate dimensions of `cube` to save with the
NetCDF dimension variable length 'UNLIMITED'. By default, no
unlimited dimensions are saved. Only the 'NETCDF4' format
supports multiple 'UNLIMITED' dimensions.
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using
gzip compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before
compressing the data (default `True`). This significantly improves
compression. Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk.
Default `False`. Setting to `True` for a variable with an unlimited
dimension will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of
the variable. A detailed discussion of HDF chunking and I/O
performance is available here:
http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html. Basically,
you want the chunk size for each dimension to match as closely as
possible the size of the data block that users will read from the
file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read
            on a computer with the opposite format to the one used to create
the file, there may be some performance advantage to be gained by
setting the endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this
produces 'lossy', but significantly more efficient compression. For
example, if `least_significant_digit=1`, data will be quantized
using `numpy.around(scale*data)/scale`, where `scale = 2**bits`,
and `bits` is determined so that a precision of 0.1 is retained (in
this case `bits=4`). From
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal
place in unpacked data that is a reliable value". Default is
`None`, or no quantization, or 'lossless' compression.
* packing (type or string or dict or list): A numpy integer datatype
(signed or unsigned) or a string that describes a numpy integer
            dtype (e.g. 'i2', 'short', 'u4') or a dict of packing parameters as
described below. This provides support for netCDF data packing as
described in
http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html#bp_Packed-Data-Values
If this argument is a type (or type string), appropriate values of
scale_factor and add_offset will be automatically calculated based
on `cube.data` and possible masking. For more control, pass a dict
with one or more of the following keys: `dtype` (required),
`scale_factor` and `add_offset`. Note that automatic calculation of
packing parameters will trigger loading of lazy data; set them
manually using a dict to avoid this. The default is `None`, in
which case the datatype is determined from the cube and no packing
will occur.
* fill_value:
The value to use for the `_FillValue` attribute on the netCDF
variable. If `packing` is specified the value of `fill_value`
should be in the domain of the packed data.
Returns:
None.
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF
3 files that do not use HDF5.
"""
if unlimited_dimensions is None:
unlimited_dimensions = []
cf_profile_available = iris.site_configuration.get(
"cf_profile"
) not in [None, False]
if cf_profile_available:
# Perform a CF profile of the cube. This may result in an exception
# being raised if mandatory requirements are not satisfied.
profile = iris.site_configuration["cf_profile"](cube)
        # Ensure that attributes are CF compliant, making them compliant where
        # possible.
self.check_attribute_compliance(cube, cube.lazy_data())
for coord in cube.coords():
self.check_attribute_compliance(coord, coord.points)
# Get suitable dimension names.
dimension_names = self._get_dim_names(cube)
# Create the CF-netCDF data dimensions.
self._create_cf_dimensions(cube, dimension_names, unlimited_dimensions)
# Create the associated cube CF-netCDF data variable.
cf_var_cube = self._create_cf_data_variable(
cube,
dimension_names,
local_keys,
zlib=zlib,
complevel=complevel,
shuffle=shuffle,
fletcher32=fletcher32,
contiguous=contiguous,
chunksizes=chunksizes,
endian=endian,
least_significant_digit=least_significant_digit,
packing=packing,
fill_value=fill_value,
)
# Add coordinate variables.
self._add_dim_coords(cube, dimension_names)
# Add the auxiliary coordinate variables and associate the data
# variable to them
self._add_aux_coords(cube, cf_var_cube, dimension_names)
# Add the cell_measures variables and associate the data
# variable to them
self._add_cell_measures(cube, cf_var_cube, dimension_names)
        # Add the ancillary_variables and associate the data variable
        # with them
self._add_ancillary_variables(cube, cf_var_cube, dimension_names)
# Add the formula terms to the appropriate cf variables for each
# aux factory in the cube.
self._add_aux_factories(cube, cf_var_cube, dimension_names)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add global attributes taking into account local_keys.
global_attributes = {
k: v
for k, v in cube.attributes.items()
if (k not in local_keys and k.lower() != "conventions")
}
self.update_global_attributes(global_attributes)
if cf_profile_available:
cf_patch = iris.site_configuration.get("cf_patch")
if cf_patch is not None:
# Perform a CF patch of the dataset.
cf_patch(profile, self._dataset, cf_var_cube)
else:
msg = "cf_profile is available but no {} defined.".format(
"cf_patch"
)
warnings.warn(msg)
@staticmethod
def check_attribute_compliance(container, data):
def _coerce_value(val_attr, val_attr_value, data_dtype):
val_attr_tmp = np.array(val_attr_value, dtype=data_dtype)
if (val_attr_tmp != val_attr_value).any():
msg = '"{}" is not of a suitable value ({})'
raise ValueError(msg.format(val_attr, val_attr_value))
return val_attr_tmp
data_dtype = data.dtype
# Ensure that conflicting attributes are not provided.
if (
container.attributes.get("valid_min") is not None
or container.attributes.get("valid_max") is not None
) and container.attributes.get("valid_range") is not None:
msg = (
'Both "valid_range" and "valid_min" or "valid_max" '
"attributes present."
)
raise ValueError(msg)
# Ensure correct datatype
for val_attr in ["valid_range", "valid_min", "valid_max"]:
val_attr_value = container.attributes.get(val_attr)
if val_attr_value is not None:
val_attr_value = np.asarray(val_attr_value)
if data_dtype.itemsize == 1:
# Allow signed integral type
if val_attr_value.dtype.kind == "i":
continue
new_val = _coerce_value(val_attr, val_attr_value, data_dtype)
container.attributes[val_attr] = new_val
def update_global_attributes(self, attributes=None, **kwargs):
"""
Update the CF global attributes based on the provided
iterable/dictionary and/or keyword arguments.
Args:
* attributes (dict or iterable of key, value pairs):
CF global attributes to be updated.
"""
if attributes is not None:
# Handle sequence e.g. [('fruit', 'apple'), ...].
if not hasattr(attributes, "keys"):
attributes = dict(attributes)
for attr_name in sorted(attributes):
_setncattr(self._dataset, attr_name, attributes[attr_name])
for attr_name in sorted(kwargs):
_setncattr(self._dataset, attr_name, kwargs[attr_name])
def _create_cf_dimensions(
self, cube, dimension_names, unlimited_dimensions=None
):
"""
Create the CF-netCDF data dimensions.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` in which to lookup coordinates.
Kwargs:
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
List of coordinates to make unlimited (None by default).
Returns:
None.
"""
unlimited_dim_names = []
for coord in unlimited_dimensions:
try:
coord = cube.coord(name_or_coord=coord, dim_coords=True)
except iris.exceptions.CoordinateNotFoundError:
# coordinate isn't used for this cube, but it might be
# used for a different one
pass
else:
dim_name = self._get_coord_variable_name(cube, coord)
unlimited_dim_names.append(dim_name)
for dim_name in dimension_names:
if dim_name not in self._dataset.dimensions:
if dim_name in unlimited_dim_names:
size = None
else:
size = self._existing_dim[dim_name]
self._dataset.createDimension(dim_name, size)
def _add_inner_related_vars(
self,
cube,
cf_var_cube,
dimension_names,
coordlike_elements,
saver_create_method,
role_attribute_name,
):
# Common method to create a set of file variables and attach them to
# the parent data variable.
element_names = []
# Add CF-netCDF variables for the associated auxiliary coordinates.
for element in sorted(
coordlike_elements, key=lambda element: element.name()
):
# Create the associated CF-netCDF variable.
if element not in self._name_coord_map.coords:
cf_name = saver_create_method(cube, dimension_names, element)
self._name_coord_map.append(cf_name, element)
else:
cf_name = self._name_coord_map.name(element)
if cf_name is not None:
if role_attribute_name == "cell_measures":
# In the case of cell-measures, the attribute entries are not just
# a var_name, but each have the form "<measure>: <varname>".
cf_name = "{}: {}".format(element.measure, cf_name)
element_names.append(cf_name)
# Add CF-netCDF references to the primary data variable.
if element_names:
variable_names = " ".join(sorted(element_names))
_setncattr(cf_var_cube, role_attribute_name, variable_names)
def _add_aux_coords(self, cube, cf_var_cube, dimension_names):
"""
        Add auxiliary coordinates to the dataset and associate them with the
        data variable.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
return self._add_inner_related_vars(
cube,
cf_var_cube,
dimension_names,
cube.aux_coords,
self._create_cf_coord_variable,
"coordinates",
)
def _add_cell_measures(self, cube, cf_var_cube, dimension_names):
"""
        Add cell measures to the dataset and associate them with the data
        variable.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
return self._add_inner_related_vars(
cube,
cf_var_cube,
dimension_names,
cube.cell_measures(),
self._create_cf_cell_measure_variable,
"cell_measures",
)
def _add_ancillary_variables(self, cube, cf_var_cube, dimension_names):
"""
        Add ancillary variables to the dataset and associate them with the
        data variable.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
return self._add_inner_related_vars(
cube,
cf_var_cube,
dimension_names,
cube.ancillary_variables(),
self._create_cf_ancildata_variable,
"ancillary_variables",
)
def _add_dim_coords(self, cube, dimension_names):
"""
Add coordinate variables to NetCDF dataset.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
# Ensure we create the netCDF coordinate variables first.
for coord in cube.dim_coords:
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_coord_variable(
cube, dimension_names, coord
)
self._name_coord_map.append(cf_name, coord)
def _add_aux_factories(self, cube, cf_var_cube, dimension_names):
"""
Modifies the variables of the NetCDF dataset to represent
the presence of dimensionless vertical coordinates based on
the aux factories of the cube (if any).
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`)
CF variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
primaries = []
for factory in cube.aux_factories:
factory_defn = _FACTORY_DEFNS.get(type(factory), None)
if factory_defn is None:
msg = (
"Unable to determine formula terms "
"for AuxFactory: {!r}".format(factory)
)
warnings.warn(msg)
else:
# Override `standard_name`, `long_name`, and `axis` of the
                # primary coord that signals the presence of a dimensionless
# vertical coord, then set the `formula_terms` attribute.
primary_coord = factory.dependencies[factory_defn.primary]
if primary_coord in primaries:
msg = (
"Cube {!r} has multiple aux factories that share "
"a common primary coordinate {!r}. Unable to save "
"to netCDF as having multiple formula terms on a "
"single coordinate is not supported."
)
raise ValueError(msg.format(cube, primary_coord.name()))
primaries.append(primary_coord)
cf_name = self._name_coord_map.name(primary_coord)
cf_var = self._dataset.variables[cf_name]
names = {
key: self._name_coord_map.name(coord)
for key, coord in factory.dependencies.items()
}
formula_terms = factory_defn.formula_terms_format.format(
**names
)
std_name = factory_defn.std_name
if hasattr(cf_var, "formula_terms"):
if (
cf_var.formula_terms != formula_terms
or cf_var.standard_name != std_name
):
# TODO: We need to resolve this corner-case where
# the dimensionless vertical coordinate containing the
# formula_terms is a dimension coordinate of the
# associated cube and a new alternatively named
# dimensionless vertical coordinate is required with
# new formula_terms and a renamed dimension.
if cf_name in dimension_names:
msg = (
"Unable to create dimensonless vertical "
"coordinate."
)
raise ValueError(msg)
key = (cf_name, std_name, formula_terms)
name = self._formula_terms_cache.get(key)
if name is None:
# Create a new variable
name = self._create_cf_coord_variable(
cube, dimension_names, primary_coord
)
cf_var = self._dataset.variables[name]
_setncattr(cf_var, "standard_name", std_name)
_setncattr(cf_var, "axis", "Z")
# Update the formula terms.
ft = formula_terms.split()
ft = [name if t == cf_name else t for t in ft]
_setncattr(cf_var, "formula_terms", " ".join(ft))
# Update the cache.
self._formula_terms_cache[key] = name
# Update the associated cube variable.
coords = cf_var_cube.coordinates.split()
coords = [name if c == cf_name else c for c in coords]
_setncattr(
cf_var_cube, "coordinates", " ".join(coords)
)
else:
_setncattr(cf_var, "standard_name", std_name)
_setncattr(cf_var, "axis", "Z")
_setncattr(cf_var, "formula_terms", formula_terms)
def _get_dim_names(self, cube):
"""
Determine suitable CF-netCDF data dimension names.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Returns:
            List of dimension names with length equal to the number of dimensions
in the cube.
"""
dimension_names = []
for dim in range(cube.ndim):
coords = cube.coords(dimensions=dim, dim_coords=True)
if coords:
coord = coords[0]
dim_name = self._get_coord_variable_name(cube, coord)
# Add only dimensions that have not already been added.
if coord not in self._dim_coords:
# Determine unique dimension name
while (
dim_name in self._existing_dim
or dim_name in self._name_coord_map.names
):
dim_name = self._increment_name(dim_name)
# Update names added, current cube dim names used and
# unique coordinates added.
self._existing_dim[dim_name] = coord.shape[0]
dimension_names.append(dim_name)
self._dim_coords.append(coord)
else:
# Return the dim_name associated with the existing
# coordinate.
dim_name = self._name_coord_map.name(coord)
dimension_names.append(dim_name)
else:
# No CF-netCDF coordinates describe this data dimension.
dim_name = "dim%d" % dim
if dim_name in self._existing_dim:
# Increment name if conflicted with one already existing.
if self._existing_dim[dim_name] != cube.shape[dim]:
while (
dim_name in self._existing_dim
and self._existing_dim[dim_name] != cube.shape[dim]
or dim_name in self._name_coord_map.names
):
dim_name = self._increment_name(dim_name)
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
else:
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
dimension_names.append(dim_name)
return dimension_names
@staticmethod
def cf_valid_var_name(var_name):
"""
Return a valid CF var_name given a potentially invalid name.
Args:
* var_name (str):
The var_name to normalise
Returns:
A var_name suitable for passing through for variable creation.
"""
        # Replace invalid characters with an underscore ("_").
var_name = re.sub(r"[^a-zA-Z0-9]", "_", var_name)
# Ensure the variable name starts with a letter.
if re.match(r"^[^a-zA-Z]", var_name):
var_name = "var_{}".format(var_name)
return var_name
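    # For example (added note): cf_valid_var_name("2 metre temperature")
    # returns "var_2_metre_temperature" - invalid characters become
    # underscores and a leading non-letter triggers the "var_" prefix.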
@staticmethod
def _cf_coord_identity(coord):
"""
        Determine suitable units from a given coordinate.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
Returns:
            The (standard_name, long_name, units) of the given
:class:`iris.coords.Coord` instance.
"""
units = str(coord.units)
# Set the 'units' of 'latitude' and 'longitude' coordinates specified
# in 'degrees' to 'degrees_north' and 'degrees_east' respectively,
# as defined in the CF conventions for netCDF files: sections 4.1 and
# 4.2.
if (
isinstance(coord.coord_system, iris.coord_systems.GeogCS)
or coord.coord_system is None
) and coord.units == "degrees":
if coord.standard_name == "latitude":
units = "degrees_north"
elif coord.standard_name == "longitude":
units = "degrees_east"
return coord.standard_name, coord.long_name, units
def _ensure_valid_dtype(self, values, src_name, src_object):
# NetCDF3 and NetCDF4 classic do not support int64 or unsigned ints,
# so we check if we can store them as int32 instead.
if (
np.issubdtype(values.dtype, np.int64)
or np.issubdtype(values.dtype, np.unsignedinteger)
) and self._dataset.file_format in (
"NETCDF3_CLASSIC",
"NETCDF3_64BIT",
"NETCDF4_CLASSIC",
):
# Cast to an integer type supported by netCDF3.
if not np.can_cast(values.max(), np.int32) or not np.can_cast(
values.min(), np.int32
):
msg = (
"The data type of {} {!r} is not supported by {} and"
" its values cannot be safely cast to a supported"
" integer type."
)
msg = msg.format(
src_name, src_object, self._dataset.file_format
)
raise ValueError(msg)
values = values.astype(np.int32)
return values
def _create_cf_bounds(self, coord, cf_var, cf_name):
"""
Create the associated CF-netCDF bounds variable.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
* cf_var:
CF-netCDF variable
* cf_name (string):
name of the CF-NetCDF variable.
Returns:
None
"""
if coord.has_bounds():
# Get the values in a form which is valid for the file format.
bounds = self._ensure_valid_dtype(
coord.bounds, "the bounds of coordinate", coord
)
n_bounds = bounds.shape[-1]
if n_bounds == 2:
bounds_dimension_name = "bnds"
else:
bounds_dimension_name = "bnds_%s" % n_bounds
if coord.climatological:
property_name = "climatology"
varname_extra = "climatology"
else:
property_name = "bounds"
varname_extra = "bnds"
if bounds_dimension_name not in self._dataset.dimensions:
# Create the bounds dimension with the appropriate extent.
self._dataset.createDimension(bounds_dimension_name, n_bounds)
boundsvar_name = "{}_{}".format(cf_name, varname_extra)
_setncattr(cf_var, property_name, boundsvar_name)
cf_var_bounds = self._dataset.createVariable(
boundsvar_name,
bounds.dtype.newbyteorder("="),
cf_var.dimensions + (bounds_dimension_name,),
)
cf_var_bounds[:] = bounds
def _get_cube_variable_name(self, cube):
"""
Returns a CF-netCDF variable name for the given cube.
Args:
* cube (class:`iris.cube.Cube`):
An instance of a cube for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if cube.var_name is not None:
cf_name = cube.var_name
else:
# Convert to lower case and replace whitespace by underscores.
cf_name = "_".join(cube.name().lower().split())
cf_name = self.cf_valid_var_name(cf_name)
return cf_name
def _get_coord_variable_name(self, cube, coord):
"""
Returns a CF-netCDF variable name for the given coordinate.
Args:
* cube (:class:`iris.cube.Cube`):
The cube that contains the given coordinate.
* coord (:class:`iris.coords.Coord`):
An instance of a coordinate for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if coord.var_name is not None:
cf_name = coord.var_name
else:
name = coord.standard_name or coord.long_name
if not name or set(name).intersection(string.whitespace):
# Auto-generate name based on associated dimensions.
name = ""
for dim in cube.coord_dims(coord):
name += "dim{}".format(dim)
# Handle scalar coordinate (dims == ()).
if not name:
name = "unknown_scalar"
# Convert to lower case and replace whitespace by underscores.
cf_name = "_".join(name.lower().split())
cf_name = self.cf_valid_var_name(cf_name)
return cf_name
def _inner_create_cf_cellmeasure_or_ancil_variable(
self, cube, dimension_names, dimensional_metadata
):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given dimensional_metadata.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* dimensional_metadata (:class:`iris.coords.CellMeasure`):
A cell measure OR ancillary variable to be saved to the
CF-netCDF file.
In either case, provides data, units and standard/long/var names.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
cf_name = self._get_coord_variable_name(cube, dimensional_metadata)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Derive the data dimension names for the coordinate.
cf_dimensions = [
dimension_names[dim]
for dim in dimensional_metadata.cube_dims(cube)
]
# Get the data values.
data = dimensional_metadata.data
if isinstance(dimensional_metadata, iris.coords.CellMeasure):
# Disallow saving of *masked* cell measures.
# NOTE: currently, this is the only functional difference required
# between variable creation for an ancillary and a cell measure.
if ma.is_masked(data):
# We can't save masked points properly, as we don't maintain a
# suitable fill_value. (Load will not record one, either).
msg = "Cell measures with missing data are not supported."
raise ValueError(msg)
# Get the values in a form which is valid for the file format.
data = self._ensure_valid_dtype(
data, "coordinate", dimensional_metadata
)
# Create the CF-netCDF variable.
cf_var = self._dataset.createVariable(
cf_name, data.dtype.newbyteorder("="), cf_dimensions
)
# Add the data to the CF-netCDF variable.
cf_var[:] = data
if dimensional_metadata.units != "unknown":
_setncattr(cf_var, "units", str(dimensional_metadata.units))
if dimensional_metadata.standard_name is not None:
_setncattr(
cf_var, "standard_name", dimensional_metadata.standard_name
)
if dimensional_metadata.long_name is not None:
_setncattr(cf_var, "long_name", dimensional_metadata.long_name)
# Add any other custom coordinate attributes.
for name in sorted(dimensional_metadata.attributes):
value = dimensional_metadata.attributes[name]
# Don't clobber existing attributes.
if not hasattr(cf_var, name):
_setncattr(cf_var, name, value)
return cf_name
def _create_cf_cell_measure_variable(
self, cube, dimension_names, cell_measure
):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given cell_measure.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* cell_measure (:class:`iris.coords.CellMeasure`):
The cell measure to be saved to CF-netCDF file.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
# Note: currently shares variable creation code with ancillary-variables.
return self._inner_create_cf_cellmeasure_or_ancil_variable(
cube, dimension_names, cell_measure
)
def _create_cf_ancildata_variable(
self, cube, dimension_names, ancillary_variable
):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given ancillary variable.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* ancillary_variable (:class:`iris.coords.AncillaryVariable`):
The ancillary variable to be saved to the CF-netCDF file.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
# Note: currently shares variable creation code with cell-measures.
return self._inner_create_cf_cellmeasure_or_ancil_variable(
cube, dimension_names, ancillary_variable
)
def _create_cf_coord_variable(self, cube, dimension_names, coord):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given coordinate. If required, also create the CF-netCDF bounds
variable and associated dimension.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* coord (:class:`iris.coords.Coord`):
The coordinate to be saved to CF-netCDF file.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
cf_name = self._get_coord_variable_name(cube, coord)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Derive the data dimension names for the coordinate.
cf_dimensions = [
dimension_names[dim] for dim in cube.coord_dims(coord)
]
if np.issubdtype(coord.points.dtype, np.str_):
string_dimension_depth = coord.points.dtype.itemsize
if coord.points.dtype.kind == "U":
string_dimension_depth //= 4
string_dimension_name = "string%d" % string_dimension_depth
# Determine whether to create the string length dimension.
if string_dimension_name not in self._dataset.dimensions:
self._dataset.createDimension(
string_dimension_name, string_dimension_depth
)
# Add the string length dimension to dimension names.
cf_dimensions.append(string_dimension_name)
# Create the label coordinate variable.
cf_var = self._dataset.createVariable(
cf_name, "|S1", cf_dimensions
)
# Add the payload to the label coordinate variable.
if len(cf_dimensions) == 1:
cf_var[:] = list(
"%- *s" % (string_dimension_depth, coord.points[0])
)
else:
for index in np.ndindex(coord.points.shape):
index_slice = tuple(list(index) + [slice(None, None)])
cf_var[index_slice] = list(
"%- *s" % (string_dimension_depth, coord.points[index])
)
else:
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
if coord in cf_coordinates:
# By definition of a CF-netCDF coordinate variable this
# coordinate must be 1-D and the name of the CF-netCDF variable
# must be the same as its dimension name.
cf_name = cf_dimensions[0]
# Get the values in a form which is valid for the file format.
points = self._ensure_valid_dtype(
coord.points, "coordinate", coord
)
# Create the CF-netCDF variable.
cf_var = self._dataset.createVariable(
cf_name, points.dtype.newbyteorder("="), cf_dimensions
)
# Add the axis attribute for spatio-temporal CF-netCDF coordinates.
if coord in cf_coordinates:
axis = iris.util.guess_coord_axis(coord)
if axis is not None and axis.lower() in SPATIO_TEMPORAL_AXES:
_setncattr(cf_var, "axis", axis.upper())
# Add the data to the CF-netCDF variable.
cf_var[:] = points
# Create the associated CF-netCDF bounds variable.
self._create_cf_bounds(coord, cf_var, cf_name)
# Deal with CF-netCDF units and standard name.
standard_name, long_name, units = self._cf_coord_identity(coord)
if units != "unknown":
_setncattr(cf_var, "units", units)
if standard_name is not None:
_setncattr(cf_var, "standard_name", standard_name)
if long_name is not None:
_setncattr(cf_var, "long_name", long_name)
# Add the CF-netCDF calendar attribute.
if coord.units.calendar:
_setncattr(cf_var, "calendar", coord.units.calendar)
# Add any other custom coordinate attributes.
for name in sorted(coord.attributes):
value = coord.attributes[name]
if name == "STASH":
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
name = "um_stash_source"
value = str(value)
# Don't clobber existing attributes.
if not hasattr(cf_var, name):
_setncattr(cf_var, name, value)
return cf_name
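# Editor's illustrative note (not part of the original source): for a 1-D
# label coordinate with dtype "<U6", the string branch above computes
# itemsize 24 // 4 = 6, creates (or reuses) a "string6" dimension, and writes
# each label left-justified and space-padded to 6 characters as a "|S1"
# character array.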
def _create_cf_cell_methods(self, cube, dimension_names):
"""
Create CF-netCDF string representation of a cube cell methods.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
Returns:
CF-netCDF string representation of a cube cell methods.
"""
cell_methods = []
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
for cm in cube.cell_methods:
names = ""
for name in cm.coord_names:
coord = cube.coords(name)
if coord:
coord = coord[0]
if coord in cf_coordinates:
name = dimension_names[cube.coord_dims(coord)[0]]
names += "%s: " % name
interval = " ".join(
["interval: %s" % interval for interval in cm.intervals or []]
)
comment = " ".join(
["comment: %s" % comment for comment in cm.comments or []]
)
extra = " ".join([interval, comment]).strip()
if extra:
extra = " (%s)" % extra
cell_methods.append(names + cm.method + extra)
return " ".join(cell_methods)
def _create_cf_grid_mapping(self, cube, cf_var_cube):
"""
Create CF-netCDF grid mapping variable and associated CF-netCDF
data variable grid mapping attribute.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
Returns:
None
"""
cs = cube.coord_system("CoordSystem")
if cs is not None:
# Grid var not yet created?
if cs not in self._coord_systems:
while cs.grid_mapping_name in self._dataset.variables:
aname = self._increment_name(cs.grid_mapping_name)
cs.grid_mapping_name = aname
cf_var_grid = self._dataset.createVariable(
cs.grid_mapping_name, np.int32
)
_setncattr(
cf_var_grid, "grid_mapping_name", cs.grid_mapping_name
)
def add_ellipsoid(ellipsoid):
cf_var_grid.longitude_of_prime_meridian = (
ellipsoid.longitude_of_prime_meridian
)
semi_major = ellipsoid.semi_major_axis
semi_minor = ellipsoid.semi_minor_axis
if semi_minor == semi_major:
cf_var_grid.earth_radius = semi_major
else:
cf_var_grid.semi_major_axis = semi_major
cf_var_grid.semi_minor_axis = semi_minor
# latlon
if isinstance(cs, iris.coord_systems.GeogCS):
add_ellipsoid(cs)
# rotated latlon
elif isinstance(cs, iris.coord_systems.RotatedGeogCS):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.grid_north_pole_latitude = (
cs.grid_north_pole_latitude
)
cf_var_grid.grid_north_pole_longitude = (
cs.grid_north_pole_longitude
)
cf_var_grid.north_pole_grid_longitude = (
cs.north_pole_grid_longitude
)
# tmerc
elif isinstance(cs, iris.coord_systems.TransverseMercator):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_central_meridian = (
cs.longitude_of_central_meridian
)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin
)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.scale_factor_at_central_meridian = (
cs.scale_factor_at_central_meridian
)
# merc
elif isinstance(cs, iris.coord_systems.Mercator):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_projection_origin = (
cs.longitude_of_projection_origin
)
# The Mercator class has implicit defaults for certain
# parameters
cf_var_grid.false_easting = 0.0
cf_var_grid.false_northing = 0.0
cf_var_grid.scale_factor_at_projection_origin = 1.0
# lcc
elif isinstance(cs, iris.coord_systems.LambertConformal):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.standard_parallel = cs.secant_latitudes
cf_var_grid.latitude_of_projection_origin = cs.central_lat
cf_var_grid.longitude_of_central_meridian = cs.central_lon
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
# stereo
elif isinstance(cs, iris.coord_systems.Stereographic):
if cs.true_scale_lat is not None:
warnings.warn(
"Stereographic coordinate systems with "
"true scale latitude specified are not "
"yet handled"
)
else:
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_projection_origin = (
cs.central_lon
)
cf_var_grid.latitude_of_projection_origin = (
cs.central_lat
)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
# The Stereographic class has an implicit scale
# factor
cf_var_grid.scale_factor_at_projection_origin = 1.0
# osgb (a specific tmerc)
elif isinstance(cs, iris.coord_systems.OSGB):
warnings.warn("OSGB coordinate system not yet handled")
# lambert azimuthal equal area
elif isinstance(
cs, iris.coord_systems.LambertAzimuthalEqualArea
):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_projection_origin = (
cs.longitude_of_projection_origin
)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin
)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
# albers conical equal area
elif isinstance(cs, iris.coord_systems.AlbersEqualArea):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_central_meridian = (
cs.longitude_of_central_meridian
)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin
)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.standard_parallel = cs.standard_parallels
# vertical perspective
elif isinstance(cs, iris.coord_systems.VerticalPerspective):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_projection_origin = (
cs.longitude_of_projection_origin
)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin
)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.perspective_point_height = (
cs.perspective_point_height
)
# geostationary
elif isinstance(cs, iris.coord_systems.Geostationary):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_projection_origin = (
cs.longitude_of_projection_origin
)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin
)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.perspective_point_height = (
cs.perspective_point_height
)
cf_var_grid.sweep_angle_axis = cs.sweep_angle_axis
# other
else:
warnings.warn(
"Unable to represent the horizontal "
"coordinate system. The coordinate system "
"type %r is not yet implemented." % type(cs)
)
self._coord_systems.append(cs)
# Refer to grid var
_setncattr(cf_var_cube, "grid_mapping", cs.grid_mapping_name)
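# Editor's illustrative note (not original): for a cube on a rotated pole
# grid (iris.coord_systems.RotatedGeogCS) this method is expected to create a
# scalar int32 variable named "rotated_latitude_longitude" carrying the
# grid_north_pole_latitude/longitude attributes, and to set
# grid_mapping = "rotated_latitude_longitude" on the cube's data variable.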
def _create_cf_data_variable(
self,
cube,
dimension_names,
local_keys=None,
packing=None,
fill_value=None,
**kwargs,
):
"""
Create CF-netCDF data variable for the cube and any associated grid
mapping.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
String names for each dimension of the cube.
Kwargs:
* local_keys (iterable of strings):
* see :func:`iris.fileformats.netcdf.Saver.write`
* packing (type or string or dict or list):
* see :func:`iris.fileformats.netcdf.Saver.write`
* fill_value:
* see :func:`iris.fileformats.netcdf.Saver.write`
All other keywords are passed through to the dataset's `createVariable`
method.
Returns:
The newly created CF-netCDF data variable.
"""
if packing:
if isinstance(packing, dict):
if "dtype" not in packing:
msg = "The dtype attribute is required for packing."
raise ValueError(msg)
dtype = np.dtype(packing["dtype"])
scale_factor = packing.get("scale_factor", None)
add_offset = packing.get("add_offset", None)
valid_keys = {"dtype", "scale_factor", "add_offset"}
invalid_keys = set(packing.keys()) - valid_keys
if invalid_keys:
msg = (
"Invalid packing key(s) found: '{}'. The valid "
"keys are '{}'.".format(
"', '".join(invalid_keys), "', '".join(valid_keys)
)
)
raise ValueError(msg)
else:
# We compute the scale_factor and add_offset based on the
# min/max of the data. This requires the data to be loaded.
masked = ma.isMaskedArray(cube.data)
dtype = np.dtype(packing)
cmax = cube.data.max()
cmin = cube.data.min()
n = dtype.itemsize * 8
if masked:
scale_factor = (cmax - cmin) / (2 ** n - 2)
else:
scale_factor = (cmax - cmin) / (2 ** n - 1)
if dtype.kind == "u":
add_offset = cmin
elif dtype.kind == "i":
if masked:
add_offset = (cmax + cmin) / 2
else:
add_offset = cmin + 2 ** (n - 1) * scale_factor
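# Editor's worked example (not original): packing unmasked data spanning
# 0..100 as "i2" gives n = 16, scale_factor = 100 / (2**16 - 1) ~ 1.526e-3
# and, for the signed dtype, add_offset = 0 + 2**15 * scale_factor ~ 50.0,
# so the stored int16 values span the full representable range.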
def set_packing_ncattrs(cfvar):
"""Set netCDF packing attributes."""
if packing:
if scale_factor:
_setncattr(cfvar, "scale_factor", scale_factor)
if add_offset:
_setncattr(cfvar, "add_offset", add_offset)
cf_name = self._get_cube_variable_name(cube)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# if netcdf3 avoid streaming due to dtype handling
if not cube.has_lazy_data() or self._dataset.file_format in (
"NETCDF3_CLASSIC",
"NETCDF3_64BIT",
):
# Get the values in a form which is valid for the file format.
data = self._ensure_valid_dtype(cube.data, "cube", cube)
def store(data, cf_var, fill_value):
cf_var[:] = data
is_masked = ma.is_masked(data)
contains_value = fill_value is not None and fill_value in data
return is_masked, contains_value
else:
data = cube.lazy_data()
def store(data, cf_var, fill_value):
# Store lazy data and check whether it is masked and contains
# the fill value
target = _FillValueMaskCheckAndStoreTarget(cf_var, fill_value)
da.store([data], [target])
return target.is_masked, target.contains_value
if not packing:
dtype = data.dtype.newbyteorder("=")
# Create the cube CF-netCDF data variable with data payload.
cf_var = self._dataset.createVariable(
cf_name, dtype, dimension_names, fill_value=fill_value, **kwargs
)
set_packing_ncattrs(cf_var)
# If packing attributes are specified, don't bother checking whether
# the fill value is in the data.
if packing:
fill_value_to_check = None
elif fill_value is not None:
fill_value_to_check = fill_value
else:
fill_value_to_check = netCDF4.default_fillvals[dtype.str[1:]]
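# Editor's note (not original): e.g. for int16 data, dtype.str is "<i2", so
# the default checked here is netCDF4.default_fillvals["i2"], i.e. -32767.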
# Store the data and check if it is masked and contains the fill value
is_masked, contains_fill_value = store(
data, cf_var, fill_value_to_check
)
if dtype.itemsize == 1 and fill_value is None:
if is_masked:
msg = (
"Cube '{}' contains byte data with masked points, but "
"no fill_value keyword was given. As saved, these "
"points will read back as valid values. To save as "
"masked byte data, please explicitly specify the "
"'fill_value' keyword."
)
warnings.warn(msg.format(cube.name()))
elif contains_fill_value:
msg = (
"Cube '{}' contains unmasked data points equal to the "
"fill-value, {}. As saved, these points will read back "
"as missing data. To save these as normal values, please "
"specify a 'fill_value' keyword not equal to any valid "
"data points."
)
warnings.warn(msg.format(cube.name(), fill_value))
if cube.standard_name:
_setncattr(cf_var, "standard_name", cube.standard_name)
if cube.long_name:
_setncattr(cf_var, "long_name", cube.long_name)
if cube.units != "unknown":
_setncattr(cf_var, "units", str(cube.units))
# Add the CF-netCDF calendar attribute.
if cube.units.calendar:
_setncattr(cf_var, "calendar", cube.units.calendar)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add any cube attributes whose keys are in local_keys as
# CF-netCDF data variable attributes.
attr_names = set(cube.attributes).intersection(local_keys)
for attr_name in sorted(attr_names):
# Do not output 'conventions' attribute.
if attr_name.lower() == "conventions":
continue
value = cube.attributes[attr_name]
if attr_name == "STASH":
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
attr_name = "um_stash_source"
value = str(value)
if attr_name == "ukmo__process_flags":
value = " ".join([x.replace(" ", "_") for x in value])
if attr_name in _CF_GLOBAL_ATTRS:
msg = (
"{attr_name!r} is being added as CF data variable "
"attribute, but {attr_name!r} should only be a CF "
"global attribute.".format(attr_name=attr_name)
)
warnings.warn(msg)
_setncattr(cf_var, attr_name, value)
# Create the CF-netCDF data variable cell method attribute.
cell_methods = self._create_cf_cell_methods(cube, dimension_names)
if cell_methods:
_setncattr(cf_var, "cell_methods", cell_methods)
# Create the CF-netCDF grid mapping.
self._create_cf_grid_mapping(cube, cf_var)
return cf_var
def _increment_name(self, varname):
"""
Increment string name or begin increment.
Used to avoid conflicts between variable names: a numeric suffix is
appended (or incremented) to distinguish the name from existing ones.
Args:
* varname (string):
Variable name to increment.
Returns:
Incremented varname.
"""
num = 0
try:
name, endnum = varname.rsplit("_", 1)
if endnum.isdigit():
num = int(endnum) + 1
varname = name
except ValueError:
pass
return "{}_{}".format(varname, num)
def save(
cube,
filename,
netcdf_format="NETCDF4",
local_keys=None,
unlimited_dimensions=None,
zlib=False,
complevel=4,
shuffle=True,
fletcher32=False,
contiguous=False,
chunksizes=None,
endian="native",
least_significant_digit=None,
packing=None,
fill_value=None,
):
"""
Save cube(s) to a netCDF file, given the cube and the filename.
* Iris will write CF 1.5 compliant NetCDF files.
* The attributes dictionaries on each cube in the saved cube list
will be compared and common attributes saved as NetCDF global
attributes where appropriate.
* Keyword arguments specifying how to save the data are applied
to each cube. To use different settings for different cubes, use
the NetCDF Context manager (:class:`~Saver`) directly.
* The save process will stream the data payload to the file using dask,
enabling large data payloads to be saved and maintaining the 'lazy'
status of the cube's data payload, unless the netcdf_format is explicitly
specified to be 'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'.
Args:
* cube (:class:`iris.cube.Cube` or :class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or other
iterable of cubes to be saved to a netCDF file.
* filename (string):
Name of the netCDF file to save the cube(s).
Kwargs:
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
* local_keys (iterable of strings):
An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
List of coordinate names (or coordinate objects) corresponding
to coordinate dimensions of `cube` to save with the NetCDF dimension
variable length 'UNLIMITED'. By default, no unlimited dimensions are
saved. Only the 'NETCDF4' format supports multiple 'UNLIMITED'
dimensions.
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using gzip
compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression desired
(default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before compressing
the data (default `True`). This significantly improves compression.
Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk. Default
`False`. Setting to `True` for a variable with an unlimited dimension
will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of the
variable. A detailed discussion of HDF chunking and I/O performance is
available here: http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html.
Basically, you want the chunk size for each dimension to match as
closely as possible the size of the data block that users will read
from the file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read on a
computer with the opposite format as the one used to create the file,
there may be some performance advantage to be gained by setting the
endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this produces
'lossy', but significantly more efficient compression. For example, if
`least_significant_digit=1`, data will be quantized using
`numpy.around(scale*data)/scale`, where `scale = 2**bits`, and `bits`
is determined so that a precision of 0.1 is retained (in this case
`bits=4`). From
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal place
in unpacked data that is a reliable value". Default is `None`, or no
quantization, or 'lossless' compression.
* packing (type or string or dict or list): A numpy integer datatype
(signed or unsigned) or a string that describes a numpy integer dtype
(i.e. 'i2', 'short', 'u4') or a dict of packing parameters as described
below or an iterable of such types, strings, or dicts.
This provides support for netCDF data packing as described in
http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html#bp_Packed-Data-Values
If this argument is a type (or type string), appropriate values of
scale_factor and add_offset will be automatically calculated based
on `cube.data` and possible masking. For more control, pass a dict with
one or more of the following keys: `dtype` (required), `scale_factor`
and `add_offset`. Note that automatic calculation of packing parameters
will trigger loading of lazy data; set them manually using a dict to
avoid this. The default is `None`, in which case the datatype is
determined from the cube and no packing will occur. If this argument is
a list it must have the same number of elements as `cube` if `cube` is
a :class:`iris.cube.CubeList`, or one element, and each element of
this argument will be applied to each cube separately.
* fill_value (numeric or list):
The value to use for the `_FillValue` attribute on the netCDF variable.
If `packing` is specified the value of `fill_value` should be in the
domain of the packed data. If this argument is a list it must have the
same number of elements as `cube` if `cube` is a
:class:`iris.cube.CubeList`, or a single element, and each element of
this argument will be applied to each cube separately.
Returns:
None.
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF 3
files that do not use HDF5.
.. seealso::
NetCDF Context manager (:class:`~Saver`).
"""
if unlimited_dimensions is None:
unlimited_dimensions = []
if isinstance(cube, iris.cube.Cube):
cubes = iris.cube.CubeList()
cubes.append(cube)
else:
cubes = cube
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
# Determine the attribute keys that are common across all cubes and
# thereby extend the collection of local_keys for attributes
# that should be attributes on data variables.
attributes = cubes[0].attributes
common_keys = set(attributes)
for cube in cubes[1:]:
keys = set(cube.attributes)
local_keys.update(keys.symmetric_difference(common_keys))
common_keys.intersection_update(keys)
different_value_keys = []
for key in common_keys:
if np.any(attributes[key] != cube.attributes[key]):
different_value_keys.append(key)
common_keys.difference_update(different_value_keys)
local_keys.update(different_value_keys)
def is_valid_packspec(p):
""" Only checks that the datatype is valid. """
if isinstance(p, dict):
if "dtype" in p:
return is_valid_packspec(p["dtype"])
else:
msg = "The argument to packing must contain the key 'dtype'."
raise ValueError(msg)
elif isinstance(p, (str, type)):
pdtype = np.dtype(p) # Does nothing if it's already a numpy dtype
if pdtype.kind != "i" and pdtype.kind != "u":
msg = "The packing datatype must be a numpy integer type."
raise ValueError(msg)
return True
elif p is None:
return True
else:
return False
if is_valid_packspec(packing):
packspecs = repeat(packing)
else:
# Assume iterable, make sure packing is the same length as cubes.
for cube, packspec in zip_longest(cubes, packing, fillvalue=-1):
if cube == -1 or packspec == -1:
msg = (
"If packing is a list, it must have the "
"same number of elements as the argument to"
"cube."
)
raise ValueError(msg)
if not is_valid_packspec(packspec):
msg = "Invalid packing argument: {}.".format(packspec)
raise ValueError(msg)
packspecs = packing
# Make fill-value(s) into an iterable over cubes.
if isinstance(fill_value, str):
# Strings are awkward -- handle separately.
fill_values = repeat(fill_value)
else:
try:
fill_values = tuple(fill_value)
except TypeError:
fill_values = repeat(fill_value)
else:
if len(fill_values) != len(cubes):
msg = (
"If fill_value is a list, it must have the "
"same number of elements as the cube argument."
)
raise ValueError(msg)
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube, packspec, fill_value in zip(cubes, packspecs, fill_values):
sman.write(
cube,
local_keys,
unlimited_dimensions,
zlib,
complevel,
shuffle,
fletcher32,
contiguous,
chunksizes,
endian,
least_significant_digit,
packing=packspec,
fill_value=fill_value,
)
if iris.config.netcdf.conventions_override:
# Set to the default if custom conventions are not available.
conventions = cube.attributes.get(
"Conventions", CF_CONVENTIONS_VERSION
)
else:
conventions = CF_CONVENTIONS_VERSION
# Perform a CF patch of the conventions attribute.
cf_profile_available = iris.site_configuration.get(
"cf_profile"
) not in [None, False]
if cf_profile_available:
conventions_patch = iris.site_configuration.get(
"cf_patch_conventions"
)
if conventions_patch is not None:
conventions = conventions_patch(conventions)
else:
msg = "cf_profile is available but no {} defined.".format(
"cf_patch_conventions"
)
warnings.warn(msg)
# Add conventions attribute.
sman.update_global_attributes(Conventions=conventions)
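# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal use of the save() function documented above; the input path is a
# placeholder and packing="i2" requests automatic int16 scale/offset packing.
if __name__ == "__main__":
    import iris
    example_cube = iris.load_cube("example_input.nc")  # placeholder path
    save(example_cube, "example_packed.nc", netcdf_format="NETCDF4",
         zlib=True, complevel=4, packing="i2", fill_value=-32767)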
|
pp-mo/iris
|
lib/iris/fileformats/netcdf.py
|
Python
|
lgpl-3.0
| 100,575
|
[
"NetCDF"
] |
83a7eb76353d541fd15b21d01a981faa598a8a9f2c688baae0e8920b2653ec2a
|
"""Perform streaming post-alignment preparation -- de-duplication and sorting.
Centralizes a pipelined approach to generating sorted, de-duplicated BAM output
from sequencer results.
sambamba: https://github.com/lomereiter/sambamba
samblaster: http://arxiv.org/pdf/1403.7486v1.pdf
biobambam bammarkduplicates: http://arxiv.org/abs/1306.0836
"""
import contextlib
from distutils.version import LooseVersion
import os
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
@contextlib.contextmanager
def tobam_cl(data, out_file, is_paired=False):
"""Prepare command line for producing de-duplicated sorted output.
- If no deduplication, sort and prepare a BAM file.
- If paired, then use samblaster and prepare discordant outputs.
- If unpaired, use biobambam's bammarkduplicates
"""
do_dedup = _check_dedup(data)
with file_transaction(data, out_file) as tx_out_file:
if not do_dedup:
yield (sam_to_sortbam_cl(data, tx_out_file), tx_out_file)
elif is_paired:
sr_file = "%s-sr.bam" % os.path.splitext(out_file)[0]
disc_file = "%s-disc.bam" % os.path.splitext(out_file)[0]
with file_transaction(data, sr_file) as tx_sr_file:
with file_transaction(data, disc_file) as tx_disc_file:
yield (samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file),
tx_out_file)
else:
yield (_biobambam_dedup_sort(data, tx_out_file), tx_out_file)
def _get_cores_memory(data, downscale=2):
"""Retrieve cores and memory, using samtools as baseline.
For memory, scaling down because we share with alignment and de-duplication.
"""
resources = config_utils.get_resources("samtools", data["config"])
num_cores = data["config"]["algorithm"].get("num_cores", 1)
max_mem = config_utils.adjust_memory(resources.get("memory", "2G"),
downscale, "decrease").upper()
return num_cores, max_mem
def sam_to_sortbam_cl(data, tx_out_file, name_sort=False):
"""Convert to sorted BAM output.
Set name_sort to True to sort reads by queryname
"""
samtools = config_utils.get_program("samtools", data["config"])
cores, mem = _get_cores_memory(data, downscale=2)
tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
sort_flag = "-n" if name_sort else ""
return ("{samtools} sort -@ {cores} -m {mem} {sort_flag} "
"-T {tmp_file} -o {tx_out_file} /dev/stdin".format(**locals()))
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file):
"""Deduplicate and sort with samblaster, produces split read and discordant pair files.
"""
samblaster = config_utils.get_program("samblaster", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
cores, mem = _get_cores_memory(data, downscale=3)
tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
for ext in ["spl", "disc", "full"]:
utils.safe_makedir("%s-%s" % (tmp_prefix, ext))
if data.get("align_split"):
full_tobam_cmd = _nosort_tobam_cmd(data)
else:
full_tobam_cmd = ("samtools view -b -u - | "
"sambamba sort -t {cores} -m {mem} "
"--tmpdir {tmp_prefix}-{dext} -o {out_file} /dev/stdin")
tobam_cmd = ("{samtools} sort -@ {cores} -m {mem} "
"-T {tmp_prefix}-{dext} -o {out_file} /dev/stdin")
# samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem
# https://github.com/GregoryFaust/samblaster/releases/tag/v.0.1.22
if LooseVersion(programs.get_version_manifest("samblaster", data=data, required=True)) >= LooseVersion("0.1.22"):
opts = "-M"
else:
opts = ""
splitter_cmd = tobam_cmd.format(out_file=tx_sr_file, dext="spl", **locals())
discordant_cmd = tobam_cmd.format(out_file=tx_disc_file, dext="disc", **locals())
dedup_cmd = full_tobam_cmd.format(out_file=tx_out_file, dext="full", **locals())
cmd = ("{samblaster} {opts} --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) "
"| {dedup_cmd}")
return cmd.format(**locals())
def _nosort_tobam_cmd(data):
"""Handle converting to BAM for queryname sorted inputs, correcting HD headers.
"""
if dd.get_aligner(data).startswith("bwa"):
fix_hd = "(echo '@HD VN:1.3 SO:queryname' && cat) | "
else:
fix_hd = "sed 's/SO:unsorted/SO:queryname/g' | "
return fix_hd + "{samtools} view -b - -o {out_file}"
def _biobambam_dedup_sort(data, tx_out_file):
"""Perform streaming deduplication and sorting with biobambam's bammarkduplicates2.
"""
samtools = config_utils.get_program("samtools", data["config"])
bammarkduplicates = config_utils.get_program("bammarkduplicates", data["config"])
cores, mem = _get_cores_memory(data, downscale=2)
tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
if data.get("align_split"):
out_file = tx_out_file
return _nosort_tobam_cmd(data).format(**locals())
else:
return ("{samtools} sort -n -@ {cores} -m {mem} -O bam -T {tmp_file}-namesort - | "
"{bammarkduplicates} tmpfile={tmp_file}-markdup "
"markthreads={cores} level=0 | "
"{samtools} sort -@ {cores} -m {mem} -T {tmp_file}-finalsort "
"-o {tx_out_file} /dev/stdin").format(**locals())
def _check_dedup(data):
"""Check configuration for de-duplication, handling back compatibility.
"""
dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), True)
if dup_param and isinstance(dup_param, basestring):
logger.info("Warning: bcbio no longer support explicit setting of mark_duplicate algorithm. "
"Using best-practice choice based on input data.")
dup_param = True
return dup_param
def dedup_bam(in_bam, data):
"""Perform non-stream based deduplication of BAM input files using biobambam.
"""
if _check_dedup(data):
out_file = "%s-dedup%s" % utils.splitext_plus(in_bam)
if not utils.file_exists(out_file):
with tx_tmpdir(data) as tmpdir:
with file_transaction(data, out_file) as tx_out_file:
bammarkduplicates = config_utils.get_program("bammarkduplicates", data["config"])
base_tmp = os.path.join(tmpdir, os.path.splitext(os.path.basename(tx_out_file))[0])
cores, mem = _get_cores_memory(data, downscale=2)
cmd = ("{bammarkduplicates} tmpfile={base_tmp}-markdup "
"markthreads={cores} I={in_bam} O={tx_out_file}")
do.run(cmd.format(**locals()), "De-duplication with biobambam")
bam.index(out_file, data["config"])
return out_file
else:
return in_bam
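# --- Editor's illustrative sketch (not part of the original module) ---
# Intended use of tobam_cl(): pipe an aligner's SAM stdout into the yielded
# command. The bwa-mem command line here is a placeholder, not bcbio's own.
def _example_streaming_alignment(data, out_file):
    aligner_cl = "bwa mem -t 8 ref.fa reads_1.fq reads_2.fq"  # placeholder
    with tobam_cl(data, out_file, is_paired=True) as (tobam_cmd, tx_out_file):
        cmd = "{align} | {tobam}".format(align=aligner_cl, tobam=tobam_cmd)
        do.run(cmd, "Align and stream to de-duplicated, sorted BAM")
    return out_file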
|
hjanime/bcbio-nextgen
|
bcbio/ngsalign/postalign.py
|
Python
|
mit
| 7,133
|
[
"BWA"
] |
59249e350e2d98ddba78911592876c7598c09fab3c1aadbff2da47dd3eb47b9f
|
#
# ast_builder_test.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from antlr4 import *
from pynestml.meta_model.ast_nestml_compilation_unit import ASTNestMLCompilationUnit
from pynestml.meta_model.ast_source_location import ASTSourceLocation
from pynestml.generated.PyNestMLLexer import PyNestMLLexer
from pynestml.generated.PyNestMLParser import PyNestMLParser
from pynestml.symbol_table.symbol_table import SymbolTable
from pynestml.symbols.predefined_functions import PredefinedFunctions
from pynestml.symbols.predefined_types import PredefinedTypes
from pynestml.symbols.predefined_units import PredefinedUnits
from pynestml.symbols.predefined_variables import PredefinedVariables
from pynestml.utils.logger import LoggingLevel, Logger
from pynestml.visitors.ast_builder_visitor import ASTBuilderVisitor
# setups the infrastructure
PredefinedUnits.register_units()
PredefinedTypes.register_types()
PredefinedFunctions.register_functions()
PredefinedVariables.register_variables()
SymbolTable.initialize_symbol_table(ASTSourceLocation(start_line=0, start_column=0, end_line=0, end_column=0))
Logger.init_logger(LoggingLevel.NO)
class ASTBuildingTest(unittest.TestCase):
@classmethod
def test(cls):
for filename in os.listdir(os.path.realpath(os.path.join(os.path.dirname(__file__),
os.path.join('..', 'models')))):
if filename.endswith(".nestml"):
# print('Start creating AST for ' + filename + ' ...'),
input_file = FileStream(
os.path.join(os.path.dirname(__file__), os.path.join(os.path.join('..', 'models'), filename)))
lexer = PyNestMLLexer(input_file)
# create a token stream
stream = CommonTokenStream(lexer)
stream.fill()
# parse the file
parser = PyNestMLParser(stream)
# process the comments
compilation_unit = parser.nestMLCompilationUnit()
# now build the meta_model
ast_builder_visitor = ASTBuilderVisitor(stream.tokens)
ast = ast_builder_visitor.visit(compilation_unit)
assert isinstance(ast, ASTNestMLCompilationUnit)
if __name__ == '__main__':
unittest.main()
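# --- Editor's illustrative sketch (not part of the original test) ---
# The same lexer -> token stream -> parser -> ASTBuilderVisitor pipeline can
# be driven from an in-memory string via antlr4.InputStream; model_text is
# assumed to be valid NestML source.
def parse_model_from_string(model_text):
    input_stream = InputStream(model_text)
    lexer = PyNestMLLexer(input_stream)
    stream = CommonTokenStream(lexer)
    stream.fill()
    parser = PyNestMLParser(stream)
    compilation_unit = parser.nestMLCompilationUnit()
    ast_builder_visitor = ASTBuilderVisitor(stream.tokens)
    return ast_builder_visitor.visit(compilation_unit)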
|
kperun/nestml
|
tests/ast_builder_test.py
|
Python
|
gpl-2.0
| 2,997
|
[
"VisIt"
] |
8036c4692b399dd5ee9b26cfa511f6e8d9691a9400a0401d1dee4c77e071015a
|
# coding: utf-8
from __future__ import division, unicode_literals, print_function
import math
import os
import subprocess
import tempfile
import numpy as np
from monty.dev import requires
from monty.json import jsanitize
from monty.os import cd
from monty.os.path import which
from scipy.constants import e, m_e
from scipy.spatial import distance
from pymatgen.core.lattice import Lattice
from pymatgen.core.units import Energy, Length
from pymatgen.electronic_structure.bandstructure import \
BandStructureSymmLine, Kpoint
from pymatgen.electronic_structure.core import Orbital
from pymatgen.electronic_structure.dos import Dos, Spin, CompleteDos
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
"""
This module provides classes to run and analyze boltztrap on pymatgen band
structure objects. Boltztrap is a software interpolating band structures and
computing materials properties from this band structure using Boltzmann
semi-classical transport theory.
Boltztrap has been developed by Georg Madsen.
http://www.icams.de/content/departments/ams/madsen/boltztrap.html
You need version 1.2.3 or higher
References are::
Madsen, G. K. H., and Singh, D. J. (2006).
BoltzTraP. A code for calculating band-structure dependent quantities.
Computer Physics Communications, 175, 67-71
"""
__author__ = "Geoffroy Hautier, Zachary Gibbs, Francesco Ricci, Anubhav Jain"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "August 23, 2013"
class BoltztrapRunner(object):
"""
This class is used to run Boltztrap on a band structure object.
Args:
bs:
A band structure object
nelec:
the number of electrons
dos_type:
two options for the band structure integration: "HISTO"
(histogram) or "TETRA" using the tetrahedron method. TETRA
typically gives better results (especially for DOSes)
but takes more time
energy_grid:
the energy steps used for the integration (eV)
lpfac:
the number of interpolation points in the real space. By
default 10 gives 10 time more points in the real space than
the number of kpoints given in reciprocal space
run_type:
type of boltztrap usage. by default
- BOLTZ: (default) compute transport coefficients
- BANDS: interpolate all bands contained in the energy range
specified in energy_span_around_fermi variable, along specified
k-points
- DOS: compute total and partial dos (custom BoltzTraP code
needed!)
- FERMI: compute the fermi surface or, more precisely,
interpolate selected bands on a 3D grid
band_nb:
indicates a band number. Used for Fermi Surface interpolation
(run_type="FERMI")
spin:
specific spin component (1: up, -1: down) of the band selected
in FERMI mode (mandatory).
cond_band:
if a conduction band is specified in FERMI mode,
set this variable as True
tauref:
reference relaxation time. Only set to a value different than
zero if we want to model beyond the constant relaxation time.
tauexp:
exponent for the energy in the non-constant relaxation time
approach
tauen:
reference energy for the non-constant relaxation time approach
soc:
results from spin-orbit coupling (soc) computations give
typically non-polarized (no spin up or down) results but single
electron occupations. If the band structure comes from a soc
computation, you should set soc to True (default False)
doping:
the fixed doping levels you want to compute. Boltztrap provides
both transport values depending on electron chemical potential
(fermi energy) and for a series of fixed carrier
concentrations. By default, 25 levels from 1e16 to 1e22 cm^-3 are
used (1, 2.5, 5 and 7.5 times each decade up to 1e21, plus 1e22).
energy_span_around_fermi:
usually the interpolation is not needed on the entire energy
range but on a specific range around the fermi level.
This energy gives this range in eV. by default it is 1.5 eV.
If DOS or BANDS type are selected, this range is automatically
set to cover the entire energy range.
scissor:
scissor to apply to the band gap (eV). This applies a scissor
operation moving the band edges without changing the band
shape. This is useful to correct the often underestimated band
gap in DFT. Default is 0.0 (no scissor)
kpt_line:
list of fractional coordinates of kpoints as arrays or list of
Kpoint objects for BANDS mode calculation (standard path of
high symmetry k-points is automatically set as default)
tmax:
Maximum temperature (K) for calculation (default=1300)
tgrid:
Temperature interval for calculation (default=50)
"""
@requires(which('x_trans'),
"BoltztrapRunner requires the executables 'x_trans' to be in "
"the path. Please download the Boltztrap at "
"http://www.icams.de/content/departments/ams/madsen/boltztrap"
".html and follow the instructions in the README to compile "
"Bolztrap accordingly. Then add x_trans to your path")
def __init__(self, bs, nelec, dos_type="HISTO", energy_grid=0.005,
lpfac=10, run_type="BOLTZ", band_nb=None, tauref=0, tauexp=0,
tauen=0, soc=False, doping=None, energy_span_around_fermi=1.5,
scissor=0.0, kpt_line=None, spin=None, cond_band=False,
tmax=1300, tgrid=50):
self.lpfac = lpfac
self._bs = bs
self._nelec = nelec
self.dos_type = dos_type
self.energy_grid = energy_grid
self.error = []
self.run_type = run_type
self.band_nb = band_nb
self.spin = spin
self.cond_band = cond_band
self.tauref = tauref
self.tauexp = tauexp
self.tauen = tauen
self.soc = soc
self.kpt_line = kpt_line
if doping:
self.doping = doping
else:
self.doping = []
for d in [1e16, 1e17, 1e18, 1e19, 1e20, 1e21]:
self.doping.extend([1*d, 2.5*d, 5*d, 7.5*d])
self.doping.append(1e22)
self.energy_span_around_fermi = energy_span_around_fermi
self.scissor = scissor
self.tmax = tmax
self.tgrid = tgrid
if self.run_type in ("DOS", "BANDS"):
self._auto_set_energy_range()
def _auto_set_energy_range(self):
"""
automatically determine the energy range as min/max eigenvalue
minus/plus a fixed 2 eV buffer
"""
emins = [min([e_k[0] for e_k in self._bs.bands[Spin.up]])]
emaxs = [max([e_k[0] for e_k in self._bs.bands[Spin.up]])]
if self._bs.is_spin_polarized:
emins.append(min([e_k[0] for e_k in
self._bs.bands[Spin.down]]))
emaxs.append(max([e_k[0] for e_k in
self._bs.bands[Spin.down]]))
min_eigenval = Energy(min(emins) - self._bs.efermi, "eV").\
to("Ry")
max_eigenval = Energy(max(emaxs) - self._bs.efermi, "eV").\
to("Ry")
# set energy range to buffer around min/max EV
# buffer does not increase CPU time but will help get equal
# energies for spin up/down for band structure
const = Energy(2, "eV").to("Ry")
self._ll = min_eigenval - const
self._hl = max_eigenval + const
en_range = Energy(max((abs(self._ll), abs(self._hl))),
"Ry").to("eV")
self.energy_span_around_fermi = en_range * 1.01
print("energy_span_around_fermi = ",
self.energy_span_around_fermi)
@property
def bs(self):
return self._bs
@property
def nelec(self):
return self._nelec
def write_energy(self, output_file):
with open(output_file, 'w') as f:
f.write("test\n")
f.write("{}\n".format(len(self._bs.kpoints)))
if self.run_type == "FERMI":
sign = -1.0 if self.cond_band else 1.0
for i in range(len(self._bs.kpoints)):
eigs = []
eigs.append(Energy(
self._bs.bands[Spin(self.spin)][self.band_nb][i] -
self._bs.efermi, "eV").to("Ry"))
f.write("%12.8f %12.8f %12.8f %d\n"
% (self._bs.kpoints[i].frac_coords[0],
self._bs.kpoints[i].frac_coords[1],
self._bs.kpoints[i].frac_coords[2],
len(eigs)))
for j in range(len(eigs)):
f.write("%18.8f\n" % (sign * float(eigs[j])))
else:
for i, kpt in enumerate(self._bs.kpoints):
eigs = []
if self.run_type == "DOS":
spin_lst = [self.spin]
else:
spin_lst = self._bs.bands
for spin in spin_lst:
# use 90% of bottom bands since highest eigenvalues
# are usually incorrect
# ask Geoffroy Hautier for more details
nb_bands = int(math.floor(self._bs.nb_bands * 0.9))
for j in range(nb_bands):
eigs.append(
Energy(self._bs.bands[Spin(spin)][j][i] -
self._bs.efermi, "eV").to("Ry"))
eigs.sort()
if self.run_type == "DOS" and self._bs.is_spin_polarized:
eigs.insert(0, self._ll)
eigs.append(self._hl)
f.write("%12.8f %12.8f %12.8f %d\n"
% (kpt.frac_coords[0],
kpt.frac_coords[1],
kpt.frac_coords[2],
len(eigs)))
for j in range(len(eigs)):
f.write("%18.8f\n" % (float(eigs[j])))
def write_struct(self, output_file):
sym = SpacegroupAnalyzer(self._bs.structure, symprec=0.01)
with open(output_file, 'w') as f:
f.write("{} {}\n".format(self._bs.structure.composition.formula,
sym.get_space_group_symbol()))
f.write("{}\n".format("\n".join(
[" ".join(["%.5f" % Length(i, "ang").to("bohr") for i in row])
for row in self._bs.structure.lattice.matrix])))
ops = sym.get_symmetry_dataset()['rotations']
f.write("{}\n".format(len(ops)))
for c in ops:
for row in c:
f.write("{}\n".format(" ".join(str(i) for i in row)))
def write_def(self, output_file):
# This function is useless in std version of BoltzTraP code
# because the x_trans script overwrites BoltzTraP.def
with open(output_file, 'w') as f:
so = ""
if self._bs.is_spin_polarized or self.soc:
so = "so"
f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n" +
"6,'boltztrap.outputtrans', 'unknown', "
"'formatted',0\n" +
"20,'boltztrap.struct', 'old', 'formatted',0\n"
+ "10,'boltztrap.energy" + so + "', 'old', "
"'formatted',0\n" +
"48,'boltztrap.engre', 'unknown', "
"'unformatted',0\n" +
"49,'boltztrap.transdos', 'unknown', "
"'formatted',0\n" +
"50,'boltztrap.sigxx', 'unknown', 'formatted',"
"0\n" +
"51,'boltztrap.sigxxx', 'unknown', 'formatted',"
"0\n" +
"21,'boltztrap.trace', 'unknown', "
"'formatted',0\n" +
"22,'boltztrap.condtens', 'unknown', "
"'formatted',0\n" +
"24,'boltztrap.halltens', 'unknown', "
"'formatted',0\n" +
"30,'boltztrap_BZ.cube', 'unknown', "
"'formatted',0\n")
def write_proj(self, output_file_proj, output_file_def):
# This function is useless in std version of BoltzTraP code
# because the x_trans script overwrites BoltzTraP.def
for oi,o in enumerate(Orbital):
for site_nb in range(0, len(self._bs.structure.sites)):
if oi < len(self._bs.projections[Spin.up][0][0]):
with open(output_file_proj + "_" + str(site_nb) + "_" + str(o),
'w') as f:
f.write(self._bs.structure.composition.formula + "\n")
f.write(str(len(self._bs.kpoints)) + "\n")
for i in range(len(self._bs.kpoints)):
tmp_proj = []
for j in range(
int(math.floor(self._bs.nb_bands * 0.9))):
tmp_proj.append(
self._bs.projections[Spin(self.spin)][j][
i][oi][site_nb])
# TODO deal with the sorting going on at
# the energy level!!!
# tmp_proj.sort()
if self.run_type == "DOS" and \
self._bs.is_spin_polarized:
tmp_proj.insert(0, self._ll)
tmp_proj.append(self._hl)
f.write("%12.8f %12.8f %12.8f %d\n"
% (self._bs.kpoints[i].frac_coords[0],
self._bs.kpoints[i].frac_coords[1],
self._bs.kpoints[i].frac_coords[2],
len(tmp_proj)))
for j in range(len(tmp_proj)):
f.write("%18.8f\n" % float(tmp_proj[j]))
with open(output_file_def, 'w') as f:
so = ""
if self._bs.is_spin_polarized:
so = "so"
f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n" +
"6,'boltztrap.outputtrans', 'unknown', "
"'formatted',0\n" +
"20,'boltztrap.struct', 'old', 'formatted',0\n"
+ "10,'boltztrap.energy" + so + "', 'old', "
"'formatted',0\n" +
"48,'boltztrap.engre', 'unknown', "
"'unformatted',0\n" +
"49,'boltztrap.transdos', 'unknown', "
"'formatted',0\n" +
"50,'boltztrap.sigxx', 'unknown', 'formatted',"
"0\n" +
"51,'boltztrap.sigxxx', 'unknown', 'formatted',"
"0\n" +
"21,'boltztrap.trace', 'unknown', "
"'formatted',0\n" +
"22,'boltztrap.condtens', 'unknown', "
"'formatted',0\n" +
"24,'boltztrap.halltens', 'unknown', "
"'formatted',0\n" +
"30,'boltztrap_BZ.cube', 'unknown', "
"'formatted',0\n")
i = 1000
for oi,o in enumerate(Orbital):
for site_nb in range(0, len(self._bs.structure.sites)):
if oi < len(self._bs.projections[Spin.up][0][0]):
f.write(str(i) + ",\'" + "boltztrap.proj_" + str(
site_nb) + "_" + str(o.name) +
"\' \'old\', \'formatted\',0\n")
i += 1
def write_intrans(self, output_file):
setgap = 1 if self.scissor > 0.0001 else 0
if self.run_type == "BOLTZ" or self.run_type == "DOS":
with open(output_file, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 %d %f # iskip (not presently used) idebug "
"setgap shiftgap \n"
% (setgap, Energy(self.scissor, "eV").to("Ry")))
fout.write(
"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
"span around Fermilevel, number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"),
Energy(self.energy_span_around_fermi, "eV").to("Ry"),
self._nelec))
fout.write(
"CALC # CALC (calculate expansion "
"coeff), NOCALC read from file\n")
fout.write(
"%d # lpfac, number of latt-points "
"per k-point\n" % self.lpfac)
fout.write(
"%s # run mode (only BOLTZ is "
"supported)\n" % self.run_type)
fout.write(
".15 # (efcut) energy range of "
"chemical potential\n")
fout.write(
"{} {} # Tmax, temperature grid\n".\
format(self.tmax, self.tgrid))
fout.write(
"-1. # energyrange of bands given DOS output sig_xxx and "
"dos_xxx (xxx is band number)\n")
fout.write(self.dos_type + "\n") # e.g., HISTO or TETRA
fout.write("{} {} {} 0 0 0\n".format(
self.tauref, self.tauexp, self.tauen))
fout.write("{}\n".format(2 * len(self.doping)))
for d in self.doping:
fout.write(str(d) + "\n")
for d in self.doping:
fout.write(str(-d) + "\n")
elif self.run_type == "FERMI":
with open(output_file, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 0 0.0 # iskip (not presently used) idebug "
"setgap shiftgap \n")
fout.write(
"0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,"
"energy span around Fermilevel, "
"number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"), self._nelec))
fout.write(
"CALC # CALC (calculate expansion "
"coeff), NOCALC read from file\n")
fout.write(
"%d # lpfac, number of latt-points "
"per k-point\n" % self.lpfac)
fout.write(
"FERMI # run mode (only BOLTZ is "
"supported)\n")
fout.write(str(1) +
" # actual band selected: " +
str(self.band_nb + 1) + " spin: " + str(self.spin))
elif self.run_type == "BANDS":
if self.kpt_line is None:
kpath = HighSymmKpath(self._bs.structure)
self.kpt_line = [Kpoint(k, self._bs.structure.lattice) for k
in
kpath.get_kpoints(coords_are_cartesian=False)[
0]]
self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
elif type(self.kpt_line[0]) == Kpoint:
self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
with open(output_file, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 %d %f # iskip (not presently used) idebug "
"setgap shiftgap \n"
% (setgap, Energy(self.scissor, "eV").to("Ry")))
fout.write(
"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
"span around Fermilevel, "
"number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"),
Energy(self.energy_span_around_fermi, "eV").to("Ry"),
self._nelec))
fout.write(
"CALC # CALC (calculate expansion "
"coeff), NOCALC read from file\n")
fout.write(
"%d # lpfac, number of latt-points "
"per k-point\n" % self.lpfac)
fout.write(
"BANDS # run mode (only BOLTZ is "
"supported)\n")
fout.write("P " + str(len(self.kpt_line)) + "\n")
for kp in self.kpt_line:
fout.writelines([str(k) + " " for k in kp])
fout.write('\n')
def write_input(self, output_dir):
if self._bs.is_spin_polarized or self.soc:
self.write_energy(os.path.join(output_dir, "boltztrap.energyso"))
else:
self.write_energy(os.path.join(output_dir, "boltztrap.energy"))
self.write_struct(os.path.join(output_dir, "boltztrap.struct"))
self.write_intrans(os.path.join(output_dir, "boltztrap.intrans"))
self.write_def(os.path.join(output_dir, "BoltzTraP.def"))
if len(self.bs.projections) != 0 and self.run_type == "DOS":
self.write_proj(os.path.join(output_dir, "boltztrap.proj"),
os.path.join(output_dir, "BoltzTraP.def"))
def run(self, path_dir=None, convergence=True, write_input=True,
clear_dir=False, max_lpfac=150, min_egrid=0.00005):
"""
Write inputs (optional), run BoltzTraP, and ensure
convergence (optional)
Args:
path_dir (str): directory in which to run BoltzTraP
convergence (bool): whether to check convergence and make
corrections if needed
write_input: (bool) whether to write input files before the run
(required for convergence mode)
clear_dir: (bool) whether to remove all files in the path_dir
before starting
max_lpfac: (float) maximum lpfac value to try before reducing egrid
in convergence mode
min_egrid: (float) minimum egrid value to try before giving up in
convergence mode
Returns:
"""
# TODO: consider making this a part of custodian rather than pymatgen
# A lot of this functionality (scratch dirs, handlers, monitors)
# is built into custodian framework
if convergence and not write_input:
raise ValueError("Convergence mode requires write_input to be "
"true")
if self.run_type in ("BANDS", "DOS", "FERMI"):
convergence = False
if self.lpfac > max_lpfac:
max_lpfac = self.lpfac
if self.run_type == "BANDS" and self.bs.is_spin_polarized:
print("Reminder: for run_type " + str(
self.run_type) + ", spin component are not separated! "
"(you have a spin polarized band structure)")
if self.run_type in ("FERMI", "DOS") and self.spin is None:
if self.bs.is_spin_polarized:
raise BoltztrapError(
"Spin parameter must be specified for spin polarized "
"band structures!")
else:
self.spin = 1
dir_bz_name = "boltztrap"
if path_dir is None:
temp_dir = tempfile.mkdtemp()
path_dir = os.path.join(temp_dir, dir_bz_name)
else:
path_dir = os.path.abspath(
os.path.join(path_dir, dir_bz_name))
if not os.path.exists(path_dir):
os.mkdir(path_dir)
elif clear_dir:
for c in os.listdir(path_dir):
os.remove(os.path.join(path_dir, c))
with cd(path_dir):
lpfac_start = self.lpfac
converged = False
while self.energy_grid >= min_egrid and not converged:
self.lpfac = lpfac_start
print("lpfac, energy_grid: ", self.lpfac, self.energy_grid)
while self.lpfac <= max_lpfac and not converged:
if write_input:
self.write_input(path_dir)
bt_exe = ["x_trans", "BoltzTraP"]
if self._bs.is_spin_polarized or self.soc:
bt_exe.append("-so")
p = subprocess.Popen(bt_exe, stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
for c in p.communicate():
print(c)
if "STOP error in factorization" in c:
raise BoltztrapError("STOP error in factorization")
warning = ""
with open(os.path.join(path_dir,
dir_bz_name + ".outputtrans")) as f:
for l in f:
if "Option unknown" in l:
raise BoltztrapError(
"DOS mode needs a custom version of "
"BoltzTraP code is needed")
if "WARNING" in l:
warning = l
break
if "Error - Fermi level was not found" in l:
warning = l
break
if not warning and convergence:
# check convergence for warning
analyzer = BoltztrapAnalyzer.from_files(path_dir)
for doping in ['n', 'p']:
for c in analyzer.mu_doping[doping]:
if len(analyzer.mu_doping[doping][c]) != len(
analyzer.doping[doping]):
warning = "length of mu_doping array is " \
"incorrect"
break
if doping == 'p' and \
sorted(
analyzer.mu_doping[doping][
c], reverse=True) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for p-type"
break
# ensure n-type doping sorted correctly
if doping == 'n' and sorted(
analyzer.mu_doping[doping][c]) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for n-type"
break
if warning:
self.lpfac += 10
print("Warning detected: {}! Increase lpfac to "
"{}".format(warning, self.lpfac))
else:
converged = True
if not converged:
self.energy_grid /= 10
print("Could not converge with max lpfac; "
"Decrease egrid to {}".format(self.energy_grid))
if not converged:
raise BoltztrapError(
"Doping convergence not reached with lpfac=" + str(
self.lpfac) + ", energy_grid=" + str(self.energy_grid))
return path_dir
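# --- Editor's illustrative sketch (not part of the original module) ---
# Typical driving of BoltztrapRunner from a VASP calculation; the vasprun
# path and the use of NELECT for nelec are assumptions, not taken from this
# module.
def _example_run_boltztrap(vasprun_path="vasprun.xml"):
    from pymatgen.io.vasp.outputs import Vasprun
    vr = Vasprun(vasprun_path)
    bs = vr.get_band_structure()
    runner = BoltztrapRunner(bs, nelec=vr.parameters["NELECT"],
                             run_type="BOLTZ")
    out_dir = runner.run(path_dir=".")
    return BoltztrapAnalyzer.from_files(out_dir)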
class BoltztrapError(Exception):
"""
Exception class for boltztrap.
Raised when the boltztrap gives an error
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "BoltztrapError : " + self.msg
class BoltztrapAnalyzer(object):
"""
Class used to store all the data from a boltztrap run
"""
def __init__(self, gap=None, mu_steps=None, cond=None, seebeck=None,
kappa=None, hall=None, doping=None,
mu_doping=None, seebeck_doping=None, cond_doping=None,
kappa_doping=None,
hall_doping=None, intrans = None, dos=None, dos_partial=None,
carrier_conc=None, vol=None, warning=None,
bz_bands=None, bz_kpoints=None, fermi_surface_data=None):
"""
Constructor taking directly all the data generated by Boltztrap. You
probably won't use it directly; use the from_files and
from_dict methods.
Args:
gap: The gap after interpolation in eV
mu_steps: The steps of electron chemical potential (or Fermi
level) in eV.
cond: The electronic conductivity tensor divided by a constant
relaxation time (sigma/tau) at different temperature and
fermi levels.
The format is {temperature: [array of 3x3 tensors at each
fermi level in mu_steps]}. The units are 1/(Ohm*m*s).
seebeck: The Seebeck tensor at different temperatures and fermi
levels. The format is {temperature: [array of 3x3 tensors at
each fermi level in mu_steps]}. The units are V/K
kappa: The electronic thermal conductivity tensor divided by a
constant relaxation time (kappa/tau) at different temperature
and fermi levels. The format is {temperature: [array of 3x3
tensors at each fermi level in mu_steps]}
The units are W/(m*K*s)
hall: The hall tensor at different temperature and fermi levels
The format is {temperature: [array of 27 coefficients list at
each fermi level in mu_steps]}
The units are m^3/C
doping: The different doping levels that have been given to
Boltztrap. The format is {'p':[],'n':[]} with an array of
doping levels. The units are cm^-3
mu_doping: Gives the electron chemical potential (or Fermi level)
for a given set of doping.
Format is {'p':{temperature: [fermi levels],'n':{temperature:
[fermi levels]}}
the fermi level array is ordered according to the doping
levels in doping units for doping are in cm^-3 and for Fermi
level in eV
seebeck_doping: The Seebeck tensor at different temperatures and
doping levels. The format is {'p': {temperature: [Seebeck
tensors]}, 'n':{temperature: [Seebeck tensors]}}
The [Seebeck tensors] array is ordered according to the
doping levels in doping units for doping are in cm^-3 and for
Seebeck in V/K
cond_doping: The electronic conductivity tensor divided by a
constant relaxation time (sigma/tau) at different
temperatures and doping levels
The format is {'p':{temperature: [conductivity tensors]},
'n':{temperature: [conductivity tensors]}}
The [conductivity tensors] array is ordered according to the
doping levels in doping units for doping are in cm^-3 and for
conductivity in 1/(Ohm*m*s)
kappa_doping: The thermal conductivity tensor divided by a constant
relaxation time (kappa/tau) at different temperatures and
doping levels.
The format is {'p':{temperature: [thermal conductivity
tensors]},'n':{temperature: [thermal conductivity tensors]}}
The [thermal conductivity tensors] array is ordered according
to the doping levels in doping units for doping are in cm^-3
and for thermal conductivity in W/(m*K*s)
hall_doping: The Hall tensor at different temperatures and doping
levels.
The format is {'p':{temperature: [Hall tensors]},
'n':{temperature: [Hall tensors]}}
The [Hall tensors] array is ordered according to the doping
levels in doping and each Hall tensor is represented by a 27
coefficients list.
The units are m^3/C
intrans: a dictionary of inputs e.g. {"scissor": 0.0}
carrier_conc: The concentration of carriers in electron (or hole)
per unit cell
dos: The dos computed by Boltztrap given as a pymatgen Dos object
dos_partial: Data for the partial DOS projected on sites and
orbitals
vol: Volume of the unit cell in angstrom cube (A^3)
warning: string if BoltzTraP outputted a warning, else None
bz_bands: Data for interpolated bands on a k-point line
(run_type=BANDS)
bz_kpoints: k-point in reciprocal coordinates for interpolated
bands (run_type=BANDS)
fermi_surface_data: energy values in a 3D grid imported from the
output .cube file using ase.io.cube.read_cube
"""
self.gap = gap
self.mu_steps = mu_steps
self._cond = cond
self._seebeck = seebeck
self._kappa = kappa
self._hall = hall
self.warning = warning
self.doping = doping
self.mu_doping = mu_doping
self._seebeck_doping = seebeck_doping
self._cond_doping = cond_doping
self._kappa_doping = kappa_doping
self._hall_doping = hall_doping
self.intrans = intrans
self._carrier_conc = carrier_conc
self.dos = dos
self.vol = vol
self._dos_partial = dos_partial
self._bz_bands = bz_bands
self._bz_kpoints = bz_kpoints
self.fermi_surface_data = fermi_surface_data
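    # Hedged indexing sketch for the raw attributes documented above (the
    # temperature 300 and the index 0 are illustrative, not from the docs):
    #     an._cond[300][0]                  # 3x3 sigma/tau tensor at 300 K, first mu step
    #     an.mu_doping['p'][300]            # Fermi levels (eV) matching an.doping['p'] at 300 K
    #     an._seebeck_doping['n'][300][0]   # 3x3 Seebeck tensor, first n-type doping level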
def get_symm_bands(self, structure, efermi, kpt_line=None,
labels_dict=None):
"""
        Useful to read bands from BoltzTraP output and get a
        BandStructureSymmLine object comparable with the one from a DFT
        calculation (if the same kpt_line is provided). The default kpt_line
        and labels_dict are the standard high-symmetry k-point path for
        the specified structure. They can also be extracted from the
        BandStructureSymmLine object that you want to compare with. The efermi
        variable must be specified to create the BandStructureSymmLine
        object (it usually comes from a DFT or BoltzTraP calculation).
"""
try:
if kpt_line is None:
kpath = HighSymmKpath(structure)
kpt_line = [Kpoint(k, structure.lattice.reciprocal_lattice) for k in
kpath.get_kpoints(coords_are_cartesian=False)[0]]
labels_dict = {l: k for k, l in zip(
*kpath.get_kpoints(coords_are_cartesian=False)) if l}
kpt_line = [kp.frac_coords for kp in kpt_line]
elif type(kpt_line[0]) == Kpoint:
kpt_line = [kp.frac_coords for kp in kpt_line]
labels_dict = {k: labels_dict[k].frac_coords for k in
labels_dict}
idx_list = []
# kpt_dense=np.array([kp for kp in self._bz_kpoints])
for i, kp in enumerate(kpt_line):
w = []
prec = 1e-05
while len(w) == 0:
w = np.where(np.all(
np.abs(kp - self._bz_kpoints) < [prec] * 3,
axis=1))[0]
prec *= 10
# print( prec )
idx_list.append([i, w[0]])
# if len(w)>0:
# idx_list.append([i,w[0]])
# else:
# w=np.where(np.all(np.abs(kp.frac_coords-self._bz_kpoints)
# <[1e-04,1e-04,1e-04],axis=1))[0]
# idx_list.append([i,w[0]])
idx_list = np.array(idx_list)
# print( idx_list.shape )
bands_dict = {Spin.up: (self._bz_bands * Energy(1, "Ry").to(
"eV") + efermi).T[:, idx_list[:, 1]].tolist()}
# bz_kpoints = bz_kpoints[idx_list[:,1]].tolist()
sbs = BandStructureSymmLine(kpt_line, bands_dict,
structure.lattice.reciprocal_lattice, efermi,
labels_dict=labels_dict)
return sbs
except:
raise BoltztrapError(
"Bands are not in output of BoltzTraP.\nBolztrapRunner must "
"be run with run_type=BANDS")
@staticmethod
def check_acc_bzt_bands(sbs_bz, sbs_ref, warn_thr=(0.03,0.03)):
"""
Compare sbs_bz BandStructureSymmLine calculated with boltztrap with
the sbs_ref BandStructureSymmLine as reference (from MP for
instance), computing correlation and energy difference for eight bands
around the gap (semiconductors) or fermi level (metals).
        warn_thr is a threshold used to warn about the accuracy of the
        BoltzTraP interpolated bands.
        Returns a dictionary with these keys:
        - "N": the index of each compared band; inside each there are:
            - "Corr": correlation coefficient for that band
            - "Dist": average energy distance for that band
            - "branch_name": energy distance for that branch
        - "avg_corr": average of the correlation coefficients over the 8 bands
        - "avg_dist": average of the energy distances over the 8 bands
        - "nb_list": list of indexes of the 8 compared bands
        - "acc_thr": list of two floats corresponding to the two warning
            thresholds in input
        - "acc_err": list of two bools:
            True if avg_corr > warn_thr[0], and
            True if avg_dist > warn_thr[1]
See also compare_sym_bands function doc
"""
if not sbs_ref.is_metal() and not sbs_bz.is_metal():
vbm_idx = sbs_bz.get_vbm()['band_index'][Spin.up][-1]
cbm_idx = sbs_bz.get_cbm()['band_index'][Spin.up][0]
nb_list = range(vbm_idx-3,cbm_idx+4)
else:
bnd_around_efermi=[]
delta=0
            spin = list(sbs_bz.bands.keys())[0]
while len(bnd_around_efermi)<8:
delta+=0.1
bnd_around_efermi=[]
for nb in range(len(sbs_bz.bands[spin])):
for kp in range(len(sbs_bz.bands[spin][nb])):
if abs(sbs_bz.bands[spin][nb][kp]-sbs_bz.efermi)<delta:
bnd_around_efermi.append(nb)
break
nb_list = bnd_around_efermi[:8]
#print(nb_list)
bcheck = compare_sym_bands(sbs_bz, sbs_ref, nb_list)
#print(bcheck)
acc_err = [False,False]
        avg_corr = sum([item[1]['Corr'] for item in bcheck.items()]) / 8
        avg_distance = sum([item[1]['Dist'] for item in bcheck.items()]) / 8
        if avg_corr > warn_thr[0]: acc_err[0] = True
        if avg_distance > warn_thr[1]: acc_err[1] = True
bcheck['avg_corr'] = avg_corr
bcheck['avg_distance'] = avg_distance
bcheck['acc_err'] = acc_err
bcheck['acc_thr'] = warn_thr
bcheck['nb_list'] = nb_list
if True in acc_err:
print("Warning! some bands around gap are not accurate")
return bcheck
def get_seebeck(self, output='eigs', doping_levels=True):
"""
Gives the seebeck coefficient (microV/K) in either a
full 3x3 tensor form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
The 'p' links to Seebeck at p-type doping
and 'n' to the Seebeck at n-type doping. Otherwise, returns a
{temp:[]} dictionary
The result contains either the sorted three eigenvalues of
the symmetric
Seebeck tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
units are microV/K
"""
return BoltztrapAnalyzer._format_to_output(self._seebeck,
self._seebeck_doping,
output,
doping_levels, 1e6)
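    # Hedged example of the returned structure (temperatures and values are
    # illustrative): with doping_levels=True and output='eigs' the result looks
    # like {300: {'p': [[s1, s2, s3], ...], 'n': [[s1, s2, s3], ...]}, ...},
    # one triplet of sorted eigenvalues (microV/K) per doping level in self.doping.
    #
    #     seeb = an.get_seebeck(output='average', doping_levels=True)
    #     s_p_300 = seeb[300]['p'][0]   # average Seebeck at 300 K, first p-type doping level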
def get_conductivity(self, output='eigs', doping_levels=True,
relaxation_time=1e-14):
"""
Gives the conductivity (1/Ohm*m) in either a full 3x3 tensor
form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
The 'p' links to conductivity
at p-type doping and 'n' to the conductivity at n-type
doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either
the sorted three eigenvalues of the symmetric
            conductivity tensor (output='eigs') or a full tensor (3x3
array) (output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are 1/Ohm*m
"""
return BoltztrapAnalyzer._format_to_output(self._cond,
self._cond_doping, output,
doping_levels,
relaxation_time)
def get_power_factor(self, output='eigs', doping_levels=True,
relaxation_time=1e-14):
"""
Gives the power factor (Seebeck^2 * conductivity) in units
microW/(m*K^2) in either a full 3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
            If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
            'p' links to the power factor
            at p-type doping and 'n' to the power factor at n-type doping.
Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
            power factor tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are microW/(m K^2)
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
full_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][
t][i],
self._seebeck_doping[doping][
t][i]))
result_doping[doping][t].append(full_tensor)
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
full_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
result[t].append(full_tensor)
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels,
multi=1e6 * relaxation_time)
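    # Rough scalar sanity check of the S^2 * sigma contraction built above
    # (numbers are illustrative only): with S = 200 microV/K = 2e-4 V/K and
    # sigma = (sigma/tau) * tau = 1e19 * 1e-14 = 1e5 1/(Ohm*m),
    #     S^2 * sigma = (2e-4)**2 * 1e5 = 4e-3 W/(m*K^2) = 4000 microW/(m*K^2),
    # which is what the 1e6 * relaxation_time scaling returns in microW/(m*K^2).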
def get_thermal_conductivity(self, output='eigs', doping_levels=True,
k_el=True, relaxation_time=1e-14):
"""
Gives the electronic part of the thermal conductivity in either a
full 3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
k_el (boolean): True for k_0-PF*T, False for k_0
relaxation_time (float): constant relaxation time in secs
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to thermal conductivity
at p-type doping and 'n' to the thermal conductivity at n-type
doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
            conductivity tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are W/mK
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
if k_el:
pf_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(self._seebeck_doping[doping][t][i],
self._seebeck_doping[doping][t][i]))
result_doping[doping][t].append((
self._kappa_doping[doping][t][i] - pf_tensor * t))
else:
result_doping[doping][t].append((
self._kappa_doping[doping][t][i]))
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
if k_el:
pf_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
result[t].append((self._kappa[t][i] - pf_tensor * t))
else:
result[t].append((self._kappa[t][i]))
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels,
multi=relaxation_time)
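    # The k_el=True branch implements kappa_el = kappa_0 - S^2 * sigma * T
    # tensor-wise; a hedged scalar illustration with made-up numbers:
    #     kappa_0/tau = 1e15 W/(m*K*s), S = 2e-4 V/K, sigma/tau = 1e19 1/(Ohm*m*s), T = 300 K
    #     (kappa_0 - S^2*sigma*T)/tau = 1e15 - (2e-4)**2 * 1e19 * 300 = 8.8e14 W/(m*K*s)
    # which becomes kappa_el = 8.8 W/(m*K) after multiplying by tau = 1e-14 s.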
def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,
kl=1.0):
"""
Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
            kl (float): lattice thermal conductivity in W/(m*K)
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to ZT
at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
            ZT tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
pf_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][t][
i],
self._seebeck_doping[doping][t][
i]))
thermal_conduct = (self._kappa_doping[doping][t][i]
- pf_tensor * t) * relaxation_time
result_doping[doping][t].append(
np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl * np.eye(3, 3))))
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
pf_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
thermal_conduct = (self._kappa[t][i]
- pf_tensor * t) * relaxation_time
result[t].append(np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl *
np.eye(3, 3))))
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels)
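    # Hedged reading of the expression above: ZT = S^2 * sigma * T / (kappa_el + kl),
    # built tensor-wise as np.dot(PF*tau*T, inv(kappa_el*tau + kl*I)). With the
    # illustrative numbers used in the comments above (S = 2e-4 V/K,
    # sigma = 1e5 1/(Ohm*m), T = 300 K, kappa_el = 8.8 W/(m*K)) and kl = 1.0 W/(m*K):
    #     ZT ~ 4e-3 * 300 / (8.8 + 1.0) ~ 0.12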
def get_average_eff_mass(self, output='eigs'):
"""
        Gives the average effective mass tensor. We call it average because
        it takes into account all the bands
        and regions in the Brillouin zone. This is different from the standard
        textbook effective mass, which often
        relates to only one (parabolic) band.
The average effective mass tensor is defined as the integrated
average of the second derivative of E(k)
This effective mass tensor takes into account:
-non-parabolicity
-multiple extrema
-multiple bands
        For more information about it, see:
Hautier, G., Miglio, A., Waroquiers, D., Rignanese, G., & Gonze,
X. (2014).
How Does Chemistry Influence Electron Effective Mass in Oxides?
A High-Throughput Computational Analysis. Chemistry of Materials,
26(19), 5447–5458. doi:10.1021/cm404079a
or
Hautier, G., Miglio, A., Ceder, G., Rignanese, G.-M., & Gonze,
X. (2013).
Identification and design principles of low hole effective mass
p-type transparent conducting oxides.
Nature Communications, 4, 2292. doi:10.1038/ncomms3292
Depending on the value of output, we have either the full 3x3
effective mass tensor,
its 3 eigenvalues or an average
Args:
output (string): 'eigs' for eigenvalues, 'tensor' for the full
tensor and 'average' for an average (trace/3)
Returns:
a dictionary {'p':{temp:[]},'n':{temp:[]}} with an array of
effective mass tensor, eigenvalues of average
value (depending on output) for each temperature and for each
doping level.
The 'p' links to hole effective mass tensor and 'n' to electron
effective mass tensor.
"""
result_doping = {doping: {t: [] for t in self._cond_doping[doping]} for
doping in self.doping}
for doping in result_doping:
for temp in result_doping[doping]:
for i in range(len(self.doping[doping])):
if output == 'tensor':
result_doping[doping][temp].append(np.linalg.inv(
np.array(self._cond_doping[doping][temp][i])) \
*
self.doping[doping][
i] * 10 ** 6 *
e ** 2 /
m_e)
elif output in ['eig', 'eigs']:
result_doping[doping][temp].append(
sorted(np.linalg.eigh(np.linalg.inv(
np.array(self._cond_doping[doping][temp][i])) *
self.doping[doping][
i] * 10 ** 6 * e ** 2 \
/ m_e)[0]))
else:
full_tensor = np.linalg.inv(
np.array(self._cond_doping[doping][temp][i])) \
* self.doping[doping][
i] * 10 ** 6 * e ** 2 / m_e
result_doping[doping][temp].append(
(full_tensor[0][0] + full_tensor[1][1] +
full_tensor[2][2]) / 3.0)
return result_doping
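    # Hedged unit check of the expression above (my reading, not from the docs):
    # self._cond_doping stores sigma/tau, so
    #     m*/m_e = n[m^-3] * e**2 * inv(sigma/tau) / m_e
    # where n = doping[cm^-3] * 1e6 converts the doping to m^-3; no explicit
    # relaxation time is needed because inv(sigma/tau) already carries it, and
    # the result is a dimensionless tensor in units of the electron mass.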
def get_extreme(self, target_prop, maximize=True, min_temp=None,
max_temp=None, min_doping=None, max_doping=None,
isotropy_tolerance=0.05, use_average=True):
"""
This method takes in eigenvalues over a range of carriers,
temperatures, and doping levels, and tells you what is the "best"
value that can be achieved for the given target_property. Note that
this method searches the doping dict only, not the full mu dict.
Args:
target_prop: target property, i.e. "seebeck", "power factor",
"conductivity", "kappa", or "zt"
maximize: True to maximize, False to minimize (e.g. kappa)
min_temp: minimum temperature allowed
max_temp: maximum temperature allowed
min_doping: minimum doping allowed (e.g., 1E18)
max_doping: maximum doping allowed (e.g., 1E20)
isotropy_tolerance: tolerance for isotropic (0.05 = 5%)
use_average: True for avg of eigenval, False for max eigenval
Returns:
A dictionary with keys {"p", "n", "best"} with sub-keys:
{"value", "temperature", "doping", "isotropic"}
"""
def is_isotropic(x, isotropy_tolerance):
"""
Internal method to tell you if 3-vector "x" is isotropic
Args:
x: the vector to determine isotropy for
isotropy_tolerance: tolerance, e.g. 0.05 is 5%
"""
if len(x) != 3:
raise ValueError("Invalid input to is_isotropic!")
st = sorted(x)
return bool(all([st[0],st[1],st[2]]) and \
(abs((st[1]-st[0])/st[1]) <= isotropy_tolerance) and \
                    (abs((st[2]-st[0])/st[2]) <= isotropy_tolerance) and \
(abs((st[2]-st[1])/st[2]) <= isotropy_tolerance))
if target_prop.lower() == "seebeck":
d = self.get_seebeck(output="eigs", doping_levels=True)
elif target_prop.lower() == "power factor":
d = self.get_power_factor(output="eigs", doping_levels=True)
elif target_prop.lower() == "conductivity":
d = self.get_conductivity(output="eigs", doping_levels=True)
elif target_prop.lower() == "kappa":
d = self.get_thermal_conductivity(output="eigs",
doping_levels=True)
elif target_prop.lower() == "zt":
d = self.get_zt(output="eigs", doping_levels=True)
else:
raise ValueError("Target property: {} not recognized!".
format(target_prop))
absval = True # take the absolute value of properties
x_val = None
x_temp = None
x_doping = None
x_isotropic = None
output = {}
min_temp = min_temp or 0
max_temp = max_temp or float('inf')
min_doping = min_doping or 0
max_doping = max_doping or float('inf')
for pn in ('p', 'n'):
for t in d[pn]: # temperatures
if min_temp <= float(t) <= max_temp:
for didx, evs in enumerate(d[pn][t]):
doping_lvl = self.doping[pn][didx]
if min_doping <= doping_lvl <= max_doping:
isotropic = is_isotropic(evs, isotropy_tolerance)
if absval:
evs = [abs(x) for x in evs]
if use_average:
val = float(sum(evs))/len(evs)
else:
val = max(evs)
if x_val is None or (val > x_val and maximize) \
or (val < x_val and not maximize):
x_val = val
x_temp = t
x_doping = doping_lvl
x_isotropic = isotropic
output[pn] = {'value': x_val, 'temperature': x_temp,
'doping': x_doping, 'isotropic': x_isotropic}
x_val = None
if maximize:
max_type = 'p' if output['p']['value'] >= \
output['n']['value'] else 'n'
else:
max_type = 'p' if output['p']['value'] <= \
output['n']['value'] else 'n'
output['best'] = output[max_type]
output['best']['carrier_type'] = max_type
return output
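    # Hedged usage sketch (argument values are illustrative):
    #     best = an.get_extreme("seebeck", maximize=True, min_temp=300,
    #                           max_temp=600, min_doping=1e18, max_doping=1e20)
    #     best['best']['value'], best['best']['temperature'], best['best']['doping']
    # The 'best' entry also carries 'carrier_type' ('p' or 'n') and 'isotropic'.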
@staticmethod
def _format_to_output(tensor, tensor_doping, output, doping_levels,
multi=1.0):
if doping_levels:
full_tensor = tensor_doping
result = {doping: {t: [] for t in tensor_doping[doping]} for doping
in tensor_doping}
for doping in full_tensor:
for temp in full_tensor[doping]:
for i in range(len(full_tensor[doping][temp])):
if output in ['eig', 'eigs']:
result[doping][temp].append(sorted(
np.linalg.eigh(full_tensor[doping][temp][i])[
0] * multi))
elif output == 'tensor':
result[doping][temp].append(
np.array(full_tensor[doping][temp][i]) * multi)
elif output == 'average':
result[doping][temp].append(
(full_tensor[doping][temp][i][0][0] \
+ full_tensor[doping][temp][i][1][1] \
+ full_tensor[doping][temp][i][2][
2]) * multi / 3.0)
else:
raise ValueError("Unknown output format: "
"{}".format(output))
else:
full_tensor = tensor
result = {t: [] for t in tensor}
for temp in full_tensor:
for i in range(len(tensor[temp])):
if output in ['eig', 'eigs']:
result[temp].append(sorted(
np.linalg.eigh(full_tensor[temp][i])[0] * multi))
elif output == 'tensor':
result[temp].append(
np.array(full_tensor[temp][i]) * multi)
elif output == 'average':
result[temp].append((full_tensor[temp][i][0][0]
+ full_tensor[temp][i][1][1]
+ full_tensor[temp][i][2][
2]) * multi / 3.0)
else:
raise ValueError("Unknown output format: {}".
format(output))
return result
def get_complete_dos(self, structure, analyzer_for_second_spin=None):
"""
Gives a CompleteDos object with the DOS from the interpolated
projected band structure
        Args:
            structure: the structure (necessary to identify sites for
                projection)
            analyzer_for_second_spin: must be specified to have a
                CompleteDos with both Spin components
        Returns:
            a CompleteDos object
        Example of use in the spin polarized case:
BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=1).run(path_dir='dos_up/')
an_up=BoltztrapAnalyzer.from_files("dos_up/boltztrap/",dos_spin=1)
BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=-1).run(path_dir='dos_dw/')
an_dw=BoltztrapAnalyzer.from_files("dos_dw/boltztrap/",dos_spin=-1)
cdos=an_up.get_complete_dos(bs.structure,an_dw)
"""
pdoss = {}
spin_1 = list(self.dos.densities.keys())[0]
if analyzer_for_second_spin:
if not np.all(self.dos.energies ==
analyzer_for_second_spin.dos.energies):
raise BoltztrapError(
"Dos merging error: energies of the two dos are different")
spin_2 = list(analyzer_for_second_spin.dos.densities.keys())[0]
if spin_1 == spin_2:
raise BoltztrapError(
"Dos merging error: spin component are the same")
for s in self._dos_partial:
if structure.sites[int(s)] not in pdoss:
pdoss[structure.sites[int(s)]] = {}
for o in self._dos_partial[s]:
if Orbital[o] not in pdoss[structure.sites[int(s)]]:
pdoss[structure.sites[int(s)]][Orbital[o]] = {}
pdoss[structure.sites[int(s)]][Orbital[o]][
spin_1] = self._dos_partial[s][o]
if analyzer_for_second_spin:
pdoss[structure.sites[int(s)]][Orbital[o]][
spin_2] = analyzer_for_second_spin._dos_partial[s][o]
if analyzer_for_second_spin:
tdos = Dos(self.dos.efermi, self.dos.energies,
{spin_1: self.dos.densities[spin_1],
spin_2: analyzer_for_second_spin.dos.densities[
spin_2]})
else:
tdos = self.dos
return CompleteDos(structure, total_dos=tdos, pdoss=pdoss)
def get_mu_bounds(self, temp=300):
return min(self.mu_doping['p'][temp]), max(self.mu_doping['n'][temp])
def get_carrier_concentration(self):
"""
gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential
"""
return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]]
for temp in self._carrier_conc}
def get_hall_carrier_concentration(self):
"""
        gives the Hall carrier concentration (in cm^-3). This is the trace of
        the Hall tensor (see the BoltzTraP source code); the Hall carrier
        concentration is not always exactly the same as the carrier concentration.
Returns
a dictionary {temp:[]} with an array of Hall carrier concentration
(in cm^-3) at each temperature The array relates to each step of
electron chemical potential
"""
result = {temp: [] for temp in self._hall}
for temp in self._hall:
for i in self._hall[temp]:
trace = (i[1][2][0] + i[2][0][1] + i[0][1][2]) / 3.0
if trace != 0.0:
result[temp].append(1e-6 / (trace * e))
else:
result[temp].append(0.0)
return result
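    # Hedged reading of the conversion above: the averaged off-diagonal Hall
    # components give R_H in m^3/C, so n_H = 1 / (e * R_H) is in m^-3 and the
    # 1e-6 factor converts it to cm^-3, consistent with get_carrier_concentration().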
@staticmethod
def parse_outputtrans(path_dir):
"""
Parses .outputtrans file
Args:
path_dir: dir containing boltztrap.outputtrans
Returns:
tuple - (run_type, warning, efermi, gap, doping_levels)
"""
run_type = None
warning = None
efermi = None
gap = None
doping_levels = []
with open(os.path.join(path_dir, "boltztrap.outputtrans"), 'r') \
as f:
for line in f:
if "WARNING" in line:
warning = line
elif "Calc type:" in line:
run_type = line.split()[-1]
elif line.startswith("VBM"):
efermi = Energy(line.split()[1], "Ry").to("eV")
elif line.startswith("Egap:"):
gap = Energy(float(line.split()[1]), "Ry").to("eV")
elif line.startswith("Doping level number"):
doping_levels.append(float(line.split()[6]))
return run_type, warning, efermi, gap, doping_levels
@staticmethod
def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):
"""
Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files
Args:
path_dir: (str) dir containing DOS files
efermi: (float) Fermi energy
dos_spin: (int) -1 for spin down, +1 for spin up
trim_dos: (bool) whether to post-process / trim DOS
Returns:
tuple - (DOS, dict of partial DOS)
"""
data_dos = {'total': [], 'partial': {}}
# parse the total DOS data
## format is energy, DOS, integrated DOS
with open(os.path.join(path_dir, "boltztrap.transdos"), 'r') as f:
count_series = 0 # TODO: why is count_series needed?
for line in f:
if line.lstrip().startswith("#"):
count_series += 1
if count_series > 1:
break
else:
data_dos['total'].append(
[Energy(float(line.split()[0]), "Ry").to("eV"),
float(line.split()[1])])
total_elec = float(line.split()[2])
if trim_dos:
# Francesco knows what this does
# It has something to do with a trick of adding fake energies
# at the endpoints of the DOS, and then re-trimming it. This is
# to get the same energy scale for up and down spin DOS.
tmp_data = np.array(data_dos['total'])
tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:]
lw_l = len(tmp_data[:, 1]) - len(tmp_den)
tmp_ene = tmp_data[lw_l:, 0]
tmp_den = np.trim_zeros(tmp_den, 'b')[:-1]
hg_l = len(tmp_ene) - len(tmp_den)
tmp_ene = tmp_ene[:-hg_l]
tmp_data = np.vstack((tmp_ene, tmp_den)).T
data_dos['total'] = tmp_data.tolist()
# parse partial DOS data
for file_name in os.listdir(path_dir):
if file_name.endswith(
"transdos") and file_name != 'boltztrap.transdos':
tokens = file_name.split(".")[1].split("_")
site = tokens[1]
orb = '_'.join(tokens[2:])
with open(os.path.join(path_dir, file_name), 'r') as f:
for line in f:
if not line.lstrip().startswith(" #"):
if site not in data_dos['partial']:
data_dos['partial'][site] = {}
if orb not in data_dos['partial'][site]:
data_dos['partial'][site][orb] = []
data_dos['partial'][site][orb].append(
float(line.split()[1]))
data_dos['partial'][site][orb] = data_dos['partial'][site][
orb][lw_l:-hg_l]
dos_full = {'energy': [], 'density': []}
for t in data_dos['total']:
dos_full['energy'].append(t[0])
dos_full['density'].append(t[1])
dos = Dos(efermi, dos_full['energy'],
{Spin(dos_spin): dos_full['density']})
dos_partial = data_dos['partial'] # TODO: make this real DOS object?
return dos, dos_partial
@staticmethod
def parse_intrans(path_dir):
"""
        Parses boltztrap.intrans, mainly to extract the value of the scissor operator applied to the bands, or other inputs
Args:
path_dir: (str) dir containing the boltztrap.intrans file
Returns:
intrans (dict): a dictionary containing various inputs that had been used in the Boltztrap run.
"""
intrans = {}
with open(os.path.join(path_dir, "boltztrap.intrans"), 'r') as f:
for line in f:
if "iskip" in line:
intrans["scissor"] = Energy(float(line.split(" ")[3]), "Ry").to("eV")
break
return intrans
@staticmethod
def parse_struct(path_dir):
"""
Parses boltztrap.struct file (only the volume)
Args:
path_dir: (str) dir containing the boltztrap.struct file
Returns:
(float) volume
"""
with open(os.path.join(path_dir, "boltztrap.struct"), 'r') as f:
tokens = f.readlines()
return Lattice([[Length(float(tokens[i].split()[j]), "bohr").
to("ang") for j in range(3)] for i in
range(1, 4)]).volume
@staticmethod
def parse_cond_and_hall(path_dir, doping_levels=None):
"""
Parses the conductivity and Hall tensors
Args:
path_dir: Path containing .condtens / .halltens files
            doping_levels: ([float]) - doping levels; parse outputtrans to get these
Returns:
mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, carrier_conc
"""
# Step 1: parse raw data but do not convert to final format
t_steps = set()
mu_steps = set()
data_full = []
data_hall = []
data_doping_full = []
data_doping_hall = []
doping_levels = doping_levels or []
# parse the full conductivity/Seebeck/kappa0/etc data
## also initialize t_steps and mu_steps
with open(os.path.join(path_dir, "boltztrap.condtens"), 'r') as f:
for line in f:
if not line.startswith("#"):
mu_steps.add(float(line.split()[0]))
t_steps.add(int(float(line.split()[1])))
data_full.append([float(c) for c in line.split()])
# parse the full Hall tensor
with open(os.path.join(path_dir, "boltztrap.halltens"), 'r') as f:
for line in f:
if not line.startswith("#"):
data_hall.append([float(c) for c in line.split()])
if len(doping_levels) != 0:
# parse doping levels version of full cond. tensor, etc.
with open(
os.path.join(path_dir, "boltztrap.condtens_fixdoping"),
'r') as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_full.append([float(c)
for c in line.split()])
# parse doping levels version of full hall tensor
with open(
os.path.join(path_dir, "boltztrap.halltens_fixdoping"),
'r') as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_hall.append(
[float(c) for c in line.split()])
# Step 2: convert raw data to final format
# sort t and mu_steps (b/c they are sets not lists)
# and convert to correct energy
t_steps = sorted([t for t in t_steps])
mu_steps = sorted([Energy(m, "Ry").to("eV") for m in mu_steps])
# initialize output variables - could use defaultdict instead
# I am leaving things like this for clarity
cond = {t: [] for t in t_steps}
seebeck = {t: [] for t in t_steps}
kappa = {t: [] for t in t_steps}
hall = {t: [] for t in t_steps}
carrier_conc = {t: [] for t in t_steps}
dos_full = {'energy': [], 'density': []}
mu_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
seebeck_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
cond_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
kappa_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
hall_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
# process doping levels
pn_doping_levels = {'p': [], 'n': []}
for d in doping_levels:
if d > 0:
pn_doping_levels['p'].append(d)
else:
pn_doping_levels['n'].append(-d)
# process raw conductivity data, etc.
for d in data_full:
temp, doping = d[1], d[2]
carrier_conc[temp].append(doping)
cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())
seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())
kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist())
# process raw Hall data
for d in data_hall:
temp, doping = d[1], d[2]
hall_tens = [np.reshape(d[3:12], (3, 3)).tolist(),
np.reshape(d[12:21], (3, 3)).tolist(),
np.reshape(d[21:30], (3, 3)).tolist()]
hall[temp].append(hall_tens)
# process doping conductivity data, etc.
for d in data_doping_full:
temp, doping, mu = d[0], d[1], d[-1]
pn = 'p' if doping > 0 else 'n'
mu_doping[pn][temp].append(Energy(mu, "Ry").to("eV"))
cond_doping[pn][temp].append(
np.reshape(d[2:11], (3, 3)).tolist())
seebeck_doping[pn][temp].append(
np.reshape(d[11:20], (3, 3)).tolist())
kappa_doping[pn][temp].append(
np.reshape(d[20:29], (3, 3)).tolist())
# process doping Hall data
for d in data_doping_hall:
temp, doping, mu = d[0], d[1], d[-1]
pn = 'p' if doping > 0 else 'n'
hall_tens = [np.reshape(d[2:11], (3, 3)).tolist(),
np.reshape(d[11:20], (3, 3)).tolist(),
np.reshape(d[20:29], (3, 3)).tolist()]
hall_doping[pn][temp].append(hall_tens)
return mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, \
mu_doping, seebeck_doping, cond_doping, kappa_doping, \
hall_doping, carrier_conc
@staticmethod
def from_files(path_dir, dos_spin=1):
"""
get a BoltztrapAnalyzer object from a set of files
Args:
path_dir: directory where the boltztrap files are
dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down
Returns:
a BoltztrapAnalyzer object
"""
run_type, warning, efermi, gap, doping_levels = \
BoltztrapAnalyzer.parse_outputtrans(path_dir)
vol = BoltztrapAnalyzer.parse_struct(path_dir)
intrans = BoltztrapAnalyzer.parse_intrans(path_dir)
if run_type == "BOLTZ":
dos, pdos = BoltztrapAnalyzer.parse_transdos(
path_dir, efermi, dos_spin=dos_spin, trim_dos=False)
mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping,\
seebeck_doping, cond_doping, kappa_doping, hall_doping,\
carrier_conc = BoltztrapAnalyzer.\
parse_cond_and_hall(path_dir, doping_levels)
return BoltztrapAnalyzer(
gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)
elif run_type == "DOS":
dos, pdos = BoltztrapAnalyzer.parse_transdos(
path_dir, efermi, dos_spin=dos_spin, trim_dos=True)
return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,
warning=warning, vol=vol)
elif run_type == "BANDS":
bz_kpoints = np.loadtxt(
os.path.join(path_dir, "boltztrap_band.dat"))[:, -3:]
bz_bands = np.loadtxt(
os.path.join(path_dir, "boltztrap_band.dat"))[:, 1:-6]
return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,
warning=warning, vol=vol)
elif run_type == "FERMI":
# TODO: There is no way to get this shitty ASE crap working.
# If you want to read CUBE files, write a proper IO class in
# pymatgen. I refuse to let pymatgen be bogged down by ASE crap.
"""
from ase.io.cube import read_cube
if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):
fs_data = read_cube(open(os.path.join(path_dir, 'boltztrap_BZ.cube'), "rt"), read_data=True)
if os.path.exists(os.path.join(path_dir, 'fort.30')):
fs_data = read_cube(open(os.path.join(path_dir, 'fort.30'), "rt"), read_data=True)
else:
raise BoltztrapError("No data file found for fermi surface")
return BoltztrapAnalyzer(fermi_surface_data=fermi_surface_data)
"""
raise ValueError("FERMI mode parsing is currently unavailable!")
else:
raise ValueError("Run type: {} not recognized!".format(run_type))
def as_dict(self):
results = {'gap': self.gap,
'mu_steps': self.mu_steps,
'scissor': self.intrans["scissor"],
'cond': self._cond,
'seebeck': self._seebeck,
'kappa': self._kappa,
'hall': self._hall,
'doping': self.doping,
'mu_doping': self.mu_doping,
'seebeck_doping': self._seebeck_doping,
'cond_doping': self._cond_doping,
'kappa_doping': self._kappa_doping,
'hall_doping': self._hall_doping,
'dos': self.dos.as_dict(),
'dos_partial': self._dos_partial,
'carrier_conc': self._carrier_conc,
'vol': self.vol,
'warning': self.warning}
return jsanitize(results)
@staticmethod
def from_dict(data):
def _make_float_array(a):
res = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
for i in range(3):
for j in range(3):
res[i][j] = float(a[i][j])
return res
def _make_float_hall(a):
return [i for i in a[:27]]
gap = data.get('gap')
mu_steps = [float(d) for d in data['mu_steps']] if \
'mu_steps' in data else None
cond = {int(d): [_make_float_array(v) for v in data['cond'][d]]
for d in data['cond']} if 'cond' in data else None
seebeck = {int(d): [_make_float_array(v) for v in data['seebeck'][d]]
for d in data['seebeck']} if 'seebeck' in data else None
kappa = {int(d): [_make_float_array(v) for v in data['kappa'][d]]
for d in data['kappa']} if 'kappa' in data else None
hall = {int(d): [_make_float_hall(v) for v in data['hall'][d]]
for d in data['hall']} if 'hall' in data else None
doping = {'p': [float(d) for d in data['doping']['p']],
'n': [float(d) for d in data['doping']['n']]} if \
'doping' in data else None
mu_doping = {'p': {int(d): [
float(v) for v in data['mu_doping']['p'][d]] for d in
data['mu_doping']['p']}, 'n':
{int(d): [float(v) for v in data['mu_doping']['n'][d]]
for d in data['mu_doping']['n']}} if 'mu_doping' in data else None
seebeck_doping = {'p': {int(d): [
_make_float_array(v) for v in data['seebeck_doping']['p'][d]]
for d in data['seebeck_doping']['p']}, 'n':
{int(d): [_make_float_array(v) for v in
data['seebeck_doping']['n'][d]] for d in
data['seebeck_doping']['n']}} if 'seebeck_doping' in data \
else None
cond_doping = {'p': {int(d): [_make_float_array(v)
for v in data['cond_doping']['p'][d]]
for d in data['cond_doping']['p']}, 'n':
{int(d): [_make_float_array(v) for v in
data['cond_doping']['n'][d]] for
d in data['cond_doping']['n']}} if 'cond_doping' in data else None
kappa_doping = {'p': {int(d): [_make_float_array(v)
for v in data['kappa_doping']['p'][d]]
for d in data['kappa_doping']['p']},
'n': {int(d): [_make_float_array(v) for v in
data['kappa_doping']['n'][d]]
for d in data['kappa_doping']['n']}}\
if 'kappa_doping' in data else None
hall_doping = {'p': {int(d): [_make_float_hall(v) for v in
data['hall_doping']['p'][d]] for d in
data['hall_doping']['p']}, 'n':
{int(d): [_make_float_hall(v) for v in
data['hall_doping']['n'][d]] for d in
data['hall_doping']['n']}} if "hall_doping" in data else None
dos = Dos.from_dict(data['dos']) if 'dos' in data else None
dos_partial = data.get('dos_partial')
carrier_conc = data.get('carrier_conc')
vol = data.get('vol')
warning = data.get('warning')
return BoltztrapAnalyzer(gap, mu_steps, cond, seebeck, kappa, hall,
doping, mu_doping, seebeck_doping,
cond_doping, kappa_doping, hall_doping, dos,
dos_partial, carrier_conc, vol, warning)
def compare_sym_bands(bands_obj, bands_ref_obj, nb=None):
"""
    Compute the mean correlation between the BoltzTraP and VASP band
    structures along the symmetry line for all bands and, if nb is
    specified, the local energy difference for each branch.
"""
nkpt = len(bands_obj.kpoints)
if bands_ref_obj.is_spin_polarized:
nbands = min(bands_obj.nb_bands, 2 * bands_ref_obj.nb_bands)
else:
# TODO: why is this needed? Shouldn't pmg take care of nb_bands?
nbands = min(len(bands_obj.bands[Spin.up]),
len(bands_ref_obj.bands[Spin.up]))
# print(nbands)
arr_bands = np.array(bands_obj.bands[Spin.up][:nbands])
# arr_bands_lavg = (arr_bands-np.mean(arr_bands,axis=1).reshape(nbands,1))
if bands_ref_obj.is_spin_polarized:
arr_bands_ref_up = np.array(bands_ref_obj.bands[Spin.up])
arr_bands_ref_dw = np.array(bands_ref_obj.bands[Spin.down])
# print(arr_bands_ref_up.shape)
arr_bands_ref = np.vstack((arr_bands_ref_up, arr_bands_ref_dw))
arr_bands_ref = np.sort(arr_bands_ref, axis=0)[:nbands]
# print(arr_bands_ref.shape)
else:
arr_bands_ref = np.array(bands_ref_obj.bands[Spin.up][:nbands])
# arr_bands_ref_lavg =
# (arr_bands_ref-np.mean(arr_bands_ref,axis=1).reshape(nbands,1))
# err = np.sum((arr_bands_lavg-arr_bands_ref_lavg)**2,axis=1)/nkpt
corr = np.array(
[distance.correlation(arr_bands[idx], arr_bands_ref[idx]) for idx in
range(nbands)])
if type(nb) == int: nb = [nb]
bcheck={}
if max(nb) < nbands:
branches = [[s['start_index'], s['end_index'], s['name']] for s in
bands_ref_obj.branches]
if not bands_obj.is_metal() and not bands_ref_obj.is_metal():
zero_ref = bands_ref_obj.get_vbm()['energy']
zero = bands_obj.get_vbm()['energy']
if not zero:
vbm = bands_ref_obj.get_vbm()['band_index'][Spin.up][-1]
zero = max(arr_bands[vbm])
else:
zero_ref = 0#bands_ref_obj.efermi
zero = 0#bands_obj.efermi
print(zero,zero_ref)
for nbi in nb:
bcheck[nbi]={}
bcheck[nbi]['Dist'] = np.mean(abs(arr_bands[nbi] - zero - arr_bands_ref[nbi] + zero_ref))
bcheck[nbi]['Corr'] = corr[nbi]
for start, end, name in branches:
# werr.append((sum((arr_bands_corr[nb][start:end+1] -
# arr_bands_ref_corr[nb][start:end+1])**2)/(end+1-start)*100,name))
bcheck[nbi][name] = np.mean(abs(arr_bands[nbi][start:end + 1] - zero -
arr_bands_ref[nbi][start:end + 1] + zero_ref))
# abs(arr_bands_ref[nb][start:end + 1])) / (end + 1 - start) * 100
else:
bcheck = "No nb given"
return bcheck
|
aykol/pymatgen
|
pymatgen/electronic_structure/boltztrap.py
|
Python
|
mit
| 92,713
|
[
"ASE",
"BoltzTrap",
"VASP",
"pymatgen"
] |
53ab5e002d57f61cd5f8fd2a24107ef2248aaca5a801f1a3ecc68b6a2b55e24b
|
import subprocess
import ssbio.utils
import os
import os.path as op
from collections import OrderedDict
def run_freesasa(infile, outfile, include_hetatms=True, outdir=None, force_rerun=False):
"""Run freesasa on a PDB file, output using the NACCESS RSA format.
Args:
infile (str): Path to PDB file (only PDB file format is accepted)
outfile (str): Path or filename of output file
include_hetatms (bool): If heteroatoms should be included in the SASA calculations
outdir (str): Path to output file if not specified in outfile
force_rerun (bool): If freesasa should be rerun even if outfile exists
Returns:
str: Path to output SASA file
"""
if not outdir:
outdir = ''
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if op.exists(outfile):
os.remove(outfile)
if include_hetatms:
shell_command = 'freesasa --format=rsa --hetatm {} -o {}'.format(infile, outfile)
else:
shell_command = 'freesasa --format=rsa {} -o {}'.format(infile, outfile)
command = subprocess.Popen(shell_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = command.communicate()
return outfile
def parse_rsa_data(rsa_outfile, ignore_hets=True):
"""Process a NACCESS or freesasa RSA output file. Adapted from Biopython NACCESS modele.
Args:
rsa_outfile (str): Path to RSA output file
ignore_hets (bool): If HETATMs should be excluded from the final dictionary. This is extremely important
when loading this information into a ChainProp's SeqRecord, since this will throw off the sequence matching.
Returns:
dict: Per-residue dictionary of RSA values
"""
naccess_rel_dict = OrderedDict()
with open(rsa_outfile, 'r') as f:
for line in f:
if line.startswith('RES'):
res_name = line[4:7]
chain_id = line[8]
resseq = int(line[9:13])
icode = line[13]
res_id = (' ', resseq, icode)
all_atoms_abs = line[16:22].strip()
all_atoms_rel = line[23:28].strip()
side_chain_abs = line[29:35].strip()
side_chain_rel = line[36:41].strip()
main_chain_abs = line[42:48].strip()
main_chain_rel = line[49:54].strip()
non_polar_abs = line[55:61].strip()
non_polar_rel = line[62:67].strip()
all_polar_abs = line[68:74].strip()
all_polar_rel = line[75:80].strip()
if all_atoms_rel =='N/A' and main_chain_rel =='N/A' and all_polar_rel =='N/A' and non_polar_rel =='N/A' and side_chain_rel =='N/A' and ignore_hets:
continue
naccess_rel_dict[(chain_id, res_id)] = {
'res_name' : res_name,
'all_atoms_abs' : ssbio.utils.conv_to_float(all_atoms_abs, inf_str='N/A'),
'all_atoms_rel' : ssbio.utils.conv_to_float(all_atoms_rel, inf_str='N/A'),
'side_chain_abs': ssbio.utils.conv_to_float(side_chain_abs, inf_str='N/A'),
'side_chain_rel': ssbio.utils.conv_to_float(side_chain_rel, inf_str='N/A'),
'main_chain_abs': ssbio.utils.conv_to_float(main_chain_abs, inf_str='N/A'),
'main_chain_rel': ssbio.utils.conv_to_float(main_chain_rel, inf_str='N/A'),
'non_polar_abs' : ssbio.utils.conv_to_float(non_polar_abs, inf_str='N/A'),
'non_polar_rel' : ssbio.utils.conv_to_float(non_polar_rel, inf_str='N/A'),
'all_polar_abs' : ssbio.utils.conv_to_float(all_polar_abs, inf_str='N/A'),
'all_polar_rel' : ssbio.utils.conv_to_float(all_polar_rel, inf_str='N/A')}
return naccess_rel_dict
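# Hedged end-to-end sketch (file names are hypothetical and the `freesasa`
# binary is assumed to be on the PATH):
#     rsa_path = run_freesasa('1abc.pdb', '1abc.rsa', include_hetatms=False,
#                             outdir='/tmp', force_rerun=True)
#     rsa = parse_rsa_data(rsa_path, ignore_hets=True)
#     for (chain_id, res_id), vals in rsa.items():
#         print(chain_id, res_id, vals['all_atoms_rel'])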
|
SBRG/ssbio
|
ssbio/protein/structure/properties/freesasa.py
|
Python
|
mit
| 4,076
|
[
"Biopython"
] |
8c6ae81d8821227b6b02b7eb2ffb109d0b31c9547fcd0609a01bc988e3a13696
|
## Author: Aziz Khan
## License: GPL v3
## Copyright 2017 Aziz Khan <azez.khan__AT__gmail.com>
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.http import JsonResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q, Max
from .models import Matrix, MatrixAnnotation, MatrixSpecies, Tax, TaxExt, MatrixData, MatrixProtein, Tffm, Post
from utils import utils
from itertools import chain
from operator import attrgetter
from sets import Set as set
from .forms import InferenceForm, ContactForm, AlignForm, SearchForm
import os, sys, re
from django.core.mail import send_mail, BadHeaderError
from django.urls import reverse
from utils.motif_inferrer.inferrer import motif_infer, write
from jaspar.settings import BASE_DIR, BIN_DIR, TEMP_DIR, TEMP_LIFE, SEND_TO_EMAIL
def index(request):
'''
This loads the homepage
'''
setattr(request, 'view', 'index')
exp_type = MatrixAnnotation.objects.filter(tag='type').values('val').distinct().order_by('val')
tf_family = MatrixAnnotation.objects.filter(tag='family').values('val').distinct().order_by('val')
tf_class = MatrixAnnotation.objects.filter(tag='class').values('val').distinct().order_by('val')
search_form = SearchForm()
context ={
'exp_type': exp_type,
'tf_family': tf_family,
'tf_class': tf_class,
'search_form': search_form,
}
home_version = request.GET.get('home', None)
if home_version == 'v1':
return render(request, 'portal/index_v1.html', context)
else:
return render(request, 'portal/index.html', context)
def search(request):
'''
    This function returns the results based on the search query
'''
query_string = request.GET.get('q', None)
tax_group = request.GET.get('tax_group', None)
collection = request.GET.get('collection', None)
exp_type = request.GET.get('type', None)
tf_class = request.GET.get('class', None)
tf_family = request.GET.get('family', None)
version = request.GET.get('version', None)
#has_tffm = request.GET.get('has_tffm', None)
setattr(request, 'view', 'search')
#Pagination
page = request.GET.get('page', 1)
page_size = request.GET.get('page_size', 10)
if page_size =='' or int(page_size) > 1000:
page_size = 10
queryset = None
if query_string is not None:
#check if user is searching with matrix id, then return to detail page else pass
id_query = query_string.split('.')
if len(id_query) == 2:
if len(Matrix.objects.filter(base_id=id_query[0], version=id_query[1])) > 0:
return redirect('/matrix/'+query_string)
else:
pass
#If collection is not set, set it to CORE
if collection is None or collection =='':
collection = 'CORE'
#Get matrix ids by searching from different models
if collection == 'all':
queryset = Matrix.objects.all().order_by('base_id')
else:
queryset = Matrix.objects.filter(collection=collection.upper()).order_by('base_id')
#if has_tffm:
# base_ids = Tffm.objects.values_list('matrix_base_id',flat=True)
# queryset = queryset.filter(base_id__in=base_ids)
#get matrix ids
matrix_ids = queryset.values_list('id', flat=True)
#filter based on tax group
if tax_group and tax_group !='' and tax_group !='all':
matrix_ids = MatrixAnnotation.objects.values_list('matrix_id', flat=True).filter(
tag='tax_group', val=tax_group.lower(), matrix_id__in=matrix_ids)
#filter based on experiment type
if exp_type and exp_type !='' and exp_type !='all':
matrix_ids = MatrixAnnotation.objects.values_list('matrix_id', flat=True).filter(
tag='type', val=exp_type, matrix_id__in=matrix_ids)
#filter based on tf class
if tf_class and tf_class !='' and tf_class !='all':
matrix_ids = MatrixAnnotation.objects.values_list('matrix_id', flat=True).filter(
tag='class', val=tf_class, matrix_id__in=matrix_ids)
#filter based on tf family
if tf_family and tf_family !='' and tf_family !='all':
matrix_ids = MatrixAnnotation.objects.values_list('matrix_id', flat=True).filter(
tag='family', val=tf_family, matrix_id__in=matrix_ids)
#filter based on query_sting
if query_string !='':
#filter based on matrix model
if collection !='all':
matrix_matrix_ids = list(queryset.values_list('id', flat=True).filter(
Q(name__icontains=query_string) |
Q(base_id__icontains=query_string),
collection=collection, id__in=matrix_ids)
)
else:
matrix_matrix_ids = list(queryset.values_list('id', flat=True).filter(
Q(name__icontains=query_string) |
Q(base_id__icontains=query_string), id__in=matrix_ids))
#filter based of MatrixAnnotation model
matrix_annotation_ids = list(MatrixAnnotation.objects.values_list('matrix_id', flat=True).filter(
val__icontains=query_string,
matrix_id__in=matrix_ids))
#filter based of MatrixProtein model
matrix_protein_ids = list(MatrixProtein.objects.values_list('matrix_id', flat=True).filter(
acc=query_string, matrix_id__in=matrix_ids))
#filter based of MatrixSpecies model
matrix_species_ids = list(MatrixSpecies.objects.values_list('matrix_id', flat=True).filter(
Q(tax_id=query_string) |
Q(tax_id__species__icontains=query_string),
matrix_id__in=matrix_ids)
)
#Make a union of all the matrix ids
matrix_ids = list(
set(matrix_matrix_ids) |
set(matrix_annotation_ids) |
set(matrix_protein_ids) |
set(matrix_species_ids)
)
#filter matrix query based on ids
queryset = queryset.filter(id__in=matrix_ids).distinct()
#If version is set to latest, then get the latest version
if version == 'latest':
Q_statement = Q()
latest_versions = queryset.values('base_id').annotate(latest_version=Max('version')).order_by()
for version in latest_versions:
Q_statement |=(Q(base_id__exact=version['base_id']) & Q(version=version['latest_version']))
queryset = queryset.filter(Q_statement).distinct()
##paginate the queryset
paginator = Paginator(queryset, page_size)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
queryset = paginator.page(1)
except EmptyPage:
queryset = paginator.page(paginator.num_pages)
##create a data dictionary with more information from other tables by looping through the matrix ids
results = _get_matrix_detail_info(queryset)
else:
results = None
#exp_type for the search form drop-down
exp_type = MatrixAnnotation.objects.filter(tag='type').values('val').distinct().order_by('val')
tf_family = MatrixAnnotation.objects.filter(tag='family').values('val').distinct().order_by('val')
tf_class = MatrixAnnotation.objects.filter(tag='class').values('val').distinct().order_by('val')
#import json
#results = json.dumps(list(results))
context = {
'matrices': results,
'exp_type': exp_type,
'pages': queryset,
'tf_family': tf_family,
'tf_class': tf_class,
}
#return render(request, 'portal/search.html', context)
return render(request, 'portal/search_paginator.html', context)
def browse_collection(request, collection):
'''
Browse Collection
'''
setattr(request, 'view', 'collection')
page = request.GET.get('page', 1)
page_size = request.GET.get('page_size', 10)
if page_size =='':
page_size = 10
setattr(request, 'collection', collection.upper())
queryset = Matrix.objects.filter(collection=collection.upper()).order_by('base_id')
paginator = Paginator(queryset, page_size)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
queryset = paginator.page(1)
except EmptyPage:
queryset = paginator.page(paginator.num_pages)
results = _get_matrix_detail_info(queryset)
context = {
'matrices': results,
'pages': queryset,
'collection': collection,
}
return render(request, 'portal/browse_collection.html', context)
def _get_matrix_detail_info(queryset):
'''
Get detail matrix info based on queryset
@input:
queryset {QuerySet}
@return:
results {dictionary}
'''
results = []
for matrix in queryset:
data_dict = {}
data_dict['id'] = matrix.id
data_dict['matrix_id'] = matrix.base_id+'.'+str(matrix.version)
data_dict['base_id'] = matrix.base_id
data_dict['version'] = matrix.version
data_dict['collection'] = matrix.collection
data_dict['name'] = matrix.name
data_dict['logo'] = _get_sequence_logo(matrix.base_id+'.'+str(matrix.version))
#Get annotations for each matrix id
annotation_queryset = MatrixAnnotation.objects.filter(matrix_id=matrix.id)
data_dict.update(_map_annotations(annotation_queryset))
#Get species for each matrix id
species = MatrixSpecies.objects.filter(matrix_id=matrix.id)
#loop through species and get specie details
species_dict = []
if species:
for specie in species:
try:
species_dict.append([
specie.tax_id.tax_id,
specie.tax_id.species,
])
except:
pass
data_dict.update({'species': species_dict})
results.append(data_dict)
return results
def _map_annotations(queryset):
'''
Internal method to map annotations in a structured data
@input:
queryset {QuerySet}
@output:
annotations {dictionary}
'''
annotations = {}
tf_class = []
tf_family = []
tfe_ids = []
pubmed_ids = []
pazar_tf_ids =[]
#loop through annotations and get what needed
for annotation in queryset:
if annotation.tag == 'tax_group':
annotations['tax_group'] = annotation.val
elif annotation.tag == 'type':
annotations['type'] = annotation.val
elif annotation.tag == 'class':
tf_class.append(annotation.val)
elif annotation.tag == 'family':
tf_family.append(annotation.val)
elif annotation.tag == 'tfe_id':
tfe_ids.append(annotation.val)
elif annotation.tag == 'included_models':
annotations['included_models'] = annotation.val.replace(',', ' ')
elif annotation.tag == 'medline':
pubmed_ids.append(annotation.val)
elif annotation.tag == 'pazar_tf_id':
pazar_tf_ids.append(annotation.val)
else:
annotations[annotation.tag] = annotation.val
annotations['class'] = tf_class
annotations['family'] = tf_family
annotations['tfe_ids'] = tfe_ids
annotations['pubmed_ids'] = pubmed_ids
annotations['pazar_tf_ids'] = pazar_tf_ids
return annotations
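# Hedged sketch of the dictionary returned above (values are illustrative,
# inferred from the loop rather than from separate docs):
#     {'tax_group': 'vertebrates', 'type': 'ChIP-seq',
#      'class': ['C2H2 zinc finger factors'], 'family': ['GATA-type'],
#      'tfe_ids': [...], 'pubmed_ids': [...], 'pazar_tf_ids': [...]}
# Tags not handled explicitly are copied through as single key/value pairs.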
def _get_sequence_logo(matrix_id, input_type='sites', output_type='png', size='medium'):
'''
    Takes a matrix ID and returns the file name of its sequence logo (generating it if needed)
'''
from Bio import motifs
logo_name = matrix_id+'.'+output_type
#get absolute path
input_file = BASE_DIR+'/download/'+input_type+'/'+matrix_id+'.'+input_type
output_logo = BASE_DIR+'/static/logos/'+logo_name
if os.path.exists(output_logo):
return logo_name
else:
try:
with open(input_file) as handle:
motif = motifs.read(handle, input_type)
motif.weblogo(output_logo, format=output_type, show_errorbars=False, size='large', xaxis_label='Position', yaxis_label=' ')
#By using WebLogo
#cmd = "weblogo -f "+input_file+" -o "+output_logo+" --resolution 300 -x ' ' -y 'position' --format "+output_type+" --show_errorbars False --size "+size
#os.system(cmd)
except:
pass
return logo_name
## This function is not used
def svg_logo(request, matrix_id):
from Bio import motifs
output_type = 'jpg'
input_type = 'pfm'
logo_name = matrix_id+'.'+output_type
#get absolute path
input_file = BASE_DIR+'/download/'+input_type+'/'+matrix_id+'.'+input_type
output_logo = BASE_DIR+'/static/logos/svg/'+logo_name
if os.path.exists(output_logo):
return logo_name
else:
try:
#with open(input_file) as handle:
# motif = motifs.read(handle, input_type)
#motif.weblogo(output_logo, format=output_type, show_errorbars=False, size='large', xaxis_label='Position', yaxis_label=' ')
#By using WebLogo
cmd = "weblogo -f "+input_file+" -o "+output_logo+" --resolution 300 -x ' ' -y 'position' --format "+output_type+" --show_errorbars False --size large"
os.system(cmd)
except:
pass
return HttpResponse(output_logo)
def html_binding_sites(request, matrix_id):
sites_file = BASE_DIR+'/download/sites/'+matrix_id+'.sites'
sites = _get_html_binding_sties(sites_file)
context = {
'sites': sites,
'matrix_id': matrix_id,
}
return render(request, 'portal/html_binding_sites_external.html', context)
def _get_html_binding_sties(sites):
from Bio import SeqIO
split_sites = []
fasta_sequences = SeqIO.parse(open(sites),'fasta')
for fasta in fasta_sequences:
sequence = str(fasta.seq)
site = re.sub('[^A-Z]', '', sequence)
split_site = sequence.split(site)
split_site.append(site)
split_sites.append(split_site)
return split_sites
def matrix_detail(request, matrix_id):
'''
This will show the details of a matrix based on base_id and version
'''
(base_id, version) = utils.split_id(matrix_id)
matrix = Matrix.objects.get(base_id=base_id, version=version)
#annotation = MatrixAnnotation.objects.values_list('tag','val').filter(id=matrix.id)
annotation_queryset = MatrixAnnotation.objects.filter(id=matrix.id)
#map annotations
annotation = _map_annotations(annotation_queryset)
#matrixdata = MatrixData.objects.all().filter(id=matrix.id)
proteins = MatrixProtein.objects.filter(matrix_id=matrix.id)
species = MatrixSpecies.objects.filter(matrix_id=matrix.id)
try:
tffm = Tffm.objects.get(matrix_base_id=base_id, matrix_version=version)
except Tffm.DoesNotExist:
tffm = None
if request.method == 'GET' and 'revcomp' in request.GET:
revcomp_value = request.GET['revcomp']
if revcomp_value is not None and revcomp_value != '':
if revcomp_value == '1':
revcomp = True
else:
revcomp = False
else:
revcomp = False
tfbs_info = utils.tfbs_info_exist(base_id=base_id, version=version)
matrixdata = _get_matrix_data(matrix.id, revcomp=revcomp)
versions = _get_versions_data(base_id)
#Check if cart and remove the current profile if it's in the cart
cart = request.session.get('imatrix_ids', None)
removed = False
if cart:
matrix_id = request.GET.get('remove')
if matrix_id and matrix_id in cart:
cart.remove(matrix_id)
request.session['imatrix_ids'] = cart
removed = matrix_id
queryset = Matrix.objects.filter(id__in=cart)
results = _get_matrix_detail_info(queryset)
request.session['cart'] = results[:5]
context = {
'matrix': matrix,
'proteins': proteins,
'species': species,
'versions': versions,
'tffm': tffm,
}
context.update(tfbs_info)
context.update(matrixdata)
context.update(annotation)
return render(request, 'portal/matrix_detail.html', context)
def profile_inference(request):
'''
    This will infer a profile based on a protein sequence
'''
setattr(request, 'view', 'inference')
form_class = InferenceForm
# if request is not post, initialize an empty form
form = form_class(request.POST or None)
if request.method == 'POST':
#create a form instance and populate it with data from the request:
form = InferenceForm(request.POST)
# check whether it's valid:
if form.is_valid():
#process the data in form.cleaned_data as required
sequence = form.cleaned_data['sequence']
#Call motif_infer function, to get inferred motifs for the sequence
inferences = motif_infer(sequence)
matrices = []
for key, values in inferences.items():
for value in values:
data = {
'matrix_id': value[1],
'name': value[0],
'evalue': value[2],
'dbd': value[3]
}
if value[1] != '':
internal_id = utils.get_internal_id(value[1])
#values.append(internal_id)
data['id'] = internal_id
matrices.append(data)
#Create context to pass it to the profile_inference_results template
context = {
'matrices': matrices,
}
return render(request, 'portal/profile_inference_results.html', context)
#if it's not a POST request, show the form
else:
#create form object
form = InferenceForm()
#Create context to pass it to the profile_inference template
context = {
"form": form,
}
return render(request, 'portal/profile_inference.html', context)
def matrix_align(request):
'''
This will take a custom matrix or IUPAC string and align it to CORE (default) or to a selected collection
'''
form_class = AlignForm
# if request is not post, initialize an empty form
form = form_class(request.POST or None)
setattr(request, 'view', 'align')
if request.method == 'POST':
pfm1_input = request.POST.get('matrix')
matrix_type = request.POST.get('matrix_type')
form = AlignForm(request.POST)
# check whether it's valid:
if form.is_valid():
# process the data in form as required
pfm1 = TEMP_DIR+'/'+_get_current_date()+'_matrix_align_'+str(os.getpid())+'.pfm'
#import re
#pattern = re.compile("^([\[\]][ACGT]+)+$")
#if pattern.match(pfm1_input):
if matrix_type == 'jaspar':
pfm1_input = pfm1_input.replace('[',"")
pfm1_input = pfm1_input.replace(']',"")
pfm1_input = pfm1_input.replace('A',"")
pfm1_input = pfm1_input.replace('C',"")
pfm1_input = pfm1_input.replace('G',"")
pfm1_input = pfm1_input.replace('T',"")
elif matrix_type == 'iupac':
                #needs to be implemented; for now the IUPAC string is passed through unchanged
pfm1_input = pfm1_input
else:
pass
width = float(len(pfm1_input.split('\n')[0].split(' ')))
pfm1_file = open(pfm1, 'w')
pfm1_file.write(pfm1_input)
pfm1_file.close()
collection = form.cleaned_data['collection']
#get matrix data
queryset = Matrix.objects.filter(collection=collection)
version = 'latest'
#If version is set to latest, then get the latest version
if version == 'latest':
Q_statement = Q()
latest_versions = queryset.values('base_id').annotate(latest_version=Max('version')).order_by()
for version in latest_versions:
Q_statement |=(Q(base_id__exact=version['base_id']) & Q(version=version['latest_version']))
queryset = queryset.filter(Q_statement).distinct()
matrices = []
import subprocess
for query in queryset:
#pfm2 = _print_matrix_data(_get_matrix_data(query.id), format='pfm')
pfm2 = BASE_DIR+'/download/all_data/FlatFileDir/'+query.base_id+'.'+str(query.version)+'.pfm'
cmd = BIN_DIR+'/matrixaligner/matrix_aligner '+pfm1+' '+pfm2
#cmd = 'matrix_aligner '+pfm1+' '+pfm2
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
score = 1
try:
line = None
#for line in iter(proc.stdout.readline, ""):
for line in proc.stdout:
pass
if line is not None:
results = line.split('\t')
score = float(results[3])
except:
pass
pfm2_length = MatrixData.objects.filter(id=query.id, row='A').order_by('-col')[0]
                effective_width = min(width, pfm2_length.col)
                rel_score = 100.0*score/float(effective_width*2)
data = {
'id': query.id,
'matrix_id': query.base_id+'.'+str(query.version),
'name': query.name,
'collection': query.collection,
'score': score,
'percent_score': rel_score
}
matrices.append(data)
context = {
'matrices': matrices,
'matrix': request.POST.get('matrix'),
}
return render(request, 'portal/align_results.html', context)
else:
context = {
"form": form,
}
return render(request, 'portal/align.html', context)
def analysis(request):
'''
    This will perform analysis on the selected profiles or profiles in the cart
'''
#delete older temp files
_delete_temp_files(path=TEMP_DIR, days=TEMP_LIFE)
if request.method == 'POST':
#populate data from the request:
cart = request.POST.get('cart_data', None)
if cart:
##Internal matrix ids in cart
imatrix_ids = request.session.get('imatrix_ids')
else:
#Internal matrix ids
imatrix_ids = request.POST.getlist('matrix_id')
#Check the analysis type
if request.POST.get('scan'):
analysis_type = 'Scan'
(results, matrix_ids) = _scan_matrix(imatrix_ids,
request.POST.get('scan_sequence'),
request.POST.get('threshold_score')
)
elif request.POST.get('cart'):
analysis_type = 'Add to cart'
#call the add to cart function
profiles_added =_add_to_cart(request, imatrix_ids)
request.session['message'] = "You have added "+str(profiles_added)+" profile(s) to the cart."
collection_data = request.POST.get('collection_data')
page_number = request.POST.get('page_number')
page_size = request.POST.get('page_size')
inference_data = request.POST.get('inference_data')
profile_data = request.POST.get('profile_data')
if profile_data:
redirect_url = request.META.get('HTTP_REFERER')+'?cart=1'
elif collection_data and not page_number and not page_size:
redirect_url = request.META.get('HTTP_REFERER')+'?cart=1'
elif inference_data:
return redirect('view_cart')
else:
redirect_url = request.META.get('HTTP_REFERER')+"&cart=1"
return HttpResponseRedirect(redirect_url)
elif request.POST.get('permute'):
analysis_type = 'Permute'
#call the permute function
(results, matrix_ids) = _permute_matrix(imatrix_ids,
request.POST.get('permute_type'),
request.POST.get('permute_format')
)
elif request.POST.get('cluster'):
analysis_type = 'Cluster'
(results, matrix_ids) = _cluster_matrix(imatrix_ids,
request.POST.get('tree'),
request.POST.get('align'),
request.POST.get('ma'),
request.POST.get('cc')
)
elif request.POST.get('randomize'):
analysis_type = 'Randomize'
(results, matrix_ids) = _randomize_matrix(imatrix_ids,
request.POST.get('randomize_format'),
request.POST.get('randomize_count')
)
elif request.POST.get('download'):
analysis_type = 'Download'
(results, matrix_ids) = _download_matrix(imatrix_ids,
request.POST.get('download_type','individual'),
request.POST.get('download_format', 'jaspar')
)
        else:
            analysis_type = 'Unknown'
            matrix_ids = []
            results = 'Something went wrong!'
context = {
"results": results,
'matrix_ids': matrix_ids,
'analysis_type': analysis_type,
'temp_life': TEMP_LIFE,
}
return render(request, 'portal/analysis_results.html', context)
else:
results = "Please select models to perform analysis."
context = {
"results": results,
}
return render(request, 'portal/analysis.html', context)
def _add_to_cart(request, imatrix_ids):
'''
    Add profiles to cart using ajax to download together
'''
#imatrix_ids = request.POST.getlist('matrix_id')
cart = request.session.get('imatrix_ids')
profiles_added = len(imatrix_ids)
if cart:
imatrix_ids.extend(request.session['imatrix_ids'])
request.session['imatrix_ids'] = list(set(imatrix_ids))
queryset = Matrix.objects.filter(id__in=request.session['imatrix_ids']).order_by('name')[:5]
results = _get_matrix_detail_info(queryset)
request.session['cart'] = results
profiles_added = abs(len(request.session['imatrix_ids']) - len(cart))
else:
request.session['imatrix_ids'] = imatrix_ids
queryset = Matrix.objects.filter(id__in=imatrix_ids).order_by('name')[:5]
results = _get_matrix_detail_info(queryset)
request.session['cart'] = results
data ={
'imatrix_ids': request.session['imatrix_ids']
}
return profiles_added
def _scan_matrix(imatrix_ids, fasta_sequence, threshold_score=80):
'''
Scan the matrix models for a fasta sequence
@input:
matrix_ids {list}, fasta_sequence {string}, threshold_score {float}
@return:
results {list}, matrix_ids {list}
'''
from Bio import motifs
import math
from operator import itemgetter
import Bio.SeqIO
from Bio.Alphabet import generic_dna
from Bio.Alphabet.IUPAC import IUPACUnambiguousDNA as unambiguousDNA
threshold_score = float(threshold_score)/100
matrix_ids = []
results = []
fasta_file = _get_current_date()+'_scan_fasta_file_'+str(os.getpid())+'.txt'
input_file = open(TEMP_DIR+'/'+fasta_file, 'w+')
input_file.write(fasta_sequence)
input_file.close()
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
matrix_ids.append(matrix_id)
with open(BASE_DIR+'/download/all_data/FlatFileDir/'+matrix_id+'.pfm') as handle:
motif = motifs.read(handle, "pfm")
motif.pseudocounts = motifs.jaspar.calculate_pseudocounts(motif)
pssm = motif.pssm
max_score = pssm.max
min_score = pssm.min
abs_score_threshold = (max_score - min_score) * threshold_score + min_score
for record in Bio.SeqIO.parse(TEMP_DIR+'/'+fasta_file, "fasta", generic_dna):
record.seq.alphabet = unambiguousDNA()
for position, score in pssm.search(record.seq, threshold=abs_score_threshold):
if not math.isnan(score):
rel_score = (score - min_score) / (max_score - min_score)
if rel_score:
position_max = position
strand = "+"
if position_max < 0:
strand = "-"
position_max = position + len(record.seq)
start_position = position_max + 1
end_position = position_max + pssm.length
sequence = record.seq[start_position-1:end_position]
if strand == "-":
sequence = record.seq[start_position-1:end_position].reverse_complement()
results.append({
'matrix_id': matrix_id,
'name': matrix_name,
'position': position,
'seq_id': record.id,
'start': start_position,
'end': end_position,
'strand': strand,
'score': score,
'rel_score': rel_score,
'sequence': sequence
})
return results, matrix_ids
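# Hedged usage sketch for _scan_matrix (the internal id and sequence below are
# hypothetical). The percentage threshold is mapped onto the PSSM score range:
# with min_score = -20, max_score = 10 and threshold_score = 80 the absolute
# cut-off is (10 - (-20)) * 0.8 + (-20) = 4.0, and only hits scoring at least
# that are reported.
#
#   hits, ids = _scan_matrix(['42'], '>seq1\nACGTACGTACGT\n', 80)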
def _permute_matrix(imatrix_ids, permute_type='intra', permute_format='pfm'):
'''
    Permute the selected matrix models' columns within (intra) or between (inter) matrices
@input:
matrix_ids {list}, permute_type{string}, permute_format {string}
@return:
permuted_file_name {string}, matrix_ids {list}
'''
permuted_file_name = _get_current_date()+'_permuted_matrices_'+str(os.getpid())+'.txt'
matrix_ids = []
input_file = open(TEMP_DIR+'/'+permuted_file_name, 'w')
if permute_type == "intra":
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
input_file.write(_print_matrix_data(_get_matrix_data(imatrix_id, permute=True), matrix_id, matrix_name, format=permute_format))
matrix_ids.append(matrix_id)
input_file.close()
return permuted_file_name, matrix_ids
elif permute_type == "inter":
list_A, list_C, list_G, list_T = [], [], [], []
sizes = []
matrix_names = []
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
data_dic = _get_matrix_data(imatrix_id)
list_A.extend(data_dic['A'])
list_C.extend(data_dic['C'])
list_G.extend(data_dic['G'])
list_T.extend(data_dic['T'])
sizes.append(len(data_dic['A']))
matrix_names.append(matrix_name)
matrix_ids.append(matrix_id)
#Permute columns between matrices
import numpy as np
pfm_array = np.array([ list_A, list_C, list_G, list_T ])
pfm_array = pfm_array[:, np.random.permutation(pfm_array.shape[1])]
#update data_dic with permuted columns
i, j, k = 0, 0, 0
for matrix_id in matrix_ids:
            k = k + sizes[i]
data_dic = {
'A': pfm_array[0][j:k],
'C': pfm_array[1][j:k],
'G': pfm_array[2][j:k],
'T': pfm_array[3][j:k],
}
input_file.write(_print_matrix_data(data_dic, matrix_id, matrix_names[i], format=permute_format))
            j = k
i = i+1
input_file.close()
return permuted_file_name, matrix_ids
else:
raise ValueError("Unknown Permute Type %s" % permute_type)
def _randomize_matrix(imatrix_ids, randomize_format='raw', randomize_count=200):
'''
Randomize the matrix models
@input:
matrix_ids {list}
@return:
randomized_file_name {string}, matrix_ids {list}
'''
randomized_file_name = _get_current_date()+'_randomized_matrices_'+str(os.getpid())+'.txt'
matrix_file = _get_current_date()+'_randomized_matrixfile_'+str(os.getpid())+'.txt'
matrix_ids = []
input_file = open(TEMP_DIR+'/'+matrix_file, 'w+')
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
input_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, 'jaspar'))
matrix_ids.append(matrix_id)
input_file.close()
#Run PWMrandom
#PWMrandom <inputmatrix> <outputmatrix> <nmatrix> <width>
pwm_path = BIN_DIR+'/PWMrandomization/'
    cmd = 'cd '+pwm_path+' && ./PWMrandom '+TEMP_DIR+'/'+matrix_file+' '+TEMP_DIR+'/'+randomized_file_name+' '+str(randomize_count)
#cmd = 'cd '+pwm_path+' && ./PWMrandom '+matrix_file+' '+randomized_file_name+' '+str(randomize_count)+' > '+randomized_file_name
#cmd = 'PWMrandom '+matrix_file+' '+randomized_file_name+' '+str(randomize_count)+' > '+randomized_file_name
os.system(cmd)
try:
        os.remove(TEMP_DIR+'/'+matrix_file)
except:
pass
return randomized_file_name, matrix_ids
def _cluster_matrix(imatrix_ids, tree="UPGMA", align="SWA", ma="PPA", cc="PCC"):
'''
Cluster the matrix models using STAMP
cc: Column comparison metric
ma: Multiple alignment method
tree: Tree-building method
align: Alignment method
@input:
        matrix_ids {list}, tree {string}, align {string}, ma {string}, cc {string}
    @return:
        file_name {string}, matrix_ids {list}
'''
matrix_file = _get_current_date()+'_clustered_matrixfile_'+str(os.getpid())+'.txt'
matrix_ids = []
input_file = open(TEMP_DIR+'/'+matrix_file, 'w')
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
input_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, 'transfac'))
matrix_ids.append(matrix_id)
input_file.close()
score_file = BIN_DIR+'/stamp/ScoreDists/JaspRand_'+cc+'_'+align+'.scores'
stamp_output = 'stamp_output_'+str(os.getpid())
    #Prepare STAMP arguments
#-tf [input file] - Input dataset of motifs in TRANSFAC format [required!]
#-sd [score file] - Input file with random score distributions [required!]
#-cc: Column comparison metric
#-ma - Multiple alignment method
#-tree - Tree-building method
#-align - Alignment method
arguments = '-tf '+TEMP_DIR+'/'+matrix_file+' -sd '+score_file+' -tree '+tree+' -align '+align+' -ma '+ma+' -cc '+cc+' -out '+TEMP_DIR+'/'+stamp_output
cmd = BIN_DIR+'/stamp/stamp '+arguments+' > '+TEMP_DIR+'/'+stamp_output+'.txt'
#cmd = 'stamp '+arguments+' > '+TEMP_DIR+'/'+stamp_output+'.txt'
#Run STAMP
try:
os.system(cmd)
except:
pass
#delete the temp input file
try:
        os.remove(TEMP_DIR+'/'+matrix_file)
except:
pass
import zipfile
from os.path import basename
#Create a zip file of STAMP output files or play with the STAMP output files.
file_name = _get_current_date()+'_STAMP_output_'+str(os.getpid())+'.zip'
stampzip = zipfile.ZipFile(TEMP_DIR+'/'+file_name, 'w', zipfile.ZIP_DEFLATED)
try:
input_file = TEMP_DIR+'/'+stamp_output+'.txt'
stampzip.write(input_file, basename(input_file))
input_file = TEMP_DIR+'/'+stamp_output+'.tree'
stampzip.write(input_file, basename(input_file))
input_file = TEMP_DIR+'/'+stamp_output+'FBP.txt'
stampzip.write(input_file, basename(input_file))
#input_file = TEMP_DIR+'/'+stamp_output+'_match_pairs.txt'
#stampzip.write(input_file, basename(input_file))
#input_file = TEMP_DIR+'/'+stamp_output+'_matched.transfac'
#stampzip.write(input_file, basename(input_file))
except:
pass
stampzip.close()
return file_name, matrix_ids
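# Hedged example call for _cluster_matrix (the internal ids are hypothetical).
# The keyword defaults are forwarded to STAMP as -tree/-align/-ma/-cc, so the
# call below clusters the profiles with the UPGMA/SWA/PPA/PCC settings and
# returns the name of the zipped STAMP output:
#
#   zip_name, ids = _cluster_matrix(['42', '43'])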
def _download_matrix(imatrix_ids, download_type='individual', download_format='pfm'):
'''
Download the matrix models
@input:
matrix_ids {list}
@return:
file_name {string}, matrix_ids {list}
'''
results = []
matrix_ids = []
if download_type == "individual":
import zipfile
from os.path import basename
#Create a zip file
file_name = _get_current_date()+'_JASPAR2018_individual_matrices_'+str(os.getpid())+'_'+download_format+'.zip'
matriceszip = zipfile.ZipFile(TEMP_DIR+'/'+file_name, 'w', zipfile.ZIP_DEFLATED)
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
matrix_ids.append(matrix_id)
input_file_path = TEMP_DIR+'/'+matrix_id+'.'+download_format
input_file = open(input_file_path, 'w')
if download_format == 'meme':
input_file.write(_write_meme_header())
input_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, format=download_format))
input_file.close()
matriceszip.write(input_file_path, basename(input_file_path))
matriceszip.close()
return file_name, matrix_ids
elif download_type == "combined":
file_name = _get_current_date()+'_JASPAR2018_combined_matrices_'+str(os.getpid())+'_'+download_format+'.txt'
download_file = open(TEMP_DIR+'/'+file_name, 'w')
if download_format == 'meme':
download_file.write(_write_meme_header())
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
matrix_ids.append(matrix_id)
#with open(BASE_DIR+'/download/'+download_format+'/'+matrix_id+'.'+download_format) as input_file:
download_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, format=download_format))
download_file.close()
return file_name, matrix_ids
else:
raise ValueError("Unknown Download Type %s" % download_type)
def _write_meme_header(version='4'):
letters = "ACGT"
lines = []
line = "MEME version "+version+"\n\n"
lines.append(line)
line = "ALPHABET= {0}\n\n".format(letters)
lines.append(line)
line = "strands: {0} {1}\n\n".format('+','-')
lines.append(line)
line = "Background letter frequencies\n"
lines.append(line)
line = "A {0} C {1} G {2} T {3}\n\n".format('0.25','0.25','0.25','0.25')
lines.append(line)
return "".join(lines)
def _delete_temp_files(path=TEMP_DIR, days=TEMP_LIFE):
'''
Delete older temp files based on TEMP_DIR and TEMP_LIFE.
Please change the number of days in the jaspar.settings files
@input
path{string}, days{integer}
'''
import time
current_time = time.time()
for f in os.listdir(path):
f = os.path.join(path, f)
if os.stat(f).st_mtime < current_time - days * 86400:
os.remove(f)
def _get_current_date():
import datetime
now = datetime.datetime.now()
return str(str(now.year)+str(now.month).zfill(2)+str(now.day).zfill(2))
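# _get_current_date() returns a zero-padded YYYYMMDD string, e.g. a call made
# on 5 March 2018 would return '20180305'; it is only used here to build
# date-stamped temp/output file names together with the process id.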
def _get_versions_data(base_id):
'''
Return all the versions for a given base_id
'''
queryset = Matrix.objects.filter(base_id=base_id)
results = _get_matrix_detail_info(queryset)
return results
def _get_matrix_id_name(id):
'''
    Get matrix id and name from internal matrix id
'''
queryset = Matrix.objects.get(pk=id)
return (queryset.base_id+'.'+str(queryset.version), queryset.name)
def _get_matrix_data(id, revcomp=False, permute=False):
'''
Takes internal matrix id and returns matrix data as a dictionary object
'''
data_dic = {}
for base in ('A','C','G','T'):
data_dic[base] = MatrixData.objects.values_list('val', flat=True).filter(id=id, row=base).order_by('col')
#If reverse complement is true, compute revcomp
if revcomp == True:
revcomp_data_dic = {}
revcomp_data_dic['A'] = data_dic['T'].reverse()
revcomp_data_dic['C'] = data_dic['G'].reverse()
revcomp_data_dic['G'] = data_dic['C'].reverse()
revcomp_data_dic['T'] = data_dic['A'].reverse()
data_dic = revcomp_data_dic
#If permute is true, permute the matrix
if permute == True:
import numpy as np
pfm_array = np.array([ data_dic['A'], data_dic['C'], data_dic['G'], data_dic['T'] ])
pfm_array = pfm_array[:, np.random.permutation(pfm_array.shape[1])]
#update data_dic with permuted columns
(data_dic['A'], data_dic['C'], data_dic['G'], data_dic['T']) = (pfm_array[0], pfm_array[1], pfm_array[2], pfm_array[3])
return data_dic
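# Hedged sketch of the structure _get_matrix_data returns: a dict keyed by base
# with the per-column counts in column order, e.g. (values invented)
#
#   {'A': [4, 19, 0], 'C': [16, 0, 20], 'G': [0, 1, 0], 'T': [0, 0, 0]}
#
# With revcomp=True the rows are swapped (A<->T, C<->G) and reversed; with
# permute=True the columns are shuffled with numpy before being returned.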
def _print_matrix_data(matrix_data, matrix_id=None, matrix_name=None, format='pfm'):
"""
    Return the representation of a motif in "pfm", "flate_pfm", "jaspar", "transfac" or "meme" format.
    Adapted from the Biopython jaspar module.
"""
letters = "ACGT"
length = len(matrix_data[letters[0]])
lines = []
if format == 'pfm':
if matrix_id and matrix_name:
line = ">{0} {1}\n".format(matrix_id, matrix_name)
lines.append(line)
for letter in letters:
terms = ["{0:6.2f}".format(float(value)) for value in matrix_data[letter]]
line = "{0}\n".format(" ".join(terms))
lines.append(line)
elif format == 'flate_pfm':
for letter in letters:
terms = ["{0:6.2f}".format(float(value)) for value in matrix_data[letter]]
line = "{0}\n".format(" ".join(terms))
lines.append(line)
elif format == 'jaspar':
line = ">{0}\t{1}\n".format(matrix_id, matrix_name)
lines.append(line)
for letter in letters:
terms = ["{0:6.0f}".format(float(value)) for value in matrix_data[letter]]
line = "{0} [{1} ]\n".format(letter, " ".join(terms))
lines.append(line)
elif format == 'transfac':
line = "AC {0}\n".format(matrix_id)
lines.append(line)
line = "XX\n"
lines.append(line)
line = "ID {0}\n".format(matrix_name)
lines.append(line)
line = "XX\n"
lines.append(line)
line = "DE {0} {1} {2}\n".format(matrix_id, matrix_name, '; From JASPAR 2018')
lines.append(line)
line = "PO\t{0}\t{1}\t{2}\t{3}\n".format('A','C','G','T')
lines.append(line)
for i in range(len(matrix_data[letters[0]])):
line = "{0}\t{1}\t{2}\t{3}\t{4}\n".format(str(i+1).zfill(2), matrix_data['A'][i], matrix_data['C'][i], matrix_data['G'][i], matrix_data['T'][i])
lines.append(line)
line = "XX\n"
lines.append(line)
elif format == 'meme':
line = "MOTIF {0} {1}\n".format(matrix_id, matrix_name)
lines.append(line)
nsites = float(matrix_data['A'][0]) + float(matrix_data['C'][0]) + float(matrix_data['G'][0]) + float(matrix_data['T'][0])
line = "letter-probability matrix: alength= {0} w= {1} nsites= {2} E= {3}\n".format(4,length,int(nsites),'0')
lines.append(line)
for i in range(len(matrix_data[letters[0]])):
line = " {0:6.6f} {1:6.6f} {2:6.6f} {3:6.6f}\n".format(float(matrix_data['A'][i])/nsites, float(matrix_data['C'][i])/nsites, float(matrix_data['G'][i])/nsites, float(matrix_data['T'][i])/nsites)
lines.append(line)
line = "URL {0}\n\n".format('http://jaspar.genereg.net/matrix/'+matrix_id)
lines.append(line)
else:
raise ValueError("Unknown matrix format %s" % format)
# Finished; glue the lines together
text = "".join(lines)
return text
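# Hedged example of the 'jaspar' output of _print_matrix_data for a tiny,
# made-up three-column matrix (the id and name on the header line are
# tab-separated, and counts are right-aligned to a width of 6):
#
#   >MA9999.1   HypotheticalTF
#   A [     4     19      0 ]
#   C [    16      0     20 ]
#   G [     0      1      0 ]
#   T [     0      0      0 ]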
def _get_latest_version(base_id):
    '''
    Get the latest version of a matrix model
    '''
    matrix = Matrix.objects.filter(base_id=base_id).order_by('-version')[0]
    return matrix.version
def _get_pssm(matrix_id):
from Bio import motifs
with open(BASE_DIR+'/download/all_data/FlatFileDir/'+matrix_id+'.pfm') as handle:
motif = motifs.read(handle, "pfm")
motif.pseudocounts = motifs.jaspar.calculate_pseudocounts(motif)
pssm = motif.pssm
return pssm
def _parse_pubmed_id(pubmed_id):
'''
It takes pubmed_id and returns citation as string
'''
import requests
url = "https://www.ncbi.nlm.nih.gov/pmc/utils/ctxp?ids="+str(pubmed_id)+"&report=citeproc"
r = requests.get(url)
data = r.json()
citation = data['author'][0]['family']+" "+data['author'][0]['given'][0]+". et al. "+str(data['issued']['date-parts'][0][0])+", "+data['container-title-short']
return citation
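# Hedged sketch of the citation string _parse_pubmed_id assembles from the NCBI
# citeproc JSON: first author's family name and initial, year and the short
# journal title, e.g. (values invented) "Smith J. et al. 2018, Nucleic Acids Res".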
def view_cart(request):
'''
This will show the cart page with list of profiles
'''
setattr(request, 'view', 'cart')
cart = request.session.get('imatrix_ids')
removed = False
if cart:
matrix_id = request.GET.get('remove')
if matrix_id and matrix_id in cart:
cart.remove(matrix_id)
request.session['imatrix_ids'] = cart
removed = matrix_id
queryset = Matrix.objects.filter(id__in=cart)
results = _get_matrix_detail_info(queryset)
request.session['cart'] = results[:5]
else:
results = None
return render(request, 'portal/cart.html', {'matrices': results, 'removed': removed})
def empty_cart(request):
'''
This will empty the cart
'''
setattr(request, 'view', 'cart')
if request.session['imatrix_ids']:
del request.session['imatrix_ids']
if request.session['cart']:
del request.session['cart']
return render(request, 'portal/cart.html', {'matrices': None})
def matrix_clustering(request):
setattr(request, 'view', 'cluster')
return render(request, 'portal/clustering_home.html')
def radial_tree(request, tax_group):
setattr(request, 'view', 'cluster')
return render(request, 'portal/clustering_detail.html', {'tax_group': tax_group})
def genome_tracks(request):
setattr(request, 'view', 'tracks')
return render(request, 'portal/genome_tracks.html')
def documentation(request):
'''
This will show the documentation page
'''
setattr(request, 'view', 'docs')
return render(request, 'portal/documentation.html')
def download_data(request):
'''
This will show the downloads page
'''
setattr(request, 'view', 'downloads')
tax_groups = ['Vertebrates', 'Plants','Insects','Nematodes','Fungi','Urochordates']
collections = ['CORE', 'CNE', 'PHYLOFACTS','SPLICE','POLII','FAM','PBM','PBM_HOMEO','PBM_HLH']
return render(request, 'portal/downloads.html', {'tax_groups': tax_groups, 'collections':collections})
def internal_download_data(request):
'''
This function prepares the data for downloads page
To run this function enable the below line in urls.py and open the ROOT/downloads-internal/ URL in the browser.
#url(r'^downloads-internal/$', views.internal_download_data, name='internal_download_data'),
'''
results = []
matrix_ids = []
matrix_version = request.GET.get('version', 'all')
#download_format = request.GET.get('format', 'jaspar')
download_formats = ['jaspar','meme','transfac']
#download_formats = ['meme']
collections = ['CORE', 'CNE', 'PHYLOFACTS','SPLICE','POLII','FAM','PBM','PBM_HOMEO','PBM_HLH']
#collections = ['CORE']
if matrix_version == 'latest':
redundancy = 'non-redundant'
else:
redundancy = 'redundant'
for collection in collections:
queryset = Matrix.objects.filter(collection=collection.upper())
#If version is set to latest, then get the latest version
if matrix_version == 'latest':
Q_statement = Q()
latest_versions = queryset.values('base_id').annotate(latest_version=Max('version')).order_by()
for version in latest_versions:
Q_statement |=(Q(base_id__exact=version['base_id']) & Q(version=version['latest_version']))
queryset = queryset.filter(Q_statement).distinct()
imatrix_ids = queryset.values_list('id', flat=True)
import zipfile
from os.path import basename
#Create a zip file
for download_format in download_formats:
zip_file_name = 'JASPAR2018_'+str(collection)+'_'+redundancy+'_pfms_'+str(download_format)+'.zip'
txt_file_name = 'JASPAR2018_'+str(collection)+'_'+redundancy+'_pfms_'+str(download_format)+'.txt'
matriceszip = zipfile.ZipFile(TEMP_DIR+'/'+zip_file_name, 'w', zipfile.ZIP_DEFLATED)
download_file = open(TEMP_DIR+'/'+txt_file_name, 'w')
if download_format == 'meme':
download_file.write(_write_meme_header())
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
matrix_ids.append(matrix_id)
input_file_path = TEMP_DIR+'/'+matrix_id+'.'+download_format
input_file = open(input_file_path, 'w')
if download_format == 'meme':
input_file.write(_write_meme_header())
input_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, format=download_format))
input_file.close()
matriceszip.write(input_file_path, basename(input_file_path))
download_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, format=download_format))
matriceszip.close()
download_file.close()
tax_groups = ['vertebrates', 'plants','insects','nematodes','fungi','urochordates']
#tax_groups = ['plants']
for tax_group in tax_groups:
#collections = ['CORE']
queryset = Matrix.objects.filter(collection='CORE')
#get matrix ids
imatrix_ids = queryset.values_list('id', flat=True)
#filter based on tax group
if tax_group and tax_group !='' and tax_group !='all':
imatrix_ids = MatrixAnnotation.objects.values_list('matrix_id', flat=True).filter(
tag='tax_group', val=tax_group.lower(), matrix_id__in=imatrix_ids)
#filter matrix query based on ids
queryset = queryset.filter(id__in=imatrix_ids).distinct()
#If version is set to latest, then get the latest version
if matrix_version == 'latest':
Q_statement = Q()
latest_versions = queryset.values('base_id').annotate(latest_version=Max('version')).order_by()
for version in latest_versions:
Q_statement |=(Q(base_id__exact=version['base_id']) & Q(version=version['latest_version']))
queryset = queryset.filter(Q_statement).distinct()
imatrix_ids = queryset.values_list('id', flat=True)
import zipfile
from os.path import basename
#Create a zip file
for download_format in download_formats:
            zip_file_name = 'JASPAR2018_CORE_'+tax_group+'_'+redundancy+'_pfms_'+download_format+'.zip'
            txt_file_name = 'JASPAR2018_CORE_'+tax_group+'_'+redundancy+'_pfms_'+download_format+'.txt'
matriceszip = zipfile.ZipFile(TEMP_DIR+'/'+zip_file_name, 'w', zipfile.ZIP_DEFLATED)
download_file = open(TEMP_DIR+'/'+txt_file_name, 'w')
if download_format == 'meme':
download_file.write(_write_meme_header())
for imatrix_id in imatrix_ids:
(matrix_id, matrix_name) = _get_matrix_id_name(imatrix_id)
matrix_ids.append(matrix_id)
input_file_path = TEMP_DIR+'/'+matrix_id+'.'+download_format
input_file = open(input_file_path, 'w')
if download_format == 'meme':
input_file.write(_write_meme_header())
input_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, format=download_format))
input_file.close()
matriceszip.write(input_file_path, basename(input_file_path))
download_file.write(_print_matrix_data(_get_matrix_data(imatrix_id), matrix_id, matrix_name, format=download_format))
matriceszip.close()
download_file.close()
return render(request, 'portal/index.html')
def api_documentation(request):
'''
This will show the api documentation page
'''
setattr(request, 'view', 'api-home')
#setattr(request, 'view', 'apidocs')
#return render(request, 'portal/api_documentation.html')
return render(request, 'rest_framework/api_home.html')
def tools(request):
'''
This will show the tools page
'''
setattr(request, 'view', 'tools')
return render(request, 'portal/tools.html')
def contact_us(request):
'''
    Contact us and feedback page to send email
'''
setattr(request, 'view', 'contact_us')
from django.core.mail import EmailMessage
if request.method == 'GET':
form = ContactForm()
else:
form = ContactForm(request.POST)
if form.is_valid():
subject = form.cleaned_data['subject']
from_email = form.cleaned_data['from_email']
#from_name = form.cleaned_data['from_name']
message = form.cleaned_data['message']
email = EmailMessage(
subject,
'From: '+from_email+'\n\nMessage: '+message,
from_email,
SEND_TO_EMAIL,
reply_to=[from_email],
)
try:
#send_mail(subject, message, from_email, SEND_TO_EMAIL)
email.send()
except BadHeaderError:
                context = {'message': 'Invalid header found. Your message did not go through.', 'message_type': 'error', }
return render(request, 'portal/contact_us.html', context)
context = {'message': 'Thank you! Your message has been sent successfully. We will get back to you shortly.', 'message_type': 'success'}
return render(request, 'portal/contact_us.html', context)
return render(request, 'portal/contact_us.html', {'form': form})
def faq(request):
'''
This will show the FAQ page
'''
setattr(request, 'view', 'faq')
return render(request, 'portal/faq.html')
def changelog(request):
'''
This will show the changelog page
'''
setattr(request, 'view', 'changelog')
return render(request, 'portal/changelog.html')
def tour_video(request):
'''
This will show the tour video page
'''
setattr(request, 'view', 'tour')
return render(request, 'portal/tour_video.html')
def about(request):
'''
This will show the about page
'''
setattr(request, 'view', 'about')
matrix_ids = Matrix.objects.filter(collection="CORE").values_list('id', flat=True).distinct()
#count the number of profiles in each taxonomic group
vertibrates = MatrixAnnotation.objects.filter(tag='tax_group', val='vertebrates', matrix_id__in=matrix_ids).values_list('matrix_id', flat=True).distinct().count()
plants = MatrixAnnotation.objects.filter(tag='tax_group', val='plants', matrix_id__in=matrix_ids).values_list('matrix_id', flat=True).distinct().count()
insects = MatrixAnnotation.objects.filter(tag='tax_group', val='insects', matrix_id__in=matrix_ids).values_list('matrix_id', flat=True).distinct().count()
nematodes = MatrixAnnotation.objects.filter(tag='tax_group', val='nematodes', matrix_id__in=matrix_ids).values_list('matrix_id', flat=True).distinct().count()
fungi = MatrixAnnotation.objects.filter(tag='tax_group', val='fungi', matrix_id__in=matrix_ids).values_list('matrix_id', flat=True).distinct().count()
urochordates = MatrixAnnotation.objects.filter(tag='tax_group', val='urochordates', matrix_id__in=matrix_ids).values_list('matrix_id', flat=True).distinct().count()
context = {
'vertibrates': vertibrates,
'plants': plants,
'insects': insects,
'nematodes': nematodes,
'fungi': fungi,
'urochordates': urochordates,
}
return render(request, 'portal/about.html', context)
def post_details(request, year, month, day, slug):
'''
Show individual news/update
'''
post = get_object_or_404(Post, slug=slug)
posts = Post.objects.all().order_by('-date')[:5]
return render(request, 'portal/blog_single.html', {
'post': post,
'posts': posts,
})
def post_list(request):
'''
List all news/updates
'''
posts = Post.objects.all().order_by('-date')
return render(request, 'portal/blog.html', {
'posts': posts,
})
def matrix_versions(request, base_id):
'''
This will show the details of a matrix versions on base_id
'''
setattr(request, 'view', 'versions')
queryset = Matrix.objects.filter(base_id=base_id)
results = _get_matrix_detail_info(queryset)
return render(request, 'portal/search.html', {'matrices': results})
def profile_versions(request):
'''
This will show the profile versions page
'''
setattr(request, 'view', 'profile-versions')
return render(request, 'portal/profile_versions.html')
def url_redirection(request):
'''
Redirect the old URL request to new URL structure
'''
matrix_id = request.GET.get('ID', None)
collection = request.GET.get('db', None)
tax_group = request.GET.get('tax_group', None)
url_path = request.get_full_path()
if matrix_id:
return redirect('/matrix/'+matrix_id)
elif collection and tax_group:
return redirect('/search/?q=&collection='+collection.upper()+'&tax_group='+tax_group)
elif url_path == '/html/DOWNLOAD':
return redirect('/downloads')
else:
#return render(request, '404.html', status=404)
return redirect('index')
def page_not_found(request):
'''
    Return a custom 404 error page
'''
return render(request, '404.html', status=404)
def server_error(request):
'''
    Return a custom 500 error page
'''
return render(request, '500.html', status=500)
|
asntech/jaspar
|
portal/views.py
|
Python
|
bsd-3-clause
| 52,326
|
[
"Biopython"
] |
b69ce33171147b4feb307af9c85c682a84c093c5b4c5bae8cb18191941cada94
|
import csv
import datetime
import json
import mock
import os
import re
import shutil
import tempfile
import urllib
import pyquery
from cStringIO import StringIO
from nose.tools import eq_, ok_, assert_raises
from nose.plugins.skip import SkipTest
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import (
User,
AnonymousUser,
Group,
Permission
)
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from crashstats.base.tests.testbase import DjangoTestCase
from crashstats.crashstats import models
from crashstats.crashstats.management import PERMISSIONS
from .test_models import Response
SAMPLE_STATUS = {
"breakpad_revision": "1035",
"hits": [
{
"date_oldest_job_queued": "2012-09-28T20:39:33+00:00",
"date_recently_completed": "2012-09-28T20:40:00+00:00",
"processors_count": 1,
"avg_wait_sec": 16.407,
"waiting_job_count": 56,
"date_created": "2012-09-28T20:40:02+00:00",
"id": 410655,
"avg_process_sec": 0.914149
},
{
"date_oldest_job_queued": "2012-09-28T20:34:33+00:00",
"date_recently_completed": "2012-09-28T20:35:00+00:00",
"processors_count": 1,
"avg_wait_sec": 13.8293,
"waiting_job_count": 48,
"date_created": "2012-09-28T20:35:01+00:00",
"id": 410654,
"avg_process_sec": 1.24177
},
{
"date_oldest_job_queued": "2012-09-28T20:29:32+00:00",
"date_recently_completed": "2012-09-28T20:30:01+00:00",
"processors_count": 1,
"avg_wait_sec": 14.8803,
"waiting_job_count": 1,
"date_created": "2012-09-28T20:30:01+00:00",
"id": 410653,
"avg_process_sec": 1.19637
}
],
"total": 12,
"socorro_revision": "017d7b3f7042ce76bc80949ae55b41d1e915ab62",
"schema_revision": "schema_12345"
}
SAMPLE_META = """ {
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s"
} """
SAMPLE_UNREDACTED = """ {
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": "%s",
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"json_dump": {
"status": "OK",
"sensitive": {
"exploitability": "high"
}
}
} """
BUG_STATUS = """ {
"hits": [{"id": "222222",
"signature": "FakeSignature1"},
{"id": "333333",
"signature": "FakeSignature1"},
{"id": "444444",
"signature": "Other FakeSignature"}
]
} """
class RobotsTestViews(DjangoTestCase):
@override_settings(ENGAGE_ROBOTS=True)
def test_robots_txt(self):
url = '/robots.txt'
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/plain')
ok_('Allow: /' in response.content)
@override_settings(ENGAGE_ROBOTS=False)
def test_robots_txt_disengage(self):
url = '/robots.txt'
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/plain')
ok_('Disallow: /' in response.content)
class FaviconTestViews(DjangoTestCase):
def test_favicon(self):
tmp_static_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmp_static_root)
favicon_dir = os.path.join(tmp_static_root, 'img')
os.makedirs(favicon_dir)
favicon_path = os.path.join(favicon_dir, 'favicon.ico')
with open(favicon_path, 'wb') as icon:
icon.write('totally fake')
with self.settings(STATIC_ROOT=tmp_static_root):
response = self.client.get('/favicon.ico')
eq_(response.status_code, 200)
ok_('image/x-icon' in response['Content-Type'])
class BaseTestViews(DjangoTestCase):
@mock.patch('requests.get')
def setUp(self, rget):
super(BaseTestViews, self).setUp()
# checking settings.CACHES isn't as safe as `cache.__class__`
if 'LocMemCache' not in cache.__class__.__name__:
raise ImproperlyConfigured(
'The tests requires that you use LocMemCache when running'
)
# we do this here so that the current/versions thing
# is cached since that's going to be called later
# in every view more or less
def mocked_get(url, params, **options):
now = datetime.datetime.utcnow()
yesterday = now - datetime.timedelta(days=1)
if 'products/' in url:
return Response("""
{"products": [
"WaterWolf",
"NightTrain",
"SeaMonkey",
"LandCrab"
],
"hits": {
"WaterWolf": [
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "19.0",
"release": "Beta",
"id": 922},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "18.0",
"release": "Stable",
"id": 920},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "2012-03-09",
"start_date": "2012-03-08",
"featured": true,
"version": "19.1",
"release": "Nightly",
"id": 928},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "20.0",
"release": "Nightly",
"id": 923}
],
"NightTrain":[
{"product": "NightTrain",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "18.0",
"release": "Aurora",
"id": 924},
{"product": "NightTrain",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "19.0",
"release": "Nightly",
"id": 925}
],
"SeaMonkey": [
{"product": "SeaMonkey",
"throttle": "99.00",
"end_date": "%(yesterday)s",
"start_date": "2012-03-08",
"featured": true,
"version": "9.5",
"release": "Alpha",
"id": 921},
{"product": "SeaMonkey",
"throttle": "99.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "10.5",
"release": "nightly",
"id": 926}
],
"LandCrab": [
{"product": "LandCrab",
"throttle": "99.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": false,
"version": "1.5",
"release": "Release",
"id": 927}
]
},
"total": 4
}
""" % {'end_date': now.strftime('%Y-%m-%d'),
'yesterday': yesterday.strftime('%Y-%m-%d')})
raise NotImplementedError(url)
rget.side_effect = mocked_get
from crashstats.crashstats.models import CurrentVersions
api = CurrentVersions()
api.get()
def tearDown(self):
super(BaseTestViews, self).tearDown()
cache.clear()
def _login(self):
user = User.objects.create_user('test', 'test@mozilla.com', 'secret')
assert self.client.login(username='test', password='secret')
return user
def _logout(self):
self.client.logout()
def _add_permission(self, user, codename, group_name='Hackers'):
        group = self._create_group_with_permission(codename, group_name)
user.groups.add(group)
def _create_group_with_permission(self, codename, group_name='Group'):
appname = 'crashstats'
ct, __ = ContentType.objects.get_or_create(
model='',
app_label=appname,
defaults={'name': appname}
)
permission, __ = Permission.objects.get_or_create(
codename=codename,
name=PERMISSIONS[codename],
content_type=ct
)
group, __ = Group.objects.get_or_create(
name=group_name,
)
group.permissions.add(permission)
return group
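# Hedged usage sketch: inside a test, a permission declared in
# crashstats.crashstats.management.PERMISSIONS can be granted to the logged-in
# test user like this (the codename below is illustrative only):
#
#   user = self._login()
#   self._add_permission(user, 'view_pii')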
class TestGoogleAnalytics(BaseTestViews):
@override_settings(GOOGLE_ANALYTICS_ID='xyz123')
@override_settings(GOOGLE_ANALYTICS_DOMAIN='test.biz')
@mock.patch('requests.get')
def test_google_analytics(self, rget):
url = reverse('crashstats:home', args=('WaterWolf',))
def mocked_get(url, params, **options):
if 'products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('xyz123' in response.content)
ok_('test.biz' in response.content)
class TestViews(BaseTestViews):
@mock.patch('requests.get')
def test_handler500(self, rget):
root_urlconf = __import__(
settings.ROOT_URLCONF,
globals(),
locals(),
['urls'],
-1
)
# ...so that we can access the 'handler500' defined in there
par, end = root_urlconf.handler500.rsplit('.', 1)
# ...which is an importable reference to the real handler500 function
views = __import__(par, globals(), locals(), [end], -1)
# ...and finally we have the handler500 function at hand
handler500 = getattr(views, end)
# to make a mock call to the django view functions you need a request
fake_request = RequestFactory().request(**{'wsgi.input': None})
# Need a fake user for the persona bits on crashstats_base
fake_request.user = AnonymousUser()
# the reason for first causing an exception to be raised is because
# the handler500 function is only called by django when an exception
# has been raised which means sys.exc_info() is something.
try:
raise NameError('sloppy code')
except NameError:
# do this inside a frame that has a sys.exc_info()
response = handler500(fake_request)
eq_(response.status_code, 500)
ok_('Internal Server Error' in response.content)
ok_('id="products_select"' not in response.content)
def test_handler404(self):
url = reverse('crashstats:home', args=('Unknown',))
response = self.client.get(url)
eq_(response.status_code, 404)
ok_('Page not Found' in response.content)
ok_('id="products_select"' not in response.content)
def test_homepage_redirect(self):
response = self.client.get('/')
eq_(response.status_code, 302)
destination = reverse('crashstats:home',
args=[settings.DEFAULT_PRODUCT])
ok_(destination in response['Location'])
def test_homepage_products_redirect_without_versions(self):
url = reverse('crashstats:home', args=['WaterWolf'])
# some legacy URLs have this
url += '/versions/'
response = self.client.get(url)
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
destination = reverse('crashstats:home', args=['WaterWolf'])
ok_(destination in response['Location'])
def test_legacy_query_redirect(self):
response = self.client.get('/query/query?foo=bar')
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
ok_(reverse('crashstats:query') + '?foo=bar' in response['Location'])
@mock.patch('requests.get')
def test_buginfo(self, rget):
url = reverse('crashstats:buginfo')
def mocked_get(url, params, **options):
if 'bug?id=' in url:
return Response('{"bugs": [{"product": "allizom.org"}]}')
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_ids': '123,456'})
eq_(response.status_code, 400)
response = self.client.get(url, {'include_fields': 'product'})
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_ids': ' 123, 456 ',
'include_fields': ' product'})
eq_(response.status_code, 200)
struct = json.loads(response.content)
ok_(struct['bugs'])
eq_(struct['bugs'][0]['product'], 'allizom.org')
@mock.patch('requests.get')
def test_buginfo_with_caching(self, rget):
url = reverse('crashstats:buginfo')
def mocked_get(url, params, **options):
if 'bug?id=' in url:
return Response("""{"bugs": [
{"id": "987",
"product": "allizom.org",
"summary": "Summary 1"},
{"id": "654",
"product": "mozilla.org",
"summary": "Summary 2"}
]}""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'bug_ids': '987,654',
'include_fields': 'product,summary'
})
eq_(response.status_code, 200)
struct = json.loads(response.content)
eq_(struct['bugs'][0]['product'], 'allizom.org')
eq_(struct['bugs'][0]['summary'], 'Summary 1')
eq_(struct['bugs'][0]['id'], '987')
eq_(struct['bugs'][1]['product'], 'mozilla.org')
eq_(struct['bugs'][1]['summary'], 'Summary 2')
eq_(struct['bugs'][1]['id'], '654')
# expect to be able to find this in the cache now
cache_key = 'buginfo:987'
eq_(cache.get(cache_key), struct['bugs'][0])
@mock.patch('requests.get')
def test_home(self, rget):
url = reverse('crashstats:home', args=('WaterWolf',))
def mocked_get(url, params, **options):
if '/products' in url and 'versions' not in params:
return Response("""
{
"products": [
"WaterWolf"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 1
}
""")
elif '/products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
# Testing with unknown product
url = reverse('crashstats:home', args=('InternetExplorer',))
response = self.client.get(url)
eq_(response.status_code, 404)
# Testing with unknown version for product
url = reverse('crashstats:home', args=('WaterWolf', '99'))
response = self.client.get(url)
eq_(response.status_code, 404)
# Testing with valid version for product
url = reverse('crashstats:home', args=('WaterWolf', '19.0'))
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_frontpage_json(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
if '/crashes/daily' in url:
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'WaterWolf'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['product_versions'])
eq_(struct['count'], 1)
@mock.patch('requests.get')
def test_frontpage_json_bad_request(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
assert '/crashes/daily' in url, url
if 'product' in params and params['product'] == 'WaterWolf':
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'Neverheardof'})
eq_(response.status_code, 400)
response = self.client.get(url, {'versions': '999.1'})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '99.9' # mismatch
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '19.0'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': 'xxx'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': '-100'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': '10'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'junk'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'build'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'report'
})
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_frontpage_json_no_data_for_version(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
assert '/crashes/daily' in url, url
if 'product' in params and params['product'] == 'WaterWolf':
return Response("""
{
"hits": {}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '20.0'
})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
# Even though there was no data, the product_versions
# property should still exist and be populated.
eq_(struct['count'], 0)
ok_(struct['product_versions'])
selected_product = struct['product_versions'][0]
eq_(selected_product['product'], 'WaterWolf')
eq_(selected_product['version'], '20.0')
@mock.patch('requests.get')
def test_products_list(self, rget):
url = reverse('crashstats:products_list')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"Fennec"
],
"hits": [
{
"sort": "1",
"default_version": "15.0.1",
"release_name": "firefox",
"rapid_release_version": "5.0",
"product_name": "WaterWolf"
},
{
"sort": "3",
"default_version": "10.0.6esr",
"release_name": "mobile",
"rapid_release_version": "5.0",
"product_name": "Fennec"
}],
"total": "2"
}
""")
rget.side_effect = mocked_get
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@mock.patch('requests.get')
def test_gccrashes(self, rget):
url = reverse('crashstats:gccrashes', args=('WaterWolf',))
unknown_product_url = reverse('crashstats:gccrashes',
args=('NotKnown',))
invalid_version_url = reverse('crashstats:gccrashes',
args=('WaterWolf', '99'))
def mocked_get(**options):
if '/products' in options['url']:
return Response("""
{
"products": ["WaterWolf"],
"hits": [
{
"product": "WaterWolf",
"version": "20.0",
"release": "Nightly"
}
],
"total": "1"
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Total Volume of GC Crashes for WaterWolf 19.1'
in response.content)
response = self.client.get(invalid_version_url)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
eq_(doc('.django-form-error li b')[0].text, 'Version:')
response = self.client.get(unknown_product_url)
eq_(response.status_code, 404)
@mock.patch('requests.get')
def test_gccrashes_json(self, rget):
url = reverse('crashstats:gccrashes_json')
def mocked_get(url, params, **options):
if '/gccrashes' in url:
return Response("""
{
"hits": [
[
"20140203000001",
366
]
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-01-27',
'end_date': '2014-02-04'
})
ok_(response.status_code, 200)
ok_('application/json' in response['content-type'])
@mock.patch('requests.get')
def test_gccrashes_json_bad_request(self, rget):
url = reverse('crashstats:gccrashes_json')
def mocked_get(url, **options):
if 'gccrashes/' in url:
return Response("""
{
"hits": [
[
"20140203000001",
366
]
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': 'XXXXXX', # not even close
'end_date': '2014-02-04'
})
ok_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-33', # crazy date
'end_date': '2014-02-04'
})
ok_(response.status_code, 400)
# same but on the end_date
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-13',
'end_date': '2014-02-44' # crazy date
})
ok_(response.status_code, 400)
# start_date > end_date
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-02',
'end_date': '2014-01-01' # crazy date
})
ok_(response.status_code, 400)
@mock.patch('requests.get')
def test_crash_trends(self, rget):
url = reverse('crashstats:crash_trends', args=('WaterWolf',))
no_nightly_url = reverse('crashstats:crash_trends', args=('LandCrab',))
inconsistent_case_url = reverse('crashstats:crash_trends',
args=('SeaMonkey',))
        unknown_product_url = reverse('crashstats:crash_trends',
                                      args=('NotKnown',))
def mocked_get(**options):
if '/products' in options['url']:
return Response("""
{
"products": ["WaterWolf"],
"hits": [
{
"product": "WaterWolf",
"version": "5.0a1",
"release": "Release",
"throttle": 10.0
}
],
"total": "1"
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For WaterWolf' in response.content)
        response = self.client.get(unknown_product_url)
eq_(response.status_code, 404)
# This used to cause a 500 because there is no Nightly associated
# with this product, should 200 now.
response = self.client.get(no_nightly_url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For LandCrab' in response.content)
# This used to cause a 500 because of inconsistent case for
# release names in the DB, causing some releases to be returned
# as 'nightly' instead of 'Nightly'. This should now return 200.
response = self.client.get(inconsistent_case_url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For SeaMonkey' in response.content)
@mock.patch('requests.get')
def test_get_nightlies_for_product_json(self, rget):
url = reverse('crashstats:get_nightlies_for_product_json')
def mocked_get(**options):
if '/products' in options['url']:
return Response("""
{
"hits": [
{
"sort": "1",
"default_version": "5.0a1",
"release_name": "waterwolf",
"rapid_release_version": "5.0",
"product_name": "WaterWolf"
}],
"total": "1"
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'WaterWolf'})
ok_('application/json' in response['content-type'])
eq_(response.status_code, 200)
ok_(response.content, ['20.0'])
response = self.client.get(url, {'product': 'NightTrain'})
eq_(response.status_code, 200)
ok_(response.content, ['18.0', '19.0'])
response = self.client.get(url, {'product': 'Unknown'})
ok_(response.content, [])
@mock.patch('requests.get')
def test_crashtrends_json(self, rget):
url = reverse('crashstats:crashtrends_json')
def mocked_get(url, params, **options):
ok_('start_date' in params)
eq_('2012-10-01', params['start_date'])
ok_('end_date' in params)
eq_('2012-10-10', params['end_date'])
if '/crashtrends' in url:
return Response("""
{
"crashtrends": [{
"build_date": "2012-10-10",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 6,
"report_count": 144,
"report_date": "2012-10-04",
"product_name": "WaterWolf"
},
{
"build_date": "2012-10-06",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 2,
"report_count": 162,
"report_date": "2012-10-08",
"product_name": "WaterWolf"
},
{
"build_date": "2012-09-29",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 5,
"report_count": 144,
"report_date": "2012-10-04",
"product_name": "WaterWolf"
}]
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2012-10-01',
'end_date': '2012-10-10'
})
        eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
eq_(struct['total'], 2)
# Test with product that does not have a nightly
response = self.client.get(url, {
'product': 'LandCrab',
'version': '9.5',
'start_date': '2012-10-01',
'end_date': '2012-10-10'
})
        eq_(response.status_code, 400)
ok_('text/html' in response['content-type'])
ok_(
'LandCrab is not one of the available choices'
in response.content
)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher_ranks_bybug(self, rget, rpost):
url = reverse('crashstats:topcrasher_ranks_bybug')
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789", "signature": "FakeSignature 1"},
{"id": "123456789", "signature": "FakeSignature 3"}]}
""")
def mocked_get(url, params, **options):
if '/signaturesummary' in url:
return Response("""
[
{
"version_string": "18.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "WaterWolf",
"category": "XXX",
"crashes": "1234",
"installations": "5679",
"null_count" : "456",
"low_count": "789",
"medium_count": "123",
"high_count": "1200",
"report_date": "2013-01-01",
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"vendor_hex" : "0x8086",
"adapter_hex": " 0x2972",
"vendor_name": "abc",
"adapter_name" : "def"
},
{
"version_string": "18.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "NightTrain",
"category": "XXX",
"crashes": "1234",
"installations": "5679",
"null_count" : "456",
"low_count": "789",
"medium_count": "123",
"high_count": "1200",
"report_date": "2013-01-01",
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"vendor_hex" : "0x8086",
"adapter_hex": " 0x2972",
"vendor_name": "abc",
"adapter_name" : "def"
},
{
"version_string": "13.0b4",
"percentage": "9.244",
"report_count": 9983,
"product_name": "WaterWolf",
"category": "YYY",
"crashes": "3210",
"installations": "9876",
"null_count" : "123",
"low_count": "456",
"medium_count": "789",
"high_count": "1100",
"report_date": "2013-01-02",
"cpu_abi": "AAA",
"manufacturer": "BBB",
"model": "CCC",
"version": "4.5.6",
"vendor_hex": "0x10de",
"adapter_hex": "0x9804",
"vendor_name": "",
"adapter_name": ""
}
]
""")
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature 1",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
},
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature 2",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 2}
""")
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url, {'bug_number': '123456789'})
ok_('FakeSignature 1' in response.content)
ok_('FakeSignature 2' not in response.content)
ok_('FakeSignature 3' in response.content)
report_list_url = reverse('crashstats:report_list')
report_list_url1 = (
'%s?signature=%s' % (
report_list_url,
urllib.quote_plus('FakeSignature 1')
)
)
ok_(report_list_url1 in response.content)
report_list_url3 = (
'%s?signature=%s' % (
report_list_url,
urllib.quote_plus('FakeSignature 3')
)
)
ok_(report_list_url3 in response.content)
# ensure that multiple products appear
doc = pyquery.PyQuery(response.content)
eq_(doc('td[class=product]')[0].text, 'WaterWolf')
eq_(doc('td[class=product]')[1].text, 'NightTrain')
eq_(response.status_code, 200)
# we also have a signature with no active product+version
ok_('Not found in active topcrash lists' in response.content)
response = self.client.get(url, {'bug_number': '123bad'})
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_number': '1234564654564646'})
eq_(response.status_code, 400)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher(self, rget, rpost):
# first without a version
no_version_url = reverse('crashstats:topcrasher',
args=('WaterWolf',))
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
has_builds_url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', 'build'))
reports_count_default = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
reports_count_100 = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', None, None,
None, '100'))
response = self.client.get(no_version_url)
ok_(url in response['Location'])
def mocked_post(**options):
assert '/bugs/' in options['url'], options['url']
return Response("""{
"hits": [
{"id": 123456789,
"signature": "Something"},
{"id": 22222,
"signature": "FakeSignature1 \u7684 Japanese"},
{"id": 33333,
"signature": "FakeSignature1 \u7684 Japanese"}
]
}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1 \u7684 Japanese",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
if '/products' in url:
return Response("""
{
"hits": [
{
"is_featured": true,
"throttle": 1.0,
"end_date": "string",
"start_date": "integer",
"build_type": "string",
"product": "WaterWolf",
"version": "19.0",
"has_builds": true
}],
"total": "1"
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('By Crash Date' in response.content)
response = self.client.get(has_builds_url)
eq_(response.status_code, 200)
ok_('By Build Date' in response.content)
response = self.client.get(reports_count_default)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
selected_count = doc('.tc-result-count a[class="selected"]')
eq_(selected_count.text(), '50')
# there's actually only one such TD
bug_ids = [x.text for x in doc('td.bug_ids_more > a')]
# higher bug number first
eq_(bug_ids, ['33333', '22222'])
response = self.client.get(reports_count_100)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
selected_count = doc('.tc-result-count a[class="selected"]')
eq_(selected_count.text(), '100')
# also, render the CSV
response = self.client.get(url, {'format': 'csv'})
eq_(response.status_code, 200)
ok_('text/csv' in response['Content-Type'])
# know your fixtures :)
ok_('WaterWolf' in response['Content-Disposition'])
ok_('19.0' in response['Content-Disposition'])
        # we should be able to unpack it
reader = csv.reader(StringIO(response.content))
line1, line2 = reader
eq_(line1[0], 'Rank')
try:
eq_(int(line2[0]), 1)
except Exception:
raise SkipTest
# bytestring when exported as CSV with UTF-8 encoding
eq_(line2[4], 'FakeSignature1 \xe7\x9a\x84 Japanese')
def test_topcrasher_with_invalid_version(self):
# 0.1 is not a valid release version
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '0.1'))
response = self.client.get(url)
eq_(response.status_code, 404)
def test_topcrasher_with_product_sans_release(self):
# SnowLion is not a product at all
url = reverse('crashstats:topcrasher',
args=('SnowLion', '0.1'))
response = self.client.get(url)
eq_(response.status_code, 404)
# SeaMonkey is a product but has no active releases
url = reverse('crashstats:topcrasher',
args=('SeaMonkey', '9.5'))
response = self.client.get(url)
eq_(response.status_code, 404)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher_without_any_signatures(self, rget, rpost):
# first without a version
no_version_url = reverse('crashstats:topcrasher',
args=('WaterWolf',))
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
has_builds_url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', 'build'))
response = self.client.get(no_version_url)
ok_(url in response['Location'])
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
if '/products' in url:
return Response("""
{
"hits": [
{
"is_featured": true,
"throttle": 1.0,
"end_date": "string",
"start_date": "integer",
"build_type": "string",
"product": "WaterWolf",
"version": "19.0",
"has_builds": true
}],
"total": "1"
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('By Crash Date' in response.content)
response = self.client.get(has_builds_url)
eq_(response.status_code, 200)
ok_('By Build Date' in response.content)
# also, render the CSV
response = self.client.get(url, {'format': 'csv'})
eq_(response.status_code, 200)
ok_('text/csv' in response['Content-Type'])
# know your fixtures :)
ok_('WaterWolf' in response['Content-Disposition'])
ok_('19.0' in response['Content-Disposition'])
        # no signatures, so the CSV is empty apart from the header
eq_(len(response.content.splitlines()), 1)
reader = csv.reader(StringIO(response.content))
line1, = reader
eq_(line1[0], 'Rank')
def test_topcrasher_without_versions_redirect(self):
response = self.client.get('/topcrasher/products/WaterWolf/versions/')
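        # `and/or` acts like a conditional expression here: 301 when
        # PERMANENT_LEGACY_REDIRECTS is truthy, otherwise 302.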
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
actual_url = reverse('crashstats:topcrasher',
kwargs={'product': 'WaterWolf'})
ok_(response['location'].endswith(actual_url))
@mock.patch('requests.get')
def test_exploitable_crashes_without_product(self, rget):
url = reverse('crashstats:exploitable_crashes_legacy')
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 301)
correct_url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
ok_(response['location'].endswith(correct_url))
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_exploitable_crashes(self, rget, rpost):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
def mocked_post(url, **options):
assert '/bugs' in url, url
return Response({
"hits": [
{"id": "111111111", "signature": "FakeSignature 1"},
{"id": "222222222", "signature": "FakeSignature 3"},
{"id": "101010101", "signature": "FakeSignature"}
]
})
rpost.side_effect = mocked_post
def mocked_get(url, params, **options):
assert '/crashes/exploitability' in url
ok_('product' in params)
eq_('WaterWolf', params['product'])
return Response("""
{
"hits": [
{
"signature": "FakeSignature",
"report_date": "2013-06-06",
"high_count": 4,
"medium_count": 3,
"low_count": 2,
"none_count": 1,
"product_name": "%s",
"version_string": "2.0"
}
],
"total": 1
}
""" % (settings.DEFAULT_PRODUCT,))
rget.side_effect = mocked_get
response = self.client.get(url)
        eq_(response.status_code, 302)
        ok_(settings.LOGIN_URL in response['Location'] + '?next=%s' % url)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 302)
ok_(settings.LOGIN_URL in response['Location'] + '?next=%s' % url)
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('FakeSignature' in response.content)
# only this bug ID should be shown
ok_('101010101' in response.content)
# not these bug IDs
ok_('222222222' not in response.content)
ok_('111111111' not in response.content)
# if you try to mess with the paginator it should just load page 1
response = self.client.get(url, {'page': 'meow'})
        eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_exploitable_crashes_by_product_and_version(self, rget, rpost):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT, '19.0')
)
def mocked_post(url, **options):
assert '/bugs' in url, url
return Response({
"hits": [
{"id": "111111111", "signature": "FakeSignature 1"},
{"id": "222222222", "signature": "FakeSignature 3"},
{"id": "101010101", "signature": "FakeSignature"}
]
})
rpost.side_effect = mocked_post
def mocked_get(url, params, **options):
assert '/crashes/exploitability' in url
ok_('product' in params)
eq_('WaterWolf', params['product'])
ok_('version' in params)
eq_('19.0', params['version'])
return Response("""
{
"hits": [
{
"signature": "FakeSignature",
"report_date": "2013-06-06",
"high_count": 4,
"medium_count": 3,
"low_count": 2,
"none_count": 1,
"product_name": "%s",
"version_string": "123.0"
}
],
"total": 1
}
""" % (settings.DEFAULT_PRODUCT,))
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('FakeSignature' in response.content)
@mock.patch('requests.get')
def test_exploitable_crashes_by_unknown_version(self, rget):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT, '999.0')
)
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 404)
@mock.patch('requests.get')
def test_daily(self, rget):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {
"WaterWolf:20.0": {
"2012-09-23": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "20.0"
}
},
"WaterWolf:19.0": {
"2012-08-23": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "19.0"
}
},
"WaterWolf:18.0": {
"2012-08-13": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "18.0"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0']
})
eq_(response.status_code, 200)
        # XXX any basic tests we can do on response.content?
ok_('18.0' in response.content.split('id="version3"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version2"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version1"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version0"')[1].
split("</select>")[0])
# check that the CSV version is working too
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0'],
'format': 'csv'
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# also, I should be able to read it
reader = csv.reader(response)
        # because response is an iterator that will return a blank line first,
        # we skip that first row
rows = list(reader)[1:]
ok_(rows)
head_row = rows[0]
eq_(head_row[0], 'Date')
eq_(
head_row[1:],
[
'WaterWolf 20.0 Crashes',
'WaterWolf 20.0 ADI',
'WaterWolf 20.0 Throttle',
'WaterWolf 20.0 Ratio',
'WaterWolf 19.0 Crashes',
'WaterWolf 19.0 ADI',
'WaterWolf 19.0 Throttle',
'WaterWolf 19.0 Ratio'
]
)
first_row = rows[1]
eq_(first_row[0], '2012-09-23')
        # Test that an early start date doesn't cause problems
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0'],
'date_start': '2010-01-01'
})
eq_(response.status_code, 200)
@mock.patch('crashstats.crashstats.models.Platforms')
@mock.patch('requests.get')
def test_daily_by_os(self, rget, platforms_get):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
ok_('separated_by' in params)
eq_('os', params['separated_by'])
ok_('os' in params)
eq_(['Windows', 'Amiga'], params['os'])
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {
"WaterWolf:20.0:win": {
"2012-09-23": {
"os": "Windows",
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "20.0"
}
},
"WaterWolf:20.0:ami": {
"2012-09-23": {
"os": "Amiga",
"adu": 7377,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 871,
"throttle": 0.1,
"version": "20.0"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_platforms_get():
return [
{'code': 'win', 'name': 'Windows', 'display': True},
{'code': 'ami', 'name': 'Amiga', 'display': True},
{'code': 'win', 'name': 'Windows95'}, # not displayed
]
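        # platforms_get is the mocked Platforms class; calling it returns the
        # mocked instance, so this wires Platforms().get() up to the list above.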
platforms_get().get.side_effect = mocked_platforms_get
response = self.client.get(url, {
'p': 'WaterWolf',
'v': '20.0',
'form_selection': 'by_os'
})
eq_(response.status_code, 200)
        # XXX any basic tests we can do on response.content?
# check that the CSV version is working too
response = self.client.get(url, {
'p': 'WaterWolf',
'v': '20.0',
'format': 'csv',
'form_selection': 'by_os'
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# also, we should be able to read it
reader = csv.reader(response)
        # because response is an iterator that will return a blank line first,
        # we skip that first row
rows = list(reader)[1:]
head_row = rows[0]
first_row = rows[1]
eq_(head_row[0], 'Date')
eq_(
head_row[1:],
[
'WaterWolf 20.0 on Windows Crashes',
'WaterWolf 20.0 on Windows ADI',
'WaterWolf 20.0 on Windows Throttle',
'WaterWolf 20.0 on Windows Ratio',
'WaterWolf 20.0 on Amiga Crashes',
'WaterWolf 20.0 on Amiga ADI',
'WaterWolf 20.0 on Amiga Throttle',
'WaterWolf 20.0 on Amiga Ratio'
]
)
eq_(first_row[0], '2012-09-23')
def test_daily_legacy_redirect(self):
url = reverse('crashstats:daily')
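        # Old-style array parameters (v[]=..., os[]=...) should be rewritten
        # into plain repeated parameters via a permanent (301) redirect.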
response = self.client.get(url + '?p=WaterWolf&v[]=Something')
eq_(response.status_code, 301)
ok_('p=WaterWolf' in response['Location'].split('?')[1])
ok_('v=Something' in response['Location'].split('?')[1])
response = self.client.get(
url + '?p=WaterWolf&os[]=Something&os[]=Else'
)
eq_(response.status_code, 301)
ok_('p=WaterWolf' in response['Location'].split('?')[1])
ok_('os=Something' in response['Location'].split('?')[1])
ok_('os=Else' in response['Location'].split('?')[1])
@mock.patch('requests.get')
def test_daily_with_bad_input(self, rget):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'p': 'WaterWolf',
'date_start': u' \x00'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'date_range_type': 'any old crap'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'hang_type': 'any old crap'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'format': 'csv',
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# last sanity check
response = self.client.get(url, {
'p': 'WaterWolf',
})
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query(self, rget, rpost):
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [
{
"id": "123456",
"signature": "nsASDOMWindowEnumerator::GetNext()"
}
],
"total": 1
}
""")
def mocked_get(url, params, **options):
assert '/search/signatures' in url
if 'products' in params and 'WaterWolf' in params['products']:
return Response("""{
"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 56,
"is_mac": 0,
"numhang": 0
},
{
"count": 13,
"signature": "mySignatureIsCool",
"numcontent": 0,
"is_windows": 10,
"is_linux": 2,
"numplugin": 0,
"is_mac": 1,
"numhang": 0
},
{
"count": 2,
"signature": "mineIsCoolerThanYours",
"numcontent": 0,
"is_windows": 0,
"is_linux": 0,
"numplugin": 0,
"is_mac": 2,
"numhang": 2
},
{
"count": 2,
"signature": null,
"numcontent": 0,
"is_windows": 0,
"is_linux": 0,
"numplugin": 0,
"is_mac": 2,
"numhang": 2
}
],
"total": 4
} """)
elif 'products' in params and 'NightTrain' in params['products']:
return Response('{"hits": [], "total": 0}')
elif 'products' in params and 'SeaMonkey' in params['products']:
ok_('plugin_search_mode' in params)
eq_(params['plugin_search_mode'], 'is_exactly')
return Response("""
{"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 533,
"is_mac": 0,
"numhang": 0,
"pluginname": "superAddOn",
"pluginfilename": "addon.dll",
"pluginversion": "1.2.3"
}],
"total": 1
}
""")
else:
return Response("""
{"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}],
"total": 1
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
# Verify that the passed product is selected in search form
response = self.client.get(url, {'product': 'NightTrain'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
ok_('value="NightTrain" selected' in response.content)
# Verify that the passed version is selected in nav
response = self.client.get(url, {
'product': 'NightTrain',
'version': 'NightTrain:18.0'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
        # Because versions in the search form only get set on DOM ready,
# we here ensure that the version was passed and set by checking
# that the correct version is selected in the versions drop-down.
ok_('option value="18.0" selected' in response.content)
response = self.client.get(url, {
'product': 'WaterWolf',
'date': '2012-01-01'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('mySignatureIsCool' in response.content)
ok_('mineIsCoolerThanYours' in response.content)
ok_('(null signature)' in response.content)
# Test that the default value for query_type is 'contains'
ok_('<option value="contains" selected' in response.content)
# Test with empty results
response = self.client.get(url, {
'product': 'NightTrain',
'date': '2012-01-01'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('The maximum query date' not in response.content)
ok_('table id="signatureList"' not in response.content)
ok_('Results within' in response.content)
ok_('No results were found' in response.content)
response = self.client.get(url, {'query': 'nsASDOMWindowEnumerator'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('123456' in response.content)
# Test that the signature parameter is used as default value
response = self.client.get(url, {'signature': 'myFunctionIsCool'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatures-list"' not in response.content)
ok_('value="myFunctionIsCool"' in response.content)
# Test a simple search containing a crash id
crash_id = '1234abcd-ef56-7890-ab12-abcdef130802'
response = self.client.get(url, {
'query': crash_id,
'query_type': 'simple'
})
eq_(response.status_code, 302)
ok_(crash_id in response['Location'])
# Test a simple search containing a crash id and spaces
crash_id = ' 1234abcd-ef56-7890-ab12-abcdef130802 '
response = self.client.get(url, {
'query': crash_id,
'query_type': 'simple'
})
eq_(response.status_code, 302)
ok_(urllib.quote(crash_id) not in response['Location'])
ok_(crash_id.strip() in response['Location'])
# Test that null bytes break the page cleanly
response = self.client.get(url, {'date': u' \x00'})
eq_(response.status_code, 400)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('Enter a valid date/time' in response.content)
# Test that do_query forces the query
response = self.client.get(url, {
'do_query': 1,
'product': 'WaterWolf'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
# Test that old query types are changed
# Test that plugin data is displayed
response = self.client.get(url, {
'do_query': 1,
'product': 'SeaMonkey',
'plugin_query_type': 'exact',
'process_type': 'plugin',
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('Plugin Filename' in response.content)
ok_('Plugin Name/Ver' in response.content)
ok_('addon.dll' in response.content)
ok_('superAddOn 1.2.3' in response.content)
# Test 'all' is an accepted value for report_type and hang_type
response = self.client.get(url, {
'do_query': 1,
'product': 'WaterWolf',
'hang_type': 'all',
'process_type': 'all',
})
eq_(response.status_code, 200)
ok_('table id="signatureList"' in response.content)
ok_('value="any" checked' in response.content)
        # Test default date
expected = datetime.datetime.utcnow()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(expected.strftime('%m/%d/%Y %H:00:00') in response.content)
# Test passed date
response = self.client.get(url, {
'date': '11/27/2031 10:10:10'
})
eq_(response.status_code, 200)
ok_('11/27/2031 10:10:10' in response.content)
# Test value of build ids
response = self.client.get(url, {
'build_id': '12345'
})
eq_(response.status_code, 200)
ok_('value="12345"' in response.content)
response = self.client.get(url, {
'build_id': '12345,54321'
})
eq_(response.status_code, 200)
ok_('value="12345, 54321"' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_range(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
assert '/search/signatures' in url
response = ','.join('''
{
"count": %(x)s,
"signature": "sig%(x)s",
"numcontent": 0,
"is_windows": %(x)s,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}
''' % {'x': x} for x in range(150))
return Response('{"hits": [%s], "total": 150}' % response)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
# Test an out-of-range date range
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 9
})
eq_(response.status_code, 200)
ok_('The maximum query date' in response.content)
ok_('Admins may log in' in response.content)
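        # note that % binds tighter than `in`, so the string is formatted
        # first and then checked for membership in response.content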
ok_('name="range_value" value="%s"' % settings.QUERY_RANGE_DEFAULT_DAYS
in response.content)
ok_('value="days" selected' in response.content)
# Test an out-of-range date range for a logged in user
user = self._login()
group = self._create_group_with_permission('run_long_queries')
user.groups.add(group)
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 9
})
eq_(response.status_code, 200)
# we're logged in, that works now
ok_('The maximum query date' not in response.content)
# ... but this doesn't
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 30
})
eq_(response.status_code, 200)
ok_('The maximum query date' in response.content)
# an admin won't see that message
ok_('Admins may log in' not in response.content)
ok_('name="range_value" value="%s"' % settings.QUERY_RANGE_DEFAULT_DAYS
in response.content)
ok_('value="days" selected' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_pagination(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
assert '/search/signatures' in url
response = ','.join('''
{
"count": %(x)s,
"signature": "sig%(x)s",
"numcontent": 0,
"is_windows": %(x)s,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}
''' % {'x': x} for x in range(150))
return Response('{"hits": [%s], "total": 150}' % response)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {'do_query': 1})
eq_(response.status_code, 200)
next_page_url = '%s?do_query=1&page=2' % url
ok_(next_page_url in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_summary(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'query': 'test',
'query_type': 'contains'
})
eq_(response.status_code, 200)
ok_('Results within' in response.content)
ok_("crash signature contains 'test'" in response.content)
ok_('the crashing process was of any type' in response.content)
response = self.client.get(url, {
'query': 'test',
'query_type': 'is_exactly',
'build_id': '1234567890',
'product': ['WaterWolf', 'NightTrain'],
'version': ['WaterWolf:18.0'],
'platform': ['mac'],
'process_type': 'plugin',
'plugin_query_type': 'starts_with',
'plugin_query_field': 'filename',
'plugin_query': 'lib'
})
eq_(response.status_code, 200)
ok_('Results within' in response.content)
ok_("crash signature is exactly 'test'" in response.content)
ok_('product is one of WaterWolf, NightTrain' in response.content)
ok_('version is one of WaterWolf:18.0' in response.content)
ok_('platform is one of Mac OS X' in response.content)
ok_('for build 1234567890' in response.content)
ok_('the crashing process was a plugin' in response.content)
ok_('and its filename starts with lib' in response.content)
@override_settings(SEARCH_MIDDLEWARE_IMPL='elasticsearch')
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_settings(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('elasticsearch', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
})
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_url(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('postgres', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
'_force_api_impl': 'postgres'
})
eq_(response.status_code, 200)
@override_settings(SEARCH_MIDDLEWARE_IMPL='mongodb')
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_url_over_settings(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('mysql', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
'_force_api_impl': 'mysql'
})
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_plot_signature(self, rget):
def mocked_get(url, params, **options):
if '/crashes/signature_history' in url:
return Response("""
{
"hits": [],
"total": 0
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
# missing signature
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2011-12-01', '2011-12-02', ''))
response = self.client.get(url)
eq_(response.status_code, 400)
# invalid start date
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2012-02-33', '2012-12-01',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 400)
# invalid end date
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2012-02-28', '2012-13-01',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 400)
# valid dates
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2011-12-01', '2011-12-02',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['signature'])
@mock.patch('requests.get')
def test_explosive_view_without_explosives(self, rget):
url = reverse('crashstats:explosive')
def mocked_get(url, params, **options):
if '/suspicious' in url:
return Response("""
{"hits": [], "total": 0}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
resp = self.client.get(url)
eq_(resp.status_code, 200)
assert 'No explosive crashes found' in resp.content
@mock.patch('requests.get')
def test_explosive_view_with_explosives(self, rget):
url = reverse('crashstats:explosive')
def mocked_get(url, params, **options):
if '/suspicious' in url:
return Response("""
{"hits": [
{"date": "2013-09-01",
"signatures": ["signature1", "signature2"]
}
], "total": 1}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
resp = self.client.get(url)
eq_(resp.status_code, 200)
assert 'is explosive' in resp.content
@mock.patch('requests.get')
def test_explosive_data(self, rget):
url = reverse('crashstats:explosive_data',
args=('signature', '2013-03-05'))
def mocked_get(url, params, **options):
if '/crashes/count_by_day' in url:
return Response("""{
"hits": {
"2013-02-26": 100,
"2013-02-27": 100,
"2013-02-28": 100,
"2013-03-01": 100,
"2013-03-02": 100,
"2013-03-03": 100,
"2013-03-04": 100,
"2013-03-05": 100,
"2013-03-06": 100,
"2013-03-07": 100,
"2013-03-08": 100
}
}""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
resp = json.loads(response.content)
ok_('counts' in resp)
        # returns 11 days of data since the requested date is in the past:
        # the first day is 7 days prior, the last is 3 days after.
eq_(len(resp['counts']), 11)
eq_(resp['counts'][0][0], '2013-02-26')
eq_(resp['counts'][0][1], 100)
eq_(resp['counts'][-1][0], '2013-03-08')
eq_(resp['counts'][-1][1], 100)
@mock.patch('requests.get')
def test_explosive_data_today(self, rget):
now = datetime.datetime.utcnow()
start = now - datetime.timedelta(10)
now = now.strftime('%Y-%m-%d')
start = start.strftime('%Y-%m-%d')
url = reverse('crashstats:explosive_data', args=('signature', now))
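        # The mock below yields one count per day from `start` through `now`
        # inclusive (11 dates for a 10-day delta), matching the eleven "%s"
        # placeholders in the JSON template.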
def mocked_get(url, params, **options):
if '/crashes/count_by_day' in url:
dates = []
current = datetime.datetime.strptime(start, "%Y-%m-%d")
end = datetime.datetime.strptime(now, "%Y-%m-%d")
while current <= end:
dates.append(current.strftime("%Y-%m-%d"))
current += datetime.timedelta(1)
return Response("""{
"hits": {
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100
}
}""" % tuple(dates))
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
resp = json.loads(response.content)
eq_(resp['counts'][0][0], start)
eq_(resp['counts'][0][1], 100)
eq_(resp['counts'][-1][0], now)
eq_(resp['counts'][-1][1], 100)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topchangers(self, rget, rpost):
url = reverse('crashstats:topchangers',
args=('WaterWolf', '19.0'))
bad_url = reverse('crashstats:topchangers',
args=('SeaMonkey', '19.0'))
bad_url2 = reverse('crashstats:topchangers',
args=('WaterWolf', '19.999'))
url_wo_version = reverse('crashstats:topchangers',
args=('WaterWolf',))
def mocked_post(**options):
assert 'by=signatures' in options['url'], options['url']
return Response("""
{"bug_associations": [{"bug_id": "123456789",
"signature": "Something"}]}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response("""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1",
"versions_count": 8,
"changeInRank": 0,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url_wo_version)
eq_(response.status_code, 200)
# invalid version for the product name
response = self.client.get(bad_url)
eq_(response.status_code, 404)
# invalid version for the product name
response = self.client.get(bad_url2)
eq_(response.status_code, 404)
response = self.client.get(url)
eq_(response.status_code, 200)
def test_topchangers_without_versions_redirect(self):
response = self.client.get('/topchangers/products/WaterWolf/versions/')
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
actual_url = reverse('crashstats:topchangers',
kwargs={'product': 'WaterWolf'})
ok_(response['location'].endswith(actual_url))
@mock.patch('requests.get')
def test_signature_summary(self, rget):
def mocked_get(url, params, **options):
if '/signaturesummary' in url:
return Response("""
[
{
"version_string": "12.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "WaterWolf",
"category": "XXX",
"crashes": "1234",
"installations": "5679",
"null_count" : "456",
"low_count": "789",
"medium_count": "123",
"high_count": "1200",
"report_date": "2013-01-01",
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"vendor_hex" : "0x8086",
"adapter_hex": " 0x2972",
"vendor_name": "abc",
"adapter_name" : "def"
},
{
"version_string": "13.0b4",
"percentage": "9.244",
"report_count": 9983,
"product_name": "WaterWolf",
"category": "YYY",
"crashes": "3210",
"installations": "9876",
"null_count" : "123",
"low_count": "456",
"medium_count": "789",
"high_count": "1100",
"report_date": "2013-01-02",
"cpu_abi": "AAA",
"manufacturer": "BBB",
"model": "CCC",
"version": "4.5.6",
"vendor_hex": "0x10de",
"adapter_hex": "0x9804",
"vendor_name": "",
"adapter_name": ""
}
]
""")
raise NotImplementedError(url)
url = reverse('crashstats:signature_summary')
rget.side_effect = mocked_get
response = self.client.get(url, {'range_value': '1',
'signature': 'sig',
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['architectures'])
ok_(struct['flashVersions'])
ok_(struct['percentageByOs'])
ok_(struct['processTypes'])
ok_(struct['productVersions'])
ok_(struct['uptimeRange'])
ok_(struct['distinctInstall'])
ok_(struct['devices'])
ok_(struct['graphics'])
ok_(not struct['canViewExploitability'])
ok_('exploitabilityScore' not in struct)
        # percentages are turned into strings as they're fed straight into
# a mustache template.
# for example,
eq_(struct['uptimeRange'][0]['percentage'], '48.44')
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
response = self.client.get(url, {'range_value': '1',
'signature': 'sig',
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['canViewExploitability'])
ok_(struct['exploitabilityScore'])
@mock.patch('requests.get')
def test_signature_summary_flash_exploitability(self, rget):
def mocked_get(url, params, **options):
if (
'report_type' in params and
params['report_type'] == 'flash_version'
):
assert 'signature' in params
if 'sig1' in params['signature']:
return Response("""
[
{
"category": "11.9.900.117",
"percentage": "50.794",
"report_count": 320
},
{
"category": "11.9.900.152",
"percentage": "45.397",
"report_count": 286
},
{
"category": "11.7.700.224",
"percentage": "1.429",
"report_count": 9
}
]
""")
elif 'sig2' in params['signature']:
return Response("""
[
{
"category": "11.9.900.117",
"percentage": "50.794",
"report_count": 320
},
{
"category": "[blank]",
"percentage": "45.397",
"report_count": 286
},
{
"category": "11.7.700.224",
"percentage": "1.429",
"report_count": 9
}
]
""")
elif '/signaturesummary' in url:
return Response("""
[
{
"version_string": "12.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "WaterWolf",
"category": "XXX",
"crashes": "1234",
"installations": "5679",
"null_count" : "456",
"low_count": "789",
"medium_count": "123",
"high_count": "1200",
"report_date": "2013-01-01",
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"vendor_hex" : "0x8086",
"adapter_hex": " 0x2972",
"vendor_name": "abc",
"adapter_name" : "def"
},
{
"version_string": "13.0b4",
"percentage": "9.244",
"report_count": 9983,
"product_name": "WaterWolf",
"category": "YYY",
"crashes": "3210",
"installations": "9876",
"null_count" : "123",
"low_count": "456",
"medium_count": "789",
"high_count": "1100",
"report_date": "2013-01-02",
"cpu_abi": "AAA",
"manufacturer": "BBB",
"model": "CCC",
"version": "4.5.6",
"vendor_hex": "0x10de",
"adapter_hex": "0x9804",
"vendor_name": "",
"adapter_name": ""
}
]
""")
raise NotImplementedError(url)
url = reverse('crashstats:signature_summary')
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_flash_exploitability')
user.groups.add(group)
response = self.client.get(url, {'range_value': '1',
'signature': 'sig1',
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['canViewExploitability'])
ok_(struct['exploitabilityScore'])
response = self.client.get(url, {'range_value': '1',
'signature': 'sig2', # different
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(not struct['canViewExploitability'])
ok_('exploitabilityScore' not in struct)
@mock.patch('requests.get')
def test_status(self, rget):
def mocked_get(url, **options):
assert '/server_status' in url, url
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('schema_12345' in response.content)
ok_('017d7b3f7042ce76bc80949ae55b41d1e915ab62' in response.content)
ok_('1035' in response.content)
ok_('Sep 28 2012 20:30:01' in response.content)
@mock.patch('requests.get')
def test_status_revision(self, rget):
def mocked_get(url, **options):
assert '/server_status' in url, url
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status_revision')
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response.content, '017d7b3f7042ce76bc80949ae55b41d1e915ab62')
ok_('text/plain' in response['content-type'])
def test_login_required(self):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
response = self.client.get(url)
eq_(response.status_code, 302)
ok_(settings.LOGIN_URL in response['Location'] + '?next=%s' % url)
@mock.patch('requests.get')
def test_status_json(self, rget):
def mocked_get(**options):
assert '/server_status' in options['url'], options['url']
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status_json')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(response.content.strip().startswith('{'))
ok_('017d7b3f7042ce76bc80949ae55b41d1e915ab62' in response.content)
ok_('1035' in response.content)
ok_('2012-09-28T20:30:01+00:00' in response.content)
ok_('application/json' in response['Content-Type'])
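        # the status JSON endpoint is expected to be readable cross-origin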
eq_('*', response['Access-Control-Allow-Origin'])
def test_crontabber_state(self):
url = reverse('crashstats:crontabber_state')
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_crontabber_state_json(self, rget):
url = reverse('crashstats:crontabber_state_json')
sample_data = {
"state": {
"slow-one": {
"next_run": "2013-02-19T01:16:00+00:00",
"first_run": "2012-11-05T23:27:07+00:00",
"last_error": {
"traceback": "error error error",
"type": "<class 'sluggish.jobs.InternalError'>",
"value": "Have already run this for 2012-12-24 23:27"
},
"last_run": "2013-02-09T00:16:00+00:00",
"last_success": "2012-12-24T22:27:07+00:00",
"error_count": 6,
"depends_on": []
}
}
}
def mocked_get(**options):
assert '/crontabber_state' in options['url']
return Response(json.dumps(sample_data))
rget.side_effect = mocked_get
response = self.client.get(url)
ok_('application/json' in response['Content-Type'])
eq_(response.status_code, 200)
eq_(sample_data, json.loads(response.content))
@mock.patch('requests.get')
def test_your_crashes(self, rget):
url = reverse('crashstats:your_crashes')
def mocked_get(url, params, **options):
assert '/supersearch/' in url
if '/supersearch/fields/' in url:
return Response({
'email': {
'name': 'email',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_type': 'StringField',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
}
})
assert 'email' in params
assert params['email'] == 'test@mozilla.com'
return Response({
'hits': [
{
'uuid': '1234abcd-ef56-7890-ab12-abcdef130801',
'date': '2000-01-01T00:00:00'
},
{
'uuid': '1234abcd-ef56-7890-ab12-abcdef130802',
'date': '2000-01-02T00:00:00'
}
],
'total': 2
})
rget.side_effect = mocked_get
# A user needs to be signed in to see this page.
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('1234abcd-ef56-7890-ab12-abcdef130801' in response.content)
ok_('1234abcd-ef56-7890-ab12-abcdef130802' in response.content)
ok_('test@mozilla.com' in response.content)
@mock.patch('requests.get')
def test_your_crashes_no_data(self, rget):
url = reverse('crashstats:your_crashes')
def mocked_get(url, params, **options):
assert '/supersearch/' in url
if '/supersearch/fields/' in url:
return Response({
'email': {
'name': 'email',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_type': 'StringField',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
}
})
assert 'email' in params
assert params['email'] == 'test@mozilla.com'
return Response({
'hits': [],
'total': 0
})
rget.side_effect = mocked_get
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('test@mozilla.com' in response.content)
ok_('no crash report' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
return Response(SAMPLE_UNREDACTED % (
dump,
comment0
))
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
# which bug IDs appear is important and the order matters too
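        # chained comparison: -1 == find('444444') means 444444 is absent,
        # and the < checks mean 333333 is rendered before 222222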
ok_(
-1 ==
response.content.find('444444') <
response.content.find('333333') <
response.content.find('222222')
)
ok_('FakeSignature1' in response.content)
ok_('11cb72f5-eb28-41e1-a8e4-849982120611' in response.content)
comment_transformed = (
comment0
.replace('\\n', '<br>')
.replace('peterbe@mozilla.com', '(email removed)')
.replace('www.p0rn.com', '(URL removed)')
)
ok_(comment_transformed in response.content)
# but the email should have been scrubbed
ok_('peterbe@mozilla.com' not in response.content)
ok_(email0 not in response.content)
ok_(url0 not in response.content)
ok_(
'You need to be signed in to be able to download raw dumps.'
in response.content
)
# Should not be able to see sensitive key from stackwalker JSON
ok_('"sensitive"' not in response.content)
ok_('"exploitability"' not in response.content)
# the email address will appear if we log in
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
assert user.has_perm('crashstats.view_pii')
response = self.client.get(url)
ok_('peterbe@mozilla.com' in response.content)
ok_(email0 in response.content)
ok_(url0 in response.content)
ok_('"sensitive"' in response.content)
ok_('"exploitability"' in response.content)
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_odd_product_and_version(self, rget, rpost):
"""If the processed JSON references an unfamiliar product and
version it should not use that to make links in the nav to
reports for that unfamiliar product and version."""
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
processed_json = SAMPLE_UNREDACTED % (dump, comment0)
assert '"WaterWolf"' in processed_json
assert '"5.0a1"' in processed_json
processed_json = processed_json.replace(
'"WaterWolf"', '"SummerWolf"'
)
processed_json = processed_json.replace(
'"5.0a1"', '"99.9"'
)
return Response(processed_json)
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
# the title should have the "SummerWolf 99.9" in it
doc = pyquery.PyQuery(response.content)
title = doc('title').text()
ok_('SummerWolf' in title)
ok_('99.9' in title)
# there shouldn't be any links to reports for the product
# mentioned in the processed JSON
bad_url = reverse('crashstats:home', args=('SummerWolf',))
ok_(bad_url not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_correlations_failed(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
return Response(SAMPLE_UNREDACTED % (
dump,
comment0
))
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
def test_report_index_invalid_crash_id(self):
# last 6 digits indicate 30th Feb 2012 which doesn't exist
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120230'])
response = self.client.get(url)
eq_(response.status_code, 400)
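    # Illustrative note (not part of the original suite): the last six
    # characters of a crash id encode its submission date as YYMMDD, which is
    # why the "...120230" id above (30th of February 2012) is rejected with a
    # 400.  A minimal sketch of that check, using only the standard library:
    #
    #     import datetime
    #     def crash_id_date_is_valid(crash_id):
    #         try:
    #             datetime.datetime.strptime(crash_id[-6:], '%y%m%d')
    #             return True
    #         except ValueError:
    #             return False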
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_pending_today(self, rget, rpost):
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(404)
rget.side_effect = mocked_get
today = datetime.datetime.utcnow().strftime('%y%m%d')
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982%s' % today])
response = self.client.get(url)
ok_('pendingStatus' in response.content)
eq_(response.status_code, 200)
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
yesterday = yesterday.strftime('%y%m%d')
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982%s' % yesterday])
response = self.client.get(url)
ok_('Crash Not Found' in response.content)
eq_(response.status_code, 200)
url = reverse('crashstats:report_index',
args=['blablabla'])
response = self.client.get(url)
eq_(response.status_code, 400)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_hangid_in_raw_data(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('Hang Minidump' in response.content)
# the HangID in the fixture above
ok_('123456789' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_invalid_InstallTime(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('<th>Install Time</th>' not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_crash_exploitability(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if '/crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index', args=[crash_id])
response = self.client.get(url)
ok_('Exploitability</th>' not in response.content)
# you must be signed in to see exploitability
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
response = self.client.get(url)
ok_('Exploitability</th>' in response.content)
ok_('Unknown Exploitability' in response.content)
@mock.patch('requests.get')
def test_report_index_not_found(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(404)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_("We couldn't find" in response.content)
@mock.patch('requests.get')
def test_report_index_pending(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(408)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Fetching this archived report' in response.content)
@mock.patch('requests.get')
def test_report_index_too_old(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(410)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('This archived report has expired' in response.content)
@mock.patch('requests.get')
def test_report_index_other_error(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response('Scary Error', status_code=500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
assert_raises(
models.BadStatusCodeError,
self.client.get,
url
)
@mock.patch('requests.get')
def test_report_pending_json(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(408)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_pending',
args=[crash_id])
response = self.client.get(url)
expected = {
'status': 'error',
'status_message': ('The report for %s'
' is not available yet.' % crash_id),
'url_redirect': ''
}
eq_(response.status_code, 200)
eq_(expected, json.loads(response.content))
def test_report_index_and_pending_missing_crash_id(self):
url = reverse('crashstats:report_index', args=[''])
response = self.client.get(url)
eq_(response.status_code, 404)
url = reverse('crashstats:report_pending', args=[''])
response = self.client.get(url)
eq_(response.status_code, 404)
def test_report_list(self):
url = reverse('crashstats:report_list')
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 'xxx'
})
eq_(response.status_code, 400)
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('Crash Reports for sig' in response.content)
def test_report_list_all_link(self):
url = reverse('crashstats:report_list')
sig = 'js::jit::EnterBaselineMethod(JSContext*, js::RunState&)'
response = self.client.get(url, {
'product': 'WaterWolf',
'signature': sig
})
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
for link in doc('a'):
if link.text and 'View ALL' in link.text:
ok_(urllib.quote_plus(sig) in link.attrib['href'])
def test_report_list_columns_offered(self):
url = reverse('crashstats:report_list')
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
# The "user_comments" field is a choice
ok_('<option value="user_comments">' in response.content)
# The "URL" field is not a choice
ok_('<option value="URL">' not in response.content)
# also, all fields in models.RawCrash.API_WHITELIST should
# be there
for field in models.RawCrash.API_WHITELIST:
html = '<option value="%s">' % field
ok_(html in response.content)
# but it's different if you're logged in
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
ok_('<option value="user_comments">' in response.content)
ok_('<option value="URL">' in response.content)
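    # Illustrative sketch (an assumption about the view, not drawn from its
    # code): the offered columns are the whitelisted raw-crash fields, and
    # PII-bearing fields such as "URL" are added only when the user has the
    # view_pii permission, which is what the assertions above exercise.
    #
    #     def offered_columns(user):
    #         fields = list(models.RawCrash.API_WHITELIST)
    #         if user.has_perm('crashstats.view_pii'):
    #             fields.append('URL')
    #         return fields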
@mock.patch('requests.get')
def test_report_list_partial_correlations(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('correlations',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# relevant data is put into 'data' attributes
ok_('data-correlation_version="5.0a1"' in response.content)
ok_('data-correlation_os="Mac OS X"' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_correlations_no_data(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('correlations',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# relevant data is put into 'data' attributes
ok_('data-correlation_version=""' in response.content)
ok_('data-correlation_os=""' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_sigurls(self, rget):
really_long_url = (
'http://thisistheworldsfivehundredthirtyfifthslong'
'esturk.com/that/contains/a/path/and/?a=query&'
)
assert len(really_long_url) > 80
def mocked_get(url, params, **options):
            # if no specific product was specified, it should default to all products
ok_('products' in params)
ok_(settings.DEFAULT_PRODUCT not in params['products'])
ok_('ALL' in params['products'])
if '/signatureurls' in url:
return Response("""{
"hits": [
{"url": "http://farm.ville", "crash_count":123},
{"url": "%s", "crash_count": 1}
],
"total": 2
}
""" % (really_long_url))
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('sigurls',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('Must be signed in to see signature URLs' in response.content)
ok_('http://farm.ville' not in response.content)
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# <a href="HERE" title="HERE">HERE</a>
eq_(response.content.count('http://farm.ville'), 3)
# because the label is truncated
# <a href="HERE" title="HERE">HE...</a>
eq_(response.content.count(really_long_url), 2)
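    # Illustrative sketch (hypothetical helper, not taken from the template)
    # of why the counts above differ: a short URL appears three times (href,
    # title and link text), while a long label is truncated, so the full URL
    # only occurs twice.
    #
    #     def truncate_label(url, max_length=80):
    #         if len(url) <= max_length:
    #             return url
    #         return url[:max_length] + '...'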
@mock.patch('requests.get')
def test_report_list_partial_sigurls_specific_product(self, rget):
really_long_url = (
'http://thisistheworldsfivehundredthirtyfifthslong'
'esturk.com/that/contains/a/path/and/?a=query&'
)
assert len(really_long_url) > 80
def mocked_get(url, params, **options):
# 'NightTrain' was specifically requested
ok_('products' in params)
ok_('NightTrain' in params['products'])
if '/signatureurls' in url:
return Response("""{
"hits": [
{"url": "http://farm.ville", "crash_count":123},
{"url": "%s", "crash_count": 1}
],
"total": 2
}
""" % (really_long_url))
raise NotImplementedError(url)
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
url = reverse('crashstats:report_list_partial', args=('sigurls',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'product': 'NightTrain'
})
eq_(response.status_code, 200)
eq_(response.content.count('http://farm.ville'), 3)
@mock.patch('requests.get')
def test_report_list_partial_comments(self, rget):
def mocked_get(url, params, **options):
if '/crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "I LOVE CHEESE cheese@email.com",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('comments',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('email removed' in response.content)
ok_('bob@uncle.com' not in response.content)
ok_('cheese@email.com' not in response.content)
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('email removed' not in response.content)
ok_('bob@uncle.com' in response.content)
ok_('cheese@email.com' in response.content)
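    # Illustrative sketch (hypothetical scrubber, not the project's own
    # implementation): for anonymous users anything that looks like an email
    # address inside a comment is replaced with an "email removed" marker,
    # which is what the assertions above check for.
    #
    #     import re
    #     EMAIL_RE = re.compile(r'[\w.+-]+@[\w-]+\.[\w.-]+')
    #     def scrub_comment(text):
    #         return EMAIL_RE.sub('(email removed)', text)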
@mock.patch('requests.get')
def test_report_list_partial_comments_paginated(self, rget):
called_with_params = []
def mocked_get(url, params, **options):
if '/crashes/comments' in url:
called_with_params.append(params)
if params.get('result_offset'):
return Response({
"hits": [{
"user_comments": "I LOVE HAM",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}],
"total": 2
})
else:
return Response({
"hits": [{
"user_comments": "I LOVE CHEESE",
"date_processed": "2011-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120829"
}],
"total": 2
})
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('comments',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('I LOVE HAM' not in response.content)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'page': 2,
})
eq_(response.status_code, 200)
ok_('I LOVE HAM' in response.content)
ok_('I LOVE CHEESE' not in response.content)
eq_(len(called_with_params), 2)
@mock.patch('requests.get')
def test_report_list_partial_reports(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('0xdeadbeef' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_with_sorting(self, rget):
mock_calls = []
def mocked_get(url, params, **options):
mock_calls.append(params)
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T22:19:59+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'FakeSignature2',
'range_value': 3
}
response = self.client.get(url, data)
eq_(response.status_code, 200)
assert len(mock_calls) == 1
eq_(mock_calls[-1]['sort'], 'date_processed')
ok_('reverse' not in mock_calls[-1])
response = self.client.get(url, dict(
data,
sort='build'
))
eq_(response.status_code, 200)
assert len(mock_calls) == 2
eq_(mock_calls[-1]['sort'], 'build')
ok_('reverse' not in mock_calls[-1])
response = self.client.get(url, dict(
data,
sort='build',
reverse='True'
))
eq_(response.status_code, 200)
assert len(mock_calls) == 3
eq_(mock_calls[-1]['sort'], 'build')
eq_(mock_calls[-1]['reverse'], True)
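    # Illustrative note (an assumption about the view's plumbing): the "sort"
    # and "reverse" query parameters are forwarded to the report/list call,
    # with the string 'True' coerced to a boolean before it reaches the
    # mocked requests.get above.  Roughly:
    #
    #     def sort_params(request):
    #         params = {'sort': request.GET.get('sort', 'date_processed')}
    #         if request.GET.get('reverse') == 'True':
    #             params['reverse'] = True
    #         return params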
@mock.patch('requests.get')
def test_report_list_partial_reports_columns_override(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'c': ['crap', 'date_processed', 'reason', 'os_and_version']
})
eq_(response.status_code, 200)
# 'reason' in _columns
ok_('reason7' in response.content)
# 'address' not in _columns
ok_('0xdeadbeef' not in response.content)
# 'cpu_name' not in _columns
ok_('x86' not in response.content)
# 'os_and_version' not in _columns
ok_('Mac OS X' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_with_rawcrash(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null,
"raw_crash": {
"Winsock_LSP": "Peter",
"SecondsSinceLastCrash": "Bengtsson"
}
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null,
"raw_crash": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'c': ['date_processed', 'Winsock_LSP', 'SecondsSinceLastCrash']
})
eq_(response.status_code, 200)
ok_('Peter' in response.content)
ok_('Bengtsson' in response.content)
# and also the table headers should be there
ok_('Winsock_LSP*' in response.content)
ok_('SecondsSinceLastCrash*' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_page_2(self, rget):
uuids = []
_date = datetime.datetime.now()
for i in range(300):
uuids.append(
'441017f4-e006-4eea-8451-dc20e' +
_date.strftime('%Y%m%d')
)
_date += datetime.timedelta(days=1)
def mocked_get(url, params, **options):
if 'report/list' in url:
result_number = int(params['result_number'])
try:
result_offset = int(params['result_offset'])
except KeyError:
result_offset = 0
first = {
"user_comments": None,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": None,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": None
}
hits = []
for i in range(result_offset, result_offset + result_number):
try:
item = dict(first, uuid=uuids[i])
hits.append(item)
except IndexError:
break
return Response(json.dumps({
"hits": hits,
"total": len(uuids)
}))
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
})
eq_(response.status_code, 200)
ok_(uuids[0] in response.content)
ok_(uuids[-1] not in response.content)
# expect there to be a link with `page=2` in there
report_list_url = reverse('crashstats:report_list')
report_list_url += '?signature=sig'
ok_(report_list_url + '&page=2' in response.content)
# we'll need a copy of this for later
response_first = response
response = self.client.get(url, {
'signature': 'sig',
'page': 2
})
eq_(response.status_code, 200)
ok_(uuids[0] not in response.content)
ok_(uuids[-1] in response.content)
# try to be a smartass
response_zero = self.client.get(url, {
'signature': 'sig',
'page': 0
})
        eq_(response_zero.status_code, 200)
# because with page < 1 you get page=1
tbody_zero = response_zero.content.split('<tbody')[1]
tbody_first = response_first.content.split('<tbody')[1]
eq_(hash(tbody_zero), hash(tbody_first))
response = self.client.get(url, {
'signature': 'sig',
'page': 'xx'
})
eq_(response.status_code, 400)
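    # Illustrative sketch (batch size is an assumption, not read from
    # settings): the mocked report/list service pages through the 300 uuids
    # with result_number and result_offset, so the view's "page" parameter
    # maps to an offset roughly like this:
    #
    #     def page_to_offset(page, result_number=250):
    #         page = max(int(page), 1)   # page < 1 is clamped to 1
    #         return (page - 1) * result_number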
@mock.patch('requests.get')
def test_report_list_partial_reports_non_defaults(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'sig',
'range_unit': settings.RANGE_UNITS[-1],
'process_type': settings.PROCESS_TYPES[-1],
'range_value': 48,
'plugin_field': settings.PLUGIN_FIELDS[-1],
'hang_type': settings.HANG_TYPES[-1],
'plugin_query_type': settings.QUERY_TYPES[-1],
'product': 'NightTrain',
}
response = self.client.get(url, data)
eq_(response.status_code, 200)
def test_report_list_partial_reports_invalid_range_value(self):
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'sig',
'range_unit': 'days',
'process_type': settings.PROCESS_TYPES[-1],
'range_value': 48,
'plugin_field': settings.PLUGIN_FIELDS[-1],
'hang_type': settings.HANG_TYPES[-1],
'plugin_query_type': settings.QUERY_TYPES[-1],
'product': 'NightTrain',
}
response = self.client.get(url, data)
eq_(response.status_code, 400)
response = self.client.get(url, dict(data, range_unit='weeks'))
eq_(response.status_code, 400)
response = self.client.get(url, dict(
data,
range_unit='hours',
range_value=24 * 48
))
eq_(response.status_code, 400)
@mock.patch('requests.post')
def test_report_list_partial_bugzilla(self, rpost):
def mocked_post(url, **options):
if '/bugs/' in url:
return Response({
"hits": [
{"id": 111111,
"signature": "Something"},
{"id": 123456789,
"signature": "Something"}
]
})
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_list_partial', args=('bugzilla',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# not the right signature so it's part of "Related Crash Signatures"
ok_(
response.content.find('Related Crash Signatures') <
response.content.find('123456789')
)
response = self.client.get(url, {
'signature': 'Something',
'range_value': 3
})
eq_(response.status_code, 200)
# now the right signature
ok_('123456789' in response.content)
ok_('111111' in response.content)
        # because bug id 123456789 is greater than 111111 we expect that
        # order in the rendered output
ok_(
response.content.find('123456789') <
response.content.find('111111') <
response.content.find('Related Crash Signatures')
)
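    # Illustrative sketch (assumption, not the view's actual code): bugs whose
    # signature matches the requested one are listed first, sorted by bug id
    # in descending order, and the rest end up under "Related Crash
    # Signatures", which is the ordering the assertions above rely on.
    #
    #     def order_bugs(bugs, signature):
    #         matching = [b for b in bugs if b['signature'] == signature]
    #         related = [b for b in bugs if b['signature'] != signature]
    #         return sorted(matching, key=lambda b: b['id'], reverse=True), related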
@mock.patch('requests.get')
def test_report_list_partial_table(self, rget):
def mocked_get(url, params, **options):
if '/crashes/frequency' in url:
                # these fixtures make sure we stress the possibility that
                # the build_date might be invalid or simply null.
return Response("""
{
"hits": [
{
"count": 1050,
"build_date": "20130806030203",
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1050,
"frequency": 1.0,
"count_linux": 0,
"total": 1050,
"frequency_linux": 0.0,
"frequency_mac": 0.0
},
{
"count": 1150,
"build_date": "notadate",
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1150,
"frequency": 1.0,
"count_linux": 0,
"total": 1150,
"frequency_linux": 0.0,
"frequency_mac": 0.0
},
{
"count": 1250,
"build_date": null,
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1250,
"frequency": 1.0,
"count_linux": 0,
"total": 1250,
"frequency_linux": 0.0,
"frequency_mac": 0.0
}
]
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('table',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('1050 - 100.0%' in response.content)
ok_('1150 - 100.0%' in response.content)
ok_('1250 - 100.0%' in response.content)
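    # Illustrative note (format inferred from the assertions above, not
    # copied from the template): each row renders the build's crash count
    # together with its frequency as a percentage, e.g.
    #
    #     '%d - %.1f%%' % (1050, 1.0 * 100)   # -> '1050 - 100.0%'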
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_redirect_by_prefix(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s"
}
""" % (email0, url0))
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
base_crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
crash_id = settings.CRASH_ID_PREFIX + base_crash_id
assert len(crash_id) > 36
url = reverse('crashstats:report_index', args=[crash_id])
response = self.client.get(url)
correct_url = reverse('crashstats:report_index', args=[base_crash_id])
self.assertRedirects(response, correct_url)
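    # Illustrative sketch (an assumption about the view): when the crash id
    # in the URL carries settings.CRASH_ID_PREFIX, the view strips the prefix
    # and redirects to the canonical 36-character id, e.g.
    #
    #     from django.shortcuts import redirect
    #     if crash_id.startswith(settings.CRASH_ID_PREFIX):
    #         crash_id = crash_id[len(settings.CRASH_ID_PREFIX):]
    #         return redirect('crashstats:report_index', crash_id)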
@mock.patch('requests.get')
def test_report_list_with_no_data(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [],
"total": 0
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
# it sucks to depend on the output like this but it'll do for now since
        # it's quite a rare occurrence.
ok_('</html>' not in response.content) # it's a partial
ok_('no reports in the time period specified' in response.content)
@mock.patch('requests.get')
def test_raw_data(self, rget):
def mocked_get(url, params, **options):
assert '/crash_data' in url
if 'datatype' in params and params['datatype'] == 'raw':
return Response("""
bla bla bla
""".strip())
else:
# default is datatype/meta
return Response("""
{"foo": "bar",
"stuff": 123}
""")
rget.side_effect = mocked_get
crash_id = '176bcd6c-c2ec-4b0c-9d5f-dadea2120531'
json_url = reverse('crashstats:raw_data', args=(crash_id, 'json'))
response = self.client.get(json_url)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % json_url
)
eq_(response.status_code, 302)
user = self._login()
group = self._create_group_with_permission('view_rawdump')
user.groups.add(group)
assert user.has_perm('crashstats.view_rawdump')
response = self.client.get(json_url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
eq_(json.loads(response.content),
{"foo": "bar", "stuff": 123})
dump_url = reverse('crashstats:raw_data', args=(crash_id, 'dmp'))
response = self.client.get(dump_url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/octet-stream')
ok_('bla bla bla' in response.content, response.content)
        # dump files are cached.
        # swap in a different mock and expect no change in the response
def different_mocked_get(url, **options):
if '/crash_data' in url and 'datatype=raw' in url:
return Response("""
SOMETHING DIFFERENT
""".strip())
raise NotImplementedError(url)
rget.side_effect = different_mocked_get
response = self.client.get(dump_url)
eq_(response.status_code, 200)
ok_('bla bla bla' in response.content) # still. good.
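    # Illustrative note (the cache key is an assumption, not taken from the
    # view): raw dumps are cached per crash id, which is why swapping the
    # mocked requests.get above does not change the second response.
    #
    #     from django.core.cache import cache
    #     def get_raw_dump(crash_id):
    #         key = 'raw_dump:%s' % crash_id
    #         dump = cache.get(key)
    #         if dump is None:
    #             dump = fetch_dump_from_middleware(crash_id)  # hypothetical
    #             cache.set(key, dump)
    #         return dump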
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_remembered_date_range_type(self, rget, rpost):
# if you visit the home page, the default date_range_type will be
# 'report' but if you switch to 'build' it'll remember that
def mocked_get(url, params, **options):
if '/products' in url and 'versions' not in params:
return Response("""
{
"products": [
"WaterWolf"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 1
}
""")
elif '/products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
if '/crashes/daily' in url:
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
if '/crashes/signatures' in url:
return Response("""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1",
"versions_count": 8,
"changeInRank": 0,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
raise NotImplementedError(url)
def mocked_post(**options):
assert '/bugs/' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:home', args=('WaterWolf',))
response = self.client.get(url)
eq_(response.status_code, 200)
        regex = re.compile(r'(<a\s+href="\?date_range_type=(\w+)[^>]+)')
for tag, value in regex.findall(response.content):
if value == 'report':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
        # now, like the home page does, fire off an AJAX request to
        # frontpage_json for 'build' instead
frontpage_json_url = reverse('crashstats:frontpage_json')
        frontpage_response = self.client.get(frontpage_json_url, {
            'product': 'WaterWolf',
            'date_range_type': 'build'
        })
        eq_(frontpage_response.status_code, 200)
# load the home page again, and it should be on build date instead
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'build':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
# open topcrashers with 'report'
topcrasher_report_url = reverse(
'crashstats:topcrasher',
kwargs={
'product': 'WaterWolf',
'versions': '19.0',
'date_range_type': 'report'
}
)
response = self.client.get(topcrasher_report_url)
eq_(response.status_code, 200)
# now, go back to the home page, and 'report' should be the new default
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'report':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
# open topcrashers with 'build'
topcrasher_report_url = reverse(
'crashstats:topcrasher',
kwargs={
'product': 'WaterWolf',
'versions': '19.0',
'date_range_type': 'build'
}
)
response = self.client.get(topcrasher_report_url)
eq_(response.status_code, 200)
        # now, go back to the home page, and 'build' should be the new default
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'build':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
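    # Illustrative sketch (the session key name is an assumption): the most
    # recently used date_range_type appears to be remembered server-side and
    # re-applied the next time the home page is rendered, roughly:
    #
    #     def remember_range_type(request, value):
    #         request.session['date_range_type'] = value
    #     def current_range_type(request):
    #         return request.session.get('date_range_type', 'report')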
@mock.patch('requests.get')
def test_correlations_json(self, rget):
url = reverse('crashstats:correlations_json')
def mocked_get(url, params, **options):
ok_('report_type' in params)
eq_(params['report_type'], 'core-counts')
return Response("""
{
"count": 13,
"load": "36% (4/11) vs. 26% (47/180) amd64 with 2 cores"
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(
url,
{'correlation_report_type': 'core-counts',
'product': 'WaterWolf',
'version': '19.0',
'platform': 'Windows NT',
'signature': 'FakeSignature'}
)
ok_(response.status_code, 200)
ok_('application/json' in response['content-type'])
def test_unauthenticated_user_redirected_from_protected_page(self):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
response = self.client.get(url)
self.assertRedirects(
response,
'%s?%s=%s' % (
reverse('crashstats:login'),
REDIRECT_FIELD_NAME,
url,
)
)
def test_login_page_renders(self):
url = reverse('crashstats:login')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Login Required' in response.content)
ok_('Insufficient Privileges' not in response.content)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Login Required' not in response.content)
ok_('Insufficient Privileges' in response.content)
def test_your_permissions_page(self):
url = reverse('crashstats:permissions')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(user.email in response.content)
# make some groups and attach permissions
self._create_group_with_permission(
'view_pii', 'Group A'
)
groupB = self._create_group_with_permission(
'view_exploitability', 'Group B'
)
user.groups.add(groupB)
assert not user.has_perm('crashstats.view_pii')
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(PERMISSIONS['view_pii'] in response.content)
ok_(PERMISSIONS['view_exploitability'] in response.content)
doc = pyquery.PyQuery(response.content)
for row in doc('table.permissions tbody tr'):
cells = []
for td in doc('td', row):
cells.append(td.text.strip())
if cells[0] == PERMISSIONS['view_pii']:
eq_(cells[1], 'No')
elif cells[0] == PERMISSIONS['view_exploitability']:
eq_(cells[1], 'Yes!')
|
bsmedberg/socorro
|
webapp-django/crashstats/crashstats/tests/test_views.py
|
Python
|
mpl-2.0
| 194,868
|
[
"VisIt"
] |
9552dccfefd014d6ca210950a32969fcaa679687b3a0cf7ea8dbffeca6737684
|
#!/usr/bin/python
"""Test to verify bug #353268 is still fixed.
Orca is double reading lines in openoffice with latest Ubuntu live CD.
"""
from macaroon.playback import *
import utils
sequence = MacroSequence()
######################################################################
# 1. Start oowriter.
#
sequence.append(WaitForWindowActivate("Untitled 1 - " + utils.getOOoName("Writer"), None))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
######################################################################
# 2. Enter Alt-f, right arrow and Return. (File->New->Text Document).
#
sequence.append(KeyComboAction("<Alt>f"))
sequence.append(WaitForFocus("New", acc_role=pyatspi.ROLE_MENU))
sequence.append(KeyComboAction("Right"))
sequence.append(WaitForFocus("Text Document", acc_role=pyatspi.ROLE_MENU_ITEM))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
######################################################################
# 3. Enter a couple lines of text:
# "Line 1"
# "Line 2"
#
sequence.append(TypeAction("Line 1", 0, 1000))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
sequence.append(TypeAction("Line 2", 0, 1000))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
######################################################################
# 4. Enter Control-Home to return to the top of the document.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
sequence.append(utils.AssertPresentationAction(
"Return to top of document",
["BRAILLE LINE: 'soffice Application Untitled 2 - " + utils.getOOoName("Writer") + " Frame Untitled 2 - " + utils.getOOoName("Writer") + " RootPane ScrollPane Document view Line 1 \$l'",
" VISIBLE: 'Line 1 $l', cursor=1",
"BRAILLE LINE: 'soffice Application Untitled 2 - " + utils.getOOoName("Writer") + " Frame Untitled 2 - " + utils.getOOoName("Writer") + " RootPane ScrollPane Document view Line 1 \$l'",
" VISIBLE: 'Line 1 $l', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
######################################################################
# 5. Arrow down over the first line of text.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
sequence.append(utils.AssertPresentationAction(
"Arrow down over first line of text",
["BRAILLE LINE: 'Line 2 \$l'",
" VISIBLE: 'Line 2 \$l', cursor=1",
"BRAILLE LINE: 'Line 2 \$l'",
" VISIBLE: 'Line 2 \$l', cursor=1",
"SPEECH OUTPUT: 'Line 2'"]))
######################################################################
# 6. Arrow down over the second line of text.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
sequence.append(utils.AssertPresentationAction(
"Arrow down over second line of text",
["BRAILLE LINE: ' \$l'",
" VISIBLE: ' \$l', cursor=1",
"BRAILLE LINE: ' \$l'",
" VISIBLE: ' \$l', cursor=1",
"SPEECH OUTPUT: 'blank'"]))
######################################################################
# 7. Enter Alt-f, Alt-c to close the Writer application.
#
sequence.append(KeyComboAction("<Alt>f"))
sequence.append(WaitForFocus("New", acc_role=pyatspi.ROLE_MENU))
sequence.append(KeyComboAction("<Alt>c"))
sequence.append(WaitForFocus("Save", acc_role=pyatspi.ROLE_PUSH_BUTTON))
######################################################################
# 8. Enter Tab and Return to discard the current changes.
#
sequence.append(KeyComboAction("Tab"))
sequence.append(WaitForFocus("Discard", acc_role=pyatspi.ROLE_PUSH_BUTTON))
sequence.append(KeyComboAction("Return"))
######################################################################
# 9. Wait for things to get back to normal.
#
sequence.append(PauseAction(3000))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
h4ck3rm1k3/orca-sonar
|
test/keystrokes/oowriter/bug_353268.py
|
Python
|
lgpl-2.1
| 4,251
|
[
"ORCA"
] |
13dcadbd06173ecdf8a4b450689af476d1aa7788ebc28e2809ad130643d22670
|
import os
import re
import sys
import urllib
import BeautifulSoup
import json
#import simplejson
from models import Song as AppSong
import random
from django.http import HttpResponse
import logging
from django.core import serializers
URLS_LIST = ['http://pigeonsandplanes.com/', 'http://www.2dopeboyz.com/', 'http://agrumpyoldmanwithabeard.blogspot.com/', 'http://www.cocaineblunts.com/blunts/?page_id=1074']
URLS_LIST2 = ['http://3hive.com']
URLS_LIST3 = ['http://fakeshoredrive.com/','http://earmilk.com','http://passionweiss.com','http://creamteam.tv',]
MAX_DEPTH = 6
LOG_FILENAME = '/home/hiphopgoblin/logs/user/debug.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
def song(url, text):
if url[-4:] == '.mp3':
try:
s = AppSong(url=url, title=text, count=0)
s.save()
logging.debug('song saved')
except:
logging.debug('song found, could not be saved')
return (url, text)
def site(url, text):
if url[-4:] != '.mp3' and url[:7] == 'http://':
logging.debug(url)
return url
def strip_tags(string):
tag = re.compile(r'<.*?>')
return tag.sub(' ', string)
def separate(lst):
songs = [song(x,y) for x,y in lst]
sites = [site(x,y) for x,y in lst]
return (sites, songs)
class Node(object):
def __init__(self, url, depth, top):
self.url = url
self.children = []
self.songs = []
self.depth = depth
self.topNode = top
def openResources(self):
try:
f = urllib.urlopen(self.url)
logging.debug('urllib worked')
return BeautifulSoup.BeautifulSoup(f.read())
except:
logging.debug('rsrcs opening fail ')
return False
def getUrls(self):
link_list = []
soup = self.openResources()
if soup:
logging.debug('returned a soup obj')
links = soup.findAll('a')
for l in links:
try:
link_list.append((l['href'], strip_tags((' ').join(str(t) for t in l.contents))))
logging.debug('appended' + l['href'])
except:
logging.debug('could not append')
return link_list
def visit(self, scraper):
logging.debug('visited')
if self.depth <= MAX_DEPTH:
urls_list, self.songs = separate(self.getUrls())
self.children = [UnderNode(parent=self, top=self.topNode, url=x) for x in urls_list]
self.pushSongs(scraper)
for x in self.children:
x.visit(scraper)
else:
return
    def pushSongs(self, scraper):
        # list.append returns None, so the original comprehension rebound
        # scraper.songs to a list of Nones; extend the collection in place.
        scraper.songs.extend(self.songs)
class TopNode(Node):
def __init__(self, url):
super(TopNode, self).__init__(url, 0, top=self)
class UnderNode(Node):
def __init__(self, parent, top, url):
super(UnderNode, self).__init__(url, parent.depth + 1, top)
self.topNode = top
    def pushSongs(self, scraper):
        # same fix as in Node.pushSongs: extend in place instead of rebinding
        scraper.songs.extend(self.songs)
class Scraper:
def __init__(self, urls):
self.songs = []
self.topNodes = [TopNode(x) for x in urls]
def collectSongs(self):
self.songs = [x.songs for x in self.topNodes]
return self.songs
class Song:
def __init__(self, url, text):
self.url = url
self.text = text
def __unicode__(self):
return self.url + " --- " + self.text
def scrape(songid=None):
#scraper = Scraper(URLS_LIST)
#for node in scraper.topNodes:
#node.visit()
#for song in scraper.collectSongs():
#print song
#print
json_serializer = serializers.get_serializer("json")()
if songid:
s = AppSong.objects.get(id=songid)
else:
count = len(AppSong.objects.all())
index = random.randint(1,count)
s = AppSong.objects.all()[index-1]
#json.write({"filename":s.url, "title":s.title, "count":s.count,"id":s.id,})
    return json.dumps({"filename": s.url, "title": s.title, "count": s.count, "id": s.id})
def main():
scraper = Scraper(URLS_LIST)
for node in scraper.topNodes:
node.visit(scraper)
def getsongs(request):
logging.debug('called getsongs')
scraper = Scraper(URLS_LIST3)
for node in scraper.topNodes:
node.visit(scraper)
return HttpResponse()
|
zackster/HipHopGoblin
|
hhg/hhg_app/scraper.py
|
Python
|
bsd-3-clause
| 3,878
|
[
"VisIt"
] |
61f16c0127435a2932a8a21519b4c6256e6fc04262f0eef93c7aeae188fb41a0
|
import itertools
import sys
from ...utils.helpers import PickleSerializer
from ...exceptions import SmtlibError
from .expression import (
BitVecVariable,
BoolVariable,
ArrayVariable,
Array,
Bool,
BitVec,
BoolConstant,
ArrayProxy,
BoolEqual,
Variable,
Constant,
)
from .visitors import GetDeclarations, TranslatorSmtlib, get_variables, simplify, replace
import logging
logger = logging.getLogger(__name__)
class ConstraintException(SmtlibError):
"""
Constraint exception
"""
pass
class ConstraintSet:
""" Constraint Sets
An object containing a set of constraints. Serves also as a factory for
new variables.
"""
def __init__(self):
self._constraints = list()
self._parent = None
self._sid = 0
self._declarations = {}
self._child = None
def __reduce__(self):
return (
self.__class__,
(),
{
"_parent": self._parent,
"_constraints": self._constraints,
"_sid": self._sid,
"_declarations": self._declarations,
},
)
def __enter__(self):
assert self._child is None
self._child = self.__class__()
self._child._parent = self
self._child._sid = self._sid
self._child._declarations = dict(self._declarations)
return self._child
def __exit__(self, ty, value, traceback):
self._child._parent = None
self._child = None
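    # Illustrative usage sketch (not part of the original module): forking a
    # constraint set with the context manager freezes the parent until the
    # child goes away, so constraints added to the child never leak back.
    # It assumes factory helpers such as new_bitvec exist elsewhere in the
    # class.
    #
    #     cs = ConstraintSet()
    #     a = cs.new_bitvec(32)
    #     with cs as child:
    #         child.add(a == 42)   # only visible in the child
    #     cs.add(a != 0)           # parent is usable again here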
def __len__(self):
if self._parent is not None:
return len(self._constraints) + len(self._parent)
return len(self._constraints)
def add(self, constraint, check=False):
"""
Add a constraint to the set
:param constraint: The constraint to add to the set.
:param check: Currently unused.
:return:
"""
if isinstance(constraint, bool):
constraint = BoolConstant(constraint)
assert isinstance(constraint, Bool)
constraint = simplify(constraint)
        # If self._child is not None this constraint set has been forked and
        # a derived constraint set may be using it. So we can't add any more
        # constraints to this one. After the child constraint set is deleted
        # we regain the ability to add constraints.
if self._child is not None:
raise ConstraintException("ConstraintSet is frozen")
if isinstance(constraint, BoolConstant):
if not constraint.value:
logger.info("Adding an impossible constant constraint")
self._constraints = [constraint]
else:
return
self._constraints.append(constraint)
if check:
from ...core.smtlib import solver
if not solver.check(self):
raise ValueError("Added an impossible constraint")
def _get_sid(self):
""" Returns a unique id. """
assert self._child is None
self._sid += 1
return self._sid
def __get_related(self, related_to=None):
if related_to is not None:
number_of_constraints = len(self.constraints)
remaining_constraints = set(self.constraints)
related_variables = get_variables(related_to)
related_constraints = set()
added = True
while added:
added = False
logger.debug("Related variables %r", [x.name for x in related_variables])
for constraint in list(remaining_constraints):
if isinstance(constraint, BoolConstant):
if constraint.value:
continue
else:
related_constraints = {constraint}
break
variables = get_variables(constraint)
if related_variables & variables:
remaining_constraints.remove(constraint)
related_constraints.add(constraint)
related_variables |= variables
added = True
logger.debug(
"Reduced %d constraints!!", number_of_constraints - len(related_constraints)
)
else:
related_variables = set()
for constraint in self.constraints:
related_variables |= get_variables(constraint)
related_constraints = set(self.constraints)
return related_variables, related_constraints
def to_string(self, related_to=None, replace_constants=False):
related_variables, related_constraints = self.__get_related(related_to)
if replace_constants:
constant_bindings = {}
for expression in related_constraints:
if (
isinstance(expression, BoolEqual)
and isinstance(expression.operands[0], Variable)
and isinstance(expression.operands[1], (Variable, Constant))
):
constant_bindings[expression.operands[0]] = expression.operands[1]
tmp = set()
result = ""
for var in related_variables:
# FIXME
# band aid hack around the fact that we are double declaring stuff :( :(
if var.declaration in tmp:
logger.warning("Variable '%s' was copied twice somewhere", var.name)
continue
tmp.add(var.declaration)
result += var.declaration + "\n"
translator = TranslatorSmtlib(use_bindings=True)
for constraint in related_constraints:
if replace_constants:
constraint = simplify(replace(constraint, constant_bindings))
# if no variables then it is a constant
if isinstance(constraint, Constant) and constraint.value == True:
continue
translator.visit(constraint)
if replace_constants:
for k, v in constant_bindings.items():
translator.visit(k == v)
for name, exp, smtlib in translator.bindings:
if isinstance(exp, BitVec):
result += f"(declare-fun {name} () (_ BitVec {exp.size}))"
elif isinstance(exp, Bool):
result += f"(declare-fun {name} () Bool)"
elif isinstance(exp, Array):
result += f"(declare-fun {name} () (Array (_ BitVec {exp.index_bits}) (_ BitVec {exp.value_bits})))"
else:
raise ConstraintException(f"Type not supported {exp!r}")
result += f"(assert (= {name} {smtlib}))\n"
constraint_str = translator.pop()
while constraint_str is not None:
if constraint_str != "true":
result += f"(assert {constraint_str})\n"
constraint_str = translator.pop()
return result
def _declare(self, var):
""" Declare the variable `var` """
if var.name in self._declarations:
raise ValueError("Variable already declared")
self._declarations[var.name] = var
return var
def get_declared_variables(self):
""" Returns the variable expressions of this constraint set """
return self._declarations.values()
def get_variable(self, name):
""" Returns the variable declared under name or None if it does not exists """
return self._declarations.get(name)
@property
def declarations(self):
""" Returns the variable expressions of this constraint set """
declarations = GetDeclarations()
for a in self.constraints:
try:
declarations.visit(a)
except RuntimeError:
# TODO: (defunct) move recursion management out of PickleSerializer
if sys.getrecursionlimit() >= PickleSerializer.MAX_RECURSION:
raise ConstraintException(
f"declarations recursion limit surpassed {PickleSerializer.MAX_RECURSION}, aborting"
)
new_limit = sys.getrecursionlimit() + PickleSerializer.DEFAULT_RECURSION
if new_limit <= PickleSerializer.DEFAULT_RECURSION:
sys.setrecursionlimit(new_limit)
return self.declarations
return declarations.result
@property
def constraints(self):
"""
:rtype tuple
:return: All constraints represented by this and parent sets.
"""
if self._parent is not None:
return tuple(self._constraints) + self._parent.constraints
return tuple(self._constraints)
def __iter__(self):
return iter(self.constraints)
def __str__(self):
""" Returns a smtlib representation of the current state """
return self.to_string()
def _make_unique_name(self, name="VAR"):
""" Makes a unique variable name"""
# the while loop is necessary because appending the result of _get_sid()
# is not guaranteed to make a unique name on the first try; a colliding
# name could have been added previously
while name in self._declarations:
name = f"{name}_{self._get_sid()}"
return name
def is_declared(self, expression_var):
""" True if expression_var is declared in this constraint set """
if not isinstance(expression_var, Variable):
raise ValueError(f"Expression must be a Variable (not a {type(expression_var)})")
return any(expression_var is x for x in self.get_declared_variables())
def migrate(self, expression, name_migration_map=None):
""" Migrate an expression created for a different constraint set to self.
Returns an expression that can be used with this constraintSet
All the foreign variables used in the expression are replaced by
variables of this constraint set. If the variable was replaced before
the replacement is taken from the provided migration map.
The migration mapping is updated with new replacements.
:param expression: the potentially foreign expression
:param name_migration_map: mapping of already migrated variables. maps from string name of foreign variable to its currently existing migrated string name. this is updated during this migration.
:return: a migrated expression where all the variables are local. name_migration_map is updated
"""
if name_migration_map is None:
name_migration_map = {}
# name_migration_map -> object_migration_map
# Based on the name mapping in name_migration_map build an object to
# object mapping to be used in the replacing of variables
# inv: object_migration_map's keys should ALWAYS be external/foreign
# expressions, and its values should ALWAYS be internal/local expressions
object_migration_map = {}
# List of foreign vars used in expression
foreign_vars = itertools.filterfalse(self.is_declared, get_variables(expression))
for foreign_var in foreign_vars:
# If a variable with the same name was previously migrated
if foreign_var.name in name_migration_map:
migrated_name = name_migration_map[foreign_var.name]
native_var = self.get_variable(migrated_name)
assert (
native_var is not None
), "name_migration_map contains a variable that does not exist in this ConstraintSet"
object_migration_map[foreign_var] = native_var
else:
# foreign_var was not found in the local declared variables nor
# any variable with the same name was previously migrated
# let's make a new unique internal name for it
migrated_name = foreign_var.name
if migrated_name in self._declarations:
migrated_name = self._make_unique_name(f"{foreign_var.name}_migrated")
# Create and declare a new variable of given type
if isinstance(foreign_var, Bool):
new_var = self.new_bool(name=migrated_name)
elif isinstance(foreign_var, BitVec):
new_var = self.new_bitvec(foreign_var.size, name=migrated_name)
elif isinstance(foreign_var, Array):
# Note that we are discarding the ArrayProxy encapsulation
new_var = self.new_array(
index_max=foreign_var.index_max,
index_bits=foreign_var.index_bits,
value_bits=foreign_var.value_bits,
name=migrated_name,
).array
else:
                    raise NotImplementedError(
                        f"Unknown expression type {type(foreign_var)} encountered during expression migration"
                    )
# Update the var to var mapping
object_migration_map[foreign_var] = new_var
# Update the name to name mapping
name_migration_map[foreign_var.name] = new_var.name
# Actually replace each appearance of migrated variables by the new ones
migrated_expression = replace(expression, object_migration_map)
return migrated_expression
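    # Hypothetical usage sketch for migrate() (not part of the original module):
    # move an expression built against one ConstraintSet into another, reusing the
    # same name map across calls so repeated foreign variables stay consistent.
    #
    #     cs_a, cs_b = ConstraintSet(), ConstraintSet()
    #     x = cs_a.new_bitvec(32, name="x")
    #     name_map = {}
    #     migrated = cs_b.migrate(x + 1, name_migration_map=name_map)
    #     cs_b.add(migrated == 5)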
def new_bool(self, name=None, taint=frozenset(), avoid_collisions=False):
""" Declares a free symbolic boolean in the constraint store
:param name: try to assign name to internal variable representation,
if not unique, a numeric nonce will be appended
:param avoid_collisions: potentially avoid_collisions the variable to avoid name collisions if True
:return: a fresh BoolVariable
"""
if name is None:
name = "B"
avoid_collisions = True
if avoid_collisions:
name = self._make_unique_name(name)
if not avoid_collisions and name in self._declarations:
raise ValueError(f"Name {name} already used")
var = BoolVariable(name, taint=taint)
return self._declare(var)
def new_bitvec(self, size, name=None, taint=frozenset(), avoid_collisions=False):
""" Declares a free symbolic bitvector in the constraint store
:param size: size in bits for the bitvector
:param name: try to assign name to internal variable representation,
if not unique, a numeric nonce will be appended
:param avoid_collisions: potentially avoid_collisions the variable to avoid name collisions if True
:return: a fresh BitVecVariable
"""
if size <= 0:
raise ValueError(f"Bitvec size ({size}) can't be equal to or less than 0")
if name is None:
name = "BV"
avoid_collisions = True
if avoid_collisions:
name = self._make_unique_name(name)
if not avoid_collisions and name in self._declarations:
raise ValueError(f"Name {name} already used")
var = BitVecVariable(size, name, taint=taint)
return self._declare(var)
def new_array(
self,
index_bits=32,
name=None,
index_max=None,
value_bits=8,
taint=frozenset(),
avoid_collisions=False,
default=None,
):
""" Declares a free symbolic array of value_bits long bitvectors in the constraint store.
:param index_bits: size in bits for the array indexes one of [32, 64]
:param value_bits: size in bits for the array values
:param name: try to assign name to internal variable representation,
if not unique, a numeric nonce will be appended
:param index_max: upper limit for indexes on this array (#FIXME)
:param avoid_collisions: potentially avoid_collisions the variable to avoid name collisions if True
:param default: default for not initialized values
:return: a fresh ArrayProxy
"""
if name is None:
name = "A"
avoid_collisions = True
if avoid_collisions:
name = self._make_unique_name(name)
if not avoid_collisions and name in self._declarations:
raise ValueError(f"Name {name} already used")
var = self._declare(ArrayVariable(index_bits, index_max, value_bits, name, taint=taint))
return ArrayProxy(var, default=default)
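# A minimal usage sketch (an assumption, not part of the original module): declare
# a symbolic bitvector, constrain it, and dump the set as SMT-LIB text.
#
#     cs = ConstraintSet()
#     x = cs.new_bitvec(32, name="x")
#     cs.add(x > 10)
#     print(cs.to_string())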
|
montyly/manticore
|
manticore/core/smtlib/constraints.py
|
Python
|
apache-2.0
| 16,759
|
[
"VisIt"
] |
d5b179fae42182df77ef12ecca77e4c5dcd166d6ad4c394849c47fc13ef79f86
|
'''
Code specific to Neuron devices
------------------------------------------
'''
import struct
import datetime
from dali.bus import Bus
import dali.gear.general
from math import sqrt
from tornado import gen
from tornado.ioloop import IOLoop
from modbusclient_tornado import ModbusClientProtocol, StartClient
from pymodbus.pdu import ExceptionResponse
from pymodbus.exceptions import ModbusIOException
from tornado.locks import Semaphore
import modbusclient_rs485
from devices import *
from log import *
import config
import time
from modbusclient_rs485 import AsyncErrorResponse
import subprocess
from unipidali import SyncUnipiDALIDriver
from dali.address import Broadcast, Group
class ENoBoard(Exception):
pass
class ModbusCacheMap(object):
last_comm_time = 0
def __init__(self, modbus_reg_map, neuron):
self.modbus_reg_map = modbus_reg_map
self.neuron = neuron
self.sem = Semaphore(1)
self.registered = {}
self.registered_input = {}
self.frequency = {}
for m_reg_group in modbus_reg_map:
            self.frequency[m_reg_group['start_reg']] = 10000001  # start above any realistic frequency setting so every group is read on the first scan
for index in range(m_reg_group['count']):
if 'type' in m_reg_group and m_reg_group['type'] == 'input':
self.registered_input[(m_reg_group['start_reg'] + index)] = None
else:
self.registered[(m_reg_group['start_reg'] + index)] = None
def get_register(self, count, index, unit=0, is_input=False):
ret = []
for counter in range(index,count+index):
if is_input:
if counter not in self.registered_input:
raise Exception('Unknown register %d' % counter)
elif self.registered_input[counter] is None:
raise Exception('No cached value of register %d on unit %d - read error' % (counter, unit))
ret += [self.registered_input[counter]]
else:
if counter not in self.registered:
raise Exception('Unknown register %d' % counter)
elif self.registered[counter] is None:
raise Exception('No cached value of register %d on unit %d - read error' % (counter, unit))
ret += [self.registered[counter]]
return ret
@gen.coroutine
def do_scan(self, unit=0, initial=False):
if initial:
yield self.sem.acquire()
changeset = []
for m_reg_group in self.modbus_reg_map:
if (self.frequency[m_reg_group['start_reg']] >= m_reg_group['frequency']) or (self.frequency[m_reg_group['start_reg']] == 0): # only read once for every [frequency] cycles
try:
val = None
if 'type' in m_reg_group and m_reg_group['type'] == 'input':
val = yield self.neuron.client.read_input_registers(m_reg_group['start_reg'], m_reg_group['count'], unit=unit)
else:
val = yield self.neuron.client.read_holding_registers(m_reg_group['start_reg'], m_reg_group['count'], unit=unit)
if not isinstance(val, AsyncErrorResponse) and not isinstance(val, ModbusIOException) and not isinstance(val, ExceptionResponse):
self.last_comm_time = time.time()
if 'type' in m_reg_group and m_reg_group['type'] == 'input':
for index in range(m_reg_group['count']):
if (m_reg_group['start_reg'] + index) in self.neuron.datadeps and self.registered_input[(m_reg_group['start_reg'] + index)] != val.registers[index]:
for ddep in self.neuron.datadeps[m_reg_group['start_reg'] + index]:
if (not ((isinstance(ddep, Input) or isinstance(ddep, ULED)))) or ddep.value_delta(val.registers[index]):
changeset += [ddep]
self.registered_input[(m_reg_group['start_reg'] + index)] = val.registers[index]
self.frequency[m_reg_group['start_reg']] = 1
else:
for index in range(m_reg_group['count']):
if (m_reg_group['start_reg'] + index) in self.neuron.datadeps and self.registered[(m_reg_group['start_reg'] + index)] != val.registers[index]:
for ddep in self.neuron.datadeps[m_reg_group['start_reg'] + index]:
if (not ((isinstance(ddep, Input) or isinstance(ddep, ULED) or isinstance(ddep, Relay) or isinstance(ddep, Watchdog)))) or ddep.value_delta(val.registers[index]):
changeset += [ddep]
self.registered[(m_reg_group['start_reg'] + index)] = val.registers[index]
self.frequency[m_reg_group['start_reg']] = 1
except Exception, E:
logger.debug(str(E))
else:
self.frequency[m_reg_group['start_reg']] += 1
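        # Devices whose cached registers changed in this pass are bundled into a
        # Proxy below and broadcast to clients via devents.status().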
if len(changeset) > 0:
proxy = Proxy(set(changeset))
devents.status(proxy)
if initial:
self.sem.release()
def set_register(self, count, index, inp, unit=0, is_input=False):
if len(inp) < count:
raise Exception('Insufficient data to write into registers')
for counter in range(count):
if is_input:
if index + counter not in self.registered_input:
                    raise Exception('Unknown register %d' % (index + counter))
self.neuron.client.write_register(index + counter, 1, inp[counter], unit=unit)
self.registered_input[index + counter] = inp[counter]
else:
if index + counter not in self.registered:
                    raise Exception('Unknown register %d' % (index + counter))
self.neuron.client.write_register(index + counter, 1, inp[counter], unit=unit)
self.registered[index + counter] = inp[counter]
def has_register(self, index, is_input=False):
if is_input:
if index not in self.registered_input:
return False
else:
return True
else:
if index not in self.registered:
return False
else:
return True
@gen.coroutine
def get_register_async(self, count, index, unit=0, is_input=False):
if is_input:
for counter in range(index,count+index):
if counter not in self.registered_input:
raise Exception('Unknown register')
val = yield self.neuron.client.read_input_registers(index, count, unit=unit)
for counter in range(len(val.registers)):
self.registered_input[index+counter] = val.registers[counter]
raise gen.Return(val.registers)
else:
for counter in range(index,count+index):
if counter not in self.registered:
raise Exception('Unknown register')
val = yield self.neuron.client.read_holding_registers(index, count, unit=unit)
for counter in range(len(val.registers)):
self.registered[index+counter] = val.registers[counter]
raise gen.Return(val.registers)
@gen.coroutine
def set_register_async(self, count, index, inp, unit=0, is_input=False):
if is_input:
if len(inp) < count:
raise Exception('Insufficient data to write into registers')
for counter in range(count):
if index + counter not in self.registered_input:
raise Exception('Unknown register')
yield self.neuron.client.write_register(index + counter, 1, inp[counter], unit=unit)
self.registered_input[index + counter] = inp[counter]
else:
if len(inp) < count:
raise Exception('Insufficient data to write into registers')
for counter in range(count):
if index + counter not in self.registered:
raise Exception('Unknown register')
yield self.neuron.client.write_register(index + counter, 1, inp[counter], unit=unit)
self.registered[index + counter] = inp[counter]
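# Access sketch (register numbers are illustrative only): a board definition maps
# register blocks such as {'start_reg': 0, 'count': 10, 'frequency': 1}; do_scan()
# refreshes each block at its configured frequency, and get_register(1, 5) then
# returns the cached value of register 5 without another Modbus transaction.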
class Neuron(object):
def __init__(self, circuit, Config, modbus_server, modbus_port, scan_freq, scan_enabled, hw_dict, direct_access=False, major_group=1, dev_id=0):
self.alias = ""
self.devtype = NEURON
self.dev_id = dev_id
self.circuit = str(circuit)
self.hw_dict = hw_dict
self.datadeps = {}
self.Config = Config
self.direct_access = direct_access
self.modbus_server = modbus_server
self.modbus_port = modbus_port
self.major_group = major_group
self.modbus_address = 0
self.do_scanning = False
self.is_scanning = False
self.scanning_error_triggered = False
if scan_freq == 0:
self.scan_interval = 0
else:
self.scan_interval = 1.0 / scan_freq
self.scan_enabled = scan_enabled
self.boards = list()
self.modbus_cache_map = None
self.versions = []
self.logfile = Config.getstringdef("MAIN", "log_file", "/var/log/evok.log")
def switch_to_async(self, loop, alias_dict):
self.loop = loop
self.client = ModbusClientProtocol()
loop.add_callback(lambda: StartClient(self.client, self.modbus_server, self.modbus_port, self.readboards, callback_args=alias_dict))
@gen.coroutine
def set(self, print_log=None):
if print_log is not None and print_log != 0:
log_tail = subprocess.check_output(["tail", "-n 255", self.logfile])
raise gen.Return(log_tail)
else:
raise gen.Return("")
@gen.coroutine
def readboards(self, alias_dict):
""" Try to read version registers on 3 boards and create subdevices """
logger.info("Reading SPI boards")
for board in self.boards:
del (board)
self.boards = list()
for i in (1, 2, 3):
try:
versions = yield self.client.read_input_registers(1000, 10, unit=i)
if isinstance(versions, ExceptionResponse):
raise ENoBoard("Bad request")
else:
self.versions += [versions.registers]
board = Board(self.Config, i, self, versions.registers, major_group=i, direct_access=self.direct_access, dev_id=self.dev_id)
yield board.parse_definition(self.hw_dict, i)
self.boards.append(board)
except ENoBoard:
logger.info("No board on SPI %d" % i)
continue
except Exception, E:
logger.exception(str(E))
pass
yield config.add_aliases(alias_dict)
def start_scanning(self):
self.do_scanning = True
if not self.is_scanning:
self.loop.call_later(self.scan_interval, self.scan_boards)
self.is_scanning = True
def stop_scanning(self):
if not self.scan_enabled:
self.do_scanning = False
def full(self):
ret = {'dev': 'neuron',
'circuit': self.circuit,
'model': config.up_globals['model'],
'sn': config.up_globals['serial'],
'ver2': config.up_globals['version2'],
'board_count': len(self.boards),
'glob_dev_id': self.dev_id,
'last_comm': 0x7fffffff}
if self.alias != '':
ret['alias'] = self.alias
if self.modbus_cache_map is not None:
ret['last_comm'] = time.time() - self.modbus_cache_map.last_comm_time
return ret
def get(self):
return self.full()
@gen.coroutine
def scan_boards(self):
if self.client.connected:
            try:
                if self.modbus_cache_map is not None:
                    yield self.modbus_cache_map.do_scan()
                self.scanning_error_triggered = False
            except Exception, E:
                if not self.scanning_error_triggered:
                    logger.exception(str(E))
                    self.scanning_error_triggered = True
if self.do_scanning and (self.scan_interval != 0):
self.loop.call_later(self.scan_interval, self.scan_boards)
self.is_scanning = True
else:
self.is_scanning = False
class UartNeuron(object):
def __init__(self, circuit, Config, port, scan_freq, scan_enabled, hw_dict, baud_rate=19200, parity='N', stopbits=1, uart_address=15, major_group=1, device_name='unspecified', direct_access=False, neuron_uart_circuit="None", dev_id=0):
self.alias = ""
self.devtype = NEURON
self.modbus_cache_map = None
self.datadeps = {}
self.boards = list()
self.dev_id = dev_id
self.circuit = "UART_" + str(uart_address) + "_" + str(circuit)
self.hw_dict = hw_dict
self.port = port
self.direct_access = direct_access
self.modbus_address = uart_address
self.device_name = device_name
self.Config = Config
self.do_scanning = False
self.is_scanning = False
self.scanning_error_triggered = False
self.major_group = major_group
self.baud_rate = baud_rate
self.parity = parity
self.stopbits = stopbits
self.neuron_uart_circuit = neuron_uart_circuit
self.hw_board_dict = {}
if scan_freq == 0:
self.scan_interval = 0
else:
self.scan_interval = 1.0 / scan_freq
self.scan_enabled = scan_enabled
self.versions = []
self.logfile = Config.getstringdef("MAIN", "log_file", "/var/log/evok.log")
def switch_to_async(self, loop, alias_dict):
self.loop = loop
if self.port in modbusclient_rs485.client_dict:
self.client = modbusclient_rs485.client_dict[self.port]
else:
self.client = modbusclient_rs485.AsyncModbusGeneratorClient(method='rtu', stopbits=self.stopbits, bytesize=8, parity=self.parity, baudrate=self.baud_rate, timeout=1.5, port=self.port)
modbusclient_rs485.client_dict[self.port] = self.client
loop.add_callback(lambda: modbusclient_rs485.UartStartClient(self, self.readboards, callback_args=alias_dict))
@gen.coroutine
def set(self, print_log=None):
if print_log is not None and print_log != 0:
log_tail = subprocess.check_output(["tail", "-n 255", self.logfile])
raise gen.Return(log_tail)
else:
            raise gen.Return("")
@gen.coroutine
def readboards(self, alias_dict):
logger.info("Reading the UART board on Modbus address %d" % self.modbus_address)
self.boards = list()
try:
for defin in self.hw_dict.definitions:
if defin and (defin['type'] == self.device_name):
self.hw_board_dict = defin
break
self.versions = yield self.client.read_input_registers(1000, 10, unit=self.modbus_address)
if isinstance(self.versions, ExceptionResponse):
self.versions = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
elif isinstance(self.versions, AsyncErrorResponse):
self.versions = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
else:
self.versions = self.versions.registers
board = UartBoard(self.Config, self.circuit, self.modbus_address, self, self.versions, dev_id=self.dev_id, direct_access=self.direct_access)
yield board.parse_definition(self.hw_dict)
self.boards.append(board)
yield config.add_aliases(alias_dict)
except ENoBoard:
logger.info("No board detected on UART %d" % self.modbus_address)
except Exception, E:
Devices.remove_global_device(self.dev_id)
logger.exception(str(E))
pass
def start_scanning(self):
self.do_scanning = True
if not self.is_scanning:
self.loop.call_later(self.scan_interval, self.scan_boards)
self.is_scanning = True
def stop_scanning(self):
if not self.scan_enabled:
self.do_scanning = False
def full(self):
ret = {'dev': 'extension',
'circuit': self.circuit,
'model': self.device_name,
'uart_circuit': self.neuron_uart_circuit,
'uart_port': self.port,
'glob_dev_id': self.dev_id,
'last_comm': 0x7fffffff}
if self.alias != '':
ret['alias'] = self.alias
if self.modbus_cache_map is not None:
ret['last_comm'] = time.time() - self.modbus_cache_map.last_comm_time
return ret
def get(self):
return self.full()
@gen.coroutine
def scan_boards(self, invoc=False):
if self.is_scanning and invoc:
raise gen.Return()
try:
if self.modbus_cache_map is not None:
yield self.modbus_cache_map.do_scan(unit=self.modbus_address)
self.scanning_error_triggered = False
except Exception, E:
if not self.scanning_error_triggered:
logger.debug(str(E))
self.scanning_error_triggered = True
if self.do_scanning and (self.scan_interval != 0):
self.loop.call_later(self.scan_interval, self.scan_boards)
self.is_scanning = True
else:
self.is_scanning = False
class Proxy(object):
def __init__(self, changeset):
self.changeset = changeset
def full(self):
self.result = [c.full() for c in self.changeset]
self.full = self.fullcache
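        # full() has now been rebound to fullcache(), so subsequent calls on this
        # Proxy return the cached result list without recomputing it.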
return self.result
def fullcache(self):
return self.result
class UartBoard(object):
def __init__(self, Config, circuit, modbus_address, neuron, versions, direct_access=False, major_group=1, dev_id=0):
self.alias = ""
self.devtype = BOARD
self.dev_id = dev_id
self.Config = Config
self.circuit = circuit
self.direct_access = direct_access
self.legacy_mode = not (Config.getbooldef('MAIN','use_experimental_api', False))
self.neuron = neuron
self.major_group = major_group
self.modbus_address = modbus_address
self.sw = versions[0]
self.ndi = (versions[1] & 0xff00) >> 8
self.ndo = (versions[1] & 0x00ff)
self.nai = (versions[2] & 0xff00) >> 8
self.nao = (versions[2] & 0x00f0) >> 4
self.nuart = (versions[2] & 0x000f)
self.hw = (versions[3] & 0xff00) >> 8
self.hwv = (versions[3] & 0x00ff)
self.serial = versions[5] + (versions[6] << 16)
self.nai1 = self.nai if self.hw != 0 else 1 # full featured AI (with switched V/A)
self.nai2 = 0 if self.hw != 0 else 1 # Voltage only AI
@gen.coroutine
def set(self, alias=None):
if not alias is None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
        raise gen.Return(self.full())
@gen.coroutine
def initialise_cache(self, cache_definition):
if cache_definition and (self.neuron.device_name == cache_definition['type']):
if cache_definition.has_key('modbus_register_blocks'):
if self.neuron.modbus_cache_map == None:
self.neuron.modbus_cache_map = ModbusCacheMap(cache_definition['modbus_register_blocks'], self.neuron)
yield self.neuron.modbus_cache_map.do_scan(initial=True, unit=self.modbus_address)
yield self.neuron.modbus_cache_map.sem.acquire()
self.neuron.modbus_cache_map.sem.release()
else:
yield self.neuron.modbus_cache_map.sem.acquire()
self.neuron.modbus_cache_map.sem.release()
else:
raise Exception("HW Definition %s requires Modbus register blocks to be specified" % cache_definition['type'])
def parse_feature_di(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
board_counter_reg = m_feature['counter_reg']
board_deboun_reg = m_feature['deboun_reg']
start_index = 0
if m_feature.has_key('start_index'):
start_index = m_feature['start_index']
if m_feature.has_key('ds_modes') and m_feature.has_key('direct_reg') and m_feature.has_key('polar_reg') and m_feature.has_key('toggle_reg'):
_inp = Input("%s_%02d" % (self.circuit, counter + 1 + start_index), self, board_val_reg, 0x1 << (counter % 16),
regdebounce=board_deboun_reg + counter, major_group=0, regcounter=board_counter_reg + (2 * counter), modes=m_feature['modes'],
dev_id=self.dev_id, ds_modes=m_feature['ds_modes'], regmode=m_feature['direct_reg'], regtoggle=m_feature['toggle_reg'],
regpolarity=m_feature['polar_reg'], legacy_mode=self.legacy_mode)
else:
_inp = Input("%s_%02d" % (self.circuit, counter + 1 + start_index), self, board_val_reg, 0x1 << (counter % 16),
regdebounce=board_deboun_reg + counter, major_group=0, regcounter=board_counter_reg + (2 * counter), modes=m_feature['modes'],
dev_id=self.dev_id, legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg):
self.neuron.datadeps[board_val_reg]+=[_inp]
else:
self.neuron.datadeps[board_val_reg] = [_inp]
if self.neuron.datadeps.has_key(board_counter_reg + (2 * counter)):
self.neuron.datadeps[board_counter_reg + (2 * counter)]+=[_inp]
else:
self.neuron.datadeps[board_counter_reg + (2 * counter)] = [_inp]
Devices.register_device(INPUT, _inp)
counter+=1
def parse_feature_ro(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
if m_feature['type'] == 'DO' and m_feature['pwm_reg'] and m_feature['pwm_ps_reg'] and m_feature['pwm_c_reg']:
if not self.legacy_mode:
_r = Relay("%s_%02d" % (self.circuit, counter + 1), self, m_feature['val_coil'] + counter, board_val_reg, 0x1 << (counter % 16),
dev_id=self.dev_id, major_group=0, pwmcyclereg=m_feature['pwm_c_reg'], pwmprescalereg=m_feature['pwm_ps_reg'], digital_only=True,
pwmdutyreg=m_feature['pwm_reg'] + counter, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
else:
_r = Relay("%s_%02d" % (self.circuit, counter + 1), self, m_feature['val_coil'] + counter, board_val_reg, 0x1 << (counter % 16),
dev_id=self.dev_id, major_group=0, pwmcyclereg=m_feature['pwm_c_reg'], pwmprescalereg=m_feature['pwm_ps_reg'], digital_only=True,
pwmdutyreg=m_feature['pwm_reg'] + counter, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
else:
_r = Relay("%s_%02d" % (self.circuit, counter + 1), self, m_feature['val_coil'] + counter, board_val_reg, 0x1 << (counter % 16),
dev_id=self.dev_id, major_group=0, legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg):
self.neuron.datadeps[board_val_reg]+=[_r]
else:
self.neuron.datadeps[board_val_reg] = [_r]
Devices.register_device(RELAY, _r)
counter+=1
def parse_feature_led(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
_led = ULED("%s_%02d" % (self.circuit, counter + 1), self, counter, board_val_reg, 0x1 << (counter % 16), m_feature['val_coil'] + counter,
dev_id=self.dev_id, major_group=0, legacy_mode=self.legacy_mode)
            if self.neuron.datadeps.has_key(board_val_reg):
self.neuron.datadeps[board_val_reg] += [_led]
else:
self.neuron.datadeps[board_val_reg] = [_led]
Devices.register_device(LED, _led)
counter+=1
def parse_feature_wd(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
board_timeout_reg = m_feature['timeout_reg']
_wd = Watchdog("%s_%02d" % (self.circuit, counter + 1), self, counter, board_val_reg + counter, board_timeout_reg + counter,
dev_id=self.dev_id, major_group=0, nv_save_coil=m_feature['nv_sav_coil'], reset_coil=m_feature['reset_coil'],
legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter]+=[_wd]
else:
self.neuron.datadeps[board_val_reg + counter] = [_wd]
Devices.register_device(WATCHDOG, _wd)
counter+=1
def parse_feature_ao(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
if m_feature.has_key('cal_reg'):
_ao = AnalogOutput("%s_%02d" % (self.circuit, counter + 1), self, board_val_reg + counter, regcal=m_feature['cal_reg'],
regmode=m_feature['mode_reg'], reg_res=m_feature['res_val_reg'], modes=m_feature['modes'],
dev_id=self.dev_id, major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
else:
_ao = AnalogOutput("%s_%02d" % (self.circuit, counter + 1), self, board_val_reg + counter, dev_id=self.dev_id,
major_group=0, legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter]+=[_ao]
else:
self.neuron.datadeps[board_val_reg + counter] = [_ao]
Devices.register_device(AO, _ao)
counter+=1
def parse_feature_ai(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
tolerances = m_feature['tolerances']
if m_feature.has_key('cal_reg'):
_ai = AnalogInput("%s_%02d" % (self.circuit, counter + 1), self, board_val_reg + counter, regcal=m_feature['cal_reg'],
regmode=m_feature['mode_reg'], dev_id=self.dev_id, major_group=0, tolerances=tolerances, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter]+=[_ai]
else:
self.neuron.datadeps[board_val_reg + counter] = [_ai]
elif 'SecondaryAI' in m_feature['modes']:
_ai = AnalogInput("%s_%02d" % (self.circuit, counter + 1), self, board_val_reg + counter * 2, regmode=m_feature['mode_reg'] + counter,
dev_id=self.dev_id, major_group=0, tolerances=tolerances, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + (counter * 2)):
self.neuron.datadeps[board_val_reg + (counter * 2)]+=[_ai]
else:
self.neuron.datadeps[board_val_reg + (counter * 2)] = [_ai]
else:
_ai = AnalogInput("%s_%02d" % (self.circuit, counter + 1), self, board_val_reg + counter * 2, dev_id=self.dev_id,
major_group=0, modes=m_feature['modes'], regmode=m_feature['mode_reg'] + counter, tolerances=tolerances,
legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + (counter * 2)):
self.neuron.datadeps[board_val_reg + (counter * 2)]+=[_ai]
else:
self.neuron.datadeps[board_val_reg + (counter * 2)] = [_ai]
Devices.register_device(AI, _ai)
counter+=1
def parse_feature_register(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['start_reg']
if 'reg_type' in m_feature and m_feature['reg_type'] == 'input':
_reg = Register("%s_%d_inp" % (self.circuit, board_val_reg + counter), self, counter, board_val_reg + counter, reg_type='input', dev_id=self.dev_id,
major_group=0, legacy_mode=self.legacy_mode)
else:
_reg = Register("%s_%d" % (self.circuit, board_val_reg + counter), self, counter, board_val_reg + counter, dev_id=self.dev_id,
major_group=0, legacy_mode=self.legacy_mode)
if board_val_reg and self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter] += [_reg]
elif board_val_reg:
self.neuron.datadeps[board_val_reg + counter] = [_reg]
Devices.register_device(REGISTER, _reg)
counter+=1
def parse_feature_uart(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['conf_reg']
address_reg = m_feature['address_reg']
_uart = Uart("%s_%02d" % (self.circuit, counter + 1), self, board_val_reg + counter, dev_id=self.dev_id,
major_group=0, parity_modes=m_feature['parity_modes'], speed_modes=m_feature['speed_modes'],
stopb_modes=m_feature['stopb_modes'], address_reg=address_reg, legacy_mode=self.legacy_mode)
Devices.register_device(UART, _uart)
counter+=1
def parse_feature_unit_register(self, max_count, m_feature, board_id):
counter = 0
board_val_reg = m_feature['value_reg']
while counter < max_count:
#self, circuit, arm, post, reg, dev_id=0, major_group=0
_offset = m_feature.get("offset",0)
_factor = m_feature.get("factor",1)
_unit = m_feature.get("unit")
_name = m_feature.get("name")
_valid_mask = m_feature.get('valid_mask_reg')
_post_write_action = m_feature.get('post_write')
_datatype = m_feature.get('datatype')
_xgt = UnitRegister("{}_{}".format(self.circuit, board_val_reg + counter), self, board_val_reg + counter, reg_type="input",
dev_id=self.dev_id, datatype=_datatype, major_group=0, offset=_offset, factor=_factor, unit=_unit,
valid_mask=_valid_mask, name=_name, post_write=_post_write_action)
Devices.register_device(UNIT_REGISTER, _xgt)
counter+=1
def parse_feature_ext_config(self, m_feature, board_id):
#_xext_conf = ExtConfig("{}_CONFIG".format(self.circuit), self, reg_groups=m_feature, dev_id=self.dev_id)
_xext_conf = ExtConfig("{}".format(self.circuit), self, reg_groups=m_feature, dev_id=self.dev_id)
Devices.register_device(EXT_CONFIG, _xext_conf)
# --------------------------------------------------------------
"""
board_val_reg = m_feature['value_reg']
while counter < max_count:
_valid_mask = m_feature.get('valid_mask_reg')
_post_write_action = m_feature.get('post_write')
_xgt = UnitRegister("{}_{}".format(self.circuit, board_val_reg + counter), self, board_val_reg + counter, reg_type="input",
dev_id=self.dev_id, major_group=0, offset=_offset, factor=_factor, unit=_unit,
valid_mask=_valid_mask, name=_name, post_write=_post_write_action)
counter+=1
"""
def parse_feature(self, m_feature):
board_id = 1 # UART Extension has always only one group
max_count = m_feature.get('count')
if m_feature['type'] == 'DI':
self.parse_feature_di(max_count, m_feature, board_id)
elif (m_feature['type'] == 'RO' or m_feature['type'] == 'DO'):
self.parse_feature_ro(max_count, m_feature, board_id)
elif m_feature['type'] == 'LED':
self.parse_feature_led(max_count, m_feature, board_id)
elif m_feature['type'] == 'WD':
self.parse_feature_wd(max_count, m_feature, board_id)
elif m_feature['type'] == 'AO':
self.parse_feature_ao(max_count, m_feature, board_id)
elif m_feature['type'] == 'AI':
self.parse_feature_ai(max_count, m_feature, board_id)
elif m_feature['type'] == 'REGISTER' and self.direct_access:
self.parse_feature_register(max_count, m_feature, board_id)
elif m_feature['type'] == 'UART':
self.parse_feature_uart(max_count, m_feature, board_id)
elif m_feature['type'] == 'UNIT_REGISTER':
self.parse_feature_unit_register(max_count, m_feature, board_id)
elif m_feature['type'] == 'EXT_CONFIG':
self.parse_feature_ext_config(m_feature, board_id)
else:
print("Unknown feature: " + str(m_feature) + " at board id: " + str(board_id))
@gen.coroutine
def parse_definition(self, hw_dict):
self.volt_refx = 33000
self.volt_ref = 3.3
for defin in hw_dict.definitions:
if defin and (self.neuron.device_name == defin['type']):
                yield self.initialise_cache(defin)
for m_feature in defin['modbus_features']:
self.parse_feature(m_feature)
def get(self):
return self.full()
class Board(object):
def __init__(self, Config, circuit, neuron, versions, major_group=1, dev_id=0, direct_access=False):
self.alias = ""
self.devtype = BOARD
self.dev_id = dev_id
self.Config = Config
self.circuit = circuit
self.direct_access = direct_access
self.legacy_mode = not (Config.getbooldef('MAIN','use_experimental_api', False))
self.modbus_address = 0
self.sw = versions[0]
self.neuron = neuron
self.major_group = major_group
self.ndi = (versions[1] & 0xff00) >> 8
self.ndo = (versions[1] & 0x00ff)
self.nai = (versions[2] & 0xff00) >> 8
self.nao = (versions[2] & 0x00f0) >> 4
self.nuart = (versions[2] & 0x000f)
self.hw = (versions[3] & 0xff00) >> 8
self.hwv = (versions[3] & 0x00ff)
self.serial = versions[5] + (versions[6] << 16)
self.nai1 = self.nai if self.hw != 0 else 1 # full featured AI (with switched V/A)
self.nai2 = 0 if self.hw != 0 else 1 # Voltage only AI
@gen.coroutine
def set(self, alias=None):
if not alias is None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
        raise gen.Return(self.full())
@gen.coroutine
def initialise_cache(self, cache_definition):
if cache_definition.has_key('modbus_register_blocks'):
if self.neuron.modbus_cache_map == None:
self.neuron.modbus_cache_map = ModbusCacheMap(cache_definition['modbus_register_blocks'], self.neuron)
yield self.neuron.modbus_cache_map.do_scan(initial=True)
yield self.neuron.modbus_cache_map.sem.acquire()
self.neuron.modbus_cache_map.sem.release()
if (self.hw == 0):
self.volt_refx = (3.3 * (1 + self.neuron.modbus_cache_map.get_register(1, 1009)[0]))
self.volt_ref = (3.3 * (1 + self.neuron.modbus_cache_map.get_register(1, 1009)[0])) / self.neuron.modbus_cache_map.get_register(1, 5)[0]
else:
yield self.neuron.modbus_cache_map.sem.acquire()
self.neuron.modbus_cache_map.sem.release()
else:
raise Exception("HW Definition %s requires Modbus register blocks to be specified" % cache_definition['type'])
def parse_feature_di(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
board_counter_reg = m_feature['counter_reg']
board_deboun_reg = m_feature['deboun_reg']
if m_feature.has_key('ds_modes') and m_feature.has_key('direct_reg') and m_feature.has_key('polar_reg') and m_feature.has_key('toggle_reg'):
_inp = Input("%s_%02d" % (self.circuit, len(Devices.by_int(INPUT, major_group=m_feature['major_group'])) + 1), self, board_val_reg, 0x1 << (counter % 16),
regdebounce=board_deboun_reg + counter, major_group=m_feature['major_group'], regcounter=board_counter_reg + (2 * counter), modes=m_feature['modes'],
dev_id=self.dev_id, ds_modes=m_feature['ds_modes'], regmode=m_feature['direct_reg'], regtoggle=m_feature['toggle_reg'],
regpolarity=m_feature['polar_reg'], legacy_mode=self.legacy_mode)
else:
_inp = Input("%s_%02d" % (self.circuit, len(Devices.by_int(INPUT, major_group=m_feature['major_group'])) + 1), self, board_val_reg, 0x1 << (counter % 16),
regdebounce=board_deboun_reg + counter, major_group=m_feature['major_group'], regcounter=board_counter_reg + (2 * counter), modes=m_feature['modes'],
dev_id=self.dev_id, legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg):
self.neuron.datadeps[board_val_reg]+=[_inp]
else:
self.neuron.datadeps[board_val_reg] = [_inp]
if self.neuron.datadeps.has_key(board_counter_reg + (2 * counter)):
self.neuron.datadeps[board_counter_reg + (2 * counter)]+=[_inp]
else:
self.neuron.datadeps[board_counter_reg + (2 * counter)] = [_inp]
Devices.register_device(INPUT, _inp)
counter+=1
def parse_feature_ro(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
if m_feature['type'] == 'DO' and m_feature['pwm_reg'] and m_feature['pwm_ps_reg'] and m_feature['pwm_c_reg']:
if not self.legacy_mode:
_r = Relay("%s_%02d" % (self.circuit, len(Devices.by_int(RELAY, major_group=m_feature['major_group'])) + 1), self, m_feature['val_coil'] + counter, board_val_reg, 0x1 << (counter % 16),
dev_id=self.dev_id, major_group=m_feature['major_group'], pwmcyclereg=m_feature['pwm_c_reg'], pwmprescalereg=m_feature['pwm_ps_reg'], digital_only=True,
pwmdutyreg=m_feature['pwm_reg'] + counter, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
else:
_r = Relay("%s_%02d" % (self.circuit, len(Devices.by_int(RELAY, major_group=m_feature['major_group'])) + 1), self, m_feature['val_coil'] + counter, board_val_reg, 0x1 << (counter % 16),
dev_id=self.dev_id, major_group=m_feature['major_group'], pwmcyclereg=m_feature['pwm_c_reg'], pwmprescalereg=m_feature['pwm_ps_reg'], digital_only=True,
pwmdutyreg=m_feature['pwm_reg'] + counter, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
else:
_r = Relay("%s_%02d" % (self.circuit, len(Devices.by_int(RELAY, major_group=m_feature['major_group'])) + 1), self, m_feature['val_coil'] + counter, board_val_reg, 0x1 << (counter % 16),
dev_id=self.dev_id, major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg):
self.neuron.datadeps[board_val_reg]+=[_r]
else:
self.neuron.datadeps[board_val_reg] = [_r]
Devices.register_device(RELAY, _r)
counter+=1
def parse_feature_led(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
_led = ULED("%s_%02d" % (self.circuit, len(Devices.by_int(LED, major_group=m_feature['major_group'])) + 1), self, counter, board_val_reg, 0x1 << (counter % 16), m_feature['val_coil'] + counter,
dev_id=self.dev_id, major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg):
self.neuron.datadeps[board_val_reg]+=[_led]
else:
self.neuron.datadeps[board_val_reg] = [_led]
Devices.register_device(LED, _led)
counter+=1
def parse_feature_wd(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
board_timeout_reg = m_feature['timeout_reg']
_wd = Watchdog("%s_%02d" % (self.circuit, len(Devices.by_int(WATCHDOG, major_group=m_feature['major_group'])) + 1), self, counter, board_val_reg + counter, board_timeout_reg + counter,
dev_id=self.dev_id, major_group=m_feature['major_group'], nv_save_coil=m_feature['nv_sav_coil'], reset_coil=m_feature['reset_coil'],
legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter]+=[_wd]
else:
self.neuron.datadeps[board_val_reg + counter] = [_wd]
Devices.register_device(WATCHDOG, _wd)
counter+=1
def parse_feature_ao(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
if m_feature.has_key('cal_reg'):
res_val_reg = m_feature['res_val_reg']
_ao = AnalogOutput("%s_%02d" % (self.circuit, len(Devices.by_int(AO, major_group=m_feature['major_group'])) + 1), self, board_val_reg + counter, regcal=m_feature['cal_reg'],
regmode=m_feature['mode_reg'], reg_res=m_feature['res_val_reg'], modes=m_feature['modes'],
dev_id=self.dev_id, major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(res_val_reg + counter):
self.neuron.datadeps[res_val_reg + counter]+=[_ao]
else:
self.neuron.datadeps[res_val_reg + counter] = [_ao]
else:
_ao = AnalogOutput("%s_%02d" % (self.circuit, len(Devices.by_int(AO, major_group=m_feature['major_group'])) + 1), self, board_val_reg + counter, dev_id=self.dev_id,
major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter]+=[_ao]
else:
self.neuron.datadeps[board_val_reg + counter] = [_ao]
Devices.register_device(AO, _ao)
counter+=1
def parse_feature_ai(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['val_reg']
tolerances = m_feature['tolerances']
if m_feature.has_key('cal_reg'):
_ai = AnalogInput("%s_%02d" % (self.circuit, len(Devices.by_int(AI, major_group=m_feature['major_group'])) + 1), self, board_val_reg + counter, regcal=m_feature['cal_reg'], regmode=m_feature['mode_reg'],
dev_id=self.dev_id, major_group=m_feature['major_group'], tolerances=tolerances, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter] += [_ai]
else:
self.neuron.datadeps[board_val_reg + counter] = [_ai]
else:
_ai = AnalogInput("%s_%02d" % (self.circuit, len(Devices.by_int(AI, major_group=m_feature['major_group'])) + 1), self, board_val_reg + counter * 2, regmode=m_feature['mode_reg'] + counter,
dev_id=self.dev_id, major_group=m_feature['major_group'], tolerances=tolerances, modes=m_feature['modes'], legacy_mode=self.legacy_mode)
if self.neuron.datadeps.has_key(board_val_reg + (counter * 2)):
self.neuron.datadeps[board_val_reg + (counter * 2)] += [_ai]
else:
self.neuron.datadeps[board_val_reg + (counter * 2)] = [_ai]
Devices.register_device(AI, _ai)
counter+=1
def parse_feature_register(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['start_reg']
if 'reg_type' in m_feature and m_feature['reg_type'] == 'input':
_reg = Register("%s_%d_inp" % (self.circuit, board_val_reg + counter), self, counter, board_val_reg + counter, reg_type='input', dev_id=self.dev_id,
major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
else:
_reg = Register("%s_%d" % (self.circuit, board_val_reg + counter), self, counter, board_val_reg + counter, dev_id=self.dev_id,
major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
if board_val_reg and self.neuron.datadeps.has_key(board_val_reg + counter):
self.neuron.datadeps[board_val_reg + counter] += [_reg]
elif board_val_reg:
self.neuron.datadeps[board_val_reg + counter] = [_reg]
Devices.register_device(REGISTER, _reg)
counter+=1
def parse_feature_uart(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
board_val_reg = m_feature['conf_reg']
_uart = Uart("%s_%02d" % (self.circuit, len(Devices.by_int(UART, major_group=m_feature['major_group'])) + 1), self, board_val_reg + counter, dev_id=self.dev_id,
major_group=m_feature['major_group'], parity_modes=m_feature['parity_modes'], speed_modes=m_feature['speed_modes'],
stopb_modes=m_feature['stopb_modes'], legacy_mode=self.legacy_mode)
Devices.register_device(UART, _uart)
counter+=1
def parse_feature_light_channel(self, max_count, m_feature, board_id):
counter = 0
while counter < max_count:
read_reg = m_feature['read_reg'] + (counter * 3)
write_reg = m_feature['write_reg'] + (counter * 2)
status_reg = m_feature['status_reg']
_light_c = LightChannel("%s_%02d" % (self.circuit, len(Devices.by_int(LIGHT_CHANNEL, major_group=m_feature['major_group'])) + 1),
self, counter, status_reg, 0x1 << counter, read_reg + 1, read_reg, write_reg, read_reg + 2, write_reg + 1, dev_id=self.dev_id,
major_group=m_feature['major_group'], legacy_mode=self.legacy_mode)
Devices.register_device(LIGHT_CHANNEL, _light_c)
counter+=1
def parse_feature(self, m_feature, board_id):
max_count = m_feature['count']
if m_feature['type'] == 'DI' and m_feature['major_group'] == board_id:
self.parse_feature_di(max_count, m_feature, board_id)
elif (m_feature['type'] == 'RO' or m_feature['type'] == 'DO') and m_feature['major_group'] == board_id:
self.parse_feature_ro(max_count, m_feature, board_id)
elif m_feature['type'] == 'LED' and m_feature['major_group'] == board_id:
self.parse_feature_led(max_count, m_feature, board_id)
elif m_feature['type'] == 'WD' and m_feature['major_group'] == board_id:
self.parse_feature_wd(max_count, m_feature, board_id)
elif m_feature['type'] == 'AO' and m_feature['major_group'] == board_id:
self.parse_feature_ao(max_count, m_feature, board_id)
elif m_feature['type'] == 'AI' and m_feature['major_group'] == board_id:
self.parse_feature_ai(max_count, m_feature, board_id)
elif m_feature['type'] == 'REGISTER' and m_feature['major_group'] == board_id and self.direct_access:
self.parse_feature_register(max_count, m_feature, board_id)
elif m_feature['type'] == 'UART' and m_feature['major_group'] == board_id:
self.parse_feature_uart(max_count, m_feature, board_id)
elif m_feature['type'] == 'LIGHT_CHANNEL' and m_feature['major_group'] == board_id:
            self.parse_feature_light_channel(max_count, m_feature, board_id)
@gen.coroutine
def parse_definition(self, hw_dict, board_id):
self.volt_refx = 33000
self.volt_ref = 3.3
if 'model' not in config.up_globals:
logger.info("NO NEURON EEPROM DATA DETECTED, EXITING")
logger.info("PLEASE USE A FRESH EVOK IMAGE, OR ENABLE I2C, I2C-DEV AND THE EEPROM OVERLAY")
            exit(-1)
defin = hw_dict.neuron_definition
if defin and defin['type'] in config.up_globals['model']:
yield self.initialise_cache(defin)
for m_feature in defin['modbus_features']:
self.parse_feature(m_feature, board_id)
def get(self):
return self.full()
class Relay(object):
pending_id = 0
def __init__(self, circuit, arm, coil, reg, mask, dev_id=0, major_group=0, pwmcyclereg=-1, pwmprescalereg=-1, pwmdutyreg=-1, legacy_mode=True, digital_only=False, modes=['Simple']):
self.alias = ""
self.devtype = RELAY
self.dev_id = dev_id
self.circuit = circuit
self.arm = arm
self.modes = modes
self.pwmcyclereg = pwmcyclereg
self.pwmprescalereg = pwmprescalereg
self.pwmdutyreg = pwmdutyreg
self.pwm_duty = 0
self.pwm_duty_val = 0
self.pwm_freq = 0
self.pwm_cycle_val = 0
self.pwm_prescale_val = 0
self.major_group = major_group
self.legacy_mode = legacy_mode
self.digital_only = digital_only
self.coil = coil
self.valreg = reg
self.bitmask = mask
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1,self.valreg, unit=self.arm.modbus_address)[0]
if self.pwmdutyreg >= 0: # This instance supports PWM mode
self.pwm_duty_val = (self.arm.neuron.modbus_cache_map.get_register(1, self.pwmdutyreg, unit=self.arm.modbus_address))[0]
self.pwm_cycle_val = ((self.arm.neuron.modbus_cache_map.get_register(1, self.pwmcyclereg, unit=self.arm.modbus_address))[0] + 1)
self.pwm_prescale_val = (self.arm.neuron.modbus_cache_map.get_register(1, self.pwmprescalereg, unit=self.arm.modbus_address))[0]
if (self.pwm_cycle_val > 0) and (self.pwm_prescale_val > 0):
self.pwm_freq = 48000000 / (self.pwm_cycle_val * self.pwm_prescale_val)
else:
self.pwm_freq = 0
if (self.pwm_duty_val == 0):
self.pwm_duty = 0
self.mode = 'Simple' # Mode field is for backward compatibility, will be deprecated soon
else:
logger.info("Pocitam z {} {}".format(self.pwm_cycle_val, self.pwm_duty_val))
self.pwm_duty = (100 / (float(self.pwm_cycle_val) / float(self.pwm_duty_val)))
self.pwm_duty = round(self.pwm_duty ,1) if self.pwm_duty % 1 else int(self.pwm_duty)
self.mode = 'PWM' # Mode field is for backward compatibility, will be deprecated soon
else: # This RELAY instance does not support PWM mode (no pwmdutyreg given)
self.mode = 'Simple'
self.forced_changes = arm.neuron.Config.getbooldef("MAIN", "force_immediate_state_changes", False)
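    # Worked example of the PWM maths above (values are illustrative): with the
    # 48 MHz base clock, pwm_prescale_val == 5 and pwm_cycle_val == 9600 give
    # 48000000 / (9600 * 5) = 1000 Hz, and pwm_duty_val == 2400 corresponds to a
    # duty cycle of 100 * 2400 / 9600 = 25 %.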
def full(self, forced_value=None):
ret = {'dev': 'relay',
'relay_type': 'physical',
'circuit': self.circuit,
'value': self.value,
'pending': self.pending_id != 0,
'mode': self.mode,
'modes': self.modes,
'glob_dev_id': self.dev_id}
if self.digital_only:
ret['relay_type'] = 'digital'
ret['pwm_freq'] = self.pwm_freq
ret['pwm_duty'] = self.pwm_duty
if self.alias != '':
ret['alias'] = self.alias
if forced_value is not None:
ret['value'] = forced_value
return ret
def simple(self):
return {'dev': 'relay',
'circuit': self.circuit,
'value': self.value}
@property
def value(self):
try:
if self.regvalue() & self.bitmask: return 1
except:
pass
return 0
def get_state(self):
""" Returns ( status, is_pending )
current on/off status is taken from last mcp value without reading it from hardware
is_pending is Boolean
"""
return (self.value, self.pending_id != 0)
@gen.coroutine
def set_state(self, value):
""" Sets new on/off status. Disable pending timeouts
"""
if self.pending_id:
IOLoop.instance().remove_timeout(self.pending_id)
self.pending_id = None
yield self.arm.neuron.client.write_coil(self.coil, 1 if value else 0, unit=self.arm.modbus_address)
raise gen.Return(1 if value else 0)
def value_delta(self, new_val):
return (self.regvalue() ^ new_val) & self.bitmask
@gen.coroutine
def set(self, value=None, timeout=None, mode=None, pwm_freq=None, pwm_duty=None, alias=None):
""" Sets new on/off status. Disable pending timeouts
"""
if self.pending_id:
IOLoop.instance().remove_timeout(self.pending_id)
self.pending_id = None
#if pwm_duty is not None and self.mode == 'PWM' and float(pwm_duty) <= 0.01:
# mode = 'Simple'
# New system - mode field will no longer be used
# Set PWM Freq
if (pwm_freq is not None) and (float(pwm_freq) > 0):
            self.pwm_freq = pwm_freq
self.pwm_delay_val = 48000000 / float(pwm_freq)
if ((int(self.pwm_delay_val) % 50000) == 0) and ((self.pwm_delay_val / 50000) < 65535):
self.pwm_cycle_val = 50000
self.pwm_prescale_val = self.pwm_delay_val / 50000
elif ((int(self.pwm_delay_val) % 10000) == 0) and ((self.pwm_delay_val / 10000) < 65535):
self.pwm_cycle_val = 10000
self.pwm_prescale_val = self.pwm_delay_val / 10000
elif ((int(self.pwm_delay_val) % 5000) == 0) and ((self.pwm_delay_val / 5000) < 65535):
self.pwm_cycle_val = 5000
self.pwm_prescale_val = self.pwm_delay_val / 5000
elif ((int(self.pwm_delay_val) % 1000) == 0) and ((self.pwm_delay_val / 1000) < 65535):
self.pwm_cycle_val = 1000
self.pwm_prescale_val = self.pwm_delay_val / 1000
else:
self.pwm_prescale_val = sqrt(self.pwm_delay_val)
self.pwm_cycle_val = self.pwm_prescale_val
if self.pwm_duty > 0:
self.pwm_duty_val = float(self.pwm_cycle_val) * float(float(self.pwm_duty) / 100.0)
#else:
# self.pwm_duty_val = 0
# self.arm.neuron.client.write_register(self.pwmdutyreg, self.pwm_duty_val, unit=self.arm.modbus_address)
other_devs = Devices.by_int(RELAY, major_group=self.major_group) # All PWM outs in the same group share this registers
for other_dev in other_devs:
if other_dev.pwm_duty > 0:
other_dev.pwm_freq = self.pwm_freq
other_dev.pwm_delay_val = self.pwm_delay_val
other_dev.pwm_cycle_val = self.pwm_cycle_val
other_dev.pwm_prescale_val = self.pwm_prescale_val
yield other_dev.set(pwm_duty=other_dev.pwm_duty)
self.arm.neuron.client.write_register(self.pwmcyclereg, self.pwm_cycle_val - 1, unit=self.arm.modbus_address)
self.arm.neuron.client.write_register(self.pwmprescalereg, self.pwm_prescale_val, unit=self.arm.modbus_address)
self.arm.neuron.client.write_register(self.pwmdutyreg, self.pwm_duty_val, unit=self.arm.modbus_address)
# Set Binary value
if value is not None:
parsed_value = 1 if int(value) else 0
if pwm_duty is not None:
if (pwm_duty == 100 and parsed_value == 1) or (pwm_duty == 0 and parsed_value == 0): # No conflict in this case
pass
else:
raise Exception('Set value conflict: Cannot set both value and pwm_duty at once.')
if not (timeout is None):
timeout = float(timeout)
self.mode = 'Simple'
self.arm.neuron.client.write_coil(self.coil, parsed_value, unit=self.arm.modbus_address)
if self.pwm_duty != 0:
self.pwm_duty = 0
self.arm.neuron.client.write_register(self.pwmdutyreg, self.pwm_duty, unit=self.arm.modbus_address) # Turn off PWM
# Set PWM Duty
elif pwm_duty is not None and float(pwm_duty) >= 0.0 and float(pwm_duty) <= 100.0:
self.pwm_duty = pwm_duty
self.pwm_duty_val = float(self.pwm_cycle_val) * float(float(self.pwm_duty) / 100.0)
if self.value != 0:
self.arm.neuron.client.write_coil(self.coil, 0, unit=self.arm.modbus_address)
self.arm.neuron.client.write_register(self.pwmdutyreg, self.pwm_duty_val, unit=self.arm.modbus_address)
self.mode = 'PWM'
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if timeout is None:
if (value is not None) and self.forced_changes:
raise gen.Return(self.full(forced_value=parsed_value))
else:
raise gen.Return(self.full())
def timercallback():
self.pending_id = None
self.arm.neuron.client.write_coil(self.coil, 0 if value else 1, unit=self.arm.modbus_address)
self.pending_id = IOLoop.instance().add_timeout(
datetime.timedelta(seconds=float(timeout)), timercallback)
if (value is not None) and self.forced_changes:
raise gen.Return(self.full(forced_value=parsed_value))
else:
raise gen.Return(self.full())
def get(self):
return self.full()
class ULED(object):
def __init__(self, circuit, arm, post, reg, mask, coil, dev_id=0, major_group=0, legacy_mode=True):
self.alias = ""
self.devtype = LED
self.dev_id = dev_id
self.circuit = circuit
self.arm = arm
self.major_group = major_group
self.legacy_mode = legacy_mode
self.bitmask = mask
self.valreg = reg
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address)[0]
self.coil = coil
def full(self):
ret = {'dev': 'led', 'circuit': self.circuit, 'value': self.value, 'glob_dev_id': self.dev_id}
if self.alias != '':
ret['alias'] = self.alias
return ret
def simple(self):
return {'dev': 'led', 'circuit': self.circuit, 'value': self.value}
@property
def value(self):
try:
if self.regvalue() & self.bitmask: return 1
except:
pass
return 0
def value_delta(self, new_val):
return (self.regvalue() ^ new_val) & self.bitmask
def get_state(self):
""" Returns ( status, is_pending )
current on/off status is taken from last mcp value without reading it from hardware
is_pending is Boolean
"""
return (self.value)
@gen.coroutine
def set_state(self, value):
""" Sets new on/off status. Disable pending timeouts
"""
yield self.arm.neuron.client.write_coil(self.coil, 1 if value else 0, unit=self.arm.modbus_address)
raise gen.Return(1 if value else 0)
@gen.coroutine
def set(self, value=None, alias=None):
""" Sets new on/off status. Disable pending timeouts
"""
if alias is not None:
if Devices.add_alias(alias, self):
self.alias = alias
if value is not None:
value = int(value)
self.arm.neuron.client.write_coil(self.coil, 1 if value else 0, unit=self.arm.modbus_address)
raise gen.Return(self.full())
def get(self):
return self.full()
class LightDevice(object):
def __init__(self, circuit, arm, bus, dev_id=0):
self.alias = ""
self.devtype = LIGHT_DEVICE
self.dev_id = dev_id
def full(self):
ret = {'dev': 'light_channel', 'circuit': self.circuit, 'glob_dev_id': self.dev_id}
if self.alias != '':
ret['alias'] = self.alias
return ret
def get(self):
return self.full()
class LightChannel(object):
def __init__(self, circuit, arm, bus_number, reg_status, status_mask, reg_transmit, reg_receive, reg_receive_counter, reg_config_transmit, reg_config_receive, dev_id=0, major_group=0, legacy_mode=True):
self.alias = ""
self.devtype = LIGHT_CHANNEL
self.dev_id = dev_id
self.circuit = circuit
self.arm = arm
self.major_group = major_group
self.legacy_mode = legacy_mode
self.reg_status = reg_status
self.bus_number = bus_number
self.status_mask = status_mask
self.reg_transmit = reg_transmit
self.reg_receive = reg_receive
self.reg_receive_counter = reg_receive_counter
self.reg_config_transmit = reg_config_transmit
self.reg_config_receive = reg_config_receive
self.broadcast_commands = ["recall_max_level", "recall_min_level", "off", "up", "down", "step_up", "step_down", "step_down_and_off",
"turn_on_and_step_up", "DAPC", "reset", "identify_device", "DTR0", "DTR1", "DTR2"]
self.group_commands = ["recall_max_level", "recall_min_level", "off", "up", "down", "step_up", "step_down", "step_down_and_off",
"turn_on_and_step_up", "DAPC", "reset", "identify_device"]
self.scan_types = ["assigned", "unassigned"]
self.light_driver = SyncUnipiDALIDriver(self.bus_number)
#self.light_driver.logger = logger
#self.light_driver.debug = True
self.light_bus = Bus(self.circuit, self.light_driver)
def full(self):
ret = {'dev': 'light_channel', 'circuit': self.circuit, 'glob_dev_id': self.dev_id, 'broadcast_commands': self.broadcast_commands,
'group_commands': self.group_commands, 'scan_types': self.scan_types}
if self.alias != '':
ret['alias'] = self.alias
return ret
def get(self):
return self.full()
def simple(self):
return {'dev': 'light_channel', 'circuit': self.circuit}
@gen.coroutine
def set(self, broadcast_command=None, broadcast_argument=None, group_command=None, group_address=None, group_argument=None, scan=None, alias=None):
""" Sets new on/off status. Disable pending timeouts
"""
if alias is not None:
if Devices.add_alias(alias, self):
self.alias = alias
        if scan is not None and scan in self.scan_types:
try:
self.light_bus.assign_short_addresses()
except Exception, E:
logger.exception(str(E))
elif broadcast_command is not None:
if broadcast_command == "recall_max_level":
command = dali.gear.general.RecallMaxLevel(Broadcast())
elif broadcast_command == "recall_min_level":
command = dali.gear.general.RecallMinLevel(Broadcast())
elif broadcast_command == "off":
command = dali.gear.general.Off(Broadcast())
elif broadcast_command == "up":
command = dali.gear.general.Up(Broadcast())
elif broadcast_command == "down":
command = dali.gear.general.Down(Broadcast())
elif broadcast_command == "step_up":
command = dali.gear.general.StepUp(Broadcast())
elif broadcast_command == "step_down":
command = dali.gear.general.StepDown(Broadcast())
elif broadcast_command == "step_down_and_off":
command = dali.gear.general.StepDownAndOff(Broadcast())
elif broadcast_command == "turn_on_and_step_up":
command = dali.gear.general.OnAndStepUp(Broadcast())
elif broadcast_command == "DAPC" and broadcast_argument is not None:
if broadcast_argument == "MASK" or broadcast_argument == "OFF":
command = dali.gear.general.DAPC(Broadcast(), broadcast_argument)
else:
command = dali.gear.general.DAPC(Broadcast(), int(broadcast_argument))
elif broadcast_command == "reset":
command = dali.gear.general.Reset(Broadcast())
elif broadcast_command == "identify_device":
command = dali.gear.general.IdentifyDevice(Broadcast())
elif broadcast_command == "DTR0":
command = dali.gear.general.DTR0(int(broadcast_argument))
elif broadcast_command == "DTR1":
command = dali.gear.general.DTR1(int(broadcast_argument))
elif broadcast_command == "DTR2":
command = dali.gear.general.DTR2(int(broadcast_argument))
else:
raise Exception("Invalid lighting broadcast command: %d" % broadcast_command)
self.light_driver.logger = logger
self.light_driver.debug = True
print('Response: {}'.format(self.light_driver.send(command)))
elif group_command is not None:
if group_command == "recall_max_level":
command = dali.gear.general.RecallMaxLevel(Group(group_address))
elif group_command == "recall_min_level":
command = dali.gear.general.RecallMinLevel(Group(group_address))
elif group_command == "off":
command = dali.gear.general.Off(Group(group_address))
elif group_command == "up":
command = dali.gear.general.Up(Group(group_address))
elif group_command == "down":
command = dali.gear.general.Down(Group(group_address))
elif group_command == "step_up":
command = dali.gear.general.StepUp(Group(group_address))
elif group_command == "step_down":
command = dali.gear.general.StepDown(Group(group_address))
elif group_command == "step_down_and_off":
command = dali.gear.general.StepDownAndOff(Group(group_address))
elif group_command == "turn_on_and_step_up":
command = dali.gear.general.OnAndStepUp(Group(group_address))
elif group_command == "DAPC" and group_argument is not None:
if group_argument == "MASK" or group_argument == "OFF":
command = dali.gear.general.DAPC(Group(group_address), group_argument)
else:
command = dali.gear.general.DAPC(Group(group_address), int(group_argument))
elif group_command == "reset":
command = dali.gear.general.Reset(Group(group_address))
elif group_command == "identify_device":
command = dali.gear.general.IdentifyDevice(Group(group_address))
else:
raise Exception("Invalid lighting broadcast command (and/or required argument was not provided): %d" % group_command)
self.light_driver.logger = logger
self.light_driver.debug = True
print('Response: {}'.format(self.light_driver.send(command)))
raise gen.Return(self.full())
class Watchdog(object):
def __init__(self, circuit, arm, post, reg, timeout_reg, nv_save_coil=-1, reset_coil=-1, wd_reset_ro_coil=-1, dev_id=0, major_group=0, legacy_mode=True):
self.alias = ""
self.devtype = WATCHDOG
self.dev_id = dev_id
self.circuit = circuit
self.arm = arm
self.major_group = major_group
self.legacy_mode = legacy_mode
        self.timeoutvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.toreg, unit=self.arm.modbus_address)
        # The status flags live in valreg; toreg only holds the timeout value
        self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address)[0]
self.nvsavvalue = 0
self.resetvalue = 0
self.nv_save_coil = nv_save_coil
self.reset_coil = reset_coil
self.wd_reset_ro_coil = wd_reset_ro_coil
self.wdwasresetvalue = 0
self.valreg = reg
self.toreg = timeout_reg
def full(self):
ret = {'dev': 'wd',
'circuit': self.circuit,
'value': self.value,
'timeout': self.timeout[0],
'was_wd_reset': self.was_wd_boot_value,
'nv_save' :self.nvsavvalue,
'glob_dev_id': self.dev_id}
if self.alias != '':
ret['alias'] = self.alias
return ret
def get(self):
return self.full()
def simple(self):
return {'dev': 'wd',
'circuit': self.circuit,
'value': self.regvalue()}
def value_delta(self, new_val):
        return (self.regvalue() ^ new_val) & 0x03 #Only the two lowest bits contain the watchdog status
@property
def value(self):
        try:
            # Bit 0 of the status register signals the watchdog enable/trigger state
            if self.regvalue() & 0b01: return 1
        except:
            pass
        return 0
@property
def timeout(self):
try:
if self.timeoutvalue(): return self.timeoutvalue()
except:
pass
return 0
@property
def was_wd_boot_value(self):
try:
if self.regvalue() & 0b10: return 1
except:
pass
return 0
def get_state(self):
""" Returns ( status, is_pending )
current on/off status is taken from last mcp value without reading it from hardware
is_pending is Boolean
"""
return (self.value, self.timeout)
@gen.coroutine
def set_state(self, value):
""" Sets new on/off status. Disable pending timeouts
"""
self.arm.neuron.client.write_register(self.valreg, 1 if value else 0, unit=self.arm.modbus_address)
raise gen.Return(1 if value else 0)
@gen.coroutine
def set(self, value=None, timeout=None, reset=None, nv_save=None, alias=None):
""" Sets new on/off status. Disable pending timeouts
"""
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if self.nv_save_coil >= 0 and nv_save is not None and nv_save != self.nvsavvalue:
if nv_save != 0:
self.nvsavvalue = 1
else:
self.nvsavvalue = 0
self.arm.neuron.client.write_coil(self.nv_save_coil, 1, unit=self.arm.modbus_address)
if value is not None:
value = int(value)
self.arm.neuron.client.write_register(self.valreg, 1 if value else 0, unit=self.arm.modbus_address)
if not (timeout is None):
timeout = int(timeout)
if timeout > 65535:
timeout = 65535
self.arm.neuron.client.write_register(self.toreg, timeout, unit=self.arm.modbus_address)
if self.reset_coil >= 0 and reset is not None:
if reset != 0:
self.nvsavvalue = 0
self.arm.neuron.client.write_coil(self.reset_coil, 1, unit=self.arm.modbus_address)
logger.info("Performed reset of board %s" % self.circuit)
raise gen.Return(self.full())
class ExtConfig(object):
def __init__(self, circuit, arm, reg_groups, dev_id=0):
self._alias = ""
self._devtype = EXT_CONFIG
self.circuit = circuit
self._reg_map = {}
self._dev_id = dev_id
self._arm = arm
self._params = {}
self.post_write = reg_groups.get("post_write_coils", None)
for reg_block in reg_groups['reg_blocks']:
reg_base_addr = reg_block['start_reg']
if reg_block['count'] > 1: # Array of registers
for reg_offset in range(reg_block['count']):
self._params["{}_{}".format(reg_block['name'], reg_offset + 1)] = reg_base_addr + reg_offset
else: # Single register only
self._params[reg_block['name']] = reg_base_addr
def __getattr__(self, name):
if name in self._params:
return self.get_param(name)
else:
raise AttributeError("Parameter {} not found in {}".format(name, self.circuit))
def full(self):
ret = self.simple()
ret['glob_dev_id'] = self._dev_id
if self._alias != '':
ret['alias'] = self._alias
return ret
def simple(self):
ret = {'dev': 'ext_config',
'circuit': self.circuit}
for (param, reg_addr) in self._params.items():
ret[param] = self._arm.neuron.modbus_cache_map.get_register(1, reg_addr, unit=self._arm.modbus_address, is_input=False)[0]
return ret
def get_param(self, par_name):
return self._arm.neuron.modbus_cache_map.get_register(1, self._params[par_name], unit=self._arm.modbus_address, is_input=False)[0]
def get(self):
return self.full()
def get_state(self):
""" Returns ( status, is_pending )
current on/off status is taken from last mcp value without reading it from hardware
is_pending is Boolean
"""
return (self.value)
@gen.coroutine
def set(self, alias=None, **kwargs):
""" Sets new on/off status. Disable pending timeouts """
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if len(kwargs) > 0:
for param, value in kwargs.items():
self._arm.neuron.client.write_register(self._params[param], int(value), unit=self._arm.modbus_address)
if isinstance(self.post_write, list):
for coil in self.post_write:
self._arm.neuron.client.write_coil(coil, 1, unit=self._arm.modbus_address)
raise gen.Return(self.full())
class UnitRegister():
def __init__(self, circuit, arm, reg, reg_type="input", dev_id=0, major_group=0, datatype=None, unit=None, offset=0, factor=1, valid_mask=None, name=None, post_write=None):
# TODO - valid mask reg
self.alias = ""
self.devtype = UNIT_REGISTER
self.dev_id = dev_id
self.circuit = circuit
self.arm = arm
self.major_group = major_group
self.valreg = reg
self.offset = offset
self.factor = factor
self.unit = unit
self.name = name
self.post_write = post_write
self.datatype = datatype
if reg_type == "input":
_is_iput = True
else:
_is_iput = False
if valid_mask is not None:
self.valid_mask = lambda: self.arm.neuron.modbus_cache_map.get_register(1, valid_mask, unit=self.arm.modbus_address, is_input=_is_iput)[0]
else:
self.valid_mask = None
if self.datatype is None:
if factor == 1 and offset == 0: # Reading RAW value - save some CPU time
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address, is_input=_is_iput)[0]
else:
self.regvalue = lambda: (self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address, is_input=_is_iput)[0] * self.factor) + self.offset
elif datatype == "float32":
# TODO - add factor and offset version
self.regvalue = lambda: self.__parse_float32(self.arm.neuron.modbus_cache_map.get_register(2, self.valreg, unit=self.arm.modbus_address, is_input=_is_iput))
def __parse_float32(self, raw_regs):
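        # Reassemble two 16-bit Modbus registers (high byte first within each word) into big-endian
        # byte order and unpack them as an IEEE 754 single-precision float.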
datal = bytearray(4)
datal[1] = raw_regs[0] & 0xFF
datal[0] = (raw_regs[0] >> 8) & 0xFF
datal[3] = raw_regs[1] & 0xFF
datal[2] = (raw_regs[1] >> 8) & 0xFF
return struct.unpack_from('>f', datal)[0]
    # TODO - this should probably be read-only
@gen.coroutine
def set(self, value=None, alias=None, **kwargs):
""" Sets new on/off status. Disable pending timeouts """
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
raise Exception("Unit_register object is read-only")
        # Setting the value is not supported; the code below is unreachable after the raise above
if value is not None:
self.arm.neuron.client.write_register(self.valreg, int(value), unit=self.arm.modbus_address)
if isinstance(self.post_write, list):
for coil in self.post_write:
self.arm.neuron.client.write_coil(coil, 1, unit=self.arm.modbus_address)
"""
self.arm.neuron.client.write_register(self.valreg, 1 if value else 0, unit=self.arm.modbus_address)
if not (timeout is None):
timeout = int(timeout)
if timeout > 65535:
timeout = 65535
self.arm.neuron.client.write_register(self.toreg, timeout, unit=self.arm.modbus_address)
if self.reset_coil >= 0 and reset is not None:
if reset != 0:
self.nvsavvalue = 0
self.arm.neuron.client.write_coil(self.reset_coil, 1, unit=self.arm.modbus_address)
logger.info("Performed reset of board %s" % self.circuit)
"""
def full(self):
ret = {'dev': 'unit_register',
'circuit': self.circuit,
'value': (self.regvalue()),
'glob_dev_id': self.dev_id}
if self.name is not None:
ret['name'] = self.name
if self.valid_mask is not None:
ret['valid'] = "true" if (self.valid_mask() & (1 << self.valreg - 1)) != 0 else "false"
if self.unit is not None:
ret['unit'] = self.unit
if self.alias != '':
ret['alias'] = self.alias
return ret
def simple(self):
return {'dev': 'unit_register',
'circuit': self.circuit,
'value': self.regvalue()}
@property
def value(self):
try:
if self.regvalue():
print("CTU " + str(self.circuit))
return self.regvalue()
except:
pass
return 0
def get(self):
return self.full()
def get_state(self):
""" Returns ( status, is_pending )
current on/off status is taken from last mcp value without reading it from hardware
is_pending is Boolean
"""
return (self.value)
class Register():
def __init__(self, circuit, arm, post, reg, reg_type="holding", dev_id=0, major_group=0, legacy_mode=True):
self.alias = ""
self.devtype = REGISTER
self.dev_id = dev_id
self.circuit = circuit
self.arm = arm
self.major_group = major_group
self.legacy_mode = legacy_mode
self.valreg = reg
self.reg_type = reg_type
if reg_type == "input":
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address, is_input=True)[0]
else:
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address, is_input=False)[0]
def full(self):
ret = {'dev': 'register',
'circuit': self.circuit,
'value': self.regvalue(),
'glob_dev_id': self.dev_id}
if self.alias != '':
ret['alias'] = self.alias
return ret
def simple(self):
return {'dev': 'register',
'circuit': self.circuit,
'value': self.regvalue()}
@property
def value(self):
try:
if self.regvalue():
return self.regvalue()
except:
pass
return 0
def get(self):
return self.full()
def get_state(self):
""" Returns ( status, is_pending )
current on/off status is taken from last mcp value without reading it from hardware
is_pending is Boolean
"""
return (self.value)
@gen.coroutine
def set_state(self, value):
""" Sets new on/off status. Disable pending timeouts
"""
self.arm.neuron.client.write_register(self.valreg, value if value else 0, unit=self.arm.modbus_address)
raise gen.Return(value if value else 0)
@gen.coroutine
def set(self, value=None, alias=None):
""" Sets new on/off status. Disable pending timeouts
"""
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if value is not None:
value = int(value)
self.arm.neuron.client.write_register(self.valreg, value if value else 0, unit=self.arm.modbus_address)
raise gen.Return(self.full())
class Input():
def __init__(self, circuit, arm, reg, mask, regcounter=None, regdebounce=None, regmode=None, regtoggle=None, regpolarity=None,
dev_id=0, major_group=0, modes=['Simple'], ds_modes=['Simple'], counter_modes=['Enabled', 'Disabled'], legacy_mode=True):
self.alias = ""
self.devtype = INPUT
self.dev_id = dev_id
self.circuit = circuit
self.arm = arm
self.modes = modes
self.ds_modes = ds_modes
self.counter_modes = counter_modes
self.major_group = major_group
self.legacy_mode = legacy_mode
self.bitmask = mask
self.regcounter = regcounter
self.regdebounce = regdebounce
self.regmode = regmode
self.regtoggle = regtoggle
self.regpolarity = regpolarity
self.reg = reg
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.reg, unit=self.arm.modbus_address)[0]
self.regcountervalue = self.regdebouncevalue = lambda: None
if not (regcounter is None): self.regcountervalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, regcounter, unit=self.arm.modbus_address)[0] + (self.arm.neuron.modbus_cache_map.get_register(1, regcounter + 1, unit=self.arm.modbus_address)[0] << 16)
if not (regdebounce is None): self.regdebouncevalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, regdebounce, unit=self.arm.modbus_address)[0]
self.mode = 'Simple'
self.ds_mode = 'Simple'
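        # If DirectSwitch is supported, recover the currently configured mode for this input from the
        # mode/polarity/toggle registers (each input owns one bit, selected by self.bitmask).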
if 'DirectSwitch' in self.modes:
curr_ds = self.arm.neuron.modbus_cache_map.get_register(1, self.regmode, unit=self.arm.modbus_address)[0]
if (curr_ds & self.bitmask) > 0:
self.mode = 'DirectSwitch'
curr_ds_pol = self.arm.neuron.modbus_cache_map.get_register(1, self.regpolarity, unit=self.arm.modbus_address)[0]
curr_ds_tgl = self.arm.neuron.modbus_cache_map.get_register(1, self.regtoggle, unit=self.arm.modbus_address)[0]
if (curr_ds_pol & self.bitmask):
self.ds_mode = 'Inverted'
elif (curr_ds_tgl & self.bitmask):
self.ds_mode = 'Toggle'
self.counter_mode = "Enabled"
@property
def debounce(self):
try:
return self.regdebouncevalue()
except:
pass
return 0
@property
def value(self):
try:
if self.regvalue() & self.bitmask: return 1
except:
pass
return 0
@property
def counter(self):
try:
return self.regcountervalue()
except:
return 0
def value_delta(self, new_val):
return (self.regvalue() ^ new_val) & self.bitmask
def full(self):
ret = {'dev': 'input',
'circuit': self.circuit,
'value': self.value,
'debounce': self.debounce,
'counter_modes': self.counter_modes,
'counter_mode': self.counter_mode,
'counter': self.counter if self.counter_mode == 'Enabled' else 0,
'mode': self.mode,
'modes': self.modes,
'glob_dev_id': self.dev_id }
if self.mode == 'DirectSwitch':
ret['ds_mode'] = self.ds_mode
ret['ds_modes'] = self.ds_modes
if self.alias != '':
ret['alias'] = self.alias
return ret
def simple(self):
if self.counter_mode == 'Enabled':
return {'dev': 'input',
'circuit': self.circuit,
'value': self.value,
'counter': self.counter}
else:
return {'dev': 'input',
'circuit': self.circuit,
'value': self.value}
@gen.coroutine
def set(self, debounce=None, mode=None, counter=None, counter_mode=None, ds_mode=None, alias=None):
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if mode is not None and mode != self.mode and mode in self.modes:
self.mode = mode
if self.mode == 'DirectSwitch':
curr_ds = yield self.arm.neuron.modbus_cache_map.get_register_async(1, self.regmode, unit=self.arm.modbus_address)
curr_ds_val = curr_ds[0]
curr_ds_val = curr_ds_val | int(self.bitmask)
yield self.arm.neuron.client.write_register(self.regmode, curr_ds_val, unit=self.arm.modbus_address)
else:
curr_ds = yield self.arm.neuron.modbus_cache_map.get_register_async(1, self.regmode, unit=self.arm.modbus_address)
curr_ds_val = curr_ds[0]
curr_ds_val = curr_ds_val & (~int(self.bitmask))
yield self.arm.neuron.client.write_register(self.regmode, curr_ds_val, unit=self.arm.modbus_address)
if self.mode == 'DirectSwitch' and ds_mode is not None and ds_mode in self.ds_modes:
self.ds_mode = ds_mode
curr_ds_pol = yield self.arm.neuron.modbus_cache_map.get_register_async(1, self.regpolarity, unit=self.arm.modbus_address)
curr_ds_tgl = yield self.arm.neuron.modbus_cache_map.get_register_async(1, self.regtoggle, unit=self.arm.modbus_address)
curr_ds_pol_val = curr_ds_pol[0]
curr_ds_tgl_val = curr_ds_tgl[0]
if self.ds_mode == 'Inverted':
curr_ds_pol_val = curr_ds_pol_val | self.bitmask
curr_ds_tgl_val = curr_ds_tgl_val & (~self.bitmask)
elif self.ds_mode == 'Toggle':
curr_ds_pol_val = curr_ds_pol_val & (~self.bitmask)
curr_ds_tgl_val = curr_ds_tgl_val | self.bitmask
else:
curr_ds_pol_val = curr_ds_pol_val & (~self.bitmask)
curr_ds_tgl_val = curr_ds_tgl_val & (~self.bitmask)
yield self.arm.neuron.client.write_register(self.regpolarity, curr_ds_pol_val, unit=self.arm.modbus_address)
yield self.arm.neuron.client.write_register(self.regtoggle, curr_ds_tgl_val, unit=self.arm.modbus_address)
if counter_mode is not None and counter_mode in self.counter_modes and counter_mode != self.counter_mode:
self.counter_mode = counter_mode
if debounce is not None:
if self.regdebounce is not None:
yield self.arm.neuron.client.write_register(self.regdebounce, int(float(debounce)), unit=self.arm.modbus_address)
if counter is not None:
if self.regcounter is not None:
yield self.arm.neuron.client.write_registers(self.regcounter, ((int(float(counter)) & 0xFFFF), (int(float(counter)) >> 16) & 0xFFFF), unit=self.arm.modbus_address)
raise gen.Return(self.full())
def get(self):
""" Returns ( value, debounce )
current on/off value is taken from last value without reading it from hardware
"""
return (self.value, self.debounce)
def get_value(self):
""" Returns value
current on/off value is taken from last value without reading it from hardware
"""
return self.value
class Uart():
def __init__(self, circuit, arm, reg, dev_id=0, parity_modes=['None'], speed_modes=['19200bps'], stopb_modes = ['One'],
address_reg=-1, major_group=0, legacy_mode=True):
self.alias = ""
self.devtype = UART
self.dev_id = dev_id
self.circuit = circuit
self.legacy_mode = legacy_mode
self.arm = arm
self.parity_modes = parity_modes
self.speed_modes = speed_modes
self.stopb_modes = stopb_modes
self.speed_mask = 0x0001000f # Termios mask
self.parity_mask = 0x00000300 # Termios mask
self.stopb_mask = 0x00000040 # Termios mask
self.major_group = major_group
self.valreg = reg
self.address_reg = address_reg
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address)[0]
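        # The configuration register uses termios-style c_cflag bit fields (see the masks above);
        # parity, baud rate and stop-bit settings are recovered below by masking the cached value.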
parity_mode_val = (self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address)[0]) & self.parity_mask
speed_mode_val = (self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address)[0]) & self.speed_mask
stopb_mode_val = (self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address)[0]) & self.stopb_mask
if self.address_reg != -1:
self.address_val = self.arm.neuron.modbus_cache_map.get_register(1, self.address_reg, unit=self.arm.modbus_address)[0]
self.addressvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.address_reg, unit=self.arm.modbus_address)[0]
else:
self.address_val = 0
self.addressvalue = None
if parity_mode_val == 0x00000300:
self.parity_mode = 'Odd'
elif parity_mode_val == 0x00000200:
self.parity_mode = 'Even'
else:
self.parity_mode = 'None'
if speed_mode_val == 0x0000000b:
self.speed_mode = '2400bps'
elif speed_mode_val == 0x0000000c:
self.speed_mode = '4800bps'
elif speed_mode_val == 0x0000000d:
self.speed_mode = '9600bps'
elif speed_mode_val == 0x0000000e:
self.speed_mode = '19200bps'
elif speed_mode_val == 0x0000000f:
self.speed_mode = '38400bps'
elif speed_mode_val == 0x00010001:
self.speed_mode = '57600bps'
elif speed_mode_val == 0x00010002:
self.speed_mode = '115200bps'
else:
self.speed_mode = '19200bps'
if stopb_mode_val == 0x00000040:
self.stopb_mode = 'Two'
else:
self.stopb_mode = 'One'
@property
def conf(self):
try:
if self.regvalue(): return self.regvalue()
except:
pass
return 0
def full(self):
ret = {'dev': 'uart',
'circuit': self.circuit,
'conf_value': self.conf,
'parity_modes': self.parity_modes,
'parity_mode': self.parity_mode,
'speed_modes': self.speed_modes,
'speed_mode': self.speed_mode,
'stopb_modes': self.stopb_modes,
'stopb_mode': self.stopb_mode,
'sw_address': self.address_val,
'glob_dev_id': self.dev_id}
if self.alias != '':
ret['alias'] = self.alias
return ret
def simple(self):
return {'dev': 'uart',
'circuit': self.circuit,
'conf_value': self.conf}
@gen.coroutine
def set(self, conf_value=None, parity_mode=None, speed_mode=None, stopb_mode=None, sw_address=None, alias=None):
val = self.regvalue()
if conf_value is not None:
self.arm.neuron.client.write_register(self.valreg, conf_value, unit=self.arm.modbus_address)
if parity_mode is not None and parity_mode in self.parity_modes and parity_mode != self.parity_mode:
val &= ~self.parity_mask
if parity_mode == 'None':
val = val
elif parity_mode == 'Odd':
val |= 0x00000300
elif parity_mode == 'Even':
val |= 0x00000200
else:
val = val
self.arm.neuron.client.write_register(self.valreg, val, unit=self.arm.modbus_address)
self.parity_mode = parity_mode
if speed_mode is not None and speed_mode in self.speed_modes and speed_mode != self.speed_mode:
val &= ~self.speed_mask
if speed_mode == '2400bps':
val |= 0x0000000b
elif speed_mode == '4800bps':
val |= 0x0000000c
elif speed_mode == '9600bps':
val |= 0x0000000d
elif speed_mode == '19200bps':
val |= 0x0000000e
elif speed_mode == '38400bps':
val |= 0x0000000f
elif speed_mode == '57600bps':
val |= 0x00010001
elif speed_mode == '115200bps':
val |= 0x00010002
else:
val |= 0x0000000e
self.arm.neuron.client.write_register(self.valreg, val, unit=self.arm.modbus_address)
self.speed_mode = speed_mode
if stopb_mode is not None and stopb_mode in self.stopb_modes and stopb_mode != self.stopb_mode:
val &= ~self.stopb_mask
if stopb_mode == 'One':
val = val
elif stopb_mode == 'Two':
val |= 0x00000040
self.arm.neuron.client.write_register(self.valreg, val, unit=self.arm.modbus_address)
self.stopb_mode = stopb_mode
if sw_address is not None and self.address_val != 0:
self.arm.neuron.client.write_register(self.address_reg, sw_address, unit=self.arm.modbus_address)
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
raise gen.Return(self.full())
def get(self):
return self.full()
def get_value(self):
""" Returns value
current on/off value is taken from last value without reading it from hardware
"""
return self.conf
def uint16_to_int(inp):
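    # Reinterpret an unsigned 16-bit register value as a signed integer (values above 0x8000 wrap to negative).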
if inp > 0x8000: return (inp - 0x10000)
return inp
class WiFiAdapter():
def __init__(self, circuit, dev_id=0, major_group=0, ip_addr="192.168.1.100", enabled=False, enabled_routing=False, legacy_mode=True):
self.alias = ""
self.devtype = WIFI
self.dev_id = dev_id
self.circuit = circuit
self.legacy_mode = legacy_mode
self.major_group = major_group
self.enabled_val = enabled
self.enabled_routing_val = enabled_routing
self.ip_addr = ip_addr
self.packets_recieved = 0
self.packets_sent = 0
try:
if ("UP" in subprocess.check_output(["ifconfig", "-a", "wlan0"])) and ("running" in subprocess.check_output(["systemctl", "status", "unipidns"])):
self.enabled_val = True
except subprocess.CalledProcessError:
self.enabled_val = False
try:
if ("MASQUERADE" in subprocess.check_output(["iptables", "-t", "nat", "-L"])):
self.enabled_routing_val = True
except subprocess.CalledProcessError:
self.enabled_routing_val = False
@property
def enabled(self):
if self.enabled_val:
return 'Enabled'
else:
return 'Disabled'
@property
def routing_enabled(self):
if self.enabled_routing_val:
return 'Enabled'
else:
return 'Disabled'
@gen.coroutine
def get_packets(self):
subprocess.check_output(["ifconfig", "-a", "wlan0"])
def full(self):
ret = {'dev': 'wifi',
'ap_state': self.enabled,
'eth0_masq': self.routing_enabled,
'circuit': self.circuit,
'glob_dev_id': self.dev_id}
if self.alias != '':
ret['alias'] = self.alias
return ret
def simple(self):
return {'dev': 'wifi',
'circuit': self.circuit,
'ap_state': self.enabled,
'eth0_masq': self.routing_enabled,
#'ip': self.ip_addr,
'glob_dev_id': self.dev_id}
@gen.coroutine
def set(self, ap_state=None, eth0_masq=None, alias=None):
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if ap_state is not None and ap_state in ['Enabled', 'Disabled'] and ap_state != self.enabled:
if ap_state == 'Enabled':
subprocess.check_output(["systemctl", "start", "unipidns"])
self.enabled_val = True
else:
if not (("UP" in subprocess.check_output(["ifconfig", "-a", "wlan0"])) and ("running" in subprocess.check_output(["systemctl", "status", "unipidns"]))):
raise Exception("WiFi could not be terminated due to invalid state (possibly is starting up?)")
subprocess.check_output(["systemctl", "stop", "unipidns"])
subprocess.check_output(["systemctl", "stop", "unipiap"])
self.enabled_val = False
if eth0_masq is not None and eth0_masq in ['Enabled', 'Disabled'] and eth0_masq != self.routing_enabled:
if eth0_masq == 'Enabled':
subprocess.check_output(["iptables", "-t", "nat", "-A", "POSTROUTING", "-o", "eth0", "-j", "MASQUERADE"])
self.enabled_routing_val = True
else:
subprocess.check_output(["iptables", "-t", "nat", "-D", "POSTROUTING", "-o", "eth0", "-j", "MASQUERADE"])
self.enabled_routing_val = False
raise gen.Return(self.full())
def get(self):
return self.full()
class AnalogOutput():
def __init__(self, circuit, arm, reg, regcal=-1, regmode=-1, reg_res=0, dev_id=0, modes=['Voltage'], major_group=0, legacy_mode=True):
self.alias = ""
self.devtype = AO
self.dev_id = dev_id
self.circuit = circuit
self.reg = reg
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.reg, unit=self.arm.modbus_address)[0]
self.regcal = regcal
self.regmode = regmode
self.legacy_mode = legacy_mode
self.reg_res = reg_res
self.regresvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.reg_res, unit=self.arm.modbus_address)[0]
self.modes = modes
self.arm = arm
self.major_group = major_group
if regcal >= 0:
self.offset = (uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, self.regcal + 1, unit=self.arm.modbus_address)[0]) / 10000.0)
else:
self.offset = 0
self.is_voltage = lambda: True
if circuit == '1_01' and regcal >= 0:
self.is_voltage = lambda: bool(self.arm.neuron.modbus_cache_map.get_register(1, self.regmode, unit=self.arm.modbus_address)[0] == 0)
if self.is_voltage():
self.mode = 'Voltage'
elif self.arm.neuron.modbus_cache_map.get_register(1, self.regmode, unit=self.arm.modbus_address)[0] == 1:
self.mode = 'Current'
else:
self.mode = 'Resistance'
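        # Output scaling: the DAC full scale is 4095 counts of the reference voltage; on circuit '1_01'
        # the gain is corrected by a calibration register stored in 1/10000 steps. The x3 (voltage) and
        # x10 (current) multipliers below are assumed to match the hardware range dividers.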
self.reg_shift = 2 if self.is_voltage() else 0
if self.circuit == '1_01':
self.factor = arm.volt_ref / 4095 * (1 + uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, regcal + self.reg_shift, unit=self.arm.modbus_address)[0]) / 10000.0)
self.factorx = arm.volt_refx / 4095 * (1 + uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, regcal + self.reg_shift, unit=self.arm.modbus_address)[0]) / 10000.0)
else:
self.factor = arm.volt_ref / 4095 * (1 / 10000.0)
self.factorx = arm.volt_refx / 4095 * (1 / 10000.0)
if self.is_voltage():
self.factor *= 3
self.factorx *= 3
else:
self.factor *= 10
self.factorx *= 10
@property
def value(self):
try:
if self.circuit == '1_01':
return self.regvalue() * self.factor + self.offset
else:
return self.regvalue() * 0.0025
except:
return 0
@property
def res_value(self):
try:
if self.circuit == '1_01':
return float(self.regresvalue()) / 10.0
else:
return float(self.regvalue()) * 0.0025
except:
return 0
def full(self):
ret = {'dev': 'ao',
'circuit': self.circuit,
'mode': self.mode,
'modes': self.modes,
'glob_dev_id': self.dev_id}
if self.mode == 'Resistance':
ret['value'] = self.res_value
ret['unit'] = (unit_names[OHM])
else:
ret['value'] = self.value
ret['unit'] = (unit_names[VOLT]) if self.is_voltage() else (unit_names[AMPERE])
if self.alias != '':
ret['alias'] = self.alias
return ret
def simple(self):
if self.mode == 'Resistance':
return {'dev': 'ao',
'circuit': self.circuit,
'value': self.res_value}
else:
return {'dev': 'ao',
'circuit': self.circuit,
'value': self.value}
@gen.coroutine
def set_value(self, value):
if self.circuit == '1_01':
valuei = int((float(value) - self.offset) / self.factor)
else:
valuei = int((float(value) / 0.0025))
if valuei < 0:
valuei = 0
elif valuei > 4095:
valuei = 4095
self.arm.neuron.client.write_register(self.reg, valuei, unit=self.arm.modbus_address)
if self.circuit == '1_01':
raise gen.Return(float(valuei) * self.factor + self.offset)
else:
raise gen.Return(float(valuei) * 0.0025)
@gen.coroutine
def set(self, value=None, mode=None, alias=None):
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if mode is not None and mode in self.modes and self.regmode != -1:
val = self.arm.neuron.modbus_cache_map.get_register(1, self.regmode, unit=self.arm.modbus_address)[0]
cur_val = self.value
if mode == "Voltage":
val = 0
if (self.mode == 'Current'):
self.factor = (self.factor / 10) * 3
elif mode == "Current":
val = 1
if (self.mode == 'Voltage' or self.mode == 'Resistance'):
self.factor = (self.factor / 3) * 10
elif mode == "Resistance":
val = 3
self.mode = mode
self.arm.neuron.client.write_register(self.regmode, val, unit=self.arm.modbus_address)
if mode == "Voltage" or mode == "Current":
yield self.set_value(cur_val) # Restore original value (i.e. 1.5V becomes 1.5mA)
if not (value is None):
if self.circuit == '1_01':
valuei = int((float(value) - self.offset) / self.factor)
else:
valuei = int((float(value) / 0.0025))
if valuei < 0:
valuei = 0
elif valuei > 4095:
valuei = 4095
self.arm.neuron.client.write_register(self.reg, valuei, unit=self.arm.modbus_address)
raise gen.Return(self.full())
def get(self):
return self.full()
class AnalogInput():
def __init__(self, circuit, arm, reg, regcal=-1, regmode=-1, dev_id=0, major_group=0, legacy_mode=True, tolerances='brain', modes=['Voltage']):
self.alias = ""
self.devtype = AI
self.dev_id = dev_id
self.circuit = circuit
self.valreg = reg
self.arm = arm
self.regvalue = lambda: self.arm.neuron.modbus_cache_map.get_register(1, self.valreg, unit=self.arm.modbus_address)[0]
self.regcal = regcal
self.legacy_mode = legacy_mode
self.regmode = regmode
self.modes = modes
self.mode = 'Voltage'
self.unit_name = unit_names[VOLT]
self.tolerances = tolerances
self.sec_ai_mode = 0
if self.tolerances == '500series':
self.sec_ai_mode = self.arm.neuron.modbus_cache_map.get_register(1, self.regmode, unit=self.arm.modbus_address)[0]
self.mode = self.get_500_series_mode()
self.unit_name = self.internal_unit
self.major_group = major_group
self.is_voltage = lambda: True
if self.tolerances == 'brain' and regcal >= 0:
self.is_voltage = lambda: bool(self.arm.neuron.modbus_cache_map.get_register(1, self.regmode, unit=self.arm.modbus_address)[0] == 0)
if self.is_voltage():
self.mode = "Voltage"
else:
self.mode = "Current"
self.unit_name = unit_names[AMPERE]
self.tolerance_mode = self.get_tolerance_mode()
self.reg_shift = 2 if self.is_voltage() else 0
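        # Input scaling mirrors the AO calibration: gain correction and offset come from the calibration
        # registers (1/10000 steps), times 3 for the voltage range or times 10 for the current range
        # (assumed hardware dividers).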
if regcal >= 0:
self.vfactor = arm.volt_ref / 4095 * (1 + uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, regcal + self.reg_shift + 1, unit=self.arm.modbus_address)[0]) / 10000.0)
self.vfactorx = arm.volt_refx / 4095 * (1 + uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, regcal + self.reg_shift + 1, unit=self.arm.modbus_address)[0]) / 10000.0)
self.voffset = (uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, regcal + self.reg_shift + 2, unit=self.arm.modbus_address)[0]) / 10000.0)
else:
self.vfactor = arm.volt_ref / 4095 * (1 / 10000.0)
self.vfactorx = arm.volt_refx / 4095 * (1 / 10000.0)
self.voffset = 0
if self.is_voltage():
self.vfactor *= 3
self.vfactorx *= 3
else:
self.vfactor *= 10
self.vfactorx *= 10
@property
def value(self):
try:
if self.circuit == '1_01':
if self.regvalue() == 65535 or self.regvalue() == 0:
return 0
else:
return (self.regvalue() * self.vfactor) + self.voffset
else:
byte_arr = bytearray(4)
byte_arr[2] = (self.regvalue() >> 8) & 255
byte_arr[3] = self.regvalue() & 255
byte_arr[0] = (self.arm.neuron.modbus_cache_map.get_register(1, self.valreg + 1, unit=self.arm.modbus_address)[0] >> 8) & 255
byte_arr[1] = self.arm.neuron.modbus_cache_map.get_register(1, self.valreg + 1, unit=self.arm.modbus_address)[0] & 255
return struct.unpack('>f', str(byte_arr))[0]
except Exception, E:
logger.exception(str(E))
return 0
def get_tolerance_modes(self):
if self.tolerances == 'brain':
if self.mode == 'Voltage':
return ["10.0"]
else:
return ["20.0"]
elif self.tolerances == '500series':
if self.mode == 'Voltage':
return ["0.0", "2.5", "10.0"]
elif self.mode == 'Current':
return ["20.0"]
elif self.mode == "Resistance":
return ["1960.0", "100.0"]
def get_tolerance_mode(self):
if self.tolerances == 'brain':
if self.mode == 'Voltage':
return "10.0"
else:
return "20.0"
elif self.tolerances == '500series':
if self.sec_ai_mode == 0:
return "0.0"
elif self.sec_ai_mode == 1:
return "10.0"
elif self.sec_ai_mode == 2:
return "2.5"
elif self.sec_ai_mode == 3:
return "20.0"
elif self.sec_ai_mode == 4:
return "1960.0"
elif self.sec_ai_mode == 5:
return "100.0"
def get_500_series_mode(self):
if self.sec_ai_mode == 0:
return "Voltage"
elif self.sec_ai_mode == 1:
return "Voltage"
elif self.sec_ai_mode == 2:
return "Voltage"
elif self.sec_ai_mode == 3:
return "Current"
elif self.sec_ai_mode == 4:
return "Resistance"
elif self.sec_ai_mode == 5:
return "Resistance"
def get_500_series_sec_mode(self):
if self.mode == "Voltage":
if self.tolerance_mode == "0.0":
return 0
elif self.tolerance_mode == "10.0":
return 1
elif self.tolerance_mode == "2.5":
return 2
elif self.mode == "Current":
if self.tolerance_mode == "20.0":
return 3
elif self.mode == "Resistance":
if self.tolerance_mode == "1960.0":
return 4
elif self.tolerance_mode == "100.0":
return 5
@gen.coroutine
def set(self, mode=None, range=None, alias=None):
if alias is not None:
if Devices.add_alias(alias, self, file_update=True):
self.alias = alias
if mode is not None and mode in self.modes:
if self.tolerances == "brain" and mode != self.mode:
self.mode = mode
if self.mode == "Voltage":
self.unit_name = unit_names[VOLT]
yield self.arm.neuron.client.write_register(self.regmode, 0, unit=self.arm.modbus_address)
elif self.mode == "Current":
self.unit_name = unit_names[AMPERE]
yield self.arm.neuron.client.write_register(self.regmode, 1, unit=self.arm.modbus_address)
self.reg_shift = 2 if self.mode == "Voltage" else 0
if self.regcal >= 0:
self.vfactor = self.arm.volt_ref / 4095 * (1 + uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, self.regcal + self.reg_shift + 1, unit=self.arm.modbus_address)[0]) / 10000.0)
self.vfactorx = self.arm.volt_refx / 4095 * (1 + uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, self.regcal + self.reg_shift + 1, unit=self.arm.modbus_address)[0]) / 10000.0)
self.voffset = (uint16_to_int(self.arm.neuron.modbus_cache_map.get_register(1, self.regcal + self.reg_shift + 2, unit=self.arm.modbus_address)[0]) / 10000.0)
else:
self.vfactor = self.arm.volt_ref / 4095 * (1 / 10000.0)
self.vfactorx = self.arm.volt_refx / 4095 * (1 / 10000.0)
self.voffset = 0
if self.mode == "Voltage":
self.vfactor *= 3
self.vfactorx *= 3
else:
self.vfactor *= 10
self.vfactorx *= 10
self.tolerance_mode = self.get_tolerance_mode()
elif self.tolerances == "500series":
self.mode = mode
if self.mode == "Voltage":
self.unit_name = unit_names[VOLT]
self.sec_ai_mode = 1
elif self.mode == "Current":
self.unit_name = unit_names[AMPERE]
self.sec_ai_mode = 3
elif self.mode == "Resistance":
self.unit_name = unit_names[OHM]
self.sec_ai_mode = 4
self.tolerance_mode = self.get_tolerance_mode()
yield self.arm.neuron.client.write_register(self.regmode, self.sec_ai_mode, unit=self.arm.modbus_address)
if self.tolerances == '500series' and range is not None and range in self.get_tolerance_modes():
if self.mode == "Voltage":
self.unit_name = unit_names[VOLT]
elif self.mode == "Current":
self.unit_name = unit_names[AMPERE]
else:
self.unit_name = unit_names[OHM]
self.tolerance_mode = range
self.sec_ai_mode = self.get_500_series_sec_mode()
yield self.arm.neuron.client.write_register(self.regmode, self.sec_ai_mode, unit=self.arm.modbus_address)
raise gen.Return(self.full())
def full(self):
ret = {'dev': 'ai',
'circuit': self.circuit,
'value': self.value,
'unit': self.unit_name,
'glob_dev_id': self.dev_id,
'mode': self.mode,
'modes': self.modes,
'range': self.tolerance_mode,
'range_modes': self.get_tolerance_modes()}
if self.alias != '':
ret['alias'] = self.alias
return ret
def get(self):
return self.full()
def simple(self):
return {'dev': 'ai',
'circuit': self.circuit,
'value': self.value}
@property
def voltage(self):
return self.value
@property
def internal_unit(self):
if self.mode == "Voltage":
return unit_names[VOLT]
elif self.mode == "Current":
return unit_names[AMPERE]
else:
return unit_names[OHM]
|
UniPiTechnology/evok
|
evok/neuron.py
|
Python
|
apache-2.0
| 116,035
|
[
"NEURON"
] |
e521a873dd7a9381be96d7dea249634691ffdcbb9412ec5dff4fe796284b7933
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert Android xml resources to API 14 compatible.
There are two reasons why we cannot just use API 17 attributes,
so we generate another set of resources with this script.
1. paddingStart attribute can cause a crash on Galaxy Tab 2.
2. There is a bug that paddingStart does not override paddingLeft on
JB-MR1. This is fixed on JB-MR2.
Therefore, this resource generation script can be removed when
we drop the support for JB-MR1.
Please refer to http://crbug.com/235118 for the details.
"""
import optparse
import os
import re
import shutil
import sys
import xml.dom.minidom as minidom
from util import build_utils
# Note that we are assuming 'android:' is an alias of
# the namespace 'http://schemas.android.com/apk/res/android'.
GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity')
# Almost all the attributes that has "Start" or "End" in
# its name should be mapped.
ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft',
'drawableStart' : 'drawableLeft',
'layout_alignStart' : 'layout_alignLeft',
'layout_marginStart' : 'layout_marginLeft',
'layout_alignParentStart' : 'layout_alignParentLeft',
'layout_toStartOf' : 'layout_toLeftOf',
'paddingEnd' : 'paddingRight',
'drawableEnd' : 'drawableRight',
'layout_alignEnd' : 'layout_alignRight',
'layout_marginEnd' : 'layout_marginRight',
'layout_alignParentEnd' : 'layout_alignParentRight',
'layout_toEndOf' : 'layout_toRightOf'}
ATTRIBUTES_TO_MAP = dict(['android:' + k, 'android:' + v] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
ATTRIBUTES_TO_MAP_REVERSED = dict([v, k] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
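# Reverse map (Left/Right -> Start/End), used by AssertNotDeprecatedAttribute below to flag deprecated
# left/right attributes in resources that should already be RTL-aware.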
def IterateXmlElements(node):
"""minidom helper function that iterates all the element nodes.
Iteration order is pre-order depth-first."""
if node.nodeType == node.ELEMENT_NODE:
yield node
for child_node in node.childNodes:
for child_node_element in IterateXmlElements(child_node):
yield child_node_element
def AssertNotDeprecatedAttribute(name, value, filename):
"""Raises an exception if the given attribute is deprecated."""
msg = None
if name in ATTRIBUTES_TO_MAP_REVERSED:
msg = '{0} should use {1} instead of {2}'.format(filename,
ATTRIBUTES_TO_MAP_REVERSED[name], name)
elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):
msg = '{0} should use start/end instead of left/right for {1}'.format(
filename, name)
if msg:
msg += ('\nFor background, see: http://android-developers.blogspot.com/'
'2013/03/native-rtl-support-in-android-42.html\n'
'If you have a legitimate need for this attribute, discuss with '
'kkimlabs@chromium.org or newt@chromium.org')
raise Exception(msg)
def WriteDomToFile(dom, filename):
"""Write the given dom to filename."""
build_utils.MakeDirectory(os.path.dirname(filename))
with open(filename, 'w') as f:
dom.writexml(f, '', ' ', '\n', encoding='utf-8')
def HasStyleResource(dom):
"""Return True if the dom is a style resource, False otherwise."""
root_node = IterateXmlElements(dom).next()
return bool(root_node.nodeName == 'resources' and
list(root_node.getElementsByTagName('style')))
def ErrorIfStyleResourceExistsInDir(input_dir):
"""If a style resource is in input_dir, raises an exception."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
dom = minidom.parse(input_filename)
if HasStyleResource(dom):
raise Exception('error: style file ' + input_filename +
' should be under ' + input_dir +
'-v17 directory. Please refer to '
'http://crbug.com/243952 for the details.')
def GenerateV14LayoutResourceDom(dom, filename, assert_not_deprecated=True):
"""Convert layout resource to API 14 compatible layout resource.
Args:
dom: Parsed minidom object to be modified.
filename: Filename that the DOM was parsed from.
assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
cause an exception to be thrown.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
# Iterate all the elements' attributes to find attributes to convert.
for element in IterateXmlElements(dom):
for name, value in list(element.attributes.items()):
# Convert any API 17 Start/End attributes to Left/Right attributes.
# For example, from paddingStart="10dp" to paddingLeft="10dp"
# Note: gravity attributes are not necessary to convert because
# start/end values are backward-compatible. Explained at
# https://plus.sandbox.google.com/+RomanNurik/posts/huuJd8iVVXY?e=Showroom
if name in ATTRIBUTES_TO_MAP:
element.setAttribute(ATTRIBUTES_TO_MAP[name], value)
del element.attributes[name]
is_modified = True
elif assert_not_deprecated:
AssertNotDeprecatedAttribute(name, value, filename)
return is_modified
def GenerateV14StyleResourceDom(dom, filename, assert_not_deprecated=True):
"""Convert style resource to API 14 compatible style resource.
Args:
dom: Parsed minidom object to be modified.
filename: Filename that the DOM was parsed from.
assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
cause an exception to be thrown.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
for style_element in dom.getElementsByTagName('style'):
for item_element in style_element.getElementsByTagName('item'):
name = item_element.attributes['name'].value
value = item_element.childNodes[0].nodeValue
if name in ATTRIBUTES_TO_MAP:
item_element.attributes['name'].value = ATTRIBUTES_TO_MAP[name]
is_modified = True
elif assert_not_deprecated:
AssertNotDeprecatedAttribute(name, value, filename)
return is_modified
def GenerateV14LayoutResource(input_filename, output_v14_filename,
output_v17_filename):
"""Convert API 17 layout resource to API 14 compatible layout resource.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
If the generated resource is identical to the original resource,
don't do anything. If not, write the generated resource to
output_v14_filename, and copy the original resource to output_v17_filename.
"""
dom = minidom.parse(input_filename)
is_modified = GenerateV14LayoutResourceDom(dom, input_filename)
if is_modified:
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
# Copy the original resource.
build_utils.MakeDirectory(os.path.dirname(output_v17_filename))
shutil.copy2(input_filename, output_v17_filename)
def GenerateV14StyleResource(input_filename, output_v14_filename):
"""Convert API 17 style resources to API 14 compatible style resource.
Write the generated style resource to output_v14_filename.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
"""
dom = minidom.parse(input_filename)
GenerateV14StyleResourceDom(dom, input_filename)
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir):
"""Convert layout resources to API 14 compatible resources in input_dir."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
rel_filename = os.path.relpath(input_filename, input_dir)
output_v14_filename = os.path.join(output_v14_dir, rel_filename)
output_v17_filename = os.path.join(output_v17_dir, rel_filename)
GenerateV14LayoutResource(input_filename, output_v14_filename,
output_v17_filename)
def GenerateV14StyleResourcesInDir(input_dir, output_v14_dir):
"""Convert style resources to API 14 compatible resources in input_dir."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
rel_filename = os.path.relpath(input_filename, input_dir)
output_v14_filename = os.path.join(output_v14_dir, rel_filename)
GenerateV14StyleResource(input_filename, output_v14_filename)
def VerifyV14ResourcesInDir(input_dir, resource_type):
"""Verify that the resources in input_dir is compatible with v14, i.e., they
don't use attributes that cause crashes on certain devices. Print an error if
they have."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
warning_message = ('warning : ' + input_filename + ' has an RTL attribute, '
'i.e., attribute that has "start" or "end" in its name.'
' Pre-v17 resources should not include it because it '
'can cause crashes on certain devices. Please refer to '
'http://crbug.com/243952 for the details.')
dom = minidom.parse(input_filename)
if resource_type in ('layout', 'xml'):
if GenerateV14LayoutResourceDom(dom, input_filename, False):
print warning_message
elif resource_type == 'values':
if GenerateV14StyleResourceDom(dom, input_filename, False):
print warning_message
def AssertNoDeprecatedAttributesInDir(input_dir, resource_type):
"""Raises an exception if resources in input_dir have deprecated attributes,
e.g., paddingLeft, paddingRight"""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
dom = minidom.parse(input_filename)
if resource_type in ('layout', 'xml'):
GenerateV14LayoutResourceDom(dom, input_filename)
elif resource_type == 'values':
GenerateV14StyleResourceDom(dom, input_filename)
def ParseArgs():
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
parser.add_option('--res-dir',
help='directory containing resources '
'used to generate v14 compatible resources')
parser.add_option('--res-v14-compatibility-dir',
help='output directory into which '
'v14 compatible resources will be generated')
parser.add_option('--stamp', help='File to touch on success')
parser.add_option('--verify-only', action="store_true", help='Do not generate'
' v14 resources. Instead, just verify that the resources are already '
"compatible with v14, i.e. they don't use attributes that cause crashes "
'on certain devices.')
options, args = parser.parse_args()
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('res_dir', 'res_v14_compatibility_dir')
build_utils.CheckOptions(options, parser, required=required_options)
return options
def GenerateV14Resources(res_dir, res_v14_dir, verify_only):
for name in os.listdir(res_dir):
if not os.path.isdir(os.path.join(res_dir, name)):
continue
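    # Resource directory names look like 'values-sw600dp-v17': the first piece is the resource type,
    # the remaining pieces are configuration qualifiers.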
dir_pieces = name.split('-')
resource_type = dir_pieces[0]
qualifiers = dir_pieces[1:]
api_level_qualifier_index = -1
api_level_qualifier = ''
for index, qualifier in enumerate(qualifiers):
if re.match('v[0-9]+$', qualifier):
api_level_qualifier_index = index
api_level_qualifier = qualifier
break
# Android pre-v17 API doesn't support RTL. Skip.
if 'ldrtl' in qualifiers:
continue
input_dir = os.path.abspath(os.path.join(res_dir, name))
if verify_only:
if not api_level_qualifier or int(api_level_qualifier[1:]) < 17:
VerifyV14ResourcesInDir(input_dir, resource_type)
else:
AssertNoDeprecatedAttributesInDir(input_dir, resource_type)
else:
# We also need to copy the original v17 resource to *-v17 directory
# because the generated v14 resource will hide the original resource.
output_v14_dir = os.path.join(res_v14_dir, name)
output_v17_dir = os.path.join(res_v14_dir, name + '-v17')
# We only convert layout resources under layout*/, xml*/,
# and style resources under values*/.
if resource_type in ('layout', 'xml'):
if not api_level_qualifier:
GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir,
output_v17_dir)
elif resource_type == 'values':
if api_level_qualifier == 'v17':
output_qualifiers = qualifiers[:]
del output_qualifiers[api_level_qualifier_index]
output_v14_dir = os.path.join(res_v14_dir,
'-'.join([resource_type] +
output_qualifiers))
GenerateV14StyleResourcesInDir(input_dir, output_v14_dir)
elif not api_level_qualifier:
ErrorIfStyleResourceExistsInDir(input_dir)
def main():
options = ParseArgs()
res_v14_dir = options.res_v14_compatibility_dir
build_utils.DeleteDirectory(res_v14_dir)
build_utils.MakeDirectory(res_v14_dir)
GenerateV14Resources(options.res_dir, res_v14_dir, options.verify_only)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
|
boundarydevices/android_external_chromium_org
|
build/android/gyp/generate_v14_compatible_resources.py
|
Python
|
bsd-3-clause
| 13,731
|
[
"Galaxy"
] |
36ca33f23698d0471ddfb407b62ba796e52315ff84f7b307dc0291bd0f4f188c
|
"""
Interactivity functions and classes using matplotlib and IPython widgets
**Gravity forward modeling**
* :class:`~fatiando.gravmag.interactive.Moulder`: a matplotlib GUI for 2D
forward modeling using polygons
----
"""
from __future__ import division
import cPickle as pickle
import numpy
from matplotlib import pyplot, widgets, patches
from matplotlib.lines import Line2D
from IPython.core.pylabtools import print_figure
from IPython.display import Image
from .. import utils
from . import talwani
from ..mesher import Polygon
class Moulder(object):
"""
Interactive 2D forward modeling using polygons.
A matplotlib GUI application. Allows drawing and manipulating polygons and
computes their predicted data automatically. Also permits contaminating the
data with Gaussian pseudo-random error for producing synthetic data sets.
Uses :mod:`fatiando.gravmag.talwani` for computations.
*Moulder* objects can be persisted to Python pickle files using the
:meth:`~fatiando.gravmag.interactive.Moulder.save` method and later
restored using :meth:`~fatiando.gravmag.interactive.Moulder.load`.
.. warning::
Cannot be used with ``%matplotlib inline`` on IPython notebooks because
the app uses the matplotlib plot window. You can still embed the
generated model and data figure on notebooks using the
:meth:`~fatiando.gravmag.interactive.Moulder.plot` method.
Parameters:
* area : list = (x1, x2, z1, z2)
The limits of the model drawing area, in meters.
* x, z : 1d-arrays
The x- and z-coordinates of the computation points (places where
predicted data will be computed). In meters.
* data : None or 1d-array
Observed data measured at *x* and *z*. Will plot this with black dots
along the predicted data.
* density_range : list = [min, max]
The minimum and maximum values allowed for the density. Determines the
limits of the density slider of the application. In kg.m^-3. Defaults
to [-2000, 2000].
* kwargs : dict
Other keyword arguments used to restore the state of the application.
Used by the :meth:`~fatiando.gravmag.interactive.Moulder.load` method.
Not intended for general use.
Examples:
Make the Moulder object and start the app::
import numpy as np
area = (0, 10e3, 0, 5e3)
# Calculate on 100 points
x = np.linspace(area[0], area[1], 100)
z = np.zeros_like(x)
app = Moulder(area, x, z)
app.run()
# This will pop-up a window with the application (like the screenshot
# below). Start drawing (follow the instruction in the figure title).
# When satisfied, close the window to resume execution.
.. image:: ../_static/images/Moulder-screenshot.png
:alt: Screenshot of the Moulder GUI
After closing the plot window, you can access the model and data from the
*Moulder* object::
app.model # The drawn model as fatiando.mesher.Polygon
app.predicted # 1d-array with the data predicted by the model
# You can save the predicted data to use later
app.save_predicted('data.txt')
# You can also save the application and resume it later
app.save('application.pkl')
# Close this session/IPython notebook/etc.
# To resume drawing later:
app = Moulder.load('application.pkl')
app.run()
"""
# The tolerance range for mouse clicks on vertices. In pixels.
epsilon = 5
# App instructions printed in the figure suptitle
instructions = ' | '.join([
'n: New polygon', 'd: delete', 'click: select/move', 'esc: cancel'])
def __init__(self, area, x, z, data=None, density_range=[-2000, 2000],
**kwargs):
self.area = area
self.x, self.z = numpy.asarray(x), numpy.asarray(z)
self.density_range = density_range
self.data = data
# Used to set the ylims for the data axes.
if data is None:
self.dmin, self.dmax = 0, 0
else:
self.dmin, self.dmax = data.min(), data.max()
self.predicted = kwargs.get('predicted', numpy.zeros_like(x))
self.error = kwargs.get('error', 0)
self.cmap = kwargs.get('cmap', pyplot.cm.RdBu_r)
self.line_args = dict(
linewidth=2, linestyle='-', color='k', marker='o',
markerfacecolor='k', markersize=5, animated=False, alpha=0.6)
self.polygons = []
self.lines = []
self.densities = kwargs.get('densities', [])
vertices = kwargs.get('vertices', [])
for xy, dens in zip(vertices, self.densities):
poly, line = self._make_polygon(xy, dens)
self.polygons.append(poly)
self.lines.append(line)
def save_predicted(self, fname):
"""
Save the predicted data to a text file.
Data will be saved in 3 columns separated by spaces: x z data
Parameters:
* fname : string or file-like object
The name of the output file or an open file-like object.
"""
numpy.savetxt(fname, numpy.transpose([self.x, self.z, self.predicted]))
def save(self, fname):
"""
Save the application state into a pickle file.
Use this to persist the application. You can later reload the entire
object, with the drawn model and data, using the
:meth:`~fatiando.gravmag.interactive.Moulder.load` method.
Parameters:
* fname : string
The name of the file to save the application. The extension doesn't
matter (use ``.pkl`` if in doubt).
"""
with open(fname, 'w') as f:
vertices = [numpy.asarray(p.xy) for p in self.polygons]
state = dict(area=self.area, x=self.x,
z=self.z, data=self.data,
density_range=self.density_range,
cmap=self.cmap,
predicted=self.predicted,
vertices=vertices,
densities=self.densities,
error=self.error)
pickle.dump(state, f)
@classmethod
def load(cls, fname):
"""
Restore an application from a pickle file.
The pickle file should have been generated by the
:meth:`~fatiando.gravmag.interactive.Moulder.save` method.
Parameters:
* fname : string
The name of the file.
Returns:
* app : Moulder object
The restored application. You can continue using it as if nothing
had happened.
"""
with open(fname) as f:
state = pickle.load(f)
app = cls(**state)
return app
@property
def model(self):
"""
The polygon model drawn as :class:`fatiando.mesher.Polygon` objects.
"""
m = [Polygon(p.xy, {'density': d})
for p, d in zip(self.polygons, self.densities)]
return m
def run(self):
"""
Start the application for drawing.
Will pop up a window with a place for drawing the model (below) and a
place with the predicted (and, optionally, observed) data (top).
Follow the instruction on the figure title.
When done, close the window to resume program execution.
"""
fig = self._figure_setup()
# Sliders to control the density and the error in the data
self.density_slider = widgets.Slider(
fig.add_axes([0.10, 0.01, 0.30, 0.02]), 'Density',
self.density_range[0], self.density_range[1], valinit=0.,
valfmt='%6.0f kg/m3')
self.error_slider = widgets.Slider(
fig.add_axes([0.60, 0.01, 0.30, 0.02]), 'Error',
0, 5, valinit=self.error, valfmt='%1.2f mGal')
# Put instructions on figure title
self.dataax.set_title(self.instructions)
# Markers for mouse click events
self._ivert = None
self._ipoly = None
self._lastevent = None
self._drawing = False
self._xy = []
self._drawing_plot = None
# Used to blit the model plot and make
# rendering faster
self.background = None
# Connect event callbacks
self._connect()
self._update_data()
self._update_data_plot()
self.canvas.draw()
pyplot.show()
def _connect(self):
"""
Connect the matplotlib events to their callback methods.
"""
# Make the proper callback connections
self.canvas.mpl_connect('button_press_event',
self._button_press_callback)
self.canvas.mpl_connect('key_press_event',
self._key_press_callback)
self.canvas.mpl_connect('button_release_event',
self._button_release_callback)
self.canvas.mpl_connect('motion_notify_event',
self._mouse_move_callback)
self.canvas.mpl_connect('draw_event',
self._draw_callback)
# Call the cleanup and extra code for a draw event when resizing as
# well. This is needed so that tight_layout adjusts the figure when
# resized. Otherwise, tight_layout snaps only when the user clicks on
# the figure to do something.
self.canvas.mpl_connect('resize_event',
self._draw_callback)
self.density_slider.on_changed(self._set_density_callback)
self.error_slider.on_changed(self._set_error_callback)
def plot(self, figsize=(10, 8), dpi=70):
"""
Make a plot of the data and model for embedding in IPython notebooks
Doesn't require ``%matplotlib inline`` to embed the plot (as that would
not allow the app to run).
Parameters:
* figsize : list = (width, height)
The figure size in inches.
* dpi : float
The number of dots-per-inch for the figure resolution.
"""
fig = self._figure_setup(figsize=figsize, facecolor='white')
self._update_data_plot()
pyplot.close(fig)
data = print_figure(fig, dpi=dpi)
return Image(data=data)
def _figure_setup(self, **kwargs):
"""
Setup the plot figure with labels, titles, ticks, etc.
Sets the *canvas*, *dataax*, *modelax*, *polygons* and *lines*
attributes.
Parameters:
* kwargs : dict
Keyword arguments passed to ``pyplot.subplots``.
Returns:
* fig : matplotlib figure object
The created figure
"""
fig, axes = pyplot.subplots(2, 1, **kwargs)
ax1, ax2 = axes
self.predicted_line, = ax1.plot(self.x, self.predicted, '-r')
if self.data is not None:
self.data_line, = ax1.plot(self.x, self.data, '.k')
ax1.set_ylabel('Gravity anomaly (mGal)')
ax1.set_xlabel('x (m)', labelpad=-10)
ax1.set_xlim(self.area[:2])
ax1.set_ylim((-200, 200))
ax1.grid(True)
tmp = ax2.pcolor(numpy.array([self.density_range]), cmap=self.cmap)
tmp.set_visible(False)
pyplot.colorbar(tmp, orientation='horizontal',
pad=0.08, aspect=80).set_label(r'Density (kg/m3)')
# Remake the polygons and lines to make sure they belong to the right
# axis coordinates
vertices = [p.xy for p in self.polygons]
newpolygons, newlines = [], []
for xy, dens in zip(vertices, self.densities):
poly, line = self._make_polygon(xy, dens)
newpolygons.append(poly)
newlines.append(line)
ax2.add_patch(poly)
ax2.add_line(line)
self.polygons = newpolygons
self.lines = newlines
ax2.set_xlim(self.area[:2])
ax2.set_ylim(self.area[2:])
ax2.grid(True)
ax2.invert_yaxis()
ax2.set_ylabel('z (m)')
fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.06,
hspace=0.1)
self.figure = fig
self.canvas = fig.canvas
self.dataax = axes[0]
self.modelax = axes[1]
fig.canvas.draw()
return fig
def _density2color(self, density):
"""
Map density values to colors using the given *cmap* attribute.
Parameters:
* density : 1d-array
The density values of the model polygons
Returns:
* colors : 1d-array
The colors mapped to each density value (returned by a matplotlib
colormap object).
"""
dmin, dmax = self.density_range
return self.cmap((density - dmin)/(dmax - dmin))
def _make_polygon(self, vertices, density):
"""
Create a polygon for drawing.
Polygons are matplotlib.patches.Polygon objects for the fill and
matplotlib.lines.Line2D for the contour.
Parameters:
* vertices : list of [x, z]
List of the [x, z] coordinate pairs of each vertex of the polygon
* density : float
The density of the polygon (used to set the color)
Returns:
* polygon, line
The matplotlib Polygon and Line2D objects
"""
poly = patches.Polygon(vertices, animated=False, alpha=0.9,
color=self._density2color(density))
x, y = zip(*poly.xy)
line = Line2D(x, y, **self.line_args)
return poly, line
def _update_data(self):
"""
Recalculate the predicted data (optionally with random error)
"""
self.predicted = talwani.gz(self.x, self.z, self.model)
if self.error > 0:
self.predicted = utils.contaminate(self.predicted, self.error)
def _update_data_plot(self):
"""
Update the predicted data plot in the *dataax*.
Adjusts the xlim of the axes to fit the data.
"""
self.predicted_line.set_ydata(self.predicted)
vmin = 1.2*min(self.predicted.min(), self.dmin)
vmax = 1.2*max(self.predicted.max(), self.dmax)
self.dataax.set_ylim(vmin, vmax)
self.dataax.grid(True)
self.canvas.draw()
def _draw_callback(self, value):
"""
Callback for the canvas.draw() event.
This is called every time the figure is redrawn. Used to do some
cleanup and tuning whenever this is called, like calling
``tight_layout``.
"""
self.figure.tight_layout()
def _set_error_callback(self, value):
"""
Callback when error slider is edited
"""
self.error = value
self._update_data()
self._update_data_plot()
def _set_density_callback(self, value):
"""
Callback when density slider is edited
"""
if self._ipoly is not None:
self.densities[self._ipoly] = value
self.polygons[self._ipoly].set_color(self._density2color(value))
self._update_data()
self._update_data_plot()
self.canvas.draw()
def _get_polygon_vertice_id(self, event):
"""
Find out which vertex of which polygon the event happened in.
If the click was inside a polygon (not on a vertex), identify that
polygon.
Returns:
* p, v : int, int
p: the index of the polygon the event happened in or None if
outside all polygons.
v: the index of the polygon vertex that was clicked or None if the
click was not on a vertex.
"""
distances = []
indices = []
for poly in self.polygons:
x, y = poly.get_transform().transform(poly.xy).T
d = numpy.sqrt((x - event.x)**2 + (y - event.y)**2)
distances.append(d.min())
indices.append(numpy.argmin(d))
p = numpy.argmin(distances)
if distances[p] >= self.epsilon:
# Check if the event was inside a polygon
x, y = event.x, event.y
p, v = None, None
for i, poly in enumerate(self.polygons):
if poly.contains_point([x, y]):
p = i
break
else:
v = indices[p]
last = len(self.polygons[p].xy) - 1
if v == 0 or v == last:
v = [0, last]
return p, v
def _button_press_callback(self, event):
"""
What actions to perform when a mouse button is clicked
"""
if event.inaxes != self.modelax:
return
if event.button == 1 and not self._drawing and self.polygons:
self._lastevent = event
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
# Find out if a click happened on a vertex
# and which vertex of which polygon
self._ipoly, self._ivert = self._get_polygon_vertice_id(event)
if self._ipoly is not None:
self.density_slider.set_val(self.densities[self._ipoly])
self.polygons[self._ipoly].set_animated(True)
self.lines[self._ipoly].set_animated(True)
self.lines[self._ipoly].set_color([0, 1, 0, 0])
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.modelax.bbox)
self.modelax.draw_artist(self.polygons[self._ipoly])
self.modelax.draw_artist(self.lines[self._ipoly])
self.canvas.blit(self.modelax.bbox)
elif self._drawing:
if event.button == 1:
self._xy.append([event.xdata, event.ydata])
self._drawing_plot.set_data(zip(*self._xy))
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self._drawing_plot)
self.canvas.blit(self.modelax.bbox)
elif event.button == 3:
if len(self._xy) >= 3:
density = self.density_slider.val
poly, line = self._make_polygon(self._xy, density)
self.polygons.append(poly)
self.lines.append(line)
self.densities.append(density)
self.modelax.add_patch(poly)
self.modelax.add_line(line)
self._drawing_plot.remove()
self._drawing_plot = None
self._xy = None
self._drawing = False
self._ipoly = len(self.polygons) - 1
self.lines[self._ipoly].set_color([0, 1, 0, 0])
self.dataax.set_title(self.instructions)
self.canvas.draw()
self._update_data()
self._update_data_plot()
def _button_release_callback(self, event):
"""
Reset place markers on mouse button release
"""
if event.inaxes != self.modelax:
return
if event.button != 1:
return
if self._ivert is None and self._ipoly is None:
return
self.background = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
self.canvas.draw()
self._ivert = None
# self._ipoly is only released when clicking outside
# the polygons
self._lastevent = None
self._update_data()
self._update_data_plot()
def _key_press_callback(self, event):
"""
What to do when a key is pressed on the keyboard.
"""
if event.inaxes is None:
return
if event.key == 'd':
if self._drawing and self._xy:
self._xy.pop()
if self._xy:
self._drawing_plot.set_data(zip(*self._xy))
else:
self._drawing_plot.set_data([], [])
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self._drawing_plot)
self.canvas.blit(self.modelax.bbox)
elif self._ivert is not None:
poly = self.polygons[self._ipoly]
line = self.lines[self._ipoly]
if len(poly.xy) > 4:
verts = numpy.atleast_1d(self._ivert)
poly.xy = numpy.array([xy for i, xy in enumerate(poly.xy)
if i not in verts])
line.set_data(zip(*poly.xy))
self._update_data()
self._update_data_plot()
self.canvas.restore_region(self.background)
self.modelax.draw_artist(poly)
self.modelax.draw_artist(line)
self.canvas.blit(self.modelax.bbox)
self._ivert = None
elif self._ipoly is not None:
self.polygons[self._ipoly].remove()
self.lines[self._ipoly].remove()
self.polygons.pop(self._ipoly)
self.lines.pop(self._ipoly)
self.densities.pop(self._ipoly)
self._ipoly = None
self.canvas.draw()
self._update_data()
self._update_data_plot()
elif event.key == 'n':
self._ivert = None
self._ipoly = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.modelax.bbox)
self._drawing = True
self._xy = []
self._drawing_plot = Line2D([], [], **self.line_args)
self._drawing_plot.set_animated(True)
self.modelax.add_line(self._drawing_plot)
self.dataax.set_title(' | '.join([
'left click: set vertex', 'right click: finish',
'esc: cancel']))
self.canvas.draw()
elif event.key == 'escape':
self._drawing = False
self._xy = []
if self._drawing_plot is not None:
self._drawing_plot.remove()
self._drawing_plot = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
def _mouse_move_callback(self, event):
"""
Handle things when the mouse move.
"""
if event.inaxes != self.modelax:
return
if event.button != 1:
return
if self._ivert is None and self._ipoly is None:
return
x, y = event.xdata, event.ydata
p = self._ipoly
v = self._ivert
if self._ivert is not None:
self.polygons[p].xy[v] = x, y
else:
dx = x - self._lastevent.xdata
dy = y - self._lastevent.ydata
self.polygons[p].xy[:, 0] += dx
self.polygons[p].xy[:, 1] += dy
self.lines[p].set_data(zip(*self.polygons[p].xy))
self._lastevent = event
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self.polygons[p])
self.modelax.draw_artist(self.lines[p])
self.canvas.blit(self.modelax.bbox)
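# Illustration only (hypothetical demo, not part of the module): the forward
# computation that Moulder._update_data performs can also be run without the
# GUI. A minimal sketch assuming a single 500 kg/m3 polygonal body over the
# same kind of computation grid the app uses:
if __name__ == '__main__':
    x_demo = numpy.linspace(0, 10e3, 100)
    z_demo = numpy.zeros_like(x_demo)
    body = Polygon([[2e3, 1e3], [4e3, 1e3], [4e3, 2e3], [2e3, 2e3]],
                   {'density': 500})
    predicted_demo = talwani.gz(x_demo, z_demo, [body])
    # 0.5 mGal of Gaussian pseudo-random noise, as the error slider would add
    noisy_demo = utils.contaminate(predicted_demo, 0.5)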
|
eusoubrasileiro/fatiando_seismic
|
fatiando/gravmag/interactive.py
|
Python
|
bsd-3-clause
| 23,916
|
[
"Gaussian"
] |
80365596eacbf6969839a35c0d457eaee137462c0b2fc34315e2fce3cf9f6b8f
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single stack of transformations with no masking.
Produces output aligned with inputs.
Configurable using hyperparameters to use some combination of convolutions,
attention, mixtures of experts, etc.
A good problem for this model is languagemodel_wiki_scramble1k50.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import diet
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
ModeKeys = tf.estimator.ModeKeys # pylint: disable=invalid-name
def _should_preprocess(layer_type):
return layer_type not in ["timing", "pos_emb", "att_memory_efficient"]
def _should_postprocess(layer_type):
return layer_type not in ["timing", "pos_emb"]
@registry.register_model
class Aligned(t2t_model.T2TModel):
"""Attention net. See file docstring."""
@property
def use_body_sharded(self):
return True
def body_sharded(self, sharded_features):
# Remove dropout if not training
hparams = self._hparams
dp = self._data_parallelism
x = dp(tf.squeeze, sharded_features["inputs"], 2)
def preprocess(x):
return dp(common_layers.layer_preprocess, x, hparams)
def postprocess(x, y):
return dp(common_layers.layer_postprocess, x, y, hparams)
x = dp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
extra_loss = 0.0
ffn_hidden_sizes = [int(s) for s in hparams.ffn_hidden_sizes.split(",")]
moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
if hparams.mask_right:
def _bias(x):
return common_attention.attention_bias_lower_triangle(
common_layers.shape_list(x)[1])
bias = dp(_bias, x)
else:
bias = tf.zeros([1, 1, 1, 1])
if hparams.diet_experts:
hsize, = moe_hidden_sizes
def _diet_expert(x):
return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params())
expert_fn = _diet_expert
else:
expert_fn = expert_utils.ffn_expert_fn(
hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)
batch_coordinate = dp(get_batch_coordinate, x)
layers = hparams.layers.strip(",").split(",")
for layer_num, layer_type in enumerate(layers):
with tf.variable_scope("%s_%d" % (layer_type, layer_num)):
if _should_preprocess(layer_type):
x = preprocess(x)
if layer_type == "timing":
y = dp(common_attention.add_timing_signal_nd, x)
elif layer_type == "pos_emb":
y = dp(
common_attention.add_positional_embedding_nd,
x,
hparams.max_length,
name="pos_emb")
elif layer_type == "att":
y = dp(
common_attention.multihead_attention,
x,
None,
bias, # bias
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout)
elif layer_type == "att_grouped":
multiplicative_overhead = (
hparams.multiplicative_overhead if hparams.mode == ModeKeys.TRAIN
else hparams.multiplicative_overhead_eval)
y, loss = dp(
common_attention.grouped_attention_multihead,
x,
x,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
num_groups=hparams.attention_num_groups,
memory_target_density=hparams.memory_target_density,
multiplicative_overhead=multiplicative_overhead,
make_image_summary=hparams.attention_image_summary,
mask_right=hparams.mask_right,
)
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "att_memory_efficient":
assert hparams.layer_preprocess_sequence == "n"
y = dp(common_attention.multihead_self_attention_memory_efficient, x,
bias, hparams.num_heads)
elif layer_type == "att_local":
y = dp(
common_attention.multihead_attention,
x,
None,
None, # bias
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=("local_mask_right"
if hparams.mask_right else "local_unmasked"),
block_length=hparams.local_attention_window,
block_width=hparams.local_attention_window)
elif layer_type == "att_pseudolocal":
# This is an inefficient implementation of local attention, for the
# purpose of testing model quality.
def _pseudolocal_bias(x):
return common_attention.attention_bias_local(
common_layers.shape_list(x)[1], hparams.local_attention_window,
0 if hparams.mask_right else hparams.local_attention_window)
pseudolocal_bias = dp(_pseudolocal_bias, x)
y = dp(common_attention.multihead_attention, x, None,
pseudolocal_bias, hparams.attention_key_channels or
hparams.hidden_size, hparams.attention_value_channels or
hparams.hidden_size, hparams.hidden_size, hparams.num_heads,
hparams.attention_dropout)
elif layer_type == "att_local_expert":
y, loss = dp(
common_attention.local_expert_attention,
x,
k=hparams.attention_moe_k,
loss_coef=hparams.attention_load_balance,
attention_num_experts=hparams.attention_num_experts,
train=hparams.mode == ModeKeys.TRAIN,
batch_coordinate=batch_coordinate,
mask_right=hparams.mask_right,
split_batch=bool(hparams.attention_split_batch),
attention_kq_size=hparams.attention_kq_size,
attention_v_size=hparams.attention_v_size)
# TODO(avaswani, epot, noam): Do we need to divide by num shards ?
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "att_lsh":
if hparams.lsh_truncated:
attention_fn = common_attention.multihead_attention_sparse_truncated
else:
attention_fn = common_attention.multihead_attention_sparse_dot_prod
y, loss = dp(
attention_fn,
x,
None,
None, # Bias is computed inside
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
# Additional parameters
bi=[
common_attention.BatchInfo(
coordinates=batch_coordinate[i],
order=None, # No future mask
) for i in range(dp.n)
],
use_map_fn=False,
experts_params=dict(nb_hyperplanes=4,))
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "moe":
y, loss = expert_utils.distributed_moe(
dp,
self._ps_devices,
x,
hparams.mode == ModeKeys.TRAIN,
input_size=hparams.hidden_size,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=hparams.moe_loss_coef)
extra_loss += loss
elif layer_type == "ffn":
y = dp(
expert_utils.ffn_expert_fn(hparams.hidden_size, ffn_hidden_sizes,
hparams.hidden_size),
dp(expert_utils.flatten_all_but_last, x))
y = dp(expert_utils.reshape_like, y, x)
elif layer_type == "conv":
y = dp(
common_layers.conv1d,
x,
hparams.hidden_size,
hparams.kernel_height,
activation=tf.nn.relu,
padding="SAME",
)
else:
assert False, "unknown sublayer %s" % layer_type
if _should_postprocess(layer_type):
x = postprocess(x, y)
else:
x = y
x = preprocess(x)
decoder_output = dp(tf.expand_dims, x, 2)
return decoder_output, extra_loss
def get_batch_coordinate(x):
"""Return a flat int32 tensor of shape [1, batch_size*length, 1]."""
# Compute the batch coordinate before flattening all batches
batch_coordinate = tf.expand_dims(
common_attention.coordinate_tensor(
common_layers.shape_list(x)[:-1], axis=0),
axis=-1)
return batch_coordinate
@registry.register_hparams
def aligned_base():
"""Set of hyperparameters.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 5000
hparams.max_length = 0
hparams.min_length_bucket = 1024
hparams.dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.label_smoothing = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.shared_embedding_and_softmax_weights = True
hparams.add_hparam("ffn_hidden_sizes", "2048") # Add new ones like this.
hparams.moe_num_experts = 32
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.add_hparam("layers", "timing," + "conv,att,ffn," * 2)
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
# moe params. local attention moe.
hparams.add_hparam("attention_local", False)
hparams.add_hparam("attention_moe_k", 2)
hparams.add_hparam("attention_num_experts", 16)
hparams.add_hparam("attention_split_batch", False)
# Key, query and value dimensions for the attention
hparams.add_hparam("attention_kq_size", 128)
hparams.add_hparam("attention_v_size", 256)
# Loss coef for load balancing
hparams.add_hparam("attention_load_balance", 2e-2)
hparams.add_hparam("diet_experts", False)
hparams.add_hparam("memory_efficient_ffn", False)
hparams.add_hparam("local_attention_window", 128)
hparams.add_hparam("attention_num_groups", 8)
hparams.add_hparam("memory_target_density", 2.0)
hparams.add_hparam("multiplicative_overhead", 1.25)
hparams.add_hparam("multiplicative_overhead_eval", 2.0)
hparams.add_hparam("attention_image_summary", True)
# LSH params
hparams.add_hparam("lsh_truncated", True)
# For testing right-masking.
# This is not implemented in all layers.
hparams.add_hparam("mask_right", False)
return hparams
@registry.register_hparams
def aligned_memory_efficient():
"""Use multihead_self_attention_memory_efficient.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.59
8.7 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.02
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_memory_efficient,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local_expert():
"""Use local_expert_attention.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.72
10.2 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.27
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_local_expert,ffn," * 2
return hparams
@registry.register_hparams
def aligned_grouped():
"""Use local_expert_attention.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.63
10.2 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.04
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_grouped,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local():
"""Use local attention code.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
12.8 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.08
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_local,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local_1k():
"""Use local attention code, attend to full sequence.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
7.5 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = aligned_local()
hparams.local_attention_window = 1024
return hparams
@registry.register_hparams
def aligned_pseudolocal():
"""Use a bias to simulate local attention. attention radius 128.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.06
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_pseudolocal,ffn," * 2
return hparams
@registry.register_hparams
def aligned_pseudolocal_256():
"""Use a bias to simulate local attention. attentio radius 256.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.56
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.05
Returns:
a hparams object
"""
hparams = aligned_pseudolocal()
hparams.local_attention_window = 256
return hparams
@registry.register_hparams
def aligned_no_timing():
"""No timing signal.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.75
12.3 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.39
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "conv,att,ffn," * 2
return hparams
@registry.register_hparams
def aligned_no_att():
"""No attention at all.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.89
20.8 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.70
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "conv,ffn," * 2
return hparams
@registry.register_hparams
def aligned_pos_emb():
"""positional embedding insead of timing signal.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.67
12.1 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "pos_emb," + "conv,att,ffn," * 2
return hparams
@registry.register_hparams
def aligned_moe():
"""mixture of experts instead of ffn.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.62
6.7 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 1.94
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att,moe," * 2
return hparams
@registry.register_hparams
def aligned_lsh():
"""Use multihead_attention_sparse_dot_prod.
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_lsh,ffn," * 2
return hparams
@registry.register_hparams
def aligned_8k():
"""version for languagemodel_wiki_scramble8k50.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.93
1.5 steps/sec on P100
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.batch_size = 8192
return hparams
@registry.register_hparams
def aligned_8k_grouped():
"""version for languagemodel_wiki_scramble8k50.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92
3.3 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15
Returns:
a hparams object
"""
hparams = aligned_grouped()
hparams.batch_size = 8192
# hparams.attention_image_summary = False
hparams.attention_num_groups = 16
hparams.multiplicative_overhead = 1.1
return hparams
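# Illustration only (hypothetical variant, not registered in the real
# codebase): new hparams sets follow the same pattern as the functions above
# -- copy a base set and override individual fields.
@registry.register_hparams
def aligned_local_512_example():
  """Hypothetical example: local attention with a 512-token window."""
  hparams = aligned_local()
  hparams.local_attention_window = 512
  return hparams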
|
rsepassi/tensor2tensor
|
tensor2tensor/models/research/aligned.py
|
Python
|
apache-2.0
| 17,850
|
[
"MOE"
] |
73aa8c5ccdef802b513942d12fd02911b0bd2b27e3fdb0a8fc4f64ae46c9d5c7
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
__author__ = 'Brian Wickman'
import os
import tempfile
from twitter.common.python.pex_builder import PEXBuilder
from twitter.pants.base import Config
from twitter.pants.targets import PythonBinary
from twitter.pants.python.python_chroot import PythonChroot
class PythonBinaryBuilder(object):
class NotABinaryTargetException(Exception): pass
def __init__(self, target, args, root_dir):
self.target = target
if not isinstance(target, PythonBinary):
raise PythonBinaryBuilder.NotABinaryTargetException(
"Target %s is not a PythonBinary!" % target)
config = Config.load()
self.distdir = config.getdefault('pants_distdir')
distpath = tempfile.mktemp(dir=self.distdir, prefix=target.name)
self.builder = PEXBuilder(distpath)
# configure builder PexInfo options
for repo in target._repositories:
self.builder.info().add_repository(repo)
for index in target._indices:
self.builder.info().add_index(index)
self.builder.info().allow_pypi = target._allow_pypi
self.builder.info().zip_safe = target._zip_safe
self.builder.info().inherit_path = target._inherit_path
self.builder.info().entry_point = target._entry_point
self.builder.info().ignore_errors = target._ignore_errors
self.chroot = PythonChroot(target, root_dir, builder=self.builder)
def run(self):
print('Building PythonBinary %s:' % self.target)
env = self.chroot.dump()
filename = os.path.join(self.distdir, '%s.pex' % self.target.name)
env.build(filename)
print('Wrote %s' % filename)
return 0
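# Illustration only (hypothetical helper, not part of pants): driving the
# builder for an already-parsed PythonBinary target looks roughly like this.
def _example_build(target, root_dir):
  """Build a .pex for `target` under `root_dir` and return the exit code."""
  builder = PythonBinaryBuilder(target, args=[], root_dir=root_dir)
  return builder.run()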
|
foursquare/commons-old
|
src/python/twitter/pants/python/binary_builder.py
|
Python
|
apache-2.0
| 2,509
|
[
"Brian"
] |
1775a4e98ad1385624c8187176e175b332cc9d08516e6b560f687e8fd4b488c0
|
import os, shutil, math
from datetime import datetime
from numpy import mean, std as pstdev  # population std dev (ddof=0) of the samples
from splinter import Browser
from itertools import combinations
from tqdm import tqdm
from webmon.utils import _clean_filename, _rmsdiff
URLS = [
'http://www.mc706.com',
'http://www.google.com',
'http://www.reddit.com',
'http://www.msn.com',
]
PHOTOS_DIR = 'screenshots'
SAMPLES = 10
def directory_stdev(directory):
"""Calculates the average and standard deviation of images in a directory"""
files = os.listdir(directory)[-SAMPLES:-1]
rms = []
pairs = combinations(files, 2)
for pair in pairs:
rms.append(_rmsdiff(os.path.join(directory, pair[0]), os.path.join(directory, pair[1])))
return mean(rms), pstdev(rms)
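# The RMS image-difference helper used above comes from webmon.utils; a
# minimal sketch of what such a helper could look like (hypothetical name
# `_rmsdiff_sketch`, assumes Pillow is installed and screenshots share size/mode):
def _rmsdiff_sketch(path_a, path_b):
    """Root-mean-square pixel difference between two equally sized images."""
    from PIL import Image, ImageChops
    diff = ImageChops.difference(Image.open(path_a), Image.open(path_b))
    histogram = diff.histogram()
    # weight each histogram bucket by the square of its intensity value
    sum_sq = sum(count * (idx % 256) ** 2 for idx, count in enumerate(histogram))
    n_pixels = diff.size[0] * diff.size[1]
    return math.sqrt(sum_sq / float(n_pixels))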
def run():
created = []
changed = []
today = str(datetime.now())
photos_dir = os.path.join(os.getcwd(), PHOTOS_DIR)
print "Taking Screenshots"
with Browser() as browser:
for url in tqdm(URLS):
browser.visit(url)
screenshot = browser.screenshot('screenshot.png')
if screenshot:
dest = os.path.join(photos_dir, _clean_filename(url))
if not os.path.exists(dest):
os.makedirs(dest)
name = os.path.join(dest, 'screenshot-{0}.png'.format(today))
created.append(name)
shutil.move(screenshot, name)
print "Calculating Differences"
for directory in tqdm(os.listdir(photos_dir)):
if os.path.isdir(os.path.join(photos_dir, directory)):
active = None
for image in created:
if directory in image:
active = image
if active:
files = os.listdir(os.path.join(photos_dir, directory))
mean, std = directory_stdev(os.path.join(photos_dir, directory))
current = _rmsdiff(os.path.join(photos_dir, directory, files[-1]), active)
if current < mean - std or current > mean + std:
changed.append((directory, current, mean, std))
if changed:
for change in changed:
print "{0} has changed by more than the average, {1} !~ {2}+/-{3}".format(*change)
else:
print "None of the websites have changed"
|
mc706/webmon
|
webmon/main.py
|
Python
|
mit
| 2,220
|
[
"VisIt"
] |
1c7be3eefba9616c1f70fcd4bcb043735e11e7ad76a44a49a4a0eb3bcc4f753e
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""PhyloXML reader/parser, writer, and associated functions.
Instantiates tree elements from a parsed PhyloXML file, and constructs an XML
file from a `Bio.Phylo.PhyloXML` object.
About capitalization:
- phyloXML means the file format specification
- PhyloXML means the Biopython module `Bio.Phylo.PhyloXML` and its classes
- Phyloxml means the top-level class used by `PhyloXMLIO.read` (but not
`Bio.Phylo.read`!), containing a list of Phylogenies (objects derived from
`BaseTree.Tree`)
"""
__docformat__ = "restructuredtext en"
import sys
# Add path to Bio
sys.path.append('../..')
#import sys
from Bio._py3k import basestring
from Bio._py3k import unicode
from Bio.Phylo import PhyloXML as PX
# For speed try to use cElementTree rather than ElementTree
try:
if (3, 0) <= sys.version_info[:2] <= (3, 1):
# Workaround for bug in python 3.0 and 3.1,
# see http://bugs.python.org/issue9257
from xml.etree import ElementTree as ElementTree
else:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree as ElementTree
# Recognize the phyloXML namespace when parsing
# See http://effbot.org/zone/element-namespaces.htm
NAMESPACES = {
'phy': 'http://www.phyloxml.org',
}
try:
register_namespace = ElementTree.register_namespace
except AttributeError:
if not hasattr(ElementTree, '_namespace_map'):
# cElementTree needs the pure-Python xml.etree.ElementTree
from xml.etree import ElementTree as ET_py
ElementTree._namespace_map = ET_py._namespace_map
def register_namespace(prefix, uri):
ElementTree._namespace_map[uri] = prefix
for prefix, uri in NAMESPACES.items():
register_namespace(prefix, uri)
class PhyloXMLError(Exception):
"""Exception raised when PhyloXML object construction cannot continue.
XML syntax errors will be found and raised by the underlying ElementTree
module; this exception is for valid XML that breaks the phyloXML
specification.
"""
pass
# ---------------------------------------------------------
# Public API
def read(file):
"""Parse a phyloXML file or stream and build a tree of Biopython objects.
The children of the root node are phylogenies and possibly other arbitrary
(non-phyloXML) objects.
:returns: a single `Bio.Phylo.PhyloXML.Phyloxml` object.
"""
return Parser(file).read()
def parse(file):
"""Iterate over the phylogenetic trees in a phyloXML file.
This ignores any additional data stored at the top level, but may be more
memory-efficient than the `read` function.
:returns: a generator of `Bio.Phylo.PhyloXML.Phylogeny` objects.
"""
return Parser(file).parse()
def write(obj, file, encoding='utf-8', indent=True):
"""Write a phyloXML file.
:Parameters:
obj
an instance of `Phyloxml`, `Phylogeny` or `BaseTree.Tree`, or an
iterable of either of the latter two. The object will be converted
to a Phyloxml object before serialization.
file
either an open handle or a file name.
"""
def fix_single(tree):
if isinstance(tree, PX.Phylogeny):
return tree
if isinstance(tree, PX.Clade):
return tree.to_phylogeny()
if isinstance(tree, PX.BaseTree.Tree):
return PX.Phylogeny.from_tree(tree)
if isinstance(tree, PX.BaseTree.Clade):
return PX.Phylogeny.from_tree(PX.BaseTree.Tree(root=tree))
else:
raise ValueError("iterable must contain Tree or Clade types")
if isinstance(obj, PX.Phyloxml):
pass
elif (isinstance(obj, PX.BaseTree.Tree) or
isinstance(obj, PX.BaseTree.Clade)):
obj = fix_single(obj).to_phyloxml()
elif hasattr(obj, '__iter__'):
obj = PX.Phyloxml({}, phylogenies=(fix_single(t) for t in obj))
else:
raise ValueError("First argument must be a Phyloxml, Phylogeny, "
"Tree, or iterable of Trees or Phylogenies.")
return Writer(obj).write(file, encoding=encoding, indent=indent)
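# Illustration only (hypothetical snippet, assumes a phyloXML file named
# "example.xml" is available): typical round-trip usage of the public API
# defined above.
if __name__ == "__main__":
    example_phyloxml = read("example.xml")        # single Phyloxml object
    for example_tree in parse("example.xml"):     # or iterate phylogenies lazily
        print(example_tree.name)
    write(example_phyloxml, "example_copy.xml")   # serialize back to phyloXML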
# ---------------------------------------------------------
# Functions I wish ElementTree had
def _local(tag):
"""Extract the local tag from a namespaced tag name."""
if tag[0] == '{':
return tag[tag.index('}') + 1:]
return tag
def _split_namespace(tag):
"""Split a tag into namespace and local tag strings."""
try:
return tag[1:].split('}', 1)
except:
return ('', tag)
def _ns(tag, namespace=NAMESPACES['phy']):
"""Format an XML tag with the given namespace."""
return '{%s}%s' % (namespace, tag)
def _get_child_as(parent, tag, construct):
"""Find a child node by tag, and pass it through a constructor.
Returns None if no matching child is found.
"""
child = parent.find(_ns(tag))
if child is not None:
return construct(child)
def _get_child_text(parent, tag, construct=unicode):
"""Find a child node by tag; pass its text through a constructor.
Returns None if no matching child is found.
"""
child = parent.find(_ns(tag))
if child is not None and child.text:
return construct(child.text)
def _get_children_as(parent, tag, construct):
"""Find child nodes by tag; pass each through a constructor.
Returns an empty list if no matching child is found.
"""
return [construct(child) for child in
parent.findall(_ns(tag))]
def _get_children_text(parent, tag, construct=unicode):
"""Find child nodes by tag; pass each node's text through a constructor.
Returns an empty list if no matching child is found.
"""
return [construct(child.text) for child in
parent.findall(_ns(tag))
if child.text]
def _indent(elem, level=0):
"""Add line breaks and indentation to ElementTree in-place.
Sources:
- http://effbot.org/zone/element-lib.htm#prettyprint
- http://infix.se/2007/02/06/gentlemen-indent-your-xml
"""
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for e in elem:
_indent(e, level + 1)
if not e.tail or not e.tail.strip():
e.tail = i + " "
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
# ---------------------------------------------------------
# INPUT
# ---------------------------------------------------------
def _str2bool(text):
if text == 'true' or text == '1':
return True
if text == 'false' or text == '0':
return False
raise ValueError('String could not be converted to boolean: ' + text)
def _dict_str2bool(dct, keys):
out = dct.copy()
for key in keys:
if key in out:
out[key] = _str2bool(out[key])
return out
def _int(text):
if text is not None:
try:
return int(text)
except Exception:
return None
def _float(text):
if text is not None:
try:
return float(text)
except Exception:
return None
def _collapse_wspace(text):
"""Replace all spans of whitespace with a single space character.
Also remove leading and trailing whitespace. See "Collapse Whitespace
Policy" in the phyloXML spec glossary:
http://phyloxml.org/documentation/version_100/phyloxml.xsd.html#Glossary
"""
if text is not None:
return ' '.join(text.split())
# NB: Not currently used
def _replace_wspace(text):
"""Replace tab, LF and CR characters with spaces, but don't collapse.
See "Replace Whitespace Policy" in the phyloXML spec glossary:
http://phyloxml.org/documentation/version_100/phyloxml.xsd.html#Glossary
"""
for char in ('\t', '\n', '\r'):
if char in text:
text = text.replace(char, ' ')
return text
class Parser(object):
"""Methods for parsing all phyloXML nodes from an XML stream.
To minimize memory use, the tree of ElementTree parsing events is cleared
after completing each phylogeny, clade, and top-level 'other' element.
Elements below the clade level are kept in memory until parsing of the
current clade is finished -- this shouldn't be a problem because clade is
the only recursive element, and non-clade nodes below this level are of
bounded size.
"""
def __init__(self, file):
# Get an iterable context for XML parsing events
context = iter(ElementTree.iterparse(file, events=('start', 'end')))
event, root = next(context)
self.root = root
self.context = context
def read(self):
"""Parse the phyloXML file and create a single Phyloxml object."""
phyloxml = PX.Phyloxml(dict((_local(key), val)
for key, val in self.root.items()))
other_depth = 0
for event, elem in self.context:
namespace, localtag = _split_namespace(elem.tag)
if event == 'start':
if namespace != NAMESPACES['phy']:
other_depth += 1
continue
if localtag == 'phylogeny':
phylogeny = self._parse_phylogeny(elem)
phyloxml.phylogenies.append(phylogeny)
if event == 'end' and namespace != NAMESPACES['phy']:
# Deal with items not specified by phyloXML
other_depth -= 1
if other_depth == 0:
# We're directly under the root node -- evaluate
otr = self.other(elem, namespace, localtag)
phyloxml.other.append(otr)
self.root.clear()
return phyloxml
def parse(self):
"""Parse the phyloXML file incrementally and return each phylogeny."""
phytag = _ns('phylogeny')
for event, elem in self.context:
if event == 'start' and elem.tag == phytag:
yield self._parse_phylogeny(elem)
# Special parsing cases -- incremental, using self.context
def _parse_phylogeny(self, parent):
"""Parse a single phylogeny within the phyloXML tree.
Recursively builds a phylogenetic tree with help from parse_clade, then
clears the XML event history for the phylogeny element and returns
control to the top-level parsing function.
"""
phylogeny = PX.Phylogeny(**_dict_str2bool(parent.attrib,
['rooted', 'rerootable']))
list_types = {
# XML tag, plural attribute
'confidence': 'confidences',
'property': 'properties',
'clade_relation': 'clade_relations',
'sequence_relation': 'sequence_relations',
}
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'start' and tag == 'clade':
assert phylogeny.root is None, \
"Phylogeny object should only have 1 clade"
phylogeny.root = self._parse_clade(elem)
continue
if event == 'end':
if tag == 'phylogeny':
parent.clear()
break
# Handle the other non-recursive children
if tag in list_types:
getattr(phylogeny, list_types[tag]).append(
getattr(self, tag)(elem))
# Complex types
elif tag in ('date', 'id'):
setattr(phylogeny, tag, getattr(self, tag)(elem))
# Simple types
elif tag in ('name', 'description'):
setattr(phylogeny, tag, _collapse_wspace(elem.text))
# Unknown tags
elif namespace != NAMESPACES['phy']:
phylogeny.other.append(self.other(elem, namespace, tag))
parent.clear()
else:
# NB: This shouldn't happen in valid files
raise PhyloXMLError('Misidentified tag: ' + tag)
return phylogeny
_clade_complex_types = ['color', 'events', 'binary_characters', 'date']
_clade_list_types = {
'confidence': 'confidences',
'distribution': 'distributions',
'reference': 'references',
'property': 'properties',
}
_clade_tracked_tags = set(_clade_complex_types).union(_clade_list_types.keys()).union(
['branch_length', 'name', 'node_id', 'width'])
def _parse_clade(self, parent):
"""Parse a Clade node and its children, recursively."""
clade = PX.Clade(**parent.attrib)
if clade.branch_length is not None:
clade.branch_length = float(clade.branch_length)
# NB: Only evaluate nodes at the current level
tag_stack = []
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'start':
if tag == 'clade':
clade.clades.append(self._parse_clade(elem))
continue
if tag == 'taxonomy':
clade.taxonomies.append(self._parse_taxonomy(elem))
continue
if tag == 'sequence':
clade.sequences.append(self._parse_sequence(elem))
continue
if tag in self._clade_tracked_tags:
tag_stack.append(tag)
if event == 'end':
if tag == 'clade':
elem.clear()
break
if tag != tag_stack[-1]:
continue
tag_stack.pop()
# Handle the other non-recursive children
if tag in self._clade_list_types:
getattr(clade, self._clade_list_types[tag]).append(
getattr(self, tag)(elem))
elif tag in self._clade_complex_types:
setattr(clade, tag, getattr(self, tag)(elem))
elif tag == 'branch_length':
# NB: possible collision with the attribute
if clade.branch_length is not None:
raise PhyloXMLError(
'Attribute branch_length was already set '
'for this Clade.')
clade.branch_length = _float(elem.text)
elif tag == 'width':
clade.width = _float(elem.text)
elif tag == 'name':
clade.name = _collapse_wspace(elem.text)
elif tag == 'node_id':
clade.node_id = PX.Id(elem.text.strip(),
elem.attrib.get('provider'))
elif namespace != NAMESPACES['phy']:
clade.other.append(self.other(elem, namespace, tag))
elem.clear()
else:
raise PhyloXMLError('Misidentified tag: ' + tag)
return clade
def _parse_sequence(self, parent):
sequence = PX.Sequence(**parent.attrib)
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'end':
if tag == 'sequence':
parent.clear()
break
if tag in ('accession', 'mol_seq', 'uri',
'domain_architecture'):
setattr(sequence, tag, getattr(self, tag)(elem))
elif tag == 'annotation':
sequence.annotations.append(self.annotation(elem))
elif tag == 'name':
sequence.name = _collapse_wspace(elem.text)
elif tag in ('symbol', 'location'):
setattr(sequence, tag, elem.text)
elif namespace != NAMESPACES['phy']:
sequence.other.append(self.other(elem, namespace, tag))
parent.clear()
return sequence
def _parse_taxonomy(self, parent):
taxonomy = PX.Taxonomy(**parent.attrib)
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'end':
if tag == 'taxonomy':
parent.clear()
break
if tag in ('id', 'uri'):
setattr(taxonomy, tag, getattr(self, tag)(elem))
elif tag == 'common_name':
taxonomy.common_names.append(_collapse_wspace(elem.text))
elif tag == 'synonym':
taxonomy.synonyms.append(elem.text)
elif tag in ('code', 'scientific_name', 'authority', 'rank'):
# ENH: check_str on rank
setattr(taxonomy, tag, elem.text)
elif namespace != NAMESPACES['phy']:
taxonomy.other.append(self.other(elem, namespace, tag))
parent.clear()
return taxonomy
def other(self, elem, namespace, localtag):
return PX.Other(localtag, namespace, elem.attrib,
value=elem.text and elem.text.strip() or None,
children=[self.other(child, *_split_namespace(child.tag))
for child in elem])
# Complex types
def accession(self, elem):
return PX.Accession(elem.text.strip(), elem.get('source'))
def annotation(self, elem):
return PX.Annotation(
desc=_collapse_wspace(_get_child_text(elem, 'desc')),
confidence=_get_child_as(elem, 'confidence', self.confidence),
properties=_get_children_as(elem, 'property', self.property),
uri=_get_child_as(elem, 'uri', self.uri),
**elem.attrib)
def binary_characters(self, elem):
def bc_getter(elem):
return _get_children_text(elem, 'bc')
return PX.BinaryCharacters(
type=elem.get('type'),
gained_count=_int(elem.get('gained_count')),
lost_count=_int(elem.get('lost_count')),
present_count=_int(elem.get('present_count')),
absent_count=_int(elem.get('absent_count')),
# Flatten BinaryCharacterList sub-nodes into lists of strings
gained=_get_child_as(elem, 'gained', bc_getter),
lost=_get_child_as(elem, 'lost', bc_getter),
present=_get_child_as(elem, 'present', bc_getter),
absent=_get_child_as(elem, 'absent', bc_getter))
def clade_relation(self, elem):
return PX.CladeRelation(
elem.get('type'), elem.get('id_ref_0'), elem.get('id_ref_1'),
distance=elem.get('distance'),
confidence=_get_child_as(elem, 'confidence', self.confidence))
def color(self, elem):
red, green, blue = (_get_child_text(elem, color, int) for color in
('red', 'green', 'blue'))
return PX.BranchColor(red, green, blue)
def confidence(self, elem):
return PX.Confidence(
_float(elem.text),
elem.get('type'))
def date(self, elem):
return PX.Date(
unit=elem.get('unit'),
desc=_collapse_wspace(_get_child_text(elem, 'desc')),
value=_get_child_text(elem, 'value', float),
minimum=_get_child_text(elem, 'minimum', float),
maximum=_get_child_text(elem, 'maximum', float),
)
def distribution(self, elem):
return PX.Distribution(
desc=_collapse_wspace(_get_child_text(elem, 'desc')),
points=_get_children_as(elem, 'point', self.point),
polygons=_get_children_as(elem, 'polygon', self.polygon))
def domain(self, elem):
return PX.ProteinDomain(elem.text.strip(),
int(elem.get('from')) - 1,
int(elem.get('to')),
confidence=_float(elem.get('confidence')),
id=elem.get('id'))
def domain_architecture(self, elem):
return PX.DomainArchitecture(
length=int(elem.get('length')),
domains=_get_children_as(elem, 'domain', self.domain))
def events(self, elem):
return PX.Events(
type=_get_child_text(elem, 'type'),
duplications=_get_child_text(elem, 'duplications', int),
speciations=_get_child_text(elem, 'speciations', int),
losses=_get_child_text(elem, 'losses', int),
confidence=_get_child_as(elem, 'confidence', self.confidence))
def id(self, elem):
provider = elem.get('provider') or elem.get('type')
return PX.Id(elem.text.strip(), provider)
def mol_seq(self, elem):
is_aligned = elem.get('is_aligned')
if is_aligned is not None:
is_aligned = _str2bool(is_aligned)
return PX.MolSeq(elem.text.strip(), is_aligned=is_aligned)
def point(self, elem):
return PX.Point(
elem.get('geodetic_datum'),
_get_child_text(elem, 'lat', float),
_get_child_text(elem, 'long', float),
alt=_get_child_text(elem, 'alt', float),
alt_unit=elem.get('alt_unit'))
def polygon(self, elem):
return PX.Polygon(
points=_get_children_as(elem, 'point', self.point))
def property(self, elem):
return PX.Property(elem.text.strip(),
elem.get('ref'),
elem.get('applies_to'),
elem.get('datatype'),
unit=elem.get('unit'),
id_ref=elem.get('id_ref'))
def reference(self, elem):
return PX.Reference(
doi=elem.get('doi'),
desc=_get_child_text(elem, 'desc'))
def sequence_relation(self, elem):
return PX.SequenceRelation(
elem.get('type'), elem.get('id_ref_0'), elem.get('id_ref_1'),
distance=_float(elem.get('distance')),
confidence=_get_child_as(elem, 'confidence', self.confidence))
def uri(self, elem):
return PX.Uri(elem.text.strip(),
desc=_collapse_wspace(elem.get('desc')),
type=elem.get('type'))
# ---------------------------------------------------------
# OUTPUT
# ---------------------------------------------------------
def _serialize(value):
"""Convert a Python primitive to a phyloXML-compatible Unicode string."""
if isinstance(value, float):
return unicode(value).upper()
elif isinstance(value, bool):
return unicode(value).lower()
return unicode(value)
def _clean_attrib(obj, attrs):
"""Create a dictionary from an object's specified, non-None attributes."""
out = {}
for key in attrs:
val = getattr(obj, key)
if val is not None:
out[key] = _serialize(val)
return out
def _handle_complex(tag, attribs, subnodes, has_text=False):
def wrapped(self, obj):
elem = ElementTree.Element(tag, _clean_attrib(obj, attribs))
for subn in subnodes:
if isinstance(subn, basestring):
# singular object: method and attribute names are the same
if getattr(obj, subn) is not None:
elem.append(getattr(self, subn)(getattr(obj, subn)))
else:
# list: singular method, pluralized attribute name
method, plural = subn
for item in getattr(obj, plural):
elem.append(getattr(self, method)(item))
if has_text:
elem.text = _serialize(obj.value)
return elem
wrapped.__doc__ = "Serialize a %s and its subnodes, in order." % tag
return wrapped
def _handle_simple(tag):
def wrapped(self, obj):
elem = ElementTree.Element(tag)
elem.text = _serialize(obj)
return elem
wrapped.__doc__ = "Serialize a simple %s node." % tag
return wrapped
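# Illustrative note: the two factories above generate the per-tag serializer
# methods assigned in the Writer class below; e.g. ``rank = _handle_simple('rank')``
# yields a method that renders a rank string such as 'genus' as <rank>genus</rank>.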
class Writer(object):
"""Methods for serializing a PhyloXML object to XML."""
def __init__(self, phyloxml):
"""Build an ElementTree from a PhyloXML object."""
assert isinstance(phyloxml, PX.Phyloxml), "Not a Phyloxml object"
self._tree = ElementTree.ElementTree(self.phyloxml(phyloxml))
def write(self, file, encoding='utf-8', indent=True):
if indent:
_indent(self._tree.getroot())
self._tree.write(file, encoding)
return len(self._tree.getroot())
# Convert classes to ETree elements
def phyloxml(self, obj):
elem = ElementTree.Element('phyloxml', obj.attributes) # Namespaces
for tree in obj.phylogenies:
elem.append(self.phylogeny(tree))
for otr in obj.other:
elem.append(self.other(otr))
return elem
def other(self, obj):
elem = ElementTree.Element(_ns(obj.tag, obj.namespace), obj.attributes)
elem.text = obj.value
for child in obj.children:
elem.append(self.other(child))
return elem
phylogeny = _handle_complex('phylogeny',
('rooted', 'rerootable',
'branch_length_unit', 'type'),
('name',
'id',
'description',
'date',
('confidence', 'confidences'),
'clade',
('clade_relation', 'clade_relations'),
('sequence_relation',
'sequence_relations'),
('property', 'properties'),
('other', 'other'),
))
clade = _handle_complex('clade', ('id_source',),
('name',
'branch_length',
('confidence', 'confidences'),
'width',
'color',
'node_id',
('taxonomy', 'taxonomies'),
('sequence', 'sequences'),
'events',
'binary_characters',
('distribution', 'distributions'),
'date',
('reference', 'references'),
('property', 'properties'),
('clade', 'clades'),
('other', 'other'),
))
accession = _handle_complex('accession', ('source',),
(), has_text=True)
annotation = _handle_complex('annotation',
('ref', 'source', 'evidence', 'type'),
('desc',
'confidence',
('property', 'properties'),
'uri',
))
def binary_characters(self, obj):
"""Serialize a binary_characters node and its subnodes."""
elem = ElementTree.Element('binary_characters',
_clean_attrib(obj,
('type', 'gained_count', 'lost_count',
'present_count', 'absent_count')))
for subn in ('gained', 'lost', 'present', 'absent'):
subelem = ElementTree.Element(subn)
for token in getattr(obj, subn):
subelem.append(self.bc(token))
elem.append(subelem)
return elem
clade_relation = _handle_complex('clade_relation',
('id_ref_0', 'id_ref_1',
'distance', 'type'),
('confidence',))
color = _handle_complex('color', (), ('red', 'green', 'blue'))
confidence = _handle_complex('confidence', ('type',),
(), has_text=True)
date = _handle_complex('date', ('unit',),
('desc', 'value', 'minimum', 'maximum'))
distribution = _handle_complex('distribution', (),
('desc',
('point', 'points'),
('polygon', 'polygons'),
))
def domain(self, obj):
"""Serialize a domain node."""
elem = ElementTree.Element('domain',
{'from': str(obj.start + 1), 'to': str(obj.end)})
if obj.confidence is not None:
elem.set('confidence', _serialize(obj.confidence))
if obj.id is not None:
elem.set('id', obj.id)
elem.text = _serialize(obj.value)
return elem
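    # Note: phyloXML domain coordinates are 1-based and inclusive, while the
    # parsed ProteinDomain stores a 0-based start (see the `domain` parser
    # handler above), so e.g. start=9, end=50 round-trips as from="10" to="50".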
domain_architecture = _handle_complex('domain_architecture',
('length',),
(('domain', 'domains'),))
events = _handle_complex('events', (),
('type',
'duplications',
'speciations',
'losses',
'confidence',
))
id = _handle_complex('id', ('provider',), (), has_text=True)
mol_seq = _handle_complex('mol_seq', ('is_aligned',),
(), has_text=True)
node_id = _handle_complex('node_id', ('provider',), (), has_text=True)
point = _handle_complex('point', ('geodetic_datum', 'alt_unit'),
('lat', 'long', 'alt'))
polygon = _handle_complex('polygon', (), (('point', 'points'),))
property = _handle_complex('property',
('ref', 'unit', 'datatype',
'applies_to', 'id_ref'),
(), has_text=True)
reference = _handle_complex('reference', ('doi',), ('desc',))
sequence = _handle_complex('sequence',
('type', 'id_ref', 'id_source'),
('symbol',
'accession',
'name',
'location',
'mol_seq',
'uri',
('annotation', 'annotations'),
'domain_architecture',
('other', 'other'),
))
sequence_relation = _handle_complex('sequence_relation',
('id_ref_0', 'id_ref_1',
'distance', 'type'),
('confidence',))
taxonomy = _handle_complex('taxonomy',
('id_source',),
('id',
'code',
'scientific_name',
'authority',
('common_name', 'common_names'),
('synonym', 'synonyms'),
'rank',
'uri',
('other', 'other'),
))
uri = _handle_complex('uri', ('desc', 'type'), (), has_text=True)
# Primitive types
# Floating point
alt = _handle_simple('alt')
branch_length = _handle_simple('branch_length')
lat = _handle_simple('lat')
long = _handle_simple('long')
maximum = _handle_simple('maximum')
minimum = _handle_simple('minimum')
value = _handle_simple('value')
width = _handle_simple('width')
# Integers
blue = _handle_simple('blue')
duplications = _handle_simple('duplications')
green = _handle_simple('green')
losses = _handle_simple('losses')
red = _handle_simple('red')
speciations = _handle_simple('speciations')
# Strings
bc = _handle_simple('bc')
code = _handle_simple('code')
common_name = _handle_simple('common_name')
desc = _handle_simple('desc')
description = _handle_simple('description')
location = _handle_simple('location')
name = _handle_simple('name')
rank = _handle_simple('rank')
scientific_name = _handle_simple('scientific_name')
symbol = _handle_simple('symbol')
synonym = _handle_simple('synonym')
type = _handle_simple('type')
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Phylo/PhyloXMLIO.py
|
Python
|
gpl-2.0
| 33,288
|
[
"Biopython"
] |
9c166518cd6fe2ebff150589547a5deead5f6445d57065b5006b17029bbd0d0d
|
#-*- coding: utf-8 -*-
# module pyparsing.py
#
# Copyright (c) 2003-2019 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pip._vendor.pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of '+', '|' and '^' operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
__version__ = "2.4.0"
__versionTime__ = "07 Apr 2019 18:28 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
# Python 3
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
# Python 3
from collections.abc import Iterable
from collections.abc import MutableMapping
except ImportError:
# Python 2.7
from collections import Iterable
from collections import MutableMapping
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
try:
from types import SimpleNamespace
except ImportError:
class SimpleNamespace: pass
# version compatibility configuration
__compat__ = SimpleNamespace()
__compat__.__doc__ = """
A cross-version compatibility configuration for pyparsing features that will be
released in a future version. By setting values in this configuration to True,
those features can be enabled in prior versions for compatibility development
and testing.
- collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
of results names when an And expression is nested within an Or or MatchFirst; set to
True to enable bugfix to be released in pyparsing 2.4
"""
__compat__.collect_all_And_tokens = True
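# Illustrative sketch (hypothetical downstream use): code targeting an older
# pyparsing release that exposes __compat__ could opt in to the 2.4 grouping
# fix ahead of time with:
#     from pip._vendor import pyparsing
#     pyparsing.__compat__.collect_all_And_tokens = True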
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [ '__version__', '__versionTime__', '__author__', '__compat__',
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
unicode = str
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode
friendly. It first tries str(obj). If that fails with
a UnicodeEncodeError, then it tries unicode(obj). It then
           encodes the result with the default encoding (via 'xmlcharrefreplace')
           and converts any XML character references produced for unencodable
           characters into \uXXXX escapes.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex(r'&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
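# e.g. (illustrative): _xml_escape('a < b & "c"') -> 'a &lt; b &amp; &quot;c&quot;'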
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
    Exception thrown when a parse expression doesn't match the input string;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
@staticmethod
def explain(exc, depth=16):
"""
Method to take an exception and translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- exc - exception raised during parsing (need not be a ParseException, in support
of Python exceptions that might be raised in a parse action)
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
Note: the diagnostic output will include string representations of the expressions
that failed to parse. These representations will be more helpful if you use `setName` to
give identifiable names to your expressions. Otherwise they will use the default string
forms, which may be cryptic to read.
explain() is only supported under Python 3.
"""
import inspect
if depth is None:
depth = sys.getrecursionlimit()
ret = []
if isinstance(exc, ParseBaseException):
ret.append(exc.line)
ret.append(' ' * (exc.col - 1) + '^')
ret.append("{0}: {1}".format(type(exc).__name__, exc))
if depth > 0:
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
seen = set()
for i, ff in enumerate(callers[-depth:]):
frm = ff[0]
f_self = frm.f_locals.get('self', None)
if isinstance(f_self, ParserElement):
if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
continue
if f_self in seen:
continue
seen.add(f_self)
self_type = type(f_self)
ret.append("{0}.{1} - {2}".format(self_type.__module__,
self_type.__name__,
f_self))
elif f_self is not None:
self_type = type(f_self)
ret.append("{0}.{1}".format(self_type.__module__,
self_type.__name__))
else:
code = frm.f_code
if code.co_name in ('wrapper', '<module>'):
continue
ret.append("{0}".format(code.co_name))
depth -= 1
if not depth:
break
return '\n'.join(ret)
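    # Illustrative sketch (Python 3 only), assuming a failing parse:
    #     try:
    #         Word(nums).setName("integer").parseString("ABC")
    #     except ParseException as pe:
    #         print(ParseException.explain(pe, depth=4))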
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like :class:`ParseFatalException`, but thrown internally
when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
that parsing is to stop immediately because an unbacktrackable
syntax error has been found.
"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by :class:`ParserElement.validate` if the
grammar could be improperly recursive
"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to
the parsed data:
- as a list (``len(results)``)
- by list index (``results[0], results[1]``, etc.)
- by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys."""
values = _itervalues
"""Returns an iterator of all named result values."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default= ``last``).
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
passed no argument or an integer argument, it will use ``list``
semantics and pop tokens from the list of parsed tokens. If passed
a non-integer argument (most likely a string), it will use ``dict``
semantics and pop the corresponding value from any defined results
names. A second default return value argument is supported, just as in
``dict.pop()``.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given ``defaultValue`` or ``None`` if no
``defaultValue`` is specified.
Similar to ``dict.get()``.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to ``list.insert()``.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self.__iadd__(itemseq)
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a :class:`ParseResults` object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = dict(self.__tokdict.items())
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of
a :class:`ParseResults`. Accepts an optional ``indent`` argument so
that this string can be embedded in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the
`pprint <https://docs.python.org/3/library/pprint.html>`_ module.
Accepts additional positional or keyword args as defined for
`pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See
:class:`ParserElement.parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
"""
s = strg
return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note - the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`ParserElement.parseString`
for more information on parsing strings containing ``<TAB>`` s, and
suggested methods to maintain a consistent view of the parsed string, the
parse location, and line and column positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
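# Illustrative sketch of the three location helpers above:
#     s = "abc\ndef"
#     lineno(5, s) -> 2      (loc 5 is 'e', on the second line)
#     col(5, s)    -> 2      (column numbering starts at 1)
#     line(5, s)   -> 'def'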
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
return [frame_summary[:2]]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
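# Illustrative sketch of _trim_arity: it lets parse actions declare 0-3 of the
# (s, loc, toks) arguments and still be invoked uniformly with all three, e.g.
#     def to_int(toks):                # accepts only the tokens
#         return int(toks[0])
#     wrapped = _trim_arity(to_int)    # wrapped(s, loc, toks) ends up calling to_int(toks)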
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""
Make a copy of this :class:`ParserElement`. Useful for defining
different parse actions for the same parsing pattern, using copies of
the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of ``expr.copy()`` is just ``expr()``::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original :class:`ParserElement` object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
``expr("name")`` in place of ``expr.setResultsName("name")``
- see :class:`__call__`.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set ``breakFlag`` to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as ``fn(s,loc,toks)`` ,
``fn(loc,toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default= ``False`` ) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
        before starting the parsing process. See :class:`parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
See examples in :class:`copy`.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
functions passed to ``addCondition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
fn = _trim_arity(fn)
def pa(s,l,t):
if not bool(fn(s,l,t)):
raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
           Fail action fn is a callable function that takes the arguments
``fn(s,loc,expr,err)`` where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw :class:`ParseFatalException`
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
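    # e.g. (illustrative): with the default whiteChars, expr.preParse("  foo", 0) -> 2,
    # i.e. the match attempt resumes at the first non-whitespace character.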
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or preloc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
try:
tokens = fn( instring, tokensStart, retTokens )
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
exc.__cause__ = parse_action_exc
raise exc
if tokens is not None and tokens is not retTokens:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
try:
tokens = fn( instring, tokensStart, retTokens )
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
exc.__cause__ = parse_action_exc
raise exc
if tokens is not None and tokens is not retTokens:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(cache) > size:
try:
cache.popitem(False)
except KeyError:
pass
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(key_fifo) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
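    # Illustrative sketch of the cache interface defined above:
    #     c = ParserElement._UnboundedCache()
    #     c.set(("expr", 0), "value")
    #     c.get(("expr", 0))                       # -> "value"
    #     c.get(("missing", 1)) is c.not_in_cache  # -> True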
# argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code. Memoizing is done for
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default= ``128``) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method :class:`ParserElement.enablePackrat`.
For best results, call ``enablePackrat()`` immediately after
importing pyparsing.
Example::
from pip._vendor import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set ``parseAll`` to True (equivalent to ending
the grammar with ``StringEnd()``).
Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the ``loc`` argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling ``parseWithTabs`` on your grammar before calling ``parseString``
(see :class:`parseWithTabs`)
- define your parse action using the full ``(s,loc,toks)`` signature, and
reference the input string using the parse action's ``s`` argument
        - explicitly expand the tabs in your input string before calling
``parseString``
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
``maxMatches`` argument, to clip scanning after 'n' matches are found. If
``overlap`` is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See :class:`parseString` for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""
Extension to :class:`scanString`, to modify matching text with modified tokens that may
be returned from a parse action. To use ``transformString``, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking ``transformString()`` on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. ``transformString()`` returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional ``maxsplit`` argument, to limit the number of splits;
and the optional ``includeSeparators`` argument (default= ``False``), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
converts them to :class:`Literal`s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a :class:`ParserElement`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns :class:`And` with error stop
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return self + And._ErrorStop() + other
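    # Illustrative sketch (not part of the library): once the expression before '-'
    # matches, a later failure is raised as a fatal ParseSyntaxException instead of
    # letting alternatives backtrack. The grammar and input below are assumed examples.
    #
    #   port        = Literal("COM") + Word(nums)   # '+' allows backtracking
    #   port_strict = Literal("COM") - Word(nums)   # '-' stops backtracking after "COM"
    #   (port | Word(alphas)).parseString("COMx")         # falls back, should give ['COMx']
    #   (port_strict | Word(alphas)).parseString("COMx")  # should raise ParseSyntaxException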
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a :class:`ParserElement`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of ``expr * 3`` in place of
        ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
tuple, similar to ``{min,max}`` multipliers in regular expressions. Tuples
may also include ``None`` as in:
- ``expr*(n,None)`` or ``expr*(n,)`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr*(None,n)`` is equivalent to ``expr*(0,n)``
(read as "0 to n instances of ``expr``")
- ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)``
- ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)``
Note that ``expr*(None,n)`` does not raise an exception if
more than n exprs exist in the input stream; that is,
``expr*(None,n)`` does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
``expr*(None,n) + ~expr``
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns :class:`MatchFirst`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a :class:`ParserElement`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns :class:`Or`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
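    # Illustrative sketch (not part of the library) contrasting '|' (MatchFirst,
    # first alternative that matches wins) with '^' (Or, longest match wins).
    # The literals and input are assumed examples.
    #
    #   (Literal("if") | Literal("iframe")).parseString("iframe")  # should give ['if']
    #   (Literal("if") ^ Literal("iframe")).parseString("iframe")  # should give ['iframe']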
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a :class:`ParserElement`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""
Implementation of & operator - returns :class:`Each`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
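    # Illustrative sketch (not part of the library): '&' builds an Each, which
    # requires all pieces to appear, in any order. Grammar and input are assumed examples.
    #
    #   color = Keyword("red") | Keyword("blue")
    #   size  = Word(nums)
    #   spec  = color("color") & size("size")
    #   spec.parseString("12 blue")   # should succeed even though the size comes first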
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a :class:`ParserElement`
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""
Implementation of ~ operator - returns :class:`NotAny`
"""
return NotAny( self )
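    # Illustrative sketch (not part of the library): '~' builds a NotAny negative
    # lookahead, which consumes no input. Grammar and inputs are assumed examples.
    #
    #   ident = ~Keyword("end") + Word(alphas)   # any word except the bare keyword 'end'
    #   ident.parseString("endless")  # ok: 'endless' is not the keyword 'end'
    #   ident.parseString("end")      # should raise ParseException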
def __call__(self, name=None):
"""
Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.
If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be
passed as ``True``.
        If ``name`` is omitted, same as calling :class:`copy`.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
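    # Illustrative sketch (not part of the library): suppressed punctuation is
    # matched but omitted from the results. Grammar and input are assumed examples.
    #
    #   pair = Word(alphas) + Literal(",").suppress() + Word(alphas)
    #   pair.parseString("ab,cd")   # should give ['ab', 'cd'] with no ','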
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
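    # Illustrative sketch (not part of the library) of whitespace control.
    # Grammar and inputs are assumed examples.
    #
    #   pair = Word(alphas) + Word(alphas)
    #   pair.parseString("abc   def")                    # should give ['abc', 'def']
    #   tight = Word(alphas) + Word(alphas).leaveWhitespace()
    #   tight.parseString("abc def")                     # should fail: no skipping before the 2nd word
    #   line_word = Word(alphas).setWhitespaceChars(" \t")   # keeps '\n' significant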
def parseWithTabs( self ):
"""
Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.
Must be called before ``parseString`` when the input grammar contains elements that
match ``<TAB>`` characters.
"""
self.keepTabs = True
return self
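    # Illustrative sketch (not part of the library): parseWithTabs is needed when
    # the grammar itself matches literal tab characters. Grammar and input are assumed.
    #
    #   row = Word(alphas) + White("\t") + Word(alphas)
    #   row.parseWithTabs().parseString("key\tvalue")    # should give ['key', '\t', 'value']
    #   # without parseWithTabs(), the tab is expanded to spaces before parsing and
    #   # the White("\t") element finds no literal tab to match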
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set ``flag`` to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using :class:`setDebugActions`. Prior to attempting
to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#',
fullDump=True, printResults=True, failureTests=False, postParse=None):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
- comment - (default= ``'#'``) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default= ``True``) prints test output to stdout
- failureTests - (default= ``False``) indicates if these tests are expected to fail parsing
- postParse - (default= ``None``) optional callback for successful parse results; called as
`fn(test_string, parse_results)` and returns a string to be added to the test output
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if ``failureTests`` is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
# convert newline marks to actual newlines, and strip leading BOM if present
NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString)
BOM = '\ufeff'
t = NL.transformString(t.lstrip(BOM))
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
if postParse is not None:
try:
pp_value = postParse(t, result)
if pp_value is not None:
out.append(str(pp_value))
except Exception as e:
out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""Abstract :class:`ParserElement` subclass, for defining atomic
matching patterns.
"""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""An empty token, will always match.
"""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match.
"""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use :class:`CaselessLiteral`.
For keyword matching (force word break before and after the matched string),
use :class:`Keyword` or :class:`CaselessKeyword`.
"""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is,
it must be immediately followed by a non-keyword character. Compare
with :class:`Literal`:
- ``Literal("if")`` will match the leading ``'if'`` in
``'ifAndOnlyIf'``.
- ``Keyword("if")`` will not; it will only match the leading
``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
Accepts two optional constructor arguments in addition to the
keyword string:
- ``identChars`` is a string of characters that would be valid
identifier characters, defaulting to all alphanumerics + "_" and
"$"
- ``caseless`` allows case-insensitive matching, default is ``False``.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use :class:`CaselessKeyword`.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=None, caseless=False ):
super(Keyword,self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for :class:`CaselessKeyword`.)
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of :class:`Keyword`.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for :class:`CaselessLiteral`.)
"""
def __init__( self, matchString, identChars=None ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
class CloseMatch(Token):
"""A variation on :class:`Literal` which matches "close" matches,
that is, strings with at most 'n' mismatching characters.
:class:`CloseMatch` takes parameters:
- ``match_string`` - string to be matched
- ``maxMismatches`` - (``default=1``) maximum number of
mismatches allowed to count as a match
The results from a successful parse will contain the matched text
from the input string and the following named results:
- ``mismatches`` - a list of the positions within the
match_string where mismatches were found
- ``original`` - the original match_string used to compare
against the input string
If ``mismatches`` is an empty list, then the match was an exact
match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch,self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl( self, instring, loc, doActions=True ):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
src,mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = self.match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters, an
optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction. An optional ``excludeChars`` parameter can
list characters that might be found in the input ``bodyChars``
string; useful to define a word of all printables except for one or
two characters, for instance.
:class:`srange` is useful for defining custom character set strings
for defining ``Word`` expressions, using range notation from
regular expression character sets.
A common mistake is to use :class:`Word` to match a specific literal
string, as in ``Word("Address")``. Remember that :class:`Word`
uses the string argument to define *sets* of matchable characters.
This expression would match "Add", "AAA", "dAred", or any other word
made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
exact literal string, use :class:`Literal` or :class:`Keyword`.
pyparsing includes helper strings for building Words:
- :class:`alphas`
- :class:`nums`
- :class:`alphanums`
- :class:`hexnums`
- :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
- accented, tilded, umlauted, etc.)
- :class:`punc8bit` (non-alphabetic characters in ASCII range
128-255 - currency, symbols, superscripts, diacriticals, etc.)
- :class:`printables` (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
excludeChars = set(excludeChars)
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
elif self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Char(Word):
"""A short-cut class for defining ``Word(characters, exact=1)``,
when defining a match of any single character in a string of
characters.
"""
def __init__(self, charset, asKeyword=False, excludeChars=None):
super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)
self.reString = "[%s]" % _escapeRegexRangeChars(self.initCharsOrig)
self.re = re.compile( self.reString )
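    # Illustrative sketch (not part of the library): Char matches exactly one
    # character from the given set. Grammar and input are assumed examples.
    #
    #   sign = Char("+-")
    #   signed_int = Optional(sign) + Word(nums)
    #   signed_int.parseString("-42")   # should give ['-', '42']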
class Regex(Token):
r"""Token for matching strings that match a given regular
expression. Defined with string specifying the regular expression in
a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
If the given regex contains named groups (defined using ``(?P<name>...)``),
these will be preserved as named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False):
"""The parameters ``pattern`` and ``flags`` are passed
to the ``re.compile()`` function as-is. See the Python
`re module <https://docs.python.org/3/library/re.html>`_ module for an
explanation of the acceptable patterns and flags.
"""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
self.asGroupList = asGroupList
self.asMatch = asMatch
if self.asGroupList:
self.parseImpl = self.parseImplAsGroupList
if self.asMatch:
self.parseImpl = self.parseImplAsMatch
def parseImpl(self, instring, loc, doActions=True):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = ParseResults(result.group())
d = result.groupdict()
if d:
for k, v in d.items():
ret[k] = v
return loc, ret
def parseImplAsGroupList(self, instring, loc, doActions=True):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.groups()
return loc, ret
def parseImplAsMatch(self, instring, loc, doActions=True):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result
return loc, ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
def sub(self, repl):
r"""
Return Regex with an attached parse action to transform the parsed
result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
Example::
make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
print(make_html.transformString("h1:main title:"))
# prints "<h1>main title</h1>"
"""
if self.asGroupList:
warnings.warn("cannot use sub() with Regex(asGroupList=True)",
SyntaxWarning, stacklevel=2)
raise SyntaxError()
if self.asMatch and callable(repl):
warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",
SyntaxWarning, stacklevel=2)
raise SyntaxError()
if self.asMatch:
def pa(tokens):
return tokens[0].expand(repl)
else:
def pa(tokens):
return self.re.sub(repl, tokens[0])
return self.addParseAction(pa)
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the
quote delimiting string
- escChar - character to escape quotes, typically backslash
(default= ``None`` )
- escQuote - special quote sequence to escape an embedded quote
string (such as SQL's ``""`` to escape an embedded ``"``)
(default= ``None`` )
- multiline - boolean indicating whether quotes can span
multiple lines (default= ``False`` )
- unquoteResults - boolean indicating whether the matched text
should be unquoted (default= ``True`` )
- endQuoteChar - string of one or more characters defining the
end of the quote delimited string (default= ``None`` => same as
quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace
(``'\t'``, ``'\n'``, etc.) to actual whitespace
(default= ``True`` )
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString,self).__init__()
        # remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given
set (will include whitespace in matched characters if not listed in
the provided exclusion set - see example). Defined with string
containing all disallowed characters, and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use " +
"Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally,
whitespace is ignored by pyparsing grammars. This class is included
when some whitespace structures are significant. Define with
a string containing the whitespace characters to be matched; default
is ``" \\t\\r\\n"``. Also takes optional ``min``,
``max``, and ``exact`` arguments, as defined for the
:class:`Word` class.
"""
whiteStrs = {
' ' : '<SP>',
'\t': '<TAB>',
'\n': '<LF>',
'\r': '<CR>',
'\f': '<FF>',
        u'\u00A0': '<NBSP>',
        u'\u1680': '<OGHAM_SPACE_MARK>',
        u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',
        u'\u2000': '<EN_QUAD>',
        u'\u2001': '<EM_QUAD>',
        u'\u2002': '<EN_SPACE>',
        u'\u2003': '<EM_SPACE>',
        u'\u2004': '<THREE-PER-EM_SPACE>',
        u'\u2005': '<FOUR-PER-EM_SPACE>',
        u'\u2006': '<SIX-PER-EM_SPACE>',
        u'\u2007': '<FIGURE_SPACE>',
        u'\u2008': '<PUNCTUATION_SPACE>',
        u'\u2009': '<THIN_SPACE>',
        u'\u200A': '<HAIR_SPACE>',
        u'\u200B': '<ZERO_WIDTH_SPACE>',
        u'\u202F': '<NNBSP>',
        u'\u205F': '<MMSP>',
        u'\u3000': '<IDEOGRAPHIC_SPACE>',
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for
tabular report scraping.
"""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
r"""Matches if current position is at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__( self ):
super(LineStart,self).__init__()
self.errmsg = "Expected start of line"
def parseImpl( self, instring, loc, doActions=True ):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the
parse string
"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse
string
"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
            # see if entire string up to here is just whitespace and ignorables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string
"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word,
and is not preceded by any character in a given set of
``wordChars`` (default= ``printables``). To emulate the
``\b`` behavior of regular expressions, use
``WordStart(alphanums)``. ``WordStart`` will also match at
the beginning of the string being parsed, or at the beginning of
a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
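    # Illustrative sketch (not part of the library): match digits only when they
    # begin a word, not when embedded in an identifier. Grammar and input are assumed.
    #
    #   num_at_word_start = WordStart(alphanums) + Word(nums)
    #   num_at_word_start.searchString("abc123 456")   # should find only ['456']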
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and is
not followed by any character in a given set of ``wordChars``
(default= ``printables``). To emulate the ``\b`` behavior of
regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
will also match at the end of the string being parsed, or at the end
of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and
post-processing parsed tokens.
"""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
elif isinstance( exprs, Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given :class:`ParseExpression` s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the ``'+'`` operator.
May also be constructed using the ``'-'`` operator, which will
suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def streamline(self):
super(And, self).streamline()
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the expression that matches the longest
string will be used. May be constructed using the ``'^'``
operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self):
super(Or, self).streamline()
if __compat__.collect_all_And_tokens:
self.saveAsList = any(e.saveAsList for e in self.exprs)
return self
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the first one listed is the one that will
match. May be constructed using the ``'|'`` operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self):
super(MatchFirst, self).streamline()
if __compat__.collect_all_And_tokens:
self.saveAsList = any(e.saveAsList for e in self.exprs)
return self
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""Requires all given :class:`ParseExpression` s to be found, but in
any order. Expressions may be separated by whitespace.
May be constructed using the ``'&'`` operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
self.saveAsList = True
def streamline(self):
super(Each, self).streamline()
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
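        # repeatedly try every outstanding expression until a complete pass makes no
        # further progress; the order in which expressions succeed is recorded in
        # matchOrder so the results can be re-parsed (with parse actions) in that order below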
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""Abstract subclass of :class:`ParserElement`, for combining and
post-processing parsed tokens.
"""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression.
``FollowedBy`` does *not* advance the parsing position within
the input string, it only verifies that the specified parse
expression matches at the current position. ``FollowedBy``
always returns a null token list. If any results names are defined
in the lookahead expression, those *will* be returned for access by
name.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
_, ret = self.expr._parse(instring, loc, doActions=doActions)
del ret[:]
return loc, ret
class PrecededBy(ParseElementEnhance):
"""Lookbehind matching of the given parse expression.
``PrecededBy`` does not advance the parsing position within the
input string, it only verifies that the specified parse expression
matches prior to the current position. ``PrecededBy`` always
returns a null token list, but if a results name is defined on the
given expression, it is returned.
Parameters:
- expr - expression that must match prior to the current parse
location
- retreat - (default= ``None``) - (int) maximum number of characters
to lookbehind prior to the current parse location
If the lookbehind expression is a string, Literal, Keyword, or
a Word or CharsNotIn with a specified exact or maximum length, then
the retreat parameter is not required. Otherwise, retreat must be
specified to give a maximum number of characters to look back from
the current parse position for a lookbehind match.
Example::
# VB-style variable names with type prefixes
int_var = PrecededBy("#") + pyparsing_common.identifier
str_var = PrecededBy("$") + pyparsing_common.identifier
"""
def __init__(self, expr, retreat=None):
super(PrecededBy, self).__init__(expr)
self.expr = self.expr().leaveWhitespace()
self.mayReturnEmpty = True
self.mayIndexError = False
self.exact = False
if isinstance(expr, str):
retreat = len(expr)
self.exact = True
elif isinstance(expr, (Literal, Keyword)):
retreat = expr.matchLen
self.exact = True
elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
retreat = expr.maxLen
self.exact = True
elif isinstance(expr, _PositionToken):
retreat = 0
self.exact = True
self.retreat = retreat
self.errmsg = "not preceded by " + str(expr)
self.skipWhitespace = False
def parseImpl(self, instring, loc=0, doActions=True):
if self.exact:
if loc < self.retreat:
raise ParseException(instring, loc, self.errmsg)
start = loc - self.retreat
_, ret = self.expr._parse(instring, start)
else:
# retreat specified a maximum lookbehind window, iterate
test_expr = self.expr + StringEnd()
instring_slice = instring[:loc]
last_expr = ParseException(instring, loc, self.errmsg)
for offset in range(1, min(loc, self.retreat+1)):
try:
_, ret = test_expr._parse(instring_slice, loc-offset)
except ParseBaseException as pbe:
last_expr = pbe
else:
break
else:
raise last_expr
# return empty list of tokens, but preserve any defined results names
del ret[:]
return loc, ret
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression.
``NotAny`` does *not* advance the parsing position within the
input string, it only verifies that the specified parse expression
does *not* match at the current position. Also, ``NotAny`` does
*not* skip over leading whitespace. ``NotAny`` always returns
a null token list. May be constructed using the '~' operator.
Example::
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
# take care not to mistake keywords for identifiers
ident = ~(AND | OR | NOT) + Word(alphas)
boolean_term = Optional(NOT) + ident
# very crude boolean expression - to support parenthesis groups and
# operation hierarchy, use infixNotation
boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
# integers that are followed by "." are actually floats
integer = Word(nums) + ~Char(".")
"""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__( self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = (not not self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to :class:`OneOrMore`
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
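# Illustrative usage sketch (not part of the original pyparsing source): a
# comma-separated list whose trailing items are optional:
#
#     listExpr = Word(alphas) + ZeroOrMore(Suppress(',') + Word(alphas))
#     listExpr.parseString("spam")               # -> ['spam']
#     listExpr.parseString("spam, eggs, ham")    # -> ['spam', 'eggs', 'ham']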
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched
expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default= ``False``) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default= ``None``) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default= ``None``) define expressions that are not allowed to be
      included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
print tkt.dump()
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.saveAsList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the ``Forward``
variable using the '<<' operator.
Note: take care when assigning to ``Forward`` not to overlook
precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the ``Forward``::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See :class:`ParseResults.pprint` for an example of a recursive
parser created using ``Forward``.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
# Avoid infinite recursion by setting a temporary name
self.name = self.__class__.__name__ + ": ..."
# Use the string representation of main expression.
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
del self.name
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
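# Illustrative usage sketch (not part of the original pyparsing source): a small
# recursive grammar for parenthesized sums, defined with the '<<=' operator:
#
#     expr = Forward()
#     atom = Word(nums) | Suppress('(') + expr + Suppress(')')
#     expr <<= atom + ZeroOrMore(oneOf("+ -") + atom)
#     expr.parseString("(1+2)-3")    # -> ['1', '+', '2', '-', '3']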
class TokenConverter(ParseElementEnhance):
"""
    Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
"""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the
input string; this can be disabled by specifying
``'adjacent=False'`` in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for
returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also
as a dictionary. Each element can also be referenced using the first
token in the expression as its key. Useful for tabular report
    scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at :class:`ParseResults` of accessing fields by results name.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
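        # use the first token of each group as the results name for that group; a
        # single remaining value is stored directly, anything longer (or itself keyed)
        # is stored as a nested ParseResults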
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also :class:`delimitedList`.)
"""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
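# Illustrative usage sketch (not part of the original pyparsing source): wrap a parse
# action so that repeated invocations fail until reset() is called:
#
#     def announce(s, l, t):
#         print("matched %r at %d" % (t[0], l))
#     once = OnlyOnce(announce)
#     wd = Word(alphas).setParseAction(once)
#     wd.parseString("abc")    # prints "matched 'abc' at 0"
#     wd.parseString("def")    # raises ParseException - action already called
#     once.reset()             # allows the wrapped action to fire again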
def traceParseAction(f):
"""Decorator for debugging parse actions.
When the parse action is called, this decorator will print
``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
When the parse action completes, the decorator will print
``"<<"`` followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter
defaults to ','. By default, the list elements and delimiters can
    have intervening whitespace and comments, but this can be
overridden by passing ``combine=True`` in the constructor. If
``combine`` is set to ``True``, the matching tokens are
returned as a single token string, with the delimiters included;
otherwise, the matching tokens are returned as a list of tokens,
with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
    The matched tokens are returned as a list of expr tokens - the
leading count token is suppressed.
If ``intExpr`` is specified, it should be a pyparsing expression
that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
    matches a previous literal, it will also match the leading
``"1:1"`` in ``"1:10"``. If this is not desired, use
:class:`matchPreviousExpr`. Do *not* use with packrat parsing
enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
    matches by expressions, it will *not* match the leading ``"1:1"``
in ``"1:10"``; the expressions are evaluated first, and then
compared, so ``"1"`` is compared with ``"10"``. Do *not* use
with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes
sure to do longest-first testing when there is a conflict,
regardless of the input order, but returns
a :class:`MatchFirst` for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of
string literals
- caseless - (default= ``False``) - treat all literals as
caseless
- useRegex - (default= ``True``) - as an optimization, will
generate a Regex object; otherwise, will generate
a :class:`MatchFirst` object (if ``caseless=True``, or if
creating a :class:`Regex` raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
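    # remove duplicate symbols and, whenever a later symbol starts with the current
    # one, move the longer symbol ahead of it so that alternatives are tried
    # longest-first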
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying
the respective patterns for the key and value. Takes care of
defining the :class:`Dict`, :class:`ZeroOrMore`, and
:class:`Group` tokens in the proper order. The key pattern
can include delimiting markers or punctuation, as long as they are
suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the :class:`Dict` results
can include named token fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict(OneOrMore(Group(key + value)))
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
    default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions,
even if all but one are non-empty.
"""
return TokenConverter(expr).addParseAction(lambda t:t[0])
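# Illustrative usage sketch (not part of the original pyparsing source): strip one
# level of grouping from a result:
#
#     price = Group(Word(nums) + '.' + Word(nums))
#     price.parseString("12.50")             # -> [['12', '.', '50']]
#     ungroup(price).parseString("12.50")    # -> ['12', '.', '50']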
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
    Be careful if the input text contains ``<TAB>`` characters; you
may want to call :class:`ParserElement.parseWithTabs`
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""Helper to easily define string ranges for use in Word
construction. Borrows syntax from regexp '[]' string range
definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string
is the expanded character set joined into a single string. The
values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as ``\-``
or ``\]``)
- an escaped hex character with a leading ``'\x'``
(``\x21``, which is a ``'!'`` character) (``\0x##``
is also supported for backwards compatibility)
- an escaped octal character with a leading ``'\0'``
(``\041``, which is a ``'!'`` character)
- a range of any of the above, separated by a dash (``'a-z'``,
etc.)
- any combination of the above (``'aeiouy'``,
``'a-zA-Z0-9_$'``, etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at
a specific column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
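# Illustrative usage sketch (not part of the original pyparsing source): accept a
# number only when it begins in a specific column (here column 9) of a fixed-width report:
#
#     amount = Word(nums).setParseAction(matchOnlyAtCol(9))
#     amount.searchString("id      42\nid      77")    # -> [['42'], ['77']]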
def replaceWith(replStr):
"""Helper method for common parse actions that simply return
a literal value. Especially useful when used with
:class:`transformString<ParserElement.transformString>` ().
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed
quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""Helper to define a parse action by mapping a function to all
elements of a ParseResults list. If any additional args are passed,
they are forwarded to the given function as additional arguments
after the token, as in
``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
which will convert the parsed data to an integer using base 16.
    Example (compare the last example to the one in :class:`ParserElement.transformString`)::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s,l,t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case.
Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case.
Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
def _makeTags(tagStr, xml,
suppress_LT=Suppress("<"),
suppress_GT=Suppress(">")):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = (suppress_LT
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue )))
+ Optional("/", default=[False])("empty").setParseAction(lambda s,l,t:t[0]=='/')
+ suppress_GT)
else:
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printables, excludeChars=">")
openTag = (suppress_LT
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
+ Optional(Suppress("=") + tagAttrValue))))
+ Optional("/",default=[False])("empty").setParseAction(lambda s,l,t:t[0]=='/')
+ suppress_GT)
closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
openTag.setName("<%s>" % resname)
# add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
openTag.addParseAction(lambda t: t.__setitem__("start"+"".join(resname.replace(":"," ").title().split()), t.copy()))
closeTag = closeTag("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
openTag.tag_body = SkipTo(closeTag())
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML,
given a tag name. Matches tags in either upper or lower case,
attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and
# closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are
# also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> https://github.com/pyparsing/pyparsing/wiki
"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML,
given a tag name. Matches tags only in the given upper/lower case.
Example: similar to :class:`makeHTMLTags`
"""
return _makeTags( tagStr, True )
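# Illustrative usage sketch (not part of the original pyparsing source): extract the
# body and an attribute from a case-sensitive XML tag:
#
#     msg, msg_end = makeXMLTags("Msg")
#     msg_expr = msg + SkipTo(msg_end)("body") + msg_end
#     for m in msg_expr.searchString('<Msg id="1">hello</Msg>'):
#         print(m.id, m.body)    # -> 1 hello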
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``withAttribute`` with a series of attribute names and
values. Specify the list of filter attributes names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`withClass`.
To verify that the attribute exists, but without specifying a value,
pass ``withAttribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""Simplified version of :class:`withAttribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
opAssoc = SimpleNamespace()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary
or binary, left- or right-associative. Parse actions can also be
attached to operator expressions. The generated parser will also
recognize the use of parentheses to override operator precedences
(see example below).
Note: if you define a deep operator list, you may see performance
issues when using infixNotation. See
:class:`ParserElement.enablePackrat` for a mechanism to potentially
improve your parser performance.
Parameters:
- baseExpr - expression representing the most basic element for the
nested expression
- opList - list of tuples, one for each operator precedence level
in the expression grammar; each tuple is of the form ``(opExpr,
numTerms, rightLeftAssoc, parseAction)``, where:
- opExpr is the pyparsing expression for the operator; may also
be a string, which will be converted to a Literal; if numTerms
is 3, opExpr is a tuple of two expressions, for the two
operators separating the 3 terms
- numTerms is the number of terms for this operator (must be 1,
2, or 3)
- rightLeftAssoc is the indicator whether the operator is right
or left associative, using the pyparsing-defined constants
``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the parse action
tuple member may be omitted); if the parse action is passed
a tuple or list of functions, this is equivalent to calling
``setParseAction(*fn)``
(:class:`ParserElement.setParseAction`)
- lpar - expression for matching left-parentheses
(default= ``Suppress('(')``)
- rpar - expression for matching right-parentheses
(default= ``Suppress(')')``)
Example::
# simple example of four-function arithmetic with ints and
# variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
# captive version of FollowedBy that does not do parse actions or capture results names
class _FB(FollowedBy):
def parseImpl(self, instring, loc, doActions=True):
self.expr.tryParse(instring, loc)
return loc, []
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = _FB(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = _FB(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = _FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = _FB(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = _FB(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = _FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.setParseAction(*pa)
else:
matchExpr.setParseAction(pa)
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of :class:`infixNotation`, will be
dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
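# Added illustrative note (not part of the original pyparsing source): the quoted-string
# expressions above can be used directly; removeQuotes (defined earlier in this module,
# assumed available here) strips the surrounding quotes from the matched token.
#
#   print(quotedString.parseString('"hello world"'))                              # ['"hello world"']
#   print(quotedString.copy().setParseAction(removeQuotes).parseString("'hi'"))   # ['hi']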
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and
closing delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list
(default= ``"("``); can also be a pyparsing expression
- closer - closing character for a nested list
(default= ``")"``); can also be a pyparsing expression
- content - expression for items within the nested lists
(default= ``None``)
- ignoreExpr - expression for ignoring opening and closing
delimiters (default= :class:`quotedString`)
If an expression is not provided for the content argument, the
nested expression will capture all whitespace-delimited content
between delimiters as a list of separate values.
Use the ``ignoreExpr`` argument to define expressions that may
contain opening or closing characters that should not be treated as
opening or closing characters for nesting, such as quotedString or
a comment expression. Specify multiple expressions using an
:class:`Or` or :class:`MatchFirst`. The default is
:class:`quotedString`, but if no expressions are to be ignored, then
pass ``None`` for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks,
such as those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single
grammar should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond
the current level; set to False for blocks of left-most
statements (default= ``True``)
A valid block must contain at least one ``blockStatement``.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
backup_stack = indentStack[:]
def reset_stack():
indentStack[:] = backup_stack
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
smExpr.setFailAction(lambda a, b, c, d: reset_stack())
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
javaStyleComment = cppStyleComment
"Same as :class:`cppStyleComment`"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form ``# ... (to end of line)``"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or
quoted strings, separated by commas.
This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
"""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""Here are some common low-level expressions that may be useful in
jump-starting parser development:
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
:class:`scientific notation<sci_real>`)
- common :class:`programming identifiers<identifier>`
- network addresses (:class:`MAC<mac_address>`,
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
- ISO8601 :class:`dates<iso8601_date>` and
:class:`datetime<iso8601_datetime>`
- :class:`UUID<uuid>`
- :class:`comma-separated list<comma_separated_list>`
Parse actions:
- :class:`convertToInteger`
- :class:`convertToFloat`
- :class:`convertToDate`
- :class:`convertToDatetime`
- :class:`stripHTMLTags`
- :class:`upcaseTokens`
- :class:`downcaseTokens`
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional
scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""Helper to create a parse action for converting parsed
datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (``yyyy-mm-dd``)"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body)
Prints::
More info at the pyparsing wiki page
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ Optional( White(" \t") ) ) ).streamline().setName("commaItem")
comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
class _lazyclassproperty(object):
def __init__(self, fn):
self.fn = fn
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
def __get__(self, obj, cls):
if cls is None:
cls = type(obj)
if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) for superclass in cls.__mro__[1:]):
cls._intern = {}
attrname = self.fn.__name__
if attrname not in cls._intern:
cls._intern[attrname] = self.fn(cls)
return cls._intern[attrname]
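# Added note (not in the original source): _lazyclassproperty computes the decorated
# value once per class and caches it in cls._intern, so properties such as
# unicode_set.alphas below are only built on first access for each subclass.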
class unicode_set(object):
"""
A set of Unicode characters, for language-specific strings for
``alphas``, ``nums``, ``alphanums``, and ``printables``.
A unicode_set is defined by a list of ranges in the Unicode character
set, in a class attribute ``_ranges``, such as::
_ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
A unicode set can also be defined using multiple inheritance of other unicode sets::
class CJK(Chinese, Japanese, Korean):
pass
"""
_ranges = []
@classmethod
def _get_chars_for_ranges(cls):
ret = []
for cc in cls.__mro__:
if cc is unicode_set:
break
for rr in cc._ranges:
ret.extend(range(rr[0], rr[-1]+1))
return [unichr(c) for c in sorted(set(ret))]
@_lazyclassproperty
def printables(cls):
"all non-whitespace characters in this range"
return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))
@_lazyclassproperty
def alphas(cls):
"all alphabetic characters in this range"
return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
@_lazyclassproperty
def nums(cls):
"all numeric digit characters in this range"
return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges()))
@_lazyclassproperty
def alphanums(cls):
"all alphanumeric characters in this range"
return cls.alphas + cls.nums
class pyparsing_unicode(unicode_set):
"""
A namespace class for defining common language unicode_sets.
"""
_ranges = [(32, sys.maxunicode)]
class Latin1(unicode_set):
"Unicode set for Latin-1 Unicode Character Range"
_ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
class LatinA(unicode_set):
"Unicode set for Latin-A Unicode Character Range"
_ranges = [(0x0100, 0x017f),]
class LatinB(unicode_set):
"Unicode set for Latin-B Unicode Character Range"
_ranges = [(0x0180, 0x024f),]
class Greek(unicode_set):
"Unicode set for Greek Unicode Character Ranges"
_ranges = [
(0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d),
(0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4),
(0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe),
]
class Cyrillic(unicode_set):
"Unicode set for Cyrillic Unicode Character Range"
_ranges = [(0x0400, 0x04ff)]
class Chinese(unicode_set):
"Unicode set for Chinese Unicode Character Range"
_ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f), ]
class Japanese(unicode_set):
"Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
_ranges = [ ]
class Kanji(unicode_set):
"Unicode set for Kanji Unicode Character Range"
_ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f), ]
class Hiragana(unicode_set):
"Unicode set for Hiragana Unicode Character Range"
_ranges = [(0x3040, 0x309f), ]
class Katakana(unicode_set):
"Unicode set for Katakana Unicode Character Range"
_ranges = [(0x30a0, 0x30ff), ]
class Korean(unicode_set):
"Unicode set for Korean Unicode Character Range"
_ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f), ]
class CJK(Chinese, Japanese, Korean):
"Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
pass
class Thai(unicode_set):
"Unicode set for Thai Unicode Character Range"
_ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b), ]
class Arabic(unicode_set):
"Unicode set for Arabic Unicode Character Range"
_ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f), ]
class Hebrew(unicode_set):
"Unicode set for Hebrew Unicode Character Range"
_ranges = [(0x0590, 0x05ff), ]
class Devanagari(unicode_set):
"Unicode set for Devanagari Unicode Character Range"
_ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)]
pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges
+ pyparsing_unicode.Japanese.Hiragana._ranges
+ pyparsing_unicode.Japanese.Katakana._ranges)
# define ranges in language character sets
if PY_3:
setattr(pyparsing_unicode, "العربية", pyparsing_unicode.Arabic)
setattr(pyparsing_unicode, "中文", pyparsing_unicode.Chinese)
setattr(pyparsing_unicode, "кириллица", pyparsing_unicode.Cyrillic)
setattr(pyparsing_unicode, "Ελληνικά", pyparsing_unicode.Greek)
setattr(pyparsing_unicode, "עִברִית", pyparsing_unicode.Hebrew)
setattr(pyparsing_unicode, "日本語", pyparsing_unicode.Japanese)
setattr(pyparsing_unicode.Japanese, "漢字", pyparsing_unicode.Japanese.Kanji)
setattr(pyparsing_unicode.Japanese, "カタカナ", pyparsing_unicode.Japanese.Katakana)
setattr(pyparsing_unicode.Japanese, "ひらがな", pyparsing_unicode.Japanese.Hiragana)
setattr(pyparsing_unicode, "한국어", pyparsing_unicode.Korean)
setattr(pyparsing_unicode, "ไทย", pyparsing_unicode.Thai)
setattr(pyparsing_unicode, "देवनागरी", pyparsing_unicode.Devanagari)
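# Added illustrative note (not part of the original pyparsing source): the language-specific
# character sets defined above can seed ordinary Word expressions, e.g.
#
#   greek_word = Word(pyparsing_unicode.Greek.alphas)
#   print(greek_word.parseString(u"αβγ δεζ"))   # ['αβγ']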
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
|
xyuanmu/XX-Net
|
python3.8.2/Lib/site-packages/pip/_vendor/pyparsing.py
|
Python
|
bsd-2-clause
| 245,385
|
[
"VisIt"
] |
b311947bf61c5810796681de7349b5889b60723e222affd219f740ff35426082
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tempfile
from functools import reduce
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.gto.basis import parse_molpro
from pyscf.gto.basis import parse_gaussian
from pyscf.lib.exceptions import BasisNotFoundError
class KnownValues(unittest.TestCase):
def test_parse_pople(self):
self.assertEqual(gto.basis._parse_pople_basis('631g(d)', 'C'),
('pople-basis/6-31G.dat', 'pople-basis/6-31G-polarization-d.dat'))
self.assertEqual(gto.basis._parse_pople_basis('631g**', 'C'),
('pople-basis/6-31Gss.dat',))
self.assertEqual(gto.basis._parse_pople_basis('631++g**', 'C'),
('pople-basis/6-31++Gss.dat',))
self.assertEqual(gto.basis._parse_pople_basis('6311+g(d,p)', 'C'),
('pople-basis/6-311+G.dat', 'pople-basis/6-311G-polarization-d.dat'))
self.assertRaises(KeyError, gto.basis._parse_pople_basis, '631g++', 'C')
def test_basis_load(self):
self.assertEqual(gto.basis.load(__file__, 'H'), [])
self.assertRaises(BasisNotFoundError, gto.basis.load, 'abas', 'H')
#self.assertRaises(BasisNotFoundError, gto.basis.load(__file__, 'C'), [])
self.assertEqual(len(gto.basis.load('631++g**', 'C')), 8)
self.assertEqual(len(gto.basis.load('ccpcvdz', 'C')), 7)
basdat = gto.basis.load('minao', 'C') + gto.basis.load('sto3g', 'C')
basdat1 = gto.basis.parse_nwchem.parse(
gto.basis.parse_nwchem.convert_basis_to_nwchem('C', basdat), 'C')
bas = []
for b in sorted(basdat, reverse=True):
b1 = b[:1]
for x in b[1:]:
b1.append(list(x))
bas.append(b1)
bas = [b for b in bas if b[0]==0] + [b for b in bas if b[0]==1]
self.assertEqual(bas, basdat1)
self.assertEqual(len(gto.basis.load('def2-svp', 'Rn')), 16)
def test_basis_load_from_file(self):
ftmp = tempfile.NamedTemporaryFile()
ftmp.write('''
Li S
16.1195750 0.15432897
2.9362007 0.53532814
0.7946505 0.44463454
Li S
0.6362897 -0.09996723
0.1478601 0.39951283
0.0480887 0.70011547
'''.encode())
ftmp.flush()
b = gto.basis.load(ftmp.name, 'Li')
self.assertEqual(len(b), 2)
self.assertEqual(len(b[0][1:]), 3)
self.assertEqual(len(b[1][1:]), 3)
def test_basis_load_ecp(self):
self.assertEqual(gto.basis.load_ecp(__file__, 'H'), [])
def test_parse_basis(self):
basis_str = '''
#BASIS SET: (6s,3p) -> [2s,1p]
C S
71.6168370 0.15432897
13.0450960 0.53532814
#
3.5305122 0.44463454
C SP
2.9412494 -0.09996723 0.15591627
0.6834831 0.39951283 0.60768372
0.2222899 0.70011547 0.39195739 '''
self.assertRaises(BasisNotFoundError, gto.basis.parse_nwchem.parse, basis_str, 'O')
basis_dat = gto.basis.parse_nwchem.parse(basis_str)
self.assertEqual(len(basis_dat), 3)
def test_parse_ecp(self):
ecp_str = '''
#
Na nelec 10
Na ul
1 175.5502590 -10.0000000
2 35.0516791 -47.4902024
#
2 7.9060270 -17.2283007
Na S
0 243.3605846 3.0000000*np.exp(0)
1 41.5764759 36.2847626*np.exp(0)
2 13.2649167 72.9304880*np.exp(0)
Na P
0 1257.2650682 5.0000000
1 189.6248810 117.4495683
2 54.5247759 423.3986704
'''
ecpdat = gto.basis.parse_nwchem.parse_ecp(ecp_str, 'Na')
self.assertEqual(ecpdat[0], 10)
self.assertEqual(len(ecpdat[1]), 3)
ecpdat1 = gto.basis.parse_nwchem.parse_ecp(ecp_str)
self.assertEqual(ecpdat, ecpdat1)
ecpdat1 = gto.basis.parse_nwchem.parse_ecp(
gto.basis.parse_nwchem.convert_ecp_to_nwchem('Na', ecpdat), 'Na')
self.assertEqual(ecpdat, ecpdat1)
def test_optimize_contraction(self):
bas = gto.parse(r'''
#BASIS SET: (6s,3p) -> [2s,1p]
C S
2.9412494 -0.09996723
0.6834831 0.39951283
0.2222899 0.70011547
C S
2.9412494 0.15591627
0.6834831 0.60768372
0.2222899 0.39195739
''', optimize=True)
self.assertEqual(len(bas), 1)
bas = [[1, 0,
[2.9412494, -0.09996723],
[0.6834831, 0.39951283],
[0.2222899, 0.70011547]],
[1, 1,
[2.9412494, -0.09996723],
[0.6834831, 0.39951283],
[0.2222899, 0.70011547]],
[1, 1,
[2.9412494, 0.15591627],
[0.6834831, 0.60768372],
[0.2222899, 0.39195739]]]
bas = gto.basis.parse_nwchem.optimize_contraction(bas)
self.assertEqual(len(bas), 2)
def test_remove_zero(self):
bas = gto.parse(r'''
C S
7.2610457926 0.0000000000 0.0000000000
2.1056583087 0.0000000000 0.0000000000
0.6439906571 1.0000000000 0.0000000000
0.0797152017 0.0000000000 1.0000000000
0.0294029590 0.0000000000 0.0000000000
''')
self.assertEqual(len(bas[0]), 3)
bas = [[0, 0,
[7.2610457926, 0.0000000000, 0.0000000000],
[2.1056583087, 0.0000000000, 0.0000000000],
[0.6439906571, 1.0000000000, 0.0000000000],
[0.0797152017, 0.0000000000, 1.0000000000],
[0.0294029590, 0.0000000000, 0.0000000000]]]
bas = gto.basis.parse_nwchem.remove_zero(bas)
self.assertEqual(len(bas[0]), 4)
def test_parse_molpro_basis(self):
basis_str = '''
C s aug-cc-pVTZ AVTZ : 11 5 1.10 1.10 8.8 10.10 11.11
aug-cc-pVTZ
8236 1235 280.8 79.27 25.59 8.997 3.319 0.9059 0.3643 0.1285 0.04402
0.000531 0.004108 0.021087 0.081853 0.234817 0.434401 0.346129 0.039378
-0.008983 0.002385 -0.000113 -0.000878 -0.00454 -0.018133 -0.05576
-0.126895 -0.170352 0.140382 0.598684 0.395389 1 1 1
C p aug-cc-pVTZ AVTZ : 6 4 1.5 4.4 5.5 6.6
aug-cc-pVTZ
18.71 4.133 1.2 0.3827 0.1209 0.03569 0.014031 0.086866 0.290216
0.501008 0.343406 1 1 1
C d aug-cc-pVTZ AVTZ : 3 0
aug-cc-pVTZ
1.097 0.318 0.1
C f aug-cc-pVTZ AVTZ : 2 0
aug-cc-pVTZ
0.761 0.268
'''
basis1 = parse_molpro.parse(basis_str)
ref = gto.basis.parse('''
#BASIS SET: (11s,6p,3d,2f) -> [5s,4p,3d,2f]
C S
8236.0000000 0.0005310 -0.0001130 0.0000000 0.0000000 0
1235.0000000 0.0041080 -0.0008780 0.0000000 0.0000000 0
280.8000000 0.0210870 -0.0045400 0.0000000 0.0000000 0
79.2700000 0.0818530 -0.0181330 0.0000000 0.0000000 0
25.5900000 0.2348170 -0.0557600 0.0000000 0.0000000 0
8.9970000 0.4344010 -0.1268950 0.0000000 0.0000000 0
3.3190000 0.3461290 -0.1703520 0.0000000 0.0000000 0
0.9059000 0.0393780 0.1403820 1.0000000 0.0000000 0
0.3643000 -0.0089830 0.5986840 0.0000000 0.0000000 0
0.1285000 0.0023850 0.3953890 0.0000000 1.0000000 0
0.0440200 0.0000000 0.0000000 0.0000000 0.0000000 1.0000000
C P
18.7100000 0.0140310 0.0000000 0.0000000 0
4.1330000 0.0868660 0.0000000 0.0000000 0
1.2000000 0.2902160 0.0000000 0.0000000 0
0.3827000 0.5010080 1.0000000 0.0000000 0
0.1209000 0.3434060 0.0000000 1.0000000 0
0.0356900 0.0000000 0.0000000 0.0000000 1.0000000
C D
1.0970000 1.0000000
C D
0.3180000 1.0000000
C D
0.1000000 1.0000000
C F
0.7610000 1.0000000
C F
0.2680000 1.0000000
END''')
self.assertEqual(ref, basis1)
basis_str = '''
c s 631g sv : 10 3 1.6 7.9 10.10
3047.52500d+00 457.369500d+00 103.948700d+00 29.2101600d+00 9.28666300d+00
3.16392700d+00 7.86827200d+00 1.88128900d+00 0.54424930d+00 0.16871440d+00
1.83473700d-03 1.40373200d-02 0.06884262d+00 0.23218444d+00 0.46794130d+00
0.36231200d+00 -0.11933240d+00 -0.16085420d+00 1.14345600d+00 1.00000000d+00
c p 631g sv : 4 2 1.3 4.4
7.86827200d+00 1.88128900d+00 0.54424930d+00 0.16871440d+00 0.06899907d+00
0.31642340d+00 0.74430830d+00 1.00000000d+00
'''
basis1 = parse_molpro.parse(basis_str)
ref = gto.basis.parse('''
#BASIS SET: (10s,4p) -> [3s,2p]
C S
3047.5250000 0.001834737 0 0
457.3695000 0.01403732 0 0
103.9487000 0.06884262 0 0
29.2101600 0.23218444 0 0
9.2866630 0.4679413 0 0
3.1639270 0.3623120 0 0
7.8682720 0 -0.1193324 0
1.8812890 0 -0.1608542 0
0.5442493 0 1.1434560 0
0.1687144 0 0.0000000 1
C P
7.8682720 0.06899907 0
1.8812890 0.3164234 0
0.5442493 0.7443083 0
0.1687144 0 1
END ''')
self.assertEqual(ref, basis1)
def test_parse_gaussian_basis(self):
basis_str = '''
****
C 0
S 8 1.00
8236.0000000 0.0005310
1235.0000000 0.0041080
280.8000000 0.0210870
79.2700000 0.0818530
25.5900000 0.2348170
8.9970000 0.4344010
3.3190000 0.3461290
0.3643000 -0.0089830
S 8 1.00
8236.0000000 -0.0001130
1235.0000000 -0.0008780
280.8000000 -0.0045400
79.2700000 -0.0181330
25.5900000 -0.0557600
8.9970000 -0.1268950
3.3190000 -0.1703520
0.3643000 0.5986840
S 1 1.00
0.9059000 1.0000000
S 1 1.00
0.1285000 1.0000000
S 1 1.00
0.0440200 1.0000000
P 3 1.00
18.7100000 0.0140310
4.1330000 0.0868660
1.2000000 0.2902160
P 1 1.00
0.3827000 1.0000000
P 1 1.00
0.1209000 1.0000000
P 1 1.00
0.0356900 1.0000000
D 1 1.00
1.0970000 1.0000000
D 1 1.00
0.3180000 1.0000000
D 1 1.00
0.1000000 1.0000000
F 1 1.00
0.7610000 1.0000000
F 1 1.00
0.2680000 1.0000000
****
'''
basis1 = parse_gaussian.parse(basis_str)
ref = gto.basis.load('augccpvtz', 'C')
self.assertEqual(ref, basis1)
basis_str = '''
****
C 0
S 6 1.00
4563.2400000 0.00196665
682.0240000 0.0152306
154.9730000 0.0761269
44.4553000 0.2608010
13.0290000 0.6164620
1.8277300 0.2210060
SP 3 1.00
20.9642000 0.1146600 0.0402487
4.8033100 0.9199990 0.2375940
1.4593300 -0.00303068 0.8158540
SP 1 1.00
0.4834560 1.0000000 1.0000000
SP 1 1.00
0.1455850 1.0000000 1.0000000
SP 1 1.00
0.0438000 1.0000000 1.0000000
D 1 1.00
0.6260000 1.0000000
****
'''
basis1 = parse_gaussian.parse(basis_str)
ref = gto.basis.load('6311++g*', 'C')
self.assertEqual(ref, basis1)
basis_str = '''
****
C 0
S 6 1.00
3047.5249000 0.0018347
457.3695100 0.0140373
103.9486900 0.0688426
29.2101550 0.2321844
9.2866630 0.4679413
3.1639270 0.3623120
SP 3 1.00
7.8682724 -0.1193324 0.0689991
1.8812885 -0.1608542 0.3164240
0.5442493 1.1434564 0.7443083
SP 1 1.00
0.1687144 1.0000000 1.0000000
D 1 1.00
2.5040000 1.0000000
D 1 1.00
0.6260000 1.0000000
D 1 1.00
0.1565000 1.0000000
F 1 1.00
0.8000000 1.0000000
****
'''
basis1 = parse_gaussian.parse(basis_str)
ref = gto.basis.load('631g(3df,3pd)', 'C')
self.assertEqual(ref, basis1)
def test_parse_gaussian_load_basis(self):
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('''
****
H 0
S 1 1.0
1.0 1.0
****
''')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1., 1.]]])
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('''
H 0
S 1 1.0
1.0 1.0
****
''')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1., 1.]]])
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('''
****
H 0
S 1 1.0
1.0 1.0
''')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1., 1.]]])
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('''
H 0
S 1 1.0
1.0 1.0
''')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1., 1.]]])
def test_basis_truncation(self):
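# Added note (not in the original test file): the '@<counts>' suffix requests a
# truncated contraction, e.g. 'ano@3s1p1f' keeps 3 s, 1 p and 1 f contracted
# functions, which is what the assertions below check.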
b = gto.basis.load('ano@3s1p1f', 'C')
self.assertEqual(len(b), 3)
self.assertEqual(len(b[0][1]), 4)
self.assertEqual(len(b[1][1]), 2)
self.assertEqual(b[2][0], 3)
self.assertEqual(len(b[2][1]), 2)
b = gto.basis.load('631g(3df,3pd)@3s2p1f', 'C')
self.assertEqual(len(b), 6)
self.assertEqual(len(b[0][1]), 2)
self.assertEqual(len(b[1][1]), 2)
self.assertEqual(len(b[2][1]), 2)
self.assertEqual(len(b[3][1]), 2)
self.assertEqual(len(b[4][1]), 2)
self.assertEqual(b[5][0], 3)
self.assertEqual(len(b[5][1]), 2)
b = gto.basis.load('aug-ccpvtz@4s3p', 'C')
self.assertEqual(len(b), 6)
self.assertEqual(b[3][0], 1)
self.assertRaises(AssertionError, gto.basis.load, 'aug-ccpvtz@4s3f', 'C')
def test_to_general_contraction(self):
b = gto.basis.to_general_contraction(gto.load('cc-pvtz', 'H'))
self.assertEqual(len(b), 3)
self.assertEqual(len(b[0]), 6)
self.assertEqual(len(b[1]), 3)
self.assertEqual(len(b[2]), 2)
def test_parse_molpro_ecp_soc(self):
ecp_data = parse_molpro.parse_ecp('''
! Q=7., MEFIT, MCDHF+Breit, Ref 32; CPP: alpha=1.028;delta=1.247;ncut=2.
ECP,I,46,4,3;
1; 2,1.000000,0.000000;
2; 2,3.380230,83.107547; 2,1.973454,5.099343;
4; 2,2.925323,27.299020; 2,3.073557,55.607847; 2,1.903188,0.778322; 2,1.119689,1.751128;
4; 2,1.999036,8.234552; 2,1.967767,12.488097; 2,0.998982,2.177334; 2,0.972272,3.167401;
4; 2,2.928812,-11.777154; 2,2.904069,-15.525522; 2,0.287352,-0.148550; 2,0.489380,-0.273682;
4; 2,2.925323,-54.598040; 2,3.073557,55.607847; 2,1.903188,-1.556643; 2,1.119689,1.751128;
4; 2,1.999036,-8.234552; 2,1.967767,8.325398; 2,0.998982,-2.177334; 2,0.972272,2.111601;
4; 2,2.928812,7.851436; 2,2.904069,-7.762761; 2,0.287352,0.099033; 2,0.489380,-0.136841;
''')
ref = [46,
[[-1, [[], [], [[1.0, 0.0]], [], [], [], []]],
[0, [[], [], [[3.38023, 83.107547], [1.973454, 5.099343]], [], [], [], []]],
[1, [[], [], [[2.925323, 27.29902, -54.59804], [3.073557, 55.607847, 55.607847], [1.903188, 0.778322, -1.556643], [1.119689, 1.751128, 1.751128]], [], [], [], []]],
[2, [[], [], [[1.999036, 8.234552, -8.234552], [1.967767, 12.488097, 8.325398], [0.998982, 2.177334, -2.177334], [0.972272, 3.167401, 2.111601]], [], [], [], []]],
[3, [[], [], [[2.928812, -11.777154, 7.851436], [2.904069, -15.525522, -7.762761], [0.287352, -0.14855, 0.099033], [0.48938, -0.273682, -0.136841]], [], [], [], []]]]]
self.assertEqual(ecp_data, ref)
if __name__ == "__main__":
print("test basis module")
unittest.main()
|
sunqm/pyscf
|
pyscf/gto/test/test_basis_parser.py
|
Python
|
apache-2.0
| 18,908
|
[
"PySCF"
] |
9188fcd158ba060a957ef02dc418946c98815ee183c4445582e340740f62ad2f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def get_program_parameters():
import argparse
description = 'How to align two vtkPolyData\'s.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('src_fn', help='The polydata source file name,e.g. Grey_Nurse_Shark.stl.')
parser.add_argument('tgt_fn', help='The polydata target file name, e.g. shark.ply.')
args = parser.parse_args()
return args.src_fn, args.tgt_fn
def main():
colors = vtk.vtkNamedColors()
src_fn, tgt_fn = get_program_parameters()
print('Loading source:', src_fn)
sourcePolyData = ReadPolyData(src_fn)
# Save the source polydata in case the alignment does not improve
# the result
originalSourcePolyData = vtk.vtkPolyData()
originalSourcePolyData.DeepCopy(sourcePolyData)
print('Loading target:', tgt_fn)
targetPolyData = ReadPolyData(tgt_fn)
# If the target orientation is markedly different,
# you may need to apply a transform to orient the
# target with the source.
# For example, when using Grey_Nurse_Shark.stl as the source and
# greatWhite.stl as the target, you need to uncomment the following
# two rotations.
trnf = vtk.vtkTransform()
# trnf.RotateX(90)
# trnf.RotateY(-90)
tpd = vtk.vtkTransformPolyDataFilter()
tpd.SetTransform(trnf)
tpd.SetInputData(targetPolyData)
tpd.Update()
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
distance = vtk.vtkHausdorffDistancePointSetFilter()
distance.SetInputData(0, tpd.GetOutput())
distance.SetInputData(1, sourcePolyData)
distance.Update()
distanceBeforeAlign = distance.GetOutput(0).GetFieldData().GetArray('HausdorffDistance').GetComponent(0, 0)
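# Added note (not in the original example): the Hausdorff distance between the two
# meshes is the alignment quality metric used throughout; smaller values mean a
# better fit, and the smallest of the three stages is kept as bestDistance.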
# Get initial alignment using oriented bounding boxes
AlignBoundingBoxes(sourcePolyData, tpd.GetOutput())
distance.SetInputData(0, tpd.GetOutput())
distance.SetInputData(1, sourcePolyData)
distance.Modified()
distance.Update()
distanceAfterAlign = distance.GetOutput(0).GetFieldData().GetArray('HausdorffDistance').GetComponent(0, 0)
bestDistance = min(distanceBeforeAlign, distanceAfterAlign)
if distanceAfterAlign > distanceBeforeAlign:
sourcePolyData.DeepCopy(originalSourcePolyData)
# Refine the alignment using IterativeClosestPoint
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(sourcePolyData)
icp.SetTarget(tpd.GetOutput())
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.SetMaximumNumberOfLandmarks(100)
icp.SetMaximumMeanDistance(.00001)
icp.SetMaximumNumberOfIterations(500)
icp.CheckMeanDistanceOn()
icp.StartByMatchingCentroidsOn()
icp.Update()
# print(icp)
lmTransform = icp.GetLandmarkTransform()
transform = vtk.vtkTransformPolyDataFilter()
transform.SetInputData(sourcePolyData)
transform.SetTransform(lmTransform)
transform.SetTransform(icp)
transform.Update()
distance.SetInputData(0, tpd.GetOutput())
distance.SetInputData(1, transform.GetOutput())
distance.Update()
distanceAfterICP = distance.GetOutput(0).GetFieldData().GetArray('HausdorffDistance').GetComponent(0, 0)
if distanceAfterICP < bestDistance:
bestDistance = distanceAfterICP
print(
'Distance before, after align, after ICP, min: {:0.5f}, {:0.5f}, {:0.5f}, {:0.5f}'.format(distanceBeforeAlign,
distanceAfterAlign,
distanceAfterICP,
bestDistance))
# Select
sourceMapper = vtk.vtkDataSetMapper()
if bestDistance == distanceBeforeAlign:
sourceMapper.SetInputData(originalSourcePolyData)
print('Using original alignment')
elif bestDistance == distanceAfterAlign:
sourceMapper.SetInputData(sourcePolyData)
print('Using alignment by OBB')
else:
sourceMapper.SetInputConnection(transform.GetOutputPort())
print('Using alignment by ICP')
sourceMapper.ScalarVisibilityOff()
sourceActor = vtk.vtkActor()
sourceActor.SetMapper(sourceMapper)
sourceActor.GetProperty().SetOpacity(.6)
sourceActor.GetProperty().SetDiffuseColor(
colors.GetColor3d('White'))
renderer.AddActor(sourceActor)
targetMapper = vtk.vtkDataSetMapper()
targetMapper.SetInputData(tpd.GetOutput())
targetMapper.ScalarVisibilityOff()
targetActor = vtk.vtkActor()
targetActor.SetMapper(targetMapper)
targetActor.GetProperty().SetDiffuseColor(
colors.GetColor3d('Tomato'))
renderer.AddActor(targetActor)
renderWindow.AddRenderer(renderer)
renderer.SetBackground(colors.GetColor3d("sea_green_light"))
renderer.UseHiddenLineRemovalOn()
renderWindow.SetSize(640, 480)
renderWindow.Render()
renderWindow.SetWindowName('AlignTwoPolyDatas')
renderWindow.Render()
interactor.Start()
def ReadPolyData(file_name):
import os
path, extension = os.path.splitext(file_name)
extension = extension.lower()
if extension == ".ply":
reader = vtk.vtkPLYReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtp":
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".obj":
reader = vtk.vtkOBJReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".stl":
reader = vtk.vtkSTLReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtk":
reader = vtk.vtkPolyDataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".g":
reader = vtk.vtkBYUReader()
reader.SetGeometryFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
else:
# Return a None if the extension is unknown.
poly_data = None
return poly_data
def AlignBoundingBoxes(source, target):
# Use OBBTree to create an oriented bounding box for target and source
sourceOBBTree = vtk.vtkOBBTree()
sourceOBBTree.SetDataSet(source)
sourceOBBTree.SetMaxLevel(1)
sourceOBBTree.BuildLocator()
targetOBBTree = vtk.vtkOBBTree()
targetOBBTree.SetDataSet(target)
targetOBBTree.SetMaxLevel(1)
targetOBBTree.BuildLocator()
sourceLandmarks = vtk.vtkPolyData()
sourceOBBTree.GenerateRepresentation(0, sourceLandmarks)
targetLandmarks = vtk.vtkPolyData()
targetOBBTree.GenerateRepresentation(0, targetLandmarks)
lmTransform = vtk.vtkLandmarkTransform()
lmTransform.SetModeToSimilarity()
lmTransform.SetTargetLandmarks(targetLandmarks.GetPoints())
# lmTransformPD = vtk.vtkTransformPolyDataFilter()
bestDistance = vtk.VTK_DOUBLE_MAX
bestPoints = vtk.vtkPoints()
bestDistance = BestBoundingBox(
"X",
target,
source,
targetLandmarks,
sourceLandmarks,
bestDistance,
bestPoints)
bestDistance = BestBoundingBox(
"Y",
target,
source,
targetLandmarks,
sourceLandmarks,
bestDistance,
bestPoints)
bestDistance = BestBoundingBox(
"Z",
target,
source,
targetLandmarks,
sourceLandmarks,
bestDistance,
bestPoints)
lmTransform.SetSourceLandmarks(bestPoints)
lmTransform.Modified()
transformPD = vtk.vtkTransformPolyDataFilter()
transformPD.SetInputData(source)
transformPD.SetTransform(lmTransform)
transformPD.Update()
source.DeepCopy(transformPD.GetOutput())
return
def BestBoundingBox(axis, target, source, targetLandmarks, sourceLandmarks, bestDistance, bestPoints):
distance = vtk.vtkHausdorffDistancePointSetFilter()
testTransform = vtk.vtkTransform()
testTransformPD = vtk.vtkTransformPolyDataFilter()
lmTransform = vtk.vtkLandmarkTransform()
lmTransformPD = vtk.vtkTransformPolyDataFilter()
lmTransform.SetModeToSimilarity()
lmTransform.SetTargetLandmarks(targetLandmarks.GetPoints())
sourceCenter = sourceLandmarks.GetCenter()
delta = 90.0
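# Added note (not in the original example): try four rotations (0, 90, 180, 270 degrees)
# about the given axis and keep the orientation whose landmark-based alignment yields
# the smallest Hausdorff distance to the target.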
for i in range(0, 4):
angle = delta * i
# Rotate about center
testTransform.Identity()
testTransform.Translate(sourceCenter[0], sourceCenter[1], sourceCenter[2])
if axis == "X":
testTransform.RotateX(angle)
elif axis == "Y":
testTransform.RotateY(angle)
else:
testTransform.RotateZ(angle)
testTransform.Translate(-sourceCenter[0], -sourceCenter[1], -sourceCenter[2])
testTransformPD.SetTransform(testTransform)
testTransformPD.SetInputData(sourceLandmarks)
testTransformPD.Update()
lmTransform.SetSourceLandmarks(testTransformPD.GetOutput().GetPoints())
lmTransform.Modified()
lmTransformPD.SetInputData(source)
lmTransformPD.SetTransform(lmTransform)
lmTransformPD.Update()
distance.SetInputData(0, target)
distance.SetInputData(1, lmTransformPD.GetOutput())
distance.Update()
testDistance = distance.GetOutput(0).GetFieldData().GetArray("HausdorffDistance").GetComponent(0, 0)
if testDistance < bestDistance:
bestDistance = testDistance
bestPoints.DeepCopy(testTransformPD.GetOutput().GetPoints())
return bestDistance
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/PolyData/AlignTwoPolyDatas.py
|
Python
|
apache-2.0
| 10,175
|
[
"VTK"
] |
00ede6ac0b8ab97e359f66081e3b36939e2a4a7f692891402aaf3905e940dc6e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##################################################################################################
# module for the symmetric eigenvalue problem
# Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
#
# todo:
# - implement balancing
#
##################################################################################################
"""
The symmetric eigenvalue problem.
---------------------------------
This file contains routines for the symmetric eigenvalue problem.
high level routines:
eigsy : real symmetric (ordinary) eigenvalue problem
eighe : complex hermitian (ordinary) eigenvalue problem
eigh : unified interface for eigsy and eighe
svd_r : singular value decomposition for real matrices
svd_c : singular value decomposition for complex matrices
svd : unified interface for svd_r and svd_c
low level routines:
r_sy_tridiag : reduction of real symmetric matrix to real symmetric tridiagonal matrix
c_he_tridiag_0 : reduction of complex hermitian matrix to real symmetric tridiagonal matrix
c_he_tridiag_1 : auxiliary routine to c_he_tridiag_0
c_he_tridiag_2 : auxiliary routine to c_he_tridiag_0
tridiag_eigen : solves the real symmetric tridiagonal matrix eigenvalue problem
svd_r_raw : raw singular value decomposition for real matrices
svd_c_raw : raw singular value decomposition for complex matrices
"""
from ..libmp.backend import xrange
from .eigen import defun
def r_sy_tridiag(ctx, A, D, E, calc_ev = True):
"""
This routine transforms a real symmetric matrix A to a real symmetric
tridiagonal matrix T using an orthogonal similarity transformation:
Q' * A * Q = T (here ' denotes the matrix transpose).
The orthogonal matrix Q is built up from Householder reflectors.
parameters:
A (input/output) On input, A contains the real symmetric matrix of
dimension (n,n). On output, if calc_ev is true, A contains the
orthogonal matrix Q, otherwise A is destroyed.
D (output) real array of length n, contains the diagonal elements
of the tridiagonal matrix
E (output) real array of length n, contains the offdiagonal elements
of the tridiagonal matrix in E[0:(n-1)] where n is the dimension of
the matrix A. E[n-1] is undefined.
calc_ev (input) If calc_ev is true, this routine explicitly calculates the
orthogonal matrix Q which is then returned in A. If calc_ev is
false, Q is not explicitly calculated resulting in a shorter run time.
This routine is a python translation of the fortran routine tred2.f in the
software library EISPACK (see netlib.org) which itself is based on the algol
procedure tred2 described in:
- Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkinson
- Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971)
For a good introduction to Householder reflections, see also
Stoer, Bulirsch - Introduction to Numerical Analysis.
"""
# note : the vector v of the i-th householder reflector is stored in a[(i+1):,i]
# whereas v/<v,v> is stored in a[i,(i+1):]
n = A.rows
for i in xrange(n - 1, 0, -1):
# scale the vector
scale = 0
for k in xrange(0, i):
scale += abs(A[k,i])
scale_inv = 0
if scale != 0:
scale_inv = 1/scale
# sadly there are floating point numbers not equal to zero whose reciprocal is infinity
if i == 1 or scale == 0 or ctx.isinf(scale_inv):
E[i] = A[i-1,i] # nothing to do
D[i] = 0
continue
# calculate parameters for householder transformation
H = 0
for k in xrange(0, i):
A[k,i] *= scale_inv
H += A[k,i] * A[k,i]
F = A[i-1,i]
G = ctx.sqrt(H)
if F > 0:
G = -G
E[i] = scale * G
H -= F * G
A[i-1,i] = F - G
F = 0
# apply householder transformation
for j in xrange(0, i):
if calc_ev:
A[i,j] = A[j,i] / H
G = 0 # calculate A*U
for k in xrange(0, j + 1):
G += A[k,j] * A[k,i]
for k in xrange(j + 1, i):
G += A[j,k] * A[k,i]
E[j] = G / H # calculate P
F += E[j] * A[j,i]
HH = F / (2 * H)
for j in xrange(0, i): # calculate reduced A
F = A[j,i]
G = E[j] - HH * F # calculate Q
E[j] = G
for k in xrange(0, j + 1):
A[k,j] -= F * E[k] + G * A[k,i]
D[i] = H
for i in xrange(1, n): # better for compatibility
E[i-1] = E[i]
E[n-1] = 0
if calc_ev:
D[0] = 0
for i in xrange(0, n):
if D[i] != 0:
for j in xrange(0, i): # accumulate transformation matrices
G = 0
for k in xrange(0, i):
G += A[i,k] * A[k,j]
for k in xrange(0, i):
A[k,j] -= G * A[k,i]
D[i] = A[i,i]
A[i,i] = 1
for j in xrange(0, i):
A[j,i] = A[i,j] = 0
else:
for i in xrange(0, n):
D[i] = A[i,i]
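# Added illustrative call sketch (not part of the original source); assumes a real
# symmetric mp.matrix A of size n x n. After the call A holds the orthogonal matrix Q,
# D the diagonal and E the off-diagonal of the tridiagonal matrix T = Q' * A_orig * Q.
#
#   D = mp.matrix(n, 1); E = mp.matrix(n, 1)
#   r_sy_tridiag(mp, A, D, E)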
def c_he_tridiag_0(ctx, A, D, E, T):
"""
This routine transforms a complex hermitian matrix A to a real symmetric
tridiagonal matrix T using a unitary similarity transformation:
Q' * A * Q = T (here ' denotes the hermitian matrix transpose,
i.e. transposition and conjugation).
The unitary matrix Q is built up from Householder reflectors and
a unitary diagonal matrix.
parameters:
A (input/output) On input, A contains the complex hermitian matrix
of dimension (n,n). On output, A contains the unitary matrix Q
in compressed form.
D (output) real array of length n, contains the diagonal elements
of the tridiagonal matrix.
E (output) real array of length n, contains the offdiagonal elements
of the tridiagonal matrix in E[0:(n-1)] where n is the dimension of
the matrix A. E[n-1] is undefined.
T (output) complex array of length n, contains a unitary diagonal
matrix.
This routine is a python translation (in slightly modified form) of the fortran
routine htridi.f in the software library EISPACK (see netlib.org) which itself
is a complex version of the algol procedure tred1 described in:
- Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkinson
- Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971)
For a good introduction to Householder reflections, see also
Stoer, Bulirsch - Introduction to Numerical Analysis.
"""
n = A.rows
T[n-1] = 1
for i in xrange(n - 1, 0, -1):
# scale the vector
scale = 0
for k in xrange(0, i):
scale += abs(ctx.re(A[k,i])) + abs(ctx.im(A[k,i]))
scale_inv = 0
if scale != 0:
scale_inv = 1 / scale
# sadly there are floating point numbers not equal to zero whose reciprocal is infinity
if scale == 0 or ctx.isinf(scale_inv):
E[i] = 0
D[i] = 0
T[i-1] = 1
continue
if i == 1:
F = A[i-1,i]
f = abs(F)
E[i] = f
D[i] = 0
if f != 0:
T[i-1] = T[i] * F / f
else:
T[i-1] = T[i]
continue
# calculate parameters for householder transformation
H = 0
for k in xrange(0, i):
A[k,i] *= scale_inv
rr = ctx.re(A[k,i])
ii = ctx.im(A[k,i])
H += rr * rr + ii * ii
F = A[i-1,i]
f = abs(F)
G = ctx.sqrt(H)
H += G * f
E[i] = scale * G
if f != 0:
F = F / f
TZ = - T[i] * F # T[i-1]=-T[i]*F, but we need T[i-1] as temporary storage
G *= F
else:
TZ = -T[i] # T[i-1]=-T[i]
A[i-1,i] += G
F = 0
# apply householder transformation
for j in xrange(0, i):
A[i,j] = A[j,i] / H
G = 0 # calculate A*U
for k in xrange(0, j + 1):
G += ctx.conj(A[k,j]) * A[k,i]
for k in xrange(j + 1, i):
G += A[j,k] * A[k,i]
T[j] = G / H # calculate P
F += ctx.conj(T[j]) * A[j,i]
HH = F / (2 * H)
for j in xrange(0, i): # calculate reduced A
F = A[j,i]
G = T[j] - HH * F # calculate Q
T[j] = G
for k in xrange(0, j + 1):
A[k,j] -= ctx.conj(F) * T[k] + ctx.conj(G) * A[k,i]
# as we use the lower left part for storage
# we have to use the transpose of the normal formula
T[i-1] = TZ
D[i] = H
for i in xrange(1, n): # better for compatibility
E[i-1] = E[i]
E[n-1] = 0
D[0] = 0
for i in xrange(0, n):
zw = D[i]
D[i] = ctx.re(A[i,i])
A[i,i] = zw
def c_he_tridiag_1(ctx, A, T):
"""
This routine forms the unitary matrix Q described in c_he_tridiag_0.
parameters:
A (input/output) On input, A is the same matrix as delivered by
c_he_tridiag_0. On output, A is set to Q.
T (input) On input, T is the same array as delivered by c_he_tridiag_0.
"""
n = A.rows
for i in xrange(0, n):
if A[i,i] != 0:
for j in xrange(0, i):
G = 0
for k in xrange(0, i):
G += ctx.conj(A[i,k]) * A[k,j]
for k in xrange(0, i):
A[k,j] -= G * A[k,i]
A[i,i] = 1
for j in xrange(0, i):
A[j,i] = A[i,j] = 0
for i in xrange(0, n):
for k in xrange(0, n):
A[i,k] *= T[k]
def c_he_tridiag_2(ctx, A, T, B):
"""
This routine applies the unitary matrix Q described in c_he_tridiag_0
onto the matrix B, i.e. it forms Q*B.
parameters:
A (input) On input, A is the same matrix as delivered by c_he_tridiag_0.
T (input) On input, T is the same array as delivered by c_he_tridiag_0.
B (input/output) On input, B is a complex matrix. On output B is replaced
by Q*B.
This routine is a python translation of the fortran routine htribk.f in the
software library EISPACK (see netlib.org). See c_he_tridiag_0 for more
references.
"""
n = A.rows
for i in xrange(0, n):
for k in xrange(0, n):
B[k,i] *= T[k]
for i in xrange(0, n):
if A[i,i] != 0:
for j in xrange(0, n):
G = 0
for k in xrange(0, i):
G += ctx.conj(A[i,k]) * B[k,j]
for k in xrange(0, i):
B[k,j] -= G * A[k,i]
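# Illustrative sketch (added for exposition; the helper name and the sample
# matrix are assumptions, not part of the original translation): the three
# routines above work together -- c_he_tridiag_0 reduces a hermitian matrix
# to real tridiagonal form, c_he_tridiag_1 expands the compressed reflectors
# into the unitary Q, and Q' * A * Q should then have the diagonal stored in D.
def _demo_c_he_tridiag(ctx):
    A = ctx.matrix([[2, 1 - 1j, 2j], [1 + 1j, 3, 1], [-2j, 1, 1]])
    A0 = A.copy()
    n = A.rows
    D = ctx.zeros(n, 1)
    E = ctx.zeros(n, 1)
    T = ctx.zeros(n, 1)
    c_he_tridiag_0(ctx, A, D, E, T)
    c_he_tridiag_1(ctx, A, T)                    # A now holds the unitary Q
    QH = ctx.zeros(n, n)                         # hermitian transpose of Q
    for i in range(n):
        for j in range(n):
            QH[i, j] = ctx.conj(A[j, i])
    Ttri = QH * A0 * A
    return [ctx.chop(Ttri[i, i] - D[i]) for i in range(n)]   # expected: zeros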
def tridiag_eigen(ctx, d, e, z = False):
"""
This subroutine finds the eigenvalues and the first components of the
eigenvectors of a real symmetric tridiagonal matrix using the implicit
QL method.
parameters:
d (input/output) real array of length n. on input, d contains the diagonal
elements of the input matrix. on output, d contains the eigenvalues in
ascending order.
e (input) real array of length n. on input, e contains the offdiagonal
elements of the input matrix in e[0:(n-1)]. On output, e has been
destroyed.
z (input/output) If z is equal to False, no eigenvectors will be computed.
Otherwise on input z should have the format z[0:m,0:n] (i.e. a real or
complex matrix of dimension (m,n) ). On output this matrix will be
multiplied by the matrix of the eigenvectors (i.e. the columns of this
matrix are the eigenvectors): z --> z*EV
That means if z[i,j]={1 if i==j; 0 otherwise} on input, then on output
z will contain the first m components of the eigenvectors. That means
if m is equal to n, the i-th eigenvector will be z[:,i].
This routine is a python translation (in slightly modified form) of the
fortran routine imtql2.f in the software library EISPACK (see netlib.org)
which itself is based on the algol procedure imtql2 described in:
- num. math. 12, p. 377-383(1968) by martin and wilkinson
- modified in num. math. 15, p. 450(1970) by dubrulle
- handbook for auto. comp., vol. II-linear algebra, p. 241-248 (1971)
See also the routine gaussq.f in netlib.org or acm algorithm 726.
"""
n = len(d)
e[n-1] = 0
iterlim = 2 * ctx.dps
for l in xrange(n):
j = 0
while 1:
m = l
while 1:
# look for a small subdiagonal element
if m + 1 == n:
break
if abs(e[m]) <= ctx.eps * (abs(d[m]) + abs(d[m + 1])):
break
m = m + 1
if m == l:
break
if j >= iterlim:
raise RuntimeError("tridiag_eigen: no convergence to an eigenvalue after %d iterations" % iterlim)
j += 1
# form shift
p = d[l]
g = (d[l + 1] - p) / (2 * e[l])
r = ctx.hypot(g, 1)
if g < 0:
s = g - r
else:
s = g + r
g = d[m] - p + e[l] / s
s, c, p = 1, 1, 0
for i in xrange(m - 1, l - 1, -1):
f = s * e[i]
b = c * e[i]
if abs(f) > abs(g): # this here is a slight improvement also used in gaussq.f or acm algorithm 726.
c = g / f
r = ctx.hypot(c, 1)
e[i + 1] = f * r
s = 1 / r
c = c * s
else:
s = f / g
r = ctx.hypot(s, 1)
e[i + 1] = g * r
c = 1 / r
s = s * c
g = d[i + 1] - p
r = (d[i] - g) * s + 2 * c * b
p = s * r
d[i + 1] = g + p
g = c * r - b
if not isinstance(z, bool):
# calculate eigenvectors
for w in xrange(z.rows):
f = z[w,i+1]
z[w,i+1] = s * z[w,i] + c * f
z[w,i ] = c * z[w,i] - s * f
d[l] = d[l] - p
e[l] = g
e[m] = 0
for ii in xrange(1, n):
# sort eigenvalues and eigenvectors (bubble-sort)
i = ii - 1
k = i
p = d[i]
for j in xrange(ii, n):
if d[j] >= p:
continue
k = j
p = d[k]
if k == i:
continue
d[k] = d[i]
d[i] = p
if not isinstance(z, bool):
for w in xrange(z.rows):
p = z[w,i]
z[w,i] = z[w,k]
z[w,k] = p
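# Illustrative sketch (added for exposition; the helper name and the example
# data are assumptions made here): the 3x3 tridiagonal matrix with diagonal 2
# and offdiagonal 1 has eigenvalues 2 - sqrt(2), 2 and 2 + sqrt(2). Passing
# an identity matrix as z yields the full eigenvector matrix in its columns.
def _demo_tridiag_eigen(ctx):
    d = ctx.matrix([2, 2, 2])     # diagonal elements
    e = ctx.matrix([1, 1, 0])     # offdiagonals in e[0], e[1]; e[2] is unused
    z = ctx.eye(3)
    tridiag_eigen(ctx, d, e, z)
    expected = [2 - ctx.sqrt(2), ctx.mpf(2), 2 + ctx.sqrt(2)]
    return [ctx.chop(d[i] - expected[i]) for i in range(3)]  # expected: zeros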
########################################################################################
@defun
def eigsy(ctx, A, eigvals_only = False, overwrite_a = False):
"""
This routine solves the (ordinary) eigenvalue problem for a real symmetric
square matrix A. Given A, an orthogonal matrix Q is calculated which
diagonalizes A:
Q' A Q = diag(E) and Q Q' = Q' Q = 1
Here diag(E) is a diagonal matrix whose diagonal is E.
' denotes the transpose.
The columns of Q are the eigenvectors of A and E contains the eigenvalues:
A Q[:,i] = E[i] Q[:,i]
input:
A: real matrix of format (n,n) which is symmetric
(i.e. A=A' or A[i,j]=A[j,i])
eigvals_only: if true, calculates only the eigenvalues E.
if false, calculates both eigenvectors and eigenvalues.
overwrite_a: if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
E: vector of format (n). contains the eigenvalues of A in ascending order.
Q: orthogonal matrix of format (n,n). contains the eigenvectors
of A as columns.
return value:
E if eigvals_only is true
(E, Q) if eigvals_only is false
example:
>>> from mpmath import mp
>>> A = mp.matrix([[3, 2], [2, 0]])
>>> E = mp.eigsy(A, eigvals_only = True)
>>> print(E)
[-1.0]
[ 4.0]
>>> A = mp.matrix([[1, 2], [2, 3]])
>>> E, Q = mp.eigsy(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
see also: eighe, eigh, eig
"""
if not overwrite_a:
A = A.copy()
d = ctx.zeros(A.rows, 1)
e = ctx.zeros(A.rows, 1)
if eigvals_only:
r_sy_tridiag(ctx, A, d, e, calc_ev = False)
tridiag_eigen(ctx, d, e, False)
return d
else:
r_sy_tridiag(ctx, A, d, e, calc_ev = True)
tridiag_eigen(ctx, d, e, A)
return (d, A)
@defun
def eighe(ctx, A, eigvals_only = False, overwrite_a = False):
"""
This routine solves the (ordinary) eigenvalue problem for a complex
hermitian square matrix A. Given A, a unitary matrix Q is calculated which
diagonalizes A:
Q' A Q = diag(E) and Q Q' = Q' Q = 1
Here diag(E) is a diagonal matrix whose diagonal is E.
' denotes the hermitian transpose (i.e. ordinary transposition and
complex conjugation).
The columns of Q are the eigenvectors of A and E contains the eigenvalues:
A Q[:,i] = E[i] Q[:,i]
input:
A: complex matrix of format (n,n) which is hermitian
(i.e. A=A' or A[i,j]=conj(A[j,i]))
eigvals_only: if true, calculates only the eigenvalues E.
if false, calculates both eigenvectors and eigenvalues.
overwrite_a: if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
E: vector of format (n). contains the eigenvalues of A in ascending order.
Q: unitary matrix of format (n,n). contains the eigenvectors
of A as columns.
return value:
E if eigvals_only is true
(E, Q) if eigvals_only is false
example:
>>> from mpmath import mp
>>> A = mp.matrix([[1, -3 - 1j], [-3 + 1j, -2]])
>>> E = mp.eighe(A, eigvals_only = True)
>>> print(E)
[-4.0]
[ 3.0]
>>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]])
>>> E, Q = mp.eighe(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
see also: eigsy, eigh, eig
"""
if not overwrite_a:
A = A.copy()
d = ctx.zeros(A.rows, 1)
e = ctx.zeros(A.rows, 1)
t = ctx.zeros(A.rows, 1)
if eigvals_only:
c_he_tridiag_0(ctx, A, d, e, t)
tridiag_eigen(ctx, d, e, False)
return d
else:
c_he_tridiag_0(ctx, A, d, e, t)
B = ctx.eye(A.rows)
tridiag_eigen(ctx, d, e, B)
c_he_tridiag_2(ctx, A, t, B)
return (d, B)
@defun
def eigh(ctx, A, eigvals_only = False, overwrite_a = False):
"""
"eigh" is a unified interface for "eigsy" and "eighe". Depending on
whether A is real or complex the appropriate function is called.
This routine solves the (ordinary) eigenvalue problem for a real symmetric
or complex hermitian square matrix A. Given A, an orthogonal (A real) or
unitary (A complex) matrix Q is calculated which diagonalizes A:
Q' A Q = diag(E) and Q Q' = Q' Q = 1
Here diag(E) is a diagonal matrix whose diagonal is E.
' denotes the hermitian transpose (i.e. ordinary transposition and
complex conjugation).
The columns of Q are the eigenvectors of A and E contains the eigenvalues:
A Q[:,i] = E[i] Q[:,i]
input:
A: a real or complex square matrix of format (n,n) which is symmetric
(i.e. A[i,j]=A[j,i]) or hermitian (i.e. A[i,j]=conj(A[j,i])).
eigvals_only: if true, calculates only the eigenvalues E.
if false, calculates both eigenvectors and eigenvalues.
overwrite_a: if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
E: vector of format (n). contains the eigenvalues of A in ascending order.
Q: an orthogonal or unitary matrix of format (n,n). contains the
eigenvectors of A as columns.
return value:
E if eigvals_only is true
(E, Q) if eigvals_only is false
example:
>>> from mpmath import mp
>>> A = mp.matrix([[3, 2], [2, 0]])
>>> E = mp.eigh(A, eigvals_only = True)
>>> print(E)
[-1.0]
[ 4.0]
>>> A = mp.matrix([[1, 2], [2, 3]])
>>> E, Q = mp.eigh(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
>>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]])
>>> E, Q = mp.eigh(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
see also: eigsy, eighe, eig
"""
iscomplex = any(type(x) is ctx.mpc for x in A)
if iscomplex:
return ctx.eighe(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a)
else:
return ctx.eigsy(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a)
@defun
def gauss_quadrature(ctx, n, qtype = "legendre", alpha = 0, beta = 0):
"""
This routine calculates gaussian quadrature rules for different
families of orthogonal polynomials. Let (a, b) be an interval,
W(x) a positive weight function and n a positive integer.
Then the purpose of this routine is to calculate pairs (x_k, w_k)
for k=0, 1, 2, ... (n-1) which give
int(W(x) * F(x), x = a..b) = sum(w_k * F(x_k),k = 0..(n-1))
exact for all polynomials F(x) of degree (strictly) less than 2*n. For all
integrable functions F(x) the sum is a (more or less) good approximation to
the integral. The x_k are called nodes (which are the zeros of the
related orthogonal polynomials) and the w_k are called the weights.
parameters
n (input) The degree of the quadrature rule, i.e. its number of
nodes.
qtype (input) The family of orthogonal polynomials for which to
compute the quadrature rule. See the list below.
alpha (input) real number, used as parameter for some orthogonal
polynomials
beta (input) real number, used as parameter for some orthogonal
polynomials.
return value
(X, W) a pair of two real arrays where x_k = X[k] and w_k = W[k].
orthogonal polynomials:
qtype polynomial
----- ----------
"legendre" Legendre polynomials, W(x)=1 on the interval (-1, +1)
"legendre01" shifted Legendre polynomials, W(x)=1 on the interval (0, +1)
"hermite" Hermite polynomials, W(x)=exp(-x*x) on (-infinity,+infinity)
"laguerre" Laguerre polynomials, W(x)=exp(-x) on (0,+infinity)
"glaguerre" generalized Laguerre polynomials, W(x)=exp(-x)*x**alpha
on (0, +infinity)
"chebyshev1" Chebyshev polynomials of the first kind, W(x)=1/sqrt(1-x*x)
on (-1, +1)
"chebyshev2" Chebyshev polynomials of the second kind, W(x)=sqrt(1-x*x)
on (-1, +1)
"jacobi" Jacobi polynomials, W(x)=(1-x)**alpha * (1+x)**beta on (-1, +1)
with alpha>-1 and beta>-1
examples:
>>> from mpmath import mp
>>> f = lambda x: x**8 + 2 * x**6 - 3 * x**4 + 5 * x**2 - 7
>>> X, W = mp.gauss_quadrature(5, "hermite")
>>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
>>> B = mp.sqrt(mp.pi) * 57 / 16
>>> C = mp.quad(lambda x: mp.exp(- x * x) * f(x), [-mp.inf, +mp.inf])
>>> print(mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10))
0.0 0.0
>>> f = lambda x: x**5 - 2 * x**4 + 3 * x**3 - 5 * x**2 + 7 * x - 11
>>> X, W = mp.gauss_quadrature(3, "laguerre")
>>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
>>> B = 76
>>> C = mp.quad(lambda x: mp.exp(-x) * f(x), [0, +mp.inf])
>>> print(mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10))
0.0 0.0
# orthogonality of the chebyshev polynomials:
>>> f = lambda x: mp.chebyt(3, x) * mp.chebyt(2, x)
>>> X, W = mp.gauss_quadrature(3, "chebyshev1")
>>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
>>> print(mp.chop(A, tol = 1e-10))
0.0
references:
- golub and welsch, "calculations of gaussian quadrature rules", mathematics of
computation 23, p. 221-230 (1969)
- golub, "some modified matrix eigenvalue problems", siam review 15, p. 318-334 (1973)
- stroud and secrest, "gaussian quadrature formulas", prentice-hall (1966)
See also the routine gaussq.f in netlib.org or ACM Transactions on
Mathematical Software algorithm 726.
"""
d = ctx.zeros(n, 1)
e = ctx.zeros(n, 1)
z = ctx.zeros(1, n)
z[0,0] = 1
if qtype == "legendre":
# legendre on the range -1 +1 , abramowitz, table 25.4, p.916
w = 2
for i in xrange(n):
j = i + 1
e[i] = ctx.sqrt(j * j / (4 * j * j - ctx.mpf(1)))
elif qtype == "legendre01":
# legendre shifted to 0 1 , abramowitz, table 25.8, p.921
w = 1
for i in xrange(n):
d[i] = 1 / ctx.mpf(2)
j = i + 1
e[i] = ctx.sqrt(j * j / (16 * j * j - ctx.mpf(4)))
elif qtype == "hermite":
# hermite on the range -inf +inf , abramowitz, table 25.10,p.924
w = ctx.sqrt(ctx.pi)
for i in xrange(n):
j = i + 1
e[i] = ctx.sqrt(j / ctx.mpf(2))
elif qtype == "laguerre":
# laguerre on the range 0 +inf , abramowitz, table 25.9, p. 923
w = 1
for i in xrange(n):
j = i + 1
d[i] = 2 * j - 1
e[i] = j
elif qtype=="chebyshev1":
# chebyshev polynomials of the first kind
w = ctx.pi
for i in xrange(n):
e[i] = 1 / ctx.mpf(2)
e[0] = ctx.sqrt(1 / ctx.mpf(2))
elif qtype == "chebyshev2":
# chebyshev polynomials of the second kind
w = ctx.pi / 2
for i in xrange(n):
e[i] = 1 / ctx.mpf(2)
elif qtype == "glaguerre":
# generalized laguerre on the range 0 +inf
w = ctx.gamma(1 + alpha)
for i in xrange(n):
j = i + 1
d[i] = 2 * j - 1 + alpha
e[i] = ctx.sqrt(j * (j + alpha))
elif qtype == "jacobi":
# jacobi polynomials
alpha = ctx.mpf(alpha)
beta = ctx.mpf(beta)
ab = alpha + beta
abi = ab + 2
w = (2**(ab+1)) * ctx.gamma(alpha + 1) * ctx.gamma(beta + 1) / ctx.gamma(abi)
d[0] = (beta - alpha) / abi
e[0] = ctx.sqrt(4 * (1 + alpha) * (1 + beta) / ((abi + 1) * (abi * abi)))
a2b2 = beta * beta - alpha * alpha
for i in xrange(1, n):
j = i + 1
abi = 2 * j + ab
d[i] = a2b2 / ((abi - 2) * abi)
e[i] = ctx.sqrt(4 * j * (j + alpha) * (j + beta) * (j + ab) / ((abi * abi - 1) * abi * abi))
elif isinstance(qtype, str):
raise ValueError("unknown quadrature rule \"%s\"" % qtype)
elif not isinstance(qtype, str):
w = qtype(d, e)
else:
assert 0
tridiag_eigen(ctx, d, e, z)
for i in xrange(len(z)):
z[i] *= z[i]
z = z.transpose()
return (d, w * z)
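# Illustrative sketch (added for exposition; the helper name is an assumption
# made here): an n-point rule is exact for polynomials of degree < 2*n, so a
# 3-point Gauss-Legendre rule integrates x**4 over (-1, 1) exactly to 2/5.
# The rule is obtained through the context, as in the doctests above.
def _demo_gauss_legendre(ctx):
    X, W = ctx.gauss_quadrature(3, "legendre")
    approx = ctx.fdot([(x**4, w) for x, w in zip(X, W)])
    return ctx.chop(approx - ctx.mpf(2) / 5)     # expected: 0.0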
##################################################################################################
##################################################################################################
##################################################################################################
def svd_r_raw(ctx, A, V = False, calc_u = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two orthogonal matrices U and V are calculated such that
A = U S V
where S is a suitable shaped matrix whose off-diagonal elements are zero.
The diagonal elements of S are the singular values of A, i.e. the
square roots of the eigenvalues of A' A or A A'. Here ' denotes the transpose.
Householder bidiagonalization and a variant of the QR algorithm are used.
overview of the matrices :
A : m*n A gets replaced by U
U : m*n U replaces A. If n>m then only the first m*m block of U is
non-zero. column-orthogonal: U' U = B
here B is a n*n matrix whose first min(m,n) diagonal
elements are 1 and all other elements are zero.
S : n*n diagonal matrix, only the diagonal elements are stored in
the array S. only the first min(m,n) diagonal elements are non-zero.
V : n*n orthogonal: V V' = V' V = 1
parameters:
A (input/output) On input, A contains a real matrix of shape m*n.
On output, if calc_u is true A contains the column-orthogonal
matrix U; otherwise A is simply used as workspace and thus destroyed.
V (input/output) if false, the matrix V is not calculated. otherwise
V must be a matrix of shape n*n.
calc_u (input) If true, the matrix U is calculated and replaces A.
if false, U is not calculated and A is simply destroyed
return value:
S an array of length n containing the singular values of A sorted by
decreasing magnitude. only the first min(m,n) elements are non-zero.
This routine is a python translation of the fortran routine svd.f in the
software library EISPACK (see netlib.org) which itself is based on the
algol procedure svd described in:
- num. math. 14, 403-420(1970) by golub and reinsch.
- wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).
"""
m, n = A.rows, A.cols
S = ctx.zeros(n, 1)
# work is a temporary array of size n
work = ctx.zeros(n, 1)
g = scale = anorm = 0
maxits = 3 * ctx.dps
for i in xrange(n): # householder reduction to bidiagonal form
work[i] = scale*g
g = s = scale = 0
if i < m:
for k in xrange(i, m):
scale += ctx.fabs(A[k,i])
if scale != 0:
for k in xrange(i, m):
A[k,i] /= scale
s += A[k,i] * A[k,i]
f = A[i,i]
g = -ctx.sqrt(s)
if f < 0:
g = -g
h = f * g - s
A[i,i] = f - g
for j in xrange(i+1, n):
s = 0
for k in xrange(i, m):
s += A[k,i] * A[k,j]
f = s / h
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for k in xrange(i,m):
A[k,i] *= scale
S[i] = scale * g
g = s = scale = 0
if i < m and i != n - 1:
for k in xrange(i+1, n):
scale += ctx.fabs(A[i,k])
if scale:
for k in xrange(i+1, n):
A[i,k] /= scale
s += A[i,k] * A[i,k]
f = A[i,i+1]
g = -ctx.sqrt(s)
if f < 0:
g = -g
h = f * g - s
A[i,i+1] = f - g
for k in xrange(i+1, n):
work[k] = A[i,k] / h
for j in xrange(i+1, m):
s = 0
for k in xrange(i+1, n):
s += A[j,k] * A[i,k]
for k in xrange(i+1, n):
A[j,k] += s * work[k]
for k in xrange(i+1, n):
A[i,k] *= scale
anorm = max(anorm, ctx.fabs(S[i]) + ctx.fabs(work[i]))
if not isinstance(V, bool):
for i in xrange(n-2, -1, -1): # accumulation of right hand transformations
V[i+1,i+1] = 1
if work[i+1] != 0:
for j in xrange(i+1, n):
V[i,j] = (A[i,j] / A[i,i+1]) / work[i+1]
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, n):
s += A[i,k] * V[j,k]
for k in xrange(i+1, n):
V[j,k] += s * V[i,k]
for j in xrange(i+1, n):
V[j,i] = V[i,j] = 0
V[0,0] = 1
if m<n : minnm = m
else : minnm = n
if calc_u:
for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations
g = S[i]
for j in xrange(i+1, n):
A[i,j] = 0
if g != 0:
g = 1 / g
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, m):
s += A[k,i] * A[k,j]
f = (s / A[i,i]) * g
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for j in xrange(i, m):
A[j,i] *= g
else:
for j in xrange(i, m):
A[j,i] = 0
A[i,i] += 1
for k in xrange(n - 1, -1, -1):
# diagonalization of the bidiagonal form:
# loop over singular values, and over allowed iterations
its = 0
while 1:
its += 1
flag = True
for l in xrange(k, -1, -1):
nm = l-1
if ctx.fabs(work[l]) + anorm == anorm:
flag = False
break
if ctx.fabs(S[nm]) + anorm == anorm:
break
if flag:
c = 0
s = 1
for i in xrange(l, k + 1):
f = s * work[i]
work[i] *= c
if ctx.fabs(f) + anorm == anorm:
break
g = S[i]
h = ctx.hypot(f, g)
S[i] = h
h = 1 / h
c = g * h
s = - f * h
if calc_u:
for j in xrange(m):
y = A[j,nm]
z = A[j,i]
A[j,nm] = y * c + z * s
A[j,i] = z * c - y * s
z = S[k]
if l == k: # convergence
if z < 0: # singular value is made nonnegative
S[k] = -z
if not isinstance(V, bool):
for j in xrange(n):
V[k,j] = -V[k,j]
break
if its >= maxits:
raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its)
x = S[l] # shift from bottom 2 by 2 minor
nm = k-1
y = S[nm]
g = work[nm]
h = work[k]
f = ((y - z) * (y + z) + (g - h) * (g + h))/(2 * h * y)
g = ctx.hypot(f, 1)
if f >= 0: f = ((x - z) * (x + z) + h * ((y / (f + g)) - h)) / x
else: f = ((x - z) * (x + z) + h * ((y / (f - g)) - h)) / x
c = s = 1 # next QR transformation
for j in xrange(l, nm + 1):
g = work[j+1]
y = S[j+1]
h = s * g
g = c * g
z = ctx.hypot(f, h)
work[j] = z
c = f / z
s = h / z
f = x * c + g * s
g = g * c - x * s
h = y * s
y *= c
if not isinstance(V, bool):
for jj in xrange(n):
x = V[j ,jj]
z = V[j+1,jj]
V[j ,jj]= x * c + z * s
V[j+1 ,jj]= z * c - x * s
z = ctx.hypot(f, h)
S[j] = z
if z != 0: # rotation can be arbitrary if z=0
z = 1 / z
c = f * z
s = h * z
f = c * g + s * y
x = c * y - s * g
if calc_u:
for jj in xrange(m):
y = A[jj,j ]
z = A[jj,j+1]
A[jj,j ] = y * c + z * s
A[jj,j+1 ] = z * c - y * s
work[l] = 0
work[k] = f
S[k] = x
##########################
# Sort singular values into decreasing order (bubble-sort)
for i in xrange(n):
imax = i
s = ctx.fabs(S[i]) # s is the current maximal element
for j in xrange(i + 1, n):
c = ctx.fabs(S[j])
if c > s:
s = c
imax = j
if imax != i:
# swap singular values
z = S[i]
S[i] = S[imax]
S[imax] = z
if calc_u:
for j in xrange(m):
z = A[j,i]
A[j,i] = A[j,imax]
A[j,imax] = z
if not isinstance(V, bool):
for j in xrange(n):
z = V[i,j]
V[i,j] = V[imax,j]
V[imax,j] = z
return S
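# Illustrative sketch (added for exposition; the helper name is an assumption
# made here, the test matrix is the one from the svd_r doctest below): drive
# the raw routine directly and rebuild the input from the factors. With
# calc_u true, A is overwritten by U, so a copy is kept for the comparison.
def _demo_svd_r_raw(ctx):
    A = ctx.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
    A0 = A.copy()
    V = ctx.zeros(A.cols, A.cols)
    S = svd_r_raw(ctx, A, V, calc_u = True)
    return ctx.chop(A0 - A * ctx.diag(S) * V)    # expected: zero matrix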
#######################
def svd_c_raw(ctx, A, V = False, calc_u = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two unitary matrices U and V are calculated such that
A = U S V
where S is a suitable shaped matrix whose off-diagonal elements are zero.
The diagonal elements of S are the singular values of A, i.e. the
square roots of the eigenvalues of A' A or A A'. Here ' denotes the hermitian
transpose (i.e. transposition and conjugation). Householder bidiagonalization
and a variant of the QR algorithm are used.
overview of the matrices :
A : m*n A gets replaced by U
U : m*n U replaces A. If n>m then only the first m*m block of U is
non-zero. column-unitary: U' U = B
here B is a n*n matrix whose first min(m,n) diagonal
elements are 1 and all other elements are zero.
S : n*n diagonal matrix, only the diagonal elements are stored in
the array S. only the first min(m,n) diagonal elements are non-zero.
V : n*n unitary: V V' = V' V = 1
parameters:
A (input/output) On input, A contains a complex matrix of shape m*n.
On output, if calc_u is true A contains the column-unitary
matrix U; otherwise A is simply used as workspace and thus destroyed.
V (input/output) if false, the matrix V is not calculated. otherwise
V must be a matrix of shape n*n.
calc_u (input) If true, the matrix U is calculated and replaces A.
if false, U is not calculated and A is simply destroyed
return value:
S an array of length n containing the singular values of A sorted by
decreasing magnitude. only the first min(m,n) elements are non-zero.
This routine is a python translation of the fortran routine svd.f in the
software library EISPACK (see netlib.org) which itself is based on the
algol procedure svd described in:
- num. math. 14, 403-420(1970) by golub and reinsch.
- wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).
"""
m, n = A.rows, A.cols
S = ctx.zeros(n, 1)
# work is a temporary array of size n
work = ctx.zeros(n, 1)
lbeta = ctx.zeros(n, 1)
rbeta = ctx.zeros(n, 1)
dwork = ctx.zeros(n, 1)
g = scale = anorm = 0
maxits = 3 * ctx.dps
for i in xrange(n): # householder reduction to bidiagonal form
dwork[i] = scale * g # dwork are the side-diagonal elements
g = s = scale = 0
if i < m:
for k in xrange(i, m):
scale += ctx.fabs(ctx.re(A[k,i])) + ctx.fabs(ctx.im(A[k,i]))
if scale != 0:
for k in xrange(i, m):
A[k,i] /= scale
ar = ctx.re(A[k,i])
ai = ctx.im(A[k,i])
s += ar * ar + ai * ai
f = A[i,i]
g = -ctx.sqrt(s)
if ctx.re(f) < 0:
beta = -g - ctx.conj(f)
g = -g
else:
beta = -g + ctx.conj(f)
beta /= ctx.conj(beta)
beta += 1
h = 2 * (ctx.re(f) * g - s)
A[i,i] = f - g
beta /= h
lbeta[i] = (beta / scale) / scale
for j in xrange(i+1, n):
s = 0
for k in xrange(i, m):
s += ctx.conj(A[k,i]) * A[k,j]
f = beta * s
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for k in xrange(i, m):
A[k,i] *= scale
S[i] = scale * g # S are the diagonal elements
g = s = scale = 0
if i < m and i != n - 1:
for k in xrange(i+1, n):
scale += ctx.fabs(ctx.re(A[i,k])) + ctx.fabs(ctx.im(A[i,k]))
if scale:
for k in xrange(i+1, n):
A[i,k] /= scale
ar = ctx.re(A[i,k])
ai = ctx.im(A[i,k])
s += ar * ar + ai * ai
f = A[i,i+1]
g = -ctx.sqrt(s)
if ctx.re(f) < 0:
beta = -g - ctx.conj(f)
g = -g
else:
beta = -g + ctx.conj(f)
beta /= ctx.conj(beta)
beta += 1
h = 2 * (ctx.re(f) * g - s)
A[i,i+1] = f - g
beta /= h
rbeta[i] = (beta / scale) / scale
for k in xrange(i+1, n):
work[k] = A[i, k]
for j in xrange(i+1, m):
s = 0
for k in xrange(i+1, n):
s += ctx.conj(A[i,k]) * A[j,k]
f = s * beta
for k in xrange(i+1,n):
A[j,k] += f * work[k]
for k in xrange(i+1, n):
A[i,k] *= scale
anorm = max(anorm,ctx.fabs(S[i]) + ctx.fabs(dwork[i]))
if not isinstance(V, bool):
for i in xrange(n-2, -1, -1): # accumulation of right hand transformations
V[i+1,i+1] = 1
if dwork[i+1] != 0:
f = ctx.conj(rbeta[i])
for j in xrange(i+1, n):
V[i,j] = A[i,j] * f
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, n):
s += ctx.conj(A[i,k]) * V[j,k]
for k in xrange(i+1, n):
V[j,k] += s * V[i,k]
for j in xrange(i+1,n):
V[j,i] = V[i,j] = 0
V[0,0] = 1
if m < n : minnm = m
else : minnm = n
if calc_u:
for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations
g = S[i]
for j in xrange(i+1, n):
A[i,j] = 0
if g != 0:
g = 1 / g
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, m):
s += ctx.conj(A[k,i]) * A[k,j]
f = s * ctx.conj(lbeta[i])
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for j in xrange(i, m):
A[j,i] *= g
else:
for j in xrange(i, m):
A[j,i] = 0
A[i,i] += 1
for k in xrange(n-1, -1, -1):
# diagonalization of the bidiagonal form:
# loop over singular values, and over allowed iterations
its = 0
while 1:
its += 1
flag = True
for l in xrange(k, -1, -1):
nm = l - 1
if ctx.fabs(dwork[l]) + anorm == anorm:
flag = False
break
if ctx.fabs(S[nm]) + anorm == anorm:
break
if flag:
c = 0
s = 1
for i in xrange(l, k+1):
f = s * dwork[i]
dwork[i] *= c
if ctx.fabs(f) + anorm == anorm:
break
g = S[i]
h = ctx.hypot(f, g)
S[i] = h
h = 1 / h
c = g * h
s = -f * h
if calc_u:
for j in xrange(m):
y = A[j,nm]
z = A[j,i]
A[j,nm]= y * c + z * s
A[j,i] = z * c - y * s
z = S[k]
if l == k: # convergence
if z < 0: # singular value is made nonnegative
S[k] = -z
if not isinstance(V, bool):
for j in xrange(n):
V[k,j] = -V[k,j]
break
if its >= maxits:
raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its)
x = S[l] # shift from bottom 2 by 2 minor
nm = k-1
y = S[nm]
g = dwork[nm]
h = dwork[k]
f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2 * h * y)
g = ctx.hypot(f, 1)
if f >=0: f = (( x - z) *( x + z) + h *((y / (f + g)) - h)) / x
else: f = (( x - z) *( x + z) + h *((y / (f - g)) - h)) / x
c = s = 1 # next QR transformation
for j in xrange(l, nm + 1):
g = dwork[j+1]
y = S[j+1]
h = s * g
g = c * g
z = ctx.hypot(f, h)
dwork[j] = z
c = f / z
s = h / z
f = x * c + g * s
g = g * c - x * s
h = y * s
y *= c
if not isinstance(V, bool):
for jj in xrange(n):
x = V[j ,jj]
z = V[j+1,jj]
V[j ,jj]= x * c + z * s
V[j+1,jj ]= z * c - x * s
z = ctx.hypot(f, h)
S[j] = z
if z != 0: # rotation can be arbitrary if z=0
z = 1 / z
c = f * z
s = h * z
f = c * g + s * y
x = c * y - s * g
if calc_u:
for jj in xrange(m):
y = A[jj,j ]
z = A[jj,j+1]
A[jj,j ]= y * c + z * s
A[jj,j+1 ]= z * c - y * s
dwork[l] = 0
dwork[k] = f
S[k] = x
##########################
# Sort singular values into decreasing order (bubble-sort)
for i in xrange(n):
imax = i
s = ctx.fabs(S[i]) # s is the current maximal element
for j in xrange(i + 1, n):
c = ctx.fabs(S[j])
if c > s:
s = c
imax = j
if imax != i:
# swap singular values
z = S[i]
S[i] = S[imax]
S[imax] = z
if calc_u:
for j in xrange(m):
z = A[j,i]
A[j,i] = A[j,imax]
A[j,imax] = z
if not isinstance(V, bool):
for j in xrange(n):
z = V[i,j]
V[i,j] = V[imax,j]
V[imax,j] = z
return S
##################################################################################################
@defun
def svd_r(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two orthogonal matrices U and V are calculated such that
A = U S V and U' U = 1 and V V' = 1
where S is a suitable shaped matrix whose off-diagonal elements are zero.
Here ' denotes the transpose. The diagonal elements of S are the singular
values of A, i.e. the square roots of the eigenvalues of A' A or A A'.
input:
A : a real matrix of shape (m, n)
full_matrices : if true, U and V are of shape (m, m) and (n, n).
if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
compute_uv : if true, U and V are calculated. if false, only S is calculated.
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
U : an orthogonal matrix: U' U = 1. if full_matrices is true, U is of
shape (m, m). otherwise it is of shape (m, min(m, n)).
S : an array of length min(m, n) containing the singular values of A sorted by
decreasing magnitude.
V : an orthogonal matrix: V V' = 1. if full_matrices is true, V is of
shape (n, n). otherwise it is of shape (min(m, n), n).
return value:
S if compute_uv is false
(U, S, V) if compute_uv is true
overview of the matrices:
full_matrices true:
A : m*n
U : m*m U' U = 1
S as matrix : m*n
V : n*n V V' = 1
full_matrices false:
A : m*n
U : m*min(n,m) U' U = 1
S as matrix : min(m,n)*min(m,n)
V : min(m,n)*n V V' = 1
examples:
>>> from mpmath import mp
>>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
>>> S = mp.svd_r(A, compute_uv = False)
>>> print(S)
[6.0]
[3.0]
[1.0]
>>> U, S, V = mp.svd_r(A)
>>> print(mp.chop(A - U * mp.diag(S) * V))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
see also: svd, svd_c
"""
m, n = A.rows, A.cols
if not compute_uv:
if not overwrite_a:
A = A.copy()
S = svd_r_raw(ctx, A, V = False, calc_u = False)
S = S[:min(m,n)]
return S
if full_matrices and n < m:
V = ctx.zeros(m, m)
A0 = ctx.zeros(m, m)
A0[:,:n] = A
S = svd_r_raw(ctx, A0, V, calc_u = True)
S = S[:n]
V = V[:n,:n]
return (A0, S, V)
else:
if not overwrite_a:
A = A.copy()
V = ctx.zeros(n, n)
S = svd_r_raw(ctx, A, V, calc_u = True)
if n > m:
if full_matrices == False:
V = V[:m,:]
S = S[:m]
A = A[:,:m]
return (A, S, V)
##############################
@defun
def svd_c(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two unitary matrices U and V are calculated such that
A = U S V and U' U = 1 and V V' = 1
where S is a suitable shaped matrix whose off-diagonal elements are zero.
Here ' denotes the hermitian transpose (i.e. transposition and complex
conjugation). The diagonal elements of S are the singular values of A,
i.e. the square roots of the eigenvalues of A' A or A A'.
input:
A : a complex matrix of shape (m, n)
full_matrices : if true, U and V are of shape (m, m) and (n, n).
if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
compute_uv : if true, U and V are calculated. if false, only S is calculated.
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
U : a unitary matrix: U' U = 1. if full_matrices is true, U is of
shape (m, m). otherwise it is of shape (m, min(m, n)).
S : an array of length min(m, n) containing the singular values of A sorted by
decreasing magnitude.
V : a unitary matrix: V V' = 1. if full_matrices is true, V is of
shape (n, n). otherwise it is of shape (min(m, n), n).
return value:
S if compute_uv is false
(U, S, V) if compute_uv is true
overview of the matrices:
full_matrices true:
A : m*n
U : m*m U' U = 1
S as matrix : m*n
V : n*n V V' = 1
full_matrices false:
A : m*n
U : m*min(n,m) U' U = 1
S as matrix : min(m,n)*min(m,n)
V : min(m,n)*n V V' = 1
example:
>>> from mpmath import mp
>>> A = mp.matrix([[-2j, -1-3j, -2+2j], [2-2j, -1-3j, 1], [-3+1j,-2j,0]])
>>> S = mp.svd_c(A, compute_uv = False)
>>> print(mp.chop(S - mp.matrix([mp.sqrt(34), mp.sqrt(15), mp.sqrt(6)])))
[0.0]
[0.0]
[0.0]
>>> U, S, V = mp.svd_c(A)
>>> print(mp.chop(A - U * mp.diag(S) * V))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
see also: svd, svd_r
"""
m, n = A.rows, A.cols
if not compute_uv:
if not overwrite_a:
A = A.copy()
S = svd_c_raw(ctx, A, V = False, calc_u = False)
S = S[:min(m,n)]
return S
if full_matrices and n < m:
V = ctx.zeros(m, m)
A0 = ctx.zeros(m, m)
A0[:,:n] = A
S = svd_c_raw(ctx, A0, V, calc_u = True)
S = S[:n]
V = V[:n,:n]
return (A0, S, V)
else:
if not overwrite_a:
A = A.copy()
V = ctx.zeros(n, n)
S = svd_c_raw(ctx, A, V, calc_u = True)
if n > m:
if full_matrices == False:
V = V[:m,:]
S = S[:m]
A = A[:,:m]
return (A, S, V)
@defun
def svd(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
"""
"svd" is a unified interface for "svd_r" and "svd_c". Depending on
whether A is real or complex the appropriate function is called.
This routine computes the singular value decomposition of a matrix A.
Given A, two orthogonal (A real) or unitary (A complex) matrices U and V
are calculated such that
A = U S V and U' U = 1 and V V' = 1
where S is a suitable shaped matrix whose off-diagonal elements are zero.
Here ' denotes the hermitian transpose (i.e. transposition and complex
conjugation). The diagonal elements of S are the singular values of A,
i.e. the square roots of the eigenvalues of A' A or A A'.
input:
A : a real or complex matrix of shape (m, n)
full_matrices : if true, U and V are of shape (m, m) and (n, n).
if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
compute_uv : if true, U and V are calculated. if false, only S is calculated.
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
U : an orthogonal or unitary matrix: U' U = 1. if full_matrices is true, U is of
shape (m, m). otherwise it is of shape (m, min(m, n)).
S : an array of length min(m, n) containing the singular values of A sorted by
decreasing magnitude.
V : an orthogonal or unitary matrix: V V' = 1. if full_matrices is true, V is of
shape (n, n). otherwise it is of shape (min(m, n), n).
return value:
S if compute_uv is false
(U, S, V) if compute_uv is true
overview of the matrices:
full_matrices true:
A : m*n
U : m*m U' U = 1
S as matrix : m*n
V : n*n V V' = 1
full_matrices false:
A : m*n
U : m*min(n,m) U' U = 1
S as matrix : min(m,n)*min(m,n)
V : min(m,n)*n V V' = 1
examples:
>>> from mpmath import mp
>>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
>>> S = mp.svd(A, compute_uv = False)
>>> print(S)
[6.0]
[3.0]
[1.0]
>>> U, S, V = mp.svd(A)
>>> print(mp.chop(A - U * mp.diag(S) * V))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
see also: svd_r, svd_c
"""
iscomplex = any(type(x) is ctx.mpc for x in A)
if iscomplex:
return ctx.svd_c(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a)
else:
return ctx.svd_r(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a)
|
kantel/processingpy
|
mpmathtest/mpmath/matrices/eigen_symmetric.py
|
Python
|
mit
| 58,524
|
[
"Gaussian"
] |
d4f2ec6844b565f078572ccd35cbfba5bf7a28433e6d428e94dd535ee5d41a7f
|
#
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object)
import sys
import copy
import operator
from .Errors import error, warning, warn_once, InternalError, CompileError
from .Errors import hold_errors, release_errors, held_errors, report_error
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
try:
from builtins import bytes
except ImportError:
bytes = str # Python 2
class NotConstant(object):
_obj = None
def __new__(cls):
if NotConstant._obj is None:
NotConstant._obj = super(NotConstant, cls).__new__(cls)
return NotConstant._obj
def __repr__(self):
return "<NOT CONSTANT>"
not_a_constant = NotConstant()
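# NotConstant is a singleton: repeated instantiation returns the same object,
# so identity tests are safe, e.g. "NotConstant() is not_a_constant" is True.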
constant_value_not_set = object()
# error messages when coercing from key[0] to key[1]
coercion_error_dict = {
# string related errors
(Builtin.unicode_type, Builtin.bytes_type) : "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
(Builtin.unicode_type, Builtin.str_type) : "Cannot convert Unicode string to 'str' implicitly. This is not portable and requires explicit encoding.",
(Builtin.unicode_type, PyrexTypes.c_char_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
(Builtin.unicode_type, PyrexTypes.c_uchar_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
(Builtin.bytes_type, Builtin.unicode_type) : "Cannot convert 'bytes' object to unicode implicitly, decoding required",
(Builtin.bytes_type, Builtin.str_type) : "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
(Builtin.bytes_type, Builtin.basestring_type) : "Cannot convert 'bytes' object to basestring implicitly. This is not portable to Py3.",
(Builtin.bytes_type, PyrexTypes.c_py_unicode_ptr_type) : "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
(Builtin.basestring_type, Builtin.bytes_type) : "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
(Builtin.str_type, Builtin.unicode_type) : "str objects do not support coercion to unicode, use a unicode string literal instead (u'')",
(Builtin.str_type, Builtin.bytes_type) : "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
(Builtin.str_type, PyrexTypes.c_char_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
(Builtin.str_type, PyrexTypes.c_uchar_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
(Builtin.str_type, PyrexTypes.c_py_unicode_ptr_type) : "'str' objects do not support coercion to C types (use 'unicode'?).",
(PyrexTypes.c_char_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_uchar_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
}
def find_coercion_error(type_tuple, default, env):
err = coercion_error_dict.get(type_tuple)
if err is None:
return default
elif ((PyrexTypes.c_char_ptr_type in type_tuple or PyrexTypes.c_uchar_ptr_type in type_tuple)
and env.directives['c_string_encoding']):
if type_tuple[1].is_pyobject:
return default
elif env.directives['c_string_encoding'] in ('ascii', 'default'):
return default
else:
return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
else:
return err
def default_str_type(env):
return {
'bytes': bytes_type,
'bytearray': bytearray_type,
'str': str_type,
'unicode': unicode_type
}.get(env.directives['c_string_type'])
def check_negative_indices(*nodes):
"""
Raise a warning on nodes that are known to have negative numeric values.
Used to find (potential) bugs inside of "wraparound=False" sections.
"""
for node in nodes:
if (node is None
or not isinstance(node.constant_result, (int, float, long))):
continue
if node.constant_result < 0:
warning(node.pos,
"the result of using negative indices inside of "
"code sections marked as 'wraparound=False' is "
"undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
if not seq_node.is_sequence_constructor:
if seq_type is None:
seq_type = seq_node.infer_type(env)
if seq_type is tuple_type:
# tuples are immutable => we can safely follow assignments
if seq_node.cf_state and len(seq_node.cf_state) == 1:
try:
seq_node = seq_node.cf_state[0].rhs
except AttributeError:
pass
if seq_node is not None and seq_node.is_sequence_constructor:
if index_node is not None and index_node.has_constant_result():
try:
item = seq_node.args[index_node.constant_result]
except (ValueError, TypeError, IndexError):
pass
else:
return item.infer_type(env)
# if we're lucky, all items have the same type
item_types = set([item.infer_type(env) for item in seq_node.args])
if len(item_types) == 1:
return item_types.pop()
return None
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
# result_code/temp_result can safely be set to None
result_ctype = None
type = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
is_sequence_constructor = False
is_dict_literal = False
is_string_literal = False
is_attribute = False
is_subscript = False
saved_subexpr_nodes = None
is_temp = False
is_target = False
is_starred = False
constant_result = constant_value_not_set
# whether this node with a memoryview type should be broadcast
memslice_broadcast = False
child_attrs = property(fget=operator.attrgetter('subexprs'))
def not_implemented(self, method_name):
print_call_chain(method_name, "not implemented") ###
raise InternalError(
"%s.%s not implemented" %
(self.__class__.__name__, method_name))
def is_lvalue(self):
return 0
def is_addressable(self):
return self.is_lvalue() and not self.type.is_memoryviewslice
def is_ephemeral(self):
# An ephemeral node is one whose result is in
# a Python temporary and we suspect there are no
# other references to it. Certain operations are
# disallowed on such values, since they are
# likely to result in a dangling pointer.
return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
# Extract a list of subexpression nodes based
# on the contents of the subexprs class attribute.
nodes = []
for name in self.subexprs:
item = getattr(self, name)
if item is not None:
if type(item) is list:
nodes.extend(item)
else:
nodes.append(item)
return nodes
def result(self):
if self.is_temp:
return self.temp_code
else:
return self.calculate_result_code()
def result_as(self, type = None):
# Return the result code cast to the specified C type.
if (self.is_temp and self.type.is_pyobject and
type != py_object_type):
# Allocated temporaries are always PyObject *, which may not
# reflect the actual type (e.g. an extension type)
return typecast(type, py_object_type, self.result())
return typecast(type, self.ctype(), self.result())
def py_result(self):
# Return the result code cast to PyObject *.
return self.result_as(py_object_type)
def ctype(self):
# Return the native C type of the result (i.e. the
# C type of the result_code expression).
return self.result_ctype or self.type
def get_constant_c_result_code(self):
# Return the constant value of this node as a result code
# string, or None if the node is not constant. This method
# can be called when the constant result code is required
# before the code generation phase.
#
# The return value is a string that can represent a simple C
# value, a constant C name or a constant C expression. If the
# node type depends on Python code, this must return None.
return None
def calculate_constant_result(self):
# Calculate the constant compile time result value of this
# expression and store it in ``self.constant_result``. Does
# nothing by default, thus leaving ``self.constant_result``
# unknown. If valid, the result can be an arbitrary Python
# value.
#
# This must only be called when it is assured that all
# sub-expressions have a valid constant_result value. The
# ConstantFolding transform will do this.
pass
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
def compile_time_value_error(self, e):
error(self.pos, "Error in compile-time expression: %s: %s" % (
e.__class__.__name__, e))
# ------------- Declaration Analysis ----------------
def analyse_target_declaration(self, env):
error(self.pos, "Cannot assign to or delete this")
# ------------- Expression Analysis ----------------
def analyse_const_expression(self, env):
# Called during the analyse_declarations phase of a
# constant expression. Analyses the expression's type,
# checks whether it is a legal const expression,
# and determines its value.
node = self.analyse_types(env)
node.check_const()
return node
def analyse_expressions(self, env):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for a whole
# expression.
return self.analyse_types(env)
def analyse_target_expression(self, env, rhs):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for the LHS of
# an assignment.
return self.analyse_target_types(env)
def analyse_boolean_expression(self, env):
# Analyse expression and coerce to a boolean.
node = self.analyse_types(env)
bool = node.coerce_to_boolean(env)
return bool
def analyse_temp_boolean_expression(self, env):
# Analyse boolean expression and coerce result into
# a temporary. This is used when a branch is to be
# performed on the result and we won't have an
# opportunity to ensure disposal code is executed
# afterwards. By forcing the result into a temporary,
# we ensure that all disposal has been done by the
# time we get the result.
node = self.analyse_types(env)
return node.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
def type_dependencies(self, env):
# Returns the list of entries whose types must be determined
# before the type of self can be inferred.
if hasattr(self, 'type') and self.type is not None:
return ()
return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
def infer_type(self, env):
# Attempt to deduce the type of self.
# Differs from analyse_types as it avoids unnecessary
# analysis of subexpressions, but can assume everything
# in self.type_dependencies() has been resolved.
if hasattr(self, 'type') and self.type is not None:
return self.type
elif hasattr(self, 'entry') and self.entry is not None:
return self.entry.type
else:
self.not_implemented("infer_type")
def nonlocally_immutable(self):
# Returns whether this variable is a safe reference, i.e.
# can't be modified as part of globals or closures.
return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
# --------------- Type Analysis ------------------
def analyse_as_module(self, env):
# If this node can be interpreted as a reference to a
# cimported module, return its scope, else None.
return None
def analyse_as_type(self, env):
# If this node can be interpreted as a reference to a
# type, return that type, else None.
return None
def analyse_as_extension_type(self, env):
# If this node can be interpreted as a reference to an
# extension type or builtin type, return its type, else None.
return None
def analyse_types(self, env):
self.not_implemented("analyse_types")
def analyse_target_types(self, env):
return self.analyse_types(env)
def nogil_check(self, env):
# By default, any expression based on Python objects is
# prevented in nogil environments. Subtypes must override
# this if they can work without the GIL.
if self.type and self.type.is_pyobject:
self.gil_error()
def gil_assignment_check(self, env):
if env.nogil and self.type.is_pyobject:
error(self.pos, "Assignment of Python object not allowed without gil")
def check_const(self):
self.not_const()
return False
def not_const(self):
error(self.pos, "Not allowed in a constant expression")
def check_const_addr(self):
self.addr_not_const()
return False
def addr_not_const(self):
error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
def result_in_temp(self):
# Return true if result is in a temporary owned by
# this node or one of its subexpressions. Overridden
# by certain nodes which can share the result of
# a subnode.
return self.is_temp
def target_code(self):
# Return code fragment for use as LHS of a C assignment.
return self.calculate_result_code()
def calculate_result_code(self):
self.not_implemented("calculate_result_code")
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
type = self.type
if not type.is_void:
if type.is_pyobject:
type = PyrexTypes.py_object_type
self.temp_code = code.funcstate.allocate_temp(
type, manage_ref=self.use_managed_ref)
else:
self.temp_code = None
def release_temp_result(self, code):
if not self.temp_code:
if not self.result_is_used:
# not used anyway, so ignore if not set up
return
if self.old_temp:
raise RuntimeError("temp %s released multiple times in %s" % (
self.old_temp, self.__class__.__name__))
else:
raise RuntimeError("no temp, but release requested in %s" % (
self.__class__.__name__))
code.funcstate.release_temp(self.temp_code)
self.old_temp = self.temp_code
self.temp_code = None
# ---------------- Code Generation -----------------
def make_owned_reference(self, code):
"""
If result is a pyobject, make sure we own a reference to it.
If the result is in a temp, it is already a new reference.
"""
if self.type.is_pyobject and not self.result_in_temp():
code.put_incref(self.result(), self.ctype())
def make_owned_memoryviewslice(self, code):
"""
Make sure we own the reference to this memoryview slice.
"""
if not self.result_in_temp():
code.put_incref_memoryviewslice(self.result(),
have_gil=self.in_nogil_context)
def generate_evaluation_code(self, code):
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
self.generate_subexpr_evaluation_code(code)
code.mark_pos(self.pos)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_result_code(code)
if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
            # If this node is a temporary, we do not need to wait until it has
            # been disposed of before disposing of its children.
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
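    # Illustrative summary of the temp lifecycle above: for a temp node the
    # order is roughly
    #     generate_subexpr_evaluation_code() -> allocate_temp_result()
    #     -> generate_result_code() -> generate_subexpr_disposal_code()
    #     -> free_subexpr_temps()
    # except for C string / Py_UNICODE* results, whose sub-expression disposal
    # is postponed to generate_disposal_code()/generate_post_assignment_code(),
    # since such a result may still point into a sub-expression's data.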
def generate_subexpr_evaluation_code(self, code):
for node in self.subexpr_nodes():
node.generate_evaluation_code(code)
def generate_result_code(self, code):
self.not_implemented("generate_result_code")
def generate_disposal_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
if self.result():
if self.type.is_pyobject:
code.put_decref_clear(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(
self.result(), have_gil=not self.in_nogil_context)
else:
# Already done if self.is_temp
self.generate_subexpr_disposal_code(code)
def generate_subexpr_disposal_code(self, code):
# Generate code to dispose of temporary results
# of all sub-expressions.
for node in self.subexpr_nodes():
node.generate_disposal_code(code)
def generate_post_assignment_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
elif self.type.is_pyobject:
code.putln("%s = 0;" % self.result())
elif self.type.is_memoryviewslice:
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
self.generate_subexpr_disposal_code(code)
def generate_assignment_code(self, rhs, code):
# Stub method for nodes which are not legal as
# the LHS of an assignment. An error will have
# been reported earlier.
pass
def generate_deletion_code(self, code, ignore_nonexisting=False):
# Stub method for nodes that are not legal as
# the argument of a del statement. An error
# will have been reported earlier.
pass
def free_temps(self, code):
if self.is_temp:
if not self.type.is_void:
self.release_temp_result(code)
else:
self.free_subexpr_temps(code)
def free_subexpr_temps(self, code):
for sub in self.subexpr_nodes():
sub.free_temps(code)
def generate_function_definitions(self, env, code):
pass
# ---------------- Annotation ---------------------
def annotate(self, code):
for node in self.subexpr_nodes():
node.annotate(code)
# ----------------- Coercion ----------------------
def coerce_to(self, dst_type, env):
# Coerce the result so that it can be assigned to
# something of type dst_type. If processing is necessary,
# wraps this node in a coercion node and returns that.
# Otherwise, returns this node unchanged.
#
# This method is called during the analyse_expressions
# phase of the src_node's processing.
#
# Note that subclasses that override this (especially
# ConstNodes) must not (re-)set their own .type attribute
# here. Since expression nodes may turn up in different
# places in the tree (e.g. inside of CloneNodes in cascaded
# assignments), this method must return a new node instance
# if it changes the type.
#
src = self
src_type = self.type
if self.check_for_coercion_error(dst_type, env):
return self
if dst_type.is_reference and not src_type.is_reference:
dst_type = dst_type.ref_base_type
if src_type.is_const:
src_type = src_type.const_base_type
if src_type.is_fused or dst_type.is_fused:
# See if we are coercing a fused function to a pointer to a
# specialized function
if (src_type.is_cfunction and not dst_type.is_fused and
dst_type.is_ptr and dst_type.base_type.is_cfunction):
dst_type = dst_type.base_type
for signature in src_type.get_all_specialized_function_types():
if signature.same_as(dst_type):
src.type = signature
src.entry = src.type.entry
src.entry.used = True
return self
if src_type.is_fused:
error(self.pos, "Type is not specialized")
else:
error(self.pos, "Cannot coerce to a type that is not specialized")
self.type = error_type
return self
if self.coercion_type is not None:
# This is purely for error checking purposes!
node = NameNode(self.pos, name='', type=self.coercion_type)
node.coerce_to(dst_type, env)
if dst_type.is_memoryviewslice:
from . import MemoryView
if not src.type.is_memoryviewslice:
if src.type.is_pyobject:
src = CoerceToMemViewSliceNode(src, dst_type, env)
elif src.type.is_array:
src = CythonArrayNode.from_carray(src, env).coerce_to(
dst_type, env)
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" %
(src_type,))
elif not MemoryView.src_conforms_to_dst(
src.type, dst_type, broadcast=self.memslice_broadcast):
if src.type.dtype.same_as(dst_type.dtype):
msg = "Memoryview '%s' not conformable to memoryview '%s'."
tup = src.type, dst_type
else:
msg = "Different base types for memoryviews (%s, %s)"
tup = src.type.dtype, dst_type.dtype
error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
if dst_type is bytes_type and src.type.is_int:
src = CoerceIntToBytesNode(src, env)
else:
src = CoerceToPyTypeNode(src, env, type=dst_type)
if not src.type.subtype_of(dst_type):
if src.constant_result is not None:
src = PyTypeTestNode(src, dst_type, env)
elif src.type.is_pyobject:
src = CoerceFromPyTypeNode(dst_type, src, env)
elif (dst_type.is_complex
and src_type != dst_type
and dst_type.assignable_from(src_type)):
src = CoerceToComplexNode(src, dst_type, env)
else: # neither src nor dst are py types
            # Compare the type names as strings as well: for C types that is
            # enough on its own, and it avoids Cython getting confused when
            # the types are declared in different pxi files.
if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
self.fail_assignment(dst_type)
return src
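    # Illustrative example (variable names are only examples): for Cython
    # source such as
    #     cdef double d = py_obj
    # the Python-object operand is passed through coerce_to(c_double_type, env)
    # and wrapped in a CoerceFromPyTypeNode; in the opposite direction (a C
    # value assigned to a Python object) a CoerceToPyTypeNode is used instead.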
def fail_assignment(self, dst_type):
error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
if fail and not default:
default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
message = find_coercion_error((self.type, dst_type), default, env)
if message is not None:
error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
return True
if fail:
self.fail_assignment(dst_type)
return True
return False
def coerce_to_pyobject(self, env):
return self.coerce_to(PyrexTypes.py_object_type, env)
def coerce_to_boolean(self, env):
# Coerce result to something acceptable as
# a boolean value.
# if it's constant, calculate the result now
if self.has_constant_result():
bool_value = bool(self.constant_result)
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
type = self.type
if type.is_enum or type.is_error:
return self
elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
return CoerceToBooleanNode(self, env)
else:
error(self.pos, "Type '%s' not acceptable as a boolean" % type)
return self
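    # Illustrative example: an expression with constant result 3 folds straight
    # into BoolNode(value=True) above, while a non-constant C int, float,
    # pointer or Python object is wrapped in a CoerceToBooleanNode.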
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.type.is_int:
return self
else:
return self.coerce_to(PyrexTypes.c_long_type, env)
def coerce_to_temp(self, env):
# Ensure that the result is in a temporary.
if self.result_in_temp():
return self
else:
return CoerceToTempNode(self, env)
def coerce_to_simple(self, env):
# Ensure that the result is simple (see is_simple).
if self.is_simple():
return self
else:
return self.coerce_to_temp(env)
def is_simple(self):
# A node is simple if its result is something that can
# be referred to without performing any operations, e.g.
# a constant, local var, C global var, struct member
# reference, or temporary.
return self.result_in_temp()
def may_be_none(self):
if self.type and not (self.type.is_pyobject or
self.type.is_memoryviewslice):
return False
if self.has_constant_result():
return self.constant_result is not None
return True
def as_cython_attribute(self):
return None
def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
# Wraps the node in a NoneCheckNode if it is not known to be
# not-None (e.g. because it is a Python literal).
if self.may_be_none():
return NoneCheckNode(self, error, message, format_args)
else:
return self
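    # For instance, IteratorNode further below guards list/tuple sequences via
    #     sequence.as_none_safe_node("'NoneType' object is not iterable")
    # so that a None value raises TypeError instead of crashing.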
@classmethod
def from_node(cls, node, **kwargs):
"""Instantiate this node class from another node, properly
copying over all attributes that one would forget otherwise.
"""
attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
for attr_name in attributes:
if attr_name in kwargs:
continue
try:
value = getattr(node, attr_name)
except AttributeError:
pass
else:
kwargs[attr_name] = value
return cls(node.pos, **kwargs)
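    # Illustrative usage sketch (the node class and values are examples only):
    #     new_node = BoolNode.from_node(old_node, value=True, constant_result=True)
    # copies old_node.pos and any of the listed cf_*/constant_result attributes
    # that are not explicitly overridden in the keyword arguments.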
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
subexprs = []
# Override to optimize -- we know we have no children
def generate_subexpr_evaluation_code(self, code):
pass
def generate_subexpr_disposal_code(self, code):
pass
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
is_literal = 1
type = py_object_type
def is_simple(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self
def calculate_result_code(self):
return self.value
def generate_result_code(self, code):
pass
class NoneNode(PyConstNode):
# The constant value None
is_none = 1
value = "Py_None"
constant_result = None
nogil_check = None
def compile_time_value(self, denv):
return None
def may_be_none(self):
return True
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
value = "Py_Ellipsis"
constant_result = Ellipsis
def compile_time_value(self, denv):
return Ellipsis
class ConstNode(AtomicExprNode):
# Abstract base type for literal constant nodes.
#
# value string C code fragment
is_literal = 1
nogil_check = None
def is_simple(self):
return 1
def nonlocally_immutable(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self # Types are held in class variables
def check_const(self):
return True
def get_constant_c_result_code(self):
return self.calculate_result_code()
def calculate_result_code(self):
return str(self.value)
def generate_result_code(self, code):
pass
class BoolNode(ConstNode):
type = PyrexTypes.c_bint_type
# The constant value True or False
def calculate_constant_result(self):
self.constant_result = self.value
def compile_time_value(self, denv):
return self.value
def calculate_result_code(self):
if self.type.is_pyobject:
return self.value and 'Py_True' or 'Py_False'
else:
return str(int(self.value))
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_int:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.bool_type)
if dst_type.is_int and self.type.is_pyobject:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type)
return ConstNode.coerce_to(self, dst_type, env)
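    # Illustrative example: BoolNode(value=True) renders as '1' while it keeps
    # the C bint type and as 'Py_True' once coerced to a Python object through
    # the Builtin.bool_type branch above.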
class NullNode(ConstNode):
type = PyrexTypes.c_null_ptr_type
value = "NULL"
constant_result = 0
def get_constant_c_result_code(self):
return self.value
class CharNode(ConstNode):
type = PyrexTypes.c_char_type
def calculate_constant_result(self):
self.constant_result = ord(self.value)
def compile_time_value(self, denv):
return ord(self.value)
def calculate_result_code(self):
return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
    # unsigned      ""  or "U"
    # longness      ""  or "L" or "LL"
    # is_c_literal  True/False/None   creator considers this a C integer literal
unsigned = ""
longness = ""
is_c_literal = None # unknown
def __init__(self, pos, **kwds):
ExprNode.__init__(self, pos, **kwds)
if 'type' not in kwds:
self.type = self.find_suitable_type_for_value()
def find_suitable_type_for_value(self):
if self.constant_result is constant_value_not_set:
try:
self.calculate_constant_result()
except ValueError:
pass
# we ignore 'is_c_literal = True' and instead map signed 32bit
# integers as C long values
if self.is_c_literal or \
self.constant_result in (constant_value_not_set, not_a_constant) or \
self.unsigned or self.longness == 'LL':
# clearly a C literal
rank = (self.longness == 'LL') and 2 or 1
suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
if self.type:
suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
else:
# C literal or Python literal - split at 32bit boundary
if -2**31 <= self.constant_result < 2**31:
if self.type and self.type.is_int:
suitable_type = self.type
else:
suitable_type = PyrexTypes.c_long_type
else:
suitable_type = PyrexTypes.py_object_type
return suitable_type
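    # Illustrative examples of the rules above: the literal 42 gets C type
    # 'long', 42LL gets 'long long', 42U gets 'unsigned int', and a value
    # outside the signed 32-bit range (e.g. 2**40) stays a Python object.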
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
elif dst_type.is_float:
if self.has_constant_result():
return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
constant_result=float(self.constant_result))
else:
return FloatNode(self.pos, value=self.value, type=dst_type,
constant_result=not_a_constant)
if dst_type.is_numeric and not dst_type.is_complex:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type = dst_type, is_c_literal = True,
unsigned=self.unsigned, longness=self.longness)
return node
elif dst_type.is_pyobject:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type = PyrexTypes.py_object_type, is_c_literal = False,
unsigned=self.unsigned, longness=self.longness)
else:
# FIXME: not setting the type here to keep it working with
# complex numbers. Should they be special cased?
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
unsigned=self.unsigned, longness=self.longness)
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def coerce_to_boolean(self, env):
return IntNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type,
unsigned=self.unsigned, longness=self.longness)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
# pre-allocate a Python version of the number
plain_integer_string = str(Utils.str_to_number(self.value))
self.result_code = code.get_py_int(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
def get_constant_c_result_code(self):
return self.value_as_c_integer_string() + self.unsigned + self.longness
def value_as_c_integer_string(self):
value = self.value
if len(value) > 2:
# convert C-incompatible Py3 oct/bin notations
if value[1] in 'oO':
value = value[0] + value[2:] # '0o123' => '0123'
elif value[1] in 'bB':
value = int(value[2:], 2)
return str(value)
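    # Illustrative examples: the Py3-only notations '0o123' and '0b101' are
    # rewritten to the C-compatible forms '0123' and '5' respectively; decimal
    # and hexadecimal literals pass through unchanged.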
def calculate_result_code(self):
return self.result_code
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
type = PyrexTypes.c_double_type
def calculate_constant_result(self):
self.constant_result = float(self.value)
def compile_time_value(self, denv):
return float(self.value)
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_float:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.float_type)
if dst_type.is_float and self.type.is_pyobject:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=dst_type)
return ConstNode.coerce_to(self, dst_type, env)
def calculate_result_code(self):
return self.result_code
def get_constant_c_result_code(self):
strval = self.value
assert isinstance(strval, (str, unicode))
cmpval = repr(float(strval))
if cmpval == 'nan':
return "(Py_HUGE_VAL * 0)"
elif cmpval == 'inf':
return "Py_HUGE_VAL"
elif cmpval == '-inf':
return "(-Py_HUGE_VAL)"
else:
return strval
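    # Illustrative examples: non-finite values are emitted as portable C
    # expressions, e.g. NaN as '(Py_HUGE_VAL * 0)', +inf as 'Py_HUGE_VAL' and
    # -inf as '(-Py_HUGE_VAL)'; ordinary literals are emitted verbatim.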
def generate_evaluation_code(self, code):
c_value = self.get_constant_c_result_code()
if self.type.is_pyobject:
self.result_code = code.get_py_float(self.value, c_value)
else:
self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
type = PyrexTypes.parse_basic_type(name)
if type is not None:
return type
hold_errors()
from .TreeFragment import TreeFragment
pos = (pos[0], pos[1], pos[2]-7)
try:
declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
except CompileError:
sizeof_node = None
else:
sizeof_node = declaration.root.stats[0].expr
sizeof_node = sizeof_node.analyse_types(env)
release_errors(ignore=True)
if isinstance(sizeof_node, SizeofTypeNode):
return sizeof_node.arg_type
return None
class BytesNode(ConstNode):
# A char* or bytes literal
#
# value BytesLiteral
is_string_literal = True
# start off as Python 'bytes' to support len() in O(1)
type = bytes_type
def calculate_constant_result(self):
self.constant_result = self.value
def as_sliced_node(self, start, stop, step=None):
value = StringEncoding.BytesLiteral(self.value[start:stop:step])
value.encoding = self.value.encoding
return BytesNode(
self.pos, value=value, constant_result=value)
def compile_time_value(self, denv):
return self.value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
def can_coerce_to_char_literal(self):
return len(self.value) == 1
def coerce_to_boolean(self, env):
# This is special because testing a C char* for truth directly
# would yield the wrong result.
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def coerce_to(self, dst_type, env):
if self.type == dst_type:
return self
if dst_type.is_int:
if not self.can_coerce_to_char_literal():
error(self.pos, "Only single-character string literals can be coerced into ints.")
return self
if dst_type.is_unicode_char:
error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
return self
return CharNode(self.pos, value=self.value,
constant_result=ord(self.value))
node = BytesNode(self.pos, value=self.value,
constant_result=self.constant_result)
if dst_type.is_pyobject:
if dst_type in (py_object_type, Builtin.bytes_type):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
return node
elif dst_type == PyrexTypes.c_char_ptr_type:
node.type = dst_type
return node
elif dst_type == PyrexTypes.c_uchar_ptr_type:
node.type = PyrexTypes.c_char_ptr_type
return CastNode(node, PyrexTypes.c_uchar_ptr_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
node.type = dst_type
return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
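    # Illustrative examples: b'x' coerced to a C integer type becomes a
    # CharNode, b'abc' coerced to char* keeps a BytesNode and merely retargets
    # its type, and a multi-character literal coerced to an int type is
    # rejected with the error above.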
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
self.result_code = code.get_py_string_const(self.value)
else:
self.result_code = code.get_string_const(self.value)
def get_constant_c_result_code(self):
return None # FIXME
def calculate_result_code(self):
return self.result_code
class UnicodeNode(ConstNode):
# A Py_UNICODE* or unicode literal
#
    # value        EncodedString
    # bytes_value  BytesLiteral   the literal parsed as bytes string
    #                             ('-3' unicode literals only)
is_string_literal = True
bytes_value = None
type = unicode_type
def calculate_constant_result(self):
self.constant_result = self.value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value, self.pos, env)
def as_sliced_node(self, start, stop, step=None):
if StringEncoding.string_contains_surrogates(self.value[:stop]):
# this is unsafe as it may give different results
# in different runtimes
return None
value = StringEncoding.EncodedString(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.bytes_value is not None:
bytes_value = StringEncoding.BytesLiteral(
self.bytes_value[start:stop:step])
bytes_value.encoding = self.bytes_value.encoding
else:
bytes_value = None
return UnicodeNode(
self.pos, value=value, bytes_value=bytes_value,
constant_result=value)
def coerce_to(self, dst_type, env):
if dst_type is self.type:
pass
elif dst_type.is_unicode_char:
if not self.can_coerce_to_char_literal():
error(self.pos,
"Only single-character Unicode string literals or "
"surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
return self
int_value = ord(self.value)
return IntNode(self.pos, type=dst_type, value=str(int_value),
constant_result=int_value)
elif not dst_type.is_pyobject:
if dst_type.is_string and self.bytes_value is not None:
# special case: '-3' enforced unicode literal used in a
# C char* context
return BytesNode(self.pos, value=self.bytes_value
).coerce_to(dst_type, env)
if dst_type.is_pyunicode_ptr:
node = UnicodeNode(self.pos, value=self.value)
node.type = dst_type
return node
error(self.pos,
"Unicode literals do not support coercion to C types other "
"than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
"(for strings).")
elif dst_type not in (py_object_type, Builtin.basestring_type):
self.check_for_coercion_error(dst_type, env, fail=True)
return self
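    # Illustrative examples: u'x' coerced to Py_UCS4/Py_UNICODE becomes an
    # IntNode carrying ord(u'x'); coercion to Py_UNICODE* keeps a UnicodeNode
    # and retargets its type; coercion to a C char* only works when the
    # literal also carries a bytes_value, otherwise an error is raised above.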
def can_coerce_to_char_literal(self):
return len(self.value) == 1
## or (len(self.value) == 2
## and (0xD800 <= self.value[0] <= 0xDBFF)
## and (0xDC00 <= self.value[1] <= 0xDFFF))
def coerce_to_boolean(self, env):
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def contains_surrogates(self):
return StringEncoding.string_contains_surrogates(self.value)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
if self.contains_surrogates():
# surrogates are not really portable and cannot be
# decoded by the UTF-8 codec in Py3.3
self.result_code = code.get_py_const(py_object_type, 'ustring')
data_cname = code.get_pyunicode_ptr_const(self.value)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
self.result_code,
data_cname,
data_cname,
code.error_goto_if_null(self.result_code, self.pos)))
code.putln("#if CYTHON_PEP393_ENABLED")
code.put_error_if_neg(
self.pos, "PyUnicode_READY(%s)" % self.result_code)
code.putln("#endif")
else:
self.result_code = code.get_py_string_const(self.value)
else:
self.result_code = code.get_pyunicode_ptr_const(self.value)
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
return self.value
class StringNode(PyConstNode):
# A Python str object, i.e. a byte string in Python 2.x and a
# unicode string in Python 3.x
#
# value BytesLiteral (or EncodedString with ASCII content)
# unicode_value EncodedString or None
# is_identifier boolean
type = str_type
is_string_literal = True
is_identifier = None
unicode_value = None
def calculate_constant_result(self):
if self.unicode_value is not None:
# only the Unicode value is portable across Py2/3
self.constant_result = self.unicode_value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
def as_sliced_node(self, start, stop, step=None):
value = type(self.value)(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.unicode_value is not None:
if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
# this is unsafe as it may give different results in different runtimes
return None
unicode_value = StringEncoding.EncodedString(
self.unicode_value[start:stop:step])
else:
unicode_value = None
return StringNode(
self.pos, value=value, unicode_value=unicode_value,
constant_result=value, is_identifier=self.is_identifier)
def coerce_to(self, dst_type, env):
if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
# if dst_type is Builtin.bytes_type:
# # special case: bytes = 'str literal'
# return BytesNode(self.pos, value=self.value)
if not dst_type.is_pyobject:
return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
if dst_type is not Builtin.basestring_type:
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return not self.is_identifier and len(self.value) == 1
def generate_evaluation_code(self, code):
self.result_code = code.get_py_string_const(
self.value, identifier=self.is_identifier, is_str=True,
unicode_value=self.unicode_value)
def get_constant_c_result_code(self):
return None
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
return self.value
class IdentifierStringNode(StringNode):
# A special str value that represents an identifier (bytes in Py2,
# unicode in Py3).
is_identifier = True
class ImagNode(AtomicExprNode):
# Imaginary number literal
#
# value float imaginary part
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
self.constant_result = complex(0.0, self.value)
def compile_time_value(self, denv):
return complex(0.0, self.value)
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
node = ImagNode(self.pos, value=self.value)
if dst_type.is_pyobject:
node.is_temp = 1
node.type = PyrexTypes.py_object_type
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return AtomicExprNode.coerce_to(node, dst_type, env)
gil_message = "Constructing complex number"
def calculate_result_code(self):
if self.type.is_pyobject:
return self.result()
else:
return "%s(0, %r)" % (self.type.from_parts, float(self.value))
def generate_result_code(self, code):
if self.type.is_pyobject:
code.putln(
"%s = PyComplex_FromDoubles(0.0, %r); %s" % (
self.result(),
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
# C++ new statement
#
# cppclass node c++ class to create
type = None
def infer_type(self, env):
type = self.cppclass.analyse_as_type(env)
if type is None or not type.is_cpp_class:
error(self.pos, "new operator can only be applied to a C++ class")
self.type = error_type
return
self.cpp_check(env)
constructor = type.scope.lookup(u'<init>')
if constructor is None:
func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
type.scope.declare_cfunction(u'<init>', func_type, self.pos)
constructor = type.scope.lookup(u'<init>')
self.class_type = type
self.entry = constructor
self.type = constructor.type
return self.type
def analyse_types(self, env):
if self.type is None:
self.infer_type(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
pass
def calculate_result_code(self):
return "new " + self.class_type.declaration_code("")
class NameNode(AtomicExprNode):
# Reference to a local or global variable name.
#
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
# cf_is_null boolean Is uninitialized before this node
# cf_maybe_null boolean Maybe uninitialized before this node
# allow_null boolean Don't raise UnboundLocalError
# nogil boolean Whether it is used in a nogil context
is_name = True
is_cython_module = False
cython_attribute = None
lhs_of_first_assignment = False # TODO: remove me
is_used_as_rvalue = 0
entry = None
type_entry = None
cf_maybe_null = True
cf_is_null = False
allow_null = False
nogil = False
inferred_type = None
def as_cython_attribute(self):
return self.cython_attribute
def type_dependencies(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is not None and self.entry.type.is_unspecified:
return (self,)
else:
return ()
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is None or self.entry.type is unspecified_type:
if self.inferred_type is not None:
return self.inferred_type
return py_object_type
elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
self.name == self.entry.type.name:
# Unfortunately the type attribute of type objects
# is used for the pointer to the type they represent.
return type_type
elif self.entry.type.is_cfunction:
if self.entry.scope.is_builtin_scope:
# special case: optimised builtin functions must be treated as Python objects
return py_object_type
else:
# special case: referring to a C function must return its pointer
return PyrexTypes.CPtrType(self.entry.type)
else:
# If entry is inferred as pyobject it's safe to use local
# NameNode's inferred_type.
if self.entry.type.is_pyobject and self.inferred_type:
                # Overflow may happen if the inferred type is an integer type
if not (self.inferred_type.is_int and self.entry.might_overflow):
return self.inferred_type
return self.entry.type
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
except KeyError:
error(self.pos, "Compile-time name '%s' not defined" % self.name)
def get_constant_c_result_code(self):
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
# referring to the Python builtin.
#print "NameNode.coerce_to:", self.name, dst_type ###
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction:
var_entry = entry.as_variable
if var_entry:
if var_entry.is_builtin and var_entry.is_const:
var_entry = env.declare_builtin(var_entry.name, self.pos)
node = NameNode(self.pos, name = self.name)
node.entry = var_entry
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.as_module:
return entry.as_module
return None
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
else:
type = PyrexTypes.parse_basic_type(self.name)
if type:
return type
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
return entry.type
else:
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
if env.directives['infer_types'] != False:
type = unspecified_type
else:
type = py_object_type
self.entry = env.declare_var(self.name, type, self.pos)
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
def analyse_types(self, env):
self.initialized_check = env.directives['initializedcheck']
if self.entry is None:
self.entry = env.lookup(self.name)
if not self.entry:
self.entry = env.declare_builtin(self.name, self.pos)
if not self.entry:
self.type = PyrexTypes.error_type
return self
entry = self.entry
if entry:
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
self.analyse_rvalue_entry(env)
return self
def analyse_target_types(self, env):
self.analyse_entry(env, is_target=True)
if (not self.is_lvalue() and self.entry.is_cfunction and
self.entry.fused_cfunction and self.entry.as_variable):
# We need this for the fused 'def' TreeFragment
self.entry = self.entry.as_variable
self.type = self.entry.type
if self.type.is_const:
error(self.pos, "Assignment to const '%s'" % self.name)
if self.type.is_reference:
error(self.pos, "Assignment to reference '%s'" % self.name)
if not self.is_lvalue():
error(self.pos, "Assignment to non-lvalue '%s'"
% self.name)
self.type = PyrexTypes.error_type
self.entry.used = 1
if self.entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(self.entry)
return self
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
self.analyse_entry(env)
entry = self.entry
if entry.is_declared_generic:
self.result_ctype = py_object_type
if entry.is_pyglobal or entry.is_builtin:
if entry.is_builtin and entry.is_const:
self.is_temp = 0
else:
self.is_temp = 1
self.is_used_as_rvalue = 1
elif entry.type.is_memoryviewslice:
self.is_temp = False
self.is_used_as_rvalue = True
self.use_managed_ref = True
return self
def nogil_check(self, env):
self.nogil = True
if self.is_used_as_rvalue:
entry = self.entry
if entry.is_builtin:
if not entry.is_const: # cached builtins are ok
self.gil_error()
elif entry.is_pyglobal:
self.gil_error()
elif self.entry.type.is_memoryviewslice:
if self.cf_is_null or self.cf_maybe_null:
from . import MemoryView
MemoryView.err_if_nogil_initialized_check(self.pos, env)
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env, is_target=False):
#print "NameNode.analyse_entry:", self.name ###
self.check_identifier_kind()
entry = self.entry
type = entry.type
if (not is_target and type.is_pyobject and self.inferred_type and
self.inferred_type.is_builtin_type):
# assume that type inference is smarter than the static entry
type = self.inferred_type
self.type = type
def check_identifier_kind(self):
# Check that this is an appropriate kind of name for use in an
# expression. Also finds the variable entry associated with
# an extension type.
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
if not (entry.is_const or entry.is_variable
or entry.is_builtin or entry.is_cfunction
or entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
else:
error(self.pos,
"'%s' is not a constant, variable or function identifier" % self.name)
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
def may_be_none(self):
if self.cf_state and self.type and (self.type.is_pyobject or
self.type.is_memoryviewslice):
            # guard against infinite recursion on self-dependencies
if getattr(self, '_none_checking', False):
# self-dependency - either this node receives a None
                # value from *another* node, or it cannot reference
# None at this point => safe to assume "not None"
return False
self._none_checking = True
# evaluate control flow state to see if there were any
# potential None values assigned to the node so far
may_be_none = False
for assignment in self.cf_state:
if assignment.rhs.may_be_none():
may_be_none = True
break
del self._none_checking
return may_be_none
return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
if ExprNode.nonlocally_immutable(self):
return True
entry = self.entry
if not entry or entry.in_closure:
return False
return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
def calculate_target_results(self, env):
pass
def check_const(self):
entry = self.entry
if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
self.not_const()
return False
return True
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
self.addr_not_const()
return False
return True
def is_lvalue(self):
return self.entry.is_variable and \
not self.entry.type.is_array and \
not self.entry.is_readonly
def is_addressable(self):
return self.entry.is_variable and not self.type.is_memoryviewslice
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
def calculate_result_code(self):
entry = self.entry
if not entry:
return "<error>" # There was an error earlier
return entry.cname
def generate_result_code(self, code):
assert hasattr(self, 'entry')
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.is_builtin and entry.is_const:
return # Lookup already cached
elif entry.is_pyclass_attr:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.is_builtin:
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.scope.namespace_cname
if not self.cf_is_null:
code.putln(
'%s = PyObject_GetItem(%s, %s);' % (
self.result(),
namespace,
interned_cname))
code.putln('if (unlikely(!%s)) {' % self.result())
code.putln('PyErr_Clear();')
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s);' % (
self.result(),
interned_cname))
if not self.cf_is_null:
code.putln("}")
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
elif entry.is_builtin and not entry.scope.is_module_scope:
# known builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetBuiltinName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
# name in class body, global name or unknown builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
else:
# FIXME: is_pyglobal is also used for class namespace
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetNameInClass(%s, %s); %s' % (
self.result(),
entry.scope.namespace_cname,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
# Raise UnboundLocalError for objects and memoryviewslices
raise_unbound = (
(self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
null_code = entry.type.check_for_null_code(entry.cname)
memslice_check = entry.type.is_memoryviewslice and self.initialized_check
if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
def generate_assignment_code(self, rhs, code):
#print "NameNode.generate_assignment_code:", self.name ###
entry = self.entry
if entry is None:
return # There was an error earlier
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment and not rhs.in_module_scope):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
        # is_pyglobal seems to be True for module-level globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
namespace = self.entry.scope.namespace_cname
if entry.is_member:
# if the entry is a member we have to cheat: SetAttr does not work
# on types, so we create a descriptor which is then added to tp_dict
setter = 'PyDict_SetItem'
namespace = '%s->tp_dict' % namespace
elif entry.scope.is_module_scope:
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
setter = 'PyObject_SetItem'
else:
assert False, repr(entry)
code.put_error_if_neg(
self.pos,
'%s(%s, %s, %s)' % (
setter,
namespace,
interned_cname,
rhs.py_result()))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating disposal code for %s" % rhs)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
if entry.is_member:
# in Py2.6+, we need to invalidate the method cache
code.putln("PyType_Modified(%s);" %
entry.scope.parent_type.typeptr_cname)
else:
if self.type.is_memoryviewslice:
self.generate_acquire_memoryviewslice(rhs, code)
elif self.type.is_buffer:
# Generate code for doing the buffer release/acquisition.
# This might raise an exception in which case the assignment (done
# below) will not happen.
#
                # The reason this is not in a typetest-like node is that the
                # variables the acquired buffer info is stored to are allocated
                # per entry and coupled with it.
self.generate_acquire_buffer(rhs, code)
assigned = False
if self.type.is_pyobject:
#print "NameNode.generate_assignment_code: to", self.name ###
#print "...from", rhs ###
#print "...LHS type", self.type, "ctype", self.ctype() ###
#print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
if self.use_managed_ref:
rhs.make_owned_reference(code)
is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
if is_external_ref:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xgotref(self.py_result())
else:
code.put_gotref(self.py_result())
assigned = True
if entry.is_cglobal:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xdecref_set(
self.result(), rhs.result_as(self.ctype()))
else:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
assigned = False
if is_external_ref:
code.put_giveref(rhs.py_result())
if not self.type.is_memoryviewslice:
if not assigned:
code.putln('%s = %s;' % (
self.result(), rhs.result_as(self.ctype())))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
rhs.generate_post_assignment_code(code)
elif rhs.result_in_temp():
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def generate_acquire_memoryviewslice(self, rhs, code):
"""
        Slices, coercions from objects, return values etc. are new references.
        We only have a borrowed reference in the case of dst = src.
"""
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=self.result(),
lhs_type=self.type,
lhs_pos=self.pos,
rhs=rhs,
code=code,
have_gil=not self.in_nogil_context,
first_assignment=self.cf_is_null)
def generate_acquire_buffer(self, rhs, code):
# rhstmp is only used in case the rhs is a complicated expression leading to
# the object, to avoid repeating the same C expression for every reference
# to the rhs. It does NOT hold a reference.
pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
if pretty_rhs:
rhstmp = rhs.result_as(self.ctype())
else:
rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
from . import Buffer
Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if self.entry is None:
return # There was an error earlier
elif self.entry.is_pyclass_attr:
namespace = self.entry.scope.namespace_cname
interned_cname = code.intern_identifier(self.entry.name)
if ignore_nonexisting:
key_error_code = 'PyErr_Clear(); else'
else:
# minor hack: fake a NameError on KeyError
key_error_code = (
'{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
self.entry.name)
code.putln(
'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
' %s '
'}' % (namespace, interned_cname,
key_error_code,
code.error_goto(self.pos)))
elif self.entry.is_pyglobal:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
interned_cname = code.intern_identifier(self.entry.name)
del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
Naming.module_cname, interned_cname)
if ignore_nonexisting:
code.putln('if (unlikely(%s < 0)) { if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s }' % (
del_code,
code.error_goto(self.pos)))
else:
code.put_error_if_neg(self.pos, del_code)
elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
if not self.cf_is_null:
if self.cf_maybe_null and not ignore_nonexisting:
code.put_error_if_unbound(self.pos, self.entry)
if self.entry.type.is_pyobject:
if self.entry.in_closure:
# generator
if ignore_nonexisting and self.cf_maybe_null:
code.put_xgotref(self.result())
else:
code.put_gotref(self.result())
if ignore_nonexisting and self.cf_maybe_null:
code.put_xdecref(self.result(), self.ctype())
else:
code.put_decref(self.result(), self.ctype())
code.putln('%s = NULL;' % self.result())
else:
code.put_xdecref_memoryviewslice(self.entry.cname,
have_gil=not self.nogil)
else:
error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
style, text = 'py_call', 'python function (%s)'
else:
style, text = 'c_call', 'c function (%s)'
code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
# `expr`
#
# arg ExprNode
type = py_object_type
subexprs = ['arg']
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Backquote expression"
def calculate_constant_result(self):
self.constant_result = repr(self.arg.constant_result)
def generate_result_code(self, code):
code.putln(
"%s = PyObject_Repr(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ImportNode(ExprNode):
# Used as part of import statement implementation.
# Implements result =
# __import__(module_name, globals(), None, name_list, level)
#
    # module_name  StringNode        dotted name of module. Empty module
    #                                name means importing the parent package
    #                                according to level
    # name_list    ListNode or None  list of names to be imported
    # level        int               relative import level:
    #                 -1: attempt both relative import and absolute import;
    #                  0: absolute import;
    #                 >0: the number of parent directories to search
    #                     relative to the current module.
    #               None: decide the level according to language level and
    #                     directives
type = py_object_type
subexprs = ['module_name', 'name_list']
def analyse_types(self, env):
if self.level is None:
if (env.directives['py2_import'] or
Future.absolute_import not in env.global_scope().context.future_directives):
self.level = -1
else:
self.level = 0
module_name = self.module_name.analyse_types(env)
self.module_name = module_name.coerce_to_pyobject(env)
if self.name_list:
name_list = self.name_list.analyse_types(env)
self.name_list = name_list.coerce_to_pyobject(env)
self.is_temp = 1
env.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
return self
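    # Illustrative sketch (the cnames below are hypothetical): a plain
    #     import spam
    # under Py2-style semantics gets level -1 and no name list, so
    # generate_result_code() emits roughly
    #     __pyx_t_1 = __Pyx_Import(__pyx_n_s_spam, 0, -1);
    # followed by the usual error check and gotref.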
gil_message = "Python import"
def generate_result_code(self, code):
if self.name_list:
name_list_code = self.name_list.py_result()
else:
name_list_code = "0"
code.putln(
"%s = __Pyx_Import(%s, %s, %d); %s" % (
self.result(),
self.module_name.py_result(),
name_list_code,
self.level,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
# Used as part of for statement implementation.
#
# Implements result = iter(sequence)
#
# sequence ExprNode
type = py_object_type
iter_func_ptr = None
counter_cname = None
cpp_iterator_cname = None
reversed = False # currently only used for list/tuple types (see Optimize.py)
subexprs = ['sequence']
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
not self.sequence.type.is_string:
# C array iteration will be transformed later on
self.type = self.sequence.type
elif self.sequence.type.is_cpp_class:
self.analyse_cpp_types(env)
else:
self.sequence = self.sequence.coerce_to_pyobject(env)
if self.sequence.type is list_type or \
self.sequence.type is tuple_type:
self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
self.is_temp = 1
return self
gil_message = "Iterating over Python object"
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def type_dependencies(self, env):
return self.sequence.type_dependencies(env)
def infer_type(self, env):
sequence_type = self.sequence.infer_type(env)
if sequence_type.is_array or sequence_type.is_ptr:
return sequence_type
elif sequence_type.is_cpp_class:
begin = sequence_type.scope.lookup("begin")
if begin is not None:
return begin.type.return_type
elif sequence_type.is_pyobject:
return sequence_type
return py_object_type
def analyse_cpp_types(self, env):
sequence_type = self.sequence.type
if sequence_type.is_ptr:
sequence_type = sequence_type.base_type
begin = sequence_type.scope.lookup("begin")
end = sequence_type.scope.lookup("end")
if (begin is None
or not begin.type.is_cfunction
or begin.type.args):
error(self.pos, "missing begin() on %s" % self.sequence.type)
self.type = error_type
return
if (end is None
or not end.type.is_cfunction
or end.type.args):
error(self.pos, "missing end() on %s" % self.sequence.type)
self.type = error_type
return
iter_type = begin.type.return_type
if iter_type.is_cpp_class:
if env.lookup_operator_for_types(
self.pos,
"!=",
[iter_type, end.type.return_type]) is None:
error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
self.type = iter_type
elif iter_type.is_ptr:
if not (iter_type == end.type.return_type):
error(self.pos, "incompatible types for begin() and end()")
self.type = iter_type
else:
error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
self.type = error_type
return
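    # Illustrative note: iterating a C++ container, e.g. a libcpp.vector[int],
    # requires begin()/end() on the container plus operator!=, operator++ and
    # operator* on the iterator type returned by begin(); that iterator type
    # then becomes this node's type, as checked above.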
def generate_result_code(self, code):
sequence_type = self.sequence.type
if sequence_type.is_cpp_class:
if self.sequence.is_name:
# safe: C++ won't allow you to reassign to class references
begin_func = "%s.begin" % self.sequence.result()
else:
sequence_type = PyrexTypes.c_ptr_type(sequence_type)
self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
begin_func = "%s->begin" % self.cpp_iterator_cname
# TODO: Limit scope.
code.putln("%s = %s();" % (self.result(), begin_func))
return
if sequence_type.is_array or sequence_type.is_ptr:
raise InternalError("for in carray slice not transformed")
is_builtin_sequence = sequence_type is list_type or \
sequence_type is tuple_type
if not is_builtin_sequence:
# reversed() not currently optimised (see Optimize.py)
assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
self.may_be_a_sequence = not sequence_type.is_builtin_type
if self.may_be_a_sequence:
code.putln(
"if (PyList_CheckExact(%s) || PyTuple_CheckExact(%s)) {" % (
self.sequence.py_result(),
self.sequence.py_result()))
if is_builtin_sequence or self.may_be_a_sequence:
self.counter_cname = code.funcstate.allocate_temp(
PyrexTypes.c_py_ssize_t_type, manage_ref=False)
if self.reversed:
if sequence_type is list_type:
init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
else:
init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
else:
init_value = '0'
code.putln(
"%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
self.result(),
self.sequence.py_result(),
self.result(),
self.counter_cname,
init_value
))
if not is_builtin_sequence:
self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
if self.may_be_a_sequence:
code.putln("%s = NULL;" % self.iter_func_ptr)
code.putln("} else {")
code.put("%s = -1; " % self.counter_cname)
code.putln("%s = PyObject_GetIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
# PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
# makes it visible to the C compiler that the pointer really isn't NULL, so that
# it can distinguish between the special cases and the generic case
code.putln("%s = Py_TYPE(%s)->tp_iternext; %s" % (
self.iter_func_ptr, self.py_result(),
code.error_goto_if_null(self.iter_func_ptr, self.pos)))
if self.may_be_a_sequence:
code.putln("}")
def generate_next_sequence_item(self, test_name, result_name, code):
assert self.counter_cname, "internal error: counter_cname temp not prepared"
final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
if self.sequence.is_sequence_constructor:
item_count = len(self.sequence.args)
if self.sequence.mult_factor is None:
final_size = item_count
elif isinstance(self.sequence.mult_factor.constant_result, (int, long)):
final_size = item_count * self.sequence.mult_factor.constant_result
code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
if self.reversed:
inc_dec = '--'
else:
inc_dec = '++'
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln(
"%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
result_name,
test_name,
self.py_result(),
self.counter_cname,
result_name,
self.counter_cname,
inc_dec,
# use the error label to avoid C compiler warnings if we only use it below
code.error_goto_if_neg('0', self.pos)
))
code.putln("#else")
code.putln(
"%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
result_name,
self.py_result(),
self.counter_cname,
self.counter_cname,
inc_dec,
code.error_goto_if_null(result_name, self.pos)))
code.putln("#endif")
def generate_iter_next_result_code(self, result_name, code):
sequence_type = self.sequence.type
if self.reversed:
code.putln("if (%s < 0) break;" % self.counter_cname)
if sequence_type.is_cpp_class:
if self.cpp_iterator_cname:
end_func = "%s->end" % self.cpp_iterator_cname
else:
end_func = "%s.end" % self.sequence.result()
# TODO: Cache end() call?
code.putln("if (!(%s != %s())) break;" % (
self.result(),
end_func))
code.putln("%s = *%s;" % (
result_name,
self.result()))
code.putln("++%s;" % self.result())
return
elif sequence_type is list_type:
self.generate_next_sequence_item('List', result_name, code)
return
elif sequence_type is tuple_type:
self.generate_next_sequence_item('Tuple', result_name, code)
return
if self.may_be_a_sequence:
code.putln("if (!%s) {" % self.iter_func_ptr)
code.putln("if (PyList_CheckExact(%s)) {" % self.py_result())
self.generate_next_sequence_item('List', result_name, code)
code.putln("} else {")
self.generate_next_sequence_item('Tuple', result_name, code)
code.putln("}")
code.put("} else ")
code.putln("{")
code.putln(
"%s = %s(%s);" % (
result_name,
self.iter_func_ptr,
self.py_result()))
code.putln("if (unlikely(!%s)) {" % result_name)
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration ||"
" PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("break;")
code.putln("}")
code.put_gotref(result_name)
code.putln("}")
def free_temps(self, code):
if self.counter_cname:
code.funcstate.release_temp(self.counter_cname)
if self.iter_func_ptr:
code.funcstate.release_temp(self.iter_func_ptr)
self.iter_func_ptr = None
if self.cpp_iterator_cname:
code.funcstate.release_temp(self.cpp_iterator_cname)
ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
# Used as part of for statement implementation.
# Implements result = iterator.next()
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def type_dependencies(self, env):
return self.iterator.type_dependencies(env)
def infer_type(self, env, iterator_type = None):
if iterator_type is None:
iterator_type = self.iterator.infer_type(env)
if iterator_type.is_ptr or iterator_type.is_array:
return iterator_type.base_type
elif iterator_type.is_cpp_class:
item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
if item_type.is_reference:
item_type = item_type.ref_base_type
if item_type.is_const:
item_type = item_type.const_base_type
return item_type
else:
# Avoid duplication of complicated logic.
fake_index_node = IndexNode(
self.pos,
base=self.iterator.sequence,
index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type))
return fake_index_node.infer_type(env)
def analyse_types(self, env):
self.type = self.infer_type(env, self.iterator.type)
self.is_temp = 1
return self
def generate_result_code(self, code):
self.iterator.generate_iter_next_result_code(self.result(), code)
class WithExitCallNode(ExprNode):
# The __exit__() call of a 'with' statement. Used in both the
# except and finally clauses.
# with_stat WithStatNode the surrounding 'with' statement
# args TupleNode or ResultStatNode the exception info tuple
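    #
    # Editorial sketch: for
    #
    #     with mgr as x:
    #         <body>
    #
    # this node emits the call to mgr.__exit__(exc_type, exc_value, traceback)
    # (or with None arguments on normal completion); its truth value decides
    # whether a pending exception is suppressed.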
subexprs = ['args']
test_if_run = True
def analyse_types(self, env):
self.args = self.args.analyse_types(env)
self.type = PyrexTypes.c_bint_type
self.is_temp = True
return self
def generate_evaluation_code(self, code):
if self.test_if_run:
# call only if it was not already called (and decref-cleared)
code.putln("if (%s) {" % self.with_stat.exit_var)
self.args.generate_evaluation_code(code)
result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.mark_pos(self.pos)
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
result_var,
self.with_stat.exit_var,
self.args.result()))
code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
self.args.generate_disposal_code(code)
self.args.free_temps(code)
code.putln(code.error_goto_if_null(result_var, self.pos))
code.put_gotref(result_var)
if self.result_is_used:
self.allocate_temp_result(code)
code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
code.put_decref_clear(result_var, type=py_object_type)
if self.result_is_used:
code.put_error_if_neg(self.pos, self.result())
code.funcstate.release_temp(result_var)
if self.test_if_run:
code.putln("}")
class ExcValueNode(AtomicExprNode):
# Node created during analyse_types phase
# of an ExceptClauseNode to fetch the current
# exception value.
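    # (Roughly what Python-level code would observe as sys.exc_info()[1]
    # inside the matching except block -- editorial note.)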
type = py_object_type
def __init__(self, pos):
ExprNode.__init__(self, pos)
def set_var(self, var):
self.var = var
def calculate_result_code(self):
return self.var
def generate_result_code(self, code):
pass
def analyse_types(self, env):
return self
class TempNode(ExprNode):
# Node created during analyse_types phase
# of some nodes to hold a temporary value.
#
# Note: One must call "allocate" and "release" on
# the node during code generation to get/release the temp.
# This is because the temp result is often used outside of
# the regular cycle.
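    #
    # Typical usage pattern during code generation (editorial sketch; 'pos',
    # 'some_type', 'env' and 'code' stand in for the caller's context):
    #
    #     tmp = TempNode(pos, some_type, env)
    #     tmp.allocate(code)                      # grab a temp slot
    #     code.putln("%s = ...;" % tmp.result())
    #     tmp.release(code)                       # must pair with allocate()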
subexprs = []
def __init__(self, pos, type, env=None):
ExprNode.__init__(self, pos)
self.type = type
if type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
def analyse_types(self, env):
return self
def analyse_target_declaration(self, env):
pass
def generate_result_code(self, code):
pass
def allocate(self, code):
self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
def release(self, code):
code.funcstate.release_temp(self.temp_cname)
self.temp_cname = None
def result(self):
try:
return self.temp_cname
except:
assert False, "Remember to call allocate/release on TempNode"
raise
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
class PyTempNode(TempNode):
# TempNode holding a Python value.
def __init__(self, pos, env):
TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
subexprs = []
def __init__(self, pos, type=None, cname=None):
ExprNode.__init__(self, pos, type=type)
if cname is not None:
self.cname = cname
def analyse_types(self, env):
return self
def set_cname(self, cname):
self.cname = cname
def result(self):
return self.cname
def generate_result_code(self, code):
pass
#-------------------------------------------------------------------
#
#  Parallel nodes (cython.parallel.threadsavailable() and cython.parallel.threadid())
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
"""
Note: this is disabled and not a valid directive at this moment
Implements cython.parallel.threadsavailable(). If we are called from the
sequential part of the application, we need to call omp_get_max_threads(),
and in the parallel part we can just call omp_get_num_threads()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
self.temp_code)
code.putln("else %s = omp_get_num_threads();" % self.temp_code)
code.putln("#else")
code.putln("%s = 1;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
"""
Implements cython.parallel.threadid()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("%s = omp_get_thread_num();" % self.temp_code)
code.putln("#else")
code.putln("%s = 0;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class IndexNode(ExprNode):
# Sequence indexing.
#
# base ExprNode
# index ExprNode
# indices [ExprNode]
# type_indices [PyrexType]
# is_buffer_access boolean Whether this is a buffer access.
#
# indices is used on buffer access, index on non-buffer access.
# The former contains a clean list of index parameters, the
# latter whatever Python object is needed for index access.
#
# is_fused_index boolean Whether the index is used to specialize a
# c(p)def function
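    #
    # Editorial examples of source forms handled here:
    #     obj[i]              plain Python indexing
    #     buf[i, j]           buffer / memoryview indexing (uses 'indices')
    #     mview[1:, ::2]      memoryview slicing (memslice_slice)
    #     fused_func[int]     specialising a fused c(p)def function
    #     cppvec[i]           C++ operator[] on a wrapped cpp class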
subexprs = ['base', 'index', 'indices']
indices = None
type_indices = None
is_subscript = True
is_fused_index = False
# Whether we're assigning to a buffer (in that case it needs to be
# writable)
writable_needed = False
# Whether we are indexing or slicing a memoryviewslice
memslice_index = False
memslice_slice = False
is_memslice_copy = False
memslice_ellipsis_noop = False
warned_untyped_idx = False
# set by SingleAssignmentNode after analyse_types()
is_memslice_scalar_assignment = False
def __init__(self, pos, index, **kw):
ExprNode.__init__(self, pos, index=index, **kw)
self._index = index
def calculate_constant_result(self):
self.constant_result = \
self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception, e:
self.compile_time_value_error(e)
def is_ephemeral(self):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
basestring_type, str_type, bytes_type, unicode_type)
def is_simple(self):
if self.is_buffer_access or self.memslice_index:
return False
elif self.memslice_slice:
return True
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def analyse_target_declaration(self, env):
pass
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if base_type.is_cpp_class:
if isinstance(self.index, TupleNode):
template_values = self.index.args
else:
template_values = [self.index]
type_node = Nodes.TemplatedTypeNode(
pos = self.pos,
positional_args = template_values,
keyword_args = None)
return type_node.analyse(env, base_type = base_type)
else:
index = self.index.compile_time_value(env)
if index is not None:
return PyrexTypes.CArrayType(base_type, int(index))
error(self.pos, "Array size must be a compile time constant")
return None
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
base_type = self.base.infer_type(env)
if isinstance(self.index, SliceNode):
# slicing!
if base_type.is_string:
# sliced C strings must coerce to Python
return bytes_type
elif base_type.is_pyunicode_ptr:
# sliced Py_UNICODE* strings must coerce to Python
return unicode_type
elif base_type in (unicode_type, bytes_type, str_type,
bytearray_type, list_type, tuple_type):
# slicing these returns the same type
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
index_type = self.index.infer_type(env)
if index_type and index_type.is_int or isinstance(self.index, IntNode):
# indexing!
if base_type is unicode_type:
# Py_UCS4 will automatically coerce to a unicode string
# if required, so this is safe. We only infer Py_UCS4
# when the index is a C integer type. Otherwise, we may
# need to use normal Python item access, in which case
# it's faster to return the one-char unicode string than
# to receive it, throw it away, and potentially rebuild it
# on a subsequent PyObject coercion.
return PyrexTypes.c_py_ucs4_type
elif base_type is str_type:
# always returns str - Py2: bytes, Py3: unicode
return base_type
elif base_type is bytearray_type:
return PyrexTypes.c_uchar_type
elif isinstance(self.base, BytesNode):
#if env.global_scope().context.language_level >= 3:
# # inferring 'char' can be made to work in Python 3 mode
# return PyrexTypes.c_char_type
# Py2/3 return different types on indexing bytes objects
return py_object_type
elif base_type in (tuple_type, list_type):
# if base is a literal, take a look at its values
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is not None:
return item_type
elif base_type.is_ptr or base_type.is_array:
return base_type.base_type
if base_type.is_cpp_class:
class FakeOperand:
def __init__(self, **kwds):
self.__dict__.update(kwds)
operands = [
FakeOperand(pos=self.pos, type=base_type),
FakeOperand(pos=self.pos, type=index_type),
]
index_func = env.lookup_operator('[]', operands)
if index_func is not None:
return index_func.type.return_type
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
            # these types always return their own type on Python indexing/slicing
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
def analyse_types(self, env):
return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
analyse_base=True):
# Note: This might be cleaned up by having IndexNode
        # parsed in a saner way and only constructing the tuple if
# needed.
# Note that this function must leave IndexNode in a cloneable state.
# For buffers, self.index is packed out on the initial analysis, and
# when cloning self.indices is copied.
self.is_buffer_access = False
# a[...] = b
self.is_memslice_copy = False
# incomplete indexing, Ellipsis indexing or slicing
self.memslice_slice = False
# integer indexing
self.memslice_index = False
if analyse_base:
self.base = self.base.analyse_types(env)
if self.base.type.is_error:
# Do not visit child tree if base is undeclared to avoid confusing
# error messages
self.type = PyrexTypes.error_type
return self
is_slice = isinstance(self.index, SliceNode)
if not env.directives['wraparound']:
if is_slice:
check_negative_indices(self.index.start, self.index.stop)
else:
check_negative_indices(self.index)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
self.index = self.index.coerce_to_pyobject(env)
is_memslice = self.base.type.is_memoryviewslice
# Handle the case where base is a literal char* (and we expect a string, not an int)
if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
self.base = self.base.coerce_to_pyobject(env)
skip_child_analysis = False
buffer_access = False
if self.indices:
indices = self.indices
elif isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
if (is_memslice and not self.indices and
isinstance(self.index, EllipsisNode)):
# Memoryviewslice copying
self.is_memslice_copy = True
elif is_memslice:
# memoryviewslice indexing or slicing
from . import MemoryView
skip_child_analysis = True
newaxes = [newaxis for newaxis in indices if newaxis.is_none]
have_slices, indices = MemoryView.unellipsify(indices,
newaxes,
self.base.type.ndim)
self.memslice_index = (not newaxes and
len(indices) == self.base.type.ndim)
axes = []
index_type = PyrexTypes.c_py_ssize_t_type
new_indices = []
if len(indices) - len(newaxes) > self.base.type.ndim:
self.type = error_type
error(indices[self.base.type.ndim].pos,
"Too many indices specified for type %s" %
self.base.type)
return self
axis_idx = 0
for i, index in enumerate(indices[:]):
index = index.analyse_types(env)
if not index.is_none:
access, packing = self.base.type.axes[axis_idx]
axis_idx += 1
if isinstance(index, SliceNode):
self.memslice_slice = True
if index.step.is_none:
axes.append((access, packing))
else:
axes.append((access, 'strided'))
# Coerce start, stop and step to temps of the right type
for attr in ('start', 'stop', 'step'):
value = getattr(index, attr)
if not value.is_none:
value = value.coerce_to(index_type, env)
#value = value.coerce_to_temp(env)
setattr(index, attr, value)
new_indices.append(value)
elif index.is_none:
self.memslice_slice = True
new_indices.append(index)
axes.append(('direct', 'strided'))
elif index.type.is_int or index.type.is_pyobject:
if index.type.is_pyobject and not self.warned_untyped_idx:
warning(index.pos, "Index should be typed for more "
"efficient access", level=2)
IndexNode.warned_untyped_idx = True
self.memslice_index = True
index = index.coerce_to(index_type, env)
indices[i] = index
new_indices.append(index)
else:
self.type = error_type
error(index.pos, "Invalid index for memoryview specified")
return self
self.memslice_index = self.memslice_index and not self.memslice_slice
self.original_indices = indices
# All indices with all start/stop/step for slices.
# We need to keep this around
self.indices = new_indices
self.env = env
elif self.base.type.is_buffer:
# Buffer indexing
if len(indices) == self.base.type.ndim:
buffer_access = True
skip_child_analysis = True
for x in indices:
x = x.analyse_types(env)
if not x.type.is_int:
buffer_access = False
if buffer_access and not self.base.type.is_memoryviewslice:
assert hasattr(self.base, "entry") # Must be a NameNode-like node
# On cloning, indices is cloned. Otherwise, unpack index into indices
assert not (buffer_access and isinstance(self.index, CloneNode))
self.nogil = env.nogil
if buffer_access or self.memslice_index:
#if self.base.type.is_memoryviewslice and not self.base.is_name:
# self.base = self.base.coerce_to_temp(env)
self.base = self.base.coerce_to_simple(env)
self.indices = indices
self.index = None
self.type = self.base.type.dtype
self.is_buffer_access = True
self.buffer_type = self.base.type #self.base.entry.type
if getting and self.type.is_pyobject:
self.is_temp = True
if setting and self.base.type.is_memoryviewslice:
self.base.type.writable_needed = True
elif setting:
if not self.base.entry.type.writable:
error(self.pos, "Writing to readonly buffer")
else:
self.writable_needed = True
if self.base.type.is_buffer:
self.base.entry.buffer_aux.writable_needed = True
elif self.is_memslice_copy:
self.type = self.base.type
if getting:
self.memslice_ellipsis_noop = True
else:
self.memslice_broadcast = True
elif self.memslice_slice:
self.index = None
self.is_temp = True
self.use_managed_ref = True
if not MemoryView.validate_axes(self.pos, axes):
self.type = error_type
return self
self.type = PyrexTypes.MemoryViewSliceType(
self.base.type.dtype, axes)
if (self.base.type.is_memoryviewslice and not
self.base.is_name and not
self.base.result_in_temp()):
self.base = self.base.coerce_to_temp(env)
if setting:
self.memslice_broadcast = True
else:
base_type = self.base.type
if not base_type.is_cfunction:
if isinstance(self.index, TupleNode):
self.index = self.index.analyse_types(
env, skip_children=skip_child_analysis)
elif not skip_child_analysis:
self.index = self.index.analyse_types(env)
self.original_index_type = self.index.type
if base_type.is_unicode_char:
# we infer Py_UNICODE/Py_UCS4 for unicode strings in some
# cases, but indexing must still work for them
if setting:
warning(self.pos, "cannot assign to Unicode string index", level=1)
elif self.index.constant_result in (0, -1):
# uchar[0] => uchar
return self.base
self.base = self.base.coerce_to_pyobject(env)
base_type = self.base.type
if base_type.is_pyobject:
if self.index.type.is_int and base_type is not dict_type:
if (getting
and (base_type in (list_type, tuple_type, bytearray_type))
and (not self.index.type.signed
or not env.directives['wraparound']
or (isinstance(self.index, IntNode) and
self.index.has_constant_result() and self.index.constant_result >= 0))
and not env.directives['boundscheck']):
self.is_temp = 0
else:
self.is_temp = 1
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
self.original_index_type.create_to_py_utility_code(env)
else:
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
if self.index.type.is_int and base_type is unicode_type:
# Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
# if required, so this is fast and safe
self.type = PyrexTypes.c_py_ucs4_type
elif self.index.type.is_int and base_type is bytearray_type:
if setting:
self.type = PyrexTypes.c_uchar_type
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
if base_type in (list_type, tuple_type) and self.index.type.is_int:
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is None:
item_type = py_object_type
self.type = item_type
if base_type in (list_type, tuple_type, dict_type):
# do the None check explicitly (not in a helper) to allow optimising it away
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
else:
if base_type.is_ptr or base_type.is_array:
self.type = base_type.base_type
if is_slice:
self.type = base_type
elif self.index.type.is_pyobject:
self.index = self.index.coerce_to(
PyrexTypes.c_py_ssize_t_type, env)
elif not self.index.type.is_int:
error(self.pos,
"Invalid index type '%s'" %
self.index.type)
elif base_type.is_cpp_class:
function = env.lookup_operator("[]", [self.base, self.index])
if function is None:
error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return self
func_type = function.type
if func_type.is_ptr:
func_type = func_type.base_type
self.index = self.index.coerce_to(func_type.args[0].type, env)
self.type = func_type.return_type
if setting and not func_type.return_type.is_reference:
error(self.pos, "Can't set non-reference result '%s'" % self.type)
elif base_type.is_cfunction:
if base_type.is_fused:
self.parse_indexed_fused_cdef(env)
else:
self.type_indices = self.parse_index_as_types(env)
if base_type.templates is None:
error(self.pos, "Can only parameterize template functions.")
elif len(base_type.templates) != len(self.type_indices):
error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
(len(base_type.templates), len(self.type_indices))))
self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
else:
error(self.pos,
"Attempting to index non-array type '%s'" %
base_type)
self.type = PyrexTypes.error_type
self.wrap_in_nonecheck_node(env, getting)
return self
def wrap_in_nonecheck_node(self, env, getting):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
if self.base.type.is_memoryviewslice:
if self.is_memslice_copy and not getting:
msg = "Cannot assign to None memoryview slice"
elif self.memslice_slice:
msg = "Cannot slice None memoryview slice"
else:
msg = "Cannot index None memoryview slice"
else:
msg = "'NoneType' object is not subscriptable"
self.base = self.base.as_none_safe_node(msg)
def parse_index_as_types(self, env, required=True):
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
type_indices = []
for index in indices:
type_indices.append(index.analyse_as_type(env))
if type_indices[-1] is None:
if required:
error(index.pos, "not parsable as a type")
return None
return type_indices
def parse_indexed_fused_cdef(self, env):
"""
Interpret fused_cdef_func[specific_type1, ...]
Note that if this method is called, we are an indexed cdef function
with fused argument types, and this IndexNode will be replaced by the
NameNode with specific entry just after analysis of expressions by
AnalyseExpressionsTransform.
"""
self.type = PyrexTypes.error_type
self.is_fused_index = True
base_type = self.base.type
specific_types = []
positions = []
if self.index.is_name or self.index.is_attribute:
positions.append(self.index.pos)
elif isinstance(self.index, TupleNode):
for arg in self.index.args:
positions.append(arg.pos)
specific_types = self.parse_index_as_types(env, required=False)
if specific_types is None:
self.index = self.index.analyse_types(env)
if not self.base.entry.as_variable:
error(self.pos, "Can only index fused functions with types")
else:
# A cpdef function indexed with Python objects
self.base.entry = self.entry = self.base.entry.as_variable
self.base.type = self.type = self.entry.type
self.base.is_temp = True
self.is_temp = True
self.entry.used = True
self.is_fused_index = False
return
for i, type in enumerate(specific_types):
specific_types[i] = type.specialize_fused(env)
fused_types = base_type.get_fused_types()
if len(specific_types) > len(fused_types):
return error(self.pos, "Too many types specified")
elif len(specific_types) < len(fused_types):
t = fused_types[len(specific_types)]
return error(self.pos, "Not enough types specified to specialize "
"the function, %s is still fused" % t)
# See if our index types form valid specializations
for pos, specific_type, fused_type in zip(positions,
specific_types,
fused_types):
if not any([specific_type.same_as(t) for t in fused_type.types]):
return error(pos, "Type not in fused type")
if specific_type is None or specific_type.is_error:
return
fused_to_specific = dict(zip(fused_types, specific_types))
type = base_type.specialize(fused_to_specific)
if type.is_fused:
# Only partially specific, this is invalid
error(self.pos,
"Index operation makes function only partially specific")
else:
# Fully specific, find the signature with the specialized entry
for signature in self.base.type.get_all_specialized_function_types():
if type.same_as(signature):
self.type = signature
if self.base.is_attribute:
# Pretend to be a normal attribute, for cdef extension
# methods
self.entry = signature.entry
self.is_attribute = True
self.obj = self.base.obj
self.type.entry.used = True
self.base.type = signature
self.base.entry = signature.entry
break
else:
# This is a bug
raise InternalError("Couldn't find the right signature")
gil_message = "Indexing Python object"
def nogil_check(self, env):
if self.is_buffer_access or self.memslice_index or self.memslice_slice:
if not self.memslice_slice and env.directives['boundscheck']:
# error(self.pos, "Cannot check buffer index bounds without gil; "
# "use boundscheck(False) directive")
warning(self.pos, "Use boundscheck(False) for faster access",
level=1)
if self.type.is_pyobject:
error(self.pos, "Cannot access buffer with object dtype without gil")
return
super(IndexNode, self).nogil_check(env)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
# NOTE: references currently have both is_reference and is_ptr
# set. Since pointers and references have different lvalue
# rules, we must be careful to separate the two.
if self.type.is_reference:
if self.type.ref_base_type.is_array:
# fixed-sized arrays aren't l-values
return False
elif self.type.is_ptr:
# non-const pointers can always be reassigned
return True
elif self.type.is_array:
# fixed-sized arrays aren't l-values
return False
# Just about everything else returned by the index operator
# can be an lvalue.
return True
def calculate_result_code(self):
if self.is_buffer_access:
return "(*%s)" % self.buffer_ptr_code
elif self.is_memslice_copy:
return self.base.result()
elif self.base.type in (list_type, tuple_type, bytearray_type):
if self.base.type is list_type:
index_code = "PyList_GET_ITEM(%s, %s)"
elif self.base.type is tuple_type:
index_code = "PyTuple_GET_ITEM(%s, %s)"
elif self.base.type is bytearray_type:
index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
else:
assert False, "unexpected base type in indexing: %s" % self.base.type
elif self.base.type.is_cfunction:
return "%s<%s>" % (
self.base.result(),
",".join([param.declaration_code("") for param in self.type_indices]))
else:
if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
error(self.pos, "Invalid use of pointer slice")
return
index_code = "(%s[%s])"
return index_code % (self.base.result(), self.index.result())
def extra_index_params(self, code):
if self.index.type.is_int:
is_list = self.base.type is list_type
wraparound = (
bool(code.globalstate.directives['wraparound']) and
self.original_index_type.signed and
not (isinstance(self.index.constant_result, (int, long))
and self.index.constant_result >= 0))
boundscheck = bool(code.globalstate.directives['boundscheck'])
return ", %s, %d, %s, %d, %d, %d" % (
self.original_index_type.declaration_code(""),
self.original_index_type.signed and 1 or 0,
self.original_index_type.to_py_function,
is_list, wraparound, boundscheck)
else:
return ""
def generate_subexpr_evaluation_code(self, code):
self.base.generate_evaluation_code(code)
if self.type_indices is not None:
pass
elif self.indices is None:
self.index.generate_evaluation_code(code)
else:
for i in self.indices:
i.generate_evaluation_code(code)
def generate_subexpr_disposal_code(self, code):
self.base.generate_disposal_code(code)
if self.type_indices is not None:
pass
elif self.indices is None:
self.index.generate_disposal_code(code)
else:
for i in self.indices:
i.generate_disposal_code(code)
def free_subexpr_temps(self, code):
self.base.free_temps(code)
if self.indices is None:
self.index.free_temps(code)
else:
for i in self.indices:
i.free_temps(code)
def generate_result_code(self, code):
if self.is_buffer_access or self.memslice_index:
buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
if self.type.is_pyobject:
# is_temp is True, so must pull out value and incref it.
# NOTE: object temporary results for nodes are declared
# as PyObject *, so we need a cast
code.putln("%s = (PyObject *) *%s;" % (self.temp_code,
self.buffer_ptr_code))
code.putln("__Pyx_INCREF((PyObject*)%s);" % self.temp_code)
elif self.memslice_slice:
self.put_memoryviewslice_slice_code(code)
elif self.is_temp:
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
if self.base.type is list_type:
function = "__Pyx_GetItemInt_List"
elif self.base.type is tuple_type:
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
code.globalstate.use_utility_code(
UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
else:
function = "PyObject_GetItem"
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
else:
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
if self.index.type.is_int:
index_code = self.index.result()
else:
index_code = self.index.py_result()
code.putln(
"%s = %s(%s, %s%s); if (unlikely(%s == %s)) %s;" % (
self.result(),
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
self.result(),
error_value,
code.error_goto(self.pos)))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
if self.base.type is bytearray_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
function = "__Pyx_SetItemInt_ByteArray"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
function = "__Pyx_SetItemInt"
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_SetItem"
            # It would seem that we could specialize lists/tuples, but that
# shouldn't happen here.
# Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
# index instead of an object, and bad conversion here would give
# the wrong exception. Also, tuples are supposed to be immutable,
# and raise a TypeError when trying to set their entries
# (PyTuple_SetItem() is for creating new tuples from scratch).
else:
function = "PyObject_SetItem"
code.putln(
"if (unlikely(%s(%s, %s, %s%s) < 0)) %s" % (
function,
self.base.py_result(),
index_code,
value_code,
self.extra_index_params(code),
code.error_goto(self.pos)))
def generate_buffer_setitem_code(self, rhs, code, op=""):
# Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
manage_ref=False)
rhs_code = rhs.result()
code.putln("%s = %s;" % (ptr, ptrexpr))
code.put_gotref("*%s" % ptr)
code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
rhs_code, ptr))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_assignment_code(self, rhs, code):
generate_evaluation_code = (self.is_memslice_scalar_assignment or
self.memslice_slice)
if generate_evaluation_code:
self.generate_evaluation_code(code)
else:
self.generate_subexpr_evaluation_code(code)
if self.is_buffer_access or self.memslice_index:
self.generate_buffer_setitem_code(rhs, code)
elif self.is_memslice_scalar_assignment:
self.generate_memoryviewslice_assign_scalar_code(rhs, code)
elif self.memslice_slice or self.is_memslice_copy:
self.generate_memoryviewslice_setslice_code(rhs, code)
elif self.type.is_pyobject:
self.generate_setitem_code(rhs.py_result(), code)
elif self.base.type is bytearray_type:
value_code = self._check_byte_value(code, rhs)
self.generate_setitem_code(value_code, code)
else:
code.putln(
"%s = %s;" % (
self.result(), rhs.result()))
if generate_evaluation_code:
self.generate_disposal_code(code)
else:
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def _check_byte_value(self, code, rhs):
# TODO: should we do this generally on downcasts, or just here?
assert rhs.type.is_int, repr(rhs.type)
value_code = rhs.result()
if rhs.has_constant_result():
if 0 <= rhs.constant_result < 256:
return value_code
needs_cast = True # make at least the C compiler happy
warning(rhs.pos,
"value outside of range(0, 256)"
" when assigning to byte: %s" % rhs.constant_result,
level=1)
else:
needs_cast = rhs.type != PyrexTypes.c_uchar_type
if not self.nogil:
conditions = []
if rhs.is_literal or rhs.type.signed:
conditions.append('%s < 0' % value_code)
if (rhs.is_literal or not
(rhs.is_temp and rhs.type in (
PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
PyrexTypes.c_schar_type))):
conditions.append('%s > 255' % value_code)
if conditions:
code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
code.putln(
'PyErr_SetString(PyExc_ValueError,'
' "byte must be in range(0, 256)"); %s' %
code.error_goto(self.pos))
code.putln("}")
if needs_cast:
value_code = '((unsigned char)%s)' % value_code
return value_code
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.generate_subexpr_evaluation_code(code)
#if self.type.is_pyobject:
if self.index.type.is_int:
function = "__Pyx_DelItemInt"
index_code = self.index.result()
code.globalstate.use_utility_code(
UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_DelItem"
else:
function = "PyObject_DelItem"
code.putln(
"if (%s(%s, %s%s) < 0) %s" % (
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
code.error_goto(self.pos)))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def buffer_entry(self):
from . import Buffer, MemoryView
base = self.base
if self.base.is_nonecheck:
base = base.arg
if base.is_name:
entry = base.entry
else:
# SimpleCallNode is_simple is not consistent with coerce_to_simple
assert base.is_simple() or base.is_temp
cname = base.result()
entry = Symtab.Entry(cname, cname, self.base.type, self.base.pos)
if entry.type.is_buffer:
buffer_entry = Buffer.BufferEntry(entry)
else:
buffer_entry = MemoryView.MemoryViewSliceBufferEntry(entry)
return buffer_entry
def buffer_lookup_code(self, code):
"ndarray[1, 2, 3] and memslice[1, 2, 3]"
# Assign indices to temps
index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False)
for i in self.indices]
for temp, index in zip(index_temps, self.indices):
code.putln("%s = %s;" % (temp, index.result()))
# Generate buffer access code using these temps
from . import Buffer
buffer_entry = self.buffer_entry()
if buffer_entry.type.is_buffer:
negative_indices = buffer_entry.type.negative_indices
else:
negative_indices = Buffer.buffer_defaults['negative_indices']
return buffer_entry, Buffer.put_buffer_lookup_code(
entry=buffer_entry,
index_signeds=[i.type.signed for i in self.indices],
index_cnames=index_temps,
directives=code.globalstate.directives,
pos=self.pos, code=code,
negative_indices=negative_indices,
in_nogil_context=self.in_nogil_context)
def put_memoryviewslice_slice_code(self, code):
"memslice[:]"
buffer_entry = self.buffer_entry()
have_gil = not self.in_nogil_context
have_slices = False
it = iter(self.indices)
for index in self.original_indices:
is_slice = isinstance(index, SliceNode)
have_slices = have_slices or is_slice
if is_slice:
if not index.start.is_none:
index.start = next(it)
if not index.stop.is_none:
index.stop = next(it)
if not index.step.is_none:
index.step = next(it)
else:
next(it)
assert not list(it)
buffer_entry.generate_buffer_slice_code(code, self.original_indices,
self.result(),
have_gil=have_gil,
have_slices=have_slices,
directives=code.globalstate.directives)
def generate_memoryviewslice_setslice_code(self, rhs, code):
"memslice1[...] = memslice2 or memslice1[:] = memslice2"
from . import MemoryView
MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
def generate_memoryviewslice_assign_scalar_code(self, rhs, code):
"memslice1[...] = 0.0 or memslice1[:] = 0.0"
from . import MemoryView
MemoryView.assign_scalar(self, rhs, code)
class SliceIndexNode(ExprNode):
# 2-element slice indexing
#
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
# slice ExprNode or None constant slice object
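    #
    # Editorial note: handles two-operand slices such as s[1:3] or arr[:n];
    # slices with an explicit step (s[::2]) are parsed as an IndexNode over a
    # SliceNode instead.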
subexprs = ['base', 'start', 'stop', 'slice']
slice = None
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception, e:
self.compile_time_value_error(e)
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, getting=False)
# when assigning, we must accept any Python type
if node.type.is_pyobject:
node.type = py_object_type
return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
if self.start:
self.start = self.start.coerce_to(c_int, env)
if self.stop:
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
def coerce_to(self, dst_type, env):
if ((self.base.type.is_string or self.base.type.is_cpp_string)
and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
if (dst_type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(self.pos,
"default encoding required for conversion from '%s' to '%s'" %
(self.base.type, dst_type))
self.type = dst_type
return super(SliceIndexNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
if not self.type.is_pyobject:
error(self.pos,
"Slicing is not currently supported for '%s'." % self.type)
return
base_result = self.base.result()
result = self.result()
start_code = self.start_code()
stop_code = self.stop_code()
if self.base.type.is_string:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_char_ptr_type:
base_result = '((const char*)%s)' % base_result
if self.type is bytearray_type:
type_name = 'ByteArray'
else:
type_name = self.type.name.title()
if self.stop is None:
code.putln(
"%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
result,
type_name,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
result,
type_name,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type.is_pyunicode_ptr:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
base_result = '((const Py_UNICODE*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
result,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
result,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type is unicode_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
code.putln(
"%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
result,
base_result,
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
elif self.type is py_object_type:
code.globalstate.use_utility_code(self.get_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.putln(
"%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
result,
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound']),
code.error_goto_if_null(result, self.pos)))
else:
if self.base.type is list_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyList_GetSlice'
elif self.base.type is tuple_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyTuple_GetSlice'
else:
cfunc = 'PySequence_GetSlice'
code.putln(
"%s = %s(%s, %s, %s); %s" % (
result,
cfunc,
self.base.py_result(),
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
code.put_gotref(self.py_result())
def generate_assignment_code(self, rhs, code):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
rhs.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
else:
start_offset = ''
if self.start:
start_offset = self.start_code()
if start_offset == '0':
start_offset = ''
else:
start_offset += '+'
if rhs.type.is_array:
array_length = rhs.type.size
self.generate_slice_guard_code(code, array_length)
else:
error(self.pos,
"Slice assignments from pointers are not yet supported.")
# FIXME: fix the array size according to start/stop
array_length = self.base.type.size
for i in range(array_length):
code.putln("%s[%s%s] = %s[%d];" % (
self.base.result(), start_offset, i,
rhs.result(), i))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if not self.base.type.is_pyobject:
error(self.pos,
"Deleting slices is only supported for Python types, not '%s'." % self.type)
return
self.generate_subexpr_evaluation_code(code)
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def get_slice_config(self):
has_c_start, c_start, py_start = False, '0', 'NULL'
if self.start:
has_c_start = not self.start.type.is_pyobject
if has_c_start:
c_start = self.start.result()
else:
py_start = '&%s' % self.start.py_result()
has_c_stop, c_stop, py_stop = False, '0', 'NULL'
if self.stop:
has_c_stop = not self.stop.type.is_pyobject
if has_c_stop:
c_stop = self.stop.result()
else:
py_stop = '&%s' % self.stop.py_result()
py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
return (has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice)
def generate_slice_guard_code(self, code, target_size):
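        # Editorial note: checks that the slice length matches the length of
        # the RHS C array -- at compile time when both are constants, otherwise
        # via an emitted runtime check that raises ValueError.  A construct
        # that ends up here:
        #
        #     cdef int a[5], b[3]
        #     a[1:4] = b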
if not self.base.type.is_array:
return
slice_size = self.base.type.size
start = stop = None
if self.stop:
stop = self.stop.result()
try:
stop = int(stop)
if stop < 0:
slice_size = self.base.type.size + stop
else:
slice_size = stop
stop = None
except ValueError:
pass
if self.start:
start = self.start.result()
try:
start = int(start)
if start < 0:
start = self.base.type.size + start
slice_size -= start
start = None
except ValueError:
pass
check = None
if slice_size < 0:
if target_size > 0:
error(self.pos, "Assignment to empty slice.")
elif start is None and stop is None:
# we know the exact slice length
if target_size != slice_size:
error(self.pos, "Assignment to slice of wrong length, expected %d, got %d" % (
slice_size, target_size))
elif start is not None:
if stop is None:
stop = slice_size
check = "(%s)-(%s)" % (stop, start)
else: # stop is not None:
check = stop
if check:
code.putln("if (unlikely((%s) != %d)) {" % (check, target_size))
code.putln('PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length, expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d", (Py_ssize_t)%d, (Py_ssize_t)(%s));' % (
target_size, check))
code.putln(code.error_goto(self.pos))
code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
def calculate_result_code(self):
# self.result() is not used, but this method must exist
return "<unused>"
class SliceNode(ExprNode):
# start:stop:step in subscript list
#
# start ExprNode
# stop ExprNode
# step ExprNode
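    #
    # Editorial example: the '1:10:2' part of obj[1:10:2]; omitted components
    # arrive here as NoneNode.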
subexprs = ['start', 'stop', 'step']
type = slice_type
is_temp = 1
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception, e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
start = self.start.analyse_types(env)
stop = self.stop.analyse_types(env)
step = self.step.analyse_types(env)
self.start = start.coerce_to_pyobject(env)
self.stop = stop.coerce_to_pyobject(env)
self.step = step.coerce_to_pyobject(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
gil_message = "Constructing Python slice object"
def calculate_result_code(self):
return self.result_code
def generate_result_code(self, code):
if self.is_literal:
self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PySlice_New(%s, %s, %s); %s" % (
self.result(),
self.start.py_result(),
self.stop.py_result(),
self.step.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if self.is_literal:
code.put_giveref(self.py_result())
def __deepcopy__(self, memo):
"""
There is a copy bug in python 2.4 for slice objects.
"""
return SliceNode(
self.pos,
start=copy.deepcopy(self.start, memo),
stop=copy.deepcopy(self.stop, memo),
step=copy.deepcopy(self.step, memo),
is_temp=self.is_temp,
is_literal=self.is_literal,
constant_result=self.constant_result)
class CallNode(ExprNode):
# allow overriding the default 'may_be_none' behaviour
may_return_none = None
def infer_type(self, env):
function = self.function
func_type = function.infer_type(env)
if isinstance(function, NewExprNode):
# note: needs call to infer_type() above
return PyrexTypes.CPtrType(function.class_type)
if func_type is py_object_type:
# function might have lied for safety => try to find better type
entry = getattr(function, 'entry', None)
if entry is not None:
func_type = entry.type or func_type
if func_type.is_ptr:
func_type = func_type.base_type
if func_type.is_cfunction:
return func_type.return_type
elif func_type is type_type:
if function.is_name and function.entry and function.entry.type:
result_type = function.entry.type
if result_type.is_extension_type:
return result_type
elif result_type.is_builtin_type:
if function.entry.name == 'float':
return PyrexTypes.c_double_type
elif function.entry.name in Builtin.types_that_construct_their_instance:
return result_type
return py_object_type
def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code is merged in, to handle the
        # case of function overloading.
return self.function.type_dependencies(env)
def is_simple(self):
# C function calls could be considered simple, but they may
# have side-effects that may hit when multiple operations must
# be effected in order, e.g. when constructing the argument
# sequence for a function call or comparing values.
return False
def may_be_none(self):
if self.may_return_none is not None:
return self.may_return_none
func_type = self.function.type
if func_type is type_type and self.function.is_name:
entry = self.function.entry
if entry.type.is_extension_type:
return False
if (entry.type.is_builtin_type and
entry.name in Builtin.types_that_construct_their_instance):
return False
return ExprNode.may_be_none(self)
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
args, kwds = self.explicit_args_kwds()
items = []
for arg, member in zip(args, type.scope.var_entries):
items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
if kwds:
items += kwds.key_value_pairs
self.key_value_pairs = items
self.__class__ = DictNode
self.analyse_types(env) # FIXME
self.coerce_to(type, env)
return True
elif type and type.is_cpp_class:
self.args = [ arg.analyse_types(env) for arg in self.args ]
constructor = type.scope.lookup("<init>")
self.function = RawCNameExprNode(self.function.pos, constructor.type)
self.function.entry = constructor
self.function.set_cname(type.declaration_code(""))
self.analyse_c_function_call(env)
self.type = type
return True
def is_lvalue(self):
return self.type.is_reference
def nogil_check(self, env):
func_type = self.function_type()
if func_type.is_pyobject:
self.gil_error()
elif not getattr(func_type, 'nogil', False):
self.gil_error()
gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
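    #
    # Editorial note: covers plain calls such as f(a, b) or obj.method(x);
    # calls that use keyword, * or ** arguments are handled by GeneralCallNode.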
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
wrapper_call = False
has_optional_args = False
nogil = False
analysed = False
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
try:
return function(*args)
except Exception, e:
self.compile_time_value_error(e)
def analyse_as_type(self, env):
attr = self.function.as_cython_attribute()
if attr == 'pointer':
if len(self.args) != 1:
error(self.args.pos, "only one type allowed.")
else:
type = self.args[0].analyse_as_type(env)
if not type:
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
def explicit_args_kwds(self):
return self.args, None
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
if self.analysed:
return self
self.analysed = True
self.function.is_called = 1
self.function = self.function.analyse_types(env)
function = self.function
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
self.self = function.obj
function.obj = CloneNode(self.self)
func_type = self.function_type()
if func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env)
self.args = None
if func_type is Builtin.type_type and function.is_name and \
function.entry and \
function.entry.is_builtin and \
function.entry.name in Builtin.types_that_construct_their_instance:
# calling a builtin type that returns a specific object type
if function.entry.name == 'float':
# the following will come true later on in a transform
self.type = PyrexTypes.c_double_type
self.result_ctype = PyrexTypes.c_double_type
else:
self.type = Builtin.builtin_types[function.entry.name]
self.result_ctype = py_object_type
self.may_return_none = False
elif function.is_name and function.type_entry:
# We are calling an extension type constructor. As
# long as we do not support __new__(), the result type
# is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
self.analyse_c_function_call(env)
return self
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary. If the function has fused
# arguments, return the specific type.
func_type = self.function.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
def analyse_c_function_call(self, env):
func_type = self.function.type
if func_type is error_type:
self.type = error_type
return
if func_type.is_cfunction and func_type.is_static_method:
if self.self and self.self.type.is_extension_type:
# To support this we'd need to pass self to determine whether
# it was overloaded in Python space (possibly via a Cython
# superclass turning a cdef method into a cpdef one).
error(self.pos, "Cannot call a static method on an instance variable.")
args = self.args
elif self.self:
args = [self.self] + self.args
else:
args = self.args
if func_type.is_cpp_class:
overloaded_entry = self.function.type.scope.lookup("operator()")
if overloaded_entry is None:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
elif hasattr(self.function, 'entry'):
overloaded_entry = self.function.entry
elif (isinstance(self.function, IndexNode) and
self.function.is_fused_index):
overloaded_entry = self.function.type.entry
else:
overloaded_entry = None
if overloaded_entry:
if self.function.type.is_fused:
functypes = self.function.type.get_all_specialized_function_types()
alternatives = [f.entry for f in functypes]
else:
alternatives = overloaded_entry.all_alternatives()
entry = PyrexTypes.best_match(args, alternatives, self.pos, env)
if not entry:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
entry.used = True
self.function.entry = entry
self.function.type = entry.type
func_type = self.function_type()
else:
entry = None
func_type = self.function_type()
if not func_type.is_cfunction:
error(self.pos, "Calling non-function type '%s'" % func_type)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
# Check no. of args
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(args)
if func_type.optional_arg_count and expected_nargs != actual_nargs:
self.has_optional_args = 1
self.is_temp = 1
# check 'self' argument
if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
formal_arg = func_type.args[0]
arg = args[0]
if formal_arg.not_none:
if self.self:
self.self = self.self.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error='PyExc_AttributeError',
format_args=[entry.name])
else:
# unbound method
arg = arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[entry.name, formal_arg.type.name])
if self.self:
if formal_arg.accept_builtin_subtypes:
arg = CMethodSelfCloneNode(self.self)
else:
arg = CloneNode(self.self)
arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
elif formal_arg.type.is_builtin_type:
# special case: unbound methods of builtins accept subtypes
arg = arg.coerce_to(formal_arg.type, env)
if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
arg.exact_builtin_type = False
args[0] = arg
# Coerce arguments
some_args_in_temps = False
for i in xrange(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
arg = arg.as_none_safe_node(
"cannot pass None into a C function argument that is declared 'not None'")
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if i == 0 and self.self is not None:
# a method's cloned "self" argument is ok
pass
elif arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
args[i] = arg
# handle additional varargs parameters
for i in xrange(max_nargs, actual_nargs):
arg = args[i]
if arg.type.is_pyobject:
arg_ctype = arg.type.default_coerced_ctype()
if arg_ctype is None:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
else:
args[i] = arg = arg.coerce_to(arg_ctype, env)
if arg.is_temp and i > 0:
some_args_in_temps = True
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
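            # Rough illustration: if one argument is held in a temp (e.g. the
            # result of a nested function call) while a later argument is a
            # plain expression evaluated inside the C call itself, the temp is
            # filled before the call statement, so the two arguments can
            # effectively execute in a different order than written.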
for i in xrange(actual_nargs-1):
if i == 0 and self.self is not None:
continue # self is ok
arg = args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0 or i == 1 and self.self is not None: # skip first arg
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
self.args[:] = args
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
self.type = PyrexTypes.CPtrType(self.function.class_type)
else:
self.type = func_type.return_type
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
if self.type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
elif func_type.exception_value is not None \
or func_type.exception_check:
self.is_temp = 1
elif self.type.is_memoryviewslice:
self.is_temp = 1
# func_type.exception_check = True
# Called in 'nogil' context?
self.nogil = env.nogil
if (self.nogil and
func_type.exception_check and
func_type.exception_check != '+'):
env.use_utility_code(pyerr_occurred_withgil_utility_code)
# C++ exception handler
if func_type.exception_check == '+':
if func_type.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
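    # Note: func_type.exception_check == '+' above corresponds to functions
    # declared with C++ exception propagation, for example roughly
    #
    #     cdef extern from "vec.h":
    #         cdef cppclass Vec:
    #             int at(int i) except +
    #
    # for which generate_result_code() below wraps the call in a C++
    # try/catch block that converts the exception into a Python one.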
def calculate_result_code(self):
return self.c_call_code()
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
return "<error>"
formal_args = func_type.args
arg_list_code = []
args = list(zip(formal_args, self.args))
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(self.args)
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
return result
def generate_result_code(self, code):
func_type = self.function_type()
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
code.globalstate.use_utility_code(self.function.entry.utility_code)
if func_type.is_pyobject:
arg_code = self.arg_tuple.py_result()
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
self.function.py_result(),
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
actual_nargs = len(self.args)
expected_nargs = len(func_type.args) - func_type.optional_arg_count
self.opt_arg_struct = code.funcstate.allocate_temp(
func_type.op_arg_struct.base_type, manage_ref=True)
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
Naming.pyrex_prefix + "n",
len(self.args) - expected_nargs))
args = list(zip(func_type.args, self.args))
for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
func_type.opt_arg_cname(formal_arg.name),
actual_arg.result_as(formal_arg.type)))
exc_checks = []
if self.type.is_pyobject and self.is_temp:
exc_checks.append("!%s" % self.result())
elif self.type.is_memoryviewslice:
assert self.is_temp
exc_checks.append(self.type.error_condition(self.result()))
else:
exc_val = func_type.exception_value
exc_check = func_type.exception_check
if exc_val is not None:
exc_checks.append("%s == %s" % (self.result(), exc_val))
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
if self.result():
lhs = "%s = " % self.result()
if self.is_temp and self.type.is_pyobject:
#return_type = self.type # func_type.return_type
#print "SimpleCallNode.generate_result_code: casting", rhs, \
# "from", return_type, "to pyobject" ###
rhs = typecast(py_object_type, self.type, rhs)
else:
lhs = ""
if func_type.exception_check == '+':
if func_type.exception_value is None:
raise_py_exception = "__Pyx_CppExn2PyErr();"
elif func_type.exception_value.type.is_pyobject:
raise_py_exception = 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
func_type.exception_value.entry.cname,
func_type.exception_value.entry.cname)
else:
raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % func_type.exception_value.entry.cname
code.putln("try {")
code.putln("%s%s;" % (lhs, rhs))
code.putln("} catch(...) {")
if self.nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(raise_py_exception)
if self.nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
else:
if exc_checks:
goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
else:
goto_error = ""
code.putln("%s%s; %s" % (lhs, rhs, goto_error))
if self.type.is_pyobject and self.result():
code.put_gotref(self.py_result())
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
class InlinedDefNodeCallNode(CallNode):
# Inline call to defnode
#
# function PyCFunctionNode
# function_name NameNode
# args [ExprNode]
subexprs = ['args', 'function_name']
is_temp = 1
type = py_object_type
function = None
function_name = None
def can_be_inlined(self):
        func_type = self.function.def_node
if func_type.star_arg or func_type.starstar_arg:
return False
if len(func_type.args) != len(self.args):
return False
return True
def analyse_types(self, env):
self.function_name = self.function_name.analyse_types(env)
self.args = [ arg.analyse_types(env) for arg in self.args ]
func_type = self.function.def_node
actual_nargs = len(self.args)
# Coerce arguments
some_args_in_temps = False
for i in xrange(actual_nargs):
formal_type = func_type.args[i].type
arg = self.args[i].coerce_to(formal_type, env)
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
self.args[i] = arg
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in xrange(actual_nargs-1):
arg = self.args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0:
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
return self
def generate_result_code(self, code):
arg_code = [self.function_name.py_result()]
func_type = self.function.def_node
for arg, proto_arg in zip(self.args, func_type.args):
if arg.type.is_pyobject:
arg_code.append(arg.result_as(proto_arg.type))
else:
arg_code.append(arg.result())
arg_code = ', '.join(arg_code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
self.function.def_node.entry.pyfunc_cname,
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
subexprs = []
def __init__(self, pos, py_name, cname, func_type, utility_code = None):
ExprNode.__init__(self, pos, name=py_name, cname=cname,
type=func_type, utility_code=utility_code)
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if self.utility_code:
code.globalstate.use_utility_code(self.utility_code)
def calculate_result_code(self):
return self.cname
class PythonCapiCallNode(SimpleCallNode):
# Python C-API Function call (only created in transforms)
# By default, we assume that the call never returns None, as this
# is true for most C-API functions in CPython. If this does not
# apply to a call, set the following to True (or None to inherit
# the default behaviour).
may_return_none = False
def __init__(self, pos, function_name, func_type,
utility_code = None, py_name=None, **kwargs):
self.type = func_type.return_type
self.result_ctype = self.type
self.function = PythonCapiFunctionNode(
pos, py_name, function_name, func_type,
utility_code = utility_code)
# call this last so that we can override the constructed
# attributes above with explicit keyword arguments if required
SimpleCallNode.__init__(self, pos, **kwargs)
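    # Hypothetical usage sketch (the argument names below are illustrative
    # only): a transform might build such a node roughly like
    #
    #     func_type = PyrexTypes.CFuncType(
    #         Builtin.list_type, [
    #             PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None),
    #             ])
    #     node = PythonCapiCallNode(pos, "PySequence_List", func_type,
    #                               args=[arg], is_temp=True)
    #
    # mirroring the CFuncType construction used for _func_iternext_type
    # further down in this module.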
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
#
# function ExprNode
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
type = py_object_type
subexprs = ['function', 'positional_args', 'keyword_args']
nogil_check = Node.gil_error
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
positional_args = self.positional_args.compile_time_value(denv)
keyword_args = self.keyword_args.compile_time_value(denv)
try:
return function(*positional_args, **keyword_args)
except Exception, e:
self.compile_time_value_error(e)
def explicit_args_kwds(self):
if (self.keyword_args and not isinstance(self.keyword_args, DictNode) or
not isinstance(self.positional_args, TupleNode)):
raise CompileError(self.pos,
'Compile-time keyword arguments must be explicit.')
return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
function = self.function
if function.is_name and function.type_entry:
# We are calling an extension type constructor. As long
# as we do not support __new__(), the result type is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
return self
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
Returns self to try a Python call, None to report an error
or a SimpleCallNode if the mapping succeeds.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not isinstance(self.keyword_args, DictNode):
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
if arg.name ])
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from .UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
for i, arg in enumerate(kwargs.key_value_pairs) ])
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
def generate_result_code(self, code):
if self.type.is_error: return
if self.keyword_args:
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
kwargs,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
# Convert argument to tuple. Used for normalising
# the * argument of a function call.
#
# arg ExprNode
subexprs = ['arg']
def calculate_constant_result(self):
self.constant_result = tuple(self.arg.constant_result)
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
return tuple(arg)
except Exception, e:
self.compile_time_value_error(e)
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.type = tuple_type
self.is_temp = 1
return self
def may_be_none(self):
return False
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
code.putln(
"%s = PySequence_Tuple(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AttributeNode(ExprNode):
# obj.attribute
#
# obj ExprNode
# attribute string
    # needs_none_check boolean Used if obj is an extension type.
    # If set to False, the object is known not to be None and no check is generated.
#
# Used internally:
#
# is_py_attr boolean Is a Python getattr operation
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
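    # Rough illustration: access to a cdef attribute of an extension type or
    # to a struct/union member compiles to a plain C member access
    # ("obj->field" / "obj.field"), while other attributes fall back to a
    # Python attribute lookup generated as __Pyx_PyObject_GetAttrStr() in
    # generate_result_code().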
is_attribute = 1
subexprs = ['obj']
type = PyrexTypes.error_type
entry = None
is_called = 0
needs_none_check = True
is_memslice_transpose = False
is_special_lookup = False
def as_cython_attribute(self):
if (isinstance(self.obj, NameNode) and
self.obj.is_cython_module and not
self.attribute == u"parallel"):
return self.attribute
cy = self.obj.as_cython_attribute()
if cy:
return "%s.%s" % (cy, self.attribute)
return None
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a cpdef function
# we can create the corresponding attribute
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction and entry.as_variable:
# must be a cpdef function
self.is_temp = 1
self.entry = entry.as_variable
self.analyse_as_python_attribute(env)
return self
return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
return
self.constant_result = getattr(self.obj.constant_result, attr)
def compile_time_value(self, denv):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
error(self.pos,
"Invalid attribute name '%s' in compile-time expression" % attr)
return None
obj = self.obj.compile_time_value(denv)
try:
return getattr(obj, attr)
except Exception, e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
def infer_type(self, env):
# FIXME: this is way too redundant with analyse_types()
node = self.analyse_as_cimported_attribute_node(env, target=False)
if node is not None:
return node.entry.type
node = self.analyse_as_unbound_cmethod_node(env)
if node is not None:
return node.entry.type
obj_type = self.obj.infer_type(env)
self.analyse_attribute(env, obj_type=obj_type)
if obj_type.is_builtin_type and self.type.is_cfunction:
# special case: C-API replacements for C methods of
# builtin types cannot be inferred as C functions as
# that would prevent their use as bound methods
return py_object_type
return self.type
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, target = 1)
if node.type.is_const:
error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
return node
def analyse_types(self, env, target = 0):
self.initialized_check = env.directives['initializedcheck']
node = self.analyse_as_cimported_attribute_node(env, target)
if node is None and not target:
node = self.analyse_as_unbound_cmethod_node(env)
if node is None:
node = self.analyse_as_ordinary_attribute_node(env, target)
assert node is not None
if node.entry:
node.entry.used = True
if node.is_attribute:
node.wrap_obj_in_nonecheck(env)
return node
def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, returns
        # a corresponding NameNode, otherwise returns None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and (
entry.is_cglobal or entry.is_cfunction
or entry.is_type or entry.is_const):
return self.as_name_node(env, entry, target)
return None
def analyse_as_unbound_cmethod_node(self, env):
# Try to interpret this as a reference to an unbound
# C method of an extension type or builtin type. If successful,
# creates a corresponding NameNode and returns it, otherwise
# returns None.
if self.obj.is_string_literal:
return
type = self.obj.analyse_as_type(env)
if type and (type.is_extension_type or type.is_builtin_type or type.is_cpp_class):
entry = type.scope.lookup_here(self.attribute)
if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
if type.is_builtin_type:
if not self.is_called:
# must handle this as Python object
return None
ubcm_entry = entry
else:
# Create a temporary entry describing the C method
# as an ordinary function.
if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'):
cname = entry.func_cname
if entry.type.is_static_method:
ctype = entry.type
elif type.is_cpp_class:
error(self.pos, "%s not a static member of %s" % (entry.name, type))
ctype = PyrexTypes.error_type
else:
# Fix self type.
ctype = copy.copy(entry.type)
ctype.args = ctype.args[:]
ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
else:
cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
ctype = entry.type
ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
ubcm_entry.is_cfunction = 1
ubcm_entry.func_cname = entry.func_cname
ubcm_entry.is_unbound_cmethod = 1
return self.as_name_node(env, ubcm_entry, target=False)
return None
def analyse_as_type(self, env):
module_scope = self.obj.analyse_as_module(env)
if module_scope:
return module_scope.lookup_type(self.attribute)
if not self.obj.is_string_literal:
base_type = self.obj.analyse_as_type(env)
if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
return base_type.scope.lookup_type(self.attribute)
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type
# in a cimported module. Returns the extension type, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module
# in another cimported module. Returns the module scope, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.as_module:
return entry.as_module
return None
def as_name_node(self, env, entry, target):
# Create a corresponding NameNode from this node and complete the
# analyse_types phase.
node = NameNode.from_node(self, name=self.attribute, entry=entry)
if target:
node = node.analyse_target_types(env)
else:
node = node.analyse_rvalue_entry(env)
node.entry.used = 1
return node
def analyse_as_ordinary_attribute_node(self, env, target):
self.obj = self.obj.analyse_types(env)
self.analyse_attribute(env)
if self.entry and self.entry.is_cmethod and not self.is_called:
# error(self.pos, "C method can only be called")
pass
## Reference to C array turns into pointer to first element.
#while self.type.is_array:
# self.type = self.type.element_ptr_type()
if self.is_py_attr:
if not target:
self.is_temp = 1
self.result_ctype = py_object_type
elif target and self.obj.type.is_builtin_type:
error(self.pos, "Assignment to an immutable object field")
#elif self.type.is_memoryviewslice and not target:
# self.is_temp = True
return self
def analyse_attribute(self, env, obj_type = None):
# Look up attribute and set self.type and self.member.
immutable_obj = obj_type is not None # used during type inference
self.is_py_attr = 0
self.member = self.attribute
if obj_type is None:
if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
self.obj = self.obj.coerce_to_pyobject(env)
obj_type = self.obj.type
else:
if obj_type.is_string or obj_type.is_pyunicode_ptr:
obj_type = py_object_type
if obj_type.is_ptr or obj_type.is_array:
obj_type = obj_type.base_type
self.op = "->"
elif obj_type.is_extension_type or obj_type.is_builtin_type:
self.op = "->"
else:
self.op = "."
if obj_type.has_attributes:
if obj_type.attributes_known():
if (obj_type.is_memoryviewslice and not
obj_type.scope.lookup_here(self.attribute)):
if self.attribute == 'T':
self.is_memslice_transpose = True
self.is_temp = True
self.use_managed_ref = True
self.type = self.obj.type
return
else:
obj_type.declare_attribute(self.attribute, env, self.pos)
entry = obj_type.scope.lookup_here(self.attribute)
if entry and entry.is_member:
entry = None
else:
error(self.pos,
"Cannot select attribute of incomplete type '%s'"
% obj_type)
self.type = PyrexTypes.error_type
return
self.entry = entry
if entry:
if obj_type.is_extension_type and entry.name == "__weakref__":
error(self.pos, "Illegal use of special attribute __weakref__")
# def methods need the normal attribute lookup
# because they do not have struct entries
# fused function go through assignment synthesis
# (foo = pycfunction(foo_func_obj)) and need to go through
# regular Python lookup as well
if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
self.type = entry.type
self.member = entry.cname
return
else:
# If it's not a variable or C method, it must be a Python
# method of an extension type, so we treat it like a Python
# attribute.
pass
# If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
self.analyse_as_python_attribute(env, obj_type, immutable_obj)
def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
if obj_type is None:
obj_type = self.obj.type
# mangle private '__*' Python attributes used inside of a class
self.attribute = env.mangle_class_private_name(self.attribute)
self.member = self.attribute
self.type = py_object_type
self.is_py_attr = 1
if not obj_type.is_pyobject and not obj_type.is_error:
if obj_type.can_coerce_to_pyobject(env):
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
and self.obj.entry.as_variable
and self.obj.entry.as_variable.type.is_pyobject):
# might be an optimised builtin function => unpack it
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
else:
error(self.pos,
"Object of type '%s' has no attribute '%s'" %
(obj_type, self.attribute))
def wrap_obj_in_nonecheck(self, env):
if not env.directives['nonecheck']:
return
msg = None
format_args = ()
if (self.obj.type.is_extension_type and self.needs_none_check and not
self.is_py_attr):
msg = "'NoneType' object has no attribute '%s'"
format_args = (self.attribute,)
elif self.obj.type.is_memoryviewslice:
if self.is_memslice_transpose:
msg = "Cannot transpose None memoryview slice"
else:
entry = self.obj.type.scope.lookup_here(self.attribute)
if entry:
# copy/is_c_contig/shape/strides etc
msg = "Cannot access '%s' attribute of None memoryview slice"
format_args = (entry.name,)
if msg:
self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
format_args=format_args)
def nogil_check(self, env):
if self.is_py_attr:
self.gil_error()
elif self.type.is_memoryviewslice:
from . import MemoryView
MemoryView.err_if_nogil_initialized_check(self.pos, env, 'attribute')
gil_message = "Accessing Python attribute"
def is_simple(self):
if self.obj:
return self.result_in_temp() or self.obj.is_simple()
else:
return NameNode.is_simple(self)
def is_lvalue(self):
if self.obj:
return not self.type.is_array
else:
return NameNode.is_lvalue(self)
def is_ephemeral(self):
if self.obj:
return self.obj.is_ephemeral()
else:
return NameNode.is_ephemeral(self)
def calculate_result_code(self):
#print "AttributeNode.calculate_result_code:", self.member ###
#print "...obj node =", self.obj, "code", self.obj.result() ###
#print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
obj = self.obj
obj_code = obj.result_as(obj.type)
#print "...obj_code =", obj_code ###
if self.entry and self.entry.is_cmethod:
if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
if self.entry.final_func_cname:
return self.entry.final_func_cname
if self.type.from_fused:
# If the attribute was specialized through indexing, make
# sure to get the right fused name, as our entry was
# replaced by our parent index node
# (AnalyseExpressionsTransform)
self.member = self.entry.cname
return "((struct %s *)%s%s%s)->%s" % (
obj.type.vtabstruct_cname, obj_code, self.op,
obj.type.vtabslot_cname, self.member)
elif self.result_is_used:
return self.member
# Generating no code at all for unused access to optimised builtin
# methods fixes the problem that some optimisations only exist as
# macros, i.e. there is no function pointer to them, so we would
# generate invalid C code here.
return
elif obj.type.is_complex:
return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
else:
if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
# accessing a field of a builtin type, need to cast better than result_as() does
obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
return "%s%s%s" % (obj_code, self.op, self.member)
def generate_result_code(self, code):
if self.is_py_attr:
if self.is_special_lookup:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_LookupSpecial'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_GetAttrStr'
code.putln(
'%s = %s(%s, %s); %s' % (
self.result(),
lookup_func_name,
self.obj.py_result(),
code.intern_identifier(self.attribute),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.type.is_memoryviewslice:
if self.is_memslice_transpose:
# transpose the slice
for access, packing in self.type.axes:
if access == 'ptr':
error(self.pos, "Transposing not supported for slices "
"with indirect dimensions")
return
code.putln("%s = %s;" % (self.result(), self.obj.result()))
if self.obj.is_name or (self.obj.is_attribute and
self.obj.is_memslice_transpose):
code.put_incref_memoryviewslice(self.result(), have_gil=True)
T = "__pyx_memslice_transpose(&%s) == 0"
code.putln(code.error_goto_if(T % self.result(), self.pos))
elif self.initialized_check:
code.putln(
'if (unlikely(!%s.memview)) {'
'PyErr_SetString(PyExc_AttributeError,'
'"Memoryview is not initialized");'
'%s'
'}' % (self.result(), code.error_goto(self.pos)))
else:
# result_code contains what is needed, but we may need to insert
# a check and raise an exception
if self.obj.type.is_extension_type:
pass
elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
# C method implemented as function call with utility code
code.globalstate.use_utility_code(self.entry.utility_code)
def generate_disposal_code(self, code):
if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
# mirror condition for putting the memview incref here:
if self.obj.is_name or (self.obj.is_attribute and
self.obj.is_memslice_transpose):
code.put_xdecref_memoryviewslice(
self.result(), have_gil=True)
else:
ExprNode.generate_disposal_code(self, code)
def generate_assignment_code(self, rhs, code):
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute),
rhs.py_result()))
rhs.generate_disposal_code(code)
rhs.free_temps(code)
elif self.obj.type.is_complex:
code.putln("__Pyx_SET_C%s(%s, %s);" % (
self.member.upper(),
self.obj.result_as(self.obj.type),
rhs.result_as(self.ctype())))
else:
select_code = self.result()
if self.type.is_pyobject and self.use_managed_ref:
rhs.make_owned_reference(code)
code.put_giveref(rhs.py_result())
code.put_gotref(select_code)
code.put_decref(select_code, self.ctype())
elif self.type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_assign_to_memviewslice(
select_code, rhs, rhs.result(), self.type, code)
if not self.type.is_memoryviewslice:
code.putln(
"%s = %s;" % (
select_code,
rhs.result_as(self.ctype())))
#rhs.result()))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.obj.generate_evaluation_code(code)
if self.is_py_attr or (self.entry.scope.is_property_scope
and u'__del__' in self.entry.scope.entries):
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_DelAttrStr(%s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute)))
else:
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def annotate(self, code):
if self.is_py_attr:
style, text = 'py_attr', 'python attribute (%s)'
else:
style, text = 'c_attr', 'c attribute (%s)'
code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredTargetNode(ExprNode):
# A starred expression like "*a"
#
# This is only allowed in sequence assignment targets such as
#
# a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
#
# and will be removed during type analysis (or generate an error
# if it's found at unexpected places).
#
# target ExprNode
subexprs = ['target']
is_starred = 1
type = py_object_type
is_temp = 1
def __init__(self, pos, target):
ExprNode.__init__(self, pos)
self.target = target
def analyse_declarations(self, env):
error(self.pos, "can use starred expression only as assignment target")
self.target.analyse_declarations(env)
def analyse_types(self, env):
error(self.pos, "can use starred expression only as assignment target")
self.target = self.target.analyse_types(env)
self.type = self.target.type
return self
def analyse_target_declaration(self, env):
self.target.analyse_target_declaration(env)
def analyse_target_types(self, env):
self.target = self.target.analyse_target_types(env)
self.type = self.target.type
return self
def calculate_result_code(self):
return ""
def generate_result_code(self, code):
pass
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
return [arg.compile_time_value(denv) for arg in self.args]
def replace_starred_target_node(self):
# replace a starred node in the targets by the contained expression
self.starred_assignment = False
args = []
for arg in self.args:
if arg.is_starred:
if self.starred_assignment:
error(arg.pos, "more than 1 starred expression in assignment")
self.starred_assignment = True
arg = arg.target
arg.is_starred = True
args.append(arg)
self.args = args
def analyse_target_declaration(self, env):
self.replace_starred_target_node()
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
for i in range(len(self.args)):
arg = self.args[i]
if not skip_children: arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
if self.mult_factor:
self.mult_factor = self.mult_factor.analyse_types(env)
if not self.mult_factor.type.is_int:
self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
self.is_temp = 1
# not setting self.type here, subtypes do this
return self
def may_be_none(self):
return False
def analyse_target_types(self, env):
if self.mult_factor:
error(self.pos, "can't assign to multiplied sequence")
self.unpacked_items = []
self.coerced_unpacked_items = []
self.any_coerced_items = False
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_types(env)
if arg.is_starred:
if not arg.type.assignable_from(Builtin.list_type):
error(arg.pos,
"starred target must have Python object (list) type")
if arg.type is py_object_type:
arg.type = Builtin.list_type
unpacked_item = PyTempNode(self.pos, env)
coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
if unpacked_item is not coerced_unpacked_item:
self.any_coerced_items = True
self.unpacked_items.append(unpacked_item)
self.coerced_unpacked_items.append(coerced_unpacked_item)
self.type = py_object_type
return self
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_sequence_packing_code(self, code, target=None, plain=False):
if target is None:
target = self.result()
size_factor = c_mult = ''
mult_factor = None
if self.mult_factor and not plain:
mult_factor = self.mult_factor
if mult_factor.type.is_int:
c_mult = mult_factor.result()
                if isinstance(mult_factor.constant_result, (int, long)) \
and mult_factor.constant_result > 0:
size_factor = ' * %s' % mult_factor.constant_result
else:
size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
if self.type is Builtin.tuple_type and (self.is_literal or self.slow) and not c_mult:
# use PyTuple_Pack() to avoid generating huge amounts of one-time code
code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
target,
len(self.args),
', '.join([ arg.py_result() for arg in self.args ]),
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
else:
# build the tuple/list step by step, potentially multiplying it as we go
if self.type is Builtin.list_type:
create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
elif self.type is Builtin.tuple_type:
create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
else:
raise InternalError("sequence packing for unexpected type %s" % self.type)
arg_count = len(self.args)
code.putln("%s = %s(%s%s); %s" % (
target, create_func, arg_count, size_factor,
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
if c_mult:
# FIXME: can't use a temp variable here as the code may
# end up in the constant building function. Temps
# currently don't work there.
#counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
counter = Naming.quick_temp_cname
code.putln('{ Py_ssize_t %s;' % counter)
if arg_count == 1:
offset = counter
else:
offset = '%s * %s' % (counter, arg_count)
code.putln('for (%s=0; %s < %s; %s++) {' % (
counter, counter, c_mult, counter
))
else:
offset = ''
for i in xrange(arg_count):
arg = self.args[i]
if c_mult or not arg.result_in_temp():
code.put_incref(arg.result(), arg.ctype())
code.putln("%s(%s, %s, %s);" % (
set_item_func,
target,
(offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
arg.py_result()))
code.put_giveref(arg.py_result())
if c_mult:
code.putln('}')
#code.funcstate.release_temp(counter)
code.putln('}')
if mult_factor is not None and mult_factor.type.is_pyobject:
code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
Naming.quick_temp_cname, target, mult_factor.py_result(),
code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
))
code.put_gotref(Naming.quick_temp_cname)
code.put_decref(target, py_object_type)
code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
code.putln('}')
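    # Rough illustration: for a multiplied literal like "[x, y] * 3" with a
    # constant positive factor, the code above allocates the result with
    # PyList_New(2 * 3) and fills it in a C loop using an offset of
    # "counter * 2" per repetition; a Python-object factor is instead applied
    # afterwards via PyNumber_InPlaceMultiply().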
def generate_subexpr_disposal_code(self, code):
if self.mult_factor and self.mult_factor.type.is_int:
super(SequenceNode, self).generate_subexpr_disposal_code(code)
elif self.type is Builtin.tuple_type and (self.is_literal or self.slow):
super(SequenceNode, self).generate_subexpr_disposal_code(code)
else:
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
# in the tuple using a reference-stealing operation.
for arg in self.args:
arg.generate_post_assignment_code(code)
# Should NOT call free_temps -- this is invoked by the default
# generate_evaluation_code which will do that.
if self.mult_factor:
self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
else:
self.generate_parallel_assignment_code(rhs, code)
for item in self.unpacked_items:
item.release(code)
rhs.free_temps(code)
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
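    # Note: _func_iternext_type above models the C signature of CPython's
    # tp_iternext slot, roughly
    #
    #     PyObject *(*iternextfunc)(PyObject *);
    #
    # so that the slot pointer fetched via Py_TYPE(obj)->tp_iternext can be
    # held in a typed temp and called directly during unpacking below.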
def generate_parallel_assignment_code(self, rhs, code):
# Need to work around the fact that generate_evaluation_code
# allocates the temps in a rather hacky way -- the assignment
# is evaluated twice, within each if-block.
for item in self.unpacked_items:
item.allocate(code)
special_unpack = (rhs.type is py_object_type
or rhs.type in (tuple_type, list_type)
or not rhs.type.is_builtin_type)
long_enough_for_a_loop = len(self.unpacked_items) > 3
if special_unpack:
self.generate_special_parallel_unpacking_code(
code, rhs, use_loop=long_enough_for_a_loop)
else:
code.putln("{")
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
code.putln("}")
for value_node in self.coerced_unpacked_items:
value_node.generate_evaluation_code(code)
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
sequence_type_test = '1'
none_check = "likely(%s != Py_None)" % rhs.py_result()
if rhs.type is list_type:
sequence_types = ['List']
if rhs.may_be_none():
sequence_type_test = none_check
elif rhs.type is tuple_type:
sequence_types = ['Tuple']
if rhs.may_be_none():
sequence_type_test = none_check
else:
sequence_types = ['Tuple', 'List']
tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
code.putln("if (%s) {" % sequence_type_test)
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln("Py_ssize_t size = Py_SIZE(sequence);")
code.putln("#else")
code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
code.putln("#endif")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
code.putln(code.error_goto(self.pos))
code.putln("}")
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
# unpack items from list/tuple in unrolled loop (can't fail)
if len(sequence_types) == 2:
code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[0], i))
if len(sequence_types) == 2:
code.putln("} else {")
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[1], i))
code.putln("}")
for item in self.unpacked_items:
code.put_incref(item.result(), item.ctype())
code.putln("#else")
# in non-CPython, use the PySequence protocol (which can fail)
if not use_loop:
for i, item in enumerate(self.unpacked_items):
code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
item.result(), i,
code.error_goto_if_null(item.result(), self.pos)))
code.put_gotref(item.result())
else:
code.putln("{")
code.putln("Py_ssize_t i;")
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in self.unpacked_items])))
code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
code.error_goto_if_null('item', self.pos)))
code.put_gotref('item')
code.putln("*(temps[i]) = item;")
code.putln("}")
code.putln("}")
code.putln("#endif")
rhs.generate_disposal_code(code)
if sequence_type_test == '1':
code.putln("}") # all done
elif sequence_type_test == none_check:
# either tuple/list or None => save some code by generating the error directly
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
code.putln("}") # all done
else:
code.putln("} else {") # needs iteration fallback code
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=use_loop)
code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
if use_loop:
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in unpacked_items])))
iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
"%s = PyObject_GetIter(%s); %s" % (
iterator_temp,
rhs.py_result(),
code.error_goto_if_null(iterator_temp, self.pos)))
code.put_gotref(iterator_temp)
rhs.generate_disposal_code(code)
iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
iternext_func, iterator_temp))
unpacking_error_label = code.new_label('unpacking_failed')
unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
if use_loop:
code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
code.put_goto(unpacking_error_label)
code.put_gotref("item")
code.putln("*(temps[index]) = item;")
code.putln("}")
else:
for i, item in enumerate(unpacked_items):
code.put(
"index = %d; %s = %s; if (unlikely(!%s)) " % (
i,
item.result(),
unpack_code,
item.result()))
code.put_goto(unpacking_error_label)
code.put_gotref(item.py_result())
if terminate:
code.globalstate.use_utility_code(
UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
unpack_code,
len(unpacked_items)))
code.putln("%s = NULL;" % iternext_func)
code.put_decref_clear(iterator_temp, py_object_type)
unpacking_done_label = code.new_label('unpacking_done')
code.put_goto(unpacking_done_label)
code.put_label(unpacking_error_label)
code.put_decref_clear(iterator_temp, py_object_type)
code.putln("%s = NULL;" % iternext_func)
code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
code.putln(code.error_goto(self.pos))
code.put_label(unpacking_done_label)
code.funcstate.release_temp(iternext_func)
if terminate:
code.funcstate.release_temp(iterator_temp)
iterator_temp = None
return iterator_temp
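    # Hypothetical example for the starred-assignment path below:
    #
    #     a, *middle, b = some_iterable
    #
    # unpacks 'a' with the generic unpacking code above, collects the rest of
    # the iterator into a list via PySequence_List(), and then pops trailing
    # items ('b') off the end of that list for the targets to the right of
    # the starred target.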
def generate_starred_assignment_code(self, rhs, code):
for i, arg in enumerate(self.args):
if arg.is_starred:
starred_target = self.unpacked_items[i]
unpacked_fixed_items_left = self.unpacked_items[:i]
unpacked_fixed_items_right = self.unpacked_items[i+1:]
break
else:
assert False
iterator_temp = None
if unpacked_fixed_items_left:
for item in unpacked_fixed_items_left:
item.allocate(code)
code.putln('{')
iterator_temp = self.generate_generic_parallel_unpacking_code(
code, rhs, unpacked_fixed_items_left,
use_loop=True, terminate=False)
for i, item in enumerate(unpacked_fixed_items_left):
value_node = self.coerced_unpacked_items[i]
value_node.generate_evaluation_code(code)
code.putln('}')
starred_target.allocate(code)
target_list = starred_target.result()
code.putln("%s = PySequence_List(%s); %s" % (
target_list,
iterator_temp or rhs.py_result(),
code.error_goto_if_null(target_list, self.pos)))
code.put_gotref(target_list)
if iterator_temp:
code.put_decref_clear(iterator_temp, py_object_type)
code.funcstate.release_temp(iterator_temp)
else:
rhs.generate_disposal_code(code)
if unpacked_fixed_items_right:
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
len(unpacked_fixed_items_left), length_temp,
code.error_goto(self.pos)))
code.putln('}')
for item in unpacked_fixed_items_right[::-1]:
item.allocate(code)
for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
self.coerced_unpacked_items[::-1])):
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
# resize the list the hard way
code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
code.putln('#else')
code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
code.putln('#endif')
code.put_gotref(item.py_result())
coerced_arg.generate_evaluation_code(code)
code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
code.error_goto_if_null(sublist_temp, self.pos)))
code.put_gotref(sublist_temp)
code.funcstate.release_temp(length_temp)
code.put_decref(target_list, py_object_type)
code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
code.putln('#else')
code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
code.funcstate.release_temp(sublist_temp)
code.putln('#endif')
for i, arg in enumerate(self.args):
arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code)
class TupleNode(SequenceNode):
# Tuple constructor.
type = tuple_type
is_partly_literal = False
gil_message = "Constructing Python tuple"
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
node = self
node.is_temp = False
node.is_literal = True
else:
node = SequenceNode.analyse_types(self, env, skip_children)
for child in node.args:
if not child.is_literal:
break
else:
if not node.mult_factor or node.mult_factor.is_literal and \
isinstance(node.mult_factor.constant_result, (int, long)):
node.is_temp = False
node.is_literal = True
else:
if not node.mult_factor.type.is_pyobject:
node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
node.is_temp = True
node.is_partly_literal = True
return node
def is_simple(self):
# either temp or constant => always simple
return True
def nonlocally_immutable(self):
# either temp or constant => always safe
return True
def calculate_result_code(self):
if len(self.args) > 0:
return self.result_code
else:
return Naming.empty_tuple
def calculate_constant_result(self):
self.constant_result = tuple([
arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = self.compile_time_value_list(denv)
try:
return tuple(values)
except Exception, e:
self.compile_time_value_error(e)
def generate_operation_code(self, code):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
if self.is_partly_literal:
# underlying tuple is const, but factor is not
tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
const_code = code.get_cached_constants_writer()
const_code.mark_pos(self.pos)
self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
const_code.put_giveref(tuple_target)
code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
self.result(), tuple_target, self.mult_factor.py_result(),
code.error_goto_if_null(self.result(), self.pos)
))
code.put_gotref(self.py_result())
elif self.is_literal:
# non-empty cached tuple => result is global constant,
# creation code goes into separate code writer
self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
self.generate_sequence_packing_code(code)
code.put_giveref(self.py_result())
else:
self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
# List constructor.
# obj_conversion_errors [PyrexError] used internally
    # original_args [ExprNode] used internally
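    # For example, "[a, b, 3]" builds a Python list at runtime.  In Cython, a
    # list literal can also be coerced to a C array or struct value when the
    # assignment target requires it (see coerce_to below).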
obj_conversion_errors = []
type = list_type
in_module_scope = False
gil_message = "Constructing Python list"
def type_dependencies(self, env):
return ()
def infer_type(self, env):
        # TODO: Infer non-object list arrays.
return list_type
def analyse_expressions(self, env):
node = SequenceNode.analyse_expressions(self, env)
return node.coerce_to_pyobject(env)
def analyse_types(self, env):
hold_errors()
self.original_args = list(self.args)
node = SequenceNode.analyse_types(self, env)
node.obj_conversion_errors = held_errors()
release_errors(ignore=True)
if env.is_module_scope:
self.in_module_scope = True
return node
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
elif self.mult_factor:
error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
elif dst_type.is_ptr and dst_type.base_type is not PyrexTypes.c_void_type:
base_type = dst_type.base_type
self.type = PyrexTypes.CArrayType(base_type, len(self.args))
for i in range(len(self.original_args)):
arg = self.args[i]
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(base_type, env)
elif dst_type.is_struct:
if len(self.args) > len(dst_type.scope.var_entries):
error(self.pos, "Too may members for '%s'" % dst_type)
else:
if len(self.args) < len(dst_type.scope.var_entries):
warning(self.pos, "Too few members for '%s'" % dst_type, 1)
for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(member.type, env)
self.type = dst_type
else:
self.type = error_type
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
return self
def as_tuple(self):
t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, list):
t.constant_result = tuple(self.constant_result)
return t
def allocate_temp_result(self, code):
if self.type.is_array and self.in_module_scope:
self.temp_code = code.funcstate.allocate_temp(
self.type, manage_ref=False, static=True)
else:
SequenceNode.allocate_temp_result(self, code)
def release_temp_result(self, env):
if self.type.is_array:
# To be valid C++, we must allocate the memory on the stack
# manually and be sure not to reuse it for something else.
pass
else:
SequenceNode.release_temp_result(self, env)
def calculate_constant_result(self):
if self.mult_factor:
raise ValueError() # may exceed the compile time memory
self.constant_result = [
arg.constant_result for arg in self.args]
def compile_time_value(self, denv):
l = self.compile_time_value_list(denv)
if self.mult_factor:
l *= self.mult_factor.compile_time_value(denv)
return l
def generate_operation_code(self, code):
if self.type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.generate_sequence_packing_code(code)
elif self.type.is_array:
for i, arg in enumerate(self.args):
code.putln("%s[%s] = %s;" % (
self.result(),
i,
arg.result()))
elif self.type.is_struct:
for arg, member in zip(self.args, self.type.scope.var_entries):
code.putln("%s.%s = %s;" % (
self.result(),
member.cname,
arg.result()))
else:
raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
# Abstract base class for ExprNodes that have their own local
# scope, such as generator expressions.
#
# expr_scope Scope the inner scope of the expression
subexprs = []
expr_scope = None
# does this node really have a local scope, e.g. does it leak loop
# variables or not? non-leaking Py3 behaviour is default, except
# for list comprehensions where the behaviour differs in Py2 and
# Py3 (set in Parsing.py based on parser context)
has_local_scope = True
def init_scope(self, outer_scope, expr_scope=None):
if expr_scope is not None:
self.expr_scope = expr_scope
elif self.has_local_scope:
self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
else:
self.expr_scope = None
def analyse_declarations(self, env):
self.init_scope(env)
def analyse_scoped_declarations(self, env):
# this is called with the expr_scope as env
pass
def analyse_types(self, env):
# no recursion here, the children will be analysed separately below
return self
def analyse_scoped_expressions(self, env):
# this is called with the expr_scope as env
return self
def generate_evaluation_code(self, code):
# set up local variables and free their references on exit
generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
if not self.has_local_scope or not self.expr_scope.var_entries:
# no local variables => delegate, done
generate_inner_evaluation_code(code)
return
code.putln('{ /* enter inner scope */')
py_entries = []
for entry in self.expr_scope.var_entries:
if not entry.in_closure:
code.put_var_declaration(entry)
if entry.type.is_pyobject and entry.used:
py_entries.append(entry)
if not py_entries:
# no local Python references => no cleanup required
generate_inner_evaluation_code(code)
code.putln('} /* exit inner scope */')
return
# must free all local Python references at each exit point
old_loop_labels = tuple(code.new_loop_labels())
old_error_label = code.new_error_label()
generate_inner_evaluation_code(code)
# normal (non-error) exit
for entry in py_entries:
code.put_var_decref(entry)
# error/loop body exit points
exit_scope = code.new_label('exit_scope')
code.put_goto(exit_scope)
for label, old_label in ([(code.error_label, old_error_label)] +
list(zip(code.get_loop_labels(), old_loop_labels))):
if code.label_used(label):
code.put_label(label)
for entry in py_entries:
code.put_var_decref(entry)
code.put_goto(old_label)
code.put_label(exit_scope)
code.putln('} /* exit inner scope */')
code.set_loop_labels(old_loop_labels)
code.error_label = old_error_label
class ComprehensionNode(ScopedExprNode):
# A list/set/dict comprehension
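    # For example, "[x * x for x in seq]", "{x for x in seq}" and
    # "{k: v for k, v in items}" are all compiled through this node; the
    # matching append node adds each element from inside the generated loop.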
child_attrs = ["loop"]
is_temp = True
def infer_type(self, env):
return self.type
def analyse_declarations(self, env):
self.append.target = self # this is used in the PyList_Append of the inner loop
self.init_scope(env)
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
def analyse_types(self, env):
if not self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def analyse_scoped_expressions(self, env):
if self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_operation_code(self, code):
if self.type is Builtin.list_type:
create_code = 'PyList_New(0)'
elif self.type is Builtin.set_type:
create_code = 'PySet_New(NULL)'
elif self.type is Builtin.dict_type:
create_code = 'PyDict_New()'
else:
raise InternalError("illegal type for comprehension: %s" % self.type)
code.putln('%s = %s; %s' % (
self.result(), create_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
self.loop.generate_execution_code(code)
def annotate(self, code):
self.loop.annotate(code)
class ComprehensionAppendNode(Node):
# Need to be careful to avoid infinite recursion:
# target must not be in child_attrs/subexprs
child_attrs = ['expr']
target = None
type = PyrexTypes.c_int_type
def analyse_expressions(self, env):
self.expr = self.expr.analyse_expressions(env)
if not self.expr.type.is_pyobject:
self.expr = self.expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
if self.target.type is list_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
function = "__Pyx_ListComp_Append"
elif self.target.type is set_type:
function = "PySet_Add"
else:
raise InternalError(
"Invalid type for comprehension node: %s" % self.target.type)
self.expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
function,
self.target.result(),
self.expr.result()
), self.pos))
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
child_attrs = ['key_expr', 'value_expr']
def analyse_expressions(self, env):
self.key_expr = self.key_expr.analyse_expressions(env)
if not self.key_expr.type.is_pyobject:
self.key_expr = self.key_expr.coerce_to_pyobject(env)
self.value_expr = self.value_expr.analyse_expressions(env)
if not self.value_expr.type.is_pyobject:
self.value_expr = self.value_expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
self.key_expr.generate_evaluation_code(code)
self.value_expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
self.target.result(),
self.key_expr.result(),
self.value_expr.result()
), self.pos))
self.key_expr.generate_disposal_code(code)
self.key_expr.free_temps(code)
self.value_expr.generate_disposal_code(code)
self.value_expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.key_expr.generate_function_definitions(env, code)
self.value_expr.generate_function_definitions(env, code)
def annotate(self, code):
self.key_expr.annotate(code)
self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ScopedExprNode):
# An inlined generator expression for which the result is
# calculated inside of the loop. This will only be created by
# transforms when replacing builtin calls on generator
# expressions.
#
# loop ForStatNode the for-loop, not containing any YieldExprNodes
# result_node ResultRefNode the reference to the result value temp
# orig_func String the name of the builtin function this node replaces
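    # For example, an optimisation transform may rewrite "sum(x*x for x in seq)"
    # so that the result is accumulated directly inside the generated loop
    # instead of going through a generator object.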
child_attrs = ["loop"]
loop_analysed = False
type = py_object_type
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
def may_be_none(self):
return False
def annotate(self, code):
self.loop.annotate(code)
def infer_type(self, env):
return self.result_node.infer_type(env)
def analyse_types(self, env):
if not self.has_local_scope:
self.loop_analysed = True
self.loop = self.loop.analyse_expressions(env)
self.type = self.result_node.type
self.is_temp = True
return self
def analyse_scoped_expressions(self, env):
self.loop_analysed = True
if self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def coerce_to(self, dst_type, env):
if self.orig_func == 'sum' and dst_type.is_numeric and not self.loop_analysed:
# We can optimise by dropping the aggregation variable and
# the add operations into C. This can only be done safely
# before analysing the loop body, after that, the result
# reference type will have infected expressions and
# assignments.
self.result_node.type = self.type = dst_type
return self
return super(InlinedGeneratorExpressionNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
self.result_node.result_code = self.result()
self.loop.generate_execution_code(code)
class SetNode(ExprNode):
# Set constructor.
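    # For example, a set literal such as "{a, b, c}" is built by creating an
    # empty set and adding each evaluated argument with PySet_Add (see below).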
type = set_type
subexprs = ['args']
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
return self
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = set([arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception, e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
for arg in self.args:
arg.generate_evaluation_code(code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for arg in self.args:
code.put_error_if_neg(
self.pos,
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
class DictNode(ExprNode):
# Dictionary constructor.
#
# key_value_pairs [DictItemNode]
# exclude_null_values [boolean] Do not add NULL values to dict
#
# obj_conversion_errors [PyrexError] used internally
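    # For example, "{'a': 1, 'b': 2}" creates an empty dict and sets each
    # key/value pair in turn.  In Cython, a dict literal can also be coerced
    # to a C struct or union value (see coerce_to below).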
subexprs = ['key_value_pairs']
is_temp = 1
exclude_null_values = False
type = dict_type
is_dict_literal = True
obj_conversion_errors = []
@classmethod
def from_pairs(cls, pos, pairs):
return cls(pos, key_value_pairs=[
DictItemNode(pos, key=k, value=v) for k, v in pairs])
def calculate_constant_result(self):
self.constant_result = dict([
item.constant_result for item in self.key_value_pairs])
def compile_time_value(self, denv):
pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.key_value_pairs]
try:
return dict(pairs)
except Exception, e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return ()
def infer_type(self, env):
        # TODO: Infer struct constructors.
return dict_type
def analyse_types(self, env):
hold_errors()
self.key_value_pairs = [ item.analyse_types(env)
for item in self.key_value_pairs ]
self.obj_conversion_errors = held_errors()
release_errors(ignore=True)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.release_errors()
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
elif dst_type.is_struct_or_union:
self.type = dst_type
if not dst_type.is_struct and len(self.key_value_pairs) != 1:
error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
for item in self.key_value_pairs:
if isinstance(item.key, CoerceToPyTypeNode):
item.key = item.key.arg
if not item.key.is_string_literal:
error(item.key.pos, "Invalid struct field identifier")
item.key = StringNode(item.key.pos, value="<error>")
else:
key = str(item.key.value) # converts string literals to unicode in Py3
member = dst_type.scope.lookup_here(key)
if not member:
error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
else:
value = item.value
if isinstance(value, CoerceToPyTypeNode):
value = value.arg
item.value = value.coerce_to(member.type, env)
else:
self.type = error_type
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
return self
def release_errors(self):
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
# Custom method used here because key-value
# pairs are evaluated and used one at a time.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
if self.type.is_pyobject:
self.release_errors()
code.putln(
"%s = PyDict_New(); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for item in self.key_value_pairs:
item.generate_evaluation_code(code)
if self.type.is_pyobject:
if self.exclude_null_values:
code.putln('if (%s) {' % item.value.py_result())
code.put_error_if_neg(self.pos,
"PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.py_result()))
if self.exclude_null_values:
code.putln('}')
else:
code.putln("%s.%s = %s;" % (
self.result(),
item.key.value,
item.value.result()))
item.generate_disposal_code(code)
item.free_temps(code)
def annotate(self, code):
for item in self.key_value_pairs:
item.annotate(code)
class DictItemNode(ExprNode):
# Represents a single item in a DictNode
#
# key ExprNode
# value ExprNode
subexprs = ['key', 'value']
nogil_check = None # Parent DictNode takes care of it
def calculate_constant_result(self):
self.constant_result = (
self.key.constant_result, self.value.constant_result)
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
self.value = self.value.coerce_to_pyobject(env)
return self
def generate_evaluation_code(self, code):
self.key.generate_evaluation_code(code)
self.value.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.key.generate_disposal_code(code)
self.value.generate_disposal_code(code)
def free_temps(self, code):
self.key.free_temps(code)
self.value.free_temps(code)
def __iter__(self):
return iter([self.key, self.value])
class SortedDictKeysNode(ExprNode):
# build sorted list of dict keys, e.g. for dir()
subexprs = ['arg']
is_temp = True
def __init__(self, arg):
ExprNode.__init__(self, arg.pos, arg=arg)
self.type = Builtin.list_type
def analyse_types(self, env):
arg = self.arg.analyse_types(env)
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node(
"'NoneType' object is not iterable")
self.arg = arg
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
dict_result = self.arg.py_result()
if self.arg.type is Builtin.dict_type:
function = 'PyDict_Keys'
else:
function = 'PyMapping_Keys'
code.putln('%s = %s(%s); %s' % (
self.result(), function, dict_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.put_error_if_neg(
self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
def get_py_mod_name(self, code):
return code.get_py_string_const(
self.module_name, identifier=True)
def get_py_qualified_name(self, code):
return code.get_py_string_const(
self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# bases ExprNode Base class tuple
# dict ExprNode Class dict (not owned by this node)
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
subexprs = ['bases', 'doc']
def analyse_types(self, env):
self.bases = self.bases.analyse_types(env)
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
if self.doc:
code.put_error_if_neg(self.pos,
'PyDict_SetItem(%s, %s, %s)' % (
self.dict.py_result(),
code.intern_identifier(
StringEncoding.EncodedString("__doc__")),
self.doc.py_result()))
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
code.putln(
'%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
self.result(),
self.bases.py_result(),
self.dict.py_result(),
cname,
qualname,
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
# Helper class used in the implementation of Python3+
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# dict ExprNode Class dict (not owned by this node)
# module_name EncodedString Name of defining module
# calculate_metaclass bool should call CalculateMetaclass()
# allow_py2_metaclass bool should look for Py2 metaclass
subexprs = []
def analyse_types(self, env):
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
cname = code.intern_identifier(self.name)
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = 'NULL'
if self.metaclass:
metaclass = self.metaclass.result()
else:
metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
code.putln(
'%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
self.result(),
metaclass,
cname,
self.bases.py_result(),
self.dict.py_result(),
mkw,
self.calculate_metaclass,
self.allow_py2_metaclass,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class KeywordArgsNode(ExprNode):
# Helper class for keyword arguments.
#
# starstar_arg DictNode
# keyword_args [DictItemNode]
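    # For example, a call like "f(x=1, **kwargs)" copies (or converts) the
    # ** mapping into a dict and then adds each explicit keyword argument,
    # raising an error if a keyword occurs twice.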
subexprs = ['starstar_arg', 'keyword_args']
is_temp = 1
type = dict_type
def calculate_constant_result(self):
result = dict(self.starstar_arg.constant_result)
for item in self.keyword_args:
key, value = item.constant_result
if key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
self.constant_result = result
def compile_time_value(self, denv):
result = self.starstar_arg.compile_time_value(denv)
pairs = [ (item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.keyword_args ]
try:
result = dict(result)
for key, value in pairs:
if key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
except Exception, e:
self.compile_time_value_error(e)
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return dict_type
def analyse_types(self, env):
arg = self.starstar_arg.analyse_types(env)
arg = arg.coerce_to_pyobject(env)
self.starstar_arg = arg.as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after ** must be a mapping, not NoneType')
self.keyword_args = [ item.analyse_types(env)
for item in self.keyword_args ]
return self
def may_be_none(self):
return False
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.starstar_arg.generate_evaluation_code(code)
if self.starstar_arg.type is not Builtin.dict_type:
# CPython supports calling functions with non-dicts, so do we
code.putln('if (likely(PyDict_Check(%s))) {' %
self.starstar_arg.py_result())
if self.keyword_args:
code.putln(
"%s = PyDict_Copy(%s); %s" % (
self.result(),
self.starstar_arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
else:
code.putln("%s = %s;" % (
self.result(),
self.starstar_arg.py_result()))
code.put_incref(self.result(), py_object_type)
if self.starstar_arg.type is not Builtin.dict_type:
code.putln('} else {')
code.putln(
"%s = PyObject_CallFunctionObjArgs("
"(PyObject*)&PyDict_Type, %s, NULL); %s" % (
self.result(),
self.starstar_arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.putln('}')
self.starstar_arg.generate_disposal_code(code)
self.starstar_arg.free_temps(code)
if not self.keyword_args:
return
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
for item in self.keyword_args:
item.generate_evaluation_code(code)
code.putln("if (unlikely(PyDict_GetItem(%s, %s))) {" % (
self.result(),
item.key.py_result()))
# FIXME: find out function name at runtime!
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
item.key.py_result(),
code.error_goto(self.pos)))
code.putln("}")
code.put_error_if_neg(self.pos,
"PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.py_result()))
item.generate_disposal_code(code)
item.free_temps(code)
def annotate(self, code):
self.starstar_arg.annotate(code)
for item in self.keyword_args:
item.annotate(code)
class PyClassMetaclassNode(ExprNode):
# Helper class holds Python3 metaclass object
#
# bases ExprNode Base class tuple (not owned by this node)
# mkw ExprNode Class keyword arguments (not owned by this node)
subexprs = []
def analyse_types(self, env):
self.type = py_object_type
self.is_temp = True
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
if self.mkw:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
self.bases.result(),
self.mkw.result())
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
self.bases.result())
code.putln(
"%s = %s; %s" % (
self.result(), call,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
# Helper class holds Python3 namespace object
#
    # All of these are not owned by this node
# metaclass ExprNode Metaclass object
# bases ExprNode Base class tuple
# mkw ExprNode Class keyword arguments
# doc ExprNode or None Doc string (owned)
subexprs = ['doc']
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
if self.doc:
doc_code = self.doc.result()
else:
doc_code = '(PyObject *) NULL'
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = '(PyObject *) NULL'
if self.metaclass:
metaclass = self.metaclass.result()
else:
metaclass = "(PyObject *) NULL"
code.putln(
"%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
self.result(),
metaclass,
self.bases.result(),
cname,
qualname,
mkw,
py_mod_name,
doc_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
# Initialize CyFunction.func_classobj
is_temp = True
type = py_object_type
subexprs = []
is_active = False
def analyse_expressions(self, env):
if self.is_active:
env.use_utility_code(
UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
return self
def generate_evaluation_code(self, code):
if self.is_active:
self.allocate_temp_result(code)
code.putln(
'%s = PyList_New(0); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
def generate_injection_code(self, code, classobj_cname):
if self.is_active:
code.putln('__Pyx_CyFunction_InitClassCell(%s, %s);' % (
self.result(), classobj_cname))
class ClassCellNode(ExprNode):
# Class Cell for noargs super()
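    # A zero-argument "super()" call needs the enclosing class object; this
    # node fetches it from the currently executing CyFunction or generator
    # and fails with a SystemError if the cell is empty.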
subexprs = []
is_temp = True
is_generator = False
type = py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if not self.is_generator:
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
self.result(),
Naming.self_cname))
else:
code.putln('%s = %s->classobj;' % (
self.result(), Naming.generator_cname))
code.putln(
'if (!%s) { PyErr_SetString(PyExc_SystemError, '
'"super(): empty __class__ cell"); %s }' % (
self.result(),
code.error_goto(self.pos)))
code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
# Helper class used in the implementation of Python
    # class definitions. Constructs a bound method
# object from a class and a function.
#
# function ExprNode Function object
# self_object ExprNode self object
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
self.type = py_object_type
self.is_temp = 1
return self
gil_message = "Constructing a bound method"
def generate_result_code(self, code):
code.putln(
"%s = PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
self.result(),
self.function.py_result(),
self.self_object.py_result(),
self.self_object.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
# Helper class used in the implementation of Python
# class definitions. Constructs an unbound method
# object from a class and a function.
#
# function ExprNode Function object
type = py_object_type
is_temp = 1
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
return self
def may_be_none(self):
return False
gil_message = "Constructing an unbound method"
def generate_result_code(self, code):
class_cname = code.pyclass_stack[-1].classobj.result()
code.putln(
"%s = PyMethod_New(%s, 0, %s); %s" % (
self.result(),
self.function.py_result(),
class_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# functions. Constructs a PyCFunction object
# from a PyMethodDef struct.
#
# pymethdef_cname string PyMethodDef structure
# self_object ExprNode or None
# binding bool
# def_node DefNode the Python function node
# module_name EncodedString Name of defining module
# code_object CodeObjectNode the PyCodeObject creator node
subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
'annotations_dict']
self_object = None
code_object = None
binding = False
def_node = None
defaults = None
defaults_struct = None
defaults_pyobjects = 0
defaults_tuple = None
defaults_kwdict = None
annotations_dict = None
type = py_object_type
is_temp = 1
specialized_cpdefs = None
is_specialization = False
@classmethod
def from_defnode(cls, node, binding):
return cls(node.pos,
def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
binding=binding or node.specialized_cpdefs,
specialized_cpdefs=node.specialized_cpdefs,
code_object=CodeObjectNode(node))
def analyse_types(self, env):
if self.binding:
self.analyse_default_args(env)
return self
def analyse_default_args(self, env):
"""
        Handle a function's non-literal default arguments.
"""
nonliteral_objects = []
nonliteral_other = []
default_args = []
default_kwargs = []
annotations = []
for arg in self.def_node.args:
if arg.default:
if not arg.default.is_literal:
arg.is_dynamic = True
if arg.type.is_pyobject:
nonliteral_objects.append(arg)
else:
nonliteral_other.append(arg)
else:
arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
if arg.kw_only:
default_kwargs.append(arg)
else:
default_args.append(arg)
if arg.annotation:
arg.annotation = arg.annotation.analyse_types(env)
if not arg.annotation.type.is_pyobject:
arg.annotation = arg.annotation.coerce_to_pyobject(env)
annotations.append((arg.pos, arg.name, arg.annotation))
if self.def_node.return_type_annotation:
annotations.append((self.def_node.return_type_annotation.pos,
StringEncoding.EncodedString("return"),
self.def_node.return_type_annotation))
if nonliteral_objects or nonliteral_other:
module_scope = env.global_scope()
cname = module_scope.next_id(Naming.defaults_struct_prefix)
scope = Symtab.StructOrUnionScope(cname)
self.defaults = []
for arg in nonliteral_objects:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=True)
self.defaults.append((arg, entry))
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=False)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
self.defaults_struct = scope
self.defaults_pyobjects = len(nonliteral_objects)
for arg, entry in self.defaults:
arg.default_value = '%s->%s' % (
Naming.dynamic_args_cname, entry.cname)
self.def_node.defaults_struct = self.defaults_struct.name
if default_args or default_kwargs:
if self.defaults_struct is None:
if default_args:
defaults_tuple = TupleNode(self.pos, args=[
arg.default for arg in default_args])
self.defaults_tuple = defaults_tuple.analyse_types(env)
if default_kwargs:
defaults_kwdict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
arg.pos,
key=IdentifierStringNode(arg.pos, value=arg.name),
value=arg.default)
for arg in default_kwargs])
self.defaults_kwdict = defaults_kwdict.analyse_types(env)
else:
if default_args:
defaults_tuple = DefaultsTupleNode(
self.pos, default_args, self.defaults_struct)
else:
defaults_tuple = NoneNode(self.pos)
if default_kwargs:
defaults_kwdict = DefaultsKwDictNode(
self.pos, default_kwargs, self.defaults_struct)
else:
defaults_kwdict = NoneNode(self.pos)
defaults_getter = Nodes.DefNode(
self.pos, args=[], star_arg=None, starstar_arg=None,
body=Nodes.ReturnStatNode(
self.pos, return_type=py_object_type,
value=TupleNode(
self.pos, args=[defaults_tuple, defaults_kwdict])),
decorators=None,
name=StringEncoding.EncodedString("__defaults__"))
defaults_getter.analyse_declarations(env)
defaults_getter = defaults_getter.analyse_expressions(env)
defaults_getter.body = defaults_getter.body.analyse_expressions(
defaults_getter.local_scope)
defaults_getter.py_wrapper_required = False
defaults_getter.pymethdef_required = False
self.def_node.defaults_getter = defaults_getter
if annotations:
annotations_dict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
pos, key=IdentifierStringNode(pos, value=name),
value=value)
for pos, name, value in annotations])
self.annotations_dict = annotations_dict.analyse_types(env)
def may_be_none(self):
return False
gil_message = "Constructing Python function"
def self_result_code(self):
if self.self_object is None:
self_result = "NULL"
else:
self_result = self.self_object.py_result()
return self_result
def generate_result_code(self, code):
if self.binding:
self.generate_cyfunction_code(code)
else:
self.generate_pycfunction_code(code)
def generate_pycfunction_code(self, code):
py_mod_name = self.get_py_mod_name(code)
code.putln(
'%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
self.result(),
self.pymethdef_cname,
self.self_result_code(),
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def generate_cyfunction_code(self, code):
if self.specialized_cpdefs:
def_node = self.specialized_cpdefs[0]
else:
def_node = self.def_node
if self.specialized_cpdefs or self.is_specialization:
code.globalstate.use_utility_code(
UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
constructor = "__pyx_FusedFunction_NewEx"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
constructor = "__Pyx_CyFunction_NewEx"
if self.code_object:
code_object_result = self.code_object.py_result()
else:
code_object_result = 'NULL'
flags = []
if def_node.is_staticmethod:
flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
elif def_node.is_classmethod:
flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
if def_node.local_scope.parent_scope.is_c_class_scope:
flags.append('__Pyx_CYFUNCTION_CCLASS')
if flags:
flags = ' | '.join(flags)
else:
flags = '0'
code.putln(
'%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
self.result(),
constructor,
self.pymethdef_cname,
flags,
self.get_py_qualified_name(code),
self.self_result_code(),
self.get_py_mod_name(code),
"PyModule_GetDict(%s)" % Naming.module_cname,
code_object_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if def_node.requires_classobj:
assert code.pyclass_stack, "pyclass_stack is empty"
class_node = code.pyclass_stack[-1]
code.put_incref(self.py_result(), py_object_type)
code.putln(
'PyList_Append(%s, %s);' % (
class_node.class_cell.result(),
self.result()))
code.put_giveref(self.py_result())
if self.defaults:
code.putln(
'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
self.result(), self.defaults_struct.name,
self.defaults_pyobjects, code.error_goto(self.pos)))
defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
self.defaults_struct.name, self.result())
for arg, entry in self.defaults:
arg.generate_assignment_code(code, target='%s->%s' % (
defaults, entry.cname))
if self.defaults_tuple:
code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
self.result(), self.defaults_tuple.py_result()))
if self.defaults_kwdict:
code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
self.result(), self.defaults_kwdict.py_result()))
if def_node.defaults_getter:
code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
self.result(), def_node.defaults_getter.entry.pyfunc_cname))
if self.annotations_dict:
code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
# Special PyCFunctionNode that depends on a closure class
#
binding = True
needs_self_code = True
def self_result_code(self):
if self.needs_self_code:
return "((PyObject*)%s)" % Naming.cur_scope_cname
return "NULL"
class CodeObjectNode(ExprNode):
# Create a PyCodeObject for a CyFunction instance.
#
# def_node DefNode the Python function node
# varnames TupleNode a tuple with all local variable names
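    # The code object contains no bytecode; it mainly records the function
    # name, file name, argument count and variable names so that tracebacks
    # and introspection report sensible values for compiled functions.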
subexprs = ['varnames']
is_temp = False
def __init__(self, def_node):
ExprNode.__init__(self, def_node.pos, def_node=def_node)
args = list(def_node.args)
# if we have args/kwargs, then the first two in var_entries are those
local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
self.varnames = TupleNode(
def_node.pos,
args=[IdentifierStringNode(arg.pos, value=arg.name)
for arg in args + local_vars],
is_temp=0,
is_literal=1)
def may_be_none(self):
return False
def calculate_result_code(self):
return self.result_code
def generate_result_code(self, code):
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
func = self.def_node
func_name = code.get_py_string_const(
func.name, identifier=True, is_str=False, unicode_value=func.name)
# FIXME: better way to get the module file path at module init time? Encoding to use?
file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8'))
file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
flags = []
if self.def_node.star_arg:
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
flags.append('CO_VARKEYWORDS')
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
self.result_code,
len(func.args) - func.num_kwonly_args, # argcount
func.num_kwonly_args, # kwonlyargcount (Py3 only)
len(self.varnames.args), # nlocals
'|'.join(flags) or '0', # flags
Naming.empty_bytes, # code
Naming.empty_tuple, # consts
Naming.empty_tuple, # names (FIXME)
self.varnames.result(), # varnames
Naming.empty_tuple, # freevars (FIXME)
Naming.empty_tuple, # cellvars (FIXME)
file_path_const, # filename
func_name, # name
self.pos[1], # firstlineno
Naming.empty_bytes, # lnotab
code.error_goto_if_null(self.result_code, self.pos),
))
class DefaultLiteralArgNode(ExprNode):
# CyFunction's literal argument default value
#
# Evaluate literal only once.
subexprs = []
is_literal = True
is_temp = False
def __init__(self, pos, arg):
super(DefaultLiteralArgNode, self).__init__(pos)
self.arg = arg
self.type = self.arg.type
self.evaluated = False
def analyse_types(self, env):
return self
def generate_result_code(self, code):
pass
def generate_evaluation_code(self, code):
if not self.evaluated:
self.arg.generate_evaluation_code(code)
self.evaluated = True
def result(self):
return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
# CyFunction's non-literal argument default value
subexprs = []
def __init__(self, pos, arg, defaults_struct):
super(DefaultNonLiteralArgNode, self).__init__(pos)
self.arg = arg
self.defaults_struct = defaults_struct
def analyse_types(self, env):
self.type = self.arg.type
self.is_temp = False
return self
def generate_result_code(self, code):
pass
def result(self):
return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
self.defaults_struct.name, Naming.self_cname,
self.defaults_struct.lookup(self.arg.name).cname)
class DefaultsTupleNode(TupleNode):
# CyFunction's __defaults__ tuple
def __init__(self, pos, defaults, defaults_struct):
args = []
for arg in defaults:
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
args.append(arg)
super(DefaultsTupleNode, self).__init__(pos, args=args)
class DefaultsKwDictNode(DictNode):
# CyFunction's __kwdefaults__ dict
def __init__(self, pos, defaults, defaults_struct):
items = []
for arg in defaults:
name = IdentifierStringNode(arg.pos, value=arg.name)
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
items.append(DictItemNode(arg.pos, key=name, value=arg))
super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
# Lambda expression node (only used as a function reference)
#
# args [CArgDeclNode] formal arguments
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
# lambda_name string a module-globally unique lambda name
# result_expr ExprNode
# def_node DefNode the underlying function 'def' node
child_attrs = ['def_node']
name = StringEncoding.EncodedString('<lambda>')
def analyse_declarations(self, env):
self.def_node.no_assignment_synthesis = True
self.def_node.pymethdef_required = True
self.def_node.analyse_declarations(env)
self.def_node.is_cyfunction = True
self.pymethdef_cname = self.def_node.entry.pymethdef_cname
env.add_lambda_def(self.def_node)
def analyse_types(self, env):
self.def_node = self.def_node.analyse_expressions(env)
return super(LambdaNode, self).analyse_types(env)
def generate_result_code(self, code):
self.def_node.generate_execution_code(code)
super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
# A generator expression, e.g. (i for i in range(10))
#
# Result is a generator.
#
# loop ForStatNode the for-loop, containing a YieldExprNode
# def_node DefNode the underlying generator 'def' node
name = StringEncoding.EncodedString('genexpr')
binding = False
def analyse_declarations(self, env):
super(GeneratorExpressionNode, self).analyse_declarations(env)
# No pymethdef required
self.def_node.pymethdef_required = False
self.def_node.py_wrapper_required = False
self.def_node.is_cyfunction = False
# Force genexpr signature
self.def_node.entry.signature = TypeSlots.pyfunction_noargs
def generate_result_code(self, code):
code.putln(
'%s = %s(%s); %s' % (
self.result(),
self.def_node.entry.pyfunc_cname,
self.self_result_code(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
# Yield expression node
#
# arg ExprNode the value to return from the generator
# label_num integer yield label number
    # is_yield_from boolean whether this is a YieldFromExprNode delegating to another generator
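    # For example, in "a = yield b": b is moved into the generator's return
    # value slot, live temporaries are saved into the closure, the generator
    # function returns, and on resumption the sent value becomes the result
    # of the yield expression (see generate_yield_code below).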
subexprs = ['arg']
type = py_object_type
label_num = 0
is_yield_from = False
def analyse_types(self, env):
if not self.label_num:
error(self.pos, "'yield' not supported here")
self.is_temp = 1
if self.arg is not None:
self.arg = self.arg.analyse_types(env)
if not self.arg.type.is_pyobject:
self.coerce_yield_argument(env)
return self
def coerce_yield_argument(self, env):
self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
if self.arg:
self.arg.generate_evaluation_code(code)
self.arg.make_owned_reference(code)
code.putln(
"%s = %s;" % (
Naming.retval_cname,
self.arg.result_as(py_object_type)))
self.arg.generate_post_assignment_code(code)
self.arg.free_temps(code)
else:
code.put_init_to_py_none(Naming.retval_cname, py_object_type)
self.generate_yield_code(code)
def generate_yield_code(self, code):
"""
Generate the code to return the argument in 'Naming.retval_cname'
and to continue at the yield label.
"""
label_num, label_name = code.new_yield_label()
code.use_label(label_name)
saved = []
code.funcstate.closure_temps.reset()
for cname, type, manage_ref in code.funcstate.temps_in_use():
save_cname = code.funcstate.closure_temps.allocate_temp(type)
saved.append((cname, save_cname, type))
if type.is_pyobject:
code.put_xgiveref(cname)
code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
code.put_xgiveref(Naming.retval_cname)
code.put_finish_refcount_context()
code.putln("/* return from generator, yielding value */")
code.putln("%s->resume_label = %d;" % (
Naming.generator_cname, label_num))
code.putln("return %s;" % Naming.retval_cname)
code.put_label(label_name)
for cname, save_cname, type in saved:
code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
if type.is_pyobject:
code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
code.put_xgotref(cname)
code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
if self.result_is_used:
self.allocate_temp_result(code)
code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
code.put_incref(self.result(), py_object_type)
class YieldFromExprNode(YieldExprNode):
# "yield from GEN" expression
is_yield_from = True
def coerce_yield_argument(self, env):
if not self.arg.type.is_string:
# FIXME: support C arrays and C++ iterators?
error(self.pos, "yielding from non-Python object not supported")
self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("YieldFrom", "Generator.c"))
self.arg.generate_evaluation_code(code)
code.putln("%s = __Pyx_Generator_Yield_From(%s, %s);" % (
Naming.retval_cname,
Naming.generator_cname,
self.arg.result_as(py_object_type)))
self.arg.generate_disposal_code(code)
self.arg.free_temps(code)
code.put_xgotref(Naming.retval_cname)
code.putln("if (likely(%s)) {" % Naming.retval_cname)
self.generate_yield_code(code)
code.putln("} else {")
# either error or sub-generator has normally terminated: return value => node result
if self.result_is_used:
# YieldExprNode has allocated the result temp for us
code.putln("%s = NULL;" % self.result())
code.putln("if (unlikely(__Pyx_PyGen_FetchStopIterationValue(&%s) < 0)) %s" % (
self.result(),
code.error_goto(self.pos)))
code.put_gotref(self.result())
else:
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration ||"
" PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("}")
class GlobalsExprNode(AtomicExprNode):
type = dict_type
is_temp = 1
def analyse_types(self, env):
env.use_utility_code(Builtin.globals_utility_code)
return self
gil_message = "Constructing globals dict"
def may_be_none(self):
return False
def generate_result_code(self, code):
code.putln('%s = __Pyx_Globals(); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class LocalsDictItemNode(DictItemNode):
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
if self.value.type.can_coerce_to_pyobject(env):
self.value = self.value.coerce_to_pyobject(env)
else:
self.value = None
return self
class FuncLocalsExprNode(DictNode):
def __init__(self, pos, env):
local_vars = sorted([
entry.name for entry in env.entries.values() if entry.name])
items = [LocalsDictItemNode(
pos, key=IdentifierStringNode(pos, value=var),
value=NameNode(pos, name=var, allow_null=True))
for var in local_vars]
DictNode.__init__(self, pos, key_value_pairs=items,
exclude_null_values=True)
def analyse_types(self, env):
node = super(FuncLocalsExprNode, self).analyse_types(env)
node.key_value_pairs = [ i for i in node.key_value_pairs
if i.value is not None ]
return node
class PyClassLocalsExprNode(AtomicExprNode):
def __init__(self, pos, pyclass_dict):
AtomicExprNode.__init__(self, pos)
self.pyclass_dict = pyclass_dict
def analyse_types(self, env):
self.type = self.pyclass_dict.type
self.is_temp = False
return self
def may_be_none(self):
return False
def result(self):
return self.pyclass_dict.result()
def generate_result_code(self, code):
pass
def LocalsExprNode(pos, scope_node, env):
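    # locals() has no single implementation: at module scope it is equivalent
    # to globals(), in a Python class body it is the class dict being built,
    # and inside a function it is a snapshot dict of the local variables.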
if env.is_module_scope:
return GlobalsExprNode(pos)
if env.is_py_class_scope:
return PyClassLocalsExprNode(pos, scope_node.dict)
return FuncLocalsExprNode(pos, env)
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
compile_time_unary_operators = {
'not': operator.not_,
'~': operator.inv,
'-': operator.neg,
'+': operator.pos,
}
class UnopNode(ExprNode):
# operator string
# operand ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when the operand is not a pyobject.
# - Check operand type and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
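    # For example, "-x" on a C double stays a plain C negation, "-obj" on a
    # Python object becomes a PyNumber_Negative call, and a C++ operand
    # dispatches to an overloaded operator if one is declared.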
subexprs = ['operand']
infix = True
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
self.constant_result = func(self.operand.constant_result)
def compile_time_value(self, denv):
func = compile_time_unary_operators.get(self.operator)
if not func:
error(self.pos,
"Unary '%s' not supported in compile-time expression"
% self.operator)
operand = self.operand.compile_time_value(denv)
try:
return func(operand)
except Exception, e:
self.compile_time_value_error(e)
def infer_type(self, env):
operand_type = self.operand.infer_type(env)
if operand_type.is_cpp_class or operand_type.is_ptr:
cpp_type = operand_type.find_cpp_operation_type(self.operator)
if cpp_type is not None:
return cpp_type
return self.infer_unop_type(env, operand_type)
def infer_unop_type(self, env, operand_type):
if operand_type.is_pyobject:
return py_object_type
else:
return operand_type
def may_be_none(self):
if self.operand.type and self.operand.type.is_builtin_type:
if self.operand.type is not type_type:
return False
return ExprNode.may_be_none(self)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
if self.is_py_operation():
self.coerce_operand_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
return self
def check_const(self):
return self.operand.check_const()
def is_py_operation(self):
return self.operand.type.is_pyobject
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def is_cpp_operation(self):
type = self.operand.type
return type.is_cpp_class
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
def generate_result_code(self, code):
if self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
def generate_py_operation_code(self, code):
function = self.py_operation_function(code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
function,
self.operand.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def type_error(self):
if not self.operand.type.is_error:
error(self.pos, "Invalid operand type for '%s' (%s)" %
(self.operator, self.operand.type))
self.type = PyrexTypes.error_type
def analyse_cpp_operation(self, env):
cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
if cpp_type is None:
error(self.pos, "'%s' operator not defined for %s" % (
self.operator, type))
self.type_error()
return
self.type = cpp_type
class NotNode(UnopNode):
# 'not' operator
#
# operand ExprNode
operator = '!'
type = PyrexTypes.c_bint_type
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
def compile_time_value(self, denv):
operand = self.operand.compile_time_value(denv)
try:
return not operand
except Exception, e:
self.compile_time_value_error(e)
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_bint_type
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
operand_type = self.operand.type
if operand_type.is_cpp_class:
cpp_type = operand_type.find_cpp_operation_type(self.operator)
if not cpp_type:
error(self.pos, "'!' operator not defined for %s" % operand_type)
self.type = PyrexTypes.error_type
return
self.type = cpp_type
else:
self.operand = self.operand.coerce_to_boolean(env)
return self
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
def generate_result_code(self, code):
pass
class UnaryPlusNode(UnopNode):
# unary '+' operator
operator = '+'
def analyse_c_operation(self, env):
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
def py_operation_function(self, code):
return "PyNumber_Positive"
def calculate_result_code(self):
if self.is_cpp_operation():
return "(+%s)" % self.operand.result()
else:
return self.operand.result()
class UnaryMinusNode(UnopNode):
# unary '-' operator
operator = '-'
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
if self.type.is_complex:
self.infix = False
def py_operation_function(self, code):
return "PyNumber_Negative"
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
else:
return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
def get_constant_c_result_code(self):
value = self.operand.get_constant_c_result_code()
if value:
return "(-%s)" % value
class TildeNode(UnopNode):
# unary '~' operator
def analyse_c_operation(self, env):
if self.operand.type.is_int:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
def py_operation_function(self, code):
return "PyNumber_Invert"
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
def is_py_operation(self):
return False
class DereferenceNode(CUnopNode):
# unary * operator
operator = '*'
def infer_unop_type(self, env, operand_type):
if operand_type.is_ptr:
return operand_type.base_type
else:
return PyrexTypes.error_type
def analyse_c_operation(self, env):
if self.operand.type.is_ptr:
self.type = self.operand.type.base_type
else:
self.type_error()
def calculate_result_code(self):
return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
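    # Plain Python source has no ++/-- operators; this node is typically
    # constructed for constructs such as cython.operator.preincrement(x)
    # and cython.operator.postincrement(x).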
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_ptr:
self.type = self.operand.type
else:
self.type_error()
def calculate_result_code(self):
if self.is_prefix:
return "(%s%s)" % (self.operator, self.operand.result())
else:
return "(%s%s)" % (self.operand.result(), self.operator)
def inc_dec_constructor(is_prefix, operator):
return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
class AmpersandNode(CUnopNode):
# The C address-of operator.
#
# operand ExprNode
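    # For example, the Cython expression "&x" yields a pointer to x; taking
    # the address of a Python object or of a non-lvalue is rejected in
    # analyse_types below.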
operator = '&'
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_ptr_type(operand_type)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
argtype = self.operand.type
if argtype.is_cpp_class:
cpp_type = argtype.find_cpp_operation_type(self.operator)
if cpp_type is not None:
self.type = cpp_type
return self
if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
if argtype.is_memoryviewslice:
self.error("Cannot take address of memoryview slice")
else:
self.error("Taking address of non-lvalue")
return self
if argtype.is_pyobject:
self.error("Cannot take address of Python variable")
return self
self.type = PyrexTypes.c_ptr_type(argtype)
return self
def check_const(self):
return self.operand.check_const_addr()
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
def calculate_result_code(self):
return "(&%s)" % self.operand.result()
def generate_result_code(self, code):
pass
unop_node_classes = {
"+": UnaryPlusNode,
"-": UnaryMinusNode,
"~": TildeNode,
}
def unop_node(pos, operator, operand):
    # Construct unop node of appropriate class for
# given operator.
if isinstance(operand, IntNode) and operator == '-':
return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
longness=operand.longness, unsigned=operand.unsigned)
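    # Note: the condition below is a chained comparison, i.e.
    # (operand.operator == operator) and (operator in '+-').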
elif isinstance(operand, UnopNode) and operand.operator == operator in '+-':
warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
return unop_node_classes[operator](pos,
operator = operator,
operand = operand)
class TypecastNode(ExprNode):
# C type cast
#
# operand ExprNode
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# typecheck boolean
#
    # If used from a transform, one can specify the attribute
    # "type" directly and leave base_type and declarator set to None
subexprs = ['operand']
base_type = declarator = type = None
def type_dependencies(self, env):
return ()
def infer_type(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
return self.type
def analyse_types(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
if self.operand.has_constant_result():
# Must be done after self.type is resolved.
self.calculate_constant_result()
if self.type.is_cfunction:
error(self.pos,
"Cannot cast to a function type")
self.type = PyrexTypes.error_type
self.operand = self.operand.analyse_types(env)
if self.type is PyrexTypes.c_bint_type:
# short circuit this to a coercion
return self.operand.coerce_to_boolean(env)
to_py = self.type.is_pyobject
from_py = self.operand.type.is_pyobject
if from_py and not to_py and self.operand.is_ephemeral():
if not self.type.is_numeric and not self.type.is_cpp_class:
error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
if to_py and not from_py:
if self.type is bytes_type and self.operand.type.is_int:
return CoerceIntToBytesNode(self.operand, env)
elif self.operand.type.can_coerce_to_pyobject(env):
self.result_ctype = py_object_type
base_type = self.base_type.analyse(env)
self.operand = self.operand.coerce_to(base_type, env)
else:
if self.operand.type.is_ptr:
if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast from pointers of primitive types")
else:
# Should this be an error?
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
self.operand = self.operand.coerce_to_simple(env)
elif from_py and not to_py:
if self.type.create_from_py_utility_code(env):
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_ptr:
if not (self.type.base_type.is_void or self.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast to pointers of primitive types")
else:
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
elif from_py and to_py:
if self.typecheck:
self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
elif isinstance(self.operand, SliceIndexNode):
# This cast can influence the created type of string slices.
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_complex and self.operand.type.is_complex:
self.operand = self.operand.coerce_to_simple(env)
elif self.operand.type.is_fused:
self.operand = self.operand.coerce_to(self.type, env)
#self.type = self.operand.type
return self
def is_simple(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_simple()
def is_ephemeral(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_ephemeral()
def nonlocally_immutable(self):
return self.is_temp or self.operand.nonlocally_immutable()
def nogil_check(self, env):
if self.type and self.type.is_pyobject and self.is_temp:
self.gil_error()
def check_const(self):
return self.operand.check_const()
def calculate_constant_result(self):
self.constant_result = self.calculate_result_code(self.operand.constant_result)
def calculate_result_code(self, operand_result = None):
if operand_result is None:
operand_result = self.operand.result()
if self.type.is_complex:
operand_result = self.operand.result()
if self.operand.type.is_complex:
real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
else:
real_part = self.type.real_type.cast_code(operand_result)
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
else:
return self.type.cast_code(operand_result)
def get_constant_c_result_code(self):
operand_result = self.operand.get_constant_c_result_code()
if operand_result:
return self.type.cast_code(operand_result)
def result_as(self, type):
if self.type.is_pyobject and not self.is_temp:
# Optimise away some unnecessary casting
return self.operand.result_as(type)
else:
return ExprNode.result_as(self, type)
def generate_result_code(self, code):
if self.is_temp:
code.putln(
"%s = (PyObject *)%s;" % (
self.result(),
self.operand.result()))
code.put_incref(self.result(), self.ctype())
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
"Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
"""
Used when a pointer of base_type is cast to a memoryviewslice with that
base type. i.e.
<int[:M:1, :N]> p
creates a fortran-contiguous cython.array.
We leave the type set to object so coercions to object are more efficient
and less work. Acquiring a memoryviewslice from this will be just as
efficient. ExprNode.coerce_to() will do the additional typecheck on
self.compile_time_type
This also handles <int[:, :]> my_c_array
operand ExprNode the thing we're casting
base_type_node MemoryViewSliceTypeNode the cast expression node
"""
subexprs = ['operand', 'shapes']
shapes = None
is_temp = True
mode = "c"
array_dtype = None
shape_type = PyrexTypes.c_py_ssize_t_type
def analyse_types(self, env):
from . import MemoryView
self.operand = self.operand.analyse_types(env)
if self.array_dtype:
array_dtype = self.array_dtype
else:
array_dtype = self.base_type_node.base_type_node.analyse(env)
axes = self.base_type_node.axes
MemoryView.validate_memslice_dtype(self.pos, array_dtype)
self.type = error_type
self.shapes = []
ndim = len(axes)
# Base type of the pointer or C array we are converting
base_type = self.operand.type
if not self.operand.type.is_ptr and not self.operand.type.is_array:
error(self.operand.pos, ERR_NOT_POINTER)
return self
# Dimension sizes of C array
array_dimension_sizes = []
if base_type.is_array:
while base_type.is_array:
array_dimension_sizes.append(base_type.size)
base_type = base_type.base_type
elif base_type.is_ptr:
base_type = base_type.base_type
else:
error(self.pos, "unexpected base type %s found" % base_type)
return self
if not (base_type.same_as(array_dtype) or base_type.is_void):
error(self.operand.pos, ERR_BASE_TYPE)
return self
elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
error(self.operand.pos,
"Expected %d dimensions, array has %d dimensions" %
(ndim, len(array_dimension_sizes)))
return self
# Verify the start, stop and step values
# In case of a C array, use the size of C array in each dimension to
# get an automatic cast
for axis_no, axis in enumerate(axes):
if not axis.start.is_none:
error(axis.start.pos, ERR_START)
return self
if axis.stop.is_none:
if array_dimension_sizes:
dimsize = array_dimension_sizes[axis_no]
axis.stop = IntNode(self.pos, value=str(dimsize),
constant_result=dimsize,
type=PyrexTypes.c_int_type)
else:
error(axis.pos, ERR_NOT_STOP)
return self
axis.stop = axis.stop.analyse_types(env)
shape = axis.stop.coerce_to(self.shape_type, env)
if not shape.is_literal:
shape.coerce_to_temp(env)
self.shapes.append(shape)
first_or_last = axis_no in (0, ndim - 1)
if not axis.step.is_none and first_or_last:
# '1' in the first or last dimension denotes F or C contiguity
axis.step = axis.step.analyse_types(env)
if (not axis.step.type.is_int and axis.step.is_literal and not
axis.step.type.is_error):
error(axis.step.pos, "Expected an integer literal")
return self
if axis.step.compile_time_value(env) != 1:
error(axis.step.pos, ERR_STEPS)
return self
if axis_no == 0:
self.mode = "fortran"
elif not axis.step.is_none and not first_or_last:
# step provided in some other dimension
error(axis.step.pos, ERR_STEPS)
return self
if not self.operand.is_name:
self.operand = self.operand.coerce_to_temp(env)
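        # Build the axis specs for the target memoryview slice: every dimension
        # is ('direct', 'follow') except the first (Fortran order) or last
        # (C order) one, which is marked ('direct', 'contig').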
axes = [('direct', 'follow')] * len(axes)
if self.mode == "fortran":
axes[0] = ('direct', 'contig')
else:
axes[-1] = ('direct', 'contig')
self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
self.type = self.get_cython_array_type(env)
MemoryView.use_cython_array_utility_code(env)
env.use_utility_code(MemoryView.typeinfo_to_format_code)
return self
def allocate_temp_result(self, code):
if self.temp_code:
            raise RuntimeError("temp allocated multiple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
def infer_type(self, env):
return self.get_cython_array_type(env)
def get_cython_array_type(self, env):
return env.global_scope().context.cython_scope.viewscope.lookup("array").type
def generate_result_code(self, code):
from . import Buffer
shapes = [self.shape_type.cast_code(shape.result())
for shape in self.shapes]
dtype = self.coercion_type.dtype
shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
format_temp = code.funcstate.allocate_temp(py_object_type, True)
itemsize = "sizeof(%s)" % dtype.declaration_code("")
type_info = Buffer.get_type_information_cname(code, dtype)
if self.operand.type.is_ptr:
code.putln("if (!%s) {" % self.operand.result())
            code.putln('PyErr_SetString(PyExc_ValueError,'
                       '"Cannot create cython.array from NULL pointer");')
code.putln(code.error_goto(self.operand.pos))
code.putln("}")
code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
(format_temp, type_info))
buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
shapes_temp, buildvalue_fmt, ", ".join(shapes)))
err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
shapes_temp,
format_temp)
code.putln(code.error_goto_if(err, self.pos))
code.put_gotref(format_temp)
code.put_gotref(shapes_temp)
tup = (self.result(), shapes_temp, itemsize, format_temp,
self.mode, self.operand.result())
code.putln('%s = __pyx_array_new('
'%s, %s, PyBytes_AS_STRING(%s), '
'(char *) "%s", (char *) %s);' % tup)
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.result())
def dispose(temp):
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
dispose(shapes_temp)
dispose(format_temp)
@classmethod
def from_carray(cls, src_node, env):
"""
Given a C array type, return a CythonArrayNode
"""
pos = src_node.pos
base_type = src_node.type
none_node = NoneNode(pos)
axes = []
while base_type.is_array:
axes.append(SliceNode(pos, start=none_node, stop=none_node,
step=none_node))
base_type = base_type.base_type
axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
base_type_node=base_type)
result = CythonArrayNode(pos, base_type_node=memslicenode,
operand=src_node, array_dtype=base_type)
result = result.analyse_types(env)
return result
class SizeofNode(ExprNode):
# Abstract base class for sizeof(x) expression nodes.
type = PyrexTypes.c_size_t_type
def check_const(self):
return True
def generate_result_code(self, code):
pass
class SizeofTypeNode(SizeofNode):
# C sizeof function applied to a type
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
subexprs = []
arg_type = None
def analyse_types(self, env):
# we may have incorrectly interpreted a dotted name as a type rather than an attribute
# this could be better handled by more uniformly treating types as runtime-available objects
if 0 and self.base_type.module_path:
path = self.base_type.module_path
obj = env.lookup(path[0])
if obj.as_module is None:
operand = NameNode(pos=self.pos, name=path[0])
for attr in path[1:]:
operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
self.operand = operand
self.__class__ = SizeofVarNode
node = self.analyse_types(env)
return node
if self.arg_type is None:
base_type = self.base_type.analyse(env)
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
self.check_type()
return self
def check_type(self):
arg_type = self.arg_type
if arg_type.is_pyobject and not arg_type.is_extension_type:
error(self.pos, "Cannot take sizeof Python object")
elif arg_type.is_void:
error(self.pos, "Cannot take sizeof void")
elif not arg_type.is_complete():
error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
def calculate_result_code(self):
if self.arg_type.is_extension_type:
# the size of the pointer is boring
# we want the size of the actual struct
arg_code = self.arg_type.declaration_code("", deref=1)
else:
arg_code = self.arg_type.declaration_code("")
return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
# C sizeof function applied to a variable
#
# operand ExprNode
subexprs = ['operand']
def analyse_types(self, env):
# We may actually be looking at a type rather than a variable...
# If we are, traditional analysis would fail...
operand_as_type = self.operand.analyse_as_type(env)
if operand_as_type:
self.arg_type = operand_as_type
if self.arg_type.is_fused:
self.arg_type = self.arg_type.specialize(env.fused_to_specific)
self.__class__ = SizeofTypeNode
self.check_type()
else:
self.operand = self.operand.analyse_types(env)
return self
def calculate_result_code(self):
return "(sizeof(%s))" % self.operand.result()
def generate_result_code(self, code):
pass
class TypeofNode(ExprNode):
# Compile-time type of an expression, as a string.
#
# operand ExprNode
# literal StringNode # internal
literal = None
type = py_object_type
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
literal = StringNode(self.pos, value=value)
literal = literal.analyse_types(env)
self.literal = literal.coerce_to_pyobject(env)
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
def calculate_result_code(self):
return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
try:
matmul_operator = operator.matmul
except AttributeError:
def matmul_operator(a, b):
try:
func = a.__matmul__
except AttributeError:
func = b.__rmatmul__
return func(a, b)
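# operator.matmul only exists on Python 3.5+, so the fallback above emulates
# "a @ b" for older interpreters running the compiler.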
compile_time_binary_operators = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'is': operator.is_,
'is_not': operator.is_not,
'+': operator.add,
'&': operator.and_,
'/': operator.truediv,
'//': operator.floordiv,
'<<': operator.lshift,
'%': operator.mod,
'*': operator.mul,
'|': operator.or_,
'**': operator.pow,
'>>': operator.rshift,
'-': operator.sub,
'^': operator.xor,
'@': matmul_operator,
'in': lambda x, seq: x in seq,
'not_in': lambda x, seq: x not in seq,
}
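# Used by the compile_time_value() / calculate_constant_result() methods below
# for constant folding and for evaluating compile-time (e.g. DEF) expressions.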
def get_compile_time_binop(node):
func = compile_time_binary_operators.get(node.operator)
if not func:
error(node.pos,
"Binary '%s' not supported in compile-time expression"
% node.operator)
return func
class BinopNode(ExprNode):
# operator string
# operand1 ExprNode
# operand2 ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when neither operand is a pyobject.
# - Check operand types and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand1', 'operand2']
inplace = False
def calculate_constant_result(self):
func = compile_time_binary_operators[self.operator]
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
func = get_compile_time_binop(self)
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
return func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env))
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
self.analyse_operation(env)
return self
def analyse_operation(self, env):
if self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
self.type = self.result_type(self.operand1.type,
self.operand2.type)
assert self.type.is_pyobject
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
def is_py_operation(self):
return self.is_py_operation_types(self.operand1.type, self.operand2.type)
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
def analyse_cpp_operation(self, env):
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if not entry:
self.type_error()
return
func_type = entry.type
if func_type.is_ptr:
func_type = func_type.base_type
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def result_type(self, type1, type2):
if self.is_py_operation_types(type1, type2):
if type2.is_string:
type2 = Builtin.bytes_type
elif type2.is_pyunicode_ptr:
type2 = Builtin.unicode_type
if type1.is_string:
type1 = Builtin.bytes_type
elif type1.is_pyunicode_ptr:
type1 = Builtin.unicode_type
if type1.is_builtin_type or type2.is_builtin_type:
if type1 is type2 and self.operator in '**%+|&^':
# FIXME: at least these operators should be safe - others?
return type1
result_type = self.infer_builtin_types_operation(type1, type2)
if result_type is not None:
return result_type
return py_object_type
else:
return self.compute_c_result_type(type1, type2)
def infer_builtin_types_operation(self, type1, type2):
return None
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def is_ephemeral(self):
return (super(BinopNode, self).is_ephemeral() or
self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
def generate_result_code(self, code):
#print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
if self.operand1.type.is_pyobject:
function = self.py_operation_function(code)
if self.operator == '**':
extra_args = ", Py_None"
else:
extra_args = ""
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result(),
function,
self.operand1.py_result(),
self.operand2.py_result(),
extra_args,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.is_temp:
code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
def type_error(self):
if not (self.operand1.type.is_error
or self.operand2.type.is_error):
error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
(self.operator, self.operand1.type,
self.operand2.type))
self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
def analyse_types(self, env):
node = BinopNode.analyse_types(self, env)
if node.is_py_operation():
node.type = PyrexTypes.error_type
return node
def py_operation_function(self, code):
return ""
def calculate_result_code(self):
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
def compute_c_result_type(self, type1, type2):
cpp_type = None
if type1.is_cpp_class or type1.is_ptr:
cpp_type = type1.find_cpp_operation_type(self.operator, type2)
# FIXME: handle the reversed case?
#if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
# cpp_type = type2.find_cpp_operation_type(self.operator, type1)
# FIXME: do we need to handle other cases here?
return cpp_type
def c_binop_constructor(operator):
def make_binop_node(pos, **operands):
return CBinopNode(pos, operator=operator, **operands)
return make_binop_node
class NumBinopNode(BinopNode):
# Binary operation taking numeric arguments.
infix = True
overflow_check = False
overflow_bit_node = None
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.type = self.compute_c_result_type(type1, type2)
if not self.type:
self.type_error()
return
if self.type.is_complex:
self.infix = False
if (self.type.is_int
and env.directives['overflowcheck']
and self.operator in self.overflow_op_names):
if (self.operator in ('+', '*')
and self.operand1.has_constant_result()
and not self.operand2.has_constant_result()):
self.operand1, self.operand2 = self.operand2, self.operand1
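                # moving the constant operand to the RHS lets the overflow
                # helper below be specialized for a constant rhs (const_rhs)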
self.overflow_check = True
self.overflow_fold = env.directives['overflowcheck.fold']
self.func = self.type.overflow_check_binop(
self.overflow_op_names[self.operator],
env,
const_rhs = self.operand2.has_constant_result())
self.is_temp = True
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
if widest_type is PyrexTypes.c_bint_type:
if self.operator not in '|^&':
# False + False == 0 # not False!
widest_type = PyrexTypes.c_int_type
else:
widest_type = PyrexTypes.widest_numeric_type(
widest_type, PyrexTypes.c_int_type)
return widest_type
else:
return None
def may_be_none(self):
if self.type and self.type.is_builtin_type:
# if we know the result type, we know the operation, so it can't be None
return False
type1 = self.operand1.type
type2 = self.operand2.type
if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
# XXX: I can't think of any case where a binary operation
# on builtin types evaluates to None - add a special case
# here if there is one.
return False
return super(NumBinopNode, self).may_be_none()
def get_constant_c_result_code(self):
value1 = self.operand1.get_constant_c_result_code()
value2 = self.operand2.get_constant_c_result_code()
if value1 and value2:
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
and (type2.is_numeric or type2.is_enum)
def generate_evaluation_code(self, code):
if self.overflow_check:
self.overflow_bit_node = self
self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % self.overflow_bit)
super(NumBinopNode, self).generate_evaluation_code(code)
if self.overflow_check:
code.putln("if (unlikely(%s)) {" % self.overflow_bit)
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
code.putln(code.error_goto(self.pos))
code.putln("}")
code.funcstate.release_temp(self.overflow_bit)
def calculate_result_code(self):
if self.overflow_bit_node is not None:
return "%s(%s, %s, &%s)" % (
self.func,
self.operand1.result(),
self.operand2.result(),
self.overflow_bit_node.overflow_bit)
elif self.infix:
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
else:
func = self.type.binary_op(self.operator)
if func is None:
error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
return "%s(%s, %s)" % (
func,
self.operand1.result(),
self.operand2.result())
def is_py_operation_types(self, type1, type2):
return (type1.is_unicode_char or
type2.is_unicode_char or
BinopNode.is_py_operation_types(self, type1, type2))
def py_operation_function(self, code):
function_name = self.py_functions[self.operator]
if self.inplace:
function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
return function_name
py_functions = {
"|": "PyNumber_Or",
"^": "PyNumber_Xor",
"&": "PyNumber_And",
"<<": "PyNumber_Lshift",
">>": "PyNumber_Rshift",
"+": "PyNumber_Add",
"-": "PyNumber_Subtract",
"*": "PyNumber_Multiply",
"@": "__Pyx_PyNumber_MatrixMultiply",
"/": "__Pyx_PyNumber_Divide",
"//": "PyNumber_FloorDivide",
"%": "PyNumber_Remainder",
"**": "PyNumber_Power",
}
overflow_op_names = {
"+": "add",
"-": "sub",
"*": "mul",
"<<": "lshift",
}
class IntBinopNode(NumBinopNode):
# Binary operation taking integer arguments.
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
class AddNode(NumBinopNode):
# '+' operator.
def is_py_operation_types(self, type1, type2):
if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
string_types = (bytes_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
return None
def compute_c_result_type(self, type1, type2):
#print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
return type2
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
def py_operation_function(self, code):
type1, type2 = self.operand1.type, self.operand2.type
if type1 is unicode_type or type2 is unicode_type:
if type1.is_builtin_type and type2.is_builtin_type:
if self.operand1.may_be_none() or self.operand2.may_be_none():
return '__Pyx_PyUnicode_ConcatSafe'
else:
return '__Pyx_PyUnicode_Concat'
return super(AddNode, self).py_operation_function(code)
class SubNode(NumBinopNode):
# '-' operator.
def compute_c_result_type(self, type1, type2):
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
return PyrexTypes.c_ptrdiff_t_type
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
class MulNode(NumBinopNode):
# '*' operator.
def is_py_operation_types(self, type1, type2):
if ((type1.is_string and type2.is_int) or
(type2.is_string and type1.is_int)):
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
string_types = (bytes_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
return type2
# multiplication of containers/numbers with an integer value
# always (?) returns the same type
if type1.is_int:
return type2
if type2.is_int:
return type1
return None
class MatMultNode(NumBinopNode):
# '@' operator.
def is_py_operation_types(self, type1, type2):
return True
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
super(MatMultNode, self).generate_evaluation_code(code)
class DivNode(NumBinopNode):
# '/' or '//' operator.
cdivision = None
truedivision = None # == "unknown" if operator == '/'
ctruedivision = False
cdivision_warnings = False
zerodivision_check = None
def find_compile_time_binary_operator(self, op1, op2):
func = compile_time_binary_operators[self.operator]
if self.operator == '/' and self.truedivision is None:
# => true div for floats, floor div for integers
if isinstance(op1, (int,long)) and isinstance(op2, (int,long)):
func = compile_time_binary_operators['//']
return func
def calculate_constant_result(self):
op1 = self.operand1.constant_result
op2 = self.operand2.constant_result
func = self.find_compile_time_binary_operator(op1, op2)
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
func = self.find_compile_time_binary_operator(
operand1, operand2)
return func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
def analyse_operation(self, env):
if self.cdivision or env.directives['cdivision']:
self.ctruedivision = False
else:
self.ctruedivision = self.truedivision
NumBinopNode.analyse_operation(self, env)
if self.is_cpp_operation():
self.cdivision = True
if not self.type.is_pyobject:
self.zerodivision_check = (
self.cdivision is None and not env.directives['cdivision']
and (not self.operand2.has_constant_result() or
self.operand2.constant_result == 0))
if self.zerodivision_check or env.directives['cdivision_warnings']:
# Need to check ahead of time to warn or raise zero division error
self.operand1 = self.operand1.coerce_to_simple(env)
self.operand2 = self.operand2.coerce_to_simple(env)
def compute_c_result_type(self, type1, type2):
if self.operator == '/' and self.ctruedivision:
if not type1.is_float and not type2.is_float:
widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
return widest_type
return NumBinopNode.compute_c_result_type(self, type1, type2)
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float division"
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.type.is_complex:
if self.cdivision is None:
self.cdivision = (code.globalstate.directives['cdivision']
or not self.type.signed
or self.type.is_float)
if not self.cdivision:
code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def generate_div_warning_code(self, code):
if not self.type.is_pyobject:
if self.zerodivision_check:
if not self.infix:
zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
else:
zero_test = "%s == 0" % self.operand2.result()
code.putln("if (unlikely(%s)) {" % zero_test)
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if self.type.is_int and self.type.signed and self.operator != '%':
code.globalstate.use_utility_code(division_overflow_test_code)
if self.operand2.type.signed == 2:
# explicitly signed, no runtime check needed
minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
else:
type_of_op2 = self.operand2.type.declaration_code('')
minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
type_of_op2, self.operand2.result(), type_of_op2)
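                    # "(!(((T)-1) > 0))" above is a compile-time signedness test:
                    # it is non-zero only for signed T, so the "== -1" comparison
                    # is skipped entirely for unsigned operand types.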
code.putln("else if (sizeof(%s) == sizeof(long) && %s "
" && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
self.type.declaration_code(''),
minus1_check,
self.operand1.result()))
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
code.globalstate.use_utility_code(cdivision_warning_utility_code)
code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
self.operand1.result(),
self.operand2.result()))
code.put_ensure_gil()
code.putln(code.set_error_info(self.pos, used=True))
code.putln("if (__Pyx_cdivision_warning(%(FILENAME)s, "
"%(LINENO)s)) {" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
})
code.put_release_ensured_gil()
code.put_goto(code.error_label)
code.putln("}")
code.put_release_ensured_gil()
code.putln("}")
def calculate_result_code(self):
if self.type.is_complex:
return NumBinopNode.calculate_result_code(self)
elif self.type.is_float and self.operator == '//':
return "floor(%s / %s)" % (
self.operand1.result(),
self.operand2.result())
elif self.truedivision or self.cdivision:
op1 = self.operand1.result()
op2 = self.operand2.result()
if self.truedivision:
if self.type != self.operand1.type:
op1 = self.type.cast_code(op1)
if self.type != self.operand2.type:
op2 = self.type.cast_code(op2)
return "(%s / %s)" % (op1, op2)
else:
return "__Pyx_div_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
class ModNode(DivNode):
# '%' operator.
def is_py_operation_types(self, type1, type2):
return (type1.is_string
or type2.is_string
or NumBinopNode.is_py_operation_types(self, type1, type2))
def infer_builtin_types_operation(self, type1, type2):
# b'%s' % xyz raises an exception in Py3, so it's safe to infer the type for Py2
if type1 is unicode_type:
            # None % xyz may be implemented by the RHS
if type2.is_builtin_type or not self.operand1.may_be_none():
return type1
elif type1 in (bytes_type, str_type, basestring_type):
if type2 is unicode_type:
return type2
elif type2.is_numeric:
return type1
elif type1 is bytes_type and not type2.is_builtin_type:
                return None # RHS might implement '%' operator differently in Py3
else:
return basestring_type # either str or unicode, can't tell
return None
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float divmod()"
def analyse_operation(self, env):
DivNode.analyse_operation(self, env)
if not self.type.is_pyobject:
if self.cdivision is None:
self.cdivision = env.directives['cdivision'] or not self.type.signed
if not self.cdivision and not self.type.is_int and not self.type.is_float:
error(self.pos, "mod operator not supported for type '%s'" % self.type)
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.cdivision:
if self.type.is_int:
code.globalstate.use_utility_code(
mod_int_utility_code.specialize(self.type))
else: # float
code.globalstate.use_utility_code(
mod_float_utility_code.specialize(
self.type, math_h_modifier=self.type.math_h_modifier))
# note: skipping over DivNode here
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def calculate_result_code(self):
if self.cdivision:
if self.type.is_float:
return "fmod%s(%s, %s)" % (
self.type.math_h_modifier,
self.operand1.result(),
self.operand2.result())
else:
return "(%s %% %s)" % (
self.operand1.result(),
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
def py_operation_function(self, code):
if self.operand1.type is unicode_type:
if self.operand1.may_be_none():
return '__Pyx_PyUnicode_FormatSafe'
else:
return 'PyUnicode_Format'
elif self.operand1.type is str_type:
if self.operand1.may_be_none():
return '__Pyx_PyString_FormatSafe'
else:
return '__Pyx_PyString_Format'
return super(ModNode, self).py_operation_function(code)
class PowNode(NumBinopNode):
# '**' operator.
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
if self.type.real_type.is_float:
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
else:
error(self.pos, "complex int powers not supported")
self.pow_func = "<error>"
elif self.type.is_float:
self.pow_func = "pow" + self.type.math_h_modifier
elif self.type.is_int:
self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_')
env.use_utility_code(
int_pow_utility_code.specialize(
func_name=self.pow_func,
type=self.type.declaration_code(''),
signed=self.type.signed and 1 or 0))
elif not self.type.is_error:
error(self.pos, "got unexpected types for C power operator: %s, %s" %
(self.operand1.type, self.operand2.type))
def calculate_result_code(self):
# Work around MSVC overloading ambiguity.
def typecast(operand):
if self.type == operand.type:
return operand.result()
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
self.pow_func,
typecast(self.operand1),
typecast(self.operand2))
def py_operation_function(self, code):
if (self.type.is_pyobject and
self.operand1.constant_result == 2 and
isinstance(self.operand1.constant_result, (int, long)) and
self.operand2.type is py_object_type):
code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
if self.inplace:
return '__Pyx_PyNumber_InPlacePowerOf2'
else:
return '__Pyx_PyNumber_PowerOf2'
return super(PowNode, self).py_operation_function(code)
class BoolBinopNode(ExprNode):
"""
Short-circuiting boolean operation.
Note that this node provides the same code generation method as
BoolBinopResultNode to simplify expression nesting.
operator string "and"/"or"
operand1 BoolBinopNode/BoolBinopResultNode left operand
operand2 BoolBinopNode/BoolBinopResultNode right operand
"""
subexprs = ['operand1', 'operand2']
is_temp = True
operator = None
operand1 = None
operand2 = None
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
return PyrexTypes.independent_spanning_type(type1, type2)
def may_be_none(self):
if self.operator == 'or':
return self.operand2.may_be_none()
else:
return self.operand1.may_be_none() or self.operand2.may_be_none()
def calculate_constant_result(self):
operand1 = self.operand1.constant_result
operand2 = self.operand2.constant_result
if self.operator == 'and':
self.constant_result = operand1 and operand2
else:
self.constant_result = operand1 or operand2
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
if self.operator == 'and':
return operand1 and operand2
else:
return operand1 or operand2
def is_ephemeral(self):
return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
def analyse_types(self, env):
# Note: we do not do any coercion here as we most likely do not know the final type anyway.
        # We even accept setting self.type to ErrorType if the operands have no spanning type.
# The coercion to the final type and to a "simple" value is left to coerce_to().
operand1 = self.operand1.analyse_types(env)
operand2 = self.operand2.analyse_types(env)
self.type = PyrexTypes.independent_spanning_type(
operand1.type, operand2.type)
self.operand1 = self._wrap_operand(operand1, env)
self.operand2 = self._wrap_operand(operand2, env)
return self
def _wrap_operand(self, operand, env):
if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
operand = BoolBinopResultNode(operand, self.type, env)
return operand
def wrap_operands(self, env):
"""
Must get called by transforms that want to create a correct BoolBinopNode
after the type analysis phase.
"""
self.operand1 = self._wrap_operand(self.operand1, env)
self.operand2 = self._wrap_operand(self.operand2, env)
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
operand1 = self.operand1.coerce_to(dst_type, env)
operand2 = self.operand2.coerce_to(dst_type, env)
return BoolBinopNode.from_node(
self, type=dst_type,
operator=self.operator,
operand1=operand1, operand2=operand2)
def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label):
code.mark_pos(self.pos)
outer_labels = (and_label, or_label)
if self.operator == 'and':
my_label = and_label = code.new_label('next_and')
else:
my_label = or_label = code.new_label('next_or')
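        # operand1 jumps to my_label whenever it cannot decide the overall
        # result on its own (i.e. True for 'and', False for 'or'); operand2 is
        # then evaluated below with the caller's original labels restored.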
self.operand1.generate_bool_evaluation_code(code, final_result_temp, and_label, or_label, end_label)
and_label, or_label = outer_labels
code.put_label(my_label)
self.operand2.generate_bool_evaluation_code(code, final_result_temp, and_label, or_label, end_label)
def generate_evaluation_code(self, code):
self.allocate_temp_result(code)
or_label = and_label = None
end_label = code.new_label('bool_binop_done')
self.generate_bool_evaluation_code(code, self.result(), and_label, or_label, end_label)
if code.label_used(end_label):
code.put_label(end_label)
gil_message = "Truth-testing Python object"
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def generate_subexpr_disposal_code(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def free_subexpr_temps(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def generate_operand1_test(self, code):
# Generate code to test the truth of the first operand.
if self.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.operand1.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.operand1.result()
return (test_result, self.type.is_pyobject)
class BoolBinopResultNode(ExprNode):
"""
Intermediate result of a short-circuiting and/or expression.
Tests the result for 'truthiness' and takes care of coercing the final result
of the overall expression to the target type.
Note that this node provides the same code generation method as
BoolBinopNode to simplify expression nesting.
arg ExprNode the argument to test
value ExprNode the coerced result value node
"""
subexprs = ['arg', 'value']
is_temp = True
arg = None
value = None
def __init__(self, arg, result_type, env):
# using 'arg' multiple times, so it must be a simple/temp value
arg = arg.coerce_to_simple(env)
# wrap in ProxyNode, in case a transform wants to replace self.arg later
arg = ProxyNode(arg)
super(BoolBinopResultNode, self).__init__(
arg.pos, arg=arg, type=result_type,
value=CloneNode(arg).coerce_to(result_type, env))
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
# unwrap, coerce, rewrap
arg = self.arg.arg
if dst_type is PyrexTypes.c_bint_type:
arg = arg.coerce_to_boolean(env)
# TODO: unwrap more coercion nodes?
return BoolBinopResultNode(arg, dst_type, env)
def nogil_check(self, env):
# let's leave all errors to BoolBinopNode
pass
def generate_operand_test(self, code):
        # Generate code to test the truth of the wrapped operand.
if self.arg.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.arg.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.arg.result()
return (test_result, self.arg.type.is_pyobject)
def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label):
code.mark_pos(self.pos)
# x => x
# x and ... or ... => next 'and' / 'or'
# False ... or x => next 'or'
# True and x => next 'and'
# True or x => True (operand)
self.arg.generate_evaluation_code(code)
if and_label or or_label:
test_result, uses_temp = self.generate_operand_test(code)
sense = '!' if or_label else ''
code.putln("if (%s%s) {" % (sense, test_result))
if uses_temp:
code.funcstate.release_temp(test_result)
self.arg.generate_disposal_code(code)
if or_label:
# value is false => short-circuit to next 'or'
code.put_goto(or_label)
code.putln("} else {")
if and_label:
# value is true => go to next 'and'
code.put_goto(and_label)
if not or_label:
code.putln("} else {")
if not and_label or not or_label:
# if no next 'and' or 'or', we provide the result
self.value.generate_evaluation_code(code)
self.value.make_owned_reference(code)
code.putln("%s = %s;" % (final_result_temp, self.value.result()))
self.value.generate_post_assignment_code(code)
self.arg.generate_disposal_code(code)
self.value.free_temps(code)
if and_label or or_label:
code.put_goto(end_label)
if and_label or or_label:
code.putln("}")
self.arg.free_temps(code)
class CondExprNode(ExprNode):
# Short-circuiting conditional expression.
#
# test ExprNode
# true_val ExprNode
# false_val ExprNode
true_val = None
false_val = None
subexprs = ['test', 'true_val', 'false_val']
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(
self.true_val.infer_type(env),
self.false_val.infer_type(env))
def calculate_constant_result(self):
if self.test.constant_result:
self.constant_result = self.true_val.constant_result
else:
self.constant_result = self.false_val.constant_result
def is_ephemeral(self):
return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
def analyse_types(self, env):
self.test = self.test.analyse_types(env).coerce_to_boolean(env)
self.true_val = self.true_val.analyse_types(env)
self.false_val = self.false_val.analyse_types(env)
self.is_temp = 1
return self.analyse_result_type(env)
def analyse_result_type(self, env):
self.type = PyrexTypes.independent_spanning_type(
self.true_val.type, self.false_val.type)
if self.type.is_pyobject:
self.result_ctype = py_object_type
elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
self.true_val = self.true_val.coerce_to(self.type, env)
self.false_val = self.false_val.coerce_to(self.type, env)
if self.type == PyrexTypes.error_type:
self.type_error()
return self
def coerce_to(self, dst_type, env):
self.true_val = self.true_val.coerce_to(dst_type, env)
self.false_val = self.false_val.coerce_to(dst_type, env)
self.result_ctype = None
return self.analyse_result_type(env)
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
def check_const(self):
return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
def generate_evaluation_code(self, code):
# Because subexprs may not be evaluated we can use a more optimal
# subexpr allocation strategy than the default, so override evaluation_code.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
code.putln("if (%s) {" % self.test.result())
self.eval_and_get(code, self.true_val)
code.putln("} else {")
self.eval_and_get(code, self.false_val)
code.putln("}")
self.test.generate_disposal_code(code)
self.test.free_temps(code)
def eval_and_get(self, code, expr):
expr.generate_evaluation_code(code)
expr.make_owned_reference(code)
code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
expr.generate_post_assignment_code(code)
expr.free_temps(code)
def generate_subexpr_disposal_code(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
def free_subexpr_temps(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
richcmp_constants = {
"<" : "Py_LT",
"<=": "Py_LE",
"==": "Py_EQ",
"!=": "Py_NE",
"<>": "Py_NE",
">" : "Py_GT",
">=": "Py_GE",
# the following are faked by special compare functions
"in" : "Py_EQ",
"not_in": "Py_NE",
}
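# Maps comparison operators to CPython's rich-comparison constants; these are
# passed to PyObject_RichCompare() in CmpNode.generate_operation_code().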
class CmpNode(object):
# Mixin class containing code common to PrimaryCmpNodes
# and CascadedCmpNodes.
special_bool_cmp_function = None
special_bool_cmp_utility_code = None
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def calculate_cascaded_constant_result(self, operand1_result):
func = compile_time_binary_operators[self.operator]
operand2_result = self.operand2.constant_result
if (isinstance(operand1_result, (bytes, unicode)) and
isinstance(operand2_result, (bytes, unicode)) and
type(operand1_result) != type(operand2_result)):
# string comparison of different types isn't portable
return
if self.operator in ('in', 'not_in'):
if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
if not self.operand2.args:
self.constant_result = self.operator == 'not_in'
return
elif isinstance(self.operand2, ListNode) and not self.cascade:
# tuples are more efficient to store than lists
self.operand2 = self.operand2.as_tuple()
elif isinstance(self.operand2, DictNode):
if not self.operand2.key_value_pairs:
self.constant_result = self.operator == 'not_in'
return
self.constant_result = func(operand1_result, operand2_result)
def cascaded_compile_time_value(self, operand1, denv):
func = get_compile_time_binop(self)
operand2 = self.operand2.compile_time_value(denv)
try:
result = func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
result = None
if result:
cascade = self.cascade
if cascade:
result = result and cascade.cascaded_compile_time_value(operand2, denv)
return result
def is_cpp_comparison(self):
return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
def find_common_int_type(self, env, op, operand1, operand2):
# type1 != type2 and at least one of the types is not a C int
type1 = operand1.type
type2 = operand2.type
type1_can_be_int = False
type2_can_be_int = False
if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
type1_can_be_int = True
if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
type2_can_be_int = True
if type1.is_int:
if type2_can_be_int:
return type1
elif type2.is_int:
if type1_can_be_int:
return type2
elif type1_can_be_int:
if type2_can_be_int:
if Builtin.unicode_type in (type1, type2):
return PyrexTypes.c_py_ucs4_type
else:
return PyrexTypes.c_uchar_type
return None
def find_common_type(self, env, op, operand1, common_type=None):
operand2 = self.operand2
type1 = operand1.type
type2 = operand2.type
new_common_type = None
# catch general errors
if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
new_common_type = error_type
# try to use numeric comparisons where possible
elif type1.is_complex or type2.is_complex:
if op not in ('==', '!=') \
and (type1.is_complex or type1.is_numeric) \
and (type2.is_complex or type2.is_numeric):
error(self.pos, "complex types are unordered")
new_common_type = error_type
elif type1.is_pyobject:
new_common_type = type1
elif type2.is_pyobject:
new_common_type = type2
else:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif type1.is_numeric and type2.is_numeric:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif common_type is None or not common_type.is_pyobject:
new_common_type = self.find_common_int_type(env, op, operand1, operand2)
if new_common_type is None:
# fall back to generic type compatibility tests
if type1 == type2:
new_common_type = type1
elif type1.is_pyobject or type2.is_pyobject:
if type2.is_numeric or type2.is_string:
if operand2.check_for_coercion_error(type1, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif type1.is_numeric or type1.is_string:
if operand1.check_for_coercion_error(type2, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
new_common_type = py_object_type
else:
# one Python type and one non-Python type, not assignable
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
elif type1.assignable_from(type2):
new_common_type = type1
elif type2.assignable_from(type1):
new_common_type = type2
else:
# C types that we couldn't handle up to here are an error
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
if new_common_type.is_string and (isinstance(operand1, BytesNode) or
isinstance(operand2, BytesNode)):
# special case when comparing char* to bytes literal: must
# compare string values!
new_common_type = bytes_type
# recursively merge types
if common_type is None or new_common_type.is_error:
common_type = new_common_type
else:
# we could do a lot better by splitting the comparison
# into a non-Python part and a Python part, but this is
# safer for now
common_type = PyrexTypes.spanning_type(common_type, new_common_type)
if self.cascade:
common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
return common_type
def invalid_types_error(self, operand1, op, operand2):
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(op, operand1.type, operand2.type))
def is_python_comparison(self):
return (not self.is_ptr_contains()
and not self.is_c_string_contains()
and (self.has_python_operands()
or (self.cascade and self.cascade.is_python_comparison())
or self.operator in ('in', 'not_in')))
def coerce_operands_to(self, dst_type, env):
operand2 = self.operand2
if operand2.type != dst_type:
self.operand2 = operand2.coerce_to(dst_type, env)
if self.cascade:
self.cascade.coerce_operands_to(dst_type, env)
def is_python_result(self):
return ((self.has_python_operands() and
self.special_bool_cmp_function is None and
self.operator not in ('is', 'is_not', 'in', 'not_in') and
not self.is_c_string_contains() and
not self.is_ptr_contains())
or (self.cascade and self.cascade.is_python_result()))
def is_c_string_contains(self):
return self.operator in ('in', 'not_in') and \
((self.operand1.type.is_int
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type.is_unicode_char
and self.operand2.type is unicode_type))
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
return (container_type.is_ptr or container_type.is_array) \
and not container_type.is_string
def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
# note: currently operand1 must get coerced to a Python object if we succeed here!
if self.operator in ('==', '!='):
type1, type2 = operand1.type, self.operand2.type
if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
return True
elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.str_type or type2 is Builtin.str_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyString_Equals"
return True
elif self.operator in ('in', 'not_in'):
if self.operand2.type is Builtin.dict_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_Contains"
return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Contains"
return True
else:
if not self.operand2.type.is_pyobject:
self.operand2 = self.operand2.coerce_to_pyobject(env)
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PySequence_Contains"
return True
return False
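# For example, an equality test where one operand is known to be unicode is
# dispatched to __Pyx_PyUnicode_Equals, a bytes comparison to
# __Pyx_PyBytes_Equals, and an 'in' test whose right-hand side is a dict to
# __Pyx_PyDict_Contains, after wrapping the container in a None check.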
def generate_operation_code(self, code, result_code,
operand1, op, operand2):
if self.type.is_pyobject:
error_clause = code.error_goto_if_null
got_ref = "__Pyx_XGOTREF(%s); " % result_code
if self.special_bool_cmp_function:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
coerce_result = "__Pyx_PyBoolOrNull_FromLong"
else:
coerce_result = "__Pyx_PyBool_FromLong"
else:
error_clause = code.error_goto_if_neg
got_ref = ""
coerce_result = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
result1 = operand1.py_result()
else:
result1 = operand1.result()
if operand2.type.is_pyobject:
result2 = operand2.py_result()
else:
result2 = operand2.result()
if self.special_bool_cmp_utility_code:
code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
code.putln(
"%s = %s(%s(%s, %s, %s)); %s%s" % (
result_code,
coerce_result,
self.special_bool_cmp_function,
result1, result2, richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
assert op not in ('in', 'not_in'), op
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
result_code,
operand1.py_result(),
operand2.py_result(),
richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_complex:
code.putln("%s = %s(%s%s(%s, %s));" % (
result_code,
coerce_result,
op == "!=" and "!" or "",
operand1.type.unary_op('eq'),
operand1.result(),
operand2.result()))
else:
type1 = operand1.type
type2 = operand2.type
if (type1.is_extension_type or type2.is_extension_type) \
and not type1.same_as(type2):
common_type = py_object_type
elif type1.is_numeric:
common_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
common_type = type1
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
code.putln("%s = %s(%s %s %s);" % (
result_code,
coerce_result,
code1,
self.c_operator(op),
code2))
def c_operator(self, op):
if op == 'is':
return "=="
elif op == 'is_not':
return "!="
else:
return op
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
# a cascaded sequence.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
is_memslice_nonecheck = False
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def calculate_constant_result(self):
assert not self.cascade
self.calculate_cascaded_constant_result(self.operand1.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
if self.is_cpp_comparison():
self.analyse_cpp_comparison(env)
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self
if self.analyse_memoryviewslice_comparison(env):
return self
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
if self.operator in ('in', 'not_in'):
if self.is_c_string_contains():
self.is_pycmp = False
common_type = None
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
return self
if self.operand2.type is unicode_type:
env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
else:
if self.operand1.type is PyrexTypes.c_uchar_type:
self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
if self.operand2.type is not bytes_type:
self.operand2 = self.operand2.coerce_to(bytes_type, env)
env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
self.operand2 = self.operand2.as_none_safe_node(
"argument of type 'NoneType' is not iterable")
elif self.is_ptr_contains():
if self.cascade:
error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
self.type = PyrexTypes.c_bint_type
# Will be transformed by IterationTransform
return self
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = py_object_type
self.is_pycmp = True
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = self.find_common_type(env, self.operator, self.operand1)
self.is_pycmp = common_type.is_pyobject
if common_type is not None and not common_type.is_error:
if self.operand1.type != common_type:
self.operand1 = self.operand1.coerce_to(common_type, env)
self.coerce_operands_to(common_type, env)
if self.cascade:
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
operand2 = self.cascade.optimise_comparison(self.operand2, env)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
if self.is_python_result():
self.type = PyrexTypes.py_object_type
else:
self.type = PyrexTypes.c_bint_type
cdr = self.cascade
while cdr:
cdr.type = self.type
cdr = cdr.cascade
if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
# 1) owned reference, 2) reused value, 3) potential function error return value
self.is_temp = 1
return self
def analyse_cpp_comparison(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if entry is None:
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(self.operator, type1, type2))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
func_type = entry.type
if func_type.is_ptr:
func_type = func_type.base_type
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.is_pycmp = False
self.type = func_type.return_type
def analyse_memoryviewslice_comparison(self, env):
have_none = self.operand1.is_none or self.operand2.is_none
have_slice = (self.operand1.type.is_memoryviewslice or
self.operand2.type.is_memoryviewslice)
ops = ('==', '!=', 'is', 'is_not')
if have_slice and have_none and self.operator in ops:
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_memslice_nonecheck = True
return True
return False
def coerce_to_boolean(self, env):
if self.is_pycmp:
# coercing to bool => may allow for more efficient comparison code
if self.find_special_bool_compare_function(
env, self.operand1, result_is_bool=True):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_temp = 1
if self.cascade:
operand2 = self.cascade.optimise_comparison(
self.operand2, env, result_is_bool=True)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return self
# TODO: check if we can optimise parts of the cascade here
return ExprNode.coerce_to_boolean(self, env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
def check_const(self):
if self.cascade:
self.not_const()
return False
else:
return self.operand1.check_const() and self.operand2.check_const()
def calculate_result_code(self):
if self.operand1.type.is_complex:
if self.operator == "!=":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
self.operand1.type.binary_op('=='),
self.operand1.result(),
self.operand2.result())
elif self.is_c_string_contains():
if self.operand2.type is unicode_type:
method = "__Pyx_UnicodeContainsUCS4"
else:
method = "__Pyx_BytesContains"
if self.operator == "not_in":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
method,
self.operand2.result(),
self.operand1.result())
else:
result1 = self.operand1.result()
result2 = self.operand2.result()
if self.is_memslice_nonecheck:
if self.operand1.type.is_memoryviewslice:
result1 = "((PyObject *) %s.memview)" % result1
else:
result2 = "((PyObject *) %s.memview)" % result2
return "(%s %s %s)" % (
result1,
self.c_operator(self.operator),
result2)
def generate_evaluation_code(self, code):
self.operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_operation_code(code, self.result(),
self.operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, self.result(), self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
self.operand1.generate_disposal_code(code)
self.operand1.free_temps(code)
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
def generate_subexpr_disposal_code(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
# A CascadedCmpNode is not a complete expression node. It
# hangs off the side of another comparison node, shares
# its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
# operand2 ExprNode
# cascade CascadedCmpNode
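# For example, the chained comparison ``a < b < c`` is parsed as a
# PrimaryCmpNode for ``a < b`` whose 'cascade' attribute holds a
# CascadedCmpNode for ``< c``; the cascade reuses its parent's operand2,
# so ``b`` is evaluated only once, as Python semantics require.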
child_attrs = ['operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
constant_result = constant_value_not_set # FIXME: where to calculate this?
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def analyse_types(self, env):
self.operand2 = self.operand2.analyse_types(env)
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
return self
def has_python_operands(self):
return self.operand2.type.is_pyobject
def optimise_comparison(self, operand1, env, result_is_bool=False):
if self.find_special_bool_compare_function(env, operand1, result_is_bool):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
if not operand1.type.is_pyobject:
operand1 = operand1.coerce_to_pyobject(env)
if self.cascade:
operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return operand1
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
def coerce_cascaded_operands_to_temp(self, env):
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
code.put_decref(result, self.type)
else:
code.putln("if (%s) {" % result)
if needs_evaluation:
operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, result, self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
if needs_evaluation:
operand1.generate_disposal_code(code)
operand1.free_temps(code)
# Cascaded cmp result is always temp
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
code.putln("}")
def annotate(self, code):
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
binop_node_classes = {
"or": BoolBinopNode,
"and": BoolBinopNode,
"|": IntBinopNode,
"^": IntBinopNode,
"&": IntBinopNode,
"<<": IntBinopNode,
">>": IntBinopNode,
"+": AddNode,
"-": SubNode,
"*": MulNode,
"@": MatMultNode,
"/": DivNode,
"//": DivNode,
"%": ModNode,
"**": PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False):
# Construct binop node of appropriate class for
# given operator.
return binop_node_classes[operator](pos,
operator = operator,
operand1 = operand1,
operand2 = operand2,
inplace = inplace)
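# For example, binop_node(pos, "+", left, right) builds an AddNode while
# binop_node(pos, "<<", left, right) builds an IntBinopNode; the operator
# string simply selects the node class from binop_node_classes above.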
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
subexprs = ['arg']
constant_result = not_a_constant
def __init__(self, arg):
super(CoercionNode, self).__init__(arg.pos)
self.arg = arg
if debug_coercion:
print("%s Coercing %s" % (self, self.arg))
def calculate_constant_result(self):
# constant folding can break type coercion, so this is disabled
pass
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
file, line, col = self.pos
code.annotate((file, line, col-1), AnnotationItem(
style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
"""
Coerce an object to a memoryview slice. This holds a new reference in
a managed temp.
"""
def __init__(self, arg, dst_type, env):
assert dst_type.is_memoryviewslice
assert not arg.type.is_memoryviewslice
CoercionNode.__init__(self, arg)
self.type = dst_type
self.is_temp = 1
self.env = env
self.use_managed_ref = True
self.arg = arg
def generate_result_code(self, code):
self.type.create_from_py_utility_code(self.env)
code.putln("%s = %s(%s);" % (self.result(),
self.type.from_py_function,
self.arg.py_result()))
error_cond = self.type.error_condition(self.result())
code.putln(code.error_goto_if(error_cond, self.pos))
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
def __init__(self, arg, new_type):
CoercionNode.__init__(self, arg)
self.type = new_type
def may_be_none(self):
return self.arg.may_be_none()
def calculate_result_code(self):
return self.arg.result_as(self.type)
def generate_result_code(self, code):
self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
# This node is used to check that a generic Python
# object is an instance of a particular extension type.
# This node borrows the result of its argument node.
exact_builtin_type = True
def __init__(self, arg, dst_type, env, notnone=False):
# The arg is known to be a Python object, and
# the dst_type is known to be an extension type.
assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
CoercionNode.__init__(self, arg)
self.type = dst_type
self.result_ctype = arg.ctype()
self.notnone = notnone
nogil_check = Node.gil_error
gil_message = "Python type test"
def analyse_types(self, env):
return self
def may_be_none(self):
if self.notnone:
return False
return self.arg.may_be_none()
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def is_ephemeral(self):
return self.arg.is_ephemeral()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_constant_result(self):
# FIXME
pass
def calculate_result_code(self):
return self.arg.result()
def generate_result_code(self, code):
if self.type.typeobj_is_available():
if self.type.is_builtin_type:
type_test = self.type.type_test_code(
self.arg.py_result(),
self.notnone, exact=self.exact_builtin_type)
else:
type_test = self.type.type_test_code(
self.arg.py_result(), self.notnone)
code.globalstate.use_utility_code(
UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
code.putln("if (!(%s)) %s" % (
type_test, code.error_goto(self.pos)))
else:
error(self.pos, "Cannot test type of extern C class "
"without type object name specification")
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
# This node is used to check that a Python object is not None and
# raises an appropriate exception (as specified by the creating
# transform).
is_nonecheck = True
def __init__(self, arg, exception_type_cname, exception_message,
exception_format_args):
CoercionNode.__init__(self, arg)
self.type = arg.type
self.result_ctype = arg.ctype()
self.exception_type_cname = exception_type_cname
self.exception_message = exception_message
self.exception_format_args = tuple(exception_format_args or ())
nogil_check = None # this node only guards an operation that would fail already
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_result_code(self):
return self.arg.result()
def condition(self):
if self.type.is_pyobject:
return self.arg.py_result()
elif self.type.is_memoryviewslice:
return "((PyObject *) %s.memview)" % self.arg.result()
else:
raise Exception("unsupported type")
def put_nonecheck(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.condition())
if self.in_nogil_context:
code.put_ensure_gil()
escape = StringEncoding.escape_byte_string
if self.exception_format_args:
code.putln('PyErr_Format(%s, "%s", %s);' % (
self.exception_type_cname,
StringEncoding.escape_byte_string(
self.exception_message.encode('UTF-8')),
', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
for arg in self.exception_format_args ])))
else:
code.putln('PyErr_SetString(%s, "%s");' % (
self.exception_type_cname,
escape(self.exception_message.encode('UTF-8'))))
if self.in_nogil_context:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
def generate_result_code(self, code):
self.put_nonecheck(code)
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
# This node is used to convert a C data type
# to a Python object.
type = py_object_type
is_temp = 1
def __init__(self, arg, env, type=py_object_type):
if not arg.type.create_to_py_utility_code(env):
error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
elif arg.type.is_complex:
# special case: complex coercion is so complex that it
# uses a macro ("__pyx_PyComplex_FromComplex()"), for
# which the argument must be simple
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
if type is py_object_type:
# be specific about some known types
if arg.type.is_string or arg.type.is_cpp_string:
self.type = default_str_type(env)
elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
self.type = unicode_type
elif arg.type.is_complex:
self.type = Builtin.complex_type
elif arg.type.is_string or arg.type.is_cpp_string:
if (type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(arg.pos,
"default encoding required for conversion from '%s' to '%s'" %
(arg.type, type))
self.type = type
else:
# FIXME: check that the target type and the resulting type are compatible
pass
if arg.type.is_memoryviewslice:
# Register utility codes at this point
arg.type.get_to_py_function(env, arg)
self.env = env
gil_message = "Converting to Python object"
def may_be_none(self):
# FIXME: is this always safe?
return False
def coerce_to_boolean(self, env):
arg_type = self.arg.type
if (arg_type == PyrexTypes.c_bint_type or
(arg_type.is_pyobject and arg_type.name == 'bool')):
return self.arg.coerce_to_temp(env)
else:
return CoerceToBooleanNode(self, env)
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.arg.type.is_int:
return self.arg
else:
return self.arg.coerce_to(PyrexTypes.c_long_type, env)
def analyse_types(self, env):
# The arg is always already analysed
return self
def generate_result_code(self, code):
arg_type = self.arg.type
if arg_type.is_memoryviewslice:
funccall = arg_type.get_to_py_function(self.env, self.arg)
else:
func = arg_type.to_py_function
if arg_type.is_string or arg_type.is_cpp_string:
if self.type in (bytes_type, str_type, unicode_type):
func = func.replace("Object", self.type.name.title())
elif self.type is bytearray_type:
func = func.replace("Object", "ByteArray")
funccall = "%s(%s)" % (func, self.arg.result())
code.putln('%s = %s; %s' % (
self.result(),
funccall,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
# This node is used to convert a C int type to a Python bytes
# object.
is_temp = 1
def __init__(self, arg, env):
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
self.type = Builtin.bytes_type
def generate_result_code(self, code):
arg = self.arg
arg_result = arg.result()
if arg.type not in (PyrexTypes.c_char_type,
PyrexTypes.c_uchar_type,
PyrexTypes.c_schar_type):
if arg.type.signed:
code.putln("if ((%s < 0) || (%s > 255)) {" % (
arg_result, arg_result))
else:
code.putln("if (%s > 255) {" % arg_result)
code.putln('PyErr_SetString(PyExc_OverflowError, '
'"value too large to pack into a byte"); %s' % (
code.error_goto(self.pos)))
code.putln('}')
temp = None
if arg.type is not PyrexTypes.c_char_type:
temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
code.putln("%s = (char)%s;" % (temp, arg_result))
arg_result = temp
code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
self.result(),
arg_result,
code.error_goto_if_null(self.result(), self.pos)))
if temp is not None:
code.funcstate.release_temp(temp)
code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
# This node is used to convert a Python object
# to a C data type.
def __init__(self, result_type, arg, env):
CoercionNode.__init__(self, arg)
self.type = result_type
self.is_temp = 1
if not result_type.create_from_py_utility_code(env):
error(arg.pos,
"Cannot convert Python object to '%s'" % result_type)
if self.type.is_string or self.type.is_pyunicode_ptr:
if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
warning(arg.pos,
"Obtaining '%s' from externally modifiable global Python value" % result_type,
level=1)
def analyse_types(self, env):
# The arg is always already analysed
return self
def is_ephemeral(self):
return self.type.is_ptr and self.arg.is_ephemeral()
def generate_result_code(self, code):
function = self.type.from_py_function
operand = self.arg.py_result()
rhs = "%s(%s)" % (function, operand)
if self.type.is_enum:
rhs = typecast(self.type, c_long_type, rhs)
code.putln('%s = %s; %s' % (
self.result(),
rhs,
code.error_goto_if(self.type.error_condition(self.result()), self.pos)))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def nogil_check(self, env):
error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
# This node is used when a result needs to be used
# in a boolean context.
type = PyrexTypes.c_bint_type
_special_builtins = {
Builtin.list_type : 'PyList_GET_SIZE',
Builtin.tuple_type : 'PyTuple_GET_SIZE',
Builtin.bytes_type : 'PyBytes_GET_SIZE',
Builtin.unicode_type : 'PyUnicode_GET_SIZE',
}
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
if arg.type.is_pyobject:
self.is_temp = 1
def nogil_check(self, env):
if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
self.gil_error()
gil_message = "Truth-testing Python object"
def check_const(self):
if self.is_temp:
self.not_const()
return False
return self.arg.check_const()
def calculate_result_code(self):
return "(%s != 0)" % self.arg.result()
def generate_result_code(self, code):
if not self.is_temp:
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
self.result(),
self.arg.py_result(),
test_func,
self.arg.py_result()))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
def __init__(self, arg, dst_type, env):
if arg.type.is_complex:
arg = arg.coerce_to_simple(env)
self.type = dst_type
CoercionNode.__init__(self, arg)
dst_type.create_declaration_utility_code(env)
def calculate_result_code(self):
if self.arg.type.is_complex:
real_part = "__Pyx_CREAL(%s)" % self.arg.result()
imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
else:
real_part = self.arg.result()
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
def generate_result_code(self, code):
pass
class CoerceToTempNode(CoercionNode):
# This node is used to force the result of another node
# to be stored in a temporary. It is only used if the
# argument node's result is not already in a temporary.
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
self.type = self.arg.type.as_argument_type()
self.constant_result = self.arg.constant_result
self.is_temp = 1
if self.type.is_pyobject:
self.result_ctype = py_object_type
gil_message = "Creating temporary Python reference"
def analyse_types(self, env):
# The arg is always already analysed
return self
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
if self.arg.is_simple():
return self.arg
self.type = self.arg.type
self.result_ctype = self.type
return self
def generate_result_code(self, code):
#self.arg.generate_evaluation_code(code) # Already done
# by generic generate_subexpr_evaluation_code!
code.putln("%s = %s;" % (
self.result(), self.arg.result_as(self.ctype())))
if self.use_managed_ref:
if self.type.is_pyobject:
code.put_incref(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_incref_memoryviewslice(self.result(),
not self.in_nogil_context)
class ProxyNode(CoercionNode):
"""
A node that should not be replaced by transforms or other means,
and hence can be useful to wrap the argument to a clone node
MyNode -> ProxyNode -> ArgNode
CloneNode -^
"""
nogil_check = None
def __init__(self, arg):
super(ProxyNode, self).__init__(arg)
self.constant_result = arg.constant_result
self._proxy_type()
def analyse_expressions(self, env):
self.arg = self.arg.analyse_expressions(env)
self._proxy_type()
return self
def _proxy_type(self):
if hasattr(self.arg, 'type'):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
def generate_result_code(self, code):
self.arg.generate_result_code(code)
def result(self):
return self.arg.result()
def is_simple(self):
return self.arg.is_simple()
def may_be_none(self):
return self.arg.may_be_none()
def generate_evaluation_code(self, code):
self.arg.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.arg.generate_disposal_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CloneNode(CoercionNode):
# This node is employed when the result of another node needs
# to be used multiple times. The argument node's result must
# be in a temporary. This node "borrows" the result from the
# argument node, and does not generate any evaluation or
# disposal code for it. The original owner of the argument
# node is responsible for doing those things.
subexprs = [] # Arg is not considered a subexpr
nogil_check = None
def __init__(self, arg):
CoercionNode.__init__(self, arg)
self.constant_result = arg.constant_result
if hasattr(arg, 'type'):
self.type = arg.type
self.result_ctype = arg.result_ctype
if hasattr(arg, 'entry'):
self.entry = arg.entry
def result(self):
return self.arg.result()
def may_be_none(self):
return self.arg.may_be_none()
def type_dependencies(self, env):
return self.arg.type_dependencies(env)
def infer_type(self, env):
return self.arg.infer_type(env)
def analyse_types(self, env):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
self.is_temp = 1
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
return self
def coerce_to(self, dest_type, env):
if self.arg.is_literal:
return self.arg.coerce_to(dest_type, env)
return super(CloneNode, self).coerce_to(dest_type, env)
def is_simple(self):
return True # result is always in a temp (or a name)
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
def generate_disposal_code(self, code):
pass
def free_temps(self, code):
pass
class CMethodSelfCloneNode(CloneNode):
# Special CloneNode for the self argument of builtin C methods
# that accepts subtypes of the builtin type. This is safe only
# for 'final' subtypes, as subtypes of the declared type may
# override the C method.
def coerce_to(self, dst_type, env):
if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
return self
return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
# Simply returns the module object
type = py_object_type
is_temp = False
subexprs = []
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def calculate_result_code(self):
return Naming.module_cname
def generate_result_code(self, code):
pass
class DocstringRefNode(ExprNode):
# Extracts the docstring of the body element
subexprs = ['body']
type = py_object_type
is_temp = True
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
self.body = body
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
self.result(), self.body.result(),
code.intern_identifier(StringEncoding.EncodedString("__doc__")),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code= UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
#------------------------------------------------------------------------------------
int_pow_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) {
%(type)s t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
#if %(signed)s
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1); /* 1 or b */
b *= b;
e >>= 1;
}
return t;
}
""")
# ------------------------------ Division ------------------------------------
div_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s q = a / b;
%(type)s r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
""")
mod_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = a %% b;
r += ((r != 0) & ((r ^ b) < 0)) * b;
return r;
}
""")
mod_float_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = fmod%(math_h_modifier)s(a, b);
r += ((r != 0) & ((r < 0) ^ (b < 0))) * b;
return r;
}
""")
cdivision_warning_utility_code = UtilityCode(
proto="""
static int __Pyx_cdivision_warning(const char *, int); /* proto */
""",
impl="""
static int __Pyx_cdivision_warning(const char *filename, int lineno) {
#if CYTHON_COMPILING_IN_PYPY
filename++; // avoid compiler warnings
lineno++;
return PyErr_Warn(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ");
#else
return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
filename,
lineno,
__Pyx_MODULE_NAME,
NULL);
#endif
}
""")
# from intobject.c
division_overflow_test_code = UtilityCode(
proto="""
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
""")
|
thedrow/cython
|
Cython/Compiler/ExprNodes.py
|
Python
|
apache-2.0
| 450,199
|
[
"VisIt"
] |
86bccd47993f69203123397f0033b6e5b66f26cfb57179240c6538efe4e864de
|
"""
This module contains code for controlling the articulatory synthesizer at a high level.
"""
import audiosegment as asg
import collections
import copy
import experiment.configuration as configuration # pylint: disable=locally-disabled, import-error
import imageio
import itertools
import logging
import numpy as np
import os
import output.voice.synthesizer as synth # pylint: disable=locally-disabled, import-error
import pandas
import pickle
import primordialooze as po
import random
import tempfile
import traceback
class SynthModel:
"""
This class provides methods for pretraining and training the underlying machine
learning model that controls the articulatory synthesizer.
"""
def __init__(self, config):
"""
Takes a configuration object for its hyperparameters.
"""
allowedvals = config.getdict('synthesizer', 'allowed-articulator-values')
# We are given a dict of str: str, but we want a dict of str: list{float}.
# So try to do the conversion, raising an appropriate exception if unsuccessful.
self._allowed_values = collections.OrderedDict()
for k, v in allowedvals.items():
new_k = k.lower()
new_v = config.make_list_from_str(v, type=float)
self._allowed_values[new_k] = new_v
# Get parameters for all phases
self._articulation_time_points_ms = config.getlist('synthesizer', 'articulation-time-points-ms', type=float)
self._narticulators = len(self._allowed_values.keys())
self._articulation_duration_ms = config.getfloat('synthesizer', 'phoneme-durations-ms')
self._nworkers = config.getint('synthesizer', 'nworkers-phase0')
experimentname = config.getstr('experiment', 'name')
# Get spectrogram information
self.seconds_per_spectrogram = config.getfloat('preprocessing', 'seconds_per_spectrogram')
self.window_length_s = config.getfloat('preprocessing', 'spectrogram_window_length_s')
self.overlap = config.getfloat('preprocessing', 'spectrogram_window_overlap')
self.resample_to_hz = config.getfloat('preprocessing','spectrogram_sample_rate_hz')
# Get parameters for Phase 0
self._nagents_phase0 = config.getint('synthesizer', 'nagents-phase0')
self._phase0_niterations = config.getstr('synthesizer', 'niterations-phase0')
self._phase0_fitness_target = config.getstr('synthesizer', 'fitness-target-phase0')
self._fraction_top_selection_phase0 = config.getfloat('synthesizer', 'fraction-of-generation-to-select-phase0')
self._fraction_mutate_phase0 = config.getfloat('synthesizer', 'fraction-of-generation-to-mutate-phase0')
self._anneal_after_phase0 = config.getbool('synthesizer', 'anneal-after-phase0')
phase0_artifacts_dir = config.getstr('synthesizer', 'pretraining-output-directory')
self.phase0_artifacts_dir = os.path.join(phase0_artifacts_dir, experimentname)
crossoverfunc_phase0 = config.getstr('synthesizer', 'crossover-function-phase0')
if crossoverfunc_phase0 == '2-point':
self._phase0_crossover_function = None # Primordial Ooze defaults to 2-point
elif crossoverfunc_phase0.strip().lower() != "none":
raise ValueError("crossover-function-phase0 must be either '2-point' or 'None', but is {}".format(crossoverfunc_phase0))
# Get parameters for Phase 1
self._nagents_phase1 = config.getint('synthesizer', 'nagents-phase1')
self._fraction_top_selection_phase1 = config.getfloat('synthesizer', 'fraction-of-generation-to-select-phase1')
self._fraction_mutate_phase1 = config.getfloat('synthesizer', 'fraction-of-generation-to-mutate-phase1')
self._anneal_during_phase1 = config.getbool('synthesizer', 'anneal-during-phase1')
phase1_artifacts_dir = config.getstr('synthesizer', 'training-output-directory')
self.phase1_artifacts_dir = os.path.join(phase1_artifacts_dir, experimentname)
# These can be lists
try:
self._phase1_niterations = config.getlist('synthesizer', 'niterations-phase1', type=int)
if len(self._phase1_niterations) == 1:
# We'll just parse it as a string instead
raise configuration.ConfigError()
except configuration.ConfigError:
self._phase1_niterations = config.getstr('synthesizer', 'niterations-phase1')
try:
self._phase1_fitness_target = config.getlist('synthesizer', 'fitness-target-phase1', type=float)
if len(self._phase1_fitness_target) == 1:
# Just parse it as a string instead
raise configuration.ConfigError()
except configuration.ConfigError:
self._phase1_fitness_target = config.getstr('synthesizer', 'fitness-target-phase1')
crossoverfunc_phase1 = config.getstr('synthesizer', 'crossover-function-phase1')
if crossoverfunc_phase1 == '2-point':
self._phase1_crossover_function = None # Primordial Ooze defaults to 2-point
elif crossoverfunc_phase1.strip().lower() != "none":
raise ValueError("crossover-function-phase1 must be either '2-point' or 'None', but is {}".format(crossoverfunc_phase1))
# Validate the fractions
if self._fraction_mutate_phase0 < 0.0 or self._fraction_mutate_phase0 > 1.0:
raise configuration.ConfigError("Mutation fraction must be within 0.0 and 1.0, but is {}.".format(self._fraction_mutate_phase0))
if self._fraction_top_selection_phase0 < 0.0 or self._fraction_top_selection_phase0 > 1.0:
raise configuration.ConfigError("Selection fraction must be within 0.0 and 1.0, but is {}.".format(self._fraction_top_selection_phase0))
if self._fraction_mutate_phase1 < 0.0 or self._fraction_mutate_phase1 > 1.0:
raise configuration.ConfigError("Mutation fraction must be within 0.0 and 1.0, but is {}.".format(self._fraction_mutate_phase1))
if self._fraction_top_selection_phase1 < 0.0 or self._fraction_top_selection_phase1 > 1.0:
raise configuration.ConfigError("Selection fraction must be within 0.0 and 1.0, but is {}.".format(self._fraction_top_selection_phase1))
# Validate the phase 0 targets
if self._phase0_niterations.lower().strip() == "none":
self._phase0_niterations = None
else:
try:
self._phase0_niterations = int(self._phase0_niterations)
except ValueError:
raise configuration.ConfigError("Cannot convert {} into an int. This value must be 'None' or an integer.".format(self._phase0_niterations))
if self._phase0_fitness_target.lower().strip() == "none":
self._phase0_fitness_target = None
else:
try:
self._phase0_fitness_target = int(self._phase0_fitness_target)
except ValueError:
raise configuration.ConfigError("Cannot convert {} into an int. This value must be 'None' or an integer.".format(self._phase0_niterations))
# Validate the phase 1 targets
if type(self._phase1_niterations) == str and self._phase1_niterations.lower().strip() == "none":
self._phase1_niterations = None
elif type(self._phase1_niterations) == list:
try:
self._phase1_niterations = [int(i) for i in self._phase1_niterations]
except ValueError:
raise configuration.ConfigError("Cannot convert each item in {} into an int.".format(self._phase1_niterations))
else:
try:
self._phase1_niterations = int(self._phase1_niterations)
except ValueError:
raise configuration.ConfigError("Cannot convert {} into an int or a list. This value must be 'None' or an integer or a list of integers.".format(self._phase1_niterations))
if type(self._phase1_fitness_target) == str and self._phase1_fitness_target.lower().strip() == "none":
self._phase1_fitness_target = None
elif type(self._phase1_fitness_target) == list:
try:
self._phase1_fitness_target = [float(f) for f in self._phase1_fitness_target]
except ValueError:
raise configuration.ConfigError("Cannot convert each item in {} into a float.".format(self._phase1_fitness_target))
else:
try:
self._phase1_fitness_target = float(self._phase1_fitness_target)
except ValueError:
raise configuration.ConfigError("Cannot convert {} into a float or a list. This value must be 'None' or a float or a list of floats.".format(self._phase1_niterations))
if self._phase0_niterations is None and self._phase0_fitness_target is None:
raise configuration.ConfigError("niterations-phase0 and fitness-target-phase0 cannot both be None.")
if self._phase1_niterations is None and self._phase1_fitness_target is None:
raise configuration.ConfigError("niterations-phase1 and fitness-target-phase1 cannot both be None.")
# The shape of each agent is a flattened synthmat
self._agentshape = (self._narticulators * len(self._articulation_time_points_ms), )
# Create a lows array and a highs array from the ordered dict of allowedvalues
# These arrays should be the same length as an agent (ntimepoints * narticulators)
# and each value in the array should correspond to a min/max value allowed at that
# timepoint.
self._allowed_lows = np.zeros((self._narticulators, len(self._articulation_time_points_ms)))
self._allowed_highs = np.zeros((self._narticulators, len(self._articulation_time_points_ms)))
for i, (_, v) in enumerate(self._allowed_values.items()):
# Each value in this dict is a list of floats. Every two of these floats should be
# a min/max pair of allowable values in the time series.
if len(v) != 2 * len(self._articulation_time_points_ms):
raise configuration.ConfigError("Could not parse the allowable synth values matrix. One of the lists has {} items, but all of them must have {}.".format(len(v), 2 * len(self._articulation_time_points_ms)))
mins = [value for j, value in enumerate(v) if j % 2 == 0]
maxes = [value for j, value in enumerate(v) if j % 2 == 1]
self._allowed_lows[i, :] = np.array(mins)
self._allowed_highs[i, :] = np.array(maxes)
self._allowed_lows = np.reshape(self._allowed_lows, self._agentshape)
self._allowed_highs = np.reshape(self._allowed_highs, self._agentshape)
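# Illustration of the expected 'allowed-articulator-values' shape (the exact
# list syntax is whatever make_list_from_str accepts): with two entries in
# articulation-time-points-ms, a value of 0.0, 0.2, 0.0, 0.1 for one
# articulator is read as interleaved (min, max) pairs per time point, i.e.
# lows = [0.0, 0.0] and highs = [0.2, 0.1] for that articulator's row.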
# We will be saving the populations sometimes
self._phase0_population = None
self._population_index = 0
self.best_agents_phase0 = None
self.best_agents_phase1 = None
self._phase1_population = None
# Create the save directories if they don't exist
os.makedirs(self.phase0_artifacts_dir, exist_ok=True)
os.makedirs(self.phase1_artifacts_dir, exist_ok=True)
def _zero_limits(self, articulator_mask):
"""
Returns self._allowed_lows and self._allowed_highs, but
with each value zeroed in it if it is NOT part of `articulator_mask`.
"""
# Make copies
lows = np.copy(self._allowed_lows)
highs = np.copy(self._allowed_highs)
# Reshape into matrix form (narticulators, ntimepoints)
lows = np.reshape(lows, (self._narticulators, -1))
highs = np.reshape(highs, (self._narticulators, -1))
# Make a zero version
zero_lows = np.zeros_like(lows)
zero_highs = np.zeros_like(highs)
# Add back in the articulators of interest
zero_lows[articulator_mask, :] = lows[articulator_mask, :]
zero_highs[articulator_mask, :] = highs[articulator_mask, :]
# Reshape back into vector form and return
return np.reshape(zero_lows, (-1,)), np.reshape(zero_highs, (-1,))
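# For example, with two articulators and articulator_mask = [0], the returned
# lows and highs keep row 0 unchanged and clamp row 1 to zero, freezing that
# articulator during the corresponding training phase.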
def pretrain(self):
"""
Pretrains the model to make noise as loudly as possible.
"""
# Create the fitness function
fitnessfunction = ParallelizableFitnessFunctionPhase0(self._narticulators, self._articulation_duration_ms, self._articulation_time_points_ms)
# Zero out the articulators we aren't using during phase 0 (but save the old limits)
saved_lows = np.copy(self._allowed_lows)
saved_highs = np.copy(self._allowed_highs)
self._allowed_lows, self._allowed_highs = self._zero_limits(synth.laryngeal_articulator_mask)
sim = po.Simulation(self._nagents_phase0, self._agentshape, fitnessfunction,
seedfunc=self._phase0_seed_function,
selectionfunc=self._phase0_selection_function,
crossoverfunc=self._phase0_crossover_function,
mutationfunc=self._phase0_mutation_function,
elitismfunc=None,
nworkers=self._nworkers,
max_agents_per_generation=self._nagents_phase0,
min_agents_per_generation=self._nagents_phase0)
best, value = sim.run(niterations=self._phase0_niterations, fitness=self._phase0_fitness_target)
self.best_agents_phase0 = list(sim.best_agents)
self._summarize_results(best, value, sim, "Phase0OutputSound.wav", is_phase0=True)
# Save the population, since we will use this population as the seed for the next phase
self._phase0_population = np.copy(sim._agents)
# Restore the original lows and highs
self._allowed_lows = saved_lows
self._allowed_highs = saved_highs
# If we want to anneal after phase 0, now's the time to do it
if self._anneal_after_phase0:
## Reshape best agent into matrix form (narticulators, ntimepoints)
bestmatrix = np.reshape(best, (self._narticulators, -1))
## Add/Subtract from each of its values
bestmatrixlows = bestmatrix - 0.05
bestmatrixhighs = bestmatrix + 0.05
## For each item in bestmatrixlows/highs, take the appropriate of min/max(best-lows/highs, currentlimits)
## This is so that we don't accidentally make the limits *less* stringent
lows = np.reshape(self._allowed_lows, (self._narticulators, -1))
highs = np.reshape(self._allowed_highs, (self._narticulators, -1))
bestmatrixlows = np.maximum(bestmatrixlows, lows)
bestmatrixhighs = np.minimum(bestmatrixhighs, highs)
## Now update our allowed highs/lows with the annealed values
lows[synth.laryngeal_articulator_mask, :] = bestmatrixlows[synth.laryngeal_articulator_mask, :]
highs[synth.laryngeal_articulator_mask, :] = bestmatrixhighs[synth.laryngeal_articulator_mask, :]
self._allowed_lows = np.reshape(lows, (-1,))
self._allowed_highs = np.reshape(highs, (-1,))
def _run_phase1_simulation(self, target, niterations, fitness_target, savefpath, fitness_function_name, target_coords, autoencoder, seedfunc=None):
if fitness_function_name.lower().strip() == 'xcor':
# Create the fitness function for phase 1
nworkers = self._nworkers
fitnessfunction = ParallelizableFitnessFunctionPhase1(self._narticulators,
self._articulation_duration_ms,
self._articulation_time_points_ms,
target)
elif fitness_function_name.lower().strip() == 'euclid':
nworkers = 0 # This fitness function cannot be evaluated in parallel worker processes, so force serial evaluation
fitnessfunction = ParallelizableFitnessFunctionDistance(self._narticulators,
self._articulation_duration_ms,
self._articulation_time_points_ms,
target_coords,
autoencoder,
self.seconds_per_spectrogram,
self.window_length_s,
self.overlap,
self.resample_to_hz)
elif fitness_function_name.lower().strip() == 'random':
nworkers = self._nworkers
fitnessfunction = ParallelizableFitnessFunctionRandom()
else:
raise ValueError("'fitness_function_name' is {}, but must be an allowed value.".format(fitness_function_name))
if seedfunc is None:
seedfunc = self._phase1_seed_function
elif not callable(seedfunc):
raise ValueError("'seedfunc' must be callable. Is: {}".format(seedfunc))
# Create the simulation and run it
sim = po.Simulation(self._nagents_phase1, self._agentshape, fitnessfunction,
seedfunc=seedfunc,
selectionfunc=self._phase1_selection_function,
crossoverfunc=self._phase1_crossover_function,
mutationfunc=self._phase1_mutation_function,
elitismfunc=None,
nworkers=nworkers,
max_agents_per_generation=self._nagents_phase1,
min_agents_per_generation=self._nagents_phase1)
best, value = sim.run(niterations=niterations, fitness=fitness_target)
self.best_agents_phase1 = list(sim.best_agents)
self._phase1_population = np.copy(sim._agents)
self._summarize_results(best, value, sim, savefpath)
return best
def save(self, fpath):
"""
Serializes the model into the given `fpath`. To load, call this module's `load` function.
"""
with open(fpath, 'wb') as f:
pickle.dump(self, f)
def train(self, target, savefpath=None, fitness_function_name='xcor', target_coords=None, autoencoder=None):
"""
Trains the model to mimic the given `target`, which should be an AudioSegment.
If `savefpath` is not None, we will save the sound that corresponds to the best agent at this location
as a WAV file.
If fitness_function_name is 'euclid', we use 1/euclidean distance between target_coords and
the embedding location as determined by encoding each item using the autoencoder as the fitness function.
If fitness_function_name is 'xcor', we use the cross correlation and ignore `target_coords` and `autoencoder`.
If fitness_function_name is 'random', we simply return a random number in the interval [0, 100] for each agent.
"""
self.target = target.name
if self._anneal_during_phase1:
masks_in_order = [
synth.jaw_articulator_mask,
synth.nasal_articulator_mask,
synth.lingual_articulator_support_mask,
synth.lingual_articulator_tongue_mask,
synth.labial_articulator_mask
]
# If we pretrained already, we should add the laryngeal group to the annealed list
annealed_masks = list(synth.laryngeal_articulator_mask) if self._phase0_population is not None else []
for maskidx, mask in enumerate(masks_in_order):
# Backup the limits
lows = np.copy(self._allowed_lows)
highs = np.copy(self._allowed_highs)
# Zero out the limits except for any that have already been annealed
zeromask = np.array(sorted(list(set(list(annealed_masks) + list(mask))))) # union of already-annealed articulators and the current mask, as a sorted index array
self._allowed_lows, self._allowed_highs = self._zero_limits(zeromask)
# Log the new limits as interleaved format
loglims = np.zeros((self._narticulators, 2 * len(self._articulation_time_points_ms)))
loglims[:, 0::2] = np.reshape(self._allowed_lows, (self._narticulators, -1))
loglims[:, 1::2] = np.reshape(self._allowed_highs, (self._narticulators, -1))
durations = [(t1, t2) for t1, t2 in zip(self._articulation_time_points_ms, self._articulation_time_points_ms)]
durations = [t for t in itertools.chain.from_iterable(durations)]
logging.info("New limits:\n{}".format(pandas.DataFrame(loglims, index=synth.articularizers, columns=durations)))
# Our target for the simulation is based on which group we are training
try:
niterations = self._phase1_niterations[maskidx]
except TypeError:
niterations = self._phase1_niterations
try:
fitnesstarget = self._phase1_fitness_target[maskidx]
except TypeError:
fitnesstarget = self._phase1_fitness_target
# Our seed function needs to pick up where we left off
if maskidx == 0:
# Just use the normal phase1 seed function
seedfunc = None
else:
# Use the special annealing seed function, which will pick up where we left off
seedfunc = Phase1AnnealingSeedFunction(np.copy(self._phase1_population))
# Now run the simulation normally
if savefpath is not None:
fpath = os.path.splitext(savefpath)[0] + "_" + str(maskidx) + ".wav"
best = self._run_phase1_simulation(target, niterations, fitnesstarget, fpath, fitness_function_name, target_coords, autoencoder, seedfunc=seedfunc)
# Add this latest mask to the list of masks that we should anneal
annealed_masks.extend(mask)
# Now actually anneal the values in this mask
## Make best into a matrix
best = np.reshape(best, (self._narticulators, -1))
## Add +/- 0.10 to the values in this mask to make new limits
annealedlows = best - 0.10
annealedhighs = best + 0.10
# Restore the lows and highs, but don't overwrite the newly annealed values
lows = np.reshape(lows, (self._narticulators, -1))
highs = np.reshape(highs, (self._narticulators, -1))
annealedlows = np.maximum(annealedlows, lows) # Make sure we don't anneal our limits to be *less* stringent
annealedhighs = np.minimum(annealedhighs, highs) # Ditto
lows[mask, :] = annealedlows[mask, :]
highs[mask, :] = annealedhighs[mask, :]
self._allowed_lows = np.reshape(lows, (-1))
self._allowed_highs = np.reshape(highs, (-1))
else:
# Run a normal simulation
self._run_phase1_simulation(target, self._phase1_niterations, self._phase1_fitness_target, savefpath, fitness_function_name, target_coords, autoencoder)
def _summarize_results(self, best, value, sim, soundfpath, is_phase0=False):
"""
Summarize `best` agent and `value`, which is its fitness.
"""
# Reshape the agent into a synthesis matrix
synthmat = np.reshape(best, (self._narticulators, len(self._articulation_time_points_ms)))
# Print the synthmat in an easily-digestible format
df = pandas.DataFrame(synthmat, index=synth.articularizers, columns=self._articulation_time_points_ms)
logging.info("Best Value: {}; Agent:\n{}".format(value, df))
if soundfpath:
# Make a sound from this agent and save it for human consumption
if is_phase0:
soundfpath = os.path.join(self.phase0_artifacts_dir, soundfpath)
else:
soundfpath = os.path.join(self.phase1_artifacts_dir, soundfpath)
seg = synth.make_seg_from_synthmat(synthmat, self._articulation_duration_ms / 1000.0, [tp / 1000.0 for tp in self._articulation_time_points_ms])
seg.export(soundfpath, format="WAV")
sim.dump_history_csv(os.path.splitext(soundfpath)[0] + ".csv")
def _phase0_seed_function(self):
"""
Returns an agent of random uniform values between each articulator's min, max.
"""
return np.random.uniform(self._allowed_lows, self._allowed_highs)
def _phase1_seed_function(self):
"""
If we have pretrained (done phase 0), we reuse agents from that phase's population,
clipped to the currently allowed articulator limits.
If we do not have a pretrained population, we simply draw random uniform values.
"""
if self._phase0_population is not None:
# Grab the next agent
agent = self._phase0_population[self._population_index, :]
# Adjust index for next time
self._population_index += 1
if self._population_index >= self._phase0_population.shape[0]:
self._population_index = 0
# Clip to allowed values
agent = np.clip(agent, self._allowed_lows, self._allowed_highs)
return agent
else:
return np.random.uniform(self._allowed_lows, self._allowed_highs)
def _phase0_selection_function(self, agents, fitnesses):
"""
Take the top x percent.
"""
nagents = int(agents.shape[0] * self._fraction_top_selection_phase0)
if nagents < 1:
nagents = 1
return agents[0:nagents]
def _phase1_selection_function(self, agents, fitnesses):
"""
Take the top x percent.
"""
nagents = int(agents.shape[0] * self._fraction_top_selection_phase1)
if nagents < 1:
nagents = 1
return agents[0:nagents]
def _phase0_crossover_function(self, agents):
"""
Do nothing. This behavior is specified by the config file.
"""
return agents
def _phase1_crossover_function(self, agents):
"""
Do nothing. This behavior is specified by the config file.
"""
return agents
def _phase0_mutation_function(self, agents):
"""
Mutates some number of agents each generation via Gaussian distributions
with mean=value to mutate, stdev=mutation_stdev.
Ensures that the agents do not stray outside the allowed bounds for values.
"""
nagents = int(self._fraction_mutate_phase0 * agents.shape[0])
if nagents < 1:
nagents = 1
idxs = np.random.choice(agents.shape[0], size=nagents, replace=False)
agents[idxs, :] = np.random.normal(agents[idxs, :], 0.1)
# make sure to clip to the allowed boundaries
agents[idxs, :] = np.clip(agents[idxs, :], self._allowed_lows, self._allowed_highs)
return agents
def _phase1_mutation_function(self, agents):
"""
Does exactly the same thing as Phase 0 to start with. If we need to change, we can.
"""
return self._phase0_mutation_function(agents)
class ParallelizableFitnessFunctionPhase0:
def __init__(self, narticulators, duration_ms, time_points_ms):
"""
:param narticulators: How many articulators?
:param duration_ms: The total ms of articulation we should create from each agent.
:param time_points_ms: The time points (in ms) at which to change the values of each articulator.
"""
self.narticulators = narticulators
self.duration_ms = duration_ms
self.time_points_ms = time_points_ms
self.ntimepoints = len(time_points_ms)
def __call__(self, agent):
"""
This fitness function evaluates an agent on how much sound it makes when run through
the articulatory synthesizer as a synthmat.
"""
synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))
seg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])
# The fitness of an agent in this phase is determined by the RMS of the sound it makes,
return seg.rms
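# A minimal sketch of how this fitness object is meant to be used (the articulator
# count and time points below are illustrative placeholders, not values from the
# original configuration):
#
#     fitness = ParallelizableFitnessFunctionPhase0(narticulators=10,
#                                                   duration_ms=500,
#                                                   time_points_ms=[0, 250])
#     score = fitness(agent)  # agent is a flat array of 10 * 2 articulator values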
class ParallelizableFitnessFunctionPhase1:
def __init__(self, narticulators, duration_ms, time_points_ms, prototype_sound):
"""
:param narticulators: How many articulators?
:param duration_ms: The total ms of articulation we should create from each agent.
:param time_points_ms: The time points (in ms) at which to change the values of each articulator.
:param prototype_sound: AudioSegment of the prototypical sound for this index.
"""
self.narticulators = narticulators
self.duration_ms = duration_ms
self.time_points_ms = time_points_ms
self.ntimepoints = len(time_points_ms)
# Forward process the target sound so that we don't have to do it every single time we execute
target = prototype_sound.to_numpy_array().astype(float)
target += abs(min(target))
if max(target) != min(target):
target /= max(target) - min(target)
self._normalized_target = target
assert sum(self._normalized_target[self._normalized_target < 0]) == 0
def __call__(self, agent):
"""
This fitness function evaluates an agent on how well it matches the prototype sound.
"""
synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))
seg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])
# Shift the wave form up by most negative value
ours = seg.to_numpy_array().astype(float)
most_neg_val = min(ours)
ours += abs(most_neg_val)
if max(ours) != min(ours):
ours /= max(ours) - min(ours)
assert sum(ours[ours < 0]) == 0
# Cross correlate with some amount of zero extension
xcor = np.correlate(ours, self._normalized_target, mode='full')
# Find the single maximum value along the xcor vector
# This is the place at which the waves match each other best
return max(xcor)
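# Tiny numeric illustration of the scoring idea above (not part of the original
# module): identical, normalized signals produce the largest cross-correlation peak.
#
#     >>> import numpy as np
#     >>> a = np.array([0.0, 1.0, 0.0])
#     >>> max(np.correlate(a, a, mode='full'))
#     1.0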
class ParallelizableFitnessFunctionRandom:
"""
A parallelizable fitness function to test the correlation of fitness function with
actually getting any better at saying the target.
"""
def __init__(self):
pass
def __call__(self, agent):
"""
Just returns a uniform random number between 0 and 100 every time.
"""
return random.randint(0, 100)
class ParallelizableFitnessFunctionDistance:
def __init__(self, narticulators, duration_ms, time_points_ms, target_coords, autoencoder,
seconds_per_spectrogram, window_length_s, overlap, resample_to_hz):
"""
:param narticulators: How many articulators?
:param duration_ms: The total ms of articulation we should create from each agent.
:param time_points_ms: The time points (in ms) at which to change the values of each articulator.
:param prototype_sound: AudioSegment of the prototypical sound for this index.
:param target_coords: The target coordinates
:param autoencoder: A trained autoencoder
"""
self.narticulators = narticulators
self.duration_ms = duration_ms
self.time_points_ms = time_points_ms
self.ntimepoints = len(time_points_ms)
self.target_coords = target_coords
self.autoencoder = autoencoder
self.seconds_per_spectrogram = seconds_per_spectrogram
self.window_length_s = window_length_s
self.overlap = overlap
self.resample_to_hz = resample_to_hz
def __call__(self, agent):
"""
This fitness function evaluates an agent on how well it matches the prototype sound.
"""
synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))
seg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])
if int(len(seg) / 1000) != int(self.seconds_per_spectrogram):
raise ValueError("The segments that the synthesizer is set up to create are not the right length. They are {} seconds, but need to be {} seconds.".format(len(seg) / 1000, self.seconds_per_spectrogram))
# Convert the segment to a spectrogram for input to the autoencoder
seg = seg.resample(sample_rate_Hz=self.resample_to_hz)
_fs, _ts, amps = seg.spectrogram(window_length_s=self.window_length_s, overlap=self.overlap)
# Do the exact same steps as you did when preprocessing the spectrograms and then
# feeding them into the vae in the first place
amps *= 255.0 / np.max(np.abs(amps))
amps = amps.astype(np.uint8)
amps = amps.astype(np.float32)
amps /= 255.0
amps = np.expand_dims(np.array(amps), -1) # Give it a "color" channel
amps = np.expand_dims(np.array(amps), 0) # Give it a batch channel
# Now get the location of this spectrogram in the latent space by feeding it into the encoder
try:
# Try a variational one
mean, _logvars, _encodings = self.autoencoder._encoder.predict(amps)
except ValueError:
# We have a vanilla autoencoder
mean = self.autoencoder._encoder.predict(amps)
# Return the fitness
return 1.0 / (np.linalg.norm(mean - self.target_coords) + 1e-9)
def train_on_targets(config: configuration.Configuration, pretrained_model: SynthModel, mimicry_targets: [(str, str, np.ndarray)], autoencoder) -> None:
"""
Trains a new SynthModel for some number of targets in `mimicry_targets`.
Does this by using whatever fitness function is specified in the config file.
If the config file specifies cross correlation, then the autoencoder is not used,
and instead the candidate sounds are compared against the target sound directly.
If the config file specifies that we should use the embedding space, then candidate
sounds are fed into the encoder and their locations in latent space are used -
specifically, we try to minimize the distance between the candidate's location in
latent space and the target's location in latent space.
:param config: The configuration file for the experiment
:param pretrained_model: A (potentially pretrained) synthesis model.
:param mimicry_targets: A list of tuples of the form (spectrogramfpath, audiofpath, coordinates-in-latent-space)
:param autoencoder: An autoencoder to use in the fitness function for evaluating the location of candidate
sounds in latent space.
:returns: A list of trained models
"""
# Get some configurations
sample_rate_hz = config.getfloat('preprocessing', 'spectrogram_sample_rate_hz')
sample_width = config.getint('preprocessing', 'bytewidth')
nchannels = config.getint('preprocessing', 'nchannels')
fitness_function_name = config.getstr('synthesizer', 'fitness-function')
if fitness_function_name.lower() not in ('xcor', 'euclid', 'random'):
raise ValueError("Fitness function must be one of 'xcor', 'euclid', or 'random', but is {}".format(fitness_function_name))
# We are going to try to train several synthesizers
trained_models = []
logging.info("Attempting to train synthesizers for each of the following {} items: {}".format(len(mimicry_targets), mimicry_targets))
# Now train the models
for specfpath, audiofpath, target_coords in mimicry_targets:
# We will save a file to this name in a directory specified by the config file
savefpath = os.path.basename(audiofpath) + ".synthmimic.wav"
# Since it takes so long to train each of these, it would be a real shame
# if something lame like a non-existent file crashed us after we trained
# several. Let's just log any errors and move on.
try:
msg = "Training the model to mimic {} and saving to: {}".format(audiofpath, savefpath)
print(msg)
logging.info(msg)
# Get the appropriate audiosegment
seg = asg.from_file(audiofpath).resample(sample_rate_Hz=sample_rate_hz, sample_width=sample_width, channels=nchannels)
# Make a deep copy of the pretrained model
copymodel = copy.deepcopy(pretrained_model)
# Train the new copy
copymodel.train(seg, savefpath=savefpath, fitness_function_name=fitness_function_name, target_coords=target_coords, autoencoder=autoencoder)
# Add it to the list of trained models
trained_models.append(copymodel)
except Exception:
print("Something went wrong with target found at {} (spec: {}). Specifically:.".format(audiofpath, specfpath))
traceback.print_exc()
return trained_models
def load(fpath):
"""
Deserializes an object created from `SynthModel.save()` into a SynthModel object.
"""
with open(fpath, 'rb') as f:
return pickle.load(f)
class Phase1AnnealingSeedFunction:
def __init__(self, population):
self._index = 0
self._pop = population
def __call__(self):
agent = self._pop[self._index,:]
self._index += 1
return agent
|
MaxStrange/ArtieInfant
|
Artie/internals/motorcortex/motorcortex.py
|
Python
|
mit
| 37,918
|
[
"Gaussian"
] |
1958d1628bbe60d4688280e8dc7963d9f40b13377dc73531323ea22c88e80a6e
|
#!/usr/bin/env python
"""
Serial version setup file
"""
print('building serial version')
## first load variables from PyMFEM_ROOT/setup_local.py
import sys
import os
ddd = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
root = os.path.abspath(os.path.join(ddd, '..', '..'))
sys.path.insert(0, root)
from setup_local import *
## remove current directory from path
print("__file__", os.path.abspath(__file__))
if '' in sys.path:
sys.path.remove('')
items = [x for x in sys.path if os.path.abspath(x) == os.path.dirname(os.path.abspath(__file__))]
for x in items:
sys.path.remove(x)
print("sys path", sys.path)
## this forces to use compiler written in setup_local.py
if cc_ser != '': os.environ['CC'] = cc_ser
if cxx_ser != '': os.environ['CXX'] = cxx_ser
from distutils.core import Extension, setup
from distutils import sysconfig
modules= ["io_stream", "vtk", "sort_pairs", "datacollection",
"cpointers", "symmat",
"globals", "mem_manager", "device", "hash", "stable3d",
"error", "array", "common_functions", "socketstream", "handle",
"fe_base", "fe_fixed_order", "fe_h1", "fe_l2",
"fe_nd", "fe_nurbs", "fe_pos", "fe_rt", "fe_ser", "doftrans",
"segment", "point", "hexahedron", "quadrilateral",
"tetrahedron", "triangle", "wedge",
"blockvector", "blockoperator", "blockmatrix",
"vertex", "sets", "element", "table", "fe",
"mesh", "fespace",
"fe_coll", "coefficient",
"linearform", "vector", "lininteg", "complex_operator",
"complex_fem",
"gridfunc", "hybridization", "bilinearform",
"bilininteg", "intrules", "sparsemat", "densemat",
"solvers", "estimators", "mesh_operators", "ode",
"sparsesmoothers",
"matrix", "operators", "ncmesh", "eltrans", "geom",
"nonlininteg", "nonlinearform", "restriction",
"fespacehierarchy", "multigrid", "constraints",
"transfer"]
sources = {name: [name + "_wrap.cxx"] for name in modules}
proxy_names = {name: '_'+name for name in modules}
import numpy
numpyinc = numpy.get_include()
print("numpy inc", numpyinc)
include_dirs = [mfemserbuilddir, mfemserincdir, mfemsrcdir, numpyinc,]
library_dirs = [mfemserlnkdir,]
libraries = ['mfem']
if add_cuda:
include_dirs.append(cudainc)
if add_libceed:
include_dirs.append(libceedinc)
extra_compile_args = [cxx11flag, '-DSWIG_TYPE_TABLE=PyMFEM']
import six
if six.PY3:
macros = [('TARGET_PY3', '1'), ('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')]
else:
macros = []
ext_modules = [Extension(proxy_names[modules[0]],
sources=sources[modules[0]],
extra_compile_args = extra_compile_args,
extra_link_args = [],
include_dirs = include_dirs,
library_dirs = library_dirs,
runtime_library_dirs = library_dirs,
libraries = libraries,
define_macros=macros),]
ext_modules.extend([Extension(proxy_names[name],
sources=sources[name],
extra_compile_args = extra_compile_args,
extra_link_args = [],
include_dirs = include_dirs,
runtime_library_dirs = library_dirs,
library_dirs = library_dirs,
libraries = libraries,
define_macros=macros)
for name in modules[1:]])
### read version number from __init__.py
path = os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))),
'..', '__init__.py')
fid = open(path, 'r')
lines=fid.readlines()
fid.close()
for x in lines:
if x.strip().startswith('__version__'):
version = eval(x.split('=')[-1].strip())
setup (name = 'mfem_serial',
version = version,
author = "S.Shiraiwa",
description = """MFEM wrapper""",
ext_modules = ext_modules,
py_modules = modules,
)
|
mfem/PyMFEM
|
mfem/_ser/setup.py
|
Python
|
bsd-3-clause
| 4,193
|
[
"VTK"
] |
dd98c42d0b1e690ead702b2cd9100918587ebc30fd66f369568e12fb51970c53
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initialize and configure Flask-Script extension.
Configuration
^^^^^^^^^^^^^
The following configuration variables are provided:
===================== =======================================================
`bind address` Preferred binding address of the server. Can be used to
select a specific interface or to bind to all via
`0.0.0.0`.
`bind port` Preferred binding port of the server. Can differ from
the one stated in `CFG_SITE_URL` so it can be accessed
via reverse proxy.
===================== =======================================================
They are assigned by the following parameters, in decreasing priority:
1. Command line arguments of `inveniomanage runserver`
2. `SERVER_BIND_ADDRESS` and `SERVER_BIND_PORT` configuration
3. Values guessed from `CFG_SITE_URL`
4. Defaults (`127.0.0.1:80`)
"""
from __future__ import print_function
import functools
import re
import ssl
from types import FunctionType
from flask import current_app, flash
from flask_registry import ModuleAutoDiscoveryRegistry, RegistryProxy
from flask_script import Manager as FlaskExtManager
from flask_script.commands import Clean, Server, Shell, ShowUrls
from invenio_base.signals import post_command, pre_command
from six.moves import urllib
from werkzeug.utils import find_modules, import_string
def change_command_name(method=None, new_name=None):
"""Change command name to `new_name` or replace '_' by '-'."""
if method is None:
return functools.partial(change_command_name, new_name=new_name)
if new_name is None:
new_name = method.__name__.replace('_', '-')
method.__name__ = new_name
return method
def generate_secret_key():
"""Generate secret key."""
import string
import random
rng = random.SystemRandom()
return ''.join(
rng.choice(string.ascii_letters + string.digits)
for dummy in range(0, 256)
)
def print_progress(p, L=40, prefix='', suffix=''):
"""Print textual progress bar."""
bricks = int(p * L)
print('\r{prefix} [{bricks}{spaces}] {progress}% {suffix}'.format(
prefix=prefix, suffix=suffix,
bricks='#' * bricks, spaces=' ' * (L - bricks),
progress=int(p * 100),
), end=' ')
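# Illustrative call (not part of the original module): print_progress(0.5, prefix='Upgrading')
# renders something like "Upgrading [####################                    ] 50%".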
def check_for_software_updates(flash_message=False):
"""Check for a new release of Invenio.
:return: True if you have latest version, else False if you need to upgrade
or None if server was not reachable.
"""
from invenio.config import CFG_VERSION
from invenio_base.i18n import _
try:
find = re.compile('Invenio v[0-9]+.[0-9]+.[0-9]+(\-rc[0-9])?'
' is released')
release_notes = 'https://raw.githubusercontent.com/' \
'inveniosoftware/invenio/master/RELEASE-NOTES'
webFile = urllib.request.urlopen(release_notes)
temp = ""
version = ""
version1 = ""
while True:
temp = webFile.readline()
match1 = find.match(temp)
try:
version = match1.group()
break
except Exception:
pass
if not temp:
break
webFile.close()
submatch = re.compile('[0-9]+.[0-9]+.[0-9]+(\-rc[0-9])?')
version1 = submatch.search(version)
web_version = version1.group().split(".")
local_version = CFG_VERSION.split(".")
if (web_version[0] > local_version[0] or
web_version[0] == local_version[0] and
web_version[1] > local_version[1] or
web_version[0] == local_version[0] and
web_version[1] == local_version[1] and
web_version[2] > local_version[2]):
if flash_message:
flash(_('A newer version of Invenio is available for '
'download. You may want to visit '
'<a href="%(wiki)s">%()s</a>',
wiki='<a href=\"http://invenio-software.org/wiki/'
'/Installation/Download'), 'warning(html_safe)')
return False
except Exception as e:
print(e)
if flash_message:
flash(_('Cannot download or parse release notes '
'from %(release_notes)s', release_notes=release_notes),
'error')
return None
return True
class Manager(FlaskExtManager):
"""Custom manager implementation with signaling support."""
def add_command(self, name, command):
"""Wrap default ``add_command`` method."""
sender = command.run if isinstance(command.run, FunctionType) \
else command.__class__
class SignalingCommand(command.__class__):
def __call__(self, *args, **kwargs):
app = self.app if not len(args) else args[0]
with app.test_request_context():
pre_command.send(sender, args=args, **kwargs)
res = super(SignalingCommand, self).__call__(*args, **kwargs)
with app.test_request_context():
post_command.send(sender, args=args, **kwargs)
return res
command.__class__ = SignalingCommand
return super(Manager, self).add_command(name, command)
def set_serve_static_files(sender, *args, **kwargs):
"""Enable serving of static files for `runserver` command.
Normally Apache serves static files, but during development and if you are
using the Werkzeug standalone development server, you can set this flag to
`True`, to enable static file serving.
"""
current_app.config.setdefault('CFG_FLASK_SERVE_STATIC_FILES', True)
pre_command.connect(set_serve_static_files, sender=Server)
def create_ssl_context(config):
"""Create :class:`ssl.SSLContext` from application config.
:param config: Dict-like application configuration.
:returns: A valid context or in case TLS is not enabled `None`.
The following configuration variables are processed:
============================ ==============================================
`SERVER_TLS_ENABLE` If `True`, a SSL context will be created. In
this case, the required configuration
variables must be provided.
`SERVER_TLS_KEY` (required) Filepath (string) of private key provided as
PEM file.
`SERVER_TLS_CERT` (required) Filepath (string) of your certificate plus
all intermediate certificate, concatenated in
that order and stored as PEM file.
`SERVER_TLS_KEYPASS` If private key is encrypted, a password can be
provided.
`SERVER_TLS_PROTOCOL` String that selects a protocol from
`ssl.PROTOCOL_*`. Defaults to `SSLv23`. See
:mod:`ssl` for details.
`SERVER_TLS_CIPHERS` String that selects possible ciphers according
to the `OpenSSL cipher list format
<https://www.openssl.org/docs/apps/
ciphers.html>`_
`SERVER_TLS_DHPARAMS` Filepath (string) to parameters for
Diffie-Helman key exchange. If not set the
built-in parameters are used.
`SERVER_TLS_ECDHCURVE` Curve (string) that should be used for
Elliptic Curve-based Diffie-Helman key
exchange. If not set, the defaults provided by
OpenSSL are used.
============================ ==============================================
.. note:: In case `None` is returned because of a non-enabling
configuration, TLS will be disabled. It is **not** possible to have a
TLS and non-TLS configuration at the same time. So if TLS is activated,
no non-TLS connection are accepted.
    .. important:: Keep in mind to change `CFG_SITE_URL` and
`CFG_SITE_SECURE_URL` according to your TLS configuration. This does
not only include the protocol (`http` vs `https`) but also the hostname
that has to match the common name in your certificate. If a wildcard
certificate is provided, the hostname stated in
`CFG_SITE[_SECURE]_URL` must match the wildcard pattern.
"""
ssl_context = None
if config.get('SERVER_TLS_ENABLE', False):
if 'SERVER_TLS_KEY' not in config \
or 'SERVER_TLS_CERT' not in config:
raise AttributeError(
'`SERVER_TLS_KEY` and `SERVER_TLS_CERT` required!'
)
# CLIENT_AUTH creates a server context, so do not get confused here
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
if 'SERVER_TLS_PROTOCOL' in config:
ssl_context.protocol = getattr(
ssl,
'PROTOCOL_{}'.format(config.get('SERVER_TLS_PROTOCOL'))
)
ssl_context.load_cert_chain(
certfile=config.get('SERVER_TLS_CERT'),
keyfile=config.get('SERVER_TLS_KEY'),
password=config.get('SERVER_TLS_KEYPASS', None)
)
if 'SERVER_TLS_CIPHERS' in config:
ssl_context.set_ciphers(
config.get('SERVER_TLS_CIPHERS')
)
if 'SERVER_TLS_DHPARAMS' in config:
ssl_context.load_dh_params(
config.get('SERVER_TLS_DHPARAMS')
)
if 'SERVER_TLS_ECDHCURVE' in config:
ssl_context.set_ecdh_curve(
config.get('SERVER_TLS_ECDHCURVE')
)
# that one seems to be required for werkzeug
ssl_context.check_hostname = False
return ssl_context
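# Minimal sketch of an enabling configuration (the file paths below are
# placeholders, not values shipped with Invenio):
#
#     config = {
#         'SERVER_TLS_ENABLE': True,
#         'SERVER_TLS_CERT': '/etc/ssl/certs/invenio-chain.pem',
#         'SERVER_TLS_KEY': '/etc/ssl/private/invenio.key',
#     }
#     ssl_context = create_ssl_context(config)  # ssl.SSLContext, or None if disabled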
def register_manager(manager):
"""Register all manager plugins and default commands with the manager."""
from six.moves.urllib.parse import urlparse
managers = RegistryProxy('managers', ModuleAutoDiscoveryRegistry, 'manage')
def extract_name(name):
"""Guess manager name."""
parts = name.split('.')
if len(parts) == 2:
return parts[0].split('_')[-1]
return parts[-2]
with manager.app.app_context():
for script in find_modules('invenio_base.scripts'):
manager.add_command(script.split('.')[-1],
import_string(script + ':manager'))
for script in managers:
if script.__name__ == 'invenio_base.manage':
continue
manager.add_command(extract_name(script.__name__),
getattr(script, 'manager'))
manager.add_command("clean", Clean())
manager.add_command("show-urls", ShowUrls())
manager.add_command("shell", Shell())
parsed_url = urlparse(manager.app.config.get('CFG_SITE_URL'))
host = manager.app.config.get(
'SERVER_BIND_ADDRESS',
parsed_url.hostname or '127.0.0.1'
)
port = manager.app.config.get(
'SERVER_BIND_PORT',
parsed_url.port or 80
)
ssl_context = create_ssl_context(manager.app.config)
runserver = Server(host=host, port=port, ssl_context=ssl_context)
manager.add_command("runserver", runserver)
# FIXME separation of concerns is violated here.
from invenio_ext.collect import collect
collect.init_script(manager)
from invenio_ext.assets import command, bower
manager.add_command("assets", command)
manager.add_command("bower", bower)
|
SamiHiltunen/invenio-ext
|
invenio_ext/script/__init__.py
|
Python
|
gpl-2.0
| 12,586
|
[
"VisIt"
] |
05885c7d27cbc9444a5b265e4d416c3b7964f9ff4cb562ec5dc53a7041e72a8d
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Regcm(AutotoolsPackage):
"""RegCM, ICTP Regional Climate Model (https://ictp.it)."""
homepage = 'https://gforge.ictp.it/gf/project/regcm/'
version('4.7.0', sha256='456631c10dcb83d70e51c3babda2f7a1aa41ed9e60cb4209deb3764655267519',
url='https://gforge.ictp.it/gf/download/frsrelease/259/1845/RegCM-4.7.0.tar.gz')
variant('debug', default=False,
description='Build RegCM using debug options.')
variant('profile', default=False,
description='Build RegCM using profiling options.')
variant('singleprecision', default=False,
description='Build RegCM using single precision float type.')
# On Intel and PGI compilers, multiple archs can be built at the same time,
# producing a so-called fat binary. Unfortunately, gcc builds only the last
# architecture provided at the configure line.
# For this reason, we allow a single arch when using GCC (checks are
# performed below in the configure_args).
# Moreover, RegCM supports optimizations only for GCC and Intel compilers.
# To sum up:
# - intel: a user is able to build a single executables for all the
# combinations of architectures (e.g. `--extension=knl,skl,bdw,nhl`);
# - gcc: a user is allowed to build an executable using a single
# optimization/extension;
# - other compilers: no extensions/optimizations are supported.
#
# See also discussions: #974, #9934, #10797.
extensions = ('knl', 'skl', 'bdw', 'nhl')
variant('extension', values=any_combination_of(*extensions),
description='Build extensions for a specific architecture. Only '
'available for GCC and Intel compilers; moreover, '
'GCC builds only one architecture optimization.')
variant('pnetcdf', default=False,
description='Build NetCDF using the high performance parallel '
'NetCDF implementation.')
depends_on('netcdf-c')
depends_on('netcdf-fortran')
depends_on('hdf5')
depends_on('mpi')
depends_on('netcdf-c +parallel-netcdf', when='+pnetcdf')
intel_msg = ('Intel compiler not working with this specific version of '
'RegCM (generates a bug at runtime): please install a newer '
'version of RegCM or use a different compiler.')
conflicts('%intel', when='@4.7.0', msg=intel_msg)
# 'make' sometimes crashes when compiling with more than 10-12 cores.
# Moreover, parallel compile time is ~ 1m 30s, while serial is ~ 50s.
parallel = False
def flag_handler(self, name, flags):
if name == 'fflags' and self.compiler.fc.endswith('gfortran'):
flags.extend(['-Wall', '-Wextra', '-Warray-temporaries',
'-Wconversion', '-fimplicit-none', '-fbacktrace',
'-ffree-line-length-0', '-finit-real=nan',
'-ffpe-trap=zero,overflow,underflow', '-fcheck=all'])
elif name == 'ldlibs':
flags.extend(['-lnetcdff', '-lnetcdf'])
if self.compiler.fc.endswith('gfortran'):
flags.extend(['-lm', '-ldl'])
else:
flags.extend(['-lhdf5_hl', '-lhdf5', '-lz'])
return (None, None, flags)
def configure_args(self):
args = ['--enable-shared']
optimizations = self.spec.variants['extension'].value
first_optim = optimizations[0]
if first_optim != 'none':
if not (self.spec.satisfies(r'%gcc')
or self.spec.satisfies(r'%intel')):
# This means the user chose some optimizations on a different
# compiler from GCC and Intel, which are the only compiler
# supported by RegCM 4.7.x.
raise InstallError('Architecture optimizations are available '
'only for GCC and Intel compilers.')
if len(optimizations) > 1 and self.spec.satisfies(r'%gcc'):
# https://github.com/spack/spack/issues/974
raise InstallError('The GCC compiler does not support '
'multiple architecture optimizations.')
# RegCM configure script treats --disable-X as --enable-X, so we
# cannot use enable_or_disable; enable only the flags requested.
args += ('--enable-' + ext for ext in optimizations)
for opt in ('debug', 'profile', 'singleprecision'):
if ('+' + opt) in self.spec:
args.append('--enable-' + opt)
# RegCM SVN6916 introduced a specific flag to use some pnetcdf calls.
if '+pnetcdf' in self.spec and '@4.7.0-SVN6916:' in self.spec:
args.append('--enable-parallel-nc')
# RegCM doesn't listen to the FFLAGS variable, so we have to route
# flags to FCFLAGS.
fcflags = list(self.spec.compiler_flags['fflags'])
# RegCM complains when compiled with gfortran.
if self.compiler.fc.endswith('gfortran'):
fcflags.append('-fno-range-check')
args.append('FCFLAGS=' + ' '.join(fcflags))
# The configure needs a hint on the MPI Fortran compiler, otherwise it
# doesn't find it and tries to compile MPI Fortran code with the system
# Fortran non-MPI compiler.
args.append('MPIFC=' + self.spec['mpi'].mpifc)
return args
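# Illustrative install specs this recipe accepts (placeholders, not an exhaustive list):
#
#     spack install regcm@4.7.0 %gcc extension=skl    # GCC builds a single architecture
#     spack install regcm@4.7.0 %gcc +pnetcdf +debug  # parallel NetCDF plus debug flags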
|
LLNL/spack
|
var/spack/repos/builtin/packages/regcm/package.py
|
Python
|
lgpl-2.1
| 5,626
|
[
"NetCDF"
] |
14671a08280fdc00eb2aef9ead352e499d1b9f164243f819ca63fa44f40f0c31
|
# Copyright 2010-2011 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides code to access the TogoWS integrated websevices of DBCLS, Japan.
This module aims to make the TogoWS (from DBCLS, Japan) easier to use. See:
http://togows.dbcls.jp/
The TogoWS REST service provides simple access to a range of databases, acting
as a proxy to shield you from all the different provider APIs. This works using
simple URLs (which this module will construct for you). For more details, see
http://togows.dbcls.jp/site/en/rest.html
The functionality is somewhat similar to Biopython's Bio.Entrez module which
provides access to the NCBI's Entrez Utilities (E-Utils) which also covers a
wide range of databases.
Currently TogoWS does not provide any usage guidelines (unlike the NCBI whose
requirements are reasonably clear). To avoid risking overloading the service,
Biopython will only allow three calls per second.
The TogoWS SOAP service offers a more complex API for calling web services
(essentially calling remote functions) provided by DDBJ, KEGG and PDBj. For
example, this allows you to run a remote BLAST search at the DDBJ. This is
not yet covered by this module, however there are lots of Python examples
on the TogoWS website using the SOAPpy python library. See:
http://togows.dbcls.jp/site/en/soap.html
http://soapy.sourceforge.net/
"""
import urllib
import urllib2
import time
from Bio._py3k import _binary_to_string_handle, _as_bytes
#Constant
_BASE_URL = "http://togows.dbcls.jp"
#Caches:
_search_db_names = None
_entry_db_names = None
_entry_db_fields = {}
_entry_db_formats = {}
_convert_formats = []
def _get_fields(url):
"""Queries a TogoWS URL for a plain text list of values (PRIVATE)."""
handle = _open(url)
fields = handle.read().strip().split()
handle.close()
return fields
def _get_entry_dbs():
return _get_fields(_BASE_URL + "/entry")
def _get_entry_fields(db):
return _get_fields(_BASE_URL + "/entry/%s?fields" % db)
def _get_entry_formats(db):
return _get_fields(_BASE_URL + "/entry/%s?formats" % db)
def _get_convert_formats():
return [pair.split(".") for pair in \
_get_fields(_BASE_URL + "/convert/")]
def entry(db, id, format=None, field=None):
"""TogoWS fetch entry (returns a handle).
db - database (string), see list below.
    id - identifier (string) or a list of identifiers (either as a list of
strings or a single string with comma separators).
format - return data file format (string), options depend on the database
e.g. "xml", "json", "gff", "fasta", "ttl" (RDF Turtle)
field - specific field from within the database record (string)
e.g. "au" or "authors" for pubmed.
At the time of writing, this includes the following:
KEGG: compound, drug, enzyme, genes, glycan, orthology, reaction,
module, pathway
DDBj: ddbj, dad, pdb
NCBI: nuccore, nucest, nucgss, nucleotide, protein, gene, onim,
homologue, snp, mesh, pubmed
EBI: embl, uniprot, uniparc, uniref100, uniref90, uniref50
For the current list, please see http://togows.dbcls.jp/entry/
This function is essentially equivalent to the NCBI Entrez service
EFetch, available in Biopython as Bio.Entrez.efetch(...), but that
does not offer field extraction.
"""
    global _entry_db_names, _entry_db_fields, _entry_db_formats
if _entry_db_names is None:
_entry_db_names = _get_entry_dbs()
if db not in _entry_db_names:
raise ValueError("TogoWS entry fetch does not officially support "
"database '%s'." % db)
if field:
try:
fields = _entry_db_fields[db]
except KeyError:
fields = _get_entry_fields(db)
_entry_db_fields[db] = fields
if field not in fields:
raise ValueError("TogoWS entry fetch does not explicitly support "
"field '%s' for database '%s'. Only: %s" \
% (field, db, ", ".join(sorted(fields))))
if format:
try:
formats = _entry_db_formats[db]
except KeyError:
formats = _get_entry_formats(db)
_entry_db_formats[db] = formats
if format not in formats:
raise ValueError("TogoWS entry fetch does not explicitly support "
"format '%s' for database '%s'. Only: %s" \
% (format, db, ", ".join(sorted(formats))))
if isinstance(id, list):
id = ",".join(id)
url = _BASE_URL + "/entry/%s/%s" % (db, urllib.quote(id))
if field:
url += "/" + field
if format:
url += "." + format
return _open(url)
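# Example (a sketch that assumes network access to TogoWS and that this module is
# importable as Bio.TogoWS; the PubMed identifier is an arbitrary placeholder):
#
#     from Bio import TogoWS
#     handle = TogoWS.entry("pubmed", "16381885", field="authors")
#     print handle.read()
#     handle.close()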
def search_count(db, query):
"""TogoWS search count (returns an integer).
db - database (string), see http://togows.dbcls.jp/search
query - search term (string)
You could then use the count to download a large set of search results in
batches using the offset and limit options to Bio.TogoWS.search(). In
general however the Bio.TogoWS.search_iter() function is simpler to use.
"""
global _search_db_names
if _search_db_names is None:
_search_db_names = _get_fields(_BASE_URL + "/search")
if db not in _search_db_names:
#TODO - Make this a ValueError? Right now despite the HTML website
#claiming to, the "gene" or "ncbi-gene" don't work and are not listed.
import warnings
warnings.warn("TogoWS search does not officially support database '%s'. "
"See %s/search/ for options." % (db, _BASE_URL))
handle = _open(_BASE_URL + "/search/%s/%s/count" \
% (db, urllib.quote(query)))
count = int(handle.read().strip())
handle.close()
return count
def search_iter(db, query, limit=None, batch=100):
"""TogoWS search iteratating over the results (generator function).
db - database (string), see http://togows.dbcls.jp/search
query - search term (string)
limit - optional upper bound on number of search results
batch - number of search results to pull back each time talk to
TogoWS (currently limited to 100).
You would use this function within a for loop, e.g.
>>> for id in search_iter("pubmed", "lung+cancer+drug", limit=10):
... print id #maybe fetch data with entry?
Internally this first calls the Bio.TogoWS.search_count() and then
uses Bio.TogoWS.search() to get the results in batches.
"""
count = search_count(db, query)
if not count:
raise StopIteration
#NOTE - We leave it to TogoWS to enforce any upper bound on each
#batch, they currently return an HTTP 400 Bad Request if above 100.
remain = count
if limit is not None:
remain = min(remain, limit)
offset = 1 #They don't use zero based counting
prev_ids = [] #Just cache the last batch for error checking
while remain:
batch = min(batch, remain)
#print "%r left, asking for %r" % (remain, batch)
ids = search(db, query, offset, batch).read().strip().split()
assert len(ids)==batch, "Got %i, expected %i" % (len(ids), batch)
#print "offset %i, %s ... %s" % (offset, ids[0], ids[-1])
if ids == prev_ids:
raise RuntimeError("Same search results for previous offset")
for identifier in ids:
if identifier in prev_ids:
raise RuntimeError("Result %s was in previous batch" \
% identifier)
yield identifier
offset += batch
remain -= batch
prev_ids = ids
def search(db, query, offset=None, limit=None, format=None):
"""TogoWS search (returns a handle).
This is a low level wrapper for the TogoWS search function, which
can return results in a several formats. In general, the search_iter
function is more suitable for end users.
db - database (string), see http://togows.dbcls.jp/search/
query - search term (string)
offset, limit - optional integers specifying which result to start from
(1 based) and the number of results to return.
format - return data file format (string), e.g. "json", "ttl" (RDF)
By default plain text is returned, one result per line.
At the time of writing, TogoWS applies a default count limit of 100
search results, and this is an upper bound. To access more results,
use the offset argument or the search_iter(...) function.
TogoWS supports a long list of databases, including many from the NCBI
(e.g. "ncbi-pubmed" or "pubmed", "ncbi-genbank" or "genbank", and
"ncbi-taxonomy"), EBI (e.g. "ebi-ebml" or "embl", "ebi-uniprot" or
"uniprot, "ebi-go"), and KEGG (e.g. "kegg-compound" or "compound").
For the current list, see http://togows.dbcls.jp/search/
The NCBI provide the Entrez Search service (ESearch) which is similar,
available in Biopython as the Bio.Entrez.esearch() function.
See also the function Bio.TogoWS.search_count() which returns the number
of matches found, and the Bio.TogoWS.search_iter() function which allows
you to iterate over the search results (taking care of batching for you).
"""
global _search_db_names
if _search_db_names is None:
_search_db_names = _get_fields(_BASE_URL + "/search")
if db not in _search_db_names:
#TODO - Make this a ValueError? Right now despite the HTML website
#claiming to, the "gene" or "ncbi-gene" don't work and are not listed.
import warnings
warnings.warn("TogoWS search does not explicitly support database '%s'. "
"See %s/search/ for options." % (db, _BASE_URL))
url = _BASE_URL + "/search/%s/%s" % (db, urllib.quote(query))
if offset is not None and limit is not None:
try:
offset = int(offset)
except:
raise ValueError("Offset should be an integer (at least one), not %r" % offset)
try:
limit = int(limit)
except:
raise ValueError("Limit should be an integer (at least one), not %r" % limit)
if offset <= 0:
raise ValueError("Offset should be at least one, not %i" % offset)
if limit <= 0:
raise ValueError("Count should be at least one, not %i" % limit)
url += "/%i,%i" % (offset, limit)
elif offset is not None or limit is not None:
raise ValueError("Expect BOTH offset AND limit to be provided (or neither)")
if format:
url += "." + format
#print url
return _open(url)
def convert(data, in_format, out_format):
"""TogoWS convert (returns a handle).
data - string or handle containing input record(s)
in_format - string describing the input file format (e.g. "genbank")
out_format - string describing the requested output format (e.g. "fasta")
For a list of supported conversions (e.g. "genbank" to "fasta"), see
http://togows.dbcls.jp/convert/
Note that Biopython has built in support for conversion of sequence and
    alignment file formats (functions Bio.SeqIO.convert and Bio.AlignIO.convert)
"""
global _convert_formats
if not _convert_formats:
_convert_formats = _get_convert_formats()
if [in_format, out_format] not in _convert_formats:
msg = "\n".join("%s -> %s" % tuple(pair) for pair in _convert_formats)
raise ValueError("Unsupported conversion. Choose from:\n%s" % msg)
url = _BASE_URL + "/convert/%s.%s" % (in_format, out_format)
#TODO - Should we just accept a string not a handle? What about a filename?
if hasattr(data, "read"):
#Handle
return _open(url, post={"data":data.read()})
else:
#String
return _open(url, post={"data":data})
def _open(url, post=None):
"""Helper function to build the URL and open a handle to it (PRIVATE).
Open a handle to TogoWS, will raise an IOError if it encounters an error.
    In the absence of clear guidelines, this function enforces a limit of
"up to three queries per second" to avoid abusing the TogoWS servers.
"""
delay = 0.333333333 #one third of a second
current = time.time()
wait = _open.previous + delay - current
if wait > 0:
time.sleep(wait)
_open.previous = current + wait
else:
_open.previous = current
#print url
try:
if post:
handle = urllib2.urlopen(url, _as_bytes(urllib.urlencode(post)))
else:
handle = urllib2.urlopen(url)
except urllib2.HTTPError, exception:
raise exception
#We now trust TogoWS to have set an HTTP error code, that
#suffices for my current unit tests. Previously we would
#examine the start of the data returned back.
return _binary_to_string_handle(handle)
_open.previous = 0
|
bryback/quickseq
|
genescript/Bio/TogoWS/__init__.py
|
Python
|
mit
| 13,050
|
[
"BLAST",
"Biopython"
] |
4dfda1d05be2392993b07c463f0abbbc643211c80f4698e7fa59a8e1b16db0c0
|
from requests.auth import HTTPBasicAuth
from bs4 import BeautifulSoup
from . import nltk_utils
from time import time
import threading
import requests
import urllib2
import urllib
import json
import traceback
import sys
bing_api = 'https://api.datamarket.azure.com/Bing/SearchWeb/v1/Web?$format=json&Query='
bing_key = 'IgVbvvtgQVYI7Yfu9hPgVx0Tmbih1gq5lFOXaIQH4f8'
user_agent = 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19'
def search(q, q_url):
search_url = bing_api + urllib.quote(q)
print 'Search Url: %s\n' % search_url
try:
response = requests.get(search_url, auth=HTTPBasicAuth(bing_key, bing_key))
results = response.json()['d']['results']
urls = []
for r in results:
if r['Url'] != q_url:
urls.append(r['Url'])
if len(urls) >= 20:
return urls[:20]
else:
return urls
except Exception as e:
print e
#print response.text
traceback.print_exc(file=sys.stdout)
class URLThread(threading.Thread):
def __init__(self, _id, url, texts, *args, **kwargs):
threading.Thread.__init__(self)
self._id = _id
self.url = url
self.texts = texts
def _get_art(self, url):
#print 'requesting: {}'.format(url)
req = urllib2.Request(url, headers={'User-Agent': user_agent})
response = urllib2.urlopen(req, timeout=10)
html = response.read()
soup = BeautifulSoup(html, 'html5lib')
[s.extract() for s in soup(['script', 'a', 'rel', 'style', 'img', 'link', 'style'])]
text = soup.get_text()
text = nltk_utils.preprocess_text(text)
return text
def run(self):
txt = self._get_art(self.url)
self.texts[self._id] = txt
def get_articles(urls):
corpus = [''] * len(urls)
t0 = time()
threads = []
no_threads = 10
print 'url len: {}'.format(len(urls))
for i in range(len(urls)):
t = URLThread(i, urls[i], corpus)
threads.append(t)
for j in range(len(threads) / no_threads):
offset = no_threads * j
for t in threads[offset:offset +no_threads]:
t.start()
for t in threads[offset:offset + no_threads]:
t.join()
t1 = time()
print 'Time fetching urls: {}'.format(t1 - t0)
return corpus
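# Typical flow (a sketch; the query and URL are placeholders): run a Bing search
# for related pages, then scrape and preprocess their text in parallel.
#
#     urls = search('lung cancer treatment', 'http://example.com/original-question')
#     corpus = get_articles(urls)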
|
xirdneh/liveqa-trec-2016
|
liveqa/websearch.py
|
Python
|
mit
| 2,430
|
[
"Galaxy"
] |
6fe1d7d2cea72157a317bdcea8fed0c8d721a73d02d6f46c6379ff491e2c43d8
|
## Copyright (C) 2011 Stellenbosch University
##
## This file is part of SUCEM.
##
## SUCEM is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## SUCEM is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with SUCEM. If not, see <http://www.gnu.org/licenses/>.
##
## Contact: cemagga@gmail.com
""" Module for converting various mesh formats."""
# Copyright (C) 2006 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Garth N. Wells (gmsh function)
# Modified by Alexander H. Jarosch (gmsh fix)
# Modified by Angelo Simone (Gmsh and Medit fix)
# Modified by Andy R. Terrel (gmsh fix and triangle function)
# Modified by Magnus Vikstrom (metis and scotch function)
# Modified by Bartosz Sawicki (diffpack function)
# Modified by Gideon Simpson (Exodus II function)
# Modified by Arve Knudsen (make into module, abaqus support)
# Modified by Kent-Andre Mardal (Star-CD function)
# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))
# Modified by Neilen Marais (add gmsh support for reading physical region)
# NOTE: This module does not depend on (py)dolfin being installed.
# NOTE: If future additions need that please import dolfin in a try: except:
# NOTE: clause and tell the user to install dolfin if it is not installed.
import getopt
import sys
from dolfin_utils.commands import getstatusoutput
import re
import warnings
import os.path
def format_from_suffix(suffix):
"Return format for given suffix"
if suffix == "xml":
return "xml"
elif suffix == "mesh":
return "mesh"
elif suffix == "gmsh":
return "gmsh"
elif suffix == "msh":
return "gmsh"
elif suffix == "gra":
return "metis"
elif suffix == "grf":
return "scotch"
elif suffix == "grid":
return "diffpack"
elif suffix == "inp":
return "abaqus"
elif suffix == "ncdf":
return "NetCDF"
elif suffix =="exo":
return "ExodusII"
elif suffix =="e":
return "ExodusII"
elif suffix == "vrt" or suffix == "cel":
return "StarCD"
elif suffix == "ele" or suffix == "node":
return "Triangle"
else:
_error("Sorry, unknown suffix %s." % suffix)
def mesh2xml(ifilename, ofilename):
"""Convert between .mesh and .xml, parser implemented as a
state machine:
0 = read 'Dimension'
1 = read dimension
2 = read 'Vertices'
3 = read number of vertices
4 = read next vertex
5 = read 'Triangles' or 'Tetrahedra'
6 = read number of cells
7 = read next cell
8 = done
"""
print "Converting from Medit format (.mesh) to DOLFIN XML format"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Remove newline
if line[-1] == "\n":
line = line[:-1]
# Read dimension
if line == "Dimension" or line == " Dimension":
line = ifile.readline()
num_dims = int(line)
if num_dims == 2:
cell_type = "triangle"
dim = 2
elif num_dims == 3:
cell_type = "tetrahedron"
dim = 3
break
# Check that we got the cell type
if cell_type == None:
_error("Unable to find cell type.")
# Step to beginning of file
ifile.seek(0)
# Write header
write_header_mesh(ofile, cell_type, dim)
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
if line[-1] == "\n":
line = line[:-1]
if state == 0:
if line == "Dimension" or line == " Dimension":
state += 1
elif state == 1:
num_dims = int(line)
state +=1
elif state == 2:
if line == "Vertices" or line == " Vertices":
state += 1
elif state == 3:
num_vertices = int(line)
write_header_vertices(ofile, num_vertices)
state +=1
elif state == 4:
if num_dims == 2:
(x, y, tmp) = line.split()
x = float(x)
y = float(y)
z = 0.0
elif num_dims == 3:
(x, y, z, tmp) = line.split()
x = float(x)
y = float(y)
z = float(z)
write_vertex(ofile, num_vertices_read, x, y, z)
num_vertices_read +=1
if num_vertices == num_vertices_read:
write_footer_vertices(ofile)
state += 1
elif state == 5:
if (line == "Triangles" or line == " Triangles") and num_dims == 2:
state += 1
if line == "Tetrahedra" and num_dims == 3:
state += 1
elif state == 6:
num_cells = int(line)
write_header_cells(ofile, num_cells)
state +=1
elif state == 7:
if num_dims == 2:
(n0, n1, n2, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
(n0, n1, n2, n3, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
n3 = int(n3) - 1
write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
write_footer_cells(ofile)
state += 1
elif state == 8:
break
# Check that we got all data
if state == 8:
print "Conversion done"
else:
_error("Missing data, unable to convert")
# Write footer
write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def gmsh2xml(ifilename, handler):
"""Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
parser implemented as a state machine:
0 = read 'MeshFormat'
1 = read mesh format data
2 = read 'EndMeshFormat'
3 = read 'Nodes'
4 = read number of vertices
5 = read vertices
6 = read 'EndNodes'
7 = read 'Elements'
8 = read number of cells
9 = read cells
10 = done
Afterwards, extract physical region numbers if they are defined in
the mesh file as a mesh function.
"""
print "Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format"
# Open files
ifile = open(ifilename, "r")
# Scan file for cell type
cell_type = None
dim = 0
line = ifile.readline()
while line:
# Remove newline
if line[-1] == "\n":
line = line[:-1]
# Read dimension
if line.find("$Elements") == 0:
line = ifile.readline()
num_cells = int(line)
num_cells_counted = 0
if num_cells == 0:
_error("No cells found in gmsh file.")
line = ifile.readline()
# Now iterate through elements to find largest dimension. Gmsh
# format might include elements of lower dimensions in the element list.
# We also need to count number of elements of correct dimensions.
# Also determine which vertices are not used.
dim_2_count = 0
dim_3_count = 0
vertices_2_used = []
# Array used to store gmsh tags for 2D (type 2/triangular) elements
tags_2 = []
# Array used to store gmsh tags for 3D (type 4/tet) elements
tags_3 = []
vertices_3_used = []
while line.find("$EndElements") == -1:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type == 2:
if dim < 2:
cell_type = "triangle"
dim = 2
node_num_list = [int(node) for node in element[3 + num_tags:]]
vertices_2_used.extend(node_num_list)
if num_tags > 0:
tags_2.append(tuple(int(tag) for tag in element[3:3+num_tags]))
dim_2_count += 1
elif elem_type == 4:
if dim < 3:
cell_type = "tetrahedron"
dim = 3
vertices_2_used = None
node_num_list = [int(node) for node in element[3 + num_tags:]]
vertices_3_used.extend(node_num_list)
if num_tags > 0:
tags_3.append(tuple(int(tag) for tag in element[3:3+num_tags]))
dim_3_count += 1
line = ifile.readline()
else:
# Read next line
line = ifile.readline()
# Check that we got the cell type and set num_cells_counted
if cell_type == None:
_error("Unable to find cell type.")
if dim == 3:
num_cells_counted = dim_3_count
vertex_set = set(vertices_3_used)
vertices_3_used = None
elif dim == 2:
num_cells_counted = dim_2_count
vertex_set = set(vertices_2_used)
vertices_2_used = None
vertex_dict = {}
for n,v in enumerate(vertex_set):
vertex_dict[v] = n
# Step to beginning of file
ifile.seek(0)
# Set mesh type
handler.set_mesh_type(cell_type, dim)
# Initialise node list (gmsh does not export all vertexes in order)
nodelist = {}
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
while state != 10:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
if line[-1] == "\n":
line = line[:-1]
if state == 0:
if line == "$MeshFormat":
state = 1
elif state == 1:
(version, file_type, data_size) = line.split()
state = 2
elif state == 2:
if line == "$EndMeshFormat":
state = 3
elif state == 3:
if line == "$Nodes":
state = 4
elif state == 4:
num_vertices = len(vertex_dict)
handler.start_vertices(num_vertices)
state = 5
elif state == 5:
(node_no, x, y, z) = line.split()
node_no = int(node_no)
x,y,z = [float(xx) for xx in (x,y,z)]
if vertex_dict.has_key(node_no):
node_no = vertex_dict[node_no]
else:
continue
nodelist[int(node_no)] = num_vertices_read
handler.add_vertex(num_vertices_read, [x, y, z])
num_vertices_read +=1
if num_vertices == num_vertices_read:
handler.end_vertices()
state = 6
elif state == 6:
if line == "$EndNodes":
state = 7
elif state == 7:
if line == "$Elements":
state = 8
elif state == 8:
handler.start_cells(num_cells_counted)
state = 9
elif state == 9:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type == 2 and dim == 2:
node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
for node in node_num_list:
if not node in nodelist:
_error("Vertex %d of triangle %d not previously defined." %
(node, num_cells_read))
cell_nodes = [nodelist[n] for n in node_num_list]
handler.add_cell(num_cells_read, cell_nodes)
num_cells_read +=1
elif elem_type == 4 and dim == 3:
node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
for node in node_num_list:
if not node in nodelist:
_error("Vertex %d of tetrahedron %d not previously defined." %
(node, num_cells_read))
# import pdb ; pdb.set_trace()
cell_nodes = [nodelist[n] for n in node_num_list]
handler.add_cell(num_cells_read, cell_nodes)
num_cells_read +=1
if num_cells_counted == num_cells_read:
handler.end_cells()
state = 10
elif state == 10:
break
# Write mesh function based on the Physical Regions defined by
# gmsh, but only if they are not all zero. All zero physical
# regions indicate that no physical regions were defined.
if dim == 2:
tags = tags_2
elif dim == 3:
tags = tags_3
else:
_error("Gmsh tags not supported for dimension %i. Probably a bug" % dim)
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in tags):
handler.start_meshfunction("physical_region", dim, num_cells)
for i, physical_region in enumerate(physical_regions):
handler.add_entity_meshfunction(i, physical_region)
handler.end_meshfunction()
# Check that we got all data
if state == 10:
print "Conversion done"
else:
_error("Missing data, unable to convert \n\ Did you use version 2.0 of the gmsh file format?")
# Close files
ifile.close()
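# Minimal sketch of the handler interface that gmsh2xml drives (method names are
# taken from the calls above; a real handler would write DOLFIN XML instead of printing):
#
#     class PrintHandler(object):
#         def set_mesh_type(self, cell_type, dim): print cell_type, dim
#         def start_vertices(self, n): print "vertices:", n
#         def add_vertex(self, i, xyz): pass
#         def end_vertices(self): pass
#         def start_cells(self, n): print "cells:", n
#         def add_cell(self, i, nodes): pass
#         def end_cells(self): pass
#         def start_meshfunction(self, name, dim, size): pass
#         def add_entity_meshfunction(self, i, value): pass
#         def end_meshfunction(self): pass
#
#     gmsh2xml("model.msh", PrintHandler())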
def triangle2xml(ifilename, ofilename):
"""Convert between triangle format (http://www.cs.cmu.edu/~quake/triangle.html) and .xml. The
given ifilename should be the prefix for the corresponding .node, and .ele files.
"""
def get_next_line (fp):
"""Helper function for skipping comments and blank lines"""
line = fp.readline()
if line == '':
_error("Hit end of file prematurely.")
line = line.strip()
if not (line.startswith('#') or line == ''):
return line
return get_next_line(fp)
print "Converting from Triangle format {.node, .ele} to DOLFIN XML format"
# Open files
for suffix in [".node", ".ele"]:
if suffix in ifilename and ifilename[-len(suffix):] == suffix:
ifilename = ifilename.replace(suffix, "")
node_file = open(ifilename+".node", "r")
ele_file = open(ifilename+".ele", "r")
ofile = open(ofilename, "w")
# Read all the nodes
nodes = {}
num_nodes, dim, attr, bound = map(int, get_next_line(node_file).split())
while len(nodes) < num_nodes:
node, x, y = get_next_line(node_file).split()[:3]
nodes[int(node)] = (float(x), float(y))
# Read all the triangles
tris = {}
num_tris, n_per_tri, attrs = map(int, get_next_line(ele_file).split())
while len(tris) < num_tris:
tri, n1, n2, n3 = map(int, get_next_line(ele_file).split()[:4])
tris[tri] = (n1, n2, n3)
# Write everything out
write_header_mesh(ofile, "triangle", 2)
write_header_vertices(ofile, num_nodes)
node_off = 0 if nodes.has_key(0) else -1
for node, node_t in nodes.iteritems():
write_vertex(ofile, node+node_off, node_t[0], node_t[1], 0.0)
write_footer_vertices(ofile)
write_header_cells(ofile, num_tris)
tri_off = 0 if tris.has_key(0) else -1
for tri, tri_t in tris.iteritems():
write_cell_triangle(ofile, tri+tri_off, tri_t[0] + node_off,
tri_t[1] + node_off, tri_t[2] + node_off)
write_footer_cells(ofile)
write_footer_mesh(ofile)
# Close files
node_file.close()
ele_file.close()
ofile.close()
def xml_old2xml(ifilename, ofilename):
"Convert from old DOLFIN XML format to new."
print "Converting from old (pre DOLFIN 0.6.2) to new DOLFIN XML format..."
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type (assuming there is just one)
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Read dimension
if "<triangle" in line:
cell_type = "triangle"
dim = 2
break
elif "<tetrahedron" in line:
cell_type = "tetrahedron"
dim = 3
break
# Step to beginning of file
ifile.seek(0)
# Read lines and make changes
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Modify line
if "xmlns" in line:
line = "<dolfin xmlns:dolfin=\"http://www.fenicsproject.org\">\n"
if "<mesh>" in line:
line = " <mesh celltype=\"%s\" dim=\"%d\">\n" % (cell_type, dim)
if dim == 2 and " z=\"0.0\"" in line:
line = line.replace(" z=\"0.0\"", "")
if " name=" in line:
line = line.replace(" name=", " index=")
if " name =" in line:
line = line.replace(" name =", " index=")
if "n0" in line:
line = line.replace("n0", "v0")
if "n1" in line:
line = line.replace("n1", "v1")
if "n2" in line:
line = line.replace("n2", "v2")
if "n3" in line:
line = line.replace("n3", "v3")
# Write line
ofile.write(line)
# Close files
ifile.close();
ofile.close();
print "Conversion done"
def metis_graph2graph_xml(ifilename, ofilename):
"Convert from Metis graph format to DOLFIN Graph XML."
print "Converting from Metis graph format to DOLFIN Graph XML."
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
write_header_graph(ofile, "directed")
write_header_vertices(ofile, int(num_vertices))
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
write_graph_vertex(ofile, i, len(edges))
write_footer_vertices(ofile)
write_header_edges(ofile, 2*int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
for i in range(int(num_vertices)):
print "vertex %g", i
line = ifile.readline()
edges = line.split()
for e in edges:
write_graph_edge(ofile, i, int(e))
write_footer_edges(ofile)
write_footer_graph(ofile)
# Close files
ifile.close();
ofile.close();
def scotch_graph2graph_xml(ifilename, ofilename):
"Convert from Scotch graph format to DOLFIN Graph XML."
print "Converting from Scotch graph format to DOLFIN Graph XML."
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Skip graph file version number
ifile.readline()
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
# Read start index and numeric flag
# Start index is 0 or 1 (C/Fortran)
# Numeric flag is 3 bits where bit 1 enables vertex labels
# bit 2 enables edge weights and bit 3 enables vertex weights
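    # For illustration, a typical header line here could look like "0  000"
    # (hypothetical values): start index 0 with no labels or weights enabled,
    # which is the only combination handled below.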
line = ifile.readline()
(start_index, numeric_flag) = line.split()
    # Handling not implemented
if not numeric_flag == "000":
_error("Handling of scotch vertex labels, edge- and vertex weights not implemented")
write_header_graph(ofile, "undirected")
write_header_vertices(ofile, int(num_vertices))
# Read vertices and edges, first number gives number of edges from this vertex (not used)
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
write_graph_vertex(ofile, i, len(edges)-1)
write_footer_vertices(ofile)
write_header_edges(ofile, int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
ifile.readline()
ifile.readline()
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
for j in range(1, len(edges)):
write_graph_edge(ofile, i, int(edges[j]))
write_footer_edges(ofile)
write_footer_graph(ofile)
# Close files
ifile.close();
ofile.close();
def write_header_meshfunction(ofile, dimensions, size):
header = """<?xml version="1.0" encoding="UTF-8"?>
<dolfin xmlns:dolfin="http://www.fenics.org/dolfin/">
<meshfunction type="uint" dim="%d" size="%d">
""" % (dimensions, size)
ofile.write(header)
def write_entity_meshfunction(ofile, index, value):
ofile.write(""" <entity index=\"%d\" value=\"%d\"/>
""" % (index, value))
def write_footer_meshfunction(ofile):
ofile.write(""" </meshfunction>
</dolfin>""")
def diffpack2xml(ifilename, ofilename):
"Convert from Diffpack tetrahedral grid format to DOLFIN XML."
print diffpack2xml.__doc__
# Format strings for MeshFunction XML files
meshfunction_header = """\
<?xml version="1.0" encoding="UTF-8"?>\n
<dolfin xmlns:dolfin="http://www.fenics.org/dolfin/">
<meshfunction type="uint" dim="%d" size="%d">\n"""
meshfunction_entity = " <entity index=\"%d\" value=\"%d\"/>\n"
meshfunction_footer = " </meshfunction>\n</dolfin>"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
ofile_mat = open(ofilename.split(".")[0]+"_mat.xml", "w")
ofile_bi = open(ofilename.split(".")[0]+"_bi.xml", "w")
# Read and analyze header
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
if re.search(r"Number of elements", line):
num_cells = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of nodes", line):
num_vertices = int(re.match(r".*\s(\d+).*", line).group(1))
write_header_mesh(ofile, "tetrahedron", 3)
write_header_vertices(ofile, num_vertices)
ofile_bi.write(meshfunction_header % (0, num_vertices))
ofile_mat.write(meshfunction_header % (3, num_cells))
# Read & write vertices
    # Note that only the first boundary indicator is rewritten into XML
for i in range(num_vertices):
line = ifile.readline()
m = re.match(r"^.*\(\s*(.*)\s*\).*\](.*)$", line)
x = re.split("[\s,]+", m.group(1))
write_vertex(ofile, i, x[0], x[1], x[2])
tmp = m.group(2).split()
if len(tmp) > 0:
bi = int(tmp[0])
else:
bi = 0
ofile_bi.write(meshfunction_entity % (i, bi))
write_footer_vertices(ofile)
write_header_cells(ofile, num_cells)
# Ignore comment lines
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
# Read & write cells
for i in range(int(num_cells)):
line = ifile.readline()
v = line.split();
if v[1] != "ElmT4n3D":
_error("Only tetrahedral elements (ElmT4n3D) are implemented.")
write_cell_tetrahedron(ofile, i, int(v[3])-1, int(v[4])-1, int(v[5])-1, int(v[6])-1)
ofile_mat.write(meshfunction_entity % (i, int(v[2])))
write_footer_cells(ofile)
write_footer_mesh(ofile)
ofile_bi.write(meshfunction_footer)
ofile_mat.write(meshfunction_footer)
# Close files
ifile.close()
ofile.close()
ofile_mat.close()
ofile_bi.close()
class ParseError(Exception):
""" Error encountered in source file.
"""
class DataHandler(object):
""" Baseclass for handlers of mesh data.
The actual handling of mesh data encountered in the source file is
    delegated to a polymorphic object. Typically, the delegate will write the
data to XML.
@ivar _state: the state which the handler is in, one of State_*.
@ivar _cell_type: cell type in mesh. One of CellType_*.
@ivar _dim: mesh dimensions.
"""
State_Invalid, State_Init, State_Vertices, State_Cells, State_MeshFunction = range(5)
CellType_Tetrahedron, CellType_Triangle = range(2)
def __init__(self):
self._state = self.State_Invalid
def set_mesh_type(self, cell_type, dim):
assert self._state == self.State_Invalid
self._state = self.State_Init
if cell_type == "tetrahedron":
self._cell_type = self.CellType_Tetrahedron
elif cell_type == "triangle":
self._cell_type = self.CellType_Triangle
self._dim = dim
def start_vertices(self, num_vertices):
assert self._state == self.State_Init
self._state = self.State_Vertices
def add_vertex(self, vertex, coords):
assert self._state == self.State_Vertices
def end_vertices(self):
assert self._state == self.State_Vertices
self._state = self.State_Init
def start_cells(self, num_cells):
assert self._state == self.State_Init
self._state = self.State_Cells
def add_cell(self, cell, nodes):
assert self._state == self.State_Cells
def end_cells(self):
assert self._state == self.State_Cells
self._state = self.State_Init
def start_meshfunction(self, name, dim, size):
assert self._state == self.State_Init
self._state = self.State_MeshFunction
def add_entity_meshfunction(self, index, value):
assert self._state == self.State_MeshFunction
def end_meshfunction(self):
assert self._state == self.State_MeshFunction
self._state = self.State_Init
def warn(self, msg):
""" Issue warning during parse.
"""
warnings.warn(msg)
def error(self, msg):
""" Raise error during parse.
This method is expected to raise ParseError.
"""
raise ParseError(msg)
def close(self):
self._state = self.State_Invalid
class XmlHandler(DataHandler):
""" Data handler class which writes to Dolfin XML.
"""
def __init__(self, ofilename):
DataHandler.__init__(self)
self._ofilename = ofilename
self.__ofile = file(ofilename, "wb")
self.__ofile_meshfunc = None
def set_mesh_type(self, cell_type, dim):
DataHandler.set_mesh_type(self, cell_type, dim)
write_header_mesh(self.__ofile, cell_type, dim)
def start_vertices(self, num_vertices):
DataHandler.start_vertices(self, num_vertices)
write_header_vertices(self.__ofile, num_vertices)
def add_vertex(self, vertex, coords):
DataHandler.add_vertex(self, vertex, coords)
write_vertex(self.__ofile, vertex, *coords)
def end_vertices(self):
DataHandler.end_vertices(self)
write_footer_vertices(self.__ofile)
def start_cells(self, num_cells):
DataHandler.start_cells(self, num_cells)
write_header_cells(self.__ofile, num_cells)
def add_cell(self, cell, nodes):
DataHandler.add_cell(self, cell, nodes)
if self._cell_type == self.CellType_Tetrahedron:
func = write_cell_tetrahedron
elif self._cell_type == self.CellType_Triangle:
func = write_cell_triangle
func(self.__ofile, cell, *nodes)
def end_cells(self):
DataHandler.end_cells(self)
write_footer_cells(self.__ofile)
def start_meshfunction(self, name, dim, size):
DataHandler.start_meshfunction(self, name, dim, size)
fname = os.path.splitext(self.__ofile.name)[0]
self.__ofile_meshfunc = file("%s_%s.xml" % (fname, name), "wb")
write_header_meshfunction(self.__ofile_meshfunc, dim, size)
def add_entity_meshfunction(self, index, value):
DataHandler.add_entity_meshfunction(self, index, value)
write_entity_meshfunction(self.__ofile_meshfunc, index, value)
def end_meshfunction(self):
DataHandler.end_meshfunction(self)
write_footer_meshfunction(self.__ofile_meshfunc)
self.__ofile_meshfunc.close()
self.__ofile_meshfunc = None
def close(self):
DataHandler.close(self)
if self.__ofile.closed:
return
write_footer_mesh(self.__ofile)
self.__ofile.close()
if self.__ofile_meshfunc is not None:
self.__ofile_meshfunc.close()
def _abaqus(ifilename, handler):
""" Convert from Abaqus.
The Abaqus format first defines a node block, then there should be a number
of elements containing these nodes.
"""
params = False
ifile = file(ifilename, "rb")
handler.set_mesh_type("tetrahedron", 3)
# Read definitions
def read_params(params_spec, pnames, lineno):
params = {}
for p in params_spec:
m = re.match(r"(.+)=(.+)", p)
if m is not None:
pname, val = m.groups()
else:
handler.warn("Invalid parameter syntax on line %d: %s" % (lineno, p))
continue
for pn in pnames:
if pn == pname:
params[pn] = val
break
return params
nodes = {}
elems = {}
eid2elset = {}
material2elsetids = {}
materials = []
re_sect = re.compile(r"\*([^,]+)(?:,(.*))?")
re_node = re.compile(r"(\d+),\s*(.+),\s*(.+),\s*(.+)")
re_tetra = re.compile(r"(\d+),\s*(\d+),\s*(\d+),\s*(\d+),\s*(\d+)")
sect = None
for lineno, l in enumerate(ifile):
l = l.strip().lower()
m = re_sect.match(l)
if m is not None:
sect, params_str = m.groups()
params_spec = ([s.strip() for s in params_str.split(",")] if params_str
else [])
if sect == "element":
pnames = ("type", "elset")
params = read_params(params_spec, pnames, lineno)
if "type" not in params:
handler.error("Element on line %d doesn't declare TYPE" %
(lineno,))
tp, elset = params["type"], params.get("elset")
if tp not in ("c3d4", "dc3d4"):
handler.warn("Unsupported element type '%s' on line %d" % (tp, lineno))
supported_elem = False
else:
supported_elem = True
elif sect == "solid section":
pnames = ("material", "elset")
params = read_params(params_spec, pnames, lineno)
for pname in pnames:
if pname not in params:
handler.error("Solid section on line %d doesn't "
"declare %s" % (lineno, pname.upper()))
matname = params["material"]
material2elsetids.setdefault(matname, []).append(params["elset"])
elif sect == "material":
name = read_params(params_spec, ["name"], lineno)["name"]
materials.append(name)
# We've read the section's heading, continue to next line
continue
# Read section entry
if sect == "node":
# Read node definition
m = re_node.match(l)
if m is None:
handler.warn("Node on line %d is on unsupported format" % (lineno,))
continue
idx, c0, c1, c2 = m.groups()
try: coords = [float(c) for c in (c0, c1, c2)]
except ValueError:
handler.warn("Node on line %d contains non-numeric coordinates"
% (lineno,))
continue
nodes[int(idx)] = coords
elif sect == "element":
if not supported_elem:
continue
m = re_tetra.match(l)
if m is None:
handler.error("Node on line %d badly specified (expected 3 "
"coordinates)" % (lineno,))
idx, n0, n1, n2, n3 = [int(x) for x in m.groups()]
elems[idx] = (tp, n0, n1, n2, n3)
eid2elset.setdefault(elset, set()).add(idx)
ifile.close()
# Note that vertices/cells must be consecutively numbered, which isn't
# necessarily the case in Abaqus. Therefore we enumerate and translate
# original IDs to sequence indexes.
handler.start_vertices(len(nodes))
nodeids = nodes.keys()
nodeids.sort()
for idx, nid in enumerate(nodeids):
handler.add_vertex(idx, nodes[nid])
handler.end_vertices()
handler.start_cells(len(elems))
elemids = elems.keys()
elemids.sort()
for idx, eid in enumerate(elemids):
elem = elems[eid]
tp = elem[0]
elemnodes = []
for nid in elem[1:]:
try: elemnodes.append(nodeids.index(nid))
except ValueError:
handler.error("Element %s references non-existent node %s" % (eid, nid))
handler.add_cell(idx, elemnodes)
handler.end_cells()
# Define the material function for the cells
num_entities = 0
for matname, elsetids in material2elsetids.items():
if matname not in materials:
handler.error("Unknown material %s referred to for element sets %s" %
(matname, ", ".join(elsetids)))
num_entities += len(elsetids)
handler.start_meshfunction("material", 3, num_entities)
# Each material is associated with a number of element sets
for i, matname in enumerate(materials):
try: elsetids = material2elsetids[matname]
except KeyError:
# No elements for this material
continue
# For each element set associated with this material
elsets = []
for eid in elsetids:
try: elsets.append(eid2elset[eid])
except KeyError:
handler.error("Material '%s' is assigned to undefined element "
"set '%s'" % (matname, eid))
for elset in elsets:
for elemid in elset:
handler.add_entity_meshfunction(elemids.index(elemid), i)
handler.end_meshfunction()
def netcdf2xml(ifilename,ofilename):
"Convert from NetCDF format to DOLFIN XML."
print "Converting from NetCDF format (.ncdf) to DOLFIN XML format"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
cell_type = None
dim = 0
# Scan file for dimension, number of nodes, number of elements
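    # The ncdump header is expected to contain lines of the form (hypothetical
    # values) "num_dim = 3 ;", "num_nodes = 1331 ;" and "num_elem = 5000 ;",
    # which the regular expressions below pick apart.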
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if re.search(r"num_dim.*=", line):
dim = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_nodes.*=", line):
num_vertices = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_elem.*=", line):
num_cells = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"connect1 =",line):
break
num_dims=dim
# Set cell type
if dim == 2:
cell_type ="triangle"
if dim == 3:
cell_type ="tetrahedron"
# Check that we got the cell type
    if cell_type is None:
_error("Unable to find cell type.")
# Write header
write_header_mesh(ofile, cell_type, dim)
write_header_cells(ofile, num_cells)
num_cells_read = 0
# Read and write cells
while 1:
# Read next line
line = ifile.readline()
if not line:
break
connect=re.split("[,;]",line)
if num_dims == 2:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
n3 = int(connect[3])-1
write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
write_footer_cells(ofile)
write_header_vertices(ofile, num_vertices)
break
num_vertices_read = 0
coords = [[],[],[]]
coord = -1
while 1:
line = ifile.readline()
if not line:
_error("Missing data")
if re.search(r"coord =",line):
break
# Read vertices
while 1:
line = ifile.readline()
# print line
if not line:
break
if re.search(r"\A\s\s\S+,",line):
# print line
coord+=1
print "Found x_"+str(coord)+" coordinates"
coords[coord] += line.split()
if re.search(r";",line):
break
# Write vertices
for i in range(num_vertices):
if num_dims == 2:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = 0
if num_dims == 3:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = float(re.split(",",coords[2].pop(0))[0])
write_vertex(ofile, i, x, y, z)
# Write footer
write_footer_vertices(ofile)
write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def exodus2xml(ifilename,ofilename):
"Convert from Exodus II format to DOLFIN XML."
print "Converting from Exodus II format to NetCDF format"
name = ifilename.split(".")[0]
netcdffilename = name +".ncdf"
status, output = getstatusoutput('ncdump '+ifilename + ' > '+netcdffilename)
if status != 0:
raise IOError, "Something wrong while executing ncdump. Is ncdump "\
"installed on the system?"
netcdf2xml(netcdffilename, ofilename)
# Write mesh header
def write_header_mesh(ofile, cell_type, dim):
ofile.write("""\
<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<dolfin xmlns:dolfin=\"http://www.fenics.org/dolfin/\">
<mesh celltype="%s" dim="%d">
""" % (cell_type, dim))
# Write graph header
def write_header_graph(ofile, graph_type):
ofile.write("""\
<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<dolfin xmlns:dolfin=\"http://www.fenics.org/dolfin/\">
<graph type="%s">
""" % (graph_type))
# Write mesh footer
def write_footer_mesh(ofile):
ofile.write("""\
</mesh>
</dolfin>
""")
# Write graph footer
def write_footer_graph(ofile):
ofile.write("""\
</graph>
</dolfin>
""")
def write_header_vertices(ofile, num_vertices):
"Write vertices header"
print "Expecting %d vertices" % num_vertices
ofile.write(" <vertices size=\"%d\">\n" % num_vertices)
def write_footer_vertices(ofile):
"Write vertices footer"
ofile.write(" </vertices>\n")
print "Found all vertices"
def write_header_edges(ofile, num_edges):
"Write edges header"
print "Expecting %d edges" % num_edges
ofile.write(" <edges size=\"%d\">\n" % num_edges)
def write_footer_edges(ofile):
"Write edges footer"
ofile.write(" </edges>\n")
print "Found all edges"
def write_vertex(ofile, vertex, x, y, z):
"Write vertex"
ofile.write(" <vertex index=\"%d\" x=\"%s\" y=\"%s\" z=\"%s\"/>\n" % \
(vertex, x, y, z))
def write_graph_vertex(ofile, vertex, num_edges, weight = 1):
"Write graph vertex"
ofile.write(" <vertex index=\"%d\" num_edges=\"%d\" weight=\"%d\"/>\n" % \
(vertex, num_edges, weight))
def write_graph_edge(ofile, v1, v2, weight = 1):
"Write graph edge"
ofile.write(" <edge v1=\"%d\" v2=\"%d\" weight=\"%d\"/>\n" % \
(v1, v2, weight))
def write_header_cells(ofile, num_cells):
"Write cells header"
ofile.write(" <cells size=\"%d\">\n" % num_cells)
print "Expecting %d cells" % num_cells
def write_footer_cells(ofile):
"Write cells footer"
ofile.write(" </cells>\n")
print "Found all cells"
def write_cell_triangle(ofile, cell, n0, n1, n2):
"Write cell (triangle)"
ofile.write(" <triangle index=\"%d\" v0=\"%d\" v1=\"%d\" v2=\"%d\"/>\n" % \
(cell, n0, n1, n2))
def write_cell_tetrahedron(ofile, cell, n0, n1, n2, n3):
"Write cell (tetrahedron)"
ofile.write(" <tetrahedron index=\"%d\" v0=\"%d\" v1=\"%d\" v2=\"%d\" v3=\"%d\"/>\n" % \
(cell, n0, n1, n2, n3))
def _error(message):
"Write an error message"
for line in message.split("\n"):
print "*** %s" % line
sys.exit(2)
def convert2xml(ifilename, ofilename, iformat=None):
""" Convert a file to the DOLFIN XML format.
"""
convert(ifilename, XmlHandler(ofilename), iformat=iformat)
def convert(ifilename, handler, iformat=None):
""" Convert a file using a provided data handler.
Note that handler.close is called when this function finishes.
@param ifilename: Name of input file.
@param handler: The data handler (instance of L{DataHandler}).
@param iformat: Format of input file.
"""
if iformat is None:
iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])
# XXX: Backwards-compat
if hasattr(handler, "_ofilename"):
ofilename = handler._ofilename
# Choose conversion
if iformat == "mesh":
# Convert from mesh to xml format
mesh2xml(ifilename, ofilename)
elif iformat == "gmsh":
# Convert from gmsh to xml format
gmsh2xml(ifilename, handler)
elif iformat == "Triangle":
# Convert from Triangle to xml format
triangle2xml(ifilename, ofilename)
elif iformat == "xml-old":
# Convert from old to new xml format
xml_old2xml(ifilename, ofilename)
elif iformat == "metis":
# Convert from metis graph to dolfin graph xml format
metis_graph2graph_xml(ifilename, ofilename)
elif iformat == "scotch":
# Convert from scotch graph to dolfin graph xml format
scotch_graph2graph_xml(ifilename, ofilename)
elif iformat == "diffpack":
# Convert from Diffpack tetrahedral grid format to xml format
diffpack2xml(ifilename, ofilename)
elif iformat == "abaqus":
# Convert from abaqus to xml format
_abaqus(ifilename, handler)
elif iformat == "NetCDF":
# Convert from NetCDF generated from ExodusII format to xml format
netcdf2xml(ifilename, ofilename)
elif iformat =="ExodusII":
# Convert from ExodusII format to xml format via NetCDF
exodus2xml(ifilename, ofilename)
elif iformat == "StarCD":
# Convert from Star-CD tetrahedral grid format to xml format
starcd2xml(ifilename, ofilename)
else:
_error("Sorry, cannot convert between %s and DOLFIN xml file formats." % iformat)
# XXX: handler.close messes things for other input formats than abaqus or gmsh
if iformat in ("abaqus", "gmsh"):
handler.close()
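# A minimal usage sketch (hypothetical paths), assuming the input suffix is one
# that format_from_suffix() recognizes:
#
#   convert2xml("channel.msh", "channel.xml")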
def starcd2xml(ifilename, ofilename):
"Convert from Star-CD tetrahedral grid format to DOLFIN XML."
print starcd2xml.__doc__
if not os.path.isfile(ifilename[:-3] + "vrt") or not os.path.isfile(ifilename[:-3] + "cel"):
print "StarCD format requires one .vrt file and one .cel file"
sys.exit(2)
# open output file
ofile = open(ofilename, "w")
# Open file, the vertices are in a .vrt file
ifile = open(ifilename[:-3] + "vrt", "r")
write_header_mesh(ofile, "tetrahedron", 3)
# Read & write vertices
    # first, read all lines (need to sweep two times through the file)
lines = ifile.readlines()
# second, find the number of vertices
num_vertices = -1
counter = 0
    # nodenr_map is needed because StarCD supports node numbering like 1,2,4 (i.e. 3 is missing)
nodenr_map = {}
for line in lines:
nodenr = int(line[0:15])
nodenr_map[nodenr] = counter
counter += 1
num_vertices = counter
# third, run over all vertices
write_header_vertices(ofile, num_vertices)
for line in lines:
nodenr = int(line[0:15])
vertex0 = float(line[15:31])
vertex1 = float(line[31:47])
vertex2 = float(line[47:63])
write_vertex(ofile, nodenr_map[nodenr], float(vertex0), float(vertex1), float(vertex2))
write_footer_vertices(ofile)
# Open file, the cells are in a .cel file
ifile = open(ifilename[:-3] + "cel", "r")
# Read & write cells
    # first, read all lines (need to sweep two times through the file)
lines = ifile.readlines()
# second, find the number of cells
num_cells = -1
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if node4 > 0:
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
counter += 1
else:
print "The file does contain cells that are not tetraheders. The cell number is ", cellnr, " the line read was ", line
else:
# triangles on the surface
# print "The file does contain cells that are not tetraheders node4==0. The cell number is ", cellnr, " the line read was ", line
#sys.exit(2)
pass
num_cells = counter
# third, run over all cells
write_header_cells(ofile, num_cells)
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if (node4 > 0):
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
write_cell_tetrahedron(ofile, counter, nodenr_map[node0], nodenr_map[node1], nodenr_map[node2], nodenr_map[node4])
counter += 1
write_footer_cells(ofile)
write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
|
cemagg/sucem-fem
|
sandbox/pec_labels/dolfin_utils_meshconvert.py
|
Python
|
gpl-3.0
| 47,536
|
[
"NetCDF"
] |
4b1190e45c863da4800c1c391c8b81b0704f85972d59f963237db5b2747e81c4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
import django.db.models.deletion
from django.conf import settings
import flooding_lib.models
import django_extensions.db.fields
import django_extensions.db.fields.json
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('approvaltool', '0001_initial'),
('contenttypes', '0001_initial'),
('lizard_worker', '0001_initial'),
('sharedproject', '0001_initial'),
('auth', '0001_initial'),
('flooding_presentation', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Animation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('frames', models.IntegerField(default=0)),
('cols', models.IntegerField(default=0)),
('rows', models.IntegerField(default=0)),
('maxvalue', models.FloatField(null=True, blank=True)),
('geotransform', django_extensions.db.fields.json.JSONField()),
('basedir', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('name', models.CharField(max_length=200)),
('remarks', models.TextField(null=True, blank=True)),
('file', models.FileField(null=True, upload_to=flooding_lib.models.get_attachment_path, blank=True)),
('uploaded_by', models.CharField(max_length=200)),
('uploaded_date', models.DateTimeField(null=True, blank=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
'db_table': 'flooding_attachment',
'verbose_name': 'Attachment',
'verbose_name_plural': 'Attachments',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Breach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('remarks', models.TextField(blank=True)),
('active', models.BooleanField(default=True)),
('levelnormfrequency', models.FloatField()),
('canalbottomlevel', models.FloatField(null=True, blank=True)),
('groundlevel', models.FloatField()),
('defrucritical', models.FloatField()),
('defbaselevel', models.FloatField(null=True, blank=True)),
('decheight', models.FloatField(null=True, blank=True)),
('decheightbaselevel', models.FloatField(null=True, blank=True)),
('internalnode', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'internal node')),
('externalnode', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'external node')),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'node itself')),
('code', models.CharField(max_length=20, null=True)),
('administrator', models.IntegerField(help_text=b'Breach administrator', null=True, blank=True)),
('fl_rk_adm_jud', models.IntegerField(help_text=b'Flood risk - administrator judgment (section part)', null=True, blank=True)),
('fl_rk_dpv_ref_part', models.IntegerField(help_text=b'Flood risk - DPV reference (section part)', null=True, blank=True)),
('fl_rk_dpv_ref_sect', models.IntegerField(help_text=b'Flood risk - DPV reference (dike section)', null=True, blank=True)),
('fl_rk_nrm', models.IntegerField(help_text=b'Flood risk - Norm', null=True, blank=True)),
],
options={
'ordering': ['name'],
'db_table': 'flooding_breach',
'verbose_name': 'Breach',
'verbose_name_plural': 'Breaches',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BreachSobekModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sobekid', models.CharField(max_length=200)),
('breach', models.ForeignKey(to='flooding_lib.Breach')),
],
options={
'db_table': 'flooding_breachsobekmodel',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Colormap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('matplotlib_name', models.CharField(unique=True, max_length=20)),
('description', models.CharField(unique=True, max_length=50)),
],
options={
'ordering': ('description',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CutoffLocation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('bottomlevel', models.FloatField()),
('width', models.FloatField()),
('deftclose', models.FloatField(null=True, blank=True)),
('type', models.IntegerField(choices=[(1, 'lock'), (2, 'culvert'), (3, 'weir'), (4, 'bridge'), (5, 'undefined'), (6, 'generic_internal')])),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'node itself')),
('code', models.CharField(max_length=15, null=True)),
],
options={
'db_table': 'flooding_cutofflocation',
'verbose_name': 'Cutoff location',
'verbose_name_plural': 'Cutoff locations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CutoffLocationSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('cutofflocations', models.ManyToManyField(to='flooding_lib.CutoffLocation')),
],
options={
'db_table': 'flooding_cutofflocationset',
'verbose_name': 'Cutoff location set',
'verbose_name_plural': 'Cutoff location sets',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CutoffLocationSobekModelSetting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sobekid', models.CharField(max_length=200)),
('cutofflocation', models.ForeignKey(to='flooding_lib.CutoffLocation')),
],
options={
'db_table': 'flooding_cutofflocationsobekmodelsetting',
'verbose_name': 'Cutoff location sobek model setting',
'verbose_name_plural': 'Cutoff location sobek model settings',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dike',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_dike',
'verbose_name': 'Dike',
'verbose_name_plural': 'Dikes',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EmbankmentUnit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('unit_id', models.CharField(max_length=20)),
('type', models.IntegerField(choices=[(0, 'existing'), (1, 'new')])),
('original_height', models.FloatField()),
('geometry', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
],
options={
'db_table': 'flooding_embankment_unit',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExternalWater',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('type', models.IntegerField(choices=[(1, 'sea'), (2, 'lake'), (3, 'canal'), (4, 'internal_lake'), (5, 'internal_canal'), (6, 'river'), (7, 'unknown'), (8, 'lower_river')])),
('liztype', models.IntegerField(blank=True, null=True, choices=[(1, 'sea'), (2, b'estuarium'), (3, b'groot meer (incl. afgesloten zeearm)'), (4, b'grote rivier'), (5, b'scheepvaartkanaal'), (6, b'binnenmeer'), (7, b'regionale beek'), (8, b'regionale revier'), (9, b'boezemwater'), (10, b'polderwater')])),
('area', models.IntegerField(null=True, blank=True)),
('deftstorm', models.FloatField(null=True, blank=True)),
('deftpeak', models.FloatField(null=True, blank=True)),
('deftsim', models.FloatField()),
('minlevel', models.FloatField(default=-10)),
('maxlevel', models.FloatField(default=15)),
('code', models.CharField(max_length=15, null=True)),
('cutofflocations', models.ManyToManyField(to='flooding_lib.CutoffLocation', blank=True)),
],
options={
'db_table': 'flooding_externalwater',
'verbose_name': 'External water',
'verbose_name_plural': 'External waters',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExtraInfoField',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=200)),
('use_in_scenario_overview', models.BooleanField(default=False)),
('header', models.IntegerField(default=20, choices=[(1, 'scenario'), (2, 'location'), (4, 'model'), (5, 'other'), (6, 'files'), (10, 'general'), (20, 'metadata'), (30, 'breaches'), (40, 'externalwater'), (70, 'none')])),
('position', models.IntegerField(default=0)),
],
options={
'db_table': 'flooding_extrainfofield',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExtraScenarioInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.CharField(max_length=100)),
('extrainfofield', models.ForeignKey(to='flooding_lib.ExtraInfoField')),
],
options={
'db_table': 'flooding_extrascenarioinfo',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Map',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('remarks', models.TextField(null=True, blank=True)),
('active', models.BooleanField(default=True)),
('index', models.IntegerField(default=100)),
('url', models.CharField(max_length=200)),
('layers', models.CharField(max_length=200)),
('transparent', models.NullBooleanField(default=None)),
('tiled', models.NullBooleanField(default=None)),
('srs', models.CharField(default=b'EPSG:900913', max_length=50)),
('visible', models.BooleanField(default=False)),
],
options={
'db_table': 'flooding_map',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Measure',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('reference_adjustment', models.IntegerField(default=0, choices=[(0, 'unkown'), (1, 'existing level'), (2, 'new level')])),
('adjustment', models.FloatField(default=0)),
],
options={
'db_table': 'flooding_measure',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Program',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_program',
'verbose_name': 'Program',
'verbose_name_plural': 'Programs',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('friendlyname', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('color_mapping_name', models.CharField(max_length=256, null=True, blank=True)),
('code', models.CharField(max_length=20, null=True)),
('approval_object_type', models.ForeignKey(default=flooding_lib.models.get_default_approval_type, to='approvaltool.ApprovalObjectType', null=True)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('friendlyname', 'name', 'owner'),
'db_table': 'flooding_project',
'verbose_name': 'Project',
'verbose_name_plural': 'Projects',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProjectColormap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('colormap', models.ForeignKey(to='flooding_lib.Colormap')),
('presentationtype', models.ForeignKey(to='flooding_presentation.PresentationType')),
('project', models.ForeignKey(to='flooding_lib.Project')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProjectGroupPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('permission', models.IntegerField(choices=[(1, 'view_scenario'), (2, 'add_scenario_new_simulation'), (7, 'add_scenario_import'), (3, 'edit_scenario'), (4, 'approve_scenario'), (5, 'delete_scenario'), (6, 'edit_scenario_simple')])),
('group', models.ForeignKey(to='auth.Group')),
('project', models.ForeignKey(to='flooding_lib.Project')),
],
options={
'db_table': 'flooding_projectgrouppermission',
'verbose_name': 'Project group permission',
'verbose_name_plural': 'Project group permissions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Raster',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', django_extensions.db.fields.UUIDField(unique=True, editable=False, name=b'uuid', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('longname', models.CharField(max_length=200)),
('active', models.BooleanField(default=True)),
('normfrequency', models.IntegerField(null=True, blank=True)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326, verbose_name=b'Region Border')),
('path', models.CharField(max_length=200)),
('code', models.CharField(max_length=20, null=True)),
('dijkringnr', models.IntegerField(null=True, blank=True)),
('cutofflocations', models.ManyToManyField(to='flooding_lib.CutoffLocation', blank=True)),
('maps', models.ManyToManyField(to='flooding_lib.Map', blank=True)),
],
options={
'db_table': 'flooding_region',
'verbose_name': 'Region',
'verbose_name_plural': 'Regions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RegionSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('parent', models.ForeignKey(related_name=b'children_set', blank=True, to='flooding_lib.RegionSet', null=True)),
('regions', models.ManyToManyField(to='flooding_lib.Region', blank=True)),
],
options={
'db_table': 'flooding_regionset',
'verbose_name': 'Region set',
'verbose_name_plural': 'Region sets',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('resultloc', models.CharField(max_length=200)),
('deltat', models.FloatField(null=True, blank=True)),
('resultpngloc', models.CharField(max_length=200, null=True, blank=True)),
('startnr', models.IntegerField(null=True, blank=True)),
('firstnr', models.IntegerField(null=True, blank=True)),
('lastnr', models.IntegerField(null=True, blank=True)),
('unit', models.CharField(max_length=10, null=True, blank=True)),
('value', models.FloatField(null=True, blank=True)),
('bbox', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326, null=True, verbose_name=b'Result Border', blank=True)),
('animation', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='flooding_lib.Animation', null=True)),
('raster', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='flooding_lib.Raster', null=True)),
],
options={
'db_table': 'flooding_result',
'verbose_name': 'Result',
'verbose_name_plural': 'Results',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ResultType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('shortname_dutch', models.CharField(max_length=20, null=True, blank=True)),
('overlaytype', models.CharField(max_length=20, null=True, blank=True)),
('unit', models.CharField(max_length=15, null=True, blank=True)),
('color_mapping_name', models.CharField(max_length=256, null=True, blank=True)),
('content_names_re', models.CharField(max_length=256, null=True, blank=True)),
('use_to_compute_arrival_times', models.BooleanField(default=False, help_text=b'Dit is een animatie die geschikt is om er aankomsttijden mee te berekenen')),
],
options={
'db_table': 'flooding_resulttype',
'verbose_name': 'Result type',
'verbose_name_plural': 'Result types',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ResultType_PresentationType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('remarks', models.CharField(max_length=100)),
('presentationtype', models.ForeignKey(to='flooding_presentation.PresentationType')),
('resulttype', models.ForeignKey(to='flooding_lib.ResultType')),
],
options={
'db_table': 'flooding_resulttype_presentationtype',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Scenario',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='name')),
('remarks', models.TextField(default=None, null=True, verbose_name='remarks', blank=True)),
('tsim', models.FloatField()),
('calcpriority', models.IntegerField(default=20, choices=[(20, 'low'), (30, 'medium'), (40, 'high')])),
('status_cache', models.IntegerField(default=None, null=True, choices=[(10, 'deleted'), (20, 'approved'), (30, 'disapproved'), (40, 'calculated'), (50, 'error'), (60, 'waiting'), (70, 'none'), (80, 'archived')])),
('migrated', models.NullBooleanField()),
('code', models.CharField(max_length=15, null=True)),
('project_id', models.IntegerField(null=True)),
('has_sobek_presentation', models.NullBooleanField()),
('result_base_path', models.TextField(help_text=b'If left blank, the path is retrieved through scenario.breaches[0].region.path', null=True, blank=True)),
('config_3di', models.CharField(max_length=50, null=True, blank=True)),
('archived', models.BooleanField(default=False, verbose_name='Archived')),
('archived_at', models.DateTimeField(null=True, verbose_name='Archived at', blank=True)),
('archived_by', models.ForeignKey(related_name=b'archived_by_user', verbose_name='Archived by', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('name', 'owner'),
'db_table': 'flooding_scenario',
'verbose_name': 'Scenario',
'verbose_name_plural': 'Scenarios',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Scenario_PresentationLayer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('presentationlayer', models.ForeignKey(to='flooding_presentation.PresentationLayer')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'db_table': 'flooding_scenario_presentationlayer',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioBreach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('widthbrinit', models.FloatField()),
('methstartbreach', models.IntegerField(choices=[(1, 'at top'), (2, 'at moment x'), (3, 'at crossing level x'), (4, 'unknown/error at import')])),
('tstartbreach', models.FloatField()),
('hstartbreach', models.FloatField()),
('brdischcoef', models.FloatField()),
('brf1', models.FloatField()),
('brf2', models.FloatField()),
('bottomlevelbreach', models.FloatField()),
('initialcrest', models.FloatField(null=True, blank=True)),
('ucritical', models.FloatField()),
('pitdepth', models.FloatField()),
('tmaxdepth', models.FloatField()),
('extwmaxlevel', models.FloatField()),
('extwbaselevel', models.FloatField(default=None, null=True, blank=True)),
('extwrepeattime', models.IntegerField(default=None, null=True, blank=True)),
('tstorm', models.FloatField(default=None, null=True, blank=True)),
('tpeak', models.FloatField(default=None, null=True, blank=True)),
('tdeltaphase', models.FloatField(default=None, null=True, blank=True)),
('manualwaterlevelinput', models.BooleanField(default=False)),
('code', models.CharField(max_length=15, null=True, blank=True)),
('breach', models.ForeignKey(to='flooding_lib.Breach')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'db_table': 'flooding_scenariobreach',
'verbose_name': 'Scenario breach',
'verbose_name_plural': 'Scenario breaches',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioCutoffLocation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action', models.IntegerField(default=1, null=True, blank=True)),
('tclose', models.FloatField()),
('cutofflocation', models.ForeignKey(to='flooding_lib.CutoffLocation')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'db_table': 'flooding_scenariocutofflocation',
'verbose_name': 'Scenario cutoff location',
'verbose_name_plural': 'Scenario cutoff locations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioProject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_main_project', models.BooleanField(default=False)),
('approved', models.NullBooleanField()),
('approvalobject', models.ForeignKey(default=None, blank=True, to='approvaltool.ApprovalObject', null=True)),
('project', models.ForeignKey(to='flooding_lib.Project')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioShareOffer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('new_project', models.ForeignKey(to='flooding_lib.Project')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
('shared_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SobekModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sobekmodeltype', models.IntegerField(choices=[(1, 'canal'), (2, 'inundation')])),
('active', models.BooleanField(default=True)),
('project_fileloc', models.CharField(help_text=b'In case of 3Di, point to model zipfile.', max_length=200)),
('model_case', models.IntegerField()),
('model_version', models.CharField(max_length=20)),
('model_srid', models.IntegerField()),
('model_varname', models.CharField(help_text=b'In case of 3Di, .mdu filename in zip.', max_length=40, null=True, blank=True)),
('model_vardescription', models.CharField(max_length=200, null=True, blank=True)),
('remarks', models.TextField(null=True)),
('embankment_damage_shape', models.CharField(max_length=200, null=True, blank=True)),
('code', models.CharField(max_length=15, null=True, blank=True)),
('keep_initial_level', models.BooleanField(default=False)),
],
options={
'db_table': 'flooding_sobekmodel',
'verbose_name': 'Sobek model',
'verbose_name_plural': 'Sobek models',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SobekVersion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('fileloc_startfile', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_sobekversion',
'verbose_name': 'Sobek version',
'verbose_name_plural': 'Sobek versions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Strategy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('visible_for_loading', models.BooleanField(default=False)),
('save_date', models.DateTimeField(null=True, blank=True)),
('region', models.ForeignKey(blank=True, to='flooding_lib.Region', null=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'flooding_strategy',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('remarks', models.TextField(blank=True)),
('creatorlog', models.CharField(max_length=40)),
('tstart', models.DateTimeField()),
('tfinished', models.DateTimeField(null=True, blank=True)),
('errorlog', models.TextField(null=True, blank=True)),
('successful', models.NullBooleanField()),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'get_latest_by': 'tstart',
'verbose_name': 'Task',
'verbose_name_plural': 'Tasks',
'db_table': 'flooding_task',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TaskExecutor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('ipaddress', models.IPAddressField()),
('port', models.IntegerField()),
('active', models.BooleanField(default=True)),
('revision', models.CharField(max_length=20)),
('seq', models.IntegerField(default=1)),
],
options={
'db_table': 'flooding_taskexecutor',
'verbose_name': 'Task executor',
'verbose_name_plural': 'Task executors',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TaskType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_tasktype',
'verbose_name': 'Task type',
'verbose_name_plural': 'Task types',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ThreediCalculation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.IntegerField(default=1, choices=[(1, b'created'), (2, b'netcdf created'), (3, b'images created, finished.')])),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ThreediModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=80)),
('scenario_zip_filename', models.TextField(help_text=b'full path start with / or folder from Settings.SOURCE_DIR, must contain mdu file')),
('mdu_filename', models.TextField(help_text=b'base filename of mdu file')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('permission', models.IntegerField(choices=[(1, 'view_scenario'), (2, 'add_scenario_new_simulation'), (7, 'add_scenario_import'), (3, 'edit_scenario'), (4, 'approve_scenario'), (5, 'delete_scenario'), (6, 'edit_scenario_simple')])),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'flooding_userpermission',
'verbose_name': 'User permission',
'verbose_name_plural': 'User permissions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Waterlevel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.FloatField()),
('value', models.FloatField()),
],
options={
'db_table': 'flooding_waterlevel',
'verbose_name': 'Waterlevel',
'verbose_name_plural': 'Waterlevels',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WaterlevelSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('type', models.IntegerField(choices=[(1, 'undefined'), (2, 'tide'), (3, 'breach')])),
('remarks', models.TextField(null=True, blank=True)),
('code', models.CharField(max_length=20, null=True)),
],
options={
'db_table': 'flooding_waterlevelset',
'verbose_name': 'Waterlevel set',
'verbose_name_plural': 'Waterlevel sets',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='waterlevel',
name='waterlevelset',
field=models.ForeignKey(to='flooding_lib.WaterlevelSet'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='waterlevel',
unique_together=set([('waterlevelset', 'time')]),
),
migrations.AlterUniqueTogether(
name='userpermission',
unique_together=set([('user', 'permission')]),
),
migrations.AddField(
model_name='threedicalculation',
name='threedi_model',
field=models.ForeignKey(to='flooding_lib.ThreediModel'),
preserve_default=True,
),
migrations.AddField(
model_name='taskexecutor',
name='tasktypes',
field=models.ManyToManyField(to='flooding_lib.TaskType', null=True, blank=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='taskexecutor',
unique_together=set([('ipaddress', 'port'), ('name', 'seq')]),
),
migrations.AddField(
model_name='task',
name='tasktype',
field=models.ForeignKey(to='flooding_lib.TaskType'),
preserve_default=True,
),
migrations.AddField(
model_name='sobekmodel',
name='sobekversion',
field=models.ForeignKey(to='flooding_lib.SobekVersion'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='scenarioshareoffer',
unique_together=set([('scenario', 'new_project')]),
),
migrations.AlterUniqueTogether(
name='scenariocutofflocation',
unique_together=set([('scenario', 'cutofflocation')]),
),
migrations.AddField(
model_name='scenariobreach',
name='sobekmodel_externalwater',
field=models.ForeignKey(blank=True, to='flooding_lib.SobekModel', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenariobreach',
name='tide',
field=models.ForeignKey(related_name=b'tide', default=None, blank=True, to='flooding_lib.WaterlevelSet', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenariobreach',
name='waterlevelset',
field=models.ForeignKey(to='flooding_lib.WaterlevelSet'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='scenariobreach',
unique_together=set([('scenario', 'breach')]),
),
migrations.AddField(
model_name='scenario',
name='breaches',
field=models.ManyToManyField(to='flooding_lib.Breach', through='flooding_lib.ScenarioBreach'),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='cutofflocations',
field=models.ManyToManyField(to='flooding_lib.CutoffLocation', through='flooding_lib.ScenarioCutoffLocation', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='presentationlayer',
field=models.ManyToManyField(to='flooding_presentation.PresentationLayer', through='flooding_lib.Scenario_PresentationLayer'),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='projects',
field=models.ManyToManyField(related_name=b'scenarios', through='flooding_lib.ScenarioProject', to='flooding_lib.Project'),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='ror_province',
field=models.ForeignKey(blank=True, to='sharedproject.Province', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='sobekmodel_inundation',
field=models.ForeignKey(to='flooding_lib.SobekModel', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='strategy',
field=models.ForeignKey(default=None, blank=True, to='flooding_lib.Strategy', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='workflow_template',
field=models.ForeignKey(db_column=b'workflow_template', to='lizard_worker.WorkflowTemplate', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='resulttype',
name='presentationtype',
field=models.ManyToManyField(to='flooding_presentation.PresentationType', through='flooding_lib.ResultType_PresentationType'),
preserve_default=True,
),
migrations.AddField(
model_name='resulttype',
name='program',
field=models.ForeignKey(to='flooding_lib.Program'),
preserve_default=True,
),
migrations.AddField(
model_name='result',
name='resulttype',
field=models.ForeignKey(to='flooding_lib.ResultType'),
preserve_default=True,
),
migrations.AddField(
model_name='result',
name='scenario',
field=models.ForeignKey(to='flooding_lib.Scenario'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='result',
unique_together=set([('scenario', 'resulttype')]),
),
migrations.AddField(
model_name='region',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', blank=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='projectgrouppermission',
unique_together=set([('group', 'project', 'permission')]),
),
migrations.AddField(
model_name='project',
name='regions',
field=models.ManyToManyField(to='flooding_lib.Region', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='project',
name='regionsets',
field=models.ManyToManyField(to='flooding_lib.RegionSet', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='measure',
name='strategy',
field=models.ManyToManyField(to='flooding_lib.Strategy'),
preserve_default=True,
),
migrations.AddField(
model_name='extrascenarioinfo',
name='scenario',
field=models.ForeignKey(to='flooding_lib.Scenario'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='extrascenarioinfo',
unique_together=set([('extrainfofield', 'scenario')]),
),
migrations.AddField(
model_name='externalwater',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='embankmentunit',
name='measure',
field=models.ManyToManyField(to='flooding_lib.Measure'),
preserve_default=True,
),
migrations.AddField(
model_name='embankmentunit',
name='region',
field=models.ForeignKey(to='flooding_lib.Region'),
preserve_default=True,
),
migrations.AddField(
model_name='cutofflocationsobekmodelsetting',
name='sobekmodel',
field=models.ForeignKey(to='flooding_lib.SobekModel'),
preserve_default=True,
),
migrations.AddField(
model_name='cutofflocation',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', through='flooding_lib.CutoffLocationSobekModelSetting'),
preserve_default=True,
),
migrations.AddField(
model_name='breachsobekmodel',
name='sobekmodel',
field=models.ForeignKey(to='flooding_lib.SobekModel'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='breachsobekmodel',
unique_together=set([('sobekmodel', 'breach')]),
),
migrations.AddField(
model_name='breach',
name='defaulttide',
field=models.ForeignKey(to='flooding_lib.WaterlevelSet'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='dike',
field=models.ForeignKey(to='flooding_lib.Dike'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='externalwater',
field=models.ForeignKey(to='flooding_lib.ExternalWater'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='region',
field=models.ForeignKey(to='flooding_lib.Region'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', through='flooding_lib.BreachSobekModel'),
preserve_default=True,
),
]
|
lizardsystem/flooding-lib
|
flooding_lib/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 47,131
|
[
"NetCDF"
] |
f1f19aaa2a12d572954506f35b708566a57732c2016bc4309190ef802e167dd3
|
"""Integration with Python standard library module urllib2: Request class.
Copyright 2004-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import urllib2, urllib, logging
from _clientcookie import request_host
import _rfc3986
warn = logging.getLogger("mechanize").warning
# don't complain about missing logging handler
logging.getLogger("mechanize").setLevel(logging.ERROR)
class Request(urllib2.Request):
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False, visit=None):
# In mechanize 0.2, the interpretation of a unicode url argument will
# change: A unicode url argument will be interpreted as an IRI, and a
# bytestring as a URI. For now, we accept unicode or bytestring. We
# don't insist that the value is always a URI (specifically, must only
# contain characters which are legal), because that might break working
# code (who knows what bytes some servers want to see, especially with
# browser plugins for internationalised URIs).
if not _rfc3986.is_clean_uri(url):
warn("url argument is not a URI "
"(contains illegal characters) %r" % url)
urllib2.Request.__init__(self, url, data, headers)
self.selector = None
self.unredirected_hdrs = {}
self.visit = visit
# All the terminology below comes from RFC 2965.
self.unverifiable = unverifiable
# Set request-host of origin transaction.
# The origin request-host is needed in order to decide whether
# unverifiable sub-requests (automatic redirects, images embedded
# in HTML, etc.) are to third-party hosts. If they are, the
# resulting transactions might need to be conducted with cookies
# turned off.
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
def get_selector(self):
return urllib.splittag(self.__r_host)[0]
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_unredirected_header(self, key, val):
"""Add a header that will not be added to a redirected request."""
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
"""True iff request has named header (regular or unredirected)."""
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
def __str__(self):
return "<Request for %s>" % self.get_full_url()
def get_method(self):
if self.has_data():
return "POST"
else:
return "GET"
|
jasrusable/fun
|
venv/lib/python2.7/site-packages/twill/other_packages/_mechanize_dist/_request.py
|
Python
|
gpl-2.0
| 3,236
|
[
"VisIt"
] |
b822e1f68154a7dcfda41baf75f1d04d3fd3c4900aef88e9405428faf87e9c85
|
# This is the code for the experiments performed on the CIFAR-10 dataset for the DeLiGAN model. Minor adjustments
# to the code, as suggested in the inline comments, can be made to test the baseline GAN models. Corresponding details
# about these experiments can be found in section 5.4 of the paper and the results showing the outputs can be seen in Fig 5 and Table 1.
import argparse
import cPickle
import time
import numpy as np
import theano as th
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne
import lasagne.layers as ll
from lasagne.init import Normal
from lasagne.layers import dnn
import nn
import sys
import plotting
import cifar10_data
import params
# settings
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=1)
parser.add_argument('--batch_size', default=100)
parser.add_argument('--unlabeled_weight', type=float, default=1.)
parser.add_argument('--learning_rate', type=float, default=0.0003)
parser.add_argument('--data_dir', type=str, default='../datasets/cifar-10-python')
parser.add_argument('--results_dir', type=str, default='../results/cifar-10-python')
parser.add_argument('--count', type=int, default=400)
args = parser.parse_args()
print(args)
# fixed random seeds
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
# load CIFAR-10 and sample 2000 random images
trainx, trainy = cifar10_data.load(args.data_dir, subset='train')
ind = rng.permutation(trainx.shape[0])
trainx = trainx[ind]
trainy = trainy[ind]
trainx = trainx[:2000]
trainy = trainy[:2000]
trainx_unl = trainx.copy()
testx, testy = cifar10_data.load(args.data_dir, subset='test')
nr_batches_train = int(trainx.shape[0]/args.batch_size)
nr_batches_test = int(testx.shape[0]/args.batch_size)
# specify generative model
noise_dim = (args.batch_size, 100)
Z = th.shared(value=rng.uniform(-1.0,1.0,noise_dim).astype(np.float32), name='Z', borrow=True)
sig = th.shared(value=rng.uniform(-0.2, 0.2,noise_dim).astype(np.float32), name='sig', borrow=True)
noise = theano_rng.normal(size=noise_dim)
#one_hot = T.eye(args.batch_size) # Uncomment this line for training/testing MoE-GAN
#noise = T.concatenate([noise, one_hot], axis=1) # Uncomment this line for training/testing MoE-GAN
#gen_layers = [ll.InputLayer(shape=(args.batch_size,100 + args.batch_size), input_var=noise)] # Uncomment this line for training/testing MoE-GAN
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.MoGLayer(gen_layers[-1], noise_dim=noise_dim, z=Z,sig=sig)) # Comment this line for training/testing baseline GAN models like GAN, GAN++, MoE-GAN
#gen_layers.append(ll.DenseLayer(gen_layers[-1], num_units=args.batch_size, W=Normal(0.05), nonlinearity=nn.relu)) # Uncomment this line when testing GAN++
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,512,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,3,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1))
gen_dat = ll.get_output(gen_layers[-1])
# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.GaussianNoiseLayer(disc_layers[-1], sigma=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(nn.MinibatchLayer(disc_layers[-1], num_kernels=100))
disc_layers.append(nn.weight_norm(ll.DenseLayer(disc_layers[-1], num_units=10, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1))
# costs
labels = T.ivector()
x_lab = T.tensor4()
x_unl = T.tensor4()
temp = ll.get_output(gen_layers[-1], deterministic=False, init=True)
temp = ll.get_output(disc_layers[-1], x_lab, deterministic=False, init=True)
init_updates = [u for l in gen_layers+disc_layers for u in getattr(l,'init_updates',[])]
output_before_softmax_lab = ll.get_output(disc_layers[-1], x_lab, deterministic=False)
output_before_softmax_unl = ll.get_output(disc_layers[-1], x_unl, deterministic=False)
output_before_softmax_gen = ll.get_output(disc_layers[-1], gen_dat, deterministic=False)
sig1 = gen_layers[1].get_sig() # Comment this line for training/testing baseline GAN models
#sig1 = sig # Uncomment this line for training/testing baseline GAN models
sigloss =T.mean((1-sig1)*(1-sig1))*.05
l_lab = output_before_softmax_lab[T.arange(args.batch_size),labels]
l_unl = nn.log_sum_exp(output_before_softmax_unl)
l_gen = nn.log_sum_exp(output_before_softmax_gen)
loss_lab = -T.mean(l_lab) + T.mean(T.mean(nn.log_sum_exp(output_before_softmax_lab)))
loss_unl = -0.5*T.mean(l_unl) + 0.5*T.mean(T.nnet.softplus(l_unl)) + 0.5*T.mean(T.nnet.softplus(l_gen))
train_err = T.mean(T.neq(T.argmax(output_before_softmax_lab,axis=1),labels))
# test error
output_before_softmax = ll.get_output(disc_layers[-1], x_lab, deterministic=True)
test_err = T.mean(T.neq(T.argmax(output_before_softmax,axis=1),labels))
# Theano functions for training the disc net
lr = T.scalar()
disc_params = ll.get_all_params(disc_layers, trainable=True)
disc_param_updates = nn.adam_updates(disc_params, loss_lab + args.unlabeled_weight*loss_unl, lr=lr, mom1=0.5)
disc_param_avg = [th.shared(np.cast[th.config.floatX](0.*p.get_value())) for p in disc_params]
disc_avg_updates = [(a,a+0.0001*(p-a)) for p,a in zip(disc_params,disc_param_avg)]
disc_avg_givens = [(p,a) for p,a in zip(disc_params,disc_param_avg)]
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates)
train_batch_disc = th.function(inputs=[x_lab,labels,x_unl,lr], outputs=[loss_lab, loss_unl, train_err], updates=disc_param_updates+disc_avg_updates)
test_batch = th.function(inputs=[x_lab,labels], outputs=test_err, givens=disc_avg_givens)
samplefun = th.function(inputs=[],outputs=gen_dat)
# Theano functions for training the gen net
loss_gen = -T.mean(T.nnet.softplus(l_gen))
gen_params = ll.get_all_params(gen_layers[-1], trainable=True)
gen_param_updates = nn.adam_updates(gen_params, loss_gen, lr=lr, mom1=0.5)
train_batch_gen = th.function(inputs=[lr], outputs=[sig1,sigloss,loss_gen], updates=gen_param_updates)
#Uncomment this block for generating GAN samples from given model
'''
f = np.load(args.results_dir + '/disc_params1180.npz')
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
for i,p in enumerate(disc_params):
p.set_value(param_values[i])
print("disc_params fed")
f = np.load(args.results_dir + '/gen_params1180.npz')
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
for i,p in enumerate(gen_params):
p.set_value(param_values[i])
print("gen_params fed")
samples=[]
for i in range(50):
sample_x = samplefun()
samples.append(sample_x)
samples = np.concatenate(samples,0)
print(samples)
#sys.exit()
np.save(args.results_dir + '/samples50k.npy', samples)
print("samples saved")
sys.exit()
'''
inds = rng.permutation(trainx.shape[0])
trainx = trainx[inds]
trainy = trainy[inds]
# Uncomment this block when training on the entire dataset
'''
txs = []
tys = []
for j in range(10):
txs.append(trainx[trainy==j][:args.count])
tys.append(trainy[trainy==j][:args.count])
txs = np.concatenate(txs, axis=0)
tys = np.concatenate(tys, axis=0)
'''
a=[]
# //////////// perform training //////////////
for epoch in range(1200):
begin = time.time()
lr = np.cast[th.config.floatX](args.learning_rate * np.minimum(3. - epoch/400., 1.))
# Uncomment this block when training on the entire dataset
'''
# construct randomly permuted minibatches
trainx = []
trainy = []
for t in range(int(np.ceil(trainx_unl.shape[0]/float(txs.shape[0])))):
inds = rng.permutation(txs.shape[0])
trainx.append(txs[inds])
trainy.append(tys[inds])
trainx = np.concatenate(trainx, axis=0)
trainy = np.concatenate(trainy, axis=0)
trainx_unl = trainx_unl[rng.permutation(trainx_unl.shape[0])]
'''
if epoch==0:
init_param(trainx[:500]) # data based initialization
# train
loss_lab = 0.
loss_unl = 0.
train_err = 0.
for t in range(nr_batches_train):
ll, lu, te = train_batch_disc(trainx[t*args.batch_size:(t+1)*args.batch_size],trainy[t*args.batch_size:(t+1)*args.batch_size],
trainx_unl[t*args.batch_size:(t+1)*args.batch_size],lr)
loss_lab += ll
loss_unl += lu
train_err += te
for rep in range(3):
sigm, sigmloss, genloss = train_batch_gen(lr)
loss_lab /= nr_batches_train
loss_unl /= nr_batches_train
train_err /= nr_batches_train
# test
test_err = 0.
for t in range(nr_batches_test):
test_err += test_batch(testx[t*args.batch_size:(t+1)*args.batch_size],testy[t*args.batch_size:(t+1)*args.batch_size])
test_err /= nr_batches_test
# report
print("Iteration %d, time = %ds, loss_lab = %.4f, loss_unl = %.4f, train err= %.4f, test err = %.4f, gen_loss = %.4f, sigloss = %.4f" %(epoch, time.time()-begin, loss_lab, loss_unl,train_err,test_err,genloss,sigmloss))
sys.stdout.flush()
a.append([epoch, loss_lab, loss_unl, train_err, test_err,genloss,sigmloss])
# generate samples from the model
sample_x = samplefun()
img_bhwc = np.transpose(sample_x[:100,], (0, 2, 3, 1))
img_tile = plotting.img_tile(img_bhwc, aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title='CIFAR10 samples')
plotting.plt.savefig(args.results_dir + '/dg_cifar10_sample_minibatch.png')
if epoch%20==0:
NNdiff = np.sum(np.sum(np.sum(np.square(np.expand_dims(sample_x,axis=1)-np.expand_dims(trainx,axis=0)),axis=2),axis=2),axis=2)
NN = trainx[np.argmin(NNdiff,axis=1)]
NN = np.transpose(NN[:100], (0, 2, 3, 1))
NN_tile = plotting.img_tile(NN, aspect_ratio=1.0,border_color=1.0,stretch=True)
img_tile = np.concatenate((img_tile,NN_tile),axis=1)
img = plotting.plot_img(img_tile, title='CIFAR10 samples')
plotting.plt.savefig(args.results_dir + '/'+str(epoch)+'.png')
# save params
np.savez(args.results_dir + '/disc_params'+str(epoch)+'.npz',*[p.get_value() for p in disc_params])
np.savez(args.results_dir + '/gen_params'+str(epoch)+'.npz',*[p.get_value() for p in gen_params])
np.save(args.results_dir + '/train/errors.npy',a)
np.save(args.results_dir + '/train/sig.npy',sigm)
plotting.plt.close('all')
|
val-iisc/deligan
|
src/cifar/dg_cifar.py
|
Python
|
mit
| 12,180
|
[
"MOE"
] |
b3b17c6fa9fdb25e7cdd26d1324cfbe34344cce51846626113bbc8b479321b31
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 17:55:17 2015
Methods useful in bioinformatic algorithms
@author: Pablo the awesome molecular jedi
"""
# Libraries
from __future__ import division; # Needed in order to avoid weird rounding practices in Python 2
from builtins import str
from builtins import range
from math import *; # Used for logarithms
import random; # Needed to generate random color codes
import datetime; # Needed to access dates
from Bio.SeqUtils import MeltingTemp as mt # needed for NN melting temp
# Methods
# File handling
"""
Takes file with multiple sequences in Fasta format,
Returns a dictionary of keys=sequence Fasta names, values=sequences
"""
def loadFastas(pFile):
txt = open(pFile); # Access given file
d = txt.read(); # Read file
txt.close(); # Close file
d = d.lstrip(">"); # Remove initial >
d = d.split(">"); # Split into separate sequences
d[:] = [i.split("\n", 1) for i in d]; # Split each sequence (only first occurrence of \n) into lines: first line contains sequence name. Returns d as a list of lists (sequences) containing strings (seq name, seq).
strNames = [i[0] for i in d]; # Stores sequence names in separate list.
d[:] = [i[1].split("\n") for i in d]; # Split each sequence into its lines. Keeps only sequences. D is now a list of lists (sequences) containing strings (lines of sequence).
d[:] = ["".join(i) for i in d]; # Join each sequence into one string (per sequence).
seqDic = dict(zip(strNames, d)); # Creates dictionary with seq names as keys and seq strings as values.
return seqDic; # returns dictionary
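# Hedged example (illustrative only; "seqs.fasta" is a hypothetical file): a file containing
#     >geneA
#     ATGAAA
#     >geneB
#     ATGCCC
# would be returned by loadFastas("seqs.fasta") as
#     {"geneA": "ATGAAA", "geneB": "ATGCCC"}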
"""
Takes file with multiple DNA sequences separated by a newline \n escape sequence,
Returns a list of strings (sequences)
"""
def loadSeqs(pFile):
txt = open(pFile); # Access given file
d = txt.read(); # Read file
d = d.split("\n"); # Split each sequence into its lines. Keeps only sequences. D is now a list containing strings (sequences).
txt.close(); # close file
return d; # returns dictionary
"""
Output data to fileName.
"""
def output(out, fileName, wipe=False, maxSizeFragment=10000):
mode = "a"; # default mode is to append
if wipe:
mode = "w+"; # change to overwrite file if wiping
chunks = [out[i:i+maxSizeFragment] for i in range(0, len(out), maxSizeFragment)]; # divide filestring into chunks
fOut = open(fileName,mode); # create or open file in appending mode
c = 0; # counter if keeping track of progress
for chunk in chunks: # break into smaller chunks and write
# print c # print progress
fOut.write(chunk); # write output to file
c +=1 # advance counter
fOut.close(); # close file
# Sequence manipulation
"""
Takes two strings,
returns True if they contain matching nucleotides.
Can handle multi-case and "N" nucleotides.
"""
def compareSeq(pSeq1, pSeq2):
seq1 = pSeq1.upper(); # makes seq all uppercase to enable multi-case comparing
seq2 = pSeq2.upper(); # makes seq all uppercase to enable multi-case comparing
r = seq1 == seq2; # compares sequences
if not r and len(seq1) == len(seq2): # if not equal and same size, pay closer attention
if seq1.find("N") < 0 or seq2.find("N") < 0: # if seqs contain N character
r = True; # reset response variable to true
for i in range(0,len(seq1)):
if seq1[i] != seq2[i] and seq1[i] != "N" and seq2[i] != "N":
r = False;
break; # escapes iteration
pass
return r; # returns result
"""
Takes two strings,
returns index of first occurrence of second string inside first
"""
def findFirst(pSeq, pMotif):
seq = pSeq.upper(); # makes seq all uppercase to enable multi-case searching
motif = pMotif.upper(); # makes motif all uppercase to enable multi-case searching
index = seq.find(motif); # find index of motif occurrence
return index;
"""
Takes two strings,
returns list with indexes of occurrences of second string inside first
"""
def findMotif(pSeq, pMotif):
indexes = []; # Stores indexes of all occurences of the given string
start = 0; # Starting index for the search
notDone = True;
while notDone: # As long as end not reached
seq = pSeq.upper(); # makes seq all uppercase to enable multi-case searching
motif = pMotif.upper(); # makes motif all uppercase to enable multi-case searching
index = seq.find(motif, start); # find index of motif occurrence
if index == -1: # if not found
notDone = False; # search done
else: # else (if found)
start = index+1; # Move index start number
indexes.append(index); # store index
#if len(indexes) == 0: # If motif has not been found,
# indexes = "Motif not found."; # say so.
return indexes;
"""
Counts As, Ts, Cs, and Gs in DNA sequence. Returns dictionary with nucleotides as keys.
"""
def count(seq):
atcg = {"A":(seq.count("A")+seq.count("a")), "T":(seq.count("T")+seq.count("t")), "G":(seq.count("G")+seq.count("g")), "C":(seq.count("C")+seq.count("c")) };
return atcg;
"""
Returns GC content of sequence as a fraction.
"""
def gcContent(pSeq):
seq = pSeq.upper(); # uppercases everything
GC = 0; # counter
for bp in seq: # for every letter in sequence
if bp == "G" or bp == "C": # if letter is G or C
GC = GC + 1; # add one to GC counter
GC = GC/len(seq); # Stores GC content for this sequence as decimal. Since using future division, we're fine with the division operator on ints.
return GC;
"""
Takes dictionary of keys=sequence Fasta names, values=sequences
Returns list with max GC content, name of sequence with max GC content
"""
def maxGCContent(pData):
max = [0, 0]; # stores maximum GC
for seq in list(pData.keys()): # for every sequence
GC = 0; # Counts number of occurrences of G or C
for j in pData[seq]: # for every letter in sequence
if j == "G" or j == "C" or j == "g" or j == "c" : # if letter is G or C
GC += 1; # add one to GC counter
if GC > max[0]: # if GC content of this sequence is greater than the previous maximum,
max = [GC, seq]; # store this content and the sequence name as maximum
max[0] = 100*max[0]/len(pData[max[1]]); # Stores GC content for this sequence as percentage
return max;
"""
Returns approximate melting temperature of sequence to its complement using
Howley formula. Cation concentration is in moles/L (M). RNA-DNA interactions are
equivalent to RNA-RNA interactions.
http://www.sigmaaldrich.com/technical-documents/articles/biology/oligos-melting-temp.html
"""
def meltingTempSimple(seq, cationConcentration=0.05, interactionType="DNA"):
Tm = 81.5 + 41*(gcContent(seq)) - 500/len(seq) + 16.6*log(cationConcentration,10);
if interactionType == "RNA":
Tm = 79.8 + 58.4*(gcContent(seq)) + 11.8*(gcContent(seq))**2 - 820/len(seq) + 18.5*log(cationConcentration,10);
return Tm;
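# Hedged worked example of the formula above (values rounded; assumes the default 0.05 M cation concentration):
# for a 20-mer with 50% GC content,
#     Tm = 81.5 + 41*0.5 - 500/20 + 16.6*log10(0.05)
#        = 81.5 + 20.5 - 25.0 - 21.6  =  ~55.4 C
# e.g. meltingTempSimple("ATGCATGCATGCATGCATGC") should return a value near 55.4.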
"""
Returns approximate melting temperature of sequence to its complement using
nearest neighbor algorithm (Biopython wrapper). Cation concentration is in
moles/L (M).
"""
def meltingTemp(seq, cationConcentration=0.05, interactionType="DNA"):
if interactionType=="DNA":
Tm = mt.Tm_NN(seq,Na=cationConcentration*1000, nn_table=mt.DNA_NN3); # Allawi & SantaLucia (1997) (default)
if interactionType == "RNA":
Tm = mt.Tm_NN(seq,Na=cationConcentration*1000, nn_table=mt.RNA_NN3); # Chen et al. (2012)
return Tm;
"""
Takes dictionary of keys=sequence Fasta names, values=sequences
Return dictionary with { consensus: consensus sequence (string), nucleotide: profile (list of nucleotide frequency in each position)
"""
def consensus(data):
l = len(list(data.values())[0]); # Stores length of DNA sequences analyzed
C = { "consensus":"", "A":[0]*l, "T":[0]*l, "C":[0]*l, "G":[0]*l } # Dictionary with consensus sequence and lists of size l, stores consensus sequence and occurrences of nucleotide in all sequences for each position
# Build profile matrix:
for seq in list(data.values()): # For every sequence
for b in range(0,l): # For every base in sequence
C[seq[b]][b] = C[seq[b]][b] + 1; # Add one to count of corresponding nucleotide at corresponding position
# Build consensus sequence:
for p in range(0,l): # For every position in sequence
S = { C["A"][p]:"A", C["T"][p]:"T", C["C"][p]:"C", C["G"][p]:"G" } # Define a dictionary that relates the number of occurrences in all sequences of each nucleotide at the current position to the nucleotide's letter
C["consensus"] = C["consensus"] + S[max(S.keys())]; # Adds nucleotide letter with most occurrences at this position to the consensus sequence
return C;
"""
Transcribes DNA to RNA
"""
def transcribe(dna):
rna = dna.replace("T", "U");
rna = rna.replace("t", "u");
return rna;
"""
Reverse transcribes RNA to DNA
"""
def revTranscribe(rna):
dna = rna.replace("U", "T");
dna = dna.replace("u", "t");
return dna;
"""
Returns genetic code dictionary. Stop codons are "*"
"""
def geneticCode():
code = {'CGA':'R','UUU':'F','UUC':'F',
'UUA':'L','UUG':'L','UCU':'S',
'UCC':'S','UCA':'S','UCG':'S',
'UAU':'Y','UAC':'Y','UAA':'*',
'UAG':'*','UGU':'C','UGC':'C',
'UGA':'*','UGG':'W','CUU':'L',
'CUC':'L','CUA':'L','CUG':'L',
'CCU':'P','CCC':'P','CCA':'P',
'CCG':'P','CAU':'H','CAC':'H',
'CAA':'Q','CAG':'Q','CGU':'R',
'CGC':'R','AGA':'R','CGG':'R',
'AUU':'I','AUC':'I','AUA':'I',
'AUG':'M','ACU':'T','ACC':'T',
'ACA':'T','ACG':'T','AAU':'N',
'AAC':'N','AAA':'K','AAG':'K',
'AGU':'S','AGC':'S','GGA':'G',
'AGG':'R','GUU':'V','GUC':'V',
'GUA':'V','GUG':'V','GCU':'A',
'GCC':'A','GCA':'A','GCG':'A',
'GAU':'D','GAC':'D','GAA':'E',
'GAG':'E','GGU':'G','GGC':'G',
'GGG':'G'
};
return code;
"""
Returns dictionary with keys=codons and values=frequencies for the codon table
whose file path was specified as an argument. If no organism is specified,
uniform codon usage is assumed by default.
"""
def codonUsage(tableFilePath=""):
p = 1/float(64); # calculates probability of a codon according to uniform distribution
codonFreqs = dict.fromkeys( list(geneticCode().keys()), 1/float(len(geneticCode())) ); # creates dictionary with codons as keys and a uniform probability of each key as values
if len(tableFilePath) > 0: # if a file path was passed as an argument,
txt = open(tableFilePath); # Access given file
d = txt.read(); # Read file
d = d.lstrip(); # Remove initial whitespace
d = d.split("\n"); # Split into separate sequences
totCodons = 0; # stores total number of codons
for l in d[1::]: # iterates across file lines; starts at 1 to skip table header
el = l.split(); # splits lines into elements separated by whitespace
if len(el) > 0: # if line is not empty,
codon = el[1]; # stores codon (second column)
freq = float(el[2]); # stores codon's frequency (third column)
codonFreqs[codon] = freq; # associates codon to frequency
totCodons += freq; # adds number of codons to total
txt.close(); # close file
for c in codonFreqs: # for every codon,
codonFreqs[c] = codonFreqs[c]/float(totCodons); # normalize over total number of codons
return codonFreqs;
"""
Translates RNA to protein (takes rna string, returns peptide string).
"""
def translate(pSeq):
seq = transcribe(pSeq.upper()); # everything to uppercase
code = geneticCode(); # Stores genetic code
pep = ""; # stores peptide sequence
i = 0; # position on RNA sequence
while i < len(seq): # for every codon
codon = seq[i:(i+3)]; # get current codon
aa = code[codon]; # get aminoacid
if (aa != "*"): # if not a stop codon
pep = pep + aa; # add residue to peptide
i = i+3; # next codon
else: # if codon is stop
i = len(seq); # stop loop
return pep;
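# Hedged example (illustrative only): translation stops at the first stop codon, so
#     translate("ATGGCCTAAATG")  ->  "MA"        (AUG -> M, GCC -> A, UAA -> stop)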
"""
Changes codons in dna sequence to synonyms according to given codon frequency
dictionary. Assumes frame starts in 0. Default codon frequency is equal
probabilities, resulting in codon scramble. Will select codons probabilistically
if sampling is True, will choose most likely codon otherwise.
"""
def optimizeCodons(pSeq, codonFreqs=codonUsage(), codonSampling=True):
seq = transcribe(pSeq.upper()); # everything to uppercase
code = geneticCode(); # Stores genetic code
newSeq = ""; # stores new sequence
i = 0; # position on DNA sequence
while i < len(seq): # for every codon
codon = seq[i:(i+3)]; # get current codon
aa = code[codon]; # get aminoacid
synonyms = []; # will store all synonyms
for c in code: # iterates over genetic code
if code[c] == code[codon]: # if codon is synonym,
synonyms.append(c); # add to synonyms list
totalProb = 0; # saves sum of likelihoods of all synonymous codons
for syn in synonyms: # for every synonymous codon,
totalProb += codonFreqs[syn]; # add this synonym's likelihood to total
newCodon = synonyms[0]; # set first codon synonym as default new codon
if codonSampling: # if not deterministic (choose codons probabilistically),
r = random.random(); # random number between zero and 1
j = 0; # stores index of codon selected
cumProb = 0; # stores cumulative probability of belonging to all codons in indexes 0:i
while cumProb < r: # will shift selected codon until cumulative probability surpasses random number
                cumProb += codonFreqs[synonyms[j]]/float(totalProb); # adds this codon's probability to the cumulative frequency
j += 1; # advance counter
newCodon = synonyms[j-1]; # change codon for chosen synonym
else: # if deterministic mode (choose most likely codons),
for c in synonyms: # iterate over synonyms
if codonFreqs[c] > codonFreqs[newCodon]: # if this codon is more likely than current new codon,
newCodon = c; # set this codon as the new codon
newSeq = newSeq + newCodon; # add codon to sequence
i = i+3; # next codon
newDNASeq = revTranscribe(newSeq); # reverse transcribe sequence
return newDNASeq;
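# Hedged usage sketch (the codon table path is hypothetical): with codonSampling=False the most
# frequent synonym is always chosen; with the default uniform table and codonSampling=True,
# synonymous codons are drawn at random.
#
#     freqs = codonUsage("ecoli_codon_table.txt")              # hypothetical file
#     opt = optimizeCodons("ATGTTATTA", freqs, codonSampling=False)
#     # each 'TTA' (Leu) would be replaced by the most used Leu codon in the table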
"""
Returns the reverse complement of a sequence
"""
def revComp(seq):
revSeq = seq[::-1]; # reverse seq
# replace nucleotides, conserve case
revSeq = revSeq.replace("A", "1");
revSeq = revSeq.replace("T", "2");
revSeq = revSeq.replace("G", "3");
revSeq = revSeq.replace("C", "4");
revSeq = revSeq.replace("2", "A");
revSeq = revSeq.replace("1", "T");
revSeq = revSeq.replace("4", "G");
revSeq = revSeq.replace("3", "C");
revSeq = revSeq.replace("a", "1");
revSeq = revSeq.replace("t", "2");
revSeq = revSeq.replace("g", "3");
revSeq = revSeq.replace("c", "4");
revSeq = revSeq.replace("2", "a");
revSeq = revSeq.replace("1", "t");
revSeq = revSeq.replace("4", "g");
revSeq = revSeq.replace("3", "c");
return revSeq;
"""
Returns a list with all possible DNA sequences given an ambiguous DNA sequence.
"""
def ambiguousSeqs(seq):
ambigCodes = {"R":"AG", "Y":"TC", "S":"CG", "W":"AT", "K":"GT", "M":"AC",
"B":"CGT", "D":"AGT", "H":"ATC", "V":"ACG", "N":"ATCG"};
outSeqs = [""];
for b in seq:
if b in list(ambigCodes.keys()):
newOutSeqs = [];
for possibleB in ambigCodes[b]:
newNewOutSeqs = [];
for s in outSeqs:
newNewOutSeqs.append(s + possibleB);
newOutSeqs = newOutSeqs + newNewOutSeqs;
outSeqs = newOutSeqs;
else:
newOutSeqs = [];
for s in outSeqs:
newOutSeqs.append(s + b);
outSeqs = newOutSeqs;
return outSeqs;
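# Hedged example (illustrative only): ambiguity codes expand combinatorially, e.g.
#     ambiguousSeqs("ARG")  ->  ["AAG", "AGG"]        (R = A or G)
#     ambiguousSeqs("ANN")  ->  16 sequences          (N = A, T, C or G at two positions)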
# Auxiliary methods
"""
Checks if a sequence is hard to synthesize.
"""
def isTricky(seq,repeatSize=14,bad_seqs=['TATATATATATATATATATA','GCGCGCGCGCGCGC',
'AAAAAAAAAA','TTTTTTTTTT','GGGGGGGGG','CCCCCCCCC']):
tricky = -1;
if hasRepeatedSeqs(seq,repeatSize) > -1: # if contains repeats larger than a given size
tricky = hasRepeatedSeqs(seq,repeatSize); # it's tricky
for bad_seq in bad_seqs:
i = findFirst(seq,bad_seq)
        if i > -1 and (tricky == -1 or i < tricky): # if a problematic subsequence occurs earlier than anything flagged so far,
            tricky = i; # it's tricky
return tricky;
"""
Returns the start index of the first k-mer of the given size that has already appeared
earlier in the sequence, or -1 if there are no repeats.
"""
def hasRepeatedSeqs(seq,size):
motifs = list()
if len(seq) > size:
for i in range(len(seq)-size+1):
if seq[i:i+size] in motifs:
return i
else:
motifs.append(seq[i:i+size])
return -1
"""
Returns a random hex color code.
"""
def randHex():
chars = [0,1,2,3,4,5,6,7,8,9,'a','b','c','d','e','f']; # stores all possible values
codeList = [str(chars[random.randint(0,len(chars)-1)]) for i in range(6)]; # generates six random characters from list
hexCode = "#" + "".join(codeList); # generates hex code string
return hexCode; # returns
"""
Returns a string with today's date in a format like "6-JUN-2016"
"""
def todayDateStr():
months = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]; # store month strings
today = datetime.date.today(); # get today's date
r = "-".join([str(today.day), months[today.month-1], str(today.year)]); # creates output string
return r; # returns
# //
|
pablocarderam/genetargeter
|
py/utils/BioUtils.py
|
Python
|
mit
| 17,935
|
[
"Biopython"
] |
0a7638c36e1279b3243fc42915a5b9d19fb3384e56d911f814912ea5cc94860a
|
# $Id$
#
# Copyright (C) 2007,2008 Greg Landrum
#
# @@ All Rights Reserved @@
#
import os,sys
import unittest
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import DataStructs as ds
def feq(v1,v2,tol=1e-4):
return abs(v1-v2)<tol
class TestCase(unittest.TestCase):
def setUp(self) :
pass
def test1Int(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError,lambda:v1[5])
v1[0]=1
v1[2]=2
v1[3]=3
self.assertTrue(v1==v1)
self.assertTrue(v1.GetLength()==5)
v2= ds.IntSparseIntVect(5)
self.assertTrue(v1!=v2)
v2|=v1
self.assertTrue(v2==v1)
v3=v2|v1
self.assertTrue(v3==v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs=={0:1,2:2,3:3})
def test2Long(self):
"""
"""
l=1<<42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l])
v1[0]=1
v1[2]=2
v1[1<<35]=3
self.assertTrue(v1==v1)
self.assertTrue(v1.GetLength()==l)
v2= ds.LongSparseIntVect(l)
self.assertTrue(v1!=v2)
v2|=v1
self.assertTrue(v2==v1)
v3=v2|v1
self.assertTrue(v3==v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs=={0:1,2:2,1<<35:3})
def test3Pickle1(self):
"""
"""
l=1<<42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l+1])
v1[0]=1
v1[2]=2
v1[1<<35]=3
self.assertTrue(v1==v1)
v2= cPickle.loads(cPickle.dumps(v1))
self.assertTrue(v2==v1)
v3= ds.LongSparseIntVect(v2.ToBinary())
self.assertTrue(v2==v3)
self.assertTrue(v1==v3)
#cPickle.dump(v1,file('lsiv.pkl','wb+'))
with open(
os.path.join(RDConfig.RDBaseDir,
'Code/DataStructs/Wrap/testData/lsiv.pkl'),
'rb'
) as f:
v3 = cPickle.load(f)
self.assertTrue(v3==v1)
def test3Pickle2(self):
"""
"""
l=1<<21
v1 = ds.IntSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l+1])
v1[0]=1
v1[2]=2
v1[1<<12]=3
self.assertTrue(v1==v1)
v2= cPickle.loads(cPickle.dumps(v1))
self.assertTrue(v2==v1)
v3= ds.IntSparseIntVect(v2.ToBinary())
self.assertTrue(v2==v3)
self.assertTrue(v1==v3)
#cPickle.dump(v1,file('isiv.pkl','wb+'))
with open(
os.path.join(RDConfig.RDBaseDir,
'Code/DataStructs/Wrap/testData/isiv.pkl'),
'rb'
) as f:
v3 = cPickle.load(f)
self.assertTrue(v3==v1)
def test4Update(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError,lambda:v1[6])
v1[0]=1
v1[2]=2
v1[3]=3
self.assertTrue(v1==v1)
v2 = ds.IntSparseIntVect(5)
v2.UpdateFromSequence((0,2,3,3,2,3))
self.assertTrue(v1==v2)
def test5Dice(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
v1[4]=4;
v1[0]=2;
v1[3]=1;
self.assertTrue(feq(ds.DiceSimilarity(v1,v1),1.0))
v1 = ds.IntSparseIntVect(5)
v1[0]=2;
v1[2]=1;
v1[3]=4;
v1[4]=6;
v2 = ds.IntSparseIntVect(5)
v2[1]=2;
v2[2]=3;
v2[3]=4;
v2[4]=4;
self.assertTrue(feq(ds.DiceSimilarity(v1,v2),18.0/26.))
self.assertTrue(feq(ds.DiceSimilarity(v2,v1),18.0/26.))
def test6BulkDice(self):
"""
"""
sz=10
nToSet=5
nVs=6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0,sz-1)]=random.randint(1,10)
vs.append(v)
baseDs = [ds.DiceSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
bulkDs = ds.BulkDiceSimilarity(vs[0],vs[1:])
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i],bulkDs[i]))
def test6BulkTversky(self):
"""
"""
sz=10
nToSet=5
nVs=6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0,sz-1)]=random.randint(1,10)
vs.append(v)
baseDs = [ds.TverskySimilarity(vs[0],vs[x],.5,.5) for x in range(1,nVs)]
bulkDs = ds.BulkTverskySimilarity(vs[0],vs[1:],0.5,0.5)
diceDs = [ds.DiceSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i],bulkDs[i]))
self.assertTrue(feq(baseDs[i],diceDs[i]))
bulkDs = ds.BulkTverskySimilarity(vs[0],vs[1:],1.0,1.0)
taniDs = [ds.TanimotoSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i],taniDs[i]))
taniDs = ds.BulkTanimotoSimilarity(vs[0],vs[1:])
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i],taniDs[i]))
if __name__ == '__main__':
unittest.main()
|
AlexanderSavelyev/rdkit
|
Code/DataStructs/Wrap/testSparseIntVect.py
|
Python
|
bsd-3-clause
| 4,724
|
[
"RDKit"
] |
c25d605fee351683fcb3b06d2ea4fa11058f2701089c25cffb12fc935ffa16c0
|
#!/usr/bin/python
from nxs_utils import ThreadPool, Timer
import argparse, json, urllib2, multiprocessing, time, threading, thread, sys, datetime
from pprint import pprint, pformat
default_threads = multiprocessing.cpu_count()/2
# Blast given sequences (from json file) via neXtProt API sequentially + in parallel, then check correctness of responses
#
# Example:
# ./nxs-test-blast-api.py localhost:8080 request.json --repeat-blast 2 --out /tmp/blast.out --thread 16
def parse_arguments():
"""Parse arguments
:return: a parsed arguments object
"""
parser = argparse.ArgumentParser(description='Testing neXtProt API blast')
parser.add_argument('host', help='API host (ie: build-api.nextprot.org)')
parser.add_argument('json_file', help='sequences to search (json file)')
parser.add_argument('-o', '--out', metavar='path', default='output.json',
help='file path to flush json output responses')
parser.add_argument('-r', '--repeat-blast', metavar='num', default=1, type=int,
help='blast sequences n times (default=1)')
parser.add_argument('-t', '--thread', metavar='num', default=default_threads, type=int,
help='number of threads (default=' + str(default_threads) + ')')
arguments = parser.parse_args()
# Update API host address
if not arguments.host.startswith("http"):
arguments.host = 'http://' + arguments.host
if not arguments.json_file.endswith(".json"):
raise ValueError(arguments.json_file+": invalid json file name")
arguments.sequences = json.loads(open(arguments.json_file).read())
print "Parameters"
print " API host : " + arguments.host
print " JSON input file : " + arguments.json_file + " (found "+str(len(arguments.sequences))+" sequences)"
print " JSON output file : " + arguments.out
print
print " repeat blast : " + str(arguments.repeat_blast) + " times"
print " thread number : " + str(arguments.thread)
print "-------------------------------------------------------------------------------------"
return arguments
def call_blast(blast_api, sequence):
url = blast_api + sequence+".json"
try:
response = urllib2.urlopen(url).read()
return json.loads(response)
except urllib2.URLError as e:
print str(e), ": could not run", url
raise
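# Hedged example (illustrative only; host and sequence are placeholders taken from the sample json below):
#     result = call_blast("http://localhost:8080/blast/sequence/", "GTTYVTDKSEEDNEIESEEEVQPKTQGSRR")
# returns the parsed JSON response from <blast_api><sequence>.json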
def test_parallel_run(blast_api, sequences, expected_results, threads_num):
pool = ThreadPool(threads_num)
for sequence in sequences:
pool.add_task(func=blast_and_test_correctness,
blast_api=blast_api,
sequence=sequence,
expected_results=expected_results)
pool.wait_completion()
def blast_and_test_correctness(blast_api, sequence, expected_results):
local_timer = Timer()
with local_timer:
try:
result = call_blast(blast_api, sequence)
compare_json_results(expected_results[sequence], result)
sys.stdout.write("SUCCESS: " + threading.current_thread().name)
except urllib2.URLError as e:
sys.stdout.write("FAILURE: " + threading.current_thread().name+" failed with error '"+str(e))
print " [" + str(datetime.timedelta(seconds=local_timer.duration_in_seconds())) + " seconds]"
def compare_json_results(sequential_results, parallel_results):
if sorted(sequential_results.items()) != sorted(parallel_results.items()):
raise ValueError("json content differs between "+pprint(sequential_results) + "\n and \n" +
pprint(parallel_results))
def blast_sequences_sequential(blast_api, sequences):
print "Blasting "+str(len(sequences)) + " sequences to " + args.host + "..."
results = {}
local_timer = Timer()
with local_timer:
for sequence in sequences:
results[sequence] = call_blast(blast_api, sequence)
duration = local_timer.duration_in_seconds()
duration_per_seconds = len(sequences) / duration
print "Sequential execution in "+str(datetime.timedelta(seconds=duration)) + " seconds [" + \
str(duration_per_seconds) + " sequences/second]"
return results
def test_parallel_run_time(blast_api, sequences, expected_results):
sequences *= args.repeat_blast
local_timer = Timer()
with local_timer:
test_parallel_run(blast_api, sequences, expected_results, args.thread)
duration = local_timer.duration_in_seconds()
duration_per_seconds = len(sequences) / duration
print "Parallel execution in "+str(datetime.timedelta(seconds=duration)) + " seconds [" + \
str(duration_per_seconds) + " sequences/second]"
if __name__ == '__main__':
args = parse_arguments()
blast_api = args.host + '/blast/sequence/'
sequential_results = blast_sequences_sequential(blast_api=blast_api, sequences=args.sequences)
print "\nsleeping...\n"
time.sleep(2)
test_parallel_run_time(blast_api=blast_api, sequences=args.sequences, expected_results=sequential_results)
f = open(args.out, 'w')
f.write(json.dumps(sequential_results))
f.close()
# example of json file
#[
# "GTTYVTDKSEEDNEIESEEEVQPKTQGSRR",
# "KGGHFYSAKPEILRAMQRADEALNKDKIKRLELAVCDEPSEPEEEEEMEVGTTYVTDK",
# "NDILIGCEEE",
# "TQTYSVLEGDPSEN",
# "SKKKIIDFLSALEGFKVMCK",
# "MSRQSTLYSFFPKSP",
# "LLALPVLASPAYVAPAPGQA",
# "HDSCQGDSGGPLVCKV",
# "HLYYQDQLLPVSRIIVHP",
# "VMVIGNLVVLNLFLALLLSSFSSDNLTAIEEDPDANNLQIAVTRIKKGIN",
# "GNKIQGCIFDLVTNQAFDISIMVLICLN",
# "WRFSCCQVN",
# "RTSLFSFKGRGRDIGSETEFADD",
# "GESGEMDSLRSQMEERFMSANPSK"
#]
|
calipho-sib/nextprot-scripts
|
src/nxs-test-blast-api.py
|
Python
|
gpl-2.0
| 5,666
|
[
"BLAST"
] |
181585948eeee73a3c631036f9bd0ffe9892bb1ca6cac5dcf947464bb9d5e46e
|
from __future__ import absolute_import
from .arc import ArcTrajectoryFile
from .dcd import DCDTrajectoryFile
from .binpos import BINPOSTrajectoryFile
from .xtc import XTCTrajectoryFile
from .trr import TRRTrajectoryFile
from .hdf5 import HDF5TrajectoryFile
from .netcdf import NetCDFTrajectoryFile
from .pdb import PDBTrajectoryFile
from .lh5 import LH5TrajectoryFile
from .mdcrd import MDCRDTrajectoryFile
from .amberrst import AmberRestartFile, AmberNetCDFRestartFile
from .lammpstrj import LAMMPSTrajectoryFile
from .dtr import DTRTrajectoryFile
from .gro import GroTrajectoryFile
from .xyzfile import XYZTrajectoryFile
from .tng import TNGTrajectoryFile
|
leeping/mdtraj
|
mdtraj/formats/__init__.py
|
Python
|
lgpl-2.1
| 659
|
[
"NetCDF"
] |
d845970c79306e8938e24c71bd100b524c4913af00fcb9673d99041d5d2a357c
|
""" Python test discovery, setup and run of test functions. """
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
import _pytest
import pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
def filter_traceback(entry):
return entry.path != cutdir1 and not entry.path.relto(cutdir2)
def get_real_func(obj):
"""gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
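# Hedged illustration (not part of the plugin): get_real_func unwraps functools.partial
# objects and follows __wrapped__ chains (set by functools.wraps on Python 3), e.g.
#
#     import functools
#     def real(): pass
#     part = functools.partial(real)
#     assert get_real_func(part) is real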
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = py.code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
    This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
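# Hedged usage sketch (example test code, not part of the plugin): this decorator is what
# end users see as @pytest.fixture, e.g.
#
#     @pytest.fixture(scope="module", params=[1, 2], ids=["one", "two"])
#     def number(request):
#         return request.param
#
#     def test_number(number):          # collected twice, once per param
#         assert number in (1, 2)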
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
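# Hedged usage sketch (example test code, not part of the plugin): a yield-fixture wraps
# setup and teardown around the yielded value, e.g.
#
#     @pytest.yield_fixture
#     def resource():
#         handle = open("data.txt")     # hypothetical file, setup
#         yield handle
#         handle.close()                # teardown runs after the test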
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# this misspelling is common - raise a specific error to alert the user
if hasattr(metafunc.function, 'parameterize'):
msg = "{0} has 'parameterize', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.classnamefilter(name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.funcnamefilter(name) and hasattr(obj, "__call__") and\
getfixturemarker(obj) is None:
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
if not isfunction(obj):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
if getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return py.code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
if hasattr(obj, 'compat_co_firstlineno'):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = obj.compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in dic.items():
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
mark(funcobj)
else:
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
try:
mod = self.fspath.pyimport(ensuresyspath="append")
except SyntaxError:
raise self.CollectError(
py.code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(setup_module)[0]:
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(fin)[0]:
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = py.code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return str(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
        # explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
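# Illustrative sketch, not part of the original module: the (deprecated)
# yield-test style that Generator.collect() above expands; the test and
# helper names are made up:
#
#     def test_even_numbers():
#         for i in range(3):
#             yield check_even, i * 2      # -> sub items "[0]", "[1]", "[2]"
#
#     def check_even(n):
#         assert n % 2 == 0
#
# A yielded tuple may start with a string, which becomes the explicit sub
# item name; yielding a non-callable raises TypeError during collection.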
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtype, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
getattr(self, valtype)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to set up expensive resources,
        consider setting indirect=True so that this happens at test setup time instead.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of simple values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: if True each argvalue corresponding to an argname will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
If strings, each is corresponding to the argvalues so that they are
part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
if not indirect:
#XXX should we also check for the opposite case?
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(
self.function, arg))
valtype = indirect and "params" or "funcargs"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtype, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
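    # Illustrative sketch, not part of the original module: typical use of
    # parametrize() from a pytest_generate_tests hook in a conftest.py; the
    # fixture name "db_backend" is a made-up example:
    #
    #     def pytest_generate_tests(metafunc):
    #         if "db_backend" in metafunc.fixturenames:
    #             metafunc.parametrize("db_backend", ["sqlite", "postgres"],
    #                                  indirect=True, scope="module")
    #
    # Each collected test requiring "db_backend" is expanded into one call
    # per value; with indirect=True the value arrives as request.param in
    # the "db_backend" fixture function instead of being passed directly.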
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
        addcall() is invoked during the test collection phase, prior to and
        independently of actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
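# Illustrative sketch, not part of the original module: what the id helpers
# above produce for simple inputs (with idfn=None):
#
#     idmaker(("x", "y"), [(1, "a"), (2, "b")])       ->  ["1-a", "2-b"]
#     idmaker(("obj",), [(object(),), (object(),)])   ->  ["obj0", "obj1"]
#
# Non-primitive values fall back to "<argname><index>"; if a custom idfn
# yields duplicate ids they are disambiguated by prefixing the positional
# index of each value set.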
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
session.perform_collect()
curdir = py.path.local()
tw = py.io.TerminalWriter()
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(ExpectedException, *args, **kwargs):
""" assert that a code block/function call raises @ExpectedException
and raise a failure exception otherwise.
This helper produces a ``py.code.ExceptionInfo()`` object.
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
Performance note:
-----------------
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if ExpectedException is AssertionError:
        # we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as ExpectedException
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(ExpectedException, tuple):
for exc in ExpectedException:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif not inspect.isclass(ExpectedException):
raise TypeError(msg % type(ExpectedException))
if not args:
return RaisesContext(ExpectedException)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = py.code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except ExpectedException:
return py.code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except ExpectedException:
return py.code.ExceptionInfo()
pytest.fail("DID NOT RAISE")
class RaisesContext(object):
def __init__(self, ExpectedException):
self.ExpectedException = ExpectedException
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(py.code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.ExpectedException)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
        # check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "cls", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfuncargvalue(argname) which was naturally
# not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
"""add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
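    # Illustrative sketch, not part of the original module: pre-2.3 style
    # use of cached_setup() from a funcarg factory; the resource and host
    # names are made up:
    #
    #     def pytest_funcarg__smtp(request):
    #         return request.cached_setup(
    #             setup=lambda: smtplib.SMTP("smtp.example.org"),
    #             teardown=lambda conn: conn.close(),
    #             scope="module")
    #
    # The connection is created once per module; teardown receives the
    # previously created resource when the scope's finalizers run.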
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating them as input arguments in the fixture
        function. If you can only decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = inspect.formatargspec(*inspect.getargspec(factory))
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            stack = stack[:-1]  # the last fixture raised an error, let's present
# it at the requesting side
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except IOError:
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to set up the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    During the test setup phase all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set. As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
if argname not in func_params and argname not in func_params[0]:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
if not callable(obj):
continue
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
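# Illustrative sketch, not part of the original module: the fixture closure
# computed by FixtureManager.getfixtureclosure() in a concrete (made-up)
# setting. A test requesting a fixture "db" whose fixture function itself
# requests "config" ends up with a names_closure containing any applicable
# autouse fixtures plus "db" and "config", and arg2fixturedefs maps each of
# those names to the FixtureDef list visible for the test's node id.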
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = py.code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
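# Illustrative sketch, not part of the original module: the two fixture
# function shapes handled by call_fixture_func() above; the names are made
# up:
#
#     @pytest.fixture
#     def plain_resource():
#         return make_resource()      # non-generator: value returned directly
#
#     @pytest.yield_fixture
#     def managed_resource():
#         res = make_resource()
#         yield res                   # value handed to the requester
#         res.close()                 # code after the single yield runs as
#                                     # the registered teardown
#
# A generator given to a plain @pytest.fixture, or a yield_fixture with more
# than one yield, is rejected via fail_fixturefunc().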
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
if self.unittest:
result = self.func(request.instance, **kwargs)
else:
fixturefunc = self.func
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not inspect.isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(py.code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
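# Illustrative sketch, not part of the original module: what
# getfuncargnames() computes for a couple of made-up callables:
#
#     def f(x, y, z=3): ...            ->  ("x", "y")   (defaulted args dropped)
#     bound method c.m(self, a, b)     ->  ("a", "b")   ("self" skipped)
#
# functools.partial objects have their already-bound positional and keyword
# arguments removed, wrappers exposing __wrapped__ are unwrapped first, and
# mock.patch decorations are accounted for via num_mock_patch_args().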
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes so as to minimize the number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
    # items list into a list of items_before, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() is random order of argnames but
# then again different functions (items) can change order of
# arguments so it doesn't matter much probably
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
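# Illustrative sketch, not part of the original module: how the keys above
# drive reorder_items(). For a session-scoped parametrized fixture "srv"
# with two params, items sharing the key ("srv", 0) are grouped before items
# with ("srv", 1), so the expensive setup/teardown runs once per parameter
# value; module- and class-scoped keys additionally carry the fspath (and
# cls), so grouping never crosses those boundaries. ("srv" is a made-up
# fixture name.)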
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
|
hunse/pytest
|
_pytest/python.py
|
Python
|
mit
| 81,948
|
[
"VisIt"
] |
557a57c56c45fb7e472503e646254caea46f9119b4ffb3a3cdfe76b161836bfe
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initialize and configure Flask-Script extension.
Configuration
^^^^^^^^^^^^^
The following configuration variables are provided:
===================== =======================================================
`bind address` Preferred binding address of the server. Can be used to
select a specific interface or to bind to all via
`0.0.0.0`.
`bind port` Preferred binding port of the server. Can differ from
the one stated in `CFG_SITE_URL` so it can be accessed
via reverse proxy.
===================== =======================================================
They are assigned by the following parameters, in decreasing priority:
1. Command line arguments of `inveniomanage runserver`
2. `SERVER_BIND_ADDRESS` and `SERVER_BIND_PORT` configuration
3. Values guessed from `CFG_SITE_URL`
4. Defaults (`127.0.0.1:80`)
"""
from __future__ import print_function
import functools
import re
import ssl
from types import FunctionType
from flask import current_app, flash
from flask_registry import ModuleAutoDiscoveryRegistry, RegistryProxy
from flask_script import Manager as FlaskExtManager
from flask_script.commands import Clean, Server, Shell, ShowUrls
from invenio_base.signals import post_command, pre_command
from six.moves import urllib
from werkzeug.utils import find_modules, import_string
def change_command_name(method=None, new_name=None):
"""Change command name to `new_name` or replace '_' by '-'."""
if method is None:
return functools.partial(change_command_name, new_name=new_name)
if new_name is None:
new_name = method.__name__.replace('_', '-')
method.__name__ = new_name
return method
def generate_secret_key():
"""Generate secret key."""
import string
import random
rng = random.SystemRandom()
return ''.join(
rng.choice(string.ascii_letters + string.digits)
for dummy in range(0, 256)
)
def print_progress(p, L=40, prefix='', suffix=''):
"""Print textual progress bar."""
bricks = int(p * L)
print('\r{prefix} [{bricks}{spaces}] {progress}% {suffix}'.format(
prefix=prefix, suffix=suffix,
bricks='#' * bricks, spaces=' ' * (L - bricks),
progress=int(p * 100),
), end=' ')
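# Illustrative sketch, not part of the original module: example rendering of
# the bar above. print_progress(0.5, L=10, prefix='upgrade') writes, in
# place thanks to the leading '\r':
#
#     upgrade [#####     ] 50%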
def check_for_software_updates(flash_message=False):
"""Check for a new release of Invenio.
    :return: True if you have the latest version, False if you need to upgrade,
        or None if the server was not reachable.
"""
from invenio_base.globals import cfg
from invenio_base.i18n import _
try:
find = re.compile('Invenio v[0-9]+.[0-9]+.[0-9]+(\-rc[0-9])?'
' is released')
release_notes = 'https://raw.githubusercontent.com/' \
'inveniosoftware/invenio/master/RELEASE-NOTES'
webFile = urllib.request.urlopen(release_notes)
temp = ""
version = ""
version1 = ""
while True:
temp = webFile.readline()
match1 = find.match(temp)
try:
version = match1.group()
break
except Exception:
pass
if not temp:
break
webFile.close()
submatch = re.compile('[0-9]+.[0-9]+.[0-9]+(\-rc[0-9])?')
version1 = submatch.search(version)
web_version = version1.group().split(".")
local_version = cfg['CFG_VERSION'].split(".")
if (web_version[0] > local_version[0] or
web_version[0] == local_version[0] and
web_version[1] > local_version[1] or
web_version[0] == local_version[0] and
web_version[1] == local_version[1] and
web_version[2] > local_version[2]):
if flash_message:
flash(_('A newer version of Invenio is available for '
'download. You may want to visit '
'<a href="%(wiki)s">%()s</a>',
wiki='<a href=\"http://invenio-software.org/wiki/'
'/Installation/Download'), 'warning(html_safe)')
return False
except Exception as e:
print(e)
if flash_message:
flash(_('Cannot download or parse release notes '
'from %(release_notes)s', release_notes=release_notes),
'error')
return None
return True
class Manager(FlaskExtManager):
"""Custom manager implementation with signaling support."""
def add_command(self, name, command):
"""Wrap default ``add_command`` method."""
sender = command.run if isinstance(command.run, FunctionType) \
else command.__class__
class SignalingCommand(command.__class__):
def __call__(self, *args, **kwargs):
app = self.app if not len(args) else args[0]
with app.test_request_context():
pre_command.send(sender, args=args, **kwargs)
res = super(SignalingCommand, self).__call__(*args, **kwargs)
with app.test_request_context():
post_command.send(sender, args=args, **kwargs)
return res
command.__class__ = SignalingCommand
return super(Manager, self).add_command(name, command)
def set_serve_static_files(sender, *args, **kwargs):
"""Enable serving of static files for `runserver` command.
Normally Apache serves static files, but during development and if you are
using the Werkzeug standalone development server, you can set this flag to
`True`, to enable static file serving.
"""
current_app.config.setdefault('CFG_FLASK_SERVE_STATIC_FILES', True)
pre_command.connect(set_serve_static_files, sender=Server)
def create_ssl_context(config):
"""Create :class:`ssl.SSLContext` from application config.
:param config: Dict-like application configuration.
    :returns: A valid context, or `None` if TLS is not enabled.
The following configuration variables are processed:
============================ ==============================================
`SERVER_TLS_ENABLE` If `True`, a SSL context will be created. In
this case, the required configuration
variables must be provided.
`SERVER_TLS_KEY` (required) Filepath (string) of private key provided as
PEM file.
`SERVER_TLS_CERT` (required) Filepath (string) of your certificate plus
all intermediate certificate, concatenated in
that order and stored as PEM file.
`SERVER_TLS_KEYPASS` If private key is encrypted, a password can be
provided.
`SERVER_TLS_PROTOCOL` String that selects a protocol from
`ssl.PROTOCOL_*`. Defaults to `SSLv23`. See
:mod:`ssl` for details.
`SERVER_TLS_CIPHERS` String that selects possible ciphers according
to the `OpenSSL cipher list format
<https://www.openssl.org/docs/apps/
ciphers.html>`_
`SERVER_TLS_DHPARAMS` Filepath (string) to parameters for
Diffie-Helman key exchange. If not set the
built-in parameters are used.
`SERVER_TLS_ECDHCURVE` Curve (string) that should be used for
Elliptic Curve-based Diffie-Helman key
exchange. If not set, the defaults provided by
OpenSSL are used.
============================ ==============================================
.. note:: In case `None` is returned because of a non-enabling
configuration, TLS will be disabled. It is **not** possible to have a
TLS and non-TLS configuration at the same time. So if TLS is activated,
        no non-TLS connections are accepted.
    .. important:: Keep in mind to change `CFG_SITE_URL` and
`CFG_SITE_SECURE_URL` according to your TLS configuration. This does
not only include the protocol (`http` vs `https`) but also the hostname
that has to match the common name in your certificate. If a wildcard
certificate is provided, the hostname stated in
`CFG_SITE[_SECURE]_URL` must match the wildcard pattern.
"""
ssl_context = None
if config.get('SERVER_TLS_ENABLE', False):
if 'SERVER_TLS_KEY' not in config \
or 'SERVER_TLS_CERT' not in config:
raise AttributeError(
'`SERVER_TLS_KEY` and `SERVER_TLS_CERT` required!'
)
# CLIENT_AUTH creates a server context, so do not get confused here
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
if 'SERVER_TLS_PROTOCOL' in config:
ssl_context.protocol = getattr(
ssl,
'PROTOCOL_{}'.format(config.get('SERVER_TLS_PROTOCOL'))
)
ssl_context.load_cert_chain(
certfile=config.get('SERVER_TLS_CERT'),
keyfile=config.get('SERVER_TLS_KEY'),
password=config.get('SERVER_TLS_KEYPASS', None)
)
if 'SERVER_TLS_CIPHERS' in config:
ssl_context.set_ciphers(
config.get('SERVER_TLS_CIPHERS')
)
if 'SERVER_TLS_DHPARAMS' in config:
ssl_context.load_dh_params(
config.get('SERVER_TLS_DHPARAMS')
)
if 'SERVER_TLS_ECDHCURVE' in config:
ssl_context.set_ecdh_curve(
config.get('SERVER_TLS_ECDHCURVE')
)
# that one seems to be required for werkzeug
ssl_context.check_hostname = False
return ssl_context
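# Hedged example of a configuration accepted by create_ssl_context(); only the
# variable names come from the docstring above, the file paths are placeholders.
#
#     tls_config = {
#         'SERVER_TLS_ENABLE': True,
#         'SERVER_TLS_KEY': '/etc/ssl/private/invenio.key',       # placeholder
#         'SERVER_TLS_CERT': '/etc/ssl/certs/invenio-chain.pem',  # placeholder
#         'SERVER_TLS_PROTOCOL': 'TLSv1_2',
#     }
#     context = create_ssl_context(tls_config)  # None when TLS is not enabled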
def register_manager(manager):
"""Register all manager plugins and default commands with the manager."""
from six.moves.urllib.parse import urlparse
managers = RegistryProxy('managers', ModuleAutoDiscoveryRegistry, 'manage')
def extract_name(name):
"""Guess manager name."""
parts = name.split('.')
if len(parts) == 2:
return parts[0].split('_')[-1]
return parts[-2]
with manager.app.app_context():
for script in find_modules('invenio_base.scripts'):
manager.add_command(script.split('.')[-1],
import_string(script + ':manager'))
for script in managers:
if script.__name__ == 'invenio_base.manage':
continue
manager.add_command(extract_name(script.__name__),
getattr(script, 'manager'))
manager.add_command("clean", Clean())
manager.add_command("show-urls", ShowUrls())
manager.add_command("shell", Shell())
parsed_url = urlparse(manager.app.config.get('CFG_SITE_URL'))
host = manager.app.config.get(
'SERVER_BIND_ADDRESS',
parsed_url.hostname or '127.0.0.1'
)
port = manager.app.config.get(
'SERVER_BIND_PORT',
parsed_url.port or 80
)
ssl_context = create_ssl_context(manager.app.config)
runserver = Server(host=host, port=port, ssl_context=ssl_context)
manager.add_command("runserver", runserver)
# FIXME separation of concerns is violated here.
from invenio_ext.collect import collect
collect.init_script(manager)
from invenio_ext.assets import command, bower
manager.add_command("assets", command)
manager.add_command("bower", bower)
|
hachreak/invenio-ext
|
invenio_ext/script/__init__.py
|
Python
|
gpl-2.0
| 12,591
|
[
"VisIt"
] |
0ec0b535685e3cf1f9f696fb166f7e13d98668f0be9415180c0892bb2e014a44
|
"""
=====
utils
=====
"""
def get_atomic_number_symbol(Z=None, symbol=None):
"""This function returns a tuple of matching arrays of atomic numbers
(Z) and chemical symbols (symbol).
:param Z: atomic numbers
:type Z: int, array like object of int's
:param symbol: chemical symbols
:type symbol: str, array like object of str
:return: arrays of atomic numbers and chemical symbols
:rtype: tuple of :class:`numpy.ndarray`
Note: If both Z and symbol are provided the symbol will win out and
change the Z to match.
:examples:
>>> Z, symbol = get_atomic_number_symbol(Z=[12, 24, 26, 48])
>>> print(Z)
[12 24 26 48]
>>> print(symbol) # doctest: +ALLOW_UNICODE
['Mg' 'Cr' 'Fe' 'Cd']
>>> Z, symbol = get_atomic_number_symbol(symbol=['C', 'H', 'N', 'O'])
>>> print(Z)
[6 1 7 8]
>>> print(symbol)
['C' 'H' 'N' 'O']
"""
import numpy as np
from periodictable import elements
if isinstance(Z, int):
Z = [Z]
if isinstance(symbol, str):
symbol = [symbol]
if symbol is None or len(symbol) == 0:
if Z is None or len(Z) == 0:
raise ValueError("Need to provide list of either Z's or symbols.")
else:
Z = np.asarray(Z)
length = len(Z)
symbol = np.empty(length, dtype='<U2')
for i in range(length):
symbol[i] = elements[Z[i]].symbol
symbol[symbol == 'n'] = 'VD' # use Z=0 as VOID type
else:
symbol = np.asarray(symbol, dtype='<U2')
length = len(symbol)
Z = np.empty(length, dtype=np.int64)
for i in range(length):
symbol[i] = symbol[i].capitalize()
if symbol[i] == 'Vd': # Special case for VOID
symbol[i] = 'VD'
Z[i] = 0
else:
Z[i] = elements.symbol(symbol[i]).number
return (Z, symbol)
def get_unitcell(structure):
"""Wrapper to get the unit cell from different structure classes"""
from javelin.unitcell import UnitCell
try: # javelin structure
return structure.unitcell
except AttributeError:
try: # diffpy structure
return UnitCell(structure.lattice.abcABG())
except AttributeError:
try: # ASE structure
from ase.geometry import cell_to_cellpar
return UnitCell(cell_to_cellpar(structure.cell))
except (ImportError, AttributeError):
raise ValueError("Unable to get unit cell from structure")
def get_positions(structure):
"""Wrapper to get the positions from different structure classes"""
try: # ASE structure
return structure.get_scaled_positions()
except AttributeError:
try: # diffpy structure
return structure.xyz
except AttributeError:
raise ValueError("Unable to get positions from structure")
def get_atomic_numbers(structure):
"""Wrapper to get the atomic numbers from different structure classes"""
try: # ASE structure
return structure.get_atomic_numbers()
except AttributeError:
try: # diffpy structure
atomic_numbers, _ = get_atomic_number_symbol(symbol=structure.element)
return atomic_numbers
except AttributeError:
raise ValueError("Unable to get elements from structure")
def is_structure(structure):
"""Check if an object is a structure that javelin can understand.
ase.atoms with have cell, get_scaled_positions and get_atomic_numbers attributes
diffpy.structure with have lattice, xyz, and element attributes
"""
return (((hasattr(structure, 'cell') or hasattr(structure, 'unitcell')) and
hasattr(structure, 'get_scaled_positions') and
hasattr(structure, 'get_atomic_numbers'))
or
(hasattr(structure, 'lattice') and
hasattr(structure, 'xyz') and
hasattr(structure, 'element')))
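# Usage sketch (an assumption, not part of the original module): the wrappers
# above accept ASE, diffpy or javelin structures alike.  With ASE installed
# this would look roughly like:
#
#     from ase import Atoms
#     nacl = Atoms('NaCl', scaled_positions=[[0, 0, 0], [0.5, 0.5, 0.5]],
#                  cell=[5.64, 5.64, 5.64])
#     if is_structure(nacl):
#         cell = get_unitcell(nacl)            # javelin.unitcell.UnitCell
#         positions = get_positions(nacl)      # fractional coordinates
#         numbers = get_atomic_numbers(nacl)   # array([11, 17])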
|
rosswhitfield/javelin
|
javelin/utils.py
|
Python
|
mit
| 4,012
|
[
"ASE"
] |
123c54f09c1beefdbb676860c2ff543f3b187d08c92740fbad007486cfc90ab0
|
########################################################################
# $HeadURL$
# File : HttpStorageAccessHandler.py
# Author : A.T.
########################################################################
""" The HttpStorageAccessHandler is a http server request handler to provide a secure http
access to the DIRAC StorageElement and StorageElementProxy. It is derived from the
SimpleHTTPRequestHandler standard python handler
"""
__RCS__ = "$Id$"
import os
import BaseHTTPServer
import shutil
import random
class HttpStorageAccessHandler(BaseHTTPServer.BaseHTTPRequestHandler):
register = {}
basePath = ''
def do_GET(self):
"""Serve a GET request."""
# Strip off leading slash
key = self.path[1:]
if not self.register.exists(key):
self.send_error(401, "Invalid key provided, access denied")
return None
cache_path = self.register.get(key)
fileList = os.listdir(cache_path)
if len(fileList) == 1:
path = os.path.join(cache_path,fileList[0])
else:
# multiple files, make archive
unique = str( random.getrandbits( 24 ) )
fileString = ' '.join(fileList)
os.system( 'tar -cf %s/dirac_data_%s.tar --remove-files -C %s %s' % (cache_path,unique,cache_path,fileString) )
path = os.path.join(cache_path,'dirac_data_%s.tar' % unique)
f = self.send_head( path )
if f:
shutil.copyfileobj(f, self.wfile)
f.close()
self.register.delete(key)
def send_head(self,path):
""" Prepare headers for the file download
"""
#path = self.translate_path(self.path)
f = None
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", 'application/octet-stream')
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
fname = os.path.basename(path)
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.send_header("Content-Disposition","filename=%s" % fname)
self.end_headers()
return f
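# Sketch of how this handler is typically wired up (an assumption based on the
# docstring; the real service fills `register` with a DictCache-like object
# mapping access keys to cache directories):
#
#     HttpStorageAccessHandler.register = accessKeyCache   # exists/get/delete
#     HttpStorageAccessHandler.basePath = '/tmp/proxyCache'
#     httpd = BaseHTTPServer.HTTPServer(('0.0.0.0', 9180), HttpStorageAccessHandler)
#     httpd.serve_forever()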
|
rajanandakumar/DIRAC
|
DataManagementSystem/private/HttpStorageAccessHandler.py
|
Python
|
gpl-3.0
| 2,383
|
[
"DIRAC"
] |
526550e57f3cd65f7f2e4a6196e7ce182da1ee67035f8d56d57b719407ea182c
|
'''
Spike-timing dependent plasticity
Adapted from Song, Miller and Abbott (2000) and Song and Abbott (2001)
'''
###############################################################################
## PARAMETERS
# select code generation standalone device
devicename = 'cuda_standalone'
# devicename = 'cpp_standalone'
# number of _synapses_ (must be a multiple of 1000)
N = 1000
# select whether spikes affect postsynaptic neurons
post_effects = True
# select whether we have delays ('homogeneous', 'heterogeneous', 'none')
delays = 'none'
# delays = 'homogeneous'
# delays = 'heterogeneous'
# whether to profile run
profiling = True
# folder to store plots and profiling information
resultsfolder = 'results'
# folder for the code
codefolder = 'code'
# monitors (needed for plot generation)
monitors = True
# single precision
single_precision = True
# number of post blocks (None is default)
num_blocks = None
# atomic operations
atomics = True
# push synapse bundles
bundle_mode = True
###############################################################################
## CONFIGURATION
from collections import OrderedDict
# Create parameter dictionary that can be modified from command line
params = OrderedDict([('devicename', devicename),
('delays', delays),
('post_effects', post_effects),
('resultsfolder', resultsfolder),
('codefolder', codefolder),
('N', N),
('profiling', profiling),
('monitors', monitors),
('single_precision', single_precision),
('num_blocks', num_blocks),
('atomics', atomics),
('bundle_mode', bundle_mode)])
# Add parameter restrictions
choices = {'devicename': ['cuda_standalone', 'cpp_standalone'],
'delays': ['none', 'homogeneous', 'heterogeneous']}
from utils import set_prefs, update_from_command_line
# update params from command line
update_from_command_line(params, choices=choices)
for key, options in choices.items():
param = params[key]
assert param in options, \
"Invalid option for {}: {} (choose from {}).".format(key, param,
options)
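# Illustrative invocation (an assumption about the command-line interface
# exposed by utils.update_from_command_line; the exact flag names may differ):
#     python stdp.py --devicename cpp_standalone --N 10000 --delays homogeneous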
# do the imports after parsing command line arguments (quicker --help)
import os
import matplotlib
matplotlib.use('Agg')
from brian2 import *
if params['devicename'] == 'cuda_standalone':
import brian2cuda
# set brian2 prefs from params dict
name = set_prefs(params, prefs)
codefolder = os.path.join(params['codefolder'], name)
print('running example {}'.format(name))
print('compiling model in {}'.format(codefolder))
###############################################################################
## SIMULATION
set_device(params['devicename'], directory=codefolder, compile=True, run=True,
debug=False)
# we draw K_poisson inputs at random out of N_poisson (on average) and connect them to each post neuron
N_poisson = 10000
K_poisson = 1000
connection_probability = float(K_poisson) / N_poisson # 10% connection probability if K_poisson=1000, N_poisson=10000
N_lif = int(params['N'] / K_poisson) # => N specifies the number of synapses or equivalently the number of neurons*1000
taum = 10*ms
taupre = 20*ms
taupost = taupre
Ee = 0*mV
vt = -54*mV
vr = -60*mV
El = -74*mV
taue = 5*ms
F = 15*Hz * (1000./K_poisson) # this scaling is not active here since K_poisson == 1000
gmax = .01
dApre = .01
dApost = -dApre * taupre / taupost * 1.05
dApost *= gmax
dApre *= gmax
assert N_lif * K_poisson == params['N'] # ensure we specify the no of synapses N as a multiple of 1000
eqs_neurons = '''
dv/dt = (ge * (Ee-vr) + El - v) / taum : volt
dge/dt = -ge / taue {} : 1
'''
on_pre = ''
if params['post_effects']:
    # normal mode => poissongroup spikes affect postneurons
eqs_neurons = eqs_neurons.format('')
on_pre += 'ge += w\n'
else:
    # second mode => poissongroup spikes are ineffective for postneurons
# here: white noise process is added with similar mean and variance as
# poissongroup input that is disabled in this case
gsyn = K_poisson * F * gmax / 2. # assuming avg weight gmax/2 which holds approx. true for the bimodal distrib.
eqs_neurons = eqs_neurons.format('+ gsyn + sqrt(gsyn) * xi')
# eqs_neurons = eqs_neurons.format('')
on_pre += '''Apre += dApre
w = clip(w + Apost, 0, gmax)'''
input = PoissonGroup(N_poisson, rates=F)
neurons = NeuronGroup(N_lif, eqs_neurons, threshold='v>vt', reset='v = vr')
S = Synapses(input, neurons,
'''w : 1
dApre/dt = -Apre / taupre : 1 (event-driven)
dApost/dt = -Apost / taupost : 1 (event-driven)''',
on_pre=on_pre,
on_post='''Apost += dApost
w = clip(w + Apre, 0, gmax)'''
)
S.connect(p=connection_probability)
S.w = 'rand() * gmax'
if params['delays'] == 'homogeneous':
S.delay = 2*ms
elif params['delays'] == 'heterogeneous':
S.delay = "2 * 2*ms * rand()"
n = 2
if params['monitors']:
n = 3
mon = StateMonitor(S, 'w', record=[0, 1])
s_mon = SpikeMonitor(input)
run(100*second, report='text', profile=params['profiling'])
if not os.path.exists(params['resultsfolder']):
os.mkdir(params['resultsfolder']) # for plots and profiling txt file
if params['profiling']:
print(profiling_summary())
profilingpath = os.path.join(params['resultsfolder'], '{}.txt'.format(name))
with open(profilingpath, 'w') as profiling_file:
profiling_file.write(str(profiling_summary()))
print('profiling information saved in {}'.format(profilingpath))
subplot(n,1,1)
plot(S.w / gmax, '.k')
ylabel('Weight / gmax')
xlabel('Synapse index')
subplot(n,1,2)
hist(S.w / gmax, 20)
xlabel('Weight / gmax')
if params['monitors']:
subplot(n,1,3)
plot(mon.t/second, mon.w.T/gmax)
xlabel('Time (s)')
ylabel('Weight / gmax')
tight_layout()
#show()
plotpath = os.path.join(params['resultsfolder'], '{}.png'.format(name))
savefig(plotpath)
print('plot saved in {}'.format(plotpath))
print('the generated model in {} needs to removed manually if wanted'.format(codefolder))
|
brian-team/brian2cuda
|
examples/stdp.py
|
Python
|
gpl-2.0
| 6,236
|
[
"NEURON"
] |
7664fc8c438160fab2a6c2738f24fa91e6596d66553e93e17c26de4cbee46ee4
|
"""
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
General purpose:
................
The class StellarMassFunction is a wrapper dedicated to handling the fit of the stellar mass function.
*Imports*::
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
import glob
import pandas as pd
import os
"""
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
import glob
import pandas as pd
import os
import astropy.cosmology as co
cosmo = co.Planck15
class StellarMassFunction:
"""
:param imf_name: choose the `initial mass function <https://en.wikipedia.org/wiki/Initial_mass_function>`_:
* 'ss' for `Salpeter <http://adsabs.harvard.edu/abs/1955ApJ...121..161S>`_or
* 'kr' for `Kroupa <http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:1112.3340>`_ or
* 'cha' for `Chabrier <http://adsabs.harvard.edu/abs/2003PASP..115..763C>`_.
:param params: List of parameters that characterize the mass function
:param cosmo: cosmology class to be considered. Default Planck 15.
Notes
-----
.. note::
* Mass function : number of stars, :math:`N`, in a volume, :math:`V`, observed at time, :math:`t`, in the logarithmic mass interval, :math:`dlogm`. :math:`\xi(log(m)) = d(N/V)/d(log(m))`. It is coded in self.mass_function(logm, params )
* Mass spectrum : number density per mass interval :math:`\xi(m)=\xi(\log(m))/(m * ln(m))` It is coded in self.mass_spectrum(m, params)
* the present day mass function is related to the observed luminosity function via magnitude - mass relationships.
* the initial mass function differs from the present day mass function by the evolution of the massive star. In the 'scalo86' parametrization it does differ. In the 'chabrier03' parametrization it does not.
"""
    def __init__(self, imf_name = 'salpeter', params=np.array([1e-3, -2.35]), cosmo = cosmo):
self.cosmo = cosmo
self.imf_name = imf_name
self.params = params
#
if imf_name == 'salpeter':
# power law as defined by Salpeter (1955) :
self.mass_function = lambda logm : 0.001 * (10**logm)**-2.35 * (u.parsec)**(-3.)
if imf_name == 'scalo86':
            self.mass_function = lambda logm : np.piecewise(logm,
                [(0 <= logm) & (logm <= 0.54), (0.54 < logm) & (logm <= 1.26), (1.26 < logm) & (logm <= 1.80)],
                [lambda logm : (10**logm)**(-4.37) * 0.044 * (u.parsec)**(-3.),
                lambda logm : (10**logm)**(-3.53) * 0.015 * (u.parsec)**(-3.),
                lambda logm : (10**logm)**(-2.11) * 0.0025 * (u.parsec)**(-3.)]
                )
            self.present_day_mf = lambda logm : self.mass_function(logm)
self.initial_mf = lambda logm : (10**logm)**(-1.3) * 0.0443 * (u.parsec)**(-3.)
if imf_name == 'chabrier03':
            self.mass_function = lambda logm : 0.158 * np.e**(- (logm - np.log10(0.079))**2./ (2*0.69**2.) ) * (u.parsec)**(-3.)
            self.present_day_mf = lambda logm : self.mass_function(logm)
            self.initial_mf = lambda logm : self.mass_function(logm)
        self.mass_spectrum = lambda m : self.mass_function(np.log10(m/u.solMass)) / (m * np.log(10)) / u.solMass
        self.creation_function_logm = lambda logm, t : logm*t * (u.parsec)**(-3.)
self.present_day_mf_logm = lambda m, t : m*t * (u.parsec)**(-3.)
        self.initial_mf_logm = lambda m, t : m*t * (u.parsec)**(-3.)
self.creation_function = lambda m, t : m*t * (u.parsec)**(-3.)
self.present_day_mf = lambda m, t : m*t * (u.parsec)**(-3.)
self.initial_mf = lambda m, t : m*t * (u.parsec)**(-3.)
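    # Usage sketch (not part of the original class): evaluate the Salpeter
    # mass function on a grid of log-masses; parameter values are the
    # defaults defined above.
    #
    #     smf = StellarMassFunction(imf_name='salpeter')
    #     logm = np.linspace(-1., 2., 50)        # log10(M / Msun)
    #     phi = smf.mass_function(logm)          # number density per pc^3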
def get_model(self, model_used, imf_used, deltal, vdisp, wave_instrument, r_instrument, ebv_mw):
"""
Retrieves all relevant model files, in their downgraded format.
If they aren't downgraded to the correct resolution / velocity dispersion,
takes the base models in their native form and converts to downgraded files.
:param model_used: list of models to be used, for example ['m11', 'm09'].
:param imf_used: list of imf to be used, for example ['ss', 'cha'].
:param deltal: delta lambda in the models.
:param vdisp: velocity dispersion observed in the galaxy.
:param wave_instrument: wavelength array from the observations
:param r_instrument: resolution array from the observations
:param ebv_mw: E(B-V) from the dust maps for the galaxy.
Workflow
----------
A. loads the models m11 or m09: maps parameters to the right files. Then it constructs the model array. Finally converts wavelengths to air or vacuum.
B. downgrades the model to match data resolution
C. applies attenuation
D. stores models in
self.model_wavelength,
self.model_flux,
self.age_model,
self.metal_model
and returns it as well
"""
return True
|
JohanComparat/pySU
|
spm/python/StellarMassFunction.py
|
Python
|
cc0-1.0
| 4,619
|
[
"Galaxy"
] |
3d4eac3eccd3ab6773c0662adf825951fce455157686ab96f1d2b1eeccfd680f
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('show_dataset_provenance')
@click.argument("history_id", type=str)
@click.argument("dataset_id", type=str)
@click.option(
"--follow",
help="If ``True``, recursively fetch dataset provenance information for all inputs and their inputs, etc.",
is_flag=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, history_id, dataset_id, follow=False):
"""Get details related to how dataset was created (``id``, ``job_id``, ``tool_id``, ``stdout``, ``stderr``, ``parameters``, ``inputs``, etc...).
Output:
Dataset provenance information
For example::
{'id': '6fbd9b2274c62ebe',
'job_id': '5471ba76f274f929',
'parameters': {'chromInfo': '"/usr/local/galaxy/galaxy-dist/tool-data/shared/ucsc/chrom/mm9.len"',
'dbkey': '"mm9"',
'experiment_name': '"H3K4me3_TAC_MACS2"',
'input_chipseq_file1': {'id': '6f0a311a444290f2',
'uuid': 'null'},
'input_control_file1': {'id': 'c21816a91f5dc24e',
'uuid': '16f8ee5e-228f-41e2-921e-a07866edce06'},
'major_command': '{"gsize": "2716965481.0", "bdg": "False", "__current_case__": 0, "advanced_options": {"advanced_options_selector": "off", "__current_case__": 1}, "input_chipseq_file1": 104715, "xls_to_interval": "False", "major_command_selector": "callpeak", "input_control_file1": 104721, "pq_options": {"pq_options_selector": "qvalue", "qvalue": "0.05", "__current_case__": 1}, "bw": "300", "nomodel_type": {"nomodel_type_selector": "create_model", "__current_case__": 1}}'},
'stderr': '',
'stdout': '',
'tool_id': 'toolshed.g2.bx.psu.edu/repos/ziru-zhou/macs2/modencode_peakcalling_macs2/2.0.10.2',
'uuid': '5c0c43f5-8d93-44bd-939d-305e82f213c6'}
"""
return ctx.gi.histories.show_dataset_provenance(history_id, dataset_id, follow=follow)
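# Hedged usage example (command layout assumed from the module path
# parsec/commands/histories/show_dataset_provenance.py; the IDs are
# placeholders):
#
#     parsec histories show_dataset_provenance <history_id> <dataset_id> --follow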
|
galaxy-iuc/parsec
|
parsec/commands/histories/show_dataset_provenance.py
|
Python
|
apache-2.0
| 2,190
|
[
"Galaxy"
] |
2ca96e455046bdf05f8de2e75ce62eaf4554b93093af28ff6bbbc80f88133597
|